diff -ruw linux-6.4/Makefile linux-6.4-fbx/Makefile
--- linux-6.4/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/Makefile	2024-04-19 16:21:06.808998072 +0200
@@ -386,6 +386,8 @@
 # CROSS_COMPILE can be set on the command line
 # make CROSS_COMPILE=ia64-linux-
 # Alternatively CROSS_COMPILE can be set in the environment.
+# A third alternative is to store a setting in .config so that plain
+# "make" in the configured kernel build directory always uses that.
 # Default value for CROSS_COMPILE is not to prefix executables
 # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
 ARCH		?= $(SUBARCH)
@@ -423,6 +425,9 @@
 KCONFIG_CONFIG	?= .config
 export KCONFIG_CONFIG
 
+CONFIG_CROSS_COMPILE := $(shell grep ^CONFIG_CROSS_COMPILE= $(KCONFIG_CONFIG) | cut -f 2 -d = | tr -d '"')
+CROSS_COMPILE	?= $(CONFIG_CROSS_COMPILE:"%"=%)
+
 # SHELL used by kbuild
 CONFIG_SHELL := sh
 
@@ -1366,7 +1371,7 @@
 quiet_cmd_headers_install = INSTALL $(INSTALL_HDR_PATH)/include
       cmd_headers_install = \
 	mkdir -p $(INSTALL_HDR_PATH); \
-	rsync -mrl --include='*/' --include='*\.h' --exclude='*' \
+	rsync -cmrl --include='*/' --include='*\.h' --exclude='*' \
 	usr/include $(INSTALL_HDR_PATH)
 
 PHONY += headers_install
diff -ruw linux-6.4/arch/arm64/Kconfig linux-6.4-fbx/arch/arm64/Kconfig
--- linux-6.4/arch/arm64/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/arch/arm64/Kconfig	2024-04-19 16:21:06.808998072 +0200
@@ -2327,4 +2327,3 @@
 source "drivers/acpi/Kconfig"
 
 source "arch/arm64/kvm/Kconfig"
-
diff -ruw linux-6.4/arch/arm64/Kconfig.platforms linux-6.4-fbx/arch/arm64/Kconfig.platforms
--- linux-6.4/arch/arm64/Kconfig.platforms	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/arch/arm64/Kconfig.platforms	2023-11-16 15:32:31.909611440 +0100
@@ -63,6 +63,8 @@
 config ARCH_BCMBCA
 	bool "Broadcom Broadband Carrier Access (BCA) origin SoC"
 	select GPIOLIB
+	select PINCTRL
+	select PINCTRL_BCM63138
 	help
 	  Say Y if you intend to run the kernel on a Broadcom Broadband ARM-based
 	  BCA chipset.
@@ -78,6 +80,14 @@
 	help
 	  This enables support for Broadcom's ARMv8 Set Top Box SoCs
 
+config ARCH_BCM63XX_SHARED_OSH
+	bool "Make shared pages and translation table walks outer shareable"
+	depends on ARCH_BCMBCA
+	default y
+	help
+	  This is required for HW coherency on bcm63158. Say Y here if
+	  you are compiling a kernel for a bcm63158 board.
+
 endif
 
 config ARCH_BERLIN
@@ -243,6 +253,16 @@
 	help
 	  This enables support for the ARMv8 based Qualcomm chipsets.
 
+config ARCH_QCOM_DTB
+	bool "build qualcomm platforms DTB"
+	depends on ARCH_QCOM
+	default y
+
+config ARCH_QCOM_FBX_DTB
+	bool "build freebox DTB on qualcomm platform"
+	depends on ARCH_QCOM
+	default y
+
 config ARCH_REALTEK
 	bool "Realtek Platforms"
 	select RESET_CONTROLLER
diff -ruw linux-6.4/arch/arm64/boot/Makefile linux-6.4-fbx/arch/arm64/boot/Makefile
--- linux-6.4/arch/arm64/boot/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/arch/arm64/boot/Makefile	2023-05-22 20:06:36.515648243 +0200
@@ -48,3 +48,5 @@
 				$(NM) vmlinux|grep _kernel_codesize|cut -d' ' -f1)
 
 include $(srctree)/drivers/firmware/efi/libstub/Makefile.zboot
+
+subdir-y += dts/
diff -ruw linux-6.4/arch/arm64/boot/dts/amlogic/Makefile linux-6.4-fbx/arch/arm64/boot/dts/amlogic/Makefile
--- linux-6.4/arch/arm64/boot/dts/amlogic/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/arch/arm64/boot/dts/amlogic/Makefile	2023-11-27 19:13:52.958343154 +0100
@@ -1,8 +1,15 @@
 # SPDX-License-Identifier: GPL-2.0
+fbx-boards += \
+	fbxwmr.dtb \
+	fbxwmr-r1.dtb fbxwmr-r2.dtb \
+	fbxwmr-r3.dtb fbxwmr-r4.dtb
+
 dtb-$(CONFIG_ARCH_MESON) += meson-a1-ad401.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-axg-jethome-jethub-j100.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-axg-jethome-jethub-j110-rev-2.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-axg-jethome-jethub-j110-rev-3.dtb
+DTC_FLAGS += -@
+dtb-$(CONFIG_ARCH_MESON) += $(fbx-boards)
 dtb-$(CONFIG_ARCH_MESON) += meson-axg-s400.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12a-radxa-zero.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12a-sei510.dtb
diff -ruw linux-6.4/arch/arm64/boot/dts/broadcom/Makefile linux-6.4-fbx/arch/arm64/boot/dts/broadcom/Makefile
--- linux-6.4/arch/arm64/boot/dts/broadcom/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/arch/arm64/boot/dts/broadcom/Makefile	2023-05-22 20:06:36.535648775 +0200
@@ -11,3 +11,4 @@
 subdir-y	+= bcmbca
 subdir-y	+= northstar2
 subdir-y	+= stingray
+subdir-y	+= bcm63xx
diff -ruw linux-6.4/arch/arm64/boot/dts/marvell/Makefile linux-6.4-fbx/arch/arm64/boot/dts/marvell/Makefile
--- linux-6.4/arch/arm64/boot/dts/marvell/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/arch/arm64/boot/dts/marvell/Makefile	2023-05-22 20:06:36.575649839 +0200
@@ -27,3 +27,19 @@
 dtb-$(CONFIG_ARCH_MVEBU) += cn9130-crb-A.dtb
 dtb-$(CONFIG_ARCH_MVEBU) += cn9130-crb-B.dtb
 dtb-$(CONFIG_ARCH_MVEBU) += ac5-98dx35xx-rd.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp1_dsl_lte.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp1_ftth_p2p.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp2_ftth_p2p.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp2_ftth_pon.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp1_test_module.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp2_test_module.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_pcie_pine_pericom.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_pcie_pine_asmedia.dtb
+
+dtb-$(CONFIG_ARCH_MVEBU) += jbxgw7r.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += jbxgw7r_exp1_ftth_p2p.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += jbxgw7r_exp2_ftth_p2p.dtb
+
+# export symbols in the DTB files to allow overlay usage
+DTC_FLAGS	+= -@
diff -ruw linux-6.4/arch/arm64/boot/dts/qcom/Makefile linux-6.4-fbx/arch/arm64/boot/dts/qcom/Makefile
--- linux-6.4/arch/arm64/boot/dts/qcom/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/arch/arm64/boot/dts/qcom/Makefile	2023-11-24 18:39:54.353042707 +0100
@@ -1,204 +1,208 @@
 # SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8016-sbc.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8094-sony-xperia-kitakami-karin_windy.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8096-db820c.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8096-ifc6640.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq5332-mi01.2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq5332-rdp468.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq6018-cp01-c1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq8074-hk01.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq8074-hk10-c1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq8074-hk10-c2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq9574-al02-c7.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-acer-a1-724.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-alcatel-idol347.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-asus-z00l.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-gplus-fl8005a.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-huawei-g7.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-longcheer-l8150.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-longcheer-l8910.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-a3u-eur.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-a5u-eur.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-e5.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-e7.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-grandmax.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-gt510.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-gt58.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-j5.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-j5x.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-serranove.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-thwc-uf896.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-thwc-ufi001c.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-wingtech-wt88047.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-yiming-uz801v3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-motorola-potter.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-xiaomi-daisy.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-xiaomi-mido.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-xiaomi-tissot.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-xiaomi-vince.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8956-sony-xperia-loire-kugo.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8956-sony-xperia-loire-suzu.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-lg-bullhead-rev-10.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-lg-bullhead-rev-101.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-msft-lumia-octagon-talkman.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-xiaomi-libra.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-huawei-angler-rev-101.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-msft-lumia-octagon-cityman.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-ivy.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-karin.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-satsuki.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-sumire.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-suzuran.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-oneplus3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-oneplus3t.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-sony-xperia-tone-dora.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-sony-xperia-tone-kagura.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-sony-xperia-tone-keyaki.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-xiaomi-gemini.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996pro-xiaomi-natrium.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996pro-xiaomi-scorpio.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-asus-novago-tp370ql.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-fxtec-pro1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-hp-envy-x2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-lenovo-miix-630.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-oneplus-cheeseburger.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-oneplus-dumpling.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-sony-xperia-yoshino-lilac.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-sony-xperia-yoshino-maple.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-sony-xperia-yoshino-poplar.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-xiaomi-sagit.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcs404-evb-1000.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcs404-evb-4000.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qdu1000-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qrb2210-rb1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qrb4210-rb2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qrb5165-rb5.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qrb5165-rb5-vision-mezzanine.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qru1000-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8155p-adp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8295p-adp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8540p-ride.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8775p-ride.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-coachz-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-coachz-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-coachz-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-coachz-r3-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-homestar-r2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-homestar-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-homestar-r4.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-kingoftown.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r1-kb.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r3-kb.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r3-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r9.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r9-kb.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r9-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-r4.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-r9.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-nots-r4.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-nots-r5.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-nots-r9.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel-lte-parade.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel-lte-ti.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel-parade.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel-ti.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel360-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel360-wifi.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r2-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r3-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-quackingstick-r0.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-quackingstick-r0-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-wormdingler-rev1-boe.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-wormdingler-rev1-inx.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-wormdingler-rev1-inx-rt5682s.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-wormdingler-rev1-boe-rt5682s.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-crd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-crd-pro.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-evoker.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-evoker-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-herobrine-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-villager-r0.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-villager-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-villager-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-zombie.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-zombie-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-zombie-nvme.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-zombie-nvme-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-idp2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-crd-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc8280xp-crd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc8280xp-lenovo-thinkpad-x13s.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sda660-inforce-ifc6560.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm450-motorola-ali.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm630-sony-xperia-ganges-kirin.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm630-sony-xperia-nile-discovery.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm630-sony-xperia-nile-pioneer.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm630-sony-xperia-nile-voyager.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm632-fairphone-fp3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm632-motorola-ocean.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm636-sony-xperia-ganges-mermaid.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm660-xiaomi-lavender.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm670-google-sargo.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-db845c.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-db845c-navigation-mezzanine.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-lg-judyln.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-lg-judyp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-oneplus-enchilada.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-oneplus-fajita.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-samsung-starqltechn.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-sony-xperia-tama-akari.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-sony-xperia-tama-akatsuki.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-sony-xperia-tama-apollo.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-xiaomi-beryllium-ebbg.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-xiaomi-beryllium-tianma.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-xiaomi-polaris.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-shift-axolotl.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm850-lenovo-yoga-c630.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm850-samsung-w737.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm4250-oneplus-billie2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6115p-lenovo-j606f.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6125-sony-xperia-seine-pdx201.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6125-xiaomi-laurel-sprout.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6350-sony-xperia-lena-pdx213.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6375-sony-xperia-murray-pdx225.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm7225-fairphone-fp4.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-microsoft-surface-duo.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-sony-xperia-kumano-bahamut.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-sony-xperia-kumano-griffin.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-sony-xperia-edo-pdx203.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-sony-xperia-edo-pdx206.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-xiaomi-elish-boe.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-xiaomi-elish-csot.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-microsoft-surface-duo2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-sony-xperia-sagami-pdx214.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-sony-xperia-sagami-pdx215.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8450-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8450-qrd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8450-sony-xperia-nagara-pdx223.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8450-sony-xperia-nagara-pdx224.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8550-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8550-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8016-sbc.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8094-sony-xperia-kitakami-karin_windy.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8096-db820c.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8096-ifc6640.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq5332-mi01.2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq5332-rdp468.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq6018-cp01-c1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq8074-hk01.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq8074-hk10-c1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq8074-hk10-c2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp418.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp433.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp449.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp454.dtb
+dtb-$(CONFIG_ARCH_QCOM_FBX_DTB)	+= fbxgw9r.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-acer-a1-724.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-alcatel-idol347.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-asus-z00l.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-gplus-fl8005a.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-huawei-g7.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-longcheer-l8150.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-longcheer-l8910.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-a3u-eur.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-a5u-eur.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-e5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-e7.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-grandmax.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-gt510.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-gt58.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-j5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-j5x.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-serranove.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-thwc-uf896.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-thwc-ufi001c.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-wingtech-wt88047.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-yiming-uz801v3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-motorola-potter.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-xiaomi-daisy.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-xiaomi-mido.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-xiaomi-tissot.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-xiaomi-vince.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8956-sony-xperia-loire-kugo.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8956-sony-xperia-loire-suzu.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-lg-bullhead-rev-10.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-lg-bullhead-rev-101.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-msft-lumia-octagon-talkman.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-xiaomi-libra.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-huawei-angler-rev-101.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-msft-lumia-octagon-cityman.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-ivy.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-karin.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-satsuki.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-sumire.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-suzuran.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-oneplus3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-oneplus3t.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-sony-xperia-tone-dora.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-sony-xperia-tone-kagura.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-sony-xperia-tone-keyaki.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-xiaomi-gemini.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996pro-xiaomi-natrium.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996pro-xiaomi-scorpio.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-asus-novago-tp370ql.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-fxtec-pro1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-hp-envy-x2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-lenovo-miix-630.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-oneplus-cheeseburger.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-oneplus-dumpling.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-sony-xperia-yoshino-lilac.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-sony-xperia-yoshino-maple.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-sony-xperia-yoshino-poplar.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-xiaomi-sagit.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcs404-evb-1000.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcs404-evb-4000.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qdu1000-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qrb2210-rb1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qrb4210-rb2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qrb5165-rb5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qrb5165-rb5-vision-mezzanine.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qru1000-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8155p-adp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8295p-adp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8540p-ride.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8775p-ride.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-coachz-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-coachz-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-coachz-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-coachz-r3-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-homestar-r2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-homestar-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-homestar-r4.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-kingoftown.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r1-kb.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r3-kb.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r3-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r9.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r9-kb.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r9-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-r4.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-r9.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-nots-r4.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-nots-r5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-nots-r9.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel-lte-parade.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel-lte-ti.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel-parade.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel-ti.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel360-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel360-wifi.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r2-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r3-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-quackingstick-r0.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-quackingstick-r0-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-wormdingler-rev1-boe.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-wormdingler-rev1-inx.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-wormdingler-rev1-inx-rt5682s.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-wormdingler-rev1-boe-rt5682s.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-crd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-crd-pro.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-evoker.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-evoker-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-herobrine-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-villager-r0.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-villager-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-villager-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-zombie.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-zombie-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-zombie-nvme.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-zombie-nvme-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-idp2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-crd-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc8280xp-crd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc8280xp-lenovo-thinkpad-x13s.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sda660-inforce-ifc6560.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm450-motorola-ali.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm630-sony-xperia-ganges-kirin.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm630-sony-xperia-nile-discovery.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm630-sony-xperia-nile-pioneer.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm630-sony-xperia-nile-voyager.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm632-fairphone-fp3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm632-motorola-ocean.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm636-sony-xperia-ganges-mermaid.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm660-xiaomi-lavender.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm670-google-sargo.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-cheza-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-cheza-r2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-cheza-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-db845c.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-db845c-navigation-mezzanine.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-lg-judyln.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-lg-judyp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-oneplus-enchilada.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-oneplus-fajita.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-samsung-starqltechn.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-sony-xperia-tama-akari.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-sony-xperia-tama-akatsuki.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-sony-xperia-tama-apollo.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-xiaomi-beryllium-ebbg.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-xiaomi-beryllium-tianma.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-xiaomi-polaris.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-shift-axolotl.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm850-lenovo-yoga-c630.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm850-samsung-w737.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm4250-oneplus-billie2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6115p-lenovo-j606f.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6125-sony-xperia-seine-pdx201.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6125-xiaomi-laurel-sprout.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6350-sony-xperia-lena-pdx213.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6375-sony-xperia-murray-pdx225.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm7225-fairphone-fp4.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-microsoft-surface-duo.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-sony-xperia-kumano-bahamut.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-sony-xperia-kumano-griffin.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-sony-xperia-edo-pdx203.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-sony-xperia-edo-pdx206.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-xiaomi-elish-boe.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-xiaomi-elish-csot.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-microsoft-surface-duo2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-sony-xperia-sagami-pdx214.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-sony-xperia-sagami-pdx215.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8450-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8450-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8450-sony-xperia-nagara-pdx223.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8450-sony-xperia-nagara-pdx224.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8550-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8550-qrd.dtb
diff -ruw linux-6.4/arch/arm64/boot/dts/qcom/ipq9574.dtsi linux-6.4-fbx/arch/arm64/boot/dts/qcom/ipq9574.dtsi
--- linux-6.4/arch/arm64/boot/dts/qcom/ipq9574.dtsi	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/arch/arm64/boot/dts/qcom/ipq9574.dtsi	2024-04-19 15:59:31.193600561 +0200
@@ -6,9 +6,13 @@
  * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
-#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,apss-ipq.h>
 #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/reset/qcom,ipq9574-gcc.h>
+#include <dt-bindings/clock/qcom,nsscc-ipq9574.h>
+#include <dt-bindings/reset/qcom,nsscc-ipq9574.h>
+#include <dt-bindings/clock/qcom,uniphycc-ipq9574.h>
 
 / {
 	interrupt-parent = <&intc>;
@@ -16,23 +20,50 @@
 	#size-cells = <2>;
 
 	clocks {
-		bias_pll_ubi_nc_clk: bias-pll-ubi-nc-clk {
+		sleep_clk: sleep-clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+		};
+
+		xo_board_clk: xo-board-clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+		};
+
+		bias_pll_ubi_nc_clk: bias_pll_ubi_nc_clk {
 			compatible = "fixed-clock";
 			clock-frequency = <353000000>;
 			#clock-cells = <0>;
 		};
 
-		sleep_clk: sleep-clk {
+		bias_pll_cc_clk: bias_pll_cc_clk {
 			compatible = "fixed-clock";
+			clock-frequency = <1200000000>;
 			#clock-cells = <0>;
 		};
 
-		xo_board_clk: xo-board-clk {
+		bias_pll_nss_noc_clk: bias_pll_nss_noc_clk {
+			compatible = "fixed-clock";
+			clock-frequency = <461500000>;
+			#clock-cells = <0>;
+		};
+
+		gcc_gpll0_out_aux: gcc_gpll0_out_aux {
 			compatible = "fixed-clock";
+			clock-frequency = <800000000>;
 			#clock-cells = <0>;
 		};
 	};
 
+	imem_reset_reason: imem-reset-reason {
+		status = "disabled";
+		compatible = "qcom,imem-reset-reason-ipq9574",
+			"qcom-imem-reset-reason";
+
+		reg = <0x0 0x086006bc 0x0 0x78>;
+		reg-names = "imem";
+	};
+
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -43,6 +74,10 @@
 			reg = <0x0>;
 			enable-method = "psci";
 			next-level-cache = <&L2_0>;
+			clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>;
+			clock-names = "cpu";
+			operating-points-v2 = <&cpu_opp_table>;
+			cpu-supply = <&ipq9574_s1>;
 		};
 
 		CPU1: cpu@1 {
@@ -51,6 +86,10 @@
 			reg = <0x1>;
 			enable-method = "psci";
 			next-level-cache = <&L2_0>;
+			clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>;
+			clock-names = "cpu";
+			operating-points-v2 = <&cpu_opp_table>;
+			cpu-supply = <&ipq9574_s1>;
 		};
 
 		CPU2: cpu@2 {
@@ -59,6 +98,10 @@
 			reg = <0x2>;
 			enable-method = "psci";
 			next-level-cache = <&L2_0>;
+			clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>;
+			clock-names = "cpu";
+			operating-points-v2 = <&cpu_opp_table>;
+			cpu-supply = <&ipq9574_s1>;
 		};
 
 		CPU3: cpu@3 {
@@ -67,6 +110,10 @@
 			reg = <0x3>;
 			enable-method = "psci";
 			next-level-cache = <&L2_0>;
+			clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>;
+			clock-names = "cpu";
+			operating-points-v2 = <&cpu_opp_table>;
+			cpu-supply = <&ipq9574_s1>;
 		};
 
 		L2_0: l2-cache {
@@ -82,6 +129,58 @@
 		reg = <0x0 0x40000000 0x0 0x0>;
 	};
 
+	cpu_opp_table: opp-table-cpu {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp-936000000 {
+			opp-hz = /bits/ 64 <936000000>;
+			opp-microvolt = <725000>;
+			clock-latency-ns = <200000>;
+		};
+
+		opp-1104000000 {
+			opp-hz = /bits/ 64 <1104000000>;
+			opp-microvolt = <787500>;
+			clock-latency-ns = <200000>;
+		};
+
+		opp-1416000000 {
+			opp-hz = /bits/ 64 <1416000000>;
+			opp-microvolt = <862500>;
+			clock-latency-ns = <200000>;
+		};
+
+		opp-1488000000 {
+			opp-hz = /bits/ 64 <1488000000>;
+			opp-microvolt = <925000>;
+			clock-latency-ns = <200000>;
+		};
+
+		opp-1800000000 {
+			opp-hz = /bits/ 64 <1800000000>;
+			opp-microvolt = <987500>;
+			clock-latency-ns = <200000>;
+		};
+
+		opp-2208000000 {
+			opp-hz = /bits/ 64 <2208000000>;
+			opp-microvolt = <1062500>;
+			clock-latency-ns = <200000>;
+		};
+	};
+
+	firmware {
+		scm {
+			compatible = "qcom,scm-ipq9574", "qcom,scm";
+			qcom,dload-mode = <&tcsr 0x6100>;
+		};
+
+		qfprom {
+			compatible = "qcom,qfprom-sec";
+		};
+	};
+
 	pmu {
 		compatible = "arm,cortex-a73-pmu";
 		interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
@@ -92,6 +191,33 @@
 		method = "smc";
 	};
 
+	reg_usb_3p3: s3300 {
+		compatible = "regulator-fixed";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+		regulator-always-on;
+		regulator-name = "usb-phy-vdd-dummy";
+	};
+
+	reg_usb_1p8: s1800 {
+		compatible = "regulator-fixed";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-boot-on;
+		regulator-always-on;
+		regulator-name = "usb-phy-pll-dummy";
+	};
+
+	reg_usb_0p925: s0925 {
+		compatible = "regulator-fixed";
+		regulator-min-microvolt = <925000>;
+		regulator-max-microvolt = <925000>;
+		regulator-boot-on;
+		regulator-always-on;
+		regulator-name = "usb-phy-dummy";
+	};
+
 	reserved-memory {
 		#address-cells = <2>;
 		#size-cells = <2>;
@@ -101,6 +227,25 @@
 			reg = <0x0 0x4a600000 0x0 0x400000>;
 			no-map;
 		};
+
+		smem@4aa00000 {
+			compatible = "qcom,smem";
+			reg = <0x0 0x4aa00000 0x0 0x00100000>;
+			hwlocks = <&tcsr_mutex 0>;
+			no-map;
+		};
+	};
+
+	rpm-glink {
+		compatible = "qcom,glink-rpm";
+		interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+		qcom,rpm-msg-ram = <&rpm_msg_ram>;
+		mboxes = <&apcs_glb 0>;
+
+		rpm_requests: rpm-requests {
+			compatible = "qcom,rpm-ipq9574";
+			qcom,glink-channels = "rpm_requests";
+		};
 	};
 
 	soc: soc@0 {
@@ -109,6 +254,232 @@
 		#size-cells = <1>;
 		ranges = <0 0 0 0xffffffff>;
 
+		rpm_msg_ram: sram@60000 {
+			compatible = "qcom,rpm-msg-ram";
+			reg = <0x00060000 0x6000>;
+		};
+
+		usb_0_qusbphy: phy@7b000 {
+			compatible = "qcom,ipq9574-qusb2-phy";
+			reg = <0x0007b000 0x180>;
+			#phy-cells = <0>;
+
+			clocks = <&gcc GCC_USB0_PHY_CFG_AHB_CLK>,
+				 <&xo_board_clk>;
+			clock-names = "cfg_ahb",
+				      "ref";
+
+			vdd-supply = <&reg_usb_0p925>;
+			vdda-pll-supply = <&reg_usb_1p8>;
+			vdda-phy-dpdm-supply = <&reg_usb_3p3>;
+
+			resets = <&gcc GCC_QUSB2_0_PHY_BCR>;
+			status = "disabled";
+		};
+
+		usb_0_qmpphy: phy@7d000 {
+			compatible = "qcom,ipq9574-qmp-usb3-phy";
+			reg = <0x0007d000 0xa00>;
+			#phy-cells = <0>;
+
+			clocks = <&gcc GCC_USB0_AUX_CLK>,
+				 <&xo_board_clk>,
+				 <&gcc GCC_USB0_PHY_CFG_AHB_CLK>,
+				 <&gcc GCC_USB0_PIPE_CLK>;
+			clock-names = "aux",
+				      "ref",
+				      "cfg_ahb",
+				      "pipe";
+
+			resets = <&gcc GCC_USB0_PHY_BCR>,
+				 <&gcc GCC_USB3PHY_0_PHY_BCR>;
+			reset-names = "phy",
+				      "phy_phy";
+
+			vdda-pll-supply = <&reg_usb_1p8>;
+			vdda-phy-supply = <&reg_usb_0p925>;
+
+			status = "disabled";
+
+			#clock-cells = <0>;
+			clock-output-names = "usb0_pipe_clk";
+		};
+
+		pcie0_phy: phy@84000 {
+			compatible = "qcom,ipq9574-qmp-gen3x1-pcie-phy";
+			reg = <0x00084000 0x1000>;
+
+			clocks = <&gcc GCC_PCIE0_AUX_CLK>,
+				 <&gcc GCC_PCIE0_AHB_CLK>,
+				 <&gcc GCC_ANOC_PCIE0_1LANE_M_CLK>,
+				 <&gcc GCC_SNOC_PCIE0_1LANE_S_CLK>,
+				 <&gcc GCC_PCIE0_PIPE_CLK>;
+			clock-names = "aux", "cfg_ahb", "anoc_lane", "snoc_lane", "pipe";
+
+			assigned-clocks = <&gcc GCC_PCIE0_AUX_CLK>;
+			assigned-clock-rates = <20000000>;
+
+			resets = <&gcc GCC_PCIE0_PHY_BCR>,
+				 <&gcc GCC_PCIE0PHY_PHY_BCR>;
+			reset-names = "phy", "common";
+
+			#clock-cells = <0>;
+			clock-output-names = "gcc_pcie0_pipe_clk_src";
+
+			#phy-cells = <0>;
+			status = "disabled";
+
+		};
+
+		pcie2_phy: phy@8c000 {
+			compatible = "qcom,ipq9574-qmp-gen3x2-pcie-phy";
+			reg = <0x0008c000 0x2000>;
+
+			clocks = <&gcc GCC_PCIE2_AUX_CLK>,
+				 <&gcc GCC_PCIE2_AHB_CLK>,
+				 <&gcc GCC_ANOC_PCIE2_2LANE_M_CLK>,
+				 <&gcc GCC_SNOC_PCIE2_2LANE_S_CLK>,
+				 <&gcc GCC_PCIE2_PIPE_CLK>;
+			clock-names = "aux", "cfg_ahb", "anoc_lane", "snoc_lane", "pipe";
+
+			assigned-clocks = <&gcc GCC_PCIE2_AUX_CLK>;
+			assigned-clock-rates = <20000000>;
+
+			resets = <&gcc GCC_PCIE2_PHY_BCR>,
+				 <&gcc GCC_PCIE2PHY_PHY_BCR>;
+			reset-names = "phy", "common";
+
+			#clock-cells = <0>;
+			clock-output-names = "gcc_pcie2_pipe_clk_src";
+
+			#phy-cells = <0>;
+			status = "disabled";
+
+		};
+
+		mdio: mdio@90000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "qcom,ipq9574-mdio";
+			reg = <0x90000 0x64>;
+			clocks = <&gcc GCC_MDIO_AHB_CLK>;
+			clock-names = "gcc_mdio_ahb_clk";
+			status = "disabled";
+		};
+
+		rng: rng@e3000 {
+			compatible = "qcom,prng-ee";
+			reg = <0x000e3000 0x1000>;
+			clocks = <&gcc GCC_PRNG_AHB_CLK>;
+			clock-names = "core";
+		};
+
+		pcie3_phy: phy@f4000 {
+			compatible = "qcom,ipq9574-qmp-gen3x2-pcie-phy";
+			reg = <0x000f4000 0x2000>;
+
+			clocks = <&gcc GCC_PCIE3_AUX_CLK>,
+				 <&gcc GCC_PCIE3_AHB_CLK>,
+				 <&gcc GCC_ANOC_PCIE3_2LANE_M_CLK>,
+				 <&gcc GCC_SNOC_PCIE3_2LANE_S_CLK>,
+				 <&gcc GCC_PCIE3_PIPE_CLK>;
+			clock-names = "aux", "cfg_ahb", "anoc_lane", "snoc_lane", "pipe";
+
+			assigned-clocks = <&gcc GCC_PCIE3_AUX_CLK>;
+			assigned-clock-rates = <20000000>;
+
+			resets = <&gcc GCC_PCIE3_PHY_BCR>,
+				 <&gcc GCC_PCIE3PHY_PHY_BCR>;
+			reset-names = "phy", "common";
+
+			#clock-cells = <0>;
+			clock-output-names = "gcc_pcie3_pipe_clk_src";
+
+			#phy-cells = <0>;
+			status = "disabled";
+
+		};
+
+		pcie1_phy: phy@fc000 {
+			compatible = "qcom,ipq9574-qmp-gen3x1-pcie-phy";
+			reg = <0x000fc000 0x1000>;
+
+			clocks = <&gcc GCC_PCIE1_AUX_CLK>,
+				 <&gcc GCC_PCIE1_AHB_CLK>,
+				 <&gcc GCC_ANOC_PCIE1_1LANE_M_CLK>,
+				 <&gcc GCC_SNOC_PCIE1_1LANE_S_CLK>,
+				 <&gcc GCC_PCIE1_PIPE_CLK>;
+			clock-names = "aux", "cfg_ahb", "anoc_lane", "snoc_lane", "pipe";
+
+			assigned-clocks = <&gcc GCC_PCIE1_AUX_CLK>;
+			assigned-clock-rates = <20000000>;
+
+			resets = <&gcc GCC_PCIE1_PHY_BCR>,
+				 <&gcc GCC_PCIE1PHY_PHY_BCR>;
+			reset-names = "phy", "common";
+
+			#clock-cells = <0>;
+			clock-output-names = "gcc_pcie1_pipe_clk_src";
+
+			#phy-cells = <0>;
+			status = "disabled";
+
+		};
+
+		lpass: lpass@0a000000 {
+			compatible = "qca,lpass-ipq9574";
+			reg =  <0xa000000 0x3bffff>;
+			clocks = <&gcc GCC_LPASS_SWAY_CLK>,
+				<&gcc GCC_LPASS_CORE_AXIM_CLK>,
+				<&gcc GCC_SNOC_LPASS_CFG_CLK>,
+				<&gcc GCC_PCNOC_LPASS_CLK>;
+			clock-names = "sway", "axim", "snoc_cfg", "pcnoc";
+			resets = <&gcc GCC_LPASS_BCR>;
+			reset-names = "lpass";
+			status = "disabled";
+                };
+
+		lpass_pcm: lpass-pcm@0a3c0000 {
+			compatible = "qca,ipq9574-lpass-pcm";
+			interrupts = <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "if0";
+			reg =  <0xa3c0000 0x23014>;
+			reg-names = "if0", "lpm";
+			status = "disabled";
+                };
+
+		nsscc: clock-controller@39b00000 {
+			compatible = "qcom,nsscc-ipq9574";
+			reg = <0x39b00000 0x80000>;
+			clocks = <&gcc GCC_NSSNOC_SNOC_CLK>,
+				<&gcc GCC_NSSNOC_SNOC_1_CLK>,
+				<&gcc GCC_NSSNOC_NSSCC_CLK>,
+				<&gcc GCC_NSSCC_CLK>,
+				<&xo_board_clk>,
+				<&bias_pll_cc_clk>,
+				<&bias_pll_nss_noc_clk>,
+				<&bias_pll_ubi_nc_clk>,
+				<&gcc_gpll0_out_aux>,
+				<&ess UNIPHY0_GCC_RX_CLK>,
+				<&ess UNIPHY0_GCC_TX_CLK>,
+				<&ess UNIPHY1_GCC_RX_CLK>,
+				<&ess UNIPHY1_GCC_TX_CLK>,
+				<&ess UNIPHY2_GCC_RX_CLK>,
+				<&ess UNIPHY2_GCC_TX_CLK>;
+
+			/*
+			 * These clocks are needed by the clock controller
+			 * itself, regmap access will freeze if they are
+			 * not enabled
+			 */
+			clock-names = "noc_snoc",
+				"noc_snoc1",
+				"noc_nsscc",
+				"nsscc";
+			#clock-cells = <0x1>;
+			#reset-cells = <0x1>;
+		};
+
 		tlmm: pinctrl@1000000 {
 			compatible = "qcom,ipq9574-tlmm";
 			reg = <0x01000000 0x300000>;
@@ -119,6 +490,51 @@
 			interrupt-controller;
 			#interrupt-cells = <2>;
 
+			mdio_pins: mdio_pinmux {
+				mux_0 {
+					pins = "gpio38";
+					function = "mdc";
+					drive-strength = <8>;
+					bias-disable;
+				};
+				mux_1 {
+					pins = "gpio39";
+					function = "mdio";
+					drive-strength = <8>;
+					bias-pull-up;
+				};
+			};
+
+			audio_pins_pri: audio_pinmux_pri {
+				mux_1 {
+					pins = "gpio41";
+					function = "audio_pri";
+					drive-strength = <8>;
+					bias-pull-down;
+				};
+
+				mux_2 {
+					pins = "gpio40";
+					function = "audio_pri";
+					drive-strength = <8>;
+					bias-pull-down;
+				};
+
+				mux_3 {
+					pins = "gpio42";
+					function = "audio_pri";
+					drive-strength = <8>;
+					bias-pull-down;
+				};
+
+				mux_4 {
+					pins = "gpio43";
+					function = "audio_pri";
+					drive-strength = <16>;
+					bias-pull-down;
+				};
+			};
+
 			uart2_pins: uart2-state {
 				pins = "gpio34", "gpio35";
 				function = "blsp2_uart";
@@ -127,22 +543,501 @@
 			};
 		};
 
+		ess: ess@3a000000 {
+			compatible = "qcom,ipq9574-ess";
+			reg =   <0x3a000000 0xa00000>,
+				<0x3ab00000 0xef800>,
+				<0x07a00000 0x100000>;
+			reg-names = "ppe", "edma", "uniphy";
+
+			interrupts =
+				/* rx desc start */
+				<GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>,
+				/* rx fill start */
+				<GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 362 IRQ_TYPE_LEVEL_HIGH>,
+				/* tx compl start */
+				<GIC_SPI 363 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 364 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 365 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 366 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 368 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 380 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 498 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 499 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 509 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 508 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 507 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 505 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 504 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 503 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 502 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 501 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 500 IRQ_TYPE_LEVEL_HIGH>;
+
+			interrupt-names =
+				"edma_rx_desc_ring_0",
+				"edma_rx_desc_ring_1",
+				"edma_rx_desc_ring_2",
+				"edma_rx_desc_ring_3",
+				"edma_rx_desc_ring_4",
+				"edma_rx_desc_ring_5",
+				"edma_rx_desc_ring_6",
+				"edma_rx_desc_ring_7",
+				"edma_rx_desc_ring_8",
+				"edma_rx_desc_ring_9",
+				"edma_rx_desc_ring_10",
+				"edma_rx_desc_ring_11",
+				"edma_rx_desc_ring_12",
+				"edma_rx_desc_ring_13",
+				"edma_rx_desc_ring_14",
+				"edma_rx_desc_ring_15",
+				"edma_rx_desc_ring_16",
+				"edma_rx_desc_ring_17",
+				"edma_rx_desc_ring_18",
+				"edma_rx_desc_ring_19",
+				"edma_rx_desc_ring_20",
+				"edma_rx_desc_ring_21",
+				"edma_rx_desc_ring_22",
+				"edma_rx_desc_ring_23",
+				"edma_rx_fill_ring_0",
+				"edma_rx_fill_ring_1",
+				"edma_rx_fill_ring_2",
+				"edma_rx_fill_ring_3",
+				"edma_rx_fill_ring_4",
+				"edma_rx_fill_ring_5",
+				"edma_rx_fill_ring_6",
+				"edma_rx_fill_ring_7",
+				"edma_tx_compl_ring_0",
+				"edma_tx_compl_ring_1",
+				"edma_tx_compl_ring_2",
+				"edma_tx_compl_ring_3",
+				"edma_tx_compl_ring_4",
+				"edma_tx_compl_ring_5",
+				"edma_tx_compl_ring_6",
+				"edma_tx_compl_ring_7",
+				"edma_tx_compl_ring_8",
+				"edma_tx_compl_ring_9",
+				"edma_tx_compl_ring_10",
+				"edma_tx_compl_ring_11",
+				"edma_tx_compl_ring_12",
+				"edma_tx_compl_ring_13",
+				"edma_tx_compl_ring_14",
+				"edma_tx_compl_ring_15",
+				"edma_tx_compl_ring_16",
+				"edma_tx_compl_ring_17",
+				"edma_tx_compl_ring_18",
+				"edma_tx_compl_ring_19",
+				"edma_tx_compl_ring_20",
+				"edma_tx_compl_ring_21",
+				"switch_misc_intr",
+				"edma_misc",
+				"edma_tx_compl_ring_22",
+				"edma_tx_compl_ring_23",
+				"edma_tx_compl_ring_24",
+				"edma_tx_compl_ring_25",
+				"edma_tx_compl_ring_26",
+				"edma_tx_compl_ring_27",
+				"edma_tx_compl_ring_28",
+				"edma_tx_compl_ring_29",
+				"edma_tx_compl_ring_30",
+				"edma_tx_compl_ring_31";
+
+			clocks = <&gcc GCC_CMN_12GPLL_AHB_CLK>,
+				<&gcc GCC_CMN_12GPLL_SYS_CLK>,
+				<&gcc GCC_UNIPHY0_AHB_CLK>,
+				<&gcc GCC_UNIPHY0_SYS_CLK>,
+				<&gcc GCC_UNIPHY1_AHB_CLK>,
+				<&gcc GCC_UNIPHY1_SYS_CLK>,
+				<&gcc GCC_UNIPHY2_AHB_CLK>,
+				<&gcc GCC_UNIPHY2_SYS_CLK>,
+				<&nsscc NSS_CC_PORT1_MAC_CLK>,
+				<&nsscc NSS_CC_PORT2_MAC_CLK>,
+				<&nsscc NSS_CC_PORT3_MAC_CLK>,
+				<&nsscc NSS_CC_PORT4_MAC_CLK>,
+				<&nsscc NSS_CC_PORT5_MAC_CLK>,
+				<&nsscc NSS_CC_PORT6_MAC_CLK>,
+				<&nsscc NSS_CC_PPE_SWITCH_CLK>,
+				<&nsscc NSS_CC_PPE_SWITCH_CFG_CLK>,
+				<&nsscc NSS_CC_NSSNOC_PPE_CLK>,
+				<&nsscc NSS_CC_NSSNOC_PPE_CFG_CLK>,
+				<&nsscc NSS_CC_PPE_EDMA_CLK>,
+				<&nsscc NSS_CC_PPE_EDMA_CFG_CLK>,
+				<&nsscc NSS_CC_PPE_SWITCH_IPE_CLK>,
+				<&nsscc NSS_CC_PPE_SWITCH_BTQ_CLK>,
+				<&nsscc NSS_CC_PORT1_RX_CLK>,
+				<&nsscc NSS_CC_PORT1_TX_CLK>,
+				<&nsscc NSS_CC_PORT2_RX_CLK>,
+				<&nsscc NSS_CC_PORT2_TX_CLK>,
+				<&nsscc NSS_CC_PORT3_RX_CLK>,
+				<&nsscc NSS_CC_PORT3_TX_CLK>,
+				<&nsscc NSS_CC_PORT4_RX_CLK>,
+				<&nsscc NSS_CC_PORT4_TX_CLK>,
+				<&nsscc NSS_CC_PORT5_RX_CLK>,
+				<&nsscc NSS_CC_PORT5_TX_CLK>,
+				<&nsscc NSS_CC_PORT6_RX_CLK>,
+				<&nsscc NSS_CC_PORT6_TX_CLK>,
+				<&nsscc NSS_CC_PORT1_RX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT1_TX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT2_RX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT2_TX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT3_RX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT3_TX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT4_RX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT4_TX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT5_RX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT5_TX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT6_RX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT6_TX_CLK_SRC>,
+				<&nsscc NSS_CC_UNIPHY_PORT1_RX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT1_TX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT2_RX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT2_TX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT3_RX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT3_TX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT4_RX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT4_TX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT5_RX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT5_TX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT6_RX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT6_TX_CLK>,
+
+				/* EDMA clocks */
+				<&nsscc NSS_CC_NSS_CSR_CLK>,
+				<&nsscc NSS_CC_NSSNOC_NSS_CSR_CLK>,
+				<&nsscc NSS_CC_IMEM_QSB_CLK>,
+				<&nsscc NSS_CC_NSSNOC_IMEM_QSB_CLK>,
+				<&nsscc NSS_CC_IMEM_AHB_CLK>,
+				<&nsscc NSS_CC_NSSNOC_IMEM_AHB_CLK>,
+				<&gcc GCC_MEM_NOC_NSSNOC_CLK>,
+				<&gcc GCC_NSS_TBU_CLK>,
+				<&gcc GCC_NSS_TS_CLK>,
+				<&gcc GCC_NSSCC_CLK>,
+				<&gcc GCC_NSSCFG_CLK>,
+				<&gcc GCC_NSSNOC_ATB_CLK>,
+				<&gcc GCC_NSSNOC_MEM_NOC_1_CLK>,
+				<&gcc GCC_NSSNOC_MEMNOC_CLK>,
+				<&gcc GCC_NSSNOC_NSSCC_CLK>,
+				<&gcc GCC_NSSNOC_PCNOC_1_CLK>,
+				<&gcc GCC_NSSNOC_QOSGEN_REF_CLK>,
+				<&gcc GCC_NSSNOC_SNOC_1_CLK>,
+				<&gcc GCC_NSSNOC_SNOC_CLK>,
+				<&gcc GCC_NSSNOC_TIMEOUT_REF_CLK>,
+				<&gcc GCC_NSSNOC_XO_DCD_CLK>;
+
+			clock-names = "cmn_ahb_clk",
+				"cmn_sys_clk",
+				"uniphy0_ahb_clk",
+				"uniphy0_sys_clk",
+				"uniphy1_ahb_clk",
+				"uniphy1_sys_clk",
+				"uniphy2_ahb_clk",
+				"uniphy2_sys_clk",
+				"port1_mac_clk",
+				"port2_mac_clk",
+				"port3_mac_clk",
+				"port4_mac_clk",
+				"port5_mac_clk",
+				"port6_mac_clk",
+				"nss_ppe_switch_clk",
+				"nss_ppe_cfg_clk",
+				"nssnoc_ppe_clk",
+				"nssnoc_ppe_cfg_clk",
+				"nss_edma_clk",
+				"nss_edma_cfg_clk",
+				"nss_ppe_ipe_clk",
+				"nss_ppe_btq_clk",
+				"nss_port1_rx_clk", "nss_port1_tx_clk",
+				"nss_port2_rx_clk", "nss_port2_tx_clk",
+				"nss_port3_rx_clk", "nss_port3_tx_clk",
+				"nss_port4_rx_clk", "nss_port4_tx_clk",
+				"nss_port5_rx_clk", "nss_port5_tx_clk",
+				"nss_port6_rx_clk", "nss_port6_tx_clk",
+				"nss_port1_rx_clk_src",
+				"nss_port1_tx_clk_src",
+				"nss_port2_rx_clk_src",
+				"nss_port2_tx_clk_src",
+				"nss_port3_rx_clk_src",
+				"nss_port3_tx_clk_src",
+				"nss_port4_rx_clk_src",
+				"nss_port4_tx_clk_src",
+				"nss_port5_rx_clk_src",
+				"nss_port5_tx_clk_src",
+				"nss_port6_rx_clk_src",
+				"nss_port6_tx_clk_src",
+				"uniphy_port1_rx_clk",
+				"uniphy_port1_tx_clk",
+				"uniphy_port2_rx_clk",
+				"uniphy_port2_tx_clk",
+				"uniphy_port3_rx_clk",
+				"uniphy_port3_tx_clk",
+				"uniphy_port4_rx_clk",
+				"uniphy_port4_tx_clk",
+				"uniphy_port5_rx_clk",
+				"uniphy_port5_tx_clk",
+				"uniphy_port6_rx_clk",
+				"uniphy_port6_tx_clk",
+				/* EDMA clocks */
+				"nss_cc_nss_csr_clk",
+				"nss_cc_nssnoc_nss_csr_clk",
+				"nss_cc_imem_qsb_clk",
+				"nss_cc_nssnoc_imem_qsb_clk",
+				"nss_cc_imem_ahb_clk",
+				"nss_cc_nssnoc_imem_ahb_clk",
+				"gcc_mem_noc_nssnoc_clk",
+				"gcc_nss_tbu_clk",
+				"gcc_nss_ts_clk",
+				"gcc_nsscc_clk",
+				"gcc_nsscfg_clk",
+				"gcc_nssnoc_atb_clk",
+				"gcc_nssnoc_mem_noc_1_clk",
+				"gcc_nssnoc_memnoc_clk",
+				"gcc_nssnoc_nsscc_clk",
+				"gcc_nssnoc_pcnoc_1_clk",
+				"gcc_nssnoc_qosgen_ref_clk",
+				"gcc_nssnoc_snoc_1_clk",
+				"gcc_nssnoc_snoc_clk",
+				"gcc_nssnoc_timeout_ref_clk",
+				"gcc_nssnoc_xo_dcd_clk";
+
+			#clock-cells = <1>;
+			clock-output-names = "uniphy0_gcc_rx_clk",
+				"uniphy0_gcc_tx_clk",
+				"uniphy1_gcc_rx_clk",
+				"uniphy1_gcc_tx_clk",
+				"uniphy2_gcc_rx_clk",
+				"uniphy2_gcc_tx_clk";
+
+			resets = <&nsscc PPE_FULL_RESET>,
+				<&nsscc EDMA_HW_RESET>,
+				<&nsscc UNIPHY0_SOFT_RESET>,
+				<&gcc GCC_UNIPHY0_XPCS_RESET>,
+				<&nsscc UNIPHY_PORT5_ARES>,
+				<&gcc GCC_UNIPHY1_XPCS_RESET>,
+				<&nsscc UNIPHY_PORT6_ARES>,
+				<&gcc GCC_UNIPHY2_XPCS_RESET>,
+				<&nsscc UNIPHY_PORT1_ARES>,
+				<&nsscc UNIPHY_PORT2_ARES>,
+				<&nsscc UNIPHY_PORT3_ARES>,
+				<&nsscc UNIPHY_PORT4_ARES>,
+				<&gcc GCC_UNIPHY0_SYS_RESET>,
+				<&gcc GCC_UNIPHY1_SYS_RESET>,
+				<&gcc GCC_UNIPHY2_SYS_RESET>,
+				<&nsscc NSSPORT1_RESET>,
+				<&nsscc NSSPORT2_RESET>,
+				<&nsscc NSSPORT3_RESET>,
+				<&nsscc NSSPORT4_RESET>,
+				<&nsscc NSSPORT5_RESET>,
+				<&nsscc NSSPORT6_RESET>,
+				<&nsscc PORT1_MAC_ARES>,
+				<&nsscc PORT2_MAC_ARES>,
+				<&nsscc PORT3_MAC_ARES>,
+				<&nsscc PORT4_MAC_ARES>,
+				<&nsscc PORT5_MAC_ARES>,
+				<&nsscc PORT6_MAC_ARES>;
+			reset-names = "ppe_rst",
+				"edma_rst",
+				"uniphy0_soft_rst",
+				"uniphy0_xpcs_rst",
+				"uniphy1_soft_rst",
+				"uniphy1_xpcs_rst",
+				"uniphy2_soft_rst",
+				"uniphy2_xpcs_rst",
+				"uniphy0_port1_dis",
+				"uniphy0_port2_dis",
+				"uniphy0_port3_dis",
+				"uniphy0_port4_dis",
+				"uniphy0_sys_rst",
+				"uniphy1_sys_rst",
+				"uniphy2_sys_rst",
+				"nss_port1_rst",
+				"nss_port2_rst",
+				"nss_port3_rst",
+				"nss_port4_rst",
+				"nss_port5_rst",
+				"nss_port6_rst",
+				"nss_port1_mac_rst",
+				"nss_port2_mac_rst",
+				"nss_port3_mac_rst",
+				"nss_port4_mac_rst",
+				"nss_port5_mac_rst",
+				"nss_port6_mac_rst";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				/*
+				 * NB: the ids are *NOT* the physical
+				 * PPE port ids, only the physical
+				 * interfaces
+				 */
+				ess_phys_port0: port@0 {
+					reg = <0>;
+					ess,ppe-port-id = <1>;
+					status = "disabled";
+
+					/*
+					 * PCS is UNIPHY0
+					 *
+					 * possible phy-mode:
+					 * - QSGMII
+					 * - PSGMII
+					 * - QUSGMII
+					 */
+				};
+
+				ess_phys_port1: port@1 {
+					reg = <1>;
+					ess,ppe-port-id = <2>;
+					status = "disabled";
+					/*
+					 * PCS is UNIPHY0
+					 *
+					 * possible phy-mode:
+					 * - QSGMII
+					 * - PSGMII
+					 * - QUSGMII
+					 */
+				};
+
+				ess_phys_port2: port@2 {
+					reg = <2>;
+					ess,ppe-port-id = <3>;
+					status = "disabled";
+					/*
+					 * PCS is UNIPHY0
+					 *
+					 * possible phy-mode:
+					 * - QSGMII
+					 * - PSGMII
+					 * - QUSGMII
+					 */
+				};
+
+				ess_phys_port3: port@3 {
+					reg = <3>;
+					ess,ppe-port-id = <4>;
+					status = "disabled";
+					/*
+					 * PCS is UNIPHY0
+					 *
+					 * possible phy-mode:
+					 * - QSGMII
+					 * - PSGMII
+					 * - QUSGMII
+					 * - SGMII (port[0-2] disabled)
+					 * - 10GBASER (port[0-2] disabled)
+					 * - USXGMII (port[0-2] disabled)
+					 */
+				};
+
+				ess_phys_port4: port@4 {
+					reg = <4>;
+					ess,ppe-port-id = <5>;
+					status = "disabled";
+					/*
+					 * PCS can be muxed on either:
+					 *  - UNIPHY0
+					 *  - UNIPHY1
+					 *
+					 * possible phy-mode:
+					 * - PSGMII (UNIPHY0)
+					 * - SGMII (UNIPHY1)
+					 * - 10GBASER (UNIPHY1)
+					 * - USXGMII (UNIPHY1)
+					 */
+				};
+
+				ess_phys_port5: port@5 {
+					reg = <5>;
+					ess,ppe-port-id = <6>;
+					status = "disabled";
+					/*
+					 * PCS is UNIPHY2
+					 *
+					 * possible phy-mode:
+					 * - SGMII
+					 * - 10GBASER
+					 * - USXGMII
+					 */
+				};
+			};
+		};
+
 		gcc: clock-controller@1800000 {
 			compatible = "qcom,ipq9574-gcc";
 			reg = <0x01800000 0x80000>;
 			clocks = <&xo_board_clk>,
 				 <&sleep_clk>,
-				 <&bias_pll_ubi_nc_clk>,
-				 <0>,
-				 <0>,
-				 <0>,
 				 <0>,
+				 <&pcie0_phy>,
+				 <&pcie1_phy>,
+				 <&pcie2_phy>,
+				 <&pcie3_phy>,
 				 <0>;
 			#clock-cells = <1>;
 			#reset-cells = <1>;
 			#power-domain-cells = <1>;
 		};
 
+		tcsr_mutex: hwlock@1905000 {
+			compatible = "qcom,tcsr-mutex";
+			reg = <0x01905000 0x20000>;
+			#hwlock-cells = <1>;
+		};
+
+		tcsr: syscon@1937000 {
+			compatible = "qcom,tcsr-ipq9574", "syscon";
+			reg = <0x01937000 0x21000>;
+		};
+
 		sdhc_1: mmc@7804000 {
 			compatible = "qcom,ipq9574-sdhci", "qcom,sdhci-msm-v5";
 			reg = <0x07804000 0x1000>, <0x07805000 0x1000>;
@@ -160,6 +1055,36 @@
 			status = "disabled";
 		};
 
+		blsp_dma: dma-controller@7884000 {
+			compatible = "qcom,bam-v1.7.0";
+			reg = <0x07884000 0x2b000>;
+			interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "bam_clk";
+			#dma-cells = <1>;
+			qcom,ee = <0>;
+		};
+
+		blsp1_uart0: serial@78af000 {
+			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+			reg = <0x078af000 0x200>;
+			interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_UART1_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			status = "disabled";
+		};
+
+		blsp1_uart1: serial@78b0000 {
+			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+			reg = <0x078b0000 0x200>;
+			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			status = "disabled";
+		};
+
 		blsp1_uart2: serial@78b1000 {
 			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
 			reg = <0x078b1000 0x200>;
@@ -170,17 +1095,222 @@
 			status = "disabled";
 		};
 
+		blsp1_uart3: serial@78b2000 {
+			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+			reg = <0x078b2000 0x200>;
+			interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_UART4_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			status = "disabled";
+		};
+
+		blsp1_uart4: serial@78b3000 {
+			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+			reg = <0x078b3000 0x200>;
+			interrupts = <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_UART5_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			status = "disabled";
+		};
+
+		blsp1_uart5: serial@78b4000 {
+			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+			reg = <0x078b4000 0x200>;
+			interrupts = <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_UART6_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			status = "disabled";
+		};
+
+		blsp1_spi0: spi@78b5000 {
+			compatible = "qcom,spi-qup-v2.2.1";
+			reg = <0x078b5000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 12>, <&blsp_dma 13>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		blsp1_i2c1: i2c@78b6000 {
+			compatible = "qcom,i2c-qup-v2.2.1";
+			reg = <0x078b6000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 14>, <&blsp_dma 15>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		blsp1_spi1: spi@78b6000 {
+			compatible = "qcom,spi-qup-v2.2.1";
+			reg = <0x078b6000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 14>, <&blsp_dma 15>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		blsp1_i2c2: i2c@78b7000 {
+			compatible = "qcom,i2c-qup-v2.2.1";
+			reg = <0x078b7000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 16>, <&blsp_dma 17>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		blsp1_spi2: spi@78b7000 {
+			compatible = "qcom,spi-qup-v2.2.1";
+			reg = <0x078b7000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP3_SPI_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 16>, <&blsp_dma 17>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		blsp1_i2c3: i2c@78b8000 {
+			compatible = "qcom,i2c-qup-v2.2.1";
+			reg = <0x078b8000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP4_I2C_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 18>, <&blsp_dma 19>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		blsp1_spi3: spi@78b8000 {
+			compatible = "qcom,spi-qup-v2.2.1";
+			reg = <0x078b8000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+			spi-max-frequency = <50000000>;
+			clocks = <&gcc GCC_BLSP1_QUP4_SPI_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 18>, <&blsp_dma 19>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		blsp1_i2c4: i2c@78b9000 {
+			compatible = "qcom,i2c-qup-v2.2.1";
+			reg = <0x078b9000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 299 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP5_I2C_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 20>, <&blsp_dma 21>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		blsp1_spi4: spi@78b9000 {
+			compatible = "qcom,spi-qup-v2.2.1";
+			reg = <0x078b9000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 299 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP5_SPI_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 20>, <&blsp_dma 21>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		usb3: usb@8a00000 {
+			compatible = "qcom,ipq9574-dwc3", "qcom,dwc3";
+			reg = <0x08af8800 0x400>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			clocks = <&gcc GCC_SNOC_USB_CLK>,
+				 <&gcc GCC_USB0_MASTER_CLK>,
+				 <&gcc GCC_ANOC_USB_AXI_CLK>,
+				 <&gcc GCC_USB0_SLEEP_CLK>,
+				 <&gcc GCC_USB0_MOCK_UTMI_CLK>;
+
+			clock-names = "cfg_noc",
+				      "core",
+				      "iface",
+				      "sleep",
+				      "mock_utmi";
+
+			assigned-clocks = <&gcc GCC_USB0_MASTER_CLK>,
+					  <&gcc GCC_USB0_MOCK_UTMI_CLK>;
+			assigned-clock-rates = <200000000>,
+					       <24000000>;
+
+			interrupts-extended = <&intc GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "pwr_event";
+
+			resets = <&gcc GCC_USB_BCR>;
+			status = "disabled";
+
+			dwc_0: usb@8a00000 {
+				dev_id = <0>;
+				compatible = "snps,dwc3";
+				reg = <0x8a00000 0xcd00>;
+				clocks = <&gcc GCC_USB0_MOCK_UTMI_CLK>;
+				clock-names = "ref";
+				interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+				phys = <&usb_0_qusbphy>, <&usb_0_qmpphy>;
+				phy-names = "usb2-phy", "usb3-phy";
+				tx-fifo-resize;
+				snps,is-utmi-l1-suspend;
+				snps,hird-threshold = /bits/ 8 <0x0>;
+				snps,dis_u2_susphy_quirk;
+				snps,dis_u3_susphy_quirk;
+				dr_mode = "host";
+			};
+		};
+
 		intc: interrupt-controller@b000000 {
 			compatible = "qcom,msm-qgic2";
 			reg = <0x0b000000 0x1000>,  /* GICD */
-			      <0x0b002000 0x1000>,  /* GICC */
+			      <0x0b002000 0x2000>,  /* GICC */
 			      <0x0b001000 0x1000>,  /* GICH */
-			      <0x0b004000 0x1000>;  /* GICV */
+			      <0x0b004000 0x2000>;  /* GICV */
 			#address-cells = <1>;
 			#size-cells = <1>;
 			interrupt-controller;
 			#interrupt-cells = <3>;
-			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
 			ranges = <0 0x0b00c000 0x3000>;
 
 			v2m0: v2m@0 {
@@ -202,6 +1332,32 @@
 			};
 		};
 
+		watchdog: watchdog@b017000 {
+			compatible = "qcom,apss-wdt-ipq9574", "qcom,kpss-wdt";
+			reg = <0x0b017000 0x1000>;
+			interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
+			clocks = <&sleep_clk>;
+			timeout-sec = <30>;
+		};
+
+		apcs_glb: mailbox@b111000 {
+			compatible = "qcom,ipq9574-apcs-apps-global",
+				     "qcom,ipq6018-apcs-apps-global";
+			reg = <0x0b111000 0x1000>;
+			#clock-cells = <1>;
+			clocks = <&a73pll>, <&xo_board_clk>;
+			clock-names = "pll", "xo";
+			#mbox-cells = <1>;
+		};
+
+		a73pll: clock@b116000 {
+			compatible = "qcom,ipq9574-a73pll";
+			reg = <0x0b116000 0x40>;
+			#clock-cells = <0>;
+			clocks = <&xo_board_clk>;
+			clock-names = "xo";
+		};
+
 		timer@b120000 {
 			compatible = "arm,armv7-timer-mem";
 			reg = <0x0b120000 0x1000>;
@@ -259,6 +1415,484 @@
 				status = "disabled";
 			};
 		};
+
+		pcie1: pci@10000000 {
+			compatible = "qcom,pcie-ipq9574";
+			reg =  <0x10000000 0xf1d>,
+			       <0x10000F20 0xa8>,
+			       <0x10001000 0x1000>,
+			       <0x000F8000 0x4000>,
+			       <0x10100000 0x1000>;
+			reg-names = "dbi", "elbi", "atu", "parf", "config";
+			device_type = "pci";
+			linux,pci-domain = <2>;
+			bus-range = <0x00 0xff>;
+			num-lanes = <1>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+			ranges = <0x01000000 0x0 0x00000000 0x10200000 0x0 0x100000>,  /* I/O */
+				 <0x02000000 0x0 0x10300000 0x10300000 0x0 0x7d00000>; /* MEM */
+
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 0x7>;
+			interrupt-map = <0 0 0 1 &intc 0 35 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+					<0 0 0 2 &intc 0 49 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+					<0 0 0 3 &intc 0 84 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+					<0 0 0 4 &intc 0 85 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+			interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "global_irq";
+
+			/* clocks and clock-names are used to enable the clock in CBCR */
+			clocks = <&gcc GCC_PCIE1_AHB_CLK>,
+				 <&gcc GCC_PCIE1_AUX_CLK>,
+				 <&gcc GCC_PCIE1_AXI_M_CLK>,
+				 <&gcc GCC_PCIE1_AXI_S_CLK>,
+				 <&gcc GCC_PCIE1_AXI_S_BRIDGE_CLK>,
+				 <&gcc GCC_PCIE1_RCHNG_CLK>;
+			clock-names = "ahb",
+				      "aux",
+				      "axi_m",
+				      "axi_s",
+				      "axi_bridge",
+				      "rchng";
+
+			resets = <&gcc GCC_PCIE1_PIPE_ARES>,
+				 <&gcc GCC_PCIE1_CORE_STICKY_ARES>,
+				 <&gcc GCC_PCIE1_AXI_S_STICKY_ARES>,
+				 <&gcc GCC_PCIE1_AXI_S_ARES>,
+				 <&gcc GCC_PCIE1_AXI_M_STICKY_ARES>,
+				 <&gcc GCC_PCIE1_AXI_M_ARES>,
+				 <&gcc GCC_PCIE1_AUX_ARES>,
+				 <&gcc GCC_PCIE1_AHB_ARES>;
+			reset-names = "pipe",
+				      "sticky",
+				      "axi_s_sticky",
+				      "axi_s",
+				      "axi_m_sticky",
+				      "axi_m",
+				      "aux",
+				      "ahb";
+
+			phys = <&pcie1_phy>;
+			phy-names = "pciephy";
+			msi-parent = <&v2m0>;
+			status = "disabled";
+		};
+
+		pcie3: pci@18000000 {
+			compatible = "qcom,pcie-ipq9574";
+			reg =  <0x18000000 0xf1d>,
+			       <0x18000F20 0xa8>,
+			       <0x18001000 0x1000>,
+			       <0x000F0000 0x4000>,
+			       <0x18100000 0x1000>;
+			reg-names = "dbi", "elbi", "atu", "parf", "config";
+			device_type = "pci";
+			linux,pci-domain = <4>;
+			bus-range = <0x00 0xff>;
+			num-lanes = <2>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+			ranges = <0x01000000 0x0 0x00000000 0x18200000 0x0 0x100000>,  /* I/O */
+				 <0x02000000 0x0 0x18300000 0x18300000 0x0 0x7d00000>; /* MEM */
+
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 0x7>;
+			interrupt-map = <0 0 0 1 &intc 0 189 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+					<0 0 0 2 &intc 0 190 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+					<0 0 0 3 &intc 0 191 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+					<0 0 0 4 &intc 0 192 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+			interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "global_irq";
+
+			/* clocks and clock-names are used to enable the clock in CBCR */
+			clocks = <&gcc GCC_PCIE3_AHB_CLK>,
+				 <&gcc GCC_PCIE3_AUX_CLK>,
+				 <&gcc GCC_PCIE3_AXI_M_CLK>,
+				 <&gcc GCC_PCIE3_AXI_S_CLK>,
+				 <&gcc GCC_PCIE3_AXI_S_BRIDGE_CLK>,
+				 <&gcc GCC_PCIE3_RCHNG_CLK>;
+			clock-names = "ahb",
+				      "aux",
+				      "axi_m",
+				      "axi_s",
+				      "axi_bridge",
+				      "rchng";
+
+			resets = <&gcc GCC_PCIE3_PIPE_ARES>,
+				 <&gcc GCC_PCIE3_CORE_STICKY_ARES>,
+				 <&gcc GCC_PCIE3_AXI_S_STICKY_ARES>,
+				 <&gcc GCC_PCIE3_AXI_S_ARES>,
+				 <&gcc GCC_PCIE3_AXI_M_STICKY_ARES>,
+				 <&gcc GCC_PCIE3_AXI_M_ARES>,
+				 <&gcc GCC_PCIE3_AUX_ARES>,
+				 <&gcc GCC_PCIE3_AHB_ARES>;
+			reset-names = "pipe",
+				      "sticky",
+				      "axi_s_sticky",
+				      "axi_s",
+				      "axi_m_sticky",
+				      "axi_m",
+				      "aux",
+				      "ahb";
+
+			phys = <&pcie3_phy>;
+			phy-names = "pciephy";
+			msi-parent = <&v2m0>;
+			status = "disabled";
+		};
+
+		pcie2: pci@20000000 {
+			compatible = "qcom,pcie-ipq9574";
+			reg =  <0x20000000 0xf1d>,
+			       <0x20000F20 0xa8>,
+			       <0x20001000 0x1000>,
+			       <0x00088000 0x4000>,
+			       <0x20100000 0x1000>;
+			reg-names = "dbi", "elbi", "atu", "parf", "config";
+			device_type = "pci";
+			linux,pci-domain = <3>;
+			bus-range = <0x00 0xff>;
+			num-lanes = <2>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+			ranges = <0x01000000 0x0 0x00000000 0x20200000 0x0 0x100000>,  /* I/O */
+				 <0x02000000 0x0 0x20300000 0x20300000 0x0 0x7d00000>; /* MEM */
+
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 0x7>;
+			interrupt-map = <0 0 0 1 &intc 0 164 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+					<0 0 0 2 &intc 0 165 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+					<0 0 0 3 &intc 0 186 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+					<0 0 0 4 &intc 0 187 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+			interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "global_irq";
+
+			/* clocks and clock-names are used to enable the clock in CBCR */
+			clocks = <&gcc GCC_PCIE2_AHB_CLK>,
+				 <&gcc GCC_PCIE2_AUX_CLK>,
+				 <&gcc GCC_PCIE2_AXI_M_CLK>,
+				 <&gcc GCC_PCIE2_AXI_S_CLK>,
+				 <&gcc GCC_PCIE2_AXI_S_BRIDGE_CLK>,
+				 <&gcc GCC_PCIE2_RCHNG_CLK>;
+			clock-names = "ahb",
+				      "aux",
+				      "axi_m",
+				      "axi_s",
+				      "axi_bridge",
+				      "rchng";
+
+			resets = <&gcc GCC_PCIE2_PIPE_ARES>,
+				 <&gcc GCC_PCIE2_CORE_STICKY_ARES>,
+				 <&gcc GCC_PCIE2_AXI_S_STICKY_ARES>,
+				 <&gcc GCC_PCIE2_AXI_S_ARES>,
+				 <&gcc GCC_PCIE2_AXI_M_STICKY_ARES>,
+				 <&gcc GCC_PCIE2_AXI_M_ARES>,
+				 <&gcc GCC_PCIE2_AUX_ARES>,
+				 <&gcc GCC_PCIE2_AHB_ARES>;
+			reset-names = "pipe",
+				      "sticky",
+				      "axi_s_sticky",
+				      "axi_s",
+				      "axi_m_sticky",
+				      "axi_m",
+				      "aux",
+				      "ahb";
+
+			phys = <&pcie2_phy>;
+			phy-names = "pciephy";
+			msi-parent = <&v2m0>;
+			status = "disabled";
+		};
+
+		pcie0: pci@28000000 {
+			compatible = "qcom,pcie-ipq9574";
+			reg =  <0x28000000 0xf1d>,
+			       <0x28000F20 0xa8>,
+			       <0x28001000 0x1000>,
+			       <0x00080000 0x4000>,
+			       <0x28100000 0x1000>;
+			reg-names = "dbi", "elbi", "atu", "parf", "config";
+			device_type = "pci";
+			linux,pci-domain = <1>;
+			bus-range = <0x00 0xff>;
+			num-lanes = <1>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+			ranges = <0x01000000 0x0 0x00000000 0x28200000 0x0 0x100000>,  /* I/O */
+				 <0x02000000 0x0 0x28300000 0x28300000 0x0 0x7d00000>; /* MEM */
+
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 0x7>;
+			interrupt-map = <0 0 0 1 &intc 0 75 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+					<0 0 0 2 &intc 0 78 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+					<0 0 0 3 &intc 0 79 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+					<0 0 0 4 &intc 0 83 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+			interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "global_irq";
+
+			/* clocks and clock-names are used to enable the clock in CBCR */
+			clocks = <&gcc GCC_PCIE0_AHB_CLK>,
+				 <&gcc GCC_PCIE0_AUX_CLK>,
+				 <&gcc GCC_PCIE0_AXI_M_CLK>,
+				 <&gcc GCC_PCIE0_AXI_S_CLK>,
+				 <&gcc GCC_PCIE0_AXI_S_BRIDGE_CLK>,
+				 <&gcc GCC_PCIE0_RCHNG_CLK>;
+			clock-names = "ahb",
+				      "aux",
+				      "axi_m",
+				      "axi_s",
+				      "axi_bridge",
+				      "rchng";
+
+			resets = <&gcc GCC_PCIE0_PIPE_ARES>,
+				 <&gcc GCC_PCIE0_CORE_STICKY_ARES>,
+				 <&gcc GCC_PCIE0_AXI_S_STICKY_ARES>,
+				 <&gcc GCC_PCIE0_AXI_S_ARES>,
+				 <&gcc GCC_PCIE0_AXI_M_STICKY_ARES>,
+				 <&gcc GCC_PCIE0_AXI_M_ARES>,
+				 <&gcc GCC_PCIE0_AUX_ARES>,
+				 <&gcc GCC_PCIE0_AHB_ARES>;
+			reset-names = "pipe",
+				      "sticky",
+				      "axi_s_sticky",
+				      "axi_s",
+				      "axi_m_sticky",
+				      "axi_m",
+				      "aux",
+				      "ahb";
+
+			phys = <&pcie0_phy>;
+			phy-names = "pciephy";
+			msi-parent = <&v2m0>;
+			status = "disabled";
+		};
+
+		tsens: thermal-sensor@4a9000 {
+			compatible = "qcom,ipq9574-tsens";
+			reg = <0x4a9000 0x1000>, /* TM */
+			      <0x4a8000 0x1000>; /* SROT */
+			interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "combined";
+			#qcom,sensors = <16>;
+			#thermal-sensor-cells = <1>;
+		};
+	};
+
+	thermal_zones: thermal-zones {
+		nss_top {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 3>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		misc0 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 4>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		misc1 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 5>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		misc2 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 6>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		misc3 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 7>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		a73ss0 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 8>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		a73ss1 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 9>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		a73_cpu0 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 10>;
+
+			trips {
+				cpu-critical {
+					temperature = <120000>;
+					hysteresis = <10000>;
+					type = "critical";
+				};
+
+				cpu-passive {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		a73_cpu1 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 11>;
+
+			trips {
+				cpu-critical {
+					temperature = <120000>;
+					hysteresis = <10000>;
+					type = "critical";
+				};
+
+				cpu-passive {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		a73_cpu2 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 12>;
+
+			trips {
+				cpu-critical {
+					temperature = <120000>;
+					hysteresis = <10000>;
+					type = "critical";
+				};
+
+				cpu-passive {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		a73_cpu3 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 13>;
+
+			trips {
+				cpu-critical {
+					temperature = <120000>;
+					hysteresis = <10000>;
+					type = "critical";
+				};
+
+				cpu-passive {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		wcss_phyb_tile3 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 14>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		top_glue_logic {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 15>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
 	};
 
 	timer {
diff -ruw linux-6.4/arch/arm64/include/asm/assembler.h linux-6.4-fbx/arch/arm64/include/asm/assembler.h
--- linux-6.4/arch/arm64/include/asm/assembler.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/arch/arm64/include/asm/assembler.h	2024-01-19 17:01:19.841846374 +0100
@@ -428,6 +428,45 @@
 
 /*
  * Macro to perform a data cache maintenance for the interval
+ *	[kaddr, kaddr + size)
+ *	This macro does not do "data synchronization barrier". Caller should
+ *	do "dsb" after transaction.
+ *
+ *	op:     operation passed to dc instruction
+ *	kaddr:      starting virtual address of the region
+ *	size:       size of the region
+ *	Corrupts:   kaddr, size, tmp1, tmp2
+ */
+	.macro dcache_by_line_op_no_dsb op, kaddr, size, tmp1, tmp2
+	dcache_line_size \tmp1, \tmp2
+	add \size, \kaddr, \size
+	sub \tmp2, \tmp1, #1
+	bic \kaddr, \kaddr, \tmp2
+9998:
+	.ifc    \op, cvau
+	__dcache_op_workaround_clean_cache \op, \kaddr
+	.else
+	.ifc	\op, cvac
+	__dcache_op_workaround_clean_cache \op, \kaddr
+	.else
+	.ifc	\op, cvap
+	sys	3, c7, c12, 1, \kaddr	// dc cvap
+	.else
+	.ifc	\op, cvadp
+	sys	3, c7, c13, 1, \kaddr	// dc cvadp
+	.else
+	dc	\op, \kaddr
+	.endif
+	.endif
+	.endif
+	.endif
+	add	\kaddr, \kaddr, \tmp1
+	cmp	\kaddr, \size
+	b.lo	9998b
+	.endm
+
+/*
+ * Macro to perform a data cache maintenance for the interval
  * [start, end)
  *
  * 	op:		operation passed to dc instruction
diff -ruw linux-6.4/arch/arm64/include/asm/cacheflush.h linux-6.4-fbx/arch/arm64/include/asm/cacheflush.h
--- linux-6.4/arch/arm64/include/asm/cacheflush.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/arch/arm64/include/asm/cacheflush.h	2024-01-19 17:01:19.841846374 +0100
@@ -79,6 +79,17 @@
 extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end);
 extern void sync_icache_aliases(unsigned long start, unsigned long end);
 
+extern void dmac_flush_range(const void *start, const void *end);
+extern void dmac_inv_range(const void *start, const void *end);
+extern void dmac_clean_range(const void *start, const void *end);
+extern void __dma_flush_area_no_dsb(const void *start, size_t size);
+extern void __dma_inv_area_no_dsb(const void *start, size_t size);
+extern void __dma_clean_area_no_dsb(const void *start, size_t size);
+
+extern void dmac_flush_range_no_dsb(const void *start, const void *end);
+extern void dmac_inv_range_no_dsb(const void *start, const void *end);
+extern void dmac_clean_range_no_dsb(const void *start, const void *end);
+
 static inline void flush_icache_range(unsigned long start, unsigned long end)
 {
 	caches_clean_inval_pou(start, end);
diff -ruw linux-6.4/arch/arm64/include/asm/memory.h linux-6.4-fbx/arch/arm64/include/asm/memory.h
--- linux-6.4/arch/arm64/include/asm/memory.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/arch/arm64/include/asm/memory.h	2023-05-22 20:06:36.727653882 +0200
@@ -74,7 +74,7 @@
 #define KASAN_SHADOW_END	((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
 					+ KASAN_SHADOW_OFFSET)
 #define PAGE_END		(KASAN_SHADOW_END - (1UL << (vabits_actual - KASAN_SHADOW_SCALE_SHIFT)))
-#define KASAN_THREAD_SHIFT	1
+#define KASAN_THREAD_SHIFT	2
 #else
 #define KASAN_THREAD_SHIFT	0
 #define PAGE_END		(_PAGE_END(VA_BITS_MIN))
diff -ruw linux-6.4/arch/arm64/include/asm/pgtable-hwdef.h linux-6.4-fbx/arch/arm64/include/asm/pgtable-hwdef.h
--- linux-6.4/arch/arm64/include/asm/pgtable-hwdef.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/arch/arm64/include/asm/pgtable-hwdef.h	2023-05-22 20:06:36.727653882 +0200
@@ -146,7 +146,11 @@
 #define PTE_TABLE_BIT		(_AT(pteval_t, 1) << 1)
 #define PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
 #define PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
+#ifdef CONFIG_ARCH_BCM63XX_SHARED_OSH
+#define PTE_SHARED		(_AT(pteval_t, 2) << 8)		/* SH[1:0], outer shareable */
+#else
 #define PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
+#endif
 #define PTE_AF			(_AT(pteval_t, 1) << 10)	/* Access Flag */
 #define PTE_NG			(_AT(pteval_t, 1) << 11)	/* nG */
 #define PTE_GP			(_AT(pteval_t, 1) << 50)	/* BTI guarded */
@@ -242,12 +246,19 @@
 
 #define TCR_SH0_SHIFT		12
 #define TCR_SH0_MASK		(UL(3) << TCR_SH0_SHIFT)
+#define TCR_SH0_OUTER		(UL(2) << TCR_SH0_SHIFT)
 #define TCR_SH0_INNER		(UL(3) << TCR_SH0_SHIFT)
 
 #define TCR_SH1_SHIFT		28
 #define TCR_SH1_MASK		(UL(3) << TCR_SH1_SHIFT)
+#define TCR_SH1_OUTER		(UL(2) << TCR_SH1_SHIFT)
 #define TCR_SH1_INNER		(UL(3) << TCR_SH1_SHIFT)
+
+#ifdef CONFIG_ARCH_BCM63XX_SHARED_OSH
+#define TCR_SHARED		(TCR_SH0_OUTER | TCR_SH1_OUTER)
+#else
 #define TCR_SHARED		(TCR_SH0_INNER | TCR_SH1_INNER)
+#endif
 
 #define TCR_TG0_SHIFT		14
 #define TCR_TG0_MASK		(UL(3) << TCR_TG0_SHIFT)
diff -ruw linux-6.4/arch/arm64/mm/cache.S linux-6.4-fbx/arch/arm64/mm/cache.S
--- linux-6.4/arch/arm64/mm/cache.S	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/arch/arm64/mm/cache.S	2024-01-19 17:01:19.841846374 +0100
@@ -164,6 +164,64 @@
 SYM_FUNC_ALIAS(dcache_inval_poc, __pi_dcache_inval_poc)
 
 /*
+ *  __dma_inv_area_no_dsb(start, size)
+ *
+ *	This macro does not do "data synchronization barrier". Caller should
+ *	do "dsb" after transaction.
+ *
+ *	 start   - virtual start address of region
+ *	 size    - size in question
+ */
+SYM_FUNC_START(__dma_inv_area_no_dsb)
+	add	x1, x1, x0
+	dcache_line_size	x2, x3
+	sub	x3, x2, #1
+	tst	x1, x3				// end cache line aligned?
+	bic	x1, x1, x3
+	b.eq	1f
+	dc	civac, x1			// clean & invalidate D / U line
+1:	tst	x0, x3				// start cache line aligned?
+	bic	x0, x0, x3
+	b.eq    2f
+	dc	civac, x0			// clean & invalidate D / U line
+	b	3f
+2:	dc  ivac, x0			// invalidate D / U line
+3:	add x0, x0, x2
+	cmp	x0, x1
+	b.lo	2b
+	ret
+SYM_FUNC_END(__dma_inv_area_no_dsb)
+
+/*
+ *  __dma_clean_area_no_dsb(start, size)
+ *
+ *	This macro does not do "data synchronization barrier". Caller should
+ *	do "dsb" after transaction.
+ *
+ *	 start   - virtual start address of region
+ *	 size    - size in question
+ */
+SYM_FUNC_START(__dma_clean_area_no_dsb)
+	dcache_by_line_op_no_dsb cvac, x0, x1, x2, x3
+	ret
+SYM_FUNC_END(__dma_clean_area_no_dsb)
+
+/*
+ *  __dma_flush_area_no_dsb(start, size)
+ *
+ *	clean & invalidate D / U line
+ *	This macro does not do "data synchronization barrier". Caller should
+ *	do "dsb" after transaction.
+ *
+ *	 start   - virtual start address of region
+ *	 size    - size in question
+ */
+SYM_FUNC_START(__dma_flush_area_no_dsb)
+	dcache_by_line_op_no_dsb civac, x0, x1, x2, x3
+	ret
+SYM_FUNC_END(__dma_flush_area_no_dsb)
+
+/*
  *	dcache_clean_poc(start, end)
  *
  * 	Ensure that any D-cache lines for the interval [start, end)
diff -ruw linux-6.4/arch/arm64/mm/flush.c linux-6.4-fbx/arch/arm64/mm/flush.c
--- linux-6.4/arch/arm64/mm/flush.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/arch/arm64/mm/flush.c	2024-01-19 17:01:19.841846374 +0100
@@ -107,3 +107,39 @@
 }
 EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
 #endif
+
+void dmac_flush_range(const void *start, const void *end)
+{
+	dcache_clean_inval_poc((unsigned long)start, (unsigned long)end);
+}
+EXPORT_SYMBOL(dmac_flush_range);
+
+void dmac_flush_range_no_dsb(const void *start, const void *end)
+{
+	__dma_flush_area_no_dsb(start, (void *)(end) - (void *)(start));
+}
+EXPORT_SYMBOL(dmac_flush_range_no_dsb);
+
+void dmac_inv_range(const void *start, const void *end)
+{
+	dcache_inval_poc((unsigned long)start, (unsigned long)(end));
+}
+EXPORT_SYMBOL(dmac_inv_range);
+
+void dmac_inv_range_no_dsb(const void *start, const void *end)
+{
+	__dma_inv_area_no_dsb(start, (void *)(end) - (void *)(start));
+}
+EXPORT_SYMBOL(dmac_inv_range_no_dsb);
+
+void dmac_clean_range(const void *start, const void *end)
+{
+      dcache_clean_poc((unsigned long)start, (unsigned long)end);
+}
+EXPORT_SYMBOL(dmac_clean_range);
+
+void dmac_clean_range_no_dsb(const void *start, const void *end)
+{
+	__dma_clean_area_no_dsb(start, (void *)(end) - (void *)(start));
+}
+EXPORT_SYMBOL(dmac_clean_range_no_dsb);
diff -ruw linux-6.4/block/blk-flush.c linux-6.4-fbx/block/blk-flush.c
--- linux-6.4/block/blk-flush.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/block/blk-flush.c	2023-05-22 20:06:37.459673353 +0200
@@ -172,10 +172,21 @@
 	rq->flush.seq |= seq;
 	cmd_flags = rq->cmd_flags;
 
-	if (likely(!error))
+	if (likely(!error)) {
 		seq = blk_flush_cur_seq(rq);
-	else
+	} else {
 		seq = REQ_FSEQ_DONE;
+		printk_once(KERN_ERR "%s: flush failed: data integrity problem\n",
+				   rq->q->disk ? rq->q->disk->disk_name : "?");
+		/*
+		 * returning an error to the FS is wrong: the data is all
+		 * there, it just might not be written out in the expected
+		 * order and thus have a window where the integrity is suspect
+		 * in a crash.  Given the small likelihood of actually
+		 * crashing, we should just log a warning here.
+		 */
+		error = 0;
+	}
 
 	switch (seq) {
 	case REQ_FSEQ_PREFLUSH:
diff -ruw linux-6.4/block/blk-mq.c linux-6.4-fbx/block/blk-mq.c
--- linux-6.4/block/blk-mq.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/block/blk-mq.c	2023-06-27 11:47:15.663855962 +0200
@@ -955,7 +955,7 @@
 }
 EXPORT_SYMBOL_GPL(blk_update_request);
 
-static inline void blk_account_io_done(struct request *req, u64 now)
+static inline void blk_account_io_done(struct request *req, u64 now, blk_status_t error)
 {
 	/*
 	 * Account IO completion.  flush_rq isn't accounted as a
@@ -970,6 +970,8 @@
 		update_io_ticks(req->part, jiffies, true);
 		part_stat_inc(req->part, ios[sgrp]);
 		part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
+		if (error)
+			part_stat_inc(req->part, io_errors[rq_data_dir(req)]);
 		part_stat_unlock();
 	}
 }
@@ -994,19 +996,19 @@
 	}
 }
 
-static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
+static inline void __blk_mq_end_request_acct(struct request *rq, u64 now, blk_status_t error)
 {
 	if (rq->rq_flags & RQF_STATS)
 		blk_stat_add(rq, now);
 
 	blk_mq_sched_completed_request(rq, now);
-	blk_account_io_done(rq, now);
+	blk_account_io_done(rq, now, error);
 }
 
 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 {
 	if (blk_mq_need_time_stamp(rq))
-		__blk_mq_end_request_acct(rq, ktime_get_ns());
+		__blk_mq_end_request_acct(rq, ktime_get_ns(), error);
 
 	if (rq->end_io) {
 		rq_qos_done(rq->q, rq);
@@ -1060,7 +1062,7 @@
 
 		blk_complete_request(rq);
 		if (iob->need_ts)
-			__blk_mq_end_request_acct(rq, now);
+			__blk_mq_end_request_acct(rq, now, 0);
 
 		rq_qos_done(rq->q, rq);
 
@@ -3049,7 +3051,7 @@
 	blk_mq_run_dispatch_ops(q,
 			ret = blk_mq_request_issue_directly(rq, true));
 	if (ret)
-		blk_account_io_done(rq, ktime_get_ns());
+		blk_account_io_done(rq, ktime_get_ns(), 0);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
diff -ruw linux-6.4/block/blk.h linux-6.4-fbx/block/blk.h
--- linux-6.4/block/blk.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/block/blk.h	2023-05-22 20:06:37.475673778 +0200
@@ -404,6 +404,7 @@
 #define ADDPART_FLAG_NONE	0
 #define ADDPART_FLAG_RAID	1
 #define ADDPART_FLAG_WHOLEDISK	2
+#define ADDPART_FLAG_RO		4
 int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
 		sector_t length);
 int bdev_del_partition(struct gendisk *disk, int partno);
diff -ruw linux-6.4/block/genhd.c linux-6.4-fbx/block/genhd.c
--- linux-6.4/block/genhd.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/block/genhd.c	2023-05-22 20:06:37.475673778 +0200
@@ -984,6 +984,7 @@
 		"%8u %8u %8u "
 		"%8lu %8lu %8llu %8u "
 		"%8lu %8u"
+		"%8lu %8lu"
 		"\n",
 		stat.ios[STAT_READ],
 		stat.merges[STAT_READ],
@@ -1005,7 +1006,9 @@
 		(unsigned long long)stat.sectors[STAT_DISCARD],
 		(unsigned int)div_u64(stat.nsecs[STAT_DISCARD], NSEC_PER_MSEC),
 		stat.ios[STAT_FLUSH],
-		(unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC));
+		(unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC),
+		part_stat_read(bdev, io_errors[READ]),
+		part_stat_read(bdev, io_errors[WRITE]));
 }
 
 ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
diff -ruw linux-6.4/block/partitions/Kconfig linux-6.4-fbx/block/partitions/Kconfig
--- linux-6.4/block/partitions/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/block/partitions/Kconfig	2023-05-22 20:06:37.479673885 +0200
@@ -270,4 +270,12 @@
 	  Say Y here if you want to read the partition table from bootargs.
 	  The format for the command line is just like mtdparts.
 
+config OF_PARTITION
+	bool "Device tree partition support" if PARTITION_ADVANCED
+	depends on OF
+
+config OF_PARTITION_IGNORE_RO
+	bool "ignore read-only flag"
+	depends on OF_PARTITION
+
 endmenu
diff -ruw linux-6.4/block/partitions/Makefile linux-6.4-fbx/block/partitions/Makefile
--- linux-6.4/block/partitions/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/block/partitions/Makefile	2023-02-24 19:09:45.669973308 +0100
@@ -20,3 +20,4 @@
 obj-$(CONFIG_EFI_PARTITION) += efi.o
 obj-$(CONFIG_KARMA_PARTITION) += karma.o
 obj-$(CONFIG_SYSV68_PARTITION) += sysv68.o
+obj-$(CONFIG_OF_PARTITION) += dt.o
diff -ruw linux-6.4/block/partitions/check.h linux-6.4-fbx/block/partitions/check.h
--- linux-6.4/block/partitions/check.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/block/partitions/check.h	2023-05-22 20:06:37.479673885 +0200
@@ -67,3 +67,4 @@
 int sun_partition(struct parsed_partitions *state);
 int sysv68_partition(struct parsed_partitions *state);
 int ultrix_partition(struct parsed_partitions *state);
+int dt_partition(struct parsed_partitions *);
diff -ruw linux-6.4/block/partitions/core.c linux-6.4-fbx/block/partitions/core.c
--- linux-6.4/block/partitions/core.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/block/partitions/core.c	2023-05-22 20:06:37.479673885 +0200
@@ -13,6 +13,10 @@
 #include "check.h"
 
 static int (*check_part[])(struct parsed_partitions *) = {
+#ifdef CONFIG_OF_PARTITION
+	dt_partition,
+#endif
+
 	/*
 	 * Probe partition formats with tables at disk address 0
 	 * that also have an ADFS boot block at 0xdc0.
@@ -341,6 +345,7 @@
 
 	bdev->bd_start_sect = start;
 	bdev_set_nr_sectors(bdev, len);
+	bdev->bd_read_only = (flags & ADDPART_FLAG_RO);
 
 	pdev = &bdev->bd_device;
 	dname = dev_name(ddev);
diff -ruw linux-6.4/drivers/Kconfig linux-6.4-fbx/drivers/Kconfig
--- linux-6.4/drivers/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/Kconfig	2023-05-22 20:06:37.519674949 +0200
@@ -21,6 +21,8 @@
 
 source "drivers/gnss/Kconfig"
 
+source "drivers/fbxprocfs/Kconfig"
+
 source "drivers/mtd/Kconfig"
 
 source "drivers/of/Kconfig"
@@ -77,6 +79,10 @@
 
 source "drivers/gpio/Kconfig"
 
+source "drivers/fbxgpio/Kconfig"
+
+source "drivers/fbxjtag/Kconfig"
+
 source "drivers/w1/Kconfig"
 
 source "drivers/power/Kconfig"
@@ -85,6 +91,8 @@
 
 source "drivers/thermal/Kconfig"
 
+source "drivers/fbxwatchdog/Kconfig"
+
 source "drivers/watchdog/Kconfig"
 
 source "drivers/ssb/Kconfig"
diff -ruw linux-6.4/drivers/Makefile linux-6.4-fbx/drivers/Makefile
--- linux-6.4/drivers/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/Makefile	2023-05-22 20:06:37.519674949 +0200
@@ -21,7 +21,9 @@
 obj-$(CONFIG_GPIOLIB)		+= gpio/
 obj-y				+= pwm/
 
+obj-$(CONFIG_FREEBOX_GPIO)	+= fbxgpio/
 obj-y				+= pci/
+obj-$(CONFIG_FREEBOX_JTAG)	+= fbxjtag/
 
 obj-$(CONFIG_PARISC)		+= parisc/
 obj-$(CONFIG_RAPIDIO)		+= rapidio/
@@ -121,6 +123,7 @@
 obj-y				+= power/
 obj-$(CONFIG_HWMON)		+= hwmon/
 obj-$(CONFIG_THERMAL)		+= thermal/
+obj-$(CONFIG_FREEBOX_WATCHDOG)	+= fbxwatchdog/
 obj-$(CONFIG_WATCHDOG)		+= watchdog/
 obj-$(CONFIG_MD)		+= md/
 obj-$(CONFIG_BT)		+= bluetooth/
@@ -195,3 +198,5 @@
 obj-$(CONFIG_HTE)		+= hte/
 obj-$(CONFIG_DRM_ACCEL)		+= accel/
 obj-$(CONFIG_CDX_BUS)		+= cdx/
+
+obj-$(CONFIG_FREEBOX_PROCFS)	+= fbxprocfs/
diff -ruw linux-6.4/drivers/base/regmap/internal.h linux-6.4-fbx/drivers/base/regmap/internal.h
--- linux-6.4/drivers/base/regmap/internal.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/base/regmap/internal.h	2023-05-22 20:06:37.743680907 +0200
@@ -326,5 +326,6 @@
 #define regmap_init_ram(config, data)					\
 	__regmap_lockdep_wrapper(__regmap_init_ram, #config, config, data)
 
+void *regmap_mmio_ctx_get_base(const void *priv);
 
 #endif
diff -ruw linux-6.4/drivers/base/regmap/regmap-mmio.c linux-6.4-fbx/drivers/base/regmap/regmap-mmio.c
--- linux-6.4/drivers/base/regmap/regmap-mmio.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/base/regmap/regmap-mmio.c	2023-05-22 20:06:37.747681013 +0200
@@ -633,4 +633,10 @@
 }
 EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk);
 
+void *regmap_mmio_ctx_get_base(const void *priv)
+{
+	struct regmap_mmio_context *ctx = (struct regmap_mmio_context *)priv;
+	return ctx->regs;
+}
+
 MODULE_LICENSE("GPL v2");
diff -ruw linux-6.4/drivers/base/regmap/regmap.c linux-6.4-fbx/drivers/base/regmap/regmap.c
--- linux-6.4/drivers/base/regmap/regmap.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/base/regmap/regmap.c	2023-06-27 11:47:15.675856288 +0200
@@ -3542,6 +3542,15 @@
 }
 EXPORT_SYMBOL_GPL(regmap_parse_val);
 
+#ifdef CONFIG_REGMAP_MMIO
+void *regmap_get_mmio_base_address(struct regmap *map)
+{
+	return regmap_mmio_ctx_get_base(map->bus_context);
+}
+
+EXPORT_SYMBOL_GPL(regmap_get_mmio_base_address);
+#endif
+
 static int __init regmap_initcall(void)
 {
 	regmap_debugfs_initcall();
diff -ruw linux-6.4/drivers/char/Kconfig linux-6.4-fbx/drivers/char/Kconfig
--- linux-6.4/drivers/char/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/char/Kconfig	2023-05-22 20:06:37.807682609 +0200
@@ -315,6 +315,15 @@
 	  memory.
 	  When in doubt, say "Y".
 
+config DEVPHYSMEM
+	bool "/dev/physmem virtual device support"
+	default n
+	help
+	  Say Y here if you want to support the /dev/physmem device. The
+	  /dev/physmem device allows unprivileged access to physical memory
+	  unused by the kernel.
+	  When in doubt, say "N".
+
 config NVRAM
 	tristate "/dev/nvram support"
 	depends on X86 || HAVE_ARCH_NVRAM_OPS
@@ -422,3 +431,5 @@
 	  driver include crash and makedumpfile.
 
 endmenu
+
+source "drivers/char/diag/Kconfig"
diff -ruw linux-6.4/drivers/char/Makefile linux-6.4-fbx/drivers/char/Makefile
--- linux-6.4/drivers/char/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/char/Makefile	2023-05-22 20:06:37.807682609 +0200
@@ -44,3 +44,5 @@
 obj-$(CONFIG_XILLYBUS_CLASS)	+= xillybus/
 obj-$(CONFIG_POWERNV_OP_PANEL)	+= powernv-op-panel.o
 obj-$(CONFIG_ADI)		+= adi.o
+
+obj-$(CONFIG_DIAG_CHAR)		+= diag/
diff -ruw linux-6.4/drivers/char/hw_random/Kconfig linux-6.4-fbx/drivers/char/hw_random/Kconfig
--- linux-6.4/drivers/char/hw_random/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/char/hw_random/Kconfig	2023-05-22 20:30:14.537853935 +0200
@@ -98,6 +98,11 @@
 
 	  If unsure, say Y.
 
+config HW_RANDOM_BCM63XX
+	tristate "Broadcom BCM63xx Random Number Generator support"
+	depends on ARCH_BCMBCA || BCM63XX
+	default HW_RANDOM
+
 config HW_RANDOM_IPROC_RNG200
 	tristate "Broadcom iProc/STB RNG200 support"
 	depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
@@ -559,6 +564,13 @@
 	  To compile this driver as a module, choose M here.
 	  The module will be called jh7110-trng.
 
+config HW_RANDOM_QCOM
+	tristate "Qualcomm Random Number Generator Driver"
+	depends on ARCH_QCOM || COMPILE_TEST
+	help
+	  This driver provides support for the Random Number
+	  Generator hardware found on Qualcomm SoCs.
+
 endif # HW_RANDOM
 
 config UML_RANDOM
diff -ruw linux-6.4/drivers/char/hw_random/Makefile linux-6.4-fbx/drivers/char/hw_random/Makefile
--- linux-6.4/drivers/char/hw_random/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/char/hw_random/Makefile	2023-05-22 20:30:14.537853935 +0200
@@ -30,6 +30,7 @@
 obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
 obj-$(CONFIG_HW_RANDOM_HISI)	+= hisi-rng.o
 obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
+obj-$(CONFIG_HW_RANDOM_BCM63XX) += bcm63xx-rng.o
 obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
 obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
 obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
@@ -48,3 +49,4 @@
 obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o
 obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o
 obj-$(CONFIG_HW_RANDOM_JH7110) += jh7110-trng.o
+obj-$(CONFIG_HW_RANDOM_QCOM) += qcom-rng.o
diff -ruw linux-6.4/drivers/char/mem.c linux-6.4-fbx/drivers/char/mem.c
--- linux-6.4/drivers/char/mem.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/char/mem.c	2023-05-22 20:06:37.819682928 +0200
@@ -28,6 +28,8 @@
 #include <linux/export.h>
 #include <linux/io.h>
 #include <linux/uio.h>
+#include <linux/memblock.h>
+
 #include <linux/uaccess.h>
 #include <linux/security.h>
 
@@ -402,6 +404,14 @@
 	return 0;
 }
 
+static int mmap_physmem(struct file *file, struct vm_area_struct *vma)
+{
+	if (vma->vm_pgoff < max_pfn && !capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	return mmap_mem(file, vma);
+}
+
 static ssize_t read_port(struct file *file, char __user *buf,
 			 size_t count, loff_t *ppos)
 {
@@ -643,6 +653,11 @@
 	return 0;
 }
 
+static int open_physmem(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
 #define zero_lseek	null_lseek
 #define full_lseek      null_lseek
 #define write_zero	write_null
@@ -697,6 +712,14 @@
 	.write		= write_full,
 };
 
+static const struct file_operations __maybe_unused physmem_fops = {
+	.mmap		= mmap_physmem,
+	.open		= open_physmem,
+#ifndef CONFIG_MMU
+	.get_unmapped_area = get_unmapped_area_mem,
+#endif
+};
+
 static const struct memdev {
 	const char *name;
 	umode_t mode;
@@ -717,6 +740,9 @@
 #ifdef CONFIG_PRINTK
 	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
 #endif
+#ifdef CONFIG_DEVPHYSMEM
+	[16] = { "physmem", 0, &physmem_fops, FMODE_UNSIGNED_OFFSET },
+#endif
 };
 
 static int memory_open(struct inode *inode, struct file *filp)
diff -ruw linux-6.4/drivers/clk/qcom/Kconfig linux-6.4-fbx/drivers/clk/qcom/Kconfig
--- linux-6.4/drivers/clk/qcom/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/clk/qcom/Kconfig	2023-05-22 20:30:14.537853935 +0200
@@ -189,6 +189,12 @@
 	  i2c, USB, SD/eMMC, etc. Select this for the root clock
 	  of ipq9574.
 
+config IPQ_NSSCC_9574
+	tristate "IPQ9574 NSS Clock Controller"
+	depends on IPQ_GCC_9574
+	help
+	  Support for NSS clock controller on ipq9574 devices.
+
 config MSM_GCC_8660
 	tristate "MSM8660 Global Clock Controller"
 	help
diff -ruw linux-6.4/drivers/clk/qcom/Makefile linux-6.4-fbx/drivers/clk/qcom/Makefile
--- linux-6.4/drivers/clk/qcom/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/clk/qcom/Makefile	2023-05-22 20:30:14.537853935 +0200
@@ -29,6 +29,7 @@
 obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
 obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
 obj-$(CONFIG_IPQ_GCC_9574) += gcc-ipq9574.o
+obj-$(CONFIG_IPQ_NSSCC_9574) += nsscc-ipq9574.o
 obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
 obj-$(CONFIG_MDM_GCC_9607) += gcc-mdm9607.o
 obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
diff -ruw linux-6.4/drivers/clk/qcom/apss-ipq-pll.c linux-6.4-fbx/drivers/clk/qcom/apss-ipq-pll.c
--- linux-6.4/drivers/clk/qcom/apss-ipq-pll.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/clk/qcom/apss-ipq-pll.c	2024-04-11 13:35:37.261436406 +0200
@@ -111,6 +111,18 @@
 	.test_ctl_hi_val = 0x4000,
 };
 
+static const struct alpha_pll_config ipq9574_pll_config = {
+	.l = 0x3b,
+	.config_ctl_val = 0x200d4828,
+	.config_ctl_hi_val = 0x6,
+	.early_output_mask = BIT(3),
+	.aux2_output_mask = BIT(2),
+	.aux_output_mask = BIT(1),
+	.main_output_mask = BIT(0),
+	.test_ctl_val = 0x0,
+	.test_ctl_hi_val = 0x4000,
+};
+
 struct apss_pll_data {
 	int pll_type;
 	struct clk_alpha_pll *pll;
@@ -135,6 +147,12 @@
 	.pll_config = &ipq6018_pll_config,
 };
 
+static struct apss_pll_data ipq9574_pll_data = {
+	.pll_type = CLK_ALPHA_PLL_TYPE_HUAYRA,
+	.pll = &ipq_pll_huayra,
+	.pll_config = &ipq9574_pll_config,
+};
+
 static const struct regmap_config ipq_pll_regmap_config = {
 	.reg_bits		= 32,
 	.reg_stride		= 4,
@@ -180,6 +198,7 @@
 	{ .compatible = "qcom,ipq5332-a53pll", .data = &ipq5332_pll_data },
 	{ .compatible = "qcom,ipq6018-a53pll", .data = &ipq6018_pll_data },
 	{ .compatible = "qcom,ipq8074-a53pll", .data = &ipq8074_pll_data },
+	{ .compatible = "qcom,ipq9574-a73pll", .data = &ipq9574_pll_data },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, apss_ipq_pll_match_table);
diff -ruw linux-6.4/drivers/clk/qcom/clk-alpha-pll.h linux-6.4-fbx/drivers/clk/qcom/clk-alpha-pll.h
--- linux-6.4/drivers/clk/qcom/clk-alpha-pll.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/clk/qcom/clk-alpha-pll.h	2023-05-22 20:30:14.537853935 +0200
@@ -26,6 +26,7 @@
 	CLK_ALPHA_PLL_TYPE_RIVIAN_EVO,
 	CLK_ALPHA_PLL_TYPE_DEFAULT_EVO,
 	CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
+	CLK_ALPHA_PLL_TYPE_NSS_HUAYRA = CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
 	CLK_ALPHA_PLL_TYPE_STROMER,
 	CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
 	CLK_ALPHA_PLL_TYPE_MAX,
diff -ruw linux-6.4/drivers/clk/qcom/clk-rcg.h linux-6.4-fbx/drivers/clk/qcom/clk-rcg.h
--- linux-6.4/drivers/clk/qcom/clk-rcg.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/clk/qcom/clk-rcg.h	2023-06-09 19:10:34.765484755 +0200
@@ -17,6 +17,23 @@
 	u16 n;
 };
 
+#define C(s, h, m, n) { (s), (2 * (h) - 1), (m), (n) }
+#define FM(f, confs) { (f), ARRAY_SIZE(confs), (confs) }
+#define FMS(f, s, h, m, n) { (f), 1, (const struct freq_conf []){ C(s, h, m, n) } }
+
+struct freq_conf {
+	u8 src;
+	u8 pre_div;
+	u16 m;
+	u16 n;
+};
+
+struct freq_multi_tbl {
+	unsigned long freq;
+	int num_confs;
+	const struct freq_conf *confs;
+};
+
 /**
  * struct mn - M/N:D counter
  * @mnctr_en_bit: bit to enable mn counter
@@ -138,6 +155,7 @@
  * @safe_src_index: safe src index value
  * @parent_map: map from software's parent index to hardware's src_sel field
  * @freq_tbl: frequency table
+ * @freq_multi_tbl: frequency table for clocks reachable with multiple RCG configurations
  * @clkr: regmap clock handle
  * @cfg_off: defines the cfg register offset from the CMD_RCGR + CFG_REG
  * @parked_cfg: cached value of the CFG register for parked RCGs
@@ -148,7 +166,10 @@
 	u8			hid_width;
 	u8			safe_src_index;
 	const struct parent_map	*parent_map;
+	union {
 	const struct freq_tbl	*freq_tbl;
+		const struct freq_multi_tbl	*freq_multi_tbl;
+	};
 	struct clk_regmap	clkr;
 	u8			cfg_off;
 	u32			parked_cfg;
@@ -167,6 +188,7 @@
 
 extern const struct clk_ops clk_rcg2_ops;
 extern const struct clk_ops clk_rcg2_floor_ops;
+extern const struct clk_ops clk_rcg2_fm_ops;
 extern const struct clk_ops clk_rcg2_mux_closest_ops;
 extern const struct clk_ops clk_edp_pixel_ops;
 extern const struct clk_ops clk_byte_ops;
diff -ruw linux-6.4/drivers/clk/qcom/clk-rcg2.c linux-6.4-fbx/drivers/clk/qcom/clk-rcg2.c
--- linux-6.4/drivers/clk/qcom/clk-rcg2.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/clk/qcom/clk-rcg2.c	2023-06-09 19:10:34.765484755 +0200
@@ -266,6 +266,114 @@
 	return 0;
 }
 
+static const struct freq_conf *
+__clk_rcg2_select_conf(struct clk_hw *hw, const struct freq_multi_tbl *f,
+		       unsigned long req_rate)
+{
+	unsigned long best_rate = 0, parent_rate, rate;
+	const struct freq_conf *conf, *best_conf;
+	unsigned long clk_flags = clk_hw_get_flags(hw);
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	struct clk_hw *p;
+	int index, cur_index, i;
+
+	if (clk_flags & CLK_SET_RATE_NO_REPARENT)
+		cur_index = clk_rcg2_get_parent(hw);
+	else
+		cur_index = -1;
+
+	/* Exit early if only one config is defined */
+	if (f->num_confs == 1)
+		return f->confs;
+
+	/* Search the provided configs for the one closest to the wanted rate */
+	for (i = 0, conf = f->confs; i < f->num_confs; i++, conf++) {
+		index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
+		if (index < 0)
+			continue;
+
+		if ((clk_flags & CLK_SET_RATE_NO_REPARENT) &&
+		    index != cur_index)
+			continue;
+
+		p = clk_hw_get_parent_by_index(hw, index);
+		if (!p)
+			continue;
+
+		parent_rate = clk_hw_get_rate(p);
+		rate = calc_rate(parent_rate, conf->n, conf->m, conf->n, conf->pre_div);
+
+		if (rate == req_rate) {
+			best_conf = conf;
+			break;
+		}
+
+		if (abs(req_rate - rate) < abs(best_rate - rate)) {
+			best_rate = rate;
+			best_conf = conf;
+		}
+	}
+
+	/*
+	 * Very unlikely.
+	 * Force the first conf if we can't find a correct config.
+	 */
+	if (unlikely(i == f->num_confs))
+		best_conf = f->confs;
+
+	return best_conf;
+}
+
+static int _freq_tbl_fm_determine_rate(struct clk_hw *hw, const struct freq_multi_tbl *f,
+				       struct clk_rate_request *req)
+{
+	unsigned long clk_flags, rate = req->rate;
+	const struct freq_conf *conf;
+	struct clk_hw *p;
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	int index;
+
+	f = qcom_find_freq_multi(f, rate);
+	if (!f || !f->confs)
+		return -EINVAL;
+
+	conf = __clk_rcg2_select_conf(hw, f, rate);
+	index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
+	if (index < 0)
+		return index;
+
+	clk_flags = clk_hw_get_flags(hw);
+	p = clk_hw_get_parent_by_index(hw, index);
+	if (!p)
+		return -EINVAL;
+
+	if (clk_flags & CLK_SET_RATE_PARENT) {
+		rate = f->freq;
+		if (conf->pre_div) {
+			if (!rate)
+				rate = req->rate;
+			rate /= 2;
+			rate *= conf->pre_div + 1;
+		}
+
+		if (conf->n) {
+			u64 tmp = rate;
+
+			tmp = tmp * conf->n;
+			do_div(tmp, conf->m);
+			rate = tmp;
+		}
+	} else {
+		rate = clk_hw_get_rate(p);
+	}
+
+	req->best_parent_hw = p;
+	req->best_parent_rate = rate;
+	req->rate = f->freq;
+
+	return 0;
+}
+
 static int clk_rcg2_determine_rate(struct clk_hw *hw,
 				   struct clk_rate_request *req)
 {
@@ -282,6 +390,14 @@
 	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
 }
 
+static int clk_rcg2_fm_determine_rate(struct clk_hw *hw,
+				      struct clk_rate_request *req)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+	return _freq_tbl_fm_determine_rate(hw, rcg->freq_multi_tbl, req);
+}
+
 static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
 				u32 *_cfg)
 {
@@ -375,6 +491,27 @@
 	return clk_rcg2_configure(rcg, f);
 }
 
+static int __clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	const struct freq_multi_tbl *f;
+	const struct freq_conf *conf;
+	struct freq_tbl f_tbl;
+
+	f = qcom_find_freq_multi(rcg->freq_multi_tbl, rate);
+	if (!f || !f->confs)
+		return -EINVAL;
+
+	conf = __clk_rcg2_select_conf(hw, f, rate);
+	f_tbl.freq = f->freq;
+	f_tbl.src = conf->src;
+	f_tbl.pre_div = conf->pre_div;
+	f_tbl.m = conf->m;
+	f_tbl.n = conf->n;
+
+	return clk_rcg2_configure(rcg, &f_tbl);
+}
+
 static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
 			    unsigned long parent_rate)
 {
@@ -387,6 +524,12 @@
 	return __clk_rcg2_set_rate(hw, rate, FLOOR);
 }
 
+static int clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate)
+{
+	return __clk_rcg2_fm_set_rate(hw, rate);
+}
+
 static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
 		unsigned long rate, unsigned long parent_rate, u8 index)
 {
@@ -399,6 +542,12 @@
 	return __clk_rcg2_set_rate(hw, rate, FLOOR);
 }
 
+static int clk_rcg2_fm_set_rate_and_parent(struct clk_hw *hw,
+		unsigned long rate, unsigned long parent_rate, u8 index)
+{
+	return __clk_rcg2_fm_set_rate(hw, rate);
+}
+
 static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -509,6 +658,19 @@
 };
 EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
 
+const struct clk_ops clk_rcg2_fm_ops = {
+	.is_enabled = clk_rcg2_is_enabled,
+	.get_parent = clk_rcg2_get_parent,
+	.set_parent = clk_rcg2_set_parent,
+	.recalc_rate = clk_rcg2_recalc_rate,
+	.determine_rate = clk_rcg2_fm_determine_rate,
+	.set_rate = clk_rcg2_fm_set_rate,
+	.set_rate_and_parent = clk_rcg2_fm_set_rate_and_parent,
+	.get_duty_cycle = clk_rcg2_get_duty_cycle,
+	.set_duty_cycle = clk_rcg2_set_duty_cycle,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_fm_ops);
+
 const struct clk_ops clk_rcg2_mux_closest_ops = {
 	.determine_rate = __clk_mux_determine_rate_closest,
 	.get_parent = clk_rcg2_get_parent,
diff -ruw linux-6.4/drivers/clk/qcom/common.c linux-6.4-fbx/drivers/clk/qcom/common.c
--- linux-6.4/drivers/clk/qcom/common.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/clk/qcom/common.c	2023-05-22 20:30:14.537853935 +0200
@@ -41,6 +41,24 @@
 }
 EXPORT_SYMBOL_GPL(qcom_find_freq);
 
+const struct freq_multi_tbl *qcom_find_freq_multi(const struct freq_multi_tbl *f,
+						  unsigned long rate)
+{
+	if (!f)
+		return NULL;
+
+	if (!f->freq)
+		return f;
+
+	for (; f->freq; f++)
+		if (rate <= f->freq)
+			return f;
+
+	/* Default to our fastest rate */
+	return f - 1;
+}
+EXPORT_SYMBOL_GPL(qcom_find_freq_multi);
+
 const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f,
 					    unsigned long rate)
 {
diff -ruw linux-6.4/drivers/clk/qcom/common.h linux-6.4-fbx/drivers/clk/qcom/common.h
--- linux-6.4/drivers/clk/qcom/common.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/clk/qcom/common.h	2023-05-22 20:30:14.537853935 +0200
@@ -45,6 +45,8 @@
 					     unsigned long rate);
 extern const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f,
 						   unsigned long rate);
+extern const struct freq_multi_tbl *qcom_find_freq_multi(const struct freq_multi_tbl *f,
+							 unsigned long rate);
 extern void
 qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count);
 extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
diff -ruw linux-6.4/drivers/clk/qcom/gcc-ipq9574.c linux-6.4-fbx/drivers/clk/qcom/gcc-ipq9574.c
--- linux-6.4/drivers/clk/qcom/gcc-ipq9574.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/clk/qcom/gcc-ipq9574.c	2023-09-01 19:10:21.963053178 +0200
@@ -3,24 +3,24 @@
  * Copyright (c) 2023 The Linux Foundation. All rights reserved.
  */
 
+#include <linux/clk-provider.h>
 #include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
 #include <linux/regmap.h>
 
-#include <linux/reset-controller.h>
 #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
 #include <dt-bindings/reset/qcom,ipq9574-gcc.h>
 
-#include "clk-rcg.h"
-#include "clk-branch.h"
 #include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
 #include "clk-regmap-divider.h"
 #include "clk-regmap-mux.h"
 #include "clk-regmap-phy-mux.h"
+#include "common.h"
 #include "reset.h"
 
 /* Need to match the order of clocks in DT binding */
@@ -69,7 +69,7 @@
 	.clkr = {
 		.enable_reg = 0x0b000,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gpll0_main",
 			.parent_data = gcc_xo_data,
 			.num_parents = ARRAY_SIZE(gcc_xo_data),
@@ -81,7 +81,7 @@
 static struct clk_fixed_factor gpll0_out_main_div2 = {
 	.mult = 1,
 	.div = 2,
-	.hw.init = &(struct clk_init_data) {
+	.hw.init = &(const struct clk_init_data) {
 		.name = "gpll0_out_main_div2",
 		.parent_hws = (const struct clk_hw *[]) {
 			&gpll0_main.clkr.hw
@@ -96,7 +96,7 @@
 	.offset = 0x20000,
 	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
 	.width = 4,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "gpll0",
 		.parent_hws = (const struct clk_hw *[]) {
 			&gpll0_main.clkr.hw
@@ -113,7 +113,7 @@
 	.clkr = {
 		.enable_reg = 0x0b000,
 		.enable_mask = BIT(2),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gpll4_main",
 			.parent_data = gcc_xo_data,
 			.num_parents = ARRAY_SIZE(gcc_xo_data),
@@ -126,7 +126,7 @@
 	.offset = 0x22000,
 	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
 	.width = 4,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "gpll4",
 		.parent_hws = (const struct clk_hw *[]) {
 			&gpll4_main.clkr.hw
@@ -143,7 +143,7 @@
 	.clkr = {
 		.enable_reg = 0x0b000,
 		.enable_mask = BIT(1),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gpll2_main",
 			.parent_data = gcc_xo_data,
 			.num_parents = ARRAY_SIZE(gcc_xo_data),
@@ -156,7 +156,7 @@
 	.offset = 0x21000,
 	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
 	.width = 4,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "gpll2",
 		.parent_hws = (const struct clk_hw *[]) {
 			&gpll2_main.clkr.hw
@@ -172,7 +172,7 @@
 	.clkr = {
 		.enable_reg = 0x3400c,
 		.enable_mask = BIT(1),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_sleep_clk_src",
 			.parent_data = gcc_sleep_clk_data,
 			.num_parents = ARRAY_SIZE(gcc_sleep_clk_data),
@@ -420,7 +420,7 @@
 	.freq_tbl = ftbl_apss_ahb_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "apss_ahb_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -438,7 +438,7 @@
 	.freq_tbl = ftbl_apss_axi_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_div2_gpll0_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "apss_axi_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_div2_gpll0,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_div2_gpll0),
@@ -447,6 +447,8 @@
 };
 
 static const struct freq_tbl ftbl_blsp1_qup_i2c_apps_clk_src[] = {
+	F(960000, P_XO, 10, 0, 0),
+	F(4800000, P_XO, 5, 0, 0),
 	F(9600000, P_XO, 2.5, 0, 0),
 	F(24000000, P_XO, 1, 0, 0),
 	F(50000000, P_GPLL0, 16, 0, 0),
@@ -458,7 +460,7 @@
 	.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_qup1_i2c_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -483,7 +485,7 @@
 	.mnd_width = 8,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_qup1_spi_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -496,7 +498,7 @@
 	.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_qup2_i2c_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -510,7 +512,7 @@
 	.mnd_width = 8,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_qup2_spi_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -523,7 +525,7 @@
 	.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_qup3_i2c_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -537,7 +539,7 @@
 	.mnd_width = 8,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_qup3_spi_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -550,7 +552,7 @@
 	.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_qup4_i2c_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -564,7 +566,7 @@
 	.mnd_width = 8,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_qup4_spi_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -577,7 +579,7 @@
 	.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_qup5_i2c_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -591,7 +593,7 @@
 	.mnd_width = 8,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_qup5_spi_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -604,7 +606,7 @@
 	.freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_qup6_i2c_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -618,7 +620,7 @@
 	.mnd_width = 8,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_qup6_spi_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -650,7 +652,7 @@
 	.mnd_width = 16,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_uart1_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -664,7 +666,7 @@
 	.mnd_width = 16,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_uart2_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -678,7 +680,7 @@
 	.mnd_width = 16,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_uart3_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -692,7 +694,7 @@
 	.mnd_width = 16,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_uart4_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -706,7 +708,7 @@
 	.mnd_width = 16,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_uart5_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -720,7 +722,7 @@
 	.mnd_width = 16,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "blsp1_uart6_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -734,7 +736,7 @@
 	.clkr = {
 		.enable_reg = 0x0b004,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_apss_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&apss_ahb_clk_src.clkr.hw
@@ -752,7 +754,7 @@
 	.clkr = {
 		.enable_reg = 0x0b004,
 		.enable_mask = BIT(1),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_apss_axi_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&apss_axi_clk_src.clkr.hw
@@ -769,7 +771,7 @@
 	.clkr = {
 		.enable_reg = 0x2024,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_qup1_i2c_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_qup1_i2c_apps_clk_src.clkr.hw
@@ -786,7 +788,7 @@
 	.clkr = {
 		.enable_reg = 0x02020,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_qup1_spi_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_qup1_spi_apps_clk_src.clkr.hw
@@ -803,7 +805,7 @@
 	.clkr = {
 		.enable_reg = 0x03024,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_qup2_i2c_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_qup2_i2c_apps_clk_src.clkr.hw
@@ -820,7 +822,7 @@
 	.clkr = {
 		.enable_reg = 0x03020,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_qup2_spi_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_qup2_spi_apps_clk_src.clkr.hw
@@ -837,7 +839,7 @@
 	.clkr = {
 		.enable_reg = 0x04024,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_qup3_i2c_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_qup3_i2c_apps_clk_src.clkr.hw
@@ -854,7 +856,7 @@
 	.clkr = {
 		.enable_reg = 0x04020,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_qup3_spi_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_qup3_spi_apps_clk_src.clkr.hw
@@ -871,7 +873,7 @@
 	.clkr = {
 		.enable_reg = 0x05024,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_qup4_i2c_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_qup4_i2c_apps_clk_src.clkr.hw
@@ -888,7 +890,7 @@
 	.clkr = {
 		.enable_reg = 0x05020,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_qup4_spi_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_qup4_spi_apps_clk_src.clkr.hw
@@ -905,7 +907,7 @@
 	.clkr = {
 		.enable_reg = 0x06024,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_qup5_i2c_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_qup5_i2c_apps_clk_src.clkr.hw
@@ -922,7 +924,7 @@
 	.clkr = {
 		.enable_reg = 0x06020,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_qup5_spi_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_qup5_spi_apps_clk_src.clkr.hw
@@ -939,7 +941,7 @@
 	.clkr = {
 		.enable_reg = 0x07024,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_qup6_i2c_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_qup6_i2c_apps_clk_src.clkr.hw
@@ -956,7 +958,7 @@
 	.clkr = {
 		.enable_reg = 0x07020,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_qup6_spi_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_qup6_spi_apps_clk_src.clkr.hw
@@ -973,7 +975,7 @@
 	.clkr = {
 		.enable_reg = 0x02040,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_uart1_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_uart1_apps_clk_src.clkr.hw
@@ -990,7 +992,7 @@
 	.clkr = {
 		.enable_reg = 0x03040,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_uart2_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_uart2_apps_clk_src.clkr.hw
@@ -1007,7 +1009,7 @@
 	.clkr = {
 		.enable_reg = 0x04054,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_uart3_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_uart3_apps_clk_src.clkr.hw
@@ -1024,7 +1026,7 @@
 	.clkr = {
 		.enable_reg = 0x05040,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_uart4_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_uart4_apps_clk_src.clkr.hw
@@ -1041,7 +1043,7 @@
 	.clkr = {
 		.enable_reg = 0x06040,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_uart5_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_uart5_apps_clk_src.clkr.hw
@@ -1058,7 +1060,7 @@
 	.clkr = {
 		.enable_reg = 0x07040,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_uart6_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&blsp1_uart6_apps_clk_src.clkr.hw
@@ -1080,7 +1082,7 @@
 	.freq_tbl = ftbl_pcie0_axi_m_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll4_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "pcie0_axi_m_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll4,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
@@ -1093,7 +1095,7 @@
 	.clkr = {
 		.enable_reg = 0x28038,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie0_axi_m_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie0_axi_m_clk_src.clkr.hw
@@ -1110,7 +1112,7 @@
 	.clkr = {
 		.enable_reg = 0x2e07c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_anoc_pcie0_1lane_m_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie0_axi_m_clk_src.clkr.hw
@@ -1127,7 +1129,7 @@
 	.freq_tbl = ftbl_pcie0_axi_m_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll4_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "pcie1_axi_m_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll4,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
@@ -1140,7 +1142,7 @@
 	.clkr = {
 		.enable_reg = 0x29038,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie1_axi_m_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie1_axi_m_clk_src.clkr.hw
@@ -1157,7 +1159,7 @@
 	.clkr = {
 		.enable_reg = 0x2e08c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_anoc_pcie1_1lane_m_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie1_axi_m_clk_src.clkr.hw
@@ -1179,7 +1181,7 @@
 	.freq_tbl = ftbl_pcie2_axi_m_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "pcie2_axi_m_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk),
@@ -1192,7 +1194,7 @@
 	.clkr = {
 		.enable_reg = 0x2a038,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie2_axi_m_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie2_axi_m_clk_src.clkr.hw
@@ -1209,7 +1211,7 @@
 	.clkr = {
 		.enable_reg = 0x2e080,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_anoc_pcie2_2lane_m_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie2_axi_m_clk_src.clkr.hw
@@ -1226,7 +1228,7 @@
 	.freq_tbl = ftbl_pcie2_axi_m_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "pcie3_axi_m_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk),
@@ -1239,7 +1241,7 @@
 	.clkr = {
 		.enable_reg = 0x2b038,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie3_axi_m_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie3_axi_m_clk_src.clkr.hw
@@ -1256,7 +1258,7 @@
 	.clkr = {
 		.enable_reg = 0x2e090,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_anoc_pcie3_2lane_m_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie3_axi_m_clk_src.clkr.hw
@@ -1273,7 +1275,7 @@
 	.freq_tbl = ftbl_pcie0_axi_m_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll4_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "pcie0_axi_s_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll4,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
@@ -1286,7 +1288,7 @@
 	.clkr = {
 		.enable_reg = 0x2803c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie0_axi_s_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie0_axi_s_clk_src.clkr.hw
@@ -1303,7 +1305,7 @@
 	.clkr = {
 		.enable_reg = 0x28040,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie0_axi_s_bridge_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie0_axi_s_clk_src.clkr.hw
@@ -1320,7 +1322,7 @@
 	.clkr = {
 		.enable_reg = 0x2e048,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_snoc_pcie0_1lane_s_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie0_axi_s_clk_src.clkr.hw
@@ -1337,7 +1339,7 @@
 	.freq_tbl = ftbl_pcie0_axi_m_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll4_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "pcie1_axi_s_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll4,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
@@ -1350,7 +1352,7 @@
 	.clkr = {
 		.enable_reg = 0x2903c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie1_axi_s_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie1_axi_s_clk_src.clkr.hw
@@ -1367,7 +1369,7 @@
 	.clkr = {
 		.enable_reg = 0x29040,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie1_axi_s_bridge_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie1_axi_s_clk_src.clkr.hw
@@ -1384,7 +1386,7 @@
 	.clkr = {
 		.enable_reg = 0x2e04c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_snoc_pcie1_1lane_s_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie1_axi_s_clk_src.clkr.hw
@@ -1401,7 +1403,7 @@
 	.freq_tbl = ftbl_pcie0_axi_m_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll4_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "pcie2_axi_s_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll4,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
@@ -1414,7 +1416,7 @@
 	.clkr = {
 		.enable_reg = 0x2a03c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie2_axi_s_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie2_axi_s_clk_src.clkr.hw
@@ -1431,7 +1433,7 @@
 	.clkr = {
 		.enable_reg = 0x2a040,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie2_axi_s_bridge_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie2_axi_s_clk_src.clkr.hw
@@ -1448,7 +1450,7 @@
 	.clkr = {
 		.enable_reg = 0x2e050,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_snoc_pcie2_2lane_s_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie2_axi_s_clk_src.clkr.hw
@@ -1465,7 +1467,7 @@
 	.freq_tbl = ftbl_pcie0_axi_m_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll4_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "pcie3_axi_s_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll4,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
@@ -1478,7 +1480,7 @@
 	.clkr = {
 		.enable_reg = 0x2b03c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie3_axi_s_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie3_axi_s_clk_src.clkr.hw
@@ -1495,7 +1497,7 @@
 	.clkr = {
 		.enable_reg = 0x2b040,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie3_axi_s_bridge_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie3_axi_s_clk_src.clkr.hw
@@ -1512,7 +1514,7 @@
 	.clkr = {
 		.enable_reg = 0x2e054,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_snoc_pcie3_2lane_s_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie3_axi_s_clk_src.clkr.hw
@@ -1527,7 +1529,7 @@
 static struct clk_regmap_phy_mux pcie0_pipe_clk_src = {
 	.reg = 0x28064,
 	.clkr = {
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "pcie0_pipe_clk_src",
 			.parent_data = &(const struct clk_parent_data) {
 				.index = DT_PCIE30_PHY0_PIPE_CLK,
@@ -1538,10 +1540,28 @@
 	},
 };
 
+static struct clk_branch gcc_pcie0_pipe_clk = {
+	.halt_reg = 0x28044,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x28044,
+		.enable_mask = BIT(0),
+		.hw.init = &(const struct clk_init_data) {
+			.name = "gcc_pcie0_pipe_clk",
+			.parent_hws = (const struct clk_hw *[]) {
+				&pcie0_pipe_clk_src.clkr.hw
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
 static struct clk_regmap_phy_mux pcie1_pipe_clk_src = {
 	.reg = 0x29064,
 	.clkr = {
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "pcie1_pipe_clk_src",
 			.parent_data = &(const struct clk_parent_data) {
 				.index = DT_PCIE30_PHY1_PIPE_CLK,
@@ -1552,10 +1572,28 @@
 	},
 };
 
+static struct clk_branch gcc_pcie1_pipe_clk = {
+	.halt_reg = 0x29044,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x29044,
+		.enable_mask = BIT(0),
+		.hw.init = &(const struct clk_init_data) {
+			.name = "gcc_pcie1_pipe_clk",
+			.parent_hws = (const struct clk_hw *[]) {
+				&pcie1_pipe_clk_src.clkr.hw
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
 static struct clk_regmap_phy_mux pcie2_pipe_clk_src = {
 	.reg = 0x2a064,
 	.clkr = {
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "pcie2_pipe_clk_src",
 			.parent_data = &(const struct clk_parent_data) {
 				.index = DT_PCIE30_PHY2_PIPE_CLK,
@@ -1566,10 +1604,28 @@
 	},
 };
 
+static struct clk_branch gcc_pcie2_pipe_clk = {
+	.halt_reg = 0x2a044,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x2a044,
+		.enable_mask = BIT(0),
+		.hw.init = &(const struct clk_init_data) {
+			.name = "gcc_pcie2_pipe_clk",
+			.parent_hws = (const struct clk_hw *[]) {
+				&pcie2_pipe_clk_src.clkr.hw
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
 static struct clk_regmap_phy_mux pcie3_pipe_clk_src = {
 	.reg = 0x2b064,
 	.clkr = {
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "pcie3_pipe_clk_src",
 			.parent_data = &(const struct clk_parent_data) {
 				.index = DT_PCIE30_PHY3_PIPE_CLK,
@@ -1580,6 +1636,24 @@
 	},
 };
 
+static struct clk_branch gcc_pcie3_pipe_clk = {
+	.halt_reg = 0x2b044,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x2b044,
+		.enable_mask = BIT(0),
+		.hw.init = &(const struct clk_init_data) {
+			.name = "gcc_pcie3_pipe_clk",
+			.parent_hws = (const struct clk_hw *[]) {
+				&pcie3_pipe_clk_src.clkr.hw
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
 static const struct freq_tbl ftbl_pcie_rchng_clk_src[] = {
 	F(24000000, P_XO, 1, 0, 0),
 	F(100000000, P_GPLL0, 8, 0, 0),
@@ -1591,7 +1665,7 @@
 	.freq_tbl = ftbl_pcie_rchng_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "pcie0_rchng_clk_src",
 		.parent_data = gcc_xo_gpll0,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -1604,7 +1678,7 @@
 	.clkr = {
 		.enable_reg = 0x28028,
 		.enable_mask = BIT(1),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie0_rchng_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie0_rchng_clk_src.clkr.hw
@@ -1622,7 +1696,7 @@
 	.freq_tbl = ftbl_pcie_rchng_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "pcie1_rchng_clk_src",
 		.parent_data = gcc_xo_gpll0,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -1635,7 +1709,7 @@
 	.clkr = {
 		.enable_reg = 0x29028,
 		.enable_mask = BIT(1),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie1_rchng_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie1_rchng_clk_src.clkr.hw
@@ -1652,7 +1726,7 @@
 	.freq_tbl = ftbl_pcie_rchng_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "pcie2_rchng_clk_src",
 		.parent_data = gcc_xo_gpll0,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -1665,7 +1739,7 @@
 	.clkr = {
 		.enable_reg = 0x2a028,
 		.enable_mask = BIT(1),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie2_rchng_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie2_rchng_clk_src.clkr.hw
@@ -1682,7 +1756,7 @@
 	.freq_tbl = ftbl_pcie_rchng_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "pcie3_rchng_clk_src",
 		.parent_data = gcc_xo_gpll0,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -1695,7 +1769,7 @@
 	.clkr = {
 		.enable_reg = 0x2b028,
 		.enable_mask = BIT(1),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie3_rchng_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie3_rchng_clk_src.clkr.hw
@@ -1718,7 +1792,7 @@
 	.mnd_width = 16,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_aux_core_pi_sleep_clk_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "pcie_aux_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_aux_core_pi_sleep_clk,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_aux_core_pi_sleep_clk),
@@ -1731,7 +1805,7 @@
 	.clkr = {
 		.enable_reg = 0x28034,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie0_aux_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie_aux_clk_src.clkr.hw
@@ -1748,7 +1822,7 @@
 	.clkr = {
 		.enable_reg = 0x29034,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie1_aux_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie_aux_clk_src.clkr.hw
@@ -1765,7 +1839,7 @@
 	.clkr = {
 		.enable_reg = 0x2a034,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie2_aux_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie_aux_clk_src.clkr.hw
@@ -1782,7 +1856,7 @@
 	.clkr = {
 		.enable_reg = 0x2b034,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie3_aux_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcie_aux_clk_src.clkr.hw
@@ -1805,7 +1879,7 @@
 	.mnd_width = 16,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_core_pi_sleep_clk_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "usb0_aux_clk_src",
 		.parent_data = gcc_xo_gpll0_core_pi_sleep_clk,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_core_pi_sleep_clk),
@@ -1818,7 +1892,7 @@
 	.clkr = {
 		.enable_reg = 0x2c048,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_usb0_aux_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&usb0_aux_clk_src.clkr.hw
@@ -1842,7 +1916,7 @@
 	.mnd_width = 8,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_out_main_div2_gpll0_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "usb0_master_clk_src",
 		.parent_data = gcc_xo_gpll0_out_main_div2_gpll0,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_out_main_div2_gpll0),
@@ -1855,7 +1929,7 @@
 	.clkr = {
 		.enable_reg = 0x2c044,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_usb0_master_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&usb0_master_clk_src.clkr.hw
@@ -1872,7 +1946,7 @@
 	.clkr = {
 		.enable_reg = 0x2e058,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_snoc_usb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&usb0_master_clk_src.clkr.hw
@@ -1889,7 +1963,7 @@
 	.clkr = {
 		.enable_reg = 0x2e084,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_anoc_usb_axi_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&usb0_master_clk_src.clkr.hw
@@ -1913,7 +1987,7 @@
 	.mnd_width = 8,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll4_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "usb0_mock_utmi_clk_src",
 		.parent_data = gcc_xo_gpll4_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll4_gpll0_gpll0_out_main_div2),
@@ -1925,7 +1999,7 @@
 	.reg = 0x2c040,
 	.shift = 0,
 	.width = 2,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "usb0_mock_utmi_div_clk_src",
 		.parent_data = &(const struct clk_parent_data) {
 			.hw = &usb0_mock_utmi_clk_src.clkr.hw,
@@ -1941,7 +2015,7 @@
 	.clkr = {
 		.enable_reg = 0x2c04c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_usb0_mock_utmi_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&usb0_mock_utmi_div_clk_src.clkr.hw
@@ -1959,7 +2033,7 @@
 	.width = 2,
 	.parent_map = gcc_usb3phy_0_cc_pipe_clk_xo_map,
 	.clkr = {
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "usb0_pipe_clk_src",
 			.parent_data = gcc_usb3phy_0_cc_pipe_clk_xo,
 			.num_parents = ARRAY_SIZE(gcc_usb3phy_0_cc_pipe_clk_xo),
@@ -1969,6 +2043,41 @@
 	},
 };
 
+static struct clk_branch gcc_usb0_pipe_clk = {
+	.halt_reg = 0x2c054,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x2c054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb0_pipe_clk",
+			.parent_hws = (const struct clk_hw *[]) {
+				&usb0_pipe_clk_src.clkr.hw
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb0_sleep_clk = {
+	.halt_reg = 0x2c058,
+	.clkr = {
+		.enable_reg = 0x2c058,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb0_sleep_clk",
+			.parent_hws = (const struct clk_hw *[]) {
+				&gcc_sleep_clk_src.clkr.hw
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
 static const struct freq_tbl ftbl_sdcc_apps_clk_src[] = {
 	F(144000, P_XO, 16, 12, 125),
 	F(400000, P_XO, 12, 1, 5),
@@ -1988,7 +2097,7 @@
 	.mnd_width = 8,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll2_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "sdcc1_apps_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll2_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2_gpll0_out_main_div2),
@@ -2001,7 +2110,7 @@
 	.clkr = {
 		.enable_reg = 0x3302c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_sdcc1_apps_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&sdcc1_apps_clk_src.clkr.hw
@@ -2024,7 +2133,7 @@
 	.mnd_width = 8,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll4_gpll0_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "sdcc1_ice_core_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll4_gpll0_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4_gpll0_div2),
@@ -2037,7 +2146,7 @@
 	.clkr = {
 		.enable_reg = 0x33030,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_sdcc1_ice_core_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&sdcc1_ice_core_clk_src.clkr.hw
@@ -2062,7 +2171,7 @@
 	.freq_tbl = ftbl_pcnoc_bfdcd_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "pcnoc_bfdcd_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -2076,7 +2185,7 @@
 	.clkr = {
 		.enable_reg = 0x1702c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_nsscfg_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2093,7 +2202,7 @@
 	.clkr = {
 		.enable_reg = 0x17030,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_nssnoc_nsscc_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2110,7 +2219,7 @@
 	.clkr = {
 		.enable_reg = 0x17034,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_nsscc_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2127,7 +2236,7 @@
 	.clkr = {
 		.enable_reg = 0x17080,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_nssnoc_pcnoc_1_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2144,7 +2253,7 @@
 	.clkr = {
 		.enable_reg = 0x2d064,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_qdss_dap_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2161,7 +2270,7 @@
 	.clkr = {
 		.enable_reg = 0x2d068,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_qdss_cfg_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2178,7 +2287,7 @@
 	.clkr = {
 		.enable_reg = 0x32010,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_qpic_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2195,7 +2304,7 @@
 	.clkr = {
 		.enable_reg = 0x32014,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_qpic_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2213,7 +2322,7 @@
 	.clkr = {
 		.enable_reg = 0x0b004,
 		.enable_mask = BIT(4),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_blsp1_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2230,7 +2339,7 @@
 	.clkr = {
 		.enable_reg = 0x17040,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_mdio_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2248,7 +2357,7 @@
 	.clkr = {
 		.enable_reg = 0x0b004,
 		.enable_mask = BIT(10),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_prng_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2265,7 +2374,7 @@
 	.clkr = {
 		.enable_reg = 0x1704c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_uniphy0_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2282,7 +2391,7 @@
 	.clkr = {
 		.enable_reg = 0x1705c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_uniphy1_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2299,7 +2408,7 @@
 	.clkr = {
 		.enable_reg = 0x1706c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_uniphy2_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2316,7 +2425,7 @@
 	.clkr = {
 		.enable_reg = 0x3a004,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_cmn_12gpll_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2333,7 +2442,7 @@
 	.clkr = {
 		.enable_reg = 0x3a00c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_cmn_12gpll_apu_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2350,7 +2459,7 @@
 	.clkr = {
 		.enable_reg = 0x28030,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie0_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2367,7 +2476,7 @@
 	.clkr = {
 		.enable_reg = 0x29030,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie1_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2384,7 +2493,7 @@
 	.clkr = {
 		.enable_reg = 0x2a030,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie2_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2401,7 +2510,7 @@
 	.clkr = {
 		.enable_reg = 0x2b030,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcie3_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2418,7 +2527,7 @@
 	.clkr = {
 		.enable_reg = 0x2c05c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_usb0_phy_cfg_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2435,7 +2544,7 @@
 	.clkr = {
 		.enable_reg = 0x33034,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_sdcc1_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&pcnoc_bfdcd_clk_src.clkr.hw
@@ -2460,7 +2569,7 @@
 	.freq_tbl = ftbl_system_noc_bfdcd_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll4_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "system_noc_bfdcd_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll4,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
@@ -2475,7 +2584,7 @@
 	.clkr = {
 		.enable_reg = 0x25080,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_q6ss_boot_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&system_noc_bfdcd_clk_src.clkr.hw
@@ -2492,7 +2601,7 @@
 	.clkr = {
 		.enable_reg = 0x17028,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_nssnoc_snoc_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&system_noc_bfdcd_clk_src.clkr.hw
@@ -2509,7 +2618,7 @@
 	.clkr = {
 		.enable_reg = 0x1707c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_nssnoc_snoc_1_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&system_noc_bfdcd_clk_src.clkr.hw
@@ -2526,7 +2635,7 @@
 	.clkr = {
 		.enable_reg = 0x2d060,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_qdss_etr_usb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&system_noc_bfdcd_clk_src.clkr.hw
@@ -2549,7 +2658,7 @@
 	.freq_tbl = ftbl_wcss_ahb_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "wcss_ahb_clk_src",
 		.parent_data = gcc_xo_gpll0,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -2562,7 +2671,7 @@
 	.clkr = {
 		.enable_reg = 0x25014,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_q6_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&wcss_ahb_clk_src.clkr.hw
@@ -2579,7 +2688,7 @@
 	.clkr = {
 		.enable_reg = 0x25018,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_q6_ahb_s_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&wcss_ahb_clk_src.clkr.hw
@@ -2596,7 +2705,7 @@
 	.clkr = {
 		.enable_reg = 0x25058,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_wcss_ecahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&wcss_ahb_clk_src.clkr.hw
@@ -2613,7 +2722,7 @@
 	.clkr = {
 		.enable_reg = 0x2505c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_wcss_acmt_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&wcss_ahb_clk_src.clkr.hw
@@ -2630,7 +2739,7 @@
 	.clkr = {
 		.enable_reg = 0x2e030,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_sys_noc_wcss_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&wcss_ahb_clk_src.clkr.hw
@@ -2654,7 +2763,7 @@
 	.freq_tbl = ftbl_wcss_axi_m_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "wcss_axi_m_clk_src",
 		.parent_data = gcc_xo_gpll0,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -2667,7 +2776,7 @@
 	.clkr = {
 		.enable_reg = 0x2e0a8,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_anoc_wcss_axi_m_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&wcss_axi_m_clk_src.clkr.hw
@@ -2689,7 +2798,7 @@
 	.freq_tbl = ftbl_qdss_at_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll4_gpll0_gpll0_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "qdss_at_clk_src",
 		.parent_data = gcc_xo_gpll4_gpll0_gpll0_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll4_gpll0_gpll0_div2),
@@ -2702,7 +2811,7 @@
 	.clkr = {
 		.enable_reg = 0x2501c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_q6ss_atbm_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_at_clk_src.clkr.hw
@@ -2719,7 +2828,7 @@
 	.clkr = {
 		.enable_reg = 0x2503c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_wcss_dbg_ifc_atb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_at_clk_src.clkr.hw
@@ -2736,7 +2845,7 @@
 	.clkr = {
 		.enable_reg = 0x17014,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_nssnoc_atb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_at_clk_src.clkr.hw
@@ -2753,7 +2862,7 @@
 	.clkr = {
 		.enable_reg = 0x2d038,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_qdss_at_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_at_clk_src.clkr.hw
@@ -2770,7 +2879,7 @@
 	.clkr = {
 		.enable_reg = 0x2e038,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_sys_noc_at_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_at_clk_src.clkr.hw
@@ -2787,7 +2896,7 @@
 	.clkr = {
 		.enable_reg = 0x31024,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_pcnoc_at_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_at_clk_src.clkr.hw
@@ -2802,7 +2911,7 @@
 static struct clk_fixed_factor gcc_eud_at_div_clk_src = {
 	.mult = 1,
 	.div = 6,
-	.hw.init = &(struct clk_init_data) {
+	.hw.init = &(const struct clk_init_data) {
 		.name = "gcc_eud_at_div_clk_src",
 		.parent_hws = (const struct clk_hw *[]) {
 			&qdss_at_clk_src.clkr.hw
@@ -2818,7 +2927,7 @@
 	.clkr = {
 		.enable_reg = 0x30004,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_usb0_eud_at_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&gcc_eud_at_div_clk_src.hw
@@ -2835,7 +2944,7 @@
 	.clkr = {
 		.enable_reg = 0x2d06c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_qdss_eud_at_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&gcc_eud_at_div_clk_src.hw
@@ -2858,7 +2967,7 @@
 	.freq_tbl = ftbl_qdss_stm_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "qdss_stm_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_out_main_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_out_main_div2),
@@ -2871,7 +2980,7 @@
 	.clkr = {
 		.enable_reg = 0x2d03c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_qdss_stm_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_stm_clk_src.clkr.hw
@@ -2888,7 +2997,7 @@
 	.clkr = {
 		.enable_reg = 0x2e034,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_sys_noc_qdss_stm_axi_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_stm_clk_src.clkr.hw
@@ -2910,7 +3019,7 @@
 	.freq_tbl = ftbl_qdss_traceclkin_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll4_gpll0_gpll0_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "qdss_traceclkin_clk_src",
 		.parent_data = gcc_xo_gpll4_gpll0_gpll0_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll4_gpll0_gpll0_div2),
@@ -2923,7 +3032,7 @@
 	.clkr = {
 		.enable_reg = 0x2d040,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_qdss_traceclkin_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_traceclkin_clk_src.clkr.hw
@@ -2945,7 +3054,7 @@
 	.freq_tbl = ftbl_qdss_tsctr_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll4_gpll0_gpll0_div2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "qdss_tsctr_clk_src",
 		.parent_data = gcc_xo_gpll4_gpll0_gpll0_div2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll4_gpll0_gpll0_div2),
@@ -2956,7 +3065,7 @@
 static struct clk_fixed_factor qdss_tsctr_div2_clk_src = {
 	.mult = 1,
 	.div = 2,
-	.hw.init = &(struct clk_init_data) {
+	.hw.init = &(const struct clk_init_data) {
 		.name = "qdss_tsctr_div2_clk_src",
 		.parent_hws = (const struct clk_hw *[]) {
 			&qdss_tsctr_clk_src.clkr.hw
@@ -2972,7 +3081,7 @@
 	.clkr = {
 		.enable_reg = 0x25020,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_q6_tsctr_1to2_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_tsctr_div2_clk_src.hw
@@ -2989,7 +3098,7 @@
 	.clkr = {
 		.enable_reg = 0x25040,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_wcss_dbg_ifc_nts_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_tsctr_div2_clk_src.hw
@@ -3006,7 +3115,7 @@
 	.clkr = {
 		.enable_reg = 0x2d044,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_qdss_tsctr_div2_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_tsctr_div2_clk_src.hw
@@ -3029,7 +3138,7 @@
 	.mnd_width = 8,
 	.hid_width = 5,
 	.parent_map = gcc_xo_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "uniphy_sys_clk_src",
 		.parent_data = gcc_xo_data,
 		.num_parents = ARRAY_SIZE(gcc_xo_data),
@@ -3043,7 +3152,7 @@
 	.mnd_width = 8,
 	.hid_width = 5,
 	.parent_map = gcc_xo_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "nss_ts_clk_src",
 		.parent_data = gcc_xo_data,
 		.num_parents = ARRAY_SIZE(gcc_xo_data),
@@ -3056,7 +3165,7 @@
 	.clkr = {
 		.enable_reg = 0x2d078,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_qdss_ts_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&nss_ts_clk_src.clkr.hw
@@ -3071,7 +3180,7 @@
 static struct clk_fixed_factor qdss_dap_sync_clk_src = {
 	.mult = 1,
 	.div = 4,
-	.hw.init = &(struct clk_init_data) {
+	.hw.init = &(const struct clk_init_data) {
 		.name = "qdss_dap_sync_clk_src",
 		.parent_hws = (const struct clk_hw *[]) {
 			&qdss_tsctr_clk_src.clkr.hw
@@ -3086,7 +3195,7 @@
 	.clkr = {
 		.enable_reg = 0x2d04c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_qdss_tsctr_div4_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_dap_sync_clk_src.hw
@@ -3101,7 +3210,7 @@
 static struct clk_fixed_factor qdss_tsctr_div8_clk_src = {
 	.mult = 1,
 	.div = 8,
-	.hw.init = &(struct clk_init_data) {
+	.hw.init = &(const struct clk_init_data) {
 		.name = "qdss_tsctr_div8_clk_src",
 		.parent_hws = (const struct clk_hw *[]) {
 			&qdss_tsctr_clk_src.clkr.hw
@@ -3116,7 +3225,7 @@
 	.clkr = {
 		.enable_reg = 0x17018,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_nss_ts_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&nss_ts_clk_src.clkr.hw
@@ -3133,7 +3242,7 @@
 	.clkr = {
 		.enable_reg = 0x2d050,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_qdss_tsctr_div8_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_tsctr_div8_clk_src.hw
@@ -3148,7 +3257,7 @@
 static struct clk_fixed_factor qdss_tsctr_div16_clk_src = {
 	.mult = 1,
 	.div = 16,
-	.hw.init = &(struct clk_init_data) {
+	.hw.init = &(const struct clk_init_data) {
 		.name = "qdss_tsctr_div16_clk_src",
 		.parent_hws = (const struct clk_hw *[]) {
 			&qdss_tsctr_clk_src.clkr.hw
@@ -3163,7 +3272,7 @@
 	.clkr = {
 		.enable_reg = 0x2d054,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_qdss_tsctr_div16_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_tsctr_div16_clk_src.hw
@@ -3180,7 +3289,7 @@
 	.clkr = {
 		.enable_reg = 0x25024,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_q6ss_pclkdbg_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_dap_sync_clk_src.hw
@@ -3197,7 +3306,7 @@
 	.clkr = {
 		.enable_reg = 0x25068,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_q6ss_trig_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_dap_sync_clk_src.hw
@@ -3214,7 +3323,7 @@
 	.clkr = {
 		.enable_reg = 0x25038,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_wcss_dbg_ifc_apb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_dap_sync_clk_src.hw
@@ -3231,7 +3340,7 @@
 	.clkr = {
 		.enable_reg = 0x25044,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_wcss_dbg_ifc_dapbus_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_dap_sync_clk_src.hw
@@ -3248,7 +3357,7 @@
 	.clkr = {
 		.enable_reg = 0x2d058,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_qdss_dap_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_dap_sync_clk_src.hw
@@ -3265,7 +3374,7 @@
 	.clkr = {
 		.enable_reg = 0x2d05c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_qdss_apb2jtag_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_dap_sync_clk_src.hw
@@ -3280,7 +3389,7 @@
 static struct clk_fixed_factor qdss_tsctr_div3_clk_src = {
 	.mult = 1,
 	.div = 3,
-	.hw.init = &(struct clk_init_data) {
+	.hw.init = &(const struct clk_init_data) {
 		.name = "qdss_tsctr_div3_clk_src",
 		.parent_hws = (const struct clk_hw *[]) {
 			&qdss_tsctr_clk_src.clkr.hw
@@ -3295,7 +3404,7 @@
 	.clkr = {
 		.enable_reg = 0x2d048,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_qdss_tsctr_div3_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&qdss_tsctr_div3_clk_src.hw
@@ -3321,7 +3430,7 @@
 	.freq_tbl = ftbl_qpic_io_macro_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "qpic_io_macro_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2),
@@ -3334,7 +3443,7 @@
 	.clkr = {
 		.enable_reg = 0x3200c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
+		.hw.init = &(const struct clk_init_data){
 			.name = "gcc_qpic_io_macro_clk",
 			.parent_hws = (const struct clk_hw *[]){
 				&qpic_io_macro_clk_src.clkr.hw
@@ -3356,7 +3465,7 @@
 	.freq_tbl = ftbl_q6_axi_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll2_gpll4_pi_sleep_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "q6_axi_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll2_gpll4_pi_sleep,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2_gpll4_pi_sleep),
@@ -3369,7 +3478,7 @@
 	.clkr = {
 		.enable_reg = 0x2500c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_q6_axim_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&q6_axi_clk_src.clkr.hw
@@ -3387,7 +3496,7 @@
 	.clkr = {
 		.enable_reg = 0xb00c,
 		.enable_mask = BIT(6),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_wcss_q6_tbu_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&q6_axi_clk_src.clkr.hw
@@ -3404,7 +3513,7 @@
 	.clkr = {
 		.enable_reg = 0x19010,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_mem_noc_q6_axi_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&q6_axi_clk_src.clkr.hw
@@ -3433,7 +3542,7 @@
 	.freq_tbl = ftbl_q6_axim2_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll4_bias_pll_ubinc_clk_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "q6_axim2_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4_bias_pll_ubi_nc_clk),
@@ -3451,7 +3560,7 @@
 	.freq_tbl = ftbl_nssnoc_memnoc_bfdcd_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_aux_gpll2_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "nssnoc_memnoc_bfdcd_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_aux_gpll2,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_aux_gpll2),
@@ -3464,7 +3573,7 @@
 	.clkr = {
 		.enable_reg = 0x17024,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_nssnoc_memnoc_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&nssnoc_memnoc_bfdcd_clk_src.clkr.hw
@@ -3481,7 +3590,7 @@
 	.clkr = {
 		.enable_reg = 0x17084,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_nssnoc_mem_noc_1_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&nssnoc_memnoc_bfdcd_clk_src.clkr.hw
@@ -3498,7 +3607,7 @@
 	.clkr = {
 		.enable_reg = 0xb00c,
 		.enable_mask = BIT(4),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_nss_tbu_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&nssnoc_memnoc_bfdcd_clk_src.clkr.hw
@@ -3515,7 +3624,7 @@
 	.clkr = {
 		.enable_reg = 0x19014,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_mem_noc_nssnoc_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&nssnoc_memnoc_bfdcd_clk_src.clkr.hw
@@ -3537,7 +3646,7 @@
 	.freq_tbl = ftbl_lpass_axim_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "lpass_axim_clk_src",
 		.parent_data = gcc_xo_gpll0,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -3545,12 +3654,46 @@
 	},
 };
 
+static struct clk_branch gcc_lpass_core_axim_clk = {
+	.halt_reg = 0x27018,
+	.clkr = {
+		.enable_reg = 0x27018,
+		.enable_mask = BIT(0),
+		.hw.init = &(const struct clk_init_data){
+			.name = "gcc_lpass_core_axim_clk",
+			.parent_hws = (const struct clk_hw *[]) {
+				&lpass_axim_clk_src.clkr.hw
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcnoc_lpass_clk = {
+	.halt_reg = 0x31020,
+	.clkr = {
+		.enable_reg = 0x31020,
+		.enable_mask = BIT(0),
+		.hw.init = &(const struct clk_init_data) {
+			.name = "gcc_pcnoc_lpass_clk",
+			.parent_hws = (const struct clk_hw *[]) {
+				&lpass_axim_clk_src.clkr.hw
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
 static struct clk_rcg2 lpass_sway_clk_src = {
 	.cmd_rcgr = 0x27004,
 	.freq_tbl = ftbl_lpass_axim_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "lpass_sway_clk_src",
 		.parent_data = gcc_xo_gpll0,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -3558,6 +3701,40 @@
 	},
 };
 
+static struct clk_branch gcc_lpass_sway_clk = {
+	.halt_reg = 0x27014,
+	.clkr = {
+		.enable_reg = 0x27014,
+		.enable_mask = BIT(0),
+		.hw.init = &(const struct clk_init_data) {
+			.name = "gcc_lpass_sway_clk",
+			.parent_hws = (const struct clk_hw *[]) {
+				&lpass_sway_clk_src.clkr.hw
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_snoc_lpass_cfg_clk = {
+	.halt_reg = 0x2e028,
+	.clkr = {
+		.enable_reg = 0x2e028,
+		.enable_mask = BIT(0),
+		.hw.init = &(const struct clk_init_data) {
+			.name = "gcc_snoc_lpass_cfg_clk",
+			.parent_hws = (const struct clk_hw *[]) {
+				&lpass_sway_clk_src.clkr.hw
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
 static const struct freq_tbl ftbl_adss_pwm_clk_src[] = {
 	F(24000000, P_XO, 1, 0, 0),
 	F(100000000, P_GPLL0, 8, 0, 0),
@@ -3569,7 +3746,7 @@
 	.freq_tbl = ftbl_adss_pwm_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "adss_pwm_clk_src",
 		.parent_data = gcc_xo_gpll0,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
@@ -3582,7 +3759,7 @@
 	.clkr = {
 		.enable_reg = 0x1c00c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_adss_pwm_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&adss_pwm_clk_src.clkr.hw
@@ -3605,7 +3782,7 @@
 	.freq_tbl = ftbl_gp1_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_sleep_clk_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "gp1_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_sleep_clk,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_sleep_clk),
@@ -3618,7 +3795,7 @@
 	.freq_tbl = ftbl_gp1_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_sleep_clk_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "gp2_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_sleep_clk,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_sleep_clk),
@@ -3631,7 +3808,7 @@
 	.freq_tbl = ftbl_gp1_clk_src,
 	.hid_width = 5,
 	.parent_map = gcc_xo_gpll0_gpll0_sleep_clk_map,
-	.clkr.hw.init = &(struct clk_init_data) {
+	.clkr.hw.init = &(const struct clk_init_data) {
 		.name = "gp3_clk_src",
 		.parent_data = gcc_xo_gpll0_gpll0_sleep_clk,
 		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_sleep_clk),
@@ -3644,7 +3821,7 @@
 	.clkr = {
 		.enable_reg = 0x34004,
 		.enable_mask = BIT(1),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_xo_clk_src",
 			.parent_data = gcc_xo_data,
 			.num_parents = ARRAY_SIZE(gcc_xo_data),
@@ -3659,7 +3836,7 @@
 	.clkr = {
 		.enable_reg = 0x17074,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_nssnoc_xo_dcd_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&gcc_xo_clk_src.clkr.hw
@@ -3676,7 +3853,7 @@
 	.clkr = {
 		.enable_reg = 0x34018,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_xo_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&gcc_xo_clk_src.clkr.hw
@@ -3693,7 +3870,7 @@
 	.clkr = {
 		.enable_reg = 0x17048,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_uniphy0_sys_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&uniphy_sys_clk_src.clkr.hw
@@ -3710,7 +3887,7 @@
 	.clkr = {
 		.enable_reg = 0x17058,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_uniphy1_sys_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&uniphy_sys_clk_src.clkr.hw
@@ -3727,7 +3904,7 @@
 	.clkr = {
 		.enable_reg = 0x17068,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_uniphy2_sys_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&uniphy_sys_clk_src.clkr.hw
@@ -3744,7 +3921,7 @@
 	.clkr = {
 		.enable_reg = 0x3a008,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_cmn_12gpll_sys_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&uniphy_sys_clk_src.clkr.hw
@@ -3759,7 +3936,7 @@
 static struct clk_fixed_factor gcc_xo_div4_clk_src = {
 	.mult = 1,
 	.div = 4,
-	.hw.init = &(struct clk_init_data) {
+	.hw.init = &(const struct clk_init_data) {
 		.name = "gcc_xo_div4_clk_src",
 		.parent_hws = (const struct clk_hw *[]) {
 			&gcc_xo_clk_src.clkr.hw
@@ -3775,7 +3952,7 @@
 	.clkr = {
 		.enable_reg = 0x1701c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_nssnoc_qosgen_ref_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&gcc_xo_div4_clk_src.hw
@@ -3792,7 +3969,7 @@
 	.clkr = {
 		.enable_reg = 0x17020,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_nssnoc_timeout_ref_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&gcc_xo_div4_clk_src.hw
@@ -3809,7 +3986,7 @@
 	.clkr = {
 		.enable_reg = 0x3401c,
 		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data) {
+		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_xo_div4_clk",
 			.parent_hws = (const struct clk_hw *[]) {
 				&gcc_xo_div4_clk_src.hw
@@ -3901,9 +4078,13 @@
 	[GCC_PCIE3_AXI_S_BRIDGE_CLK] = &gcc_pcie3_axi_s_bridge_clk.clkr,
 	[GCC_PCIE3_AXI_S_CLK] = &gcc_pcie3_axi_s_clk.clkr,
 	[PCIE0_PIPE_CLK_SRC] = &pcie0_pipe_clk_src.clkr,
+	[GCC_PCIE0_PIPE_CLK] = &gcc_pcie0_pipe_clk.clkr,
 	[PCIE1_PIPE_CLK_SRC] = &pcie1_pipe_clk_src.clkr,
+	[GCC_PCIE1_PIPE_CLK] = &gcc_pcie1_pipe_clk.clkr,
 	[PCIE2_PIPE_CLK_SRC] = &pcie2_pipe_clk_src.clkr,
+	[GCC_PCIE2_PIPE_CLK] = &gcc_pcie2_pipe_clk.clkr,
 	[PCIE3_PIPE_CLK_SRC] = &pcie3_pipe_clk_src.clkr,
+	[GCC_PCIE3_PIPE_CLK] = &gcc_pcie3_pipe_clk.clkr,
 	[PCIE_AUX_CLK_SRC] = &pcie_aux_clk_src.clkr,
 	[GCC_PCIE0_AUX_CLK] = &gcc_pcie0_aux_clk.clkr,
 	[GCC_PCIE1_AUX_CLK] = &gcc_pcie1_aux_clk.clkr,
@@ -3932,6 +4113,8 @@
 	[GCC_USB0_MOCK_UTMI_CLK] = &gcc_usb0_mock_utmi_clk.clkr,
 	[USB0_PIPE_CLK_SRC] = &usb0_pipe_clk_src.clkr,
 	[GCC_USB0_PHY_CFG_AHB_CLK] = &gcc_usb0_phy_cfg_ahb_clk.clkr,
+	[GCC_USB0_PIPE_CLK] = &gcc_usb0_pipe_clk.clkr,
+	[GCC_USB0_SLEEP_CLK] = &gcc_usb0_sleep_clk.clkr,
 	[SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
 	[GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
 	[SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr,
@@ -4009,7 +4192,11 @@
 	[GCC_NSS_TBU_CLK] = &gcc_nss_tbu_clk.clkr,
 	[GCC_MEM_NOC_NSSNOC_CLK] = &gcc_mem_noc_nssnoc_clk.clkr,
 	[LPASS_AXIM_CLK_SRC] = &lpass_axim_clk_src.clkr,
+	[GCC_LPASS_CORE_AXIM_CLK] = &gcc_lpass_core_axim_clk.clkr,
+	[GCC_PCNOC_LPASS_CLK] = &gcc_pcnoc_lpass_clk.clkr,
 	[LPASS_SWAY_CLK_SRC] = &lpass_sway_clk_src.clkr,
+	[GCC_LPASS_SWAY_CLK] = &gcc_lpass_sway_clk.clkr,
+	[GCC_SNOC_LPASS_CFG_CLK] = &gcc_snoc_lpass_cfg_clk.clkr,
 	[ADSS_PWM_CLK_SRC] = &adss_pwm_clk_src.clkr,
 	[GCC_ADSS_PWM_CLK] = &gcc_adss_pwm_clk.clkr,
 	[GP1_CLK_SRC] = &gp1_clk_src.clkr,
diff -ruw linux-6.4/drivers/cpufreq/Kconfig linux-6.4-fbx/drivers/cpufreq/Kconfig
--- linux-6.4/drivers/cpufreq/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/cpufreq/Kconfig	2023-05-22 20:06:38.023688355 +0200
@@ -311,5 +311,10 @@
 	  This adds the CPUFreq driver support for Freescale QorIQ SoCs
 	  which are capable of changing the CPU's frequency dynamically.
 
+config BCM63158_CPUFREQ
+	tristate "CPU frequency scaling driver for BCM63158 SoC"
+	depends on ARCH_BCMBCA
+
 endif
+
 endmenu
diff -ruw linux-6.4/drivers/cpufreq/Makefile linux-6.4-fbx/drivers/cpufreq/Makefile
--- linux-6.4/drivers/cpufreq/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/cpufreq/Makefile	2023-05-22 20:06:38.023688355 +0200
@@ -107,3 +107,5 @@
 obj-$(CONFIG_SH_CPU_FREQ)		+= sh-cpufreq.o
 obj-$(CONFIG_SPARC_US2E_CPUFREQ)	+= sparc-us2e-cpufreq.o
 obj-$(CONFIG_SPARC_US3_CPUFREQ)		+= sparc-us3-cpufreq.o
+
+obj-$(CONFIG_BCM63158_CPUFREQ)		+= bcm63158-cpufreq.o
diff -ruw linux-6.4/drivers/firmware/qcom_scm.c linux-6.4-fbx/drivers/firmware/qcom_scm.c
--- linux-6.4/drivers/firmware/qcom_scm.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/firmware/qcom_scm.c	2023-07-20 17:19:14.402360150 +0200
@@ -1402,6 +1402,31 @@
 	return IRQ_HANDLED;
 }
 
+int qti_fuseipq_scm_call(struct device *dev, const struct fuse_blow *fuse_blow)
+{
+	int ret;
+	struct qcom_scm_res res;
+	struct qcom_scm_desc desc = {
+		.owner		= ARM_SMCCC_OWNER_SIP,
+		.svc		= QTI_SCM_SVC_FUSE,
+		.cmd		= TZ_BLOW_FUSE_SECDAT,
+		.args[0]	= fuse_blow->address,
+	};
+
+	if (fuse_blow->size) {
+		desc.args[1] = fuse_blow->size;
+		desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RO, QCOM_SCM_VAL);
+	} else {
+		desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_RO);
+	}
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+	*fuse_blow->status = res.result[0];
+	return ret;
+}
+EXPORT_SYMBOL(qti_fuseipq_scm_call);
+
 static int qcom_scm_probe(struct platform_device *pdev)
 {
 	struct qcom_scm *scm;
diff -ruw linux-6.4/drivers/gpio/Kconfig linux-6.4-fbx/drivers/gpio/Kconfig
--- linux-6.4/drivers/gpio/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/gpio/Kconfig	2023-12-05 13:51:11.322098873 +0100
@@ -1018,6 +1018,11 @@
 	  enough to represent all pins, but the driver will assume a
 	  register layout for 64 pins (8 registers).
 
+config GPIO_FBXGWR_PMU
+	tristate "Freebox PMU I2C GPIO expander"
+	depends on MFD_FBXGWR_PMU
+	select GPIOLIB_IRQCHIP
+
 config GPIO_FXL6408
 	tristate "FXL6408 I2C GPIO expander"
 	select GPIO_REGMAP
diff -ruw linux-6.4/drivers/gpio/Makefile linux-6.4-fbx/drivers/gpio/Makefile
--- linux-6.4/drivers/gpio/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/gpio/Makefile	2023-12-05 13:50:59.101765002 +0100
@@ -60,6 +60,7 @@
 obj-$(CONFIG_GPIO_EP93XX)		+= gpio-ep93xx.o
 obj-$(CONFIG_GPIO_EXAR)			+= gpio-exar.o
 obj-$(CONFIG_GPIO_F7188X)		+= gpio-f7188x.o
+obj-$(CONFIG_GPIO_FBXGWR_PMU)		+= gpio-fbxgwr-pmu.o
 obj-$(CONFIG_GPIO_FTGPIO010)		+= gpio-ftgpio010.o
 obj-$(CONFIG_GPIO_FXL6408)		+= gpio-fxl6408.o
 obj-$(CONFIG_GPIO_GE_FPGA)		+= gpio-ge.o
diff -ruw linux-6.4/drivers/hid/Kconfig linux-6.4-fbx/drivers/hid/Kconfig
--- linux-6.4/drivers/hid/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/hid/Kconfig	2023-05-22 20:06:40.203746341 +0200
@@ -484,6 +484,11 @@
 	  Adds support for side buttons of Xiaomi Mi Dual Mode Wireless
 	  Mouse Silent Edition.
 
+config HID_FBX_REMOTE_AUDIO
+	tristate "Freebox BLE remote audio driver"
+	depends on HID && SND
+	select SND_PCM
+
 config HID_GYRATION
 	tristate "Gyration remote control"
 	help
diff -ruw linux-6.4/drivers/hid/Makefile linux-6.4-fbx/drivers/hid/Makefile
--- linux-6.4/drivers/hid/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/hid/Makefile	2023-05-22 20:06:40.207746447 +0200
@@ -57,6 +57,7 @@
 obj-$(CONFIG_HID_GOOGLE_HAMMER)	+= hid-google-hammer.o
 obj-$(CONFIG_HID_VIVALDI)	+= hid-vivaldi.o
 obj-$(CONFIG_HID_GT683R)	+= hid-gt683r.o
+obj-$(CONFIG_HID_FBX_REMOTE_AUDIO)	+= hid-fbx-remote-audio.o
 obj-$(CONFIG_HID_GYRATION)	+= hid-gyration.o
 obj-$(CONFIG_HID_HOLTEK)	+= hid-holtek-kbd.o
 obj-$(CONFIG_HID_HOLTEK)	+= hid-holtek-mouse.o
diff -ruw linux-6.4/drivers/hid/hid-quirks.c linux-6.4-fbx/drivers/hid/hid-quirks.c
--- linux-6.4/drivers/hid/hid-quirks.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/hid/hid-quirks.c	2023-05-22 20:06:40.223746873 +0200
@@ -719,6 +719,7 @@
 #if IS_ENABLED(CONFIG_HID_ZYDACRON)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
 #endif
+	{ HID_BLUETOOTH_DEVICE(0x10eb, 0x0023) },
 	{ }
 };
 
diff -ruw linux-6.4/drivers/hwmon/Kconfig linux-6.4-fbx/drivers/hwmon/Kconfig
--- linux-6.4/drivers/hwmon/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/hwmon/Kconfig	2023-06-22 21:43:23.070921124 +0200
@@ -344,6 +344,10 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called fam15h_power.
 
+config SENSORS_FBXGWR_PMU
+	tristate "Freebox GWR PMU hardware monitoring driver"
+	depends on MFD_FBXGWR_PMU
+
 config SENSORS_APPLESMC
 	tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
 	depends on INPUT && X86
@@ -2359,6 +2363,17 @@
 	  sensors monitor various telemetry data of different components on the
 	  card, e.g. board temperature, FPGA core temperature/voltage/current.
 
+config SENSORS_KIRKWOOD_CORETEMP
+	tristate "Kirkwood core temperature sensor"
+	depends on MACH_KIRKWOOD
+
+config SENSORS_LD6710_FBX
+	tristate "LD6710 hardware monitoring driver (as seen on Freebox hardware)"
+	depends on I2C
+
+config SENSORS_AP806
+	tristate "Marvell AP806/CP110 hardware monitoring driver"
+
 if ACPI
 
 comment "ACPI drivers"
diff -ruw linux-6.4/drivers/hwmon/Makefile linux-6.4-fbx/drivers/hwmon/Makefile
--- linux-6.4/drivers/hwmon/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/hwmon/Makefile	2023-06-22 21:43:23.074921233 +0200
@@ -74,6 +74,7 @@
 obj-$(CONFIG_SENSORS_F71805F)	+= f71805f.o
 obj-$(CONFIG_SENSORS_F71882FG)	+= f71882fg.o
 obj-$(CONFIG_SENSORS_F75375S)	+= f75375s.o
+obj-$(CONFIG_SENSORS_FBXGWR_PMU)	+= fbxgwr_pmu_hwmon.o
 obj-$(CONFIG_SENSORS_FAM15H_POWER) += fam15h_power.o
 obj-$(CONFIG_SENSORS_FSCHMD)	+= fschmd.o
 obj-$(CONFIG_SENSORS_FTSTEUTATES) += ftsteutates.o
@@ -102,6 +103,7 @@
 obj-$(CONFIG_SENSORS_K8TEMP)	+= k8temp.o
 obj-$(CONFIG_SENSORS_K10TEMP)	+= k10temp.o
 obj-$(CONFIG_SENSORS_LAN966X)	+= lan966x-hwmon.o
+obj-$(CONFIG_SENSORS_LD6710_FBX) += ld6710-fbx.o
 obj-$(CONFIG_SENSORS_LINEAGE)	+= lineage-pem.o
 obj-$(CONFIG_SENSORS_LOCHNAGAR)	+= lochnagar-hwmon.o
 obj-$(CONFIG_SENSORS_LM63)	+= lm63.o
@@ -218,6 +220,8 @@
 obj-$(CONFIG_SENSORS_WM831X)	+= wm831x-hwmon.o
 obj-$(CONFIG_SENSORS_WM8350)	+= wm8350-hwmon.o
 obj-$(CONFIG_SENSORS_XGENE)	+= xgene-hwmon.o
+obj-$(CONFIG_SENSORS_KIRKWOOD_CORETEMP)+= kirkwood-coretemp.o
+obj-$(CONFIG_SENSORS_AP806)	+= ap806-hwmon.o
 
 obj-$(CONFIG_SENSORS_OCC)	+= occ/
 obj-$(CONFIG_SENSORS_PECI)	+= peci/
diff -ruw linux-6.4/drivers/i2c/busses/Kconfig linux-6.4-fbx/drivers/i2c/busses/Kconfig
--- linux-6.4/drivers/i2c/busses/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/i2c/busses/Kconfig	2023-05-22 20:06:40.343750065 +0200
@@ -1433,6 +1433,10 @@
 	  to SLIMpro (On chip coprocessor) mailbox mechanism.
 	  If unsure, say N.
 
+config I2C_WP3
+	tristate "Wintegra WP3 I2C controller"
+	depends on WINTEGRA_WINPATH3
+
 config SCx200_ACB
 	tristate "Geode ACCESS.bus support"
 	depends on X86_32 && PCI
diff -ruw linux-6.4/drivers/i2c/busses/Makefile linux-6.4-fbx/drivers/i2c/busses/Makefile
--- linux-6.4/drivers/i2c/busses/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/i2c/busses/Makefile	2023-05-22 20:06:40.343750065 +0200
@@ -129,6 +129,7 @@
 obj-$(CONFIG_I2C_XLP9XX)	+= i2c-xlp9xx.o
 obj-$(CONFIG_I2C_RCAR)		+= i2c-rcar.o
 obj-$(CONFIG_I2C_GXP)		+= i2c-gxp.o
+obj-$(CONFIG_I2C_WP3)		+= i2c-wp3.o
 
 # External I2C/SMBus adapter drivers
 obj-$(CONFIG_I2C_DIOLAN_U2C)	+= i2c-diolan-u2c.o
diff -ruw linux-6.4/drivers/i2c/busses/i2c-qup.c linux-6.4-fbx/drivers/i2c/busses/i2c-qup.c
--- linux-6.4/drivers/i2c/busses/i2c-qup.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/i2c/busses/i2c-qup.c	2023-11-16 17:05:12.429530415 +0100
@@ -1867,6 +1867,12 @@
 		qup->clk_ctl = ((fs_div / 2) << 16) | (hs_div << 8) | (fs_div & 0xff);
 	}
 
+	if (fs_div > 0xff) {
+		dev_err(qup->dev, "cannot achieve requested clock rate\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
 	/*
 	 * Time it takes for a byte to be clocked out on the bus.
 	 * Each byte takes 9 clock cycles (8 bits + 1 ack).
diff -ruw linux-6.4/drivers/i2c/i2c-core-base.c linux-6.4-fbx/drivers/i2c/i2c-core-base.c
--- linux-6.4/drivers/i2c/i2c-core-base.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/i2c/i2c-core-base.c	2023-05-22 20:06:40.379751022 +0200
@@ -249,12 +249,14 @@
 			bri->set_sda(adap, scl);
 		ndelay(RECOVERY_NDELAY / 2);
 
+		if (0) {
 		if (scl) {
 			ret = i2c_generic_bus_free(adap);
 			if (ret == 0)
 				break;
 		}
 	}
+	}
 
 	/* If we can't check bus status, assume recovery worked */
 	if (ret == -EOPNOTSUPP)
diff -ruw linux-6.4/drivers/input/misc/Kconfig linux-6.4-fbx/drivers/input/misc/Kconfig
--- linux-6.4/drivers/input/misc/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/input/misc/Kconfig	2023-05-22 20:06:40.643758045 +0200
@@ -939,4 +939,9 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called stpmic1_onkey.
 
+config INPUT_SMSC_CAP1066
+	tristate "SMSC CAP1066 capacitive sensor driver"
+	select I2C
+	select INPUT_POLLDEV
+
 endif
diff -ruw linux-6.4/drivers/input/misc/Makefile linux-6.4-fbx/drivers/input/misc/Makefile
--- linux-6.4/drivers/input/misc/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/input/misc/Makefile	2023-05-22 20:06:40.643758045 +0200
@@ -90,3 +90,4 @@
 obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND)	+= xen-kbdfront.o
 obj-$(CONFIG_INPUT_YEALINK)		+= yealink.o
 obj-$(CONFIG_INPUT_IDEAPAD_SLIDEBAR)	+= ideapad_slidebar.o
+obj-$(CONFIG_INPUT_SMSC_CAP1066)	+= smsc_cap1066.o
diff -ruw linux-6.4/drivers/leds/Kconfig linux-6.4-fbx/drivers/leds/Kconfig
--- linux-6.4/drivers/leds/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/leds/Kconfig	2023-11-27 19:13:52.966343373 +0100
@@ -328,6 +328,10 @@
 	  defined as platform devices and/or OpenFirmware platform devices.
 	  The code to use these bindings can be selected below.
 
+config LEDS_FBXGWR_PMU
+	tristate "Freebox GWR PMU LED controller"
+	depends on MFD_FBXGWR_PMU
+
 config LEDS_LP3944
 	tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
 	depends on LEDS_CLASS
@@ -710,6 +714,13 @@
 	  LED controllers. They are I2C devices with multiple constant-current
 	  channels, each with independent 256-level PWM control.
 
+config LEDS_IS31FL3299
+	tristate "LED support for ISSI IS31FL3299 I2C LED controller"
+	depends on LEDS_CLASS && I2C && OF
+	select REGMAP_I2C
+	help
+	  This option enables support for the IS31FL3299 LED driver.
+
 config LEDS_SC27XX_BLTC
 	tristate "LED support for the SC27xx breathing light controller"
 	depends on LEDS_CLASS && MFD_SC27XX_PMIC
@@ -859,6 +870,17 @@
 	  This option enables support for the Power Button LED of
 	  Acer Iconia Tab A500.
 
+config LEDS_LED1202
+	tristate "LED support for STMicroElectronics LED1202"
+	depends on LEDS_CLASS && I2C && OF
+	select REGMAP_I2C
+	help
+	  This option enables support for the LED1202 12-channel
+	  LED driver.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called leds-led1202.
+
 source "drivers/leds/blink/Kconfig"
 
 comment "Flash and Torch LED drivers"
diff -ruw linux-6.4/drivers/leds/Makefile linux-6.4-fbx/drivers/leds/Makefile
--- linux-6.4/drivers/leds/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/leds/Makefile	2023-11-27 19:13:52.966343373 +0100
@@ -28,12 +28,14 @@
 obj-$(CONFIG_LEDS_DA9052)		+= leds-da9052.o
 obj-$(CONFIG_LEDS_GPIO)			+= leds-gpio.o
 obj-$(CONFIG_LEDS_GPIO_REGISTER)	+= leds-gpio-register.o
+obj-$(CONFIG_LEDS_FBXGWR_PMU)		+= leds-fbxgwr-pmu.o
 obj-$(CONFIG_LEDS_HP6XX)		+= leds-hp6xx.o
 obj-$(CONFIG_LEDS_INTEL_SS4200)		+= leds-ss4200.o
 obj-$(CONFIG_LEDS_IP30)			+= leds-ip30.o
 obj-$(CONFIG_LEDS_IPAQ_MICRO)		+= leds-ipaq-micro.o
 obj-$(CONFIG_LEDS_IS31FL319X)		+= leds-is31fl319x.o
 obj-$(CONFIG_LEDS_IS31FL32XX)		+= leds-is31fl32xx.o
+obj-$(CONFIG_LEDS_IS31FL3299)		+= leds-is31fl3299.o
 obj-$(CONFIG_LEDS_LM3530)		+= leds-lm3530.o
 obj-$(CONFIG_LEDS_LM3532)		+= leds-lm3532.o
 obj-$(CONFIG_LEDS_LM3533)		+= leds-lm3533.o
@@ -85,6 +87,7 @@
 obj-$(CONFIG_LEDS_WM831X_STATUS)	+= leds-wm831x-status.o
 obj-$(CONFIG_LEDS_WM8350)		+= leds-wm8350.o
 obj-$(CONFIG_LEDS_WRAP)			+= leds-wrap.o
+obj-$(CONFIG_LEDS_LED1202)		+= leds-led1202.o
 
 # LED SPI Drivers
 obj-$(CONFIG_LEDS_CR0014114)		+= leds-cr0014114.o
diff -ruw linux-6.4/drivers/media/dvb-core/dvb_frontend.c linux-6.4-fbx/drivers/media/dvb-core/dvb_frontend.c
--- linux-6.4/drivers/media/dvb-core/dvb_frontend.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/media/dvb-core/dvb_frontend.c	2023-06-27 11:47:15.779859115 +0200
@@ -820,6 +820,7 @@
 	if (fe->exit != DVB_FE_DEVICE_REMOVED)
 		fe->exit = DVB_FE_NORMAL_EXIT;
 	mb();
+	wake_up_all(&fepriv->events.wait_queue);
 
 	if (!fepriv->thread)
 		return;
@@ -2755,6 +2756,9 @@
 
 	poll_wait(file, &fepriv->events.wait_queue, wait);
 
+	if (fe->exit)
+		return POLLERR | POLLHUP;
+
 	if (fepriv->events.eventw != fepriv->events.eventr)
 		return (EPOLLIN | EPOLLRDNORM | EPOLLPRI);
 
diff -ruw linux-6.4/drivers/media/rc/keymaps/Makefile linux-6.4-fbx/drivers/media/rc/keymaps/Makefile
--- linux-6.4/drivers/media/rc/keymaps/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/media/rc/keymaps/Makefile	2023-05-22 20:06:41.083769748 +0200
@@ -103,6 +103,7 @@
 			rc-purpletv.o \
 			rc-pv951.o \
 			rc-rc6-mce.o \
+			rc-rc6-freebox.o \
 			rc-real-audio-220-32-keys.o \
 			rc-reddo.o \
 			rc-snapstream-firefly.o \
diff -ruw linux-6.4/drivers/media/usb/dvb-usb/dib0700_devices.c linux-6.4-fbx/drivers/media/usb/dvb-usb/dib0700_devices.c
--- linux-6.4/drivers/media/usb/dvb-usb/dib0700_devices.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/media/usb/dvb-usb/dib0700_devices.c	2023-05-22 20:06:41.115770599 +0200
@@ -3897,6 +3897,7 @@
 	DIBCOM_STK8096PVR,
 	HAMA_DVBT_HYBRID,
 	MICROSOFT_XBOX_ONE_TUNER,
+	DIBCOM_HOOK_DEFAULT_STK7770P,
 };
 
 struct usb_device_id dib0700_usb_id_table[] = {
@@ -3987,6 +3988,7 @@
 	DVB_USB_DEV(DIBCOM, DIBCOM_STK8096PVR),
 	DVB_USB_DEV(HAMA, HAMA_DVBT_HYBRID),
 	DVB_USB_DEV(MICROSOFT, MICROSOFT_XBOX_ONE_TUNER),
+	DVB_USB_DEV(DIBCOM, DIBCOM_HOOK_DEFAULT_STK7770P),
 	{ }
 };
 
@@ -5230,6 +5232,30 @@
 				{ NULL },
 			},
 		},
+	}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+		.num_adapters = 1,
+		.adapter = {
+			{
+			DIB0700_NUM_FRONTENDS(1),
+			.fe = {{
+				.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+				.pid_filter_count = 32,
+				.pid_filter       = stk70x0p_pid_filter,
+				.pid_filter_ctrl  = stk70x0p_pid_filter_ctrl,
+				.frontend_attach  = stk7770p_frontend_attach,
+				.tuner_attach     = dib7770p_tuner_attach,
+
+				DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+			}},
+			},
+		},
+		.num_device_descs = 1,
+		.devices = {
+			{   "DiBcom STK7770P reference design no IR",
+				{ &dib0700_usb_id_table[DIBCOM_HOOK_DEFAULT_STK7770P], NULL },
+				{ NULL },
+			},
+		},
 	},
 };
 
diff -ruw linux-6.4/drivers/mfd/Kconfig linux-6.4-fbx/drivers/mfd/Kconfig
--- linux-6.4/drivers/mfd/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/mfd/Kconfig	2023-06-12 14:21:20.824611302 +0200
@@ -2052,6 +2052,24 @@
 	  additional drivers must be enabled in order to use the functionality
 	  of the device.
 
+config MFD_FBXGW7R_PANEL
+	tristate "Freebox fbxgw7r panel support"
+	depends on FB
+	depends on SPI_MASTER
+	depends on OF
+	select FB_SYS_FOPS
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_DEFERRED_IO
+
+config MFD_FBXGWR_PMU
+	tristate "Freebox fbxgwr PMU"
+	depends on I2C
+	depends on OF
+	select MFD_CORE
+	select REGMAP_I2C
+
 config MFD_WCD934X
 	tristate "Support for WCD9340/WCD9341 Codec"
 	depends on SLIMBUS
diff -ruw linux-6.4/drivers/mfd/Makefile linux-6.4-fbx/drivers/mfd/Makefile
--- linux-6.4/drivers/mfd/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/mfd/Makefile	2023-06-12 14:21:20.824611302 +0200
@@ -270,6 +270,8 @@
 
 obj-$(CONFIG_MFD_ATC260X)	+= atc260x-core.o
 obj-$(CONFIG_MFD_ATC260X_I2C)	+= atc260x-i2c.o
+obj-$(CONFIG_MFD_FBXGW7R_PANEL)	+= fbxgw7r-panel.o
+obj-$(CONFIG_MFD_FBXGWR_PMU)	+= fbxgwr-pmu.o
 
 rsmu-i2c-objs			:= rsmu_core.o rsmu_i2c.o
 rsmu-spi-objs			:= rsmu_core.o rsmu_spi.o
diff -ruw linux-6.4/drivers/misc/Kconfig linux-6.4-fbx/drivers/misc/Kconfig
--- linux-6.4/drivers/misc/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/misc/Kconfig	2023-09-01 15:04:29.529632893 +0200
@@ -5,6 +5,9 @@
 
 menu "Misc devices"
 
+config WINTEGRA_MMAP
+	bool "wintegra mmap driver"
+
 config SENSORS_LIS3LV02D
 	tristate
 	depends on INPUT
@@ -403,6 +406,18 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called vmw_balloon.
 
+config INTELCE_PIC16PMU
+	tristate "PIC16 PMU, LED, hwmon support"
+	select INPUT_POLLDEV
+	select NEW_LEDS
+	select I2C
+	select HWMON
+	select ARCH_REQUIRE_GPIOLIB
+	help
+	  Freebox v6 HD PIC16 PMU interface support, enables
+	  control of the on-board LEDs and reports the power status,
+	  reset status and button status.
+
 config PCH_PHUB
 	tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
 	select GENERIC_NET_UTILS
@@ -424,6 +439,11 @@
 	  To compile this driver as a module, choose M here: the module will
 	  be called pch_phub.
 
+config FBXSERIAL_OF
+	bool "read fbxserial through DT chosen node"
+	depends on OF
+	select ARCH_HAS_FBXSERIAL
+
 config LATTICE_ECP3_CONFIG
 	tristate "Lattice ECP3 FPGA bitstream configuration via SPI"
 	depends on SPI && SYSFS
@@ -538,6 +558,9 @@
 
 	  Say N here unless you know what you are doing.
 
+config DGASP
+	bool "dying gasp infrastructure"
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
@@ -555,4 +578,6 @@
 source "drivers/misc/uacce/Kconfig"
 source "drivers/misc/pvpanic/Kconfig"
 source "drivers/misc/mchp_pci1xxxx/Kconfig"
+source "drivers/misc/remoti/Kconfig"
+source "drivers/misc/hdmi-cec/Kconfig"
 endmenu
diff -ruw linux-6.4/drivers/misc/Makefile linux-6.4-fbx/drivers/misc/Makefile
--- linux-6.4/drivers/misc/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/misc/Makefile	2023-09-01 15:04:29.529632893 +0200
@@ -3,6 +3,7 @@
 # Makefile for misc devices that really don't fit anywhere else.
 #
 
+obj-$(CONFIG_WINTEGRA_MMAP)	+= wintegra_mmap.o
 obj-$(CONFIG_IBM_ASM)		+= ibmasm/
 obj-$(CONFIG_IBMVMC)		+= ibmvmc.o
 obj-$(CONFIG_AD525X_DPOT)	+= ad525x_dpot.o
@@ -21,7 +22,9 @@
 obj-$(CONFIG_SENSORS_APDS990X)	+= apds990x.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
 obj-$(CONFIG_KGDB_TESTS)	+= kgdbts.o
+obj-$(CONFIG_FBXSERIAL_OF)	+= fbxserial_of.o
 obj-$(CONFIG_SGI_XP)		+= sgi-xp/
+obj-$(CONFIG_INTELCE_PIC16PMU)	+= pic16-pmu.o
 obj-$(CONFIG_SGI_GRU)		+= sgi-gru/
 obj-$(CONFIG_SMPRO_ERRMON)	+= smpro-errmon.o
 obj-$(CONFIG_SMPRO_MISC)	+= smpro-misc.o
@@ -39,6 +42,7 @@
 obj-y				+= cb710/
 obj-$(CONFIG_VMWARE_BALLOON)	+= vmw_balloon.o
 obj-$(CONFIG_PCH_PHUB)		+= pch_phub.o
+obj-y				+= hdmi-cec/
 obj-y				+= ti-st/
 obj-y				+= lis3lv02d/
 obj-$(CONFIG_ALTERA_STAPL)	+=altera-stapl/
@@ -65,3 +69,5 @@
 obj-$(CONFIG_VCPU_STALL_DETECTOR)	+= vcpu_stall_detector.o
 obj-$(CONFIG_TMR_MANAGER)      += xilinx_tmr_manager.o
 obj-$(CONFIG_TMR_INJECT)	+= xilinx_tmr_inject.o
+obj-y				+= remoti/
+obj-$(CONFIG_DGASP)		+= dgasp.o
diff -ruw linux-6.4/drivers/misc/eeprom/Kconfig linux-6.4-fbx/drivers/misc/eeprom/Kconfig
--- linux-6.4/drivers/misc/eeprom/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/misc/eeprom/Kconfig	2023-06-27 11:47:15.783859224 +0200
@@ -131,4 +131,8 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called ee1004.
 
+config EEPROM_EE1004_RAW
+	tristate "SPD EEPROMs on DDR4 memory modules (non smbus)"
+	depends on I2C && SYSFS
+
 endmenu
diff -ruw linux-6.4/drivers/misc/eeprom/Makefile linux-6.4-fbx/drivers/misc/eeprom/Makefile
--- linux-6.4/drivers/misc/eeprom/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/misc/eeprom/Makefile	2023-03-09 16:41:54.887957334 +0100
@@ -8,3 +8,4 @@
 obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
 obj-$(CONFIG_EEPROM_IDT_89HPESX) += idt_89hpesx.o
 obj-$(CONFIG_EEPROM_EE1004)	+= ee1004.o
+obj-$(CONFIG_EEPROM_EE1004_RAW)	+= ee1004_raw.o
diff -ruw linux-6.4/drivers/misc/eeprom/at24.c linux-6.4-fbx/drivers/misc/eeprom/at24.c
--- linux-6.4/drivers/misc/eeprom/at24.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/misc/eeprom/at24.c	2023-05-22 20:06:41.207773046 +0200
@@ -594,7 +594,6 @@
 	bool full_power;
 	struct regmap *regmap;
 	bool writable;
-	u8 test_byte;
 	int err;
 
 	i2c_fn_i2c = i2c_check_functionality(client->adapter, I2C_FUNC_I2C);
@@ -615,8 +614,10 @@
 		page_size = 1;
 
 	flags = cdata->flags;
+#ifndef CONFIG_NVMEM_IGNORE_RO
 	if (device_property_present(dev, "read-only"))
 		flags |= AT24_FLAG_READONLY;
+#endif
 	if (device_property_present(dev, "no-read-rollover"))
 		flags |= AT24_FLAG_NO_RDROL;
 
@@ -764,21 +765,6 @@
 		return PTR_ERR(at24->nvmem);
 	}
 
-	/*
-	 * Perform a one-byte test read to verify that the chip is functional,
-	 * unless powering on the device is to be avoided during probe (i.e.
-	 * it's powered off right now).
-	 */
-	if (full_power) {
-		err = at24_read(at24, 0, &test_byte, 1);
-		if (err) {
-			pm_runtime_disable(dev);
-			if (!pm_runtime_status_suspended(dev))
-				regulator_disable(at24->vcc_reg);
-			return -ENODEV;
-		}
-	}
-
 	pm_runtime_idle(dev);
 
 	if (writable)
diff -ruw linux-6.4/drivers/mmc/core/block.c linux-6.4-fbx/drivers/mmc/core/block.c
--- linux-6.4/drivers/mmc/core/block.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/mmc/core/block.c	2023-06-27 11:47:15.783859224 +0200
@@ -2463,7 +2463,7 @@
 	md->disk->private_data = md;
 	md->parent = parent;
 	set_disk_ro(md->disk, md->read_only || default_ro);
-	if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
+	if (area_type & (MMC_BLK_DATA_AREA_RPMB))
 		md->disk->flags |= GENHD_FL_NO_PART;
 
 	/*
diff -ruw linux-6.4/drivers/mmc/host/Kconfig linux-6.4-fbx/drivers/mmc/host/Kconfig
--- linux-6.4/drivers/mmc/host/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/mmc/host/Kconfig	2023-05-22 20:06:41.243774004 +0200
@@ -997,7 +997,7 @@
 
 config MMC_SDHCI_BRCMSTB
 	tristate "Broadcom SDIO/SD/MMC support"
-	depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
+	depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCMBCA || COMPILE_TEST
 	depends on MMC_SDHCI_PLTFM
 	select MMC_CQHCI
 	default ARCH_BRCMSTB || BMIPS_GENERIC
diff -ruw linux-6.4/drivers/mtd/Kconfig linux-6.4-fbx/drivers/mtd/Kconfig
--- linux-6.4/drivers/mtd/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/mtd/Kconfig	2023-02-24 19:08:06.439275900 +0100
@@ -23,6 +23,9 @@
 	  WARNING: some of the tests will ERASE entire MTD device which they
 	  test. Do not use these tests unless you really know what you do.
 
+config MTD_ERASE_PRINTK
+	bool "write to kernel log when a block is erased"
+
 menu "Partition parsers"
 source "drivers/mtd/parsers/Kconfig"
 endmenu
diff -ruw linux-6.4/drivers/mtd/mtdchar.c linux-6.4-fbx/drivers/mtd/mtdchar.c
--- linux-6.4/drivers/mtd/mtdchar.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/mtd/mtdchar.c	2023-06-27 11:47:15.791859441 +0200
@@ -168,6 +168,7 @@
 		{
 			struct mtd_oob_ops ops = {};
 
+			memset(&ops, 0, sizeof (ops));
 			ops.mode = MTD_OPS_RAW;
 			ops.datbuf = kbuf;
 			ops.oobbuf = NULL;
@@ -262,6 +263,7 @@
 		{
 			struct mtd_oob_ops ops = {};
 
+			memset(&ops, 0, sizeof (ops));
 			ops.mode = MTD_OPS_RAW;
 			ops.datbuf = kbuf;
 			ops.oobbuf = NULL;
@@ -947,6 +949,11 @@
 				erase->len = einfo32.length;
 			}
 
+#ifdef CONFIG_MTD_ERASE_PRINTK
+			printk(KERN_DEBUG "mtd: %s: ERASE offset=@%08llx\n",
+			       mtd->name, erase->addr);
+#endif
+
 			ret = mtd_erase(mtd, erase);
 			kfree(erase);
 		}
diff -ruw linux-6.4/drivers/mtd/mtdcore.c linux-6.4-fbx/drivers/mtd/mtdcore.c
--- linux-6.4/drivers/mtd/mtdcore.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/mtd/mtdcore.c	2023-05-22 20:06:41.279774962 +0200
@@ -308,6 +308,56 @@
 }
 MTD_DEVICE_ATTR_RO(bbt_blocks);
 
+static ssize_t mtd_nand_type_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->nand_type);
+}
+static DEVICE_ATTR(nand_type, S_IRUGO, mtd_nand_type_show, NULL);
+
+static ssize_t mtd_nand_manufacturer_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->nand_manufacturer);
+}
+static DEVICE_ATTR(nand_manufacturer, S_IRUGO, mtd_nand_manufacturer_show, NULL);
+
+static ssize_t mtd_nand_onfi_ecc_bits_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", mtd->onfi_ecc_bits);
+}
+static DEVICE_ATTR(onfi_ecc_bits, S_IRUGO, mtd_nand_onfi_ecc_bits_show, NULL);
+
+static ssize_t mtd_nand_onfi_model_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			mtd->onfi_model ? mtd->onfi_model : "unknown");
+}
+static DEVICE_ATTR(onfi_model, S_IRUGO, mtd_nand_onfi_model_show, NULL);
+
+static ssize_t mtd_nand_ids_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			mtd->nand_ids[0], mtd->nand_ids[1],
+			mtd->nand_ids[2], mtd->nand_ids[3],
+			mtd->nand_ids[4], mtd->nand_ids[5],
+			mtd->nand_ids[6], mtd->nand_ids[7]);
+}
+static DEVICE_ATTR(nand_ids, S_IRUGO, mtd_nand_ids_show, NULL);
+
 static struct attribute *mtd_attrs[] = {
 	&dev_attr_type.attr,
 	&dev_attr_flags.attr,
@@ -326,6 +376,11 @@
 	&dev_attr_bad_blocks.attr,
 	&dev_attr_bbt_blocks.attr,
 	&dev_attr_bitflip_threshold.attr,
+	&dev_attr_nand_type.attr,
+	&dev_attr_nand_manufacturer.attr,
+	&dev_attr_onfi_ecc_bits.attr,
+	&dev_attr_onfi_model.attr,
+	&dev_attr_nand_ids.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(mtd);
diff -ruw linux-6.4/drivers/mtd/mtdpart.c linux-6.4-fbx/drivers/mtd/mtdpart.c
--- linux-6.4/drivers/mtd/mtdpart.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/mtd/mtdpart.c	2023-05-22 20:06:41.279774962 +0200
@@ -68,6 +68,11 @@
 	child->oobsize = parent->oobsize;
 	child->oobavail = parent->oobavail;
 	child->subpage_sft = parent->subpage_sft;
+	child->nand_type = parent->nand_type;
+	child->nand_manufacturer = parent->nand_manufacturer;
+	child->onfi_ecc_bits = parent->onfi_ecc_bits;
+	child->onfi_model = parent->onfi_model;
+	memcpy(child->nand_ids, parent->nand_ids, 8);
 
 	child->name = name;
 	child->owner = parent->owner;
diff -ruw linux-6.4/drivers/mtd/nand/raw/Kconfig linux-6.4-fbx/drivers/mtd/nand/raw/Kconfig
--- linux-6.4/drivers/mtd/nand/raw/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/mtd/nand/raw/Kconfig	2023-05-22 20:06:41.283775068 +0200
@@ -12,6 +12,14 @@
 
 comment "Raw/parallel NAND flash controllers"
 
+config MTD_FORCE_BAD_BLOCK_ERASE
+	bool "Force erase on bad blocks (useful for bootloader parts)"
+	default n
+	help
+	  Enable this option only when you need to force an erase on
+	  blocks being marked as "bad" by Linux (i.e: other ECC/bad block
+	  marker layout).
+
 config MTD_NAND_DENALI
 	tristate
 
@@ -31,6 +39,18 @@
 	  Enable the driver for NAND flash on platforms using a Denali NAND
 	  controller as a DT device.
 
+config MTD_NAND_DENALI_FBX
+	tristate "NAND Denali controller support"
+	depends on PCI
+	select BCH_CONST_PARAMS
+
+if MTD_NAND_DENALI_FBX
+	config BCH_CONST_M
+		default 13
+	config BCH_CONST_T
+		default 4
+endif
+
 config MTD_NAND_AMS_DELTA
 	tristate "Amstrad E3 NAND controller"
 	depends on MACH_AMS_DELTA || COMPILE_TEST
diff -ruw linux-6.4/drivers/mtd/nand/raw/Makefile linux-6.4-fbx/drivers/mtd/nand/raw/Makefile
--- linux-6.4/drivers/mtd/nand/raw/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/mtd/nand/raw/Makefile	2023-05-22 20:06:41.283775068 +0200
@@ -8,6 +8,7 @@
 obj-$(CONFIG_MTD_NAND_DENALI)		+= denali.o
 obj-$(CONFIG_MTD_NAND_DENALI_PCI)	+= denali_pci.o
 obj-$(CONFIG_MTD_NAND_DENALI_DT)	+= denali_dt.o
+obj-$(CONFIG_MTD_NAND_DENALI_FBX)	+= denali_nand.o
 obj-$(CONFIG_MTD_NAND_AU1550)		+= au1550nd.o
 obj-$(CONFIG_MTD_NAND_S3C2410)		+= s3c2410.o
 obj-$(CONFIG_MTD_NAND_DAVINCI)		+= davinci_nand.o
diff -ruw linux-6.4/drivers/mtd/parsers/Kconfig linux-6.4-fbx/drivers/mtd/parsers/Kconfig
--- linux-6.4/drivers/mtd/parsers/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/mtd/parsers/Kconfig	2023-05-22 20:06:41.307775706 +0200
@@ -96,6 +96,10 @@
 	  two "firmware" partitions. Currently used firmware has to be detected
 	  using CFE environment variable.
 
+config MTD_OF_PARTS_IGNORE_RO
+	bool "ignore read-only flag"
+	depends on MTD_OF_PARTS
+
 config MTD_PARSER_IMAGETAG
 	tristate "Parser for BCM963XX Image Tag format partitions"
 	depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
@@ -220,3 +224,14 @@
 	  partition map. This partition table contains real partition
 	  offsets, which may differ from device to device depending on the
 	  number and location of bad blocks on NAND.
+
+config MTD_FBX6HD_PARTS
+	tristate "Freebox V6 HD partitioning support"
+	help
+	  Freebox V6 HD partitioning support
+
+config MTD_FBX6HD_PARTS_WRITE_ALL
+	bool "make all partitions writeable"
+	depends on MTD_FBX6HD_PARTS
+	help
+	  Freebox V6 HD partitions support
diff -ruw linux-6.4/drivers/mtd/parsers/Makefile linux-6.4-fbx/drivers/mtd/parsers/Makefile
--- linux-6.4/drivers/mtd/parsers/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/mtd/parsers/Makefile	2023-05-22 20:06:41.307775706 +0200
@@ -16,3 +16,4 @@
 obj-$(CONFIG_MTD_SHARPSL_PARTS)		+= sharpslpart.o
 obj-$(CONFIG_MTD_REDBOOT_PARTS)		+= redboot.o
 obj-$(CONFIG_MTD_QCOMSMEM_PARTS)	+= qcomsmempart.o
+obj-$(CONFIG_MTD_FBX6HD_PARTS)	+= fbx6hd-mtdparts.o
diff -ruw linux-6.4/drivers/mtd/parsers/ofpart_core.c linux-6.4-fbx/drivers/mtd/parsers/ofpart_core.c
--- linux-6.4/drivers/mtd/parsers/ofpart_core.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/mtd/parsers/ofpart_core.c	2023-05-22 20:06:41.307775706 +0200
@@ -157,8 +157,10 @@
 			partname = of_get_property(pp, "name", &len);
 		parts[i].name = partname;
 
+#ifndef CONFIG_MTD_OF_PARTS_IGNORE_RO
 		if (of_get_property(pp, "read-only", &len))
 			parts[i].mask_flags |= MTD_WRITEABLE;
+#endif
 
 		if (of_get_property(pp, "lock", &len))
 			parts[i].mask_flags |= MTD_POWERUP_LOCK;
diff -ruw linux-6.4/drivers/net/ethernet/Kconfig linux-6.4-fbx/drivers/net/ethernet/Kconfig
--- linux-6.4/drivers/net/ethernet/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/ethernet/Kconfig	2023-05-22 20:06:41.395778047 +0200
@@ -190,6 +190,7 @@
 source "drivers/net/ethernet/via/Kconfig"
 source "drivers/net/ethernet/wangxun/Kconfig"
 source "drivers/net/ethernet/wiznet/Kconfig"
+source "drivers/net/ethernet/wintegra/Kconfig"
 source "drivers/net/ethernet/xilinx/Kconfig"
 source "drivers/net/ethernet/xircom/Kconfig"
 
diff -ruw linux-6.4/drivers/net/ethernet/Makefile linux-6.4-fbx/drivers/net/ethernet/Makefile
--- linux-6.4/drivers/net/ethernet/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/ethernet/Makefile	2023-05-22 20:06:41.395778047 +0200
@@ -100,6 +100,7 @@
 obj-$(CONFIG_NET_VENDOR_VIA) += via/
 obj-$(CONFIG_NET_VENDOR_WANGXUN) += wangxun/
 obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
+obj-$(CONFIG_NET_VENDOR_WINTEGRA) += wintegra/
 obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
 obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
 obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
diff -ruw linux-6.4/drivers/net/ethernet/broadcom/Kconfig linux-6.4-fbx/drivers/net/ethernet/broadcom/Kconfig
--- linux-6.4/drivers/net/ethernet/broadcom/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/ethernet/broadcom/Kconfig	2023-05-22 20:06:41.431779005 +0200
@@ -68,6 +68,39 @@
 	  This driver supports the ethernet MACs in the Broadcom 63xx
 	  MIPS chipset family (BCM63XX).
 
+config BCM63XX_ENET_RUNNER
+	tristate "Broadcom 63xx (63138) runner ethernet support"
+	select MII
+	select FIXED_PHY
+	select PHYLIB
+	select BCM7XXX_PHY
+	select BROADCOM_PHY
+	select SOC_BCM63XX_RDP
+
+config BCM63158_SF2
+	tristate "Broadcom 63158 SF2 support"
+	select MII
+	select PHYLINK
+	select BCM7XXX_PHY
+	select BROADCOM_PHY
+	select NET_DSA
+	select NET_DSA_TAG_BRCM_FBX
+
+config BCM63158_ENET_RUNNER
+	tristate "Broadcom 63158 runner ethernet support"
+	select MII
+	select PHYLINK
+	select SOC_BCM63XX_XRDP
+
+config BCM63158_ENET_RUNNER_FF
+	bool "fastpath support for freebox boards"
+	depends on BCM63158_ENET_RUNNER
+	select IP_FFN
+	select IPV6_FFN
+	select IPV6_SIT_6RD
+	select BRIDGE
+	select FBXBRIDGE
+
 config BCMGENET
 	tristate "Broadcom GENET internal MAC support"
 	depends on HAS_IOMEM
diff -ruw linux-6.4/drivers/net/ethernet/broadcom/Makefile linux-6.4-fbx/drivers/net/ethernet/broadcom/Makefile
--- linux-6.4/drivers/net/ethernet/broadcom/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/ethernet/broadcom/Makefile	2023-03-09 15:06:11.356234011 +0100
@@ -17,3 +17,5 @@
 obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
 obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
 obj-$(CONFIG_BNXT) += bnxt/
+obj-$(CONFIG_BCM63XX_ENET_RUNNER) += bcm63xx_enet_runner/
+obj-y += bcm63158/
diff -ruw linux-6.4/drivers/net/ethernet/marvell/Kconfig linux-6.4-fbx/drivers/net/ethernet/marvell/Kconfig
--- linux-6.4/drivers/net/ethernet/marvell/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/ethernet/marvell/Kconfig	2023-05-22 20:06:41.719786665 +0200
@@ -23,6 +23,7 @@
 	depends on INET
 	select PHYLIB
 	select MVMDIO
+	select MII
 	help
 	  This driver supports the gigabit ethernet MACs in the
 	  Marvell Discovery PPC/MIPS chipset family (MV643XX) and
@@ -31,6 +32,15 @@
 	  Some boards that use the Discovery chipset are the Momenco
 	  Ocelot C and Jaguar ATX and Pegasos II.
 
+config MV643XX_ETH_FBX_FF
+	bool "fastpath support for freebox boards"
+	depends on MV643XX_ETH
+	select IP_FFN
+	select IPV6_FFN
+	select IPV6_SIT_6RD
+	select BRIDGE
+	select FBXBRIDGE
+
 config MVMDIO
 	tristate "Marvell MDIO interface support"
 	depends on HAS_IOMEM
@@ -90,6 +100,7 @@
 	select MVMDIO
 	select PHYLINK
 	select PAGE_POOL
+	select MII
 	help
 	  This driver supports the network interface units in the
 	  Marvell ARMADA 375, 7K and 8K SoCs.
@@ -99,6 +110,15 @@
 	depends on (PTP_1588_CLOCK = y && MVPP2 = y) || \
 		   (PTP_1588_CLOCK && MVPP2 = m)
 
+config MVPP2_FBX_FF
+	bool "fastpath support for freebox boards"
+	depends on MVPP2
+	select IP_FFN
+	select IPV6_FFN
+	select IPV6_SIT_6RD
+	select BRIDGE
+	select FBXBRIDGE
+
 config PXA168_ETH
 	tristate "Marvell pxa168 ethernet support"
 	depends on HAS_IOMEM
diff -ruw linux-6.4/drivers/net/ethernet/qualcomm/Kconfig linux-6.4-fbx/drivers/net/ethernet/qualcomm/Kconfig
--- linux-6.4/drivers/net/ethernet/qualcomm/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/Kconfig	2023-05-22 20:30:14.537853935 +0200
@@ -62,5 +62,6 @@
 	  Precision Clock Synchronization Protocol.
 
 source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
+source "drivers/net/ethernet/qualcomm/ipq95xx/Kconfig"
 
 endif # NET_VENDOR_QUALCOMM
diff -ruw linux-6.4/drivers/net/ethernet/qualcomm/Makefile linux-6.4-fbx/drivers/net/ethernet/qualcomm/Makefile
--- linux-6.4/drivers/net/ethernet/qualcomm/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/Makefile	2023-05-22 20:30:14.537853935 +0200
@@ -12,3 +12,4 @@
 obj-y += emac/
 
 obj-$(CONFIG_RMNET) += rmnet/
+obj-y += ipq95xx/
diff -ruw linux-6.4/drivers/net/mdio/mdio-ipq4019.c linux-6.4-fbx/drivers/net/mdio/mdio-ipq4019.c
--- linux-6.4/drivers/net/mdio/mdio-ipq4019.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/mdio/mdio-ipq4019.c	2023-05-22 20:30:14.541854042 +0200
@@ -26,6 +26,8 @@
 #define MDIO_CMD_ACCESS_CODE_C45_WRITE	1
 #define MDIO_CMD_ACCESS_CODE_C45_READ	2
 
+#define MODE_DIV_FACTOR_MASK			0xff
+
 /* 0 = Clause 22, 1 = Clause 45 */
 #define MDIO_MODE_C45				BIT(8)
 
@@ -38,9 +40,11 @@
 #define IPQ_PHY_SET_DELAY_US	100000
 
 struct ipq4019_mdio_data {
+	struct platform_device *pdev;
 	void __iomem	*membase;
 	void __iomem *eth_ldo_rdy;
 	struct clk *mdio_clk;
+	unsigned int clk_freq;
 };
 
 static int ipq4019_mdio_wait_busy(struct mii_bus *bus)
@@ -203,6 +207,41 @@
 	return 0;
 }
 
+static void ipq_mdio_set_freq(struct ipq4019_mdio_data *priv)
+{
+	struct device *dev = &priv->pdev->dev;
+	bool adjusted;
+	u32 div_req, div, reg;
+
+	reg = readl(priv->membase + MDIO_MODE_REG);
+
+	/* Keep the hardware default values */
+	if (!priv->clk_freq) {
+		reg = readl(priv->membase + MDIO_MODE_REG);
+		return;
+	}
+
+	/* only power of 2 divider from 8 to 256 are supported */
+	div_req = IPQ_MDIO_CLK_RATE / priv->clk_freq;
+	adjusted = false;
+	if (div_req < 8)
+		div = 8;
+	else if (div_req > 256)
+		div = 256;
+	else
+		div = __roundup_pow_of_two(div_req);
+
+	if (div_req != div)
+		dev_info(dev, "requested mdio clock freq "
+			 "not available, adjusted to %u\n",
+			 (IPQ_MDIO_CLK_RATE / div));
+
+	reg = readl(priv->membase + MDIO_MODE_REG);
+	reg &= ~MODE_DIV_FACTOR_MASK;
+	reg |= div - 1;
+	writel(reg, priv->membase + MDIO_MODE_REG);
+}
+
 static int ipq_mdio_reset(struct mii_bus *bus)
 {
 	struct ipq4019_mdio_data *priv = bus->priv;
@@ -228,11 +267,14 @@
 	if (ret == 0)
 		mdelay(10);
 
+	ipq_mdio_set_freq(priv);
+
 	return ret;
 }
 
 static int ipq4019_mdio_probe(struct platform_device *pdev)
 {
+	struct device_node *np = pdev->dev.of_node;
 	struct ipq4019_mdio_data *priv;
 	struct mii_bus *bus;
 	struct resource *res;
@@ -243,6 +285,7 @@
 		return -ENOMEM;
 
 	priv = bus->priv;
+	priv->pdev = pdev;
 
 	priv->membase = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(priv->membase))
@@ -258,6 +301,9 @@
 	if (res)
 		priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res);
 
+	if (of_property_read_u32(np, "clock-frequency", &priv->clk_freq))
+		priv->clk_freq = 0;
+
 	bus->name = "ipq4019_mdio";
 	bus->read = ipq4019_mdio_read_c22;
 	bus->write = ipq4019_mdio_write_c22;
@@ -290,6 +336,7 @@
 static const struct of_device_id ipq4019_mdio_dt_ids[] = {
 	{ .compatible = "qcom,ipq4019-mdio" },
 	{ .compatible = "qcom,ipq5018-mdio" },
+	{ .compatible = "qcom,ipq9574-mdio" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, ipq4019_mdio_dt_ids);
diff -ruw linux-6.4/drivers/net/mdio/of_mdio.c linux-6.4-fbx/drivers/net/mdio/of_mdio.c
--- linux-6.4/drivers/net/mdio/of_mdio.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/mdio/of_mdio.c	2023-05-31 17:11:03.417680605 +0200
@@ -173,6 +173,8 @@
 	mdio->reset_post_delay_us = 0;
 	of_property_read_u32(np, "reset-post-delay-us", &mdio->reset_post_delay_us);
 
+	mdio->keep_broken_phy = of_property_read_bool(np, "keep-broken-phy");
+
 	/* Register the MDIO bus */
 	rc = __mdiobus_register(mdio, owner);
 	if (rc)
diff -ruw linux-6.4/drivers/net/phy/Kconfig linux-6.4-fbx/drivers/net/phy/Kconfig
--- linux-6.4/drivers/net/phy/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/phy/Kconfig	2023-05-31 17:11:03.417680605 +0200
@@ -309,6 +309,12 @@
 	  Currently supports the AR8030, AR8031, AR8033, AR8035 and internal
 	  QCA8337(Internal qca8k PHY) model
 
+config QCA807X_PHY
+	tristate "Qualcomm QCA870x PHYs"
+
+config QCA8084_PHY
+	tristate "Qualcomm QCA8084 Quad-PHY"
+
 config QSEMI_PHY
 	tristate "Quality Semiconductor PHYs"
 	help
diff -ruw linux-6.4/drivers/net/phy/Makefile linux-6.4-fbx/drivers/net/phy/Makefile
--- linux-6.4/drivers/net/phy/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/phy/Makefile	2023-05-31 17:11:03.417680605 +0200
@@ -82,7 +82,13 @@
 obj-$(CONFIG_NXP_C45_TJA11XX_PHY)	+= nxp-c45-tja11xx.o
 obj-$(CONFIG_NXP_CBTX_PHY)	+= nxp-cbtx.o
 obj-$(CONFIG_NXP_TJA11XX_PHY)	+= nxp-tja11xx.o
+obj-$(CONFIG_QCA807X_PHY) 	+= qca807x.o
+obj-$(CONFIG_QCA8084_PHY) 	+= qca8084.o
 obj-$(CONFIG_QSEMI_PHY)		+= qsemi.o
+realtek-objs += realtek.o
+ifdef CONFIG_HWMON
+realtek-objs += realtek-hwmon.o
+endif
 obj-$(CONFIG_REALTEK_PHY)	+= realtek.o
 obj-$(CONFIG_RENESAS_PHY)	+= uPD60620.o
 obj-$(CONFIG_ROCKCHIP_PHY)	+= rockchip.o
diff -ruw linux-6.4/drivers/net/phy/aquantia_main.c linux-6.4-fbx/drivers/net/phy/aquantia_main.c
--- linux-6.4/drivers/net/phy/aquantia_main.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/phy/aquantia_main.c	2023-05-22 20:30:14.541854042 +0200
@@ -12,6 +12,7 @@
 #include <linux/delay.h>
 #include <linux/bitfield.h>
 #include <linux/phy.h>
+#include <linux/firmware.h>
 
 #include "aquantia.h"
 
@@ -20,6 +21,7 @@
 #define PHY_ID_AQR105	0x03a1b4a2
 #define PHY_ID_AQR106	0x03a1b4d0
 #define PHY_ID_AQR107	0x03a1b4e0
+#define PHY_ID_AQR112C	0x31c31d12
 #define PHY_ID_AQCS109	0x03a1b5c2
 #define PHY_ID_AQR405	0x03a1b4b0
 #define PHY_ID_AQR112	0x03a1b662
@@ -37,6 +39,9 @@
 #define MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI	7
 #define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII	10
 
+#define MDIO_PHYXS_VEND_PROV2			0xC441
+#define MDIO_PHYXS_VEND_PROV2_USX_AN		BIT(3)
+
 #define MDIO_AN_VEND_PROV			0xc400
 #define MDIO_AN_VEND_PROV_1000BASET_FULL	BIT(15)
 #define MDIO_AN_VEND_PROV_1000BASET_HALF	BIT(14)
@@ -112,6 +117,27 @@
 #define VEND1_GLOBAL_CFG_RATE_ADAPT_USX		1
 #define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE	2
 
+#define VEND1_GLOBAL_MAILBOX_CONTROL		0x0200
+#define VEND1_GLOBAL_MAILBOX_EXECUTE		BIT(15)
+#define VEND1_GLOBAL_MAILBOX_WRITE		BIT(14)
+#define VEND1_GLOBAL_MAILBOX_RESET_CRC		BIT(12)
+#define VEND1_GLOBAL_MAILBOX_BUSY		BIT(8)
+
+#define VEND1_GLOBAL_MAILBOX_CRC		0x0201
+
+#define VEND1_GLOBAL_MAILBOX_ADDR_MSW		0x0202
+#define VEND1_GLOBAL_MAILBOX_ADDR_LSW		0x0203
+
+#define VEND1_GLOBAL_MAILBOX_DATA_MSW		0x0204
+#define VEND1_GLOBAL_MAILBOX_DATA_LSW		0x0205
+
+#define VEND1_GLOBAL_UP_CONTROL			0xc001
+#define VEND1_GLOBAL_UP_RESET			BIT(15)
+#define VEND1_GLOBAL_UP_RUN_STALL_OVERRIDE	BIT(6)
+#define VEND1_GLOBAL_UP_RUN_STALL		BIT(0)
+
+#define VEND1_GLOBAL_FAULT			0xc850
+
 #define VEND1_GLOBAL_RSVD_STAT1			0xc885
 #define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID	GENMASK(7, 4)
 #define VEND1_GLOBAL_RSVD_STAT1_PROV_ID		GENMASK(3, 0)
@@ -152,6 +178,46 @@
 #define AQR107_OP_IN_PROG_SLEEP		1000
 #define AQR107_OP_IN_PROG_TIMEOUT	100000
 
+/* registers in MDIO_MMD_VEND1 region */
+#define AQUANTIA_VND1_GLOBAL_SC			0x000
+#define  AQUANTIA_VND1_GLOBAL_SC_LP		BIT(0xb)
+
+/* global start rate, the protocol associated with this speed is used by default
+ * on SI.
+ */
+#define AQUANTIA_VND1_GSTART_RATE		0x31a
+#define  AQUANTIA_VND1_GSTART_RATE_OFF		0
+#define  AQUANTIA_VND1_GSTART_RATE_100M		1
+#define  AQUANTIA_VND1_GSTART_RATE_1G		2
+#define  AQUANTIA_VND1_GSTART_RATE_10G		3
+#define  AQUANTIA_VND1_GSTART_RATE_2_5G		4
+#define  AQUANTIA_VND1_GSTART_RATE_5G		5
+
+/* SYSCFG registers for 100M, 1G, 2.5G, 5G, 10G */
+#define AQUANTIA_VND1_GSYSCFG_BASE		0x31b
+#define AQUANTIA_VND1_GSYSCFG_100M		0
+#define AQUANTIA_VND1_GSYSCFG_1G		1
+#define AQUANTIA_VND1_GSYSCFG_2_5G		2
+#define AQUANTIA_VND1_GSYSCFG_5G		3
+#define AQUANTIA_VND1_GSYSCFG_10G		4
+
+/* addresses of memory segments in the phy */
+#define DRAM_BASE_ADDR		0x3FFE0000
+#define IRAM_BASE_ADDR		0x40000000
+
+/* firmware image format constants */
+#define VERSION_STRING_SIZE	0x40
+#define VERSION_STRING_OFFSET	0x0200
+#define HEADER_OFFSET		0x300
+
+struct aqr_fw_header {
+	u8	padding[4];
+	u8	iram_offset[3];
+	u8	iram_size[3];
+	u8	dram_offset[3];
+	u8	dram_size[3];
+};
+
 struct aqr107_hw_stat {
 	const char *name;
 	int reg;
@@ -556,7 +622,7 @@
 	build_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID, val);
 	prov_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_PROV_ID, val);
 
-	phydev_dbg(phydev, "FW %u.%u, Build %u, Provisioning %u\n",
+	phydev_info(phydev, "FW %u.%u, Build %u, Provisioning %u\n",
 		   fw_major, fw_minor, build_id, prov_id);
 }
 
@@ -586,6 +652,482 @@
 	return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
 }
 
+static const u16 _crc16_lookuptable[256] = {
+    0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
+    0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+    0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
+    0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
+    0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
+    0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
+    0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
+    0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
+    0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
+    0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
+    0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
+    0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
+    0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
+    0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
+    0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
+    0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
+    0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
+    0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
+    0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
+    0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
+    0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
+    0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+    0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
+    0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
+    0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
+    0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
+    0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
+    0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
+    0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
+    0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
+    0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+    0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
+};
+
+static u16 crc16_ccitt(u16 crc, const u8 *buf, size_t len)
+{
+    while (len--) {
+	    crc = ((crc << 8) ^ _crc16_lookuptable[((crc >> 8) ^
+						    ((*buf++) & 0x00FF))]);
+    }
+    return crc;
+}
+
+/* load data into the phy's memory */
+static int aqr112_load_chunk(struct phy_device *phydev, uint32_t addr,
+			     const uint8_t *data, size_t len)
+{
+	u16 crc = 0;
+	int up_crc;
+	size_t pos;
+	int err;
+
+	err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+			    VEND1_GLOBAL_MAILBOX_CONTROL,
+			    VEND1_GLOBAL_MAILBOX_RESET_CRC);
+	if (err < 0)
+		return err;
+
+	err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+			    VEND1_GLOBAL_MAILBOX_ADDR_MSW, addr >> 16);
+	if (err < 0)
+		return err;
+
+	err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+			    VEND1_GLOBAL_MAILBOX_ADDR_LSW, addr & 0xfffc);
+	if (err < 0)
+		return err;
+
+	for (pos = 0; pos < len; pos += min_t(u32, sizeof(u32), len - pos)) {
+		u32 word = 0;
+
+		memcpy(&word, &data[pos], min_t(u32, sizeof(u32), len - pos));
+
+		err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+				    VEND1_GLOBAL_MAILBOX_DATA_MSW,
+				    word >> 16);
+		if (err < 0)
+			return err;
+
+		err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+				    VEND1_GLOBAL_MAILBOX_DATA_LSW,
+				    word & 0xffff);
+		if (err < 0)
+			return err;
+
+		err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+				    VEND1_GLOBAL_MAILBOX_CONTROL,
+				    (VEND1_GLOBAL_MAILBOX_EXECUTE |
+				     VEND1_GLOBAL_MAILBOX_WRITE));
+		if (err < 0)
+			return err;
+
+		/* keep a big endian CRC to match the phy processor */
+		word = cpu_to_be32(word);
+		crc = crc16_ccitt(crc, (uint8_t *)&word, sizeof(word));
+	}
+
+	up_crc = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+			      VEND1_GLOBAL_MAILBOX_CRC);
+	if (up_crc < 0)
+		return up_crc;
+
+	if (crc != up_crc) {
+		dev_err(&phydev->mdio.dev,
+			"crc mismatch: calculated 0x%04hx phy 0x%04hx\n",
+			crc, up_crc);
+		return -EIO;
+	}
+	return 0;
+}
+
+static u32 unpack_u24(const u8 *data)
+{
+	return (data[2] << 16) + (data[1] << 8) + data[0];
+}
+
+static int aqr_upload_firmware(struct phy_device *phydev,
+			       const char *name)
+{
+	struct device *dev = &phydev->mdio.dev;
+	const struct firmware *fw;
+	const struct aqr_fw_header *header;
+	char file_name[64];
+	char version[VERSION_STRING_SIZE + 1];
+	u32 primary_offset, iram_offset, iram_size, dram_offset, dram_size;
+	u16 calculated_crc, read_crc;
+	int ret;
+
+	scnprintf(file_name, sizeof (file_name),  "aquantia_phy/%s.uc", name);
+	ret = request_firmware_direct(&fw, file_name, dev);
+	if (ret) {
+		dev_err(dev, "failed to load firmware %s, ret: %d\n",
+			file_name, ret);
+		return ret;
+	}
+
+	if (fw->size < 16) {
+		dev_err(dev, "firmware too small\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	read_crc = (fw->data[fw->size - 2] << 8)  | fw->data[fw->size - 1];
+	calculated_crc = crc16_ccitt(0, fw->data, fw->size - 2);
+	if (read_crc != calculated_crc) {
+		dev_err(dev, "bad firmware crc: file 0x%04x "
+			"calculated 0x%04x\n",
+			read_crc, calculated_crc);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* Find the DRAM and IRAM sections within the firmware file. */
+	primary_offset = ((fw->data[9] & 0xf) << 8 | fw->data[8]) << 12;
+	header = (const struct aqr_fw_header *)
+		&fw->data[primary_offset + HEADER_OFFSET];
+
+	iram_offset = primary_offset + unpack_u24(header->iram_offset);
+	iram_size = unpack_u24(header->iram_size);
+
+	dram_offset = primary_offset + unpack_u24(header->dram_offset);
+	dram_size = unpack_u24(header->dram_size);
+
+	strlcpy(version,
+		(char *)&fw->data[dram_offset + VERSION_STRING_OFFSET],
+		VERSION_STRING_SIZE);
+	version[VERSION_STRING_SIZE] = 0;
+
+	dev_info(dev, "loading firmware version '%s'...\n", version);
+
+	/* stall the microprocessor */
+	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+			    VEND1_GLOBAL_UP_CONTROL,
+			    (VEND1_GLOBAL_UP_RUN_STALL |
+			     VEND1_GLOBAL_UP_RUN_STALL_OVERRIDE));
+	if (ret < 0)
+		goto fail;
+
+	ret = aqr112_load_chunk(phydev, DRAM_BASE_ADDR,
+				&fw->data[dram_offset],
+				dram_size);
+	if (ret)
+		goto fail;
+
+	ret = aqr112_load_chunk(phydev, IRAM_BASE_ADDR,
+				&fw->data[iram_offset],
+				iram_size);
+	if (ret)
+		goto fail;
+
+	/* make sure soft reset and low power mode are clear */
+	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0, 0);
+	if (ret)
+		goto fail;
+
+	/* Release the microprocessor. UP_RESET must be held for 100 usec. */
+	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+			    VEND1_GLOBAL_UP_CONTROL,
+			    (VEND1_GLOBAL_UP_RUN_STALL |
+			     VEND1_GLOBAL_UP_RUN_STALL_OVERRIDE |
+			     VEND1_GLOBAL_UP_RESET));
+	if (ret)
+		goto fail;
+
+	msleep(10);
+
+	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+			    VEND1_GLOBAL_UP_CONTROL,
+			    VEND1_GLOBAL_UP_RUN_STALL_OVERRIDE);
+	if (ret)
+		goto fail;
+
+	ret = 0;
+
+fail:
+	release_firmware(fw);
+	return ret;
+}
+
+static int aqr112_config_init(struct phy_device *phydev)
+{
+	int ret;
+
+	/* Check that the PHY interface type is compatible */
+	if (phydev->interface != PHY_INTERFACE_MODE_NA &&
+	    phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+	    phydev->interface != PHY_INTERFACE_MODE_2500BASEX) {
+		phydev_err(phydev, "requested interface mode not supp\n");
+		return -ENODEV;
+	}
+
+	ret = aqr_upload_firmware(phydev, "aqr112");
+	if (ret)
+		return ret;
+
+	aqr107_wait_reset_complete(phydev);
+	aqr107_chip_info(phydev);
+
+	return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+}
+
+static int aqr112_get_features(struct phy_device *phydev)
+{
+	int ret;
+
+	ret = genphy_c45_pma_read_abilities(phydev);
+	if (ret)
+		return ret;
+
+	phy_set_max_speed(phydev, SPEED_2500);
+	return 0;
+}
+
+static void dump_rate_config(struct phy_device *phydev,
+			     const char *str, u32 val)
+{
+	phydev_info(phydev,
+		    "cfg %s: "
+		    "serdes_mode:%u aneg:%d training:%d rstt:%d ra:%d\n",
+		    str,
+		    val & 0x7,
+		    (val >> 3) & 0x1,
+		    (val >> 4) & 0x1,
+		    (val >> 5) & 0x1,
+		    (val >> 7) & 0x3);
+}
+
+static void dump_rates_config(struct phy_device *phydev)
+{
+	dump_rate_config(phydev, "100M", phy_read_mmd(phydev, 0x1e, 0x31b));
+	dump_rate_config(phydev, "1G  ", phy_read_mmd(phydev, 0x1e, 0x31c));
+	dump_rate_config(phydev, "2.5G", phy_read_mmd(phydev, 0x1e, 0x31d));
+	dump_rate_config(phydev, "5G  ", phy_read_mmd(phydev, 0x1e, 0x31e));
+	dump_rate_config(phydev, "10G ", phy_read_mmd(phydev, 0x1e, 0x31f));
+}
+
+#define SERDES_MODE_XFI		0
+#define SERDES_MODE_XAUI	1
+#define SERDES_MODE_RXAUI	2
+#define SERDES_MODE_SGMII	3
+#define SERDES_MODE_OCSGMII	4
+#define SERDES_MODE_LOW_POWER	5
+
+#define RA_METHOD_NONE		0
+#define RA_METHOD_USX		1
+#define RA_METHOD_PAUSE		2
+
+enum system_if_rate {
+	SIF_RATE_100,
+	SIF_RATE_1G,
+	SIF_RATE_2_5G,
+	SIF_RATE_5G,
+	SIF_RATE_10G,
+	SIF_RATE_MAX,
+};
+
+struct system_if_cfg {
+	bool	used;
+	u32	serdes_mode;
+	bool	autoneg_en;
+	bool	training_en;
+	bool	serdes_rst_transition_en;
+	bool	serdes_silence_en;
+	u32	ra_method;
+};
+
+static u32 gen_system_if_cfg(const struct system_if_cfg *cfg)
+{
+	u32 val;
+
+	if (!cfg->used)
+		return (SERDES_MODE_LOW_POWER << 0);
+
+	val = cfg->serdes_mode << 0;
+	if (cfg->autoneg_en)
+		val |= (1 << 3);
+	if (cfg->training_en)
+		val |= (1 << 4);
+	if (cfg->serdes_rst_transition_en)
+		val |= (1 << 5);
+	if (cfg->serdes_silence_en)
+		val |= (1 << 6);
+	val |= (cfg->ra_method << 7);
+	return val;
+}
+
+static int aqr113_config_init(struct phy_device *phydev)
+{
+	struct system_if_cfg scfgs[SIF_RATE_MAX];
+	bool use_inband_aneg;
+	int ret, val;
+
+	/* Check that the PHY interface type is compatible */
+	if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+	    phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
+	    phydev->interface != PHY_INTERFACE_MODE_XGMII &&
+	    phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
+	    phydev->interface != PHY_INTERFACE_MODE_10GKR &&
+	    phydev->interface != PHY_INTERFACE_MODE_10GBASER)
+		return -ENODEV;
+
+	/* check if a valid firmware is loaded */
+	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
+	if (!val) {
+		ret = aqr_upload_firmware(phydev, "aqr113");
+		if (ret)
+			return ret;
+	}
+
+	ret = aqr107_wait_reset_complete(phydev);
+	if (ret) {
+		phydev_err(phydev, "phy firmware load timeout\n");
+		return -ENODEV;
+	}
+
+	aqr107_chip_info(phydev);
+
+	/* set PHY in low power mode so we can configure protocols */
+	phy_write_mmd(phydev, MDIO_MMD_VEND1,
+		      AQUANTIA_VND1_GLOBAL_SC,
+		      AQUANTIA_VND1_GLOBAL_SC_LP);
+	msleep(10);
+
+	/* set the default rate to enable the SI link */
+	phy_write_mmd(phydev, MDIO_MMD_VEND1, AQUANTIA_VND1_GSTART_RATE,
+		      AQUANTIA_VND1_GSTART_RATE_OFF);
+
+	/* unfortunately we don't know if phylink uses MLO_AN_INBAND
+	 * or MLO_AN_PHY from here, so this needs to be tuned manually,
+	 * depending on whether the device tree node carries the
+	 * managed = "in-band-status" property */
+	use_inband_aneg = false;
+
+	memset(scfgs, 0, sizeof (scfgs));
+	switch (phydev->interface) {
+	case PHY_INTERFACE_MODE_SGMII:
+		scfgs[SIF_RATE_100] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_SGMII,
+			.autoneg_en = use_inband_aneg,
+		};
+		scfgs[SIF_RATE_1G] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_SGMII,
+			.autoneg_en = use_inband_aneg,
+		};
+		break;
+	case PHY_INTERFACE_MODE_2500BASEX:
+		scfgs[SIF_RATE_2_5G] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_OCSGMII,
+			.autoneg_en = false,
+		};
+		break;
+	case PHY_INTERFACE_MODE_10GBASER:
+		scfgs[SIF_RATE_10G] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_XFI,
+			.autoneg_en = false,
+		};
+		break;
+	case PHY_INTERFACE_MODE_10GKR:
+		scfgs[SIF_RATE_10G] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_XFI,
+			.autoneg_en = true,
+		};
+		break;
+	case PHY_INTERFACE_MODE_USXGMII:
+		scfgs[SIF_RATE_10G] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_XFI,
+			.ra_method = RA_METHOD_USX,
+			.autoneg_en = false,
+		};
+		scfgs[SIF_RATE_2_5G] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_XFI,
+			.ra_method = RA_METHOD_USX,
+			.autoneg_en = false,
+		};
+		scfgs[SIF_RATE_1G] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_XFI,
+			.ra_method = RA_METHOD_USX,
+			.autoneg_en = false,
+		};
+		scfgs[SIF_RATE_100] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_XFI,
+			.ra_method = RA_METHOD_USX,
+			.autoneg_en = false,
+		};
+		break;
+	default:
+		break;
+	}
+
+	phy_write_mmd(phydev, MDIO_MMD_VEND1,
+		      AQUANTIA_VND1_GSYSCFG_BASE + AQUANTIA_VND1_GSYSCFG_100M,
+		      gen_system_if_cfg(&scfgs[SIF_RATE_100]));
+
+	phy_write_mmd(phydev, MDIO_MMD_VEND1,
+		      AQUANTIA_VND1_GSYSCFG_BASE + AQUANTIA_VND1_GSYSCFG_1G,
+		      gen_system_if_cfg(&scfgs[SIF_RATE_1G]));
+
+	phy_write_mmd(phydev, MDIO_MMD_VEND1,
+		      AQUANTIA_VND1_GSYSCFG_BASE + AQUANTIA_VND1_GSYSCFG_2_5G,
+		      gen_system_if_cfg(&scfgs[SIF_RATE_2_5G]));
+
+	phy_write_mmd(phydev, MDIO_MMD_VEND1,
+		      AQUANTIA_VND1_GSYSCFG_BASE + AQUANTIA_VND1_GSYSCFG_5G,
+		      gen_system_if_cfg(&scfgs[SIF_RATE_5G]));
+
+	phy_write_mmd(phydev, MDIO_MMD_VEND1,
+		      AQUANTIA_VND1_GSYSCFG_BASE + AQUANTIA_VND1_GSYSCFG_10G,
+		      gen_system_if_cfg(&scfgs[SIF_RATE_10G]));
+
+	val = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_VEND_PROV2);
+	if (phydev->interface == PHY_INTERFACE_MODE_USXGMII &&
+	    use_inband_aneg)
+		val |= MDIO_PHYXS_VEND_PROV2_USX_AN;
+	else
+		val &= ~MDIO_PHYXS_VEND_PROV2_USX_AN;
+
+	phy_write_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_VEND_PROV2, val);
+
+	/* wake PHY back up */
+	phy_write_mmd(phydev, MDIO_MMD_VEND1, AQUANTIA_VND1_GLOBAL_SC, 0);
+	mdelay(10);
+
+	dump_rates_config(phydev);
+	return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+}
+
 static int aqcs109_config_init(struct phy_device *phydev)
 {
 	int ret;
@@ -776,6 +1318,20 @@
 	.link_change_notify = aqr107_link_change_notify,
 },
 {
+	PHY_ID_MATCH_MODEL(PHY_ID_AQR112C),
+	.name		= "Aquantia AQR112C",
+	.probe		= aqr107_probe,
+	.config_init	= aqr112_config_init,
+	.config_aneg    = aqr_config_aneg,
+	.read_status	= aqr107_read_status,
+	.get_features	= aqr112_get_features,
+	.suspend	= aqr107_suspend,
+	.resume		= aqr107_resume,
+	.get_sset_count	= aqr107_get_sset_count,
+	.get_strings	= aqr107_get_strings,
+	.get_stats	= aqr107_get_stats,
+},
+{
 	PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
 	.name		= "Aquantia AQCS109",
 	.probe		= aqr107_probe,
@@ -843,7 +1399,7 @@
 	.name           = "Aquantia AQR113C",
 	.probe          = aqr107_probe,
 	.get_rate_matching = aqr107_get_rate_matching,
-	.config_init    = aqr107_config_init,
+	.config_init    = aqr113_config_init,
 	.config_aneg    = aqr_config_aneg,
 	.config_intr    = aqr_config_intr,
 	.handle_interrupt       = aqr_handle_interrupt,
@@ -867,6 +1423,7 @@
 	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR105) },
 	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR106) },
 	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR107) },
+	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR112C) },
 	{ PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) },
 	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR405) },
 	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR112) },
diff -ruw linux-6.4/drivers/net/phy/bcm7xxx.c linux-6.4-fbx/drivers/net/phy/bcm7xxx.c
--- linux-6.4/drivers/net/phy/bcm7xxx.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/phy/bcm7xxx.c	2023-05-22 20:06:42.123797411 +0200
@@ -45,6 +45,7 @@
 
 struct bcm7xxx_phy_priv {
 	u64	*stats;
+	bool	printed;
 };
 
 static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
@@ -142,6 +143,7 @@
 
 static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
 {
+	struct bcm7xxx_phy_priv *priv = phydev->priv;
 	u8 rev = PHY_BRCM_7XXX_REV(phydev->dev_flags);
 	u8 patch = PHY_BRCM_7XXX_PATCH(phydev->dev_flags);
 	u8 count;
@@ -153,8 +155,11 @@
 	if (rev == 0)
 		rev = phydev->phy_id & ~phydev->drv->phy_id_mask;
 
-	pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n",
+	if (!priv->printed) {
+		pr_info("%s: %s PHY revision: 0x%02x, patch: %d\n",
 		     phydev_name(phydev), phydev->drv->name, rev, patch);
+		priv->printed = true;
+	}
 
 	/* Dummy read to a register to workaround an issue upon reset where the
 	 * internal inverter may not allow the first MDIO transaction to pass
@@ -387,11 +392,15 @@
 
 static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev)
 {
+	struct bcm7xxx_phy_priv *priv = phydev->priv;
 	u8 rev = phydev->phy_id & ~phydev->drv->phy_id_mask;
 	int ret = 0;
 
-	pr_info_once("%s: %s PHY revision: 0x%02x\n",
+	if (!priv->printed) {
+		pr_info("%s: %s PHY revision: 0x%02x\n",
 		     phydev_name(phydev), phydev->drv->name, rev);
+		priv->printed = true;
+	}
 
 	/* Dummy read to a register to workaround a possible issue upon reset
 	 * where the internal inverter may not allow the first MDIO transaction
@@ -917,6 +926,7 @@
 	BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
 	BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
 	BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
+	BCM7XXX_28NM_GPHY(PHY_ID_BCM63138, "Broadcom BCM63138"),
 	BCM7XXX_40NM_EPHY(PHY_ID_BCM7346, "Broadcom BCM7346"),
 	BCM7XXX_40NM_EPHY(PHY_ID_BCM7362, "Broadcom BCM7362"),
 	BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"),
@@ -946,6 +956,7 @@
 	{ PHY_ID_BCM7435, 0xfffffff0, },
 	{ PHY_ID_BCM7445, 0xfffffff0, },
 	{ PHY_ID_BCM7712, 0xfffffff0, },
+	{ PHY_ID_BCM63138, 0xfffffff0, },
 	{ }
 };
 
diff -ruw linux-6.4/drivers/net/phy/broadcom.c linux-6.4-fbx/drivers/net/phy/broadcom.c
--- linux-6.4/drivers/net/phy/broadcom.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/phy/broadcom.c	2023-05-22 20:06:42.127797518 +0200
@@ -16,6 +16,7 @@
 #include <linux/phy.h>
 #include <linux/brcmphy.h>
 #include <linux/of.h>
+#include <linux/debugfs.h>
 
 #define BRCM_PHY_MODEL(phydev) \
 	((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
@@ -862,6 +863,477 @@
 	bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP08, ret);
 }
 
+#define BRCM_MIIEXT_BANK            0x1f
+# define BRCM_MIIEXT_BANK_MASK       0xfff0
+# define BRCM_MIIEXT_ADDR_RANGE      0xffe0
+# define BRCM_MIIEXT_DEF_BANK        0x8000
+#define BRCM_MIIEXT_OFFSET          0x10
+# define BRCM_MIIEXT_OFF_MASK    0xf
+
+static int bcm63138_ephy_read(struct phy_device *phydev, int reg)
+{
+	uint32_t bank;
+	uint32_t offset;
+	int val;
+	int error;
+
+	if (reg < 0x20)
+		return phy_read(phydev, reg);
+
+	bank = reg & BRCM_MIIEXT_BANK_MASK;
+	offset = (reg & BRCM_MIIEXT_OFF_MASK) + BRCM_MIIEXT_OFFSET;
+
+	error = phy_write(phydev, BRCM_MIIEXT_BANK, bank);
+	val = phy_read(phydev, offset);
+	if (val < 0)
+		error = val;
+
+	error |= phy_write(phydev, BRCM_MIIEXT_BANK, BRCM_MIIEXT_DEF_BANK);
+	return (error < 0) ? error : val;
+}
+
+static int bcm63138_ephy_write(struct phy_device *phydev, int reg, u16 value)
+{
+	uint32_t bank;
+	uint32_t offset;
+	int error;
+
+	if (reg < 0x20)
+		return phy_write(phydev, reg, value);
+
+	bank = reg & BRCM_MIIEXT_BANK_MASK;
+	offset = (reg & BRCM_MIIEXT_OFF_MASK) + BRCM_MIIEXT_OFFSET;
+
+	error = phy_write(phydev, BRCM_MIIEXT_BANK, bank);
+	error |= phy_write(phydev, offset, value);
+	error |= phy_write(phydev, BRCM_MIIEXT_BANK, BRCM_MIIEXT_DEF_BANK);
+
+	return error;
+}
+
+static int bcm63138s_get_features(struct phy_device *phydev)
+{
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(features) = { 0, };
+
+	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, features);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, features);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, features);
+
+	linkmode_copy(phydev->supported, features);
+	linkmode_copy(phydev->advertising, features);
+
+	return 0;
+}
+
+/*
+ * BCM63138 SerDes phy amplitude setting registers.
+ *
+ * serdes_amplitude for bcm63138s attribute is of the form:
+ * full|half <amplitude_value_in_hex>
+ *
+ * it can be read and written.
+ *
+ * official limits are 0x3f-0x0c for amplitude values (unchecked
+ * here).
+ *
+ * values written to the attribute are reset to default after the
+ * interface is brought down & up.
+ */
+#define BCM63138S_AMP_VALUE_REG		0x8065
+#define  AMP_VALUE_MASK			(0x3f << 8)
+#define  AMP_VALUE_SHIFT		(8)
+
+#define BCM63138S_AMP_SCALE_REG		0x8066
+#define  AMP_SCALE_MASK			(1 << 1)
+#define  AMP_SCALE_1V			(0 << 1)
+#define  AMP_SCALE_0_5V			(1 << 1)
+
+/*
+ *
+ */
+static ssize_t bcm63138s_read_serdes_amplitude(struct file *file,
+					       char __user *user_buf,
+					       size_t count, loff_t *ppos)
+{
+	char buf[64];
+	struct phy_device *phydev = file->private_data;
+	bool amp_scale_full;
+	u32 amp_value;
+	u32 reg;
+
+	reg = bcm63138_ephy_read(phydev, BCM63138S_AMP_SCALE_REG);
+	amp_scale_full = (reg & AMP_SCALE_MASK) == AMP_SCALE_1V;
+
+	reg = bcm63138_ephy_read(phydev, BCM63138S_AMP_VALUE_REG);
+	amp_value = (reg & AMP_VALUE_MASK) >> AMP_VALUE_SHIFT;
+
+	snprintf(buf, sizeof (buf), "%s 0x%02x\n",
+		 (amp_scale_full) ? "full" : "half", amp_value);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+#define SCALE_FULL_STR	"full "
+#define SCALE_HALF_STR	"half "
+
+/*
+ *
+ */
+static ssize_t bcm63138s_write_serdes_amplitude(struct file *file,
+						const char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	int err;
+	char buf[32] = {0};
+	const char *val_start;
+	struct phy_device *phydev = file->private_data;
+	u32 scale_val;
+	u32 amp_val;
+	u32 reg;
+
+	/*
+	 * Yay, string parsing in the kernel.
+	 */
+	err = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf,
+				     count);
+	if (err < 0)
+		return err;
+
+	if (!strncmp(buf, SCALE_FULL_STR, strlen(SCALE_FULL_STR))) {
+		scale_val = AMP_SCALE_1V;
+		val_start = buf + strlen(SCALE_FULL_STR);
+	} else if (!strncmp(buf, SCALE_HALF_STR, strlen(SCALE_HALF_STR))) {
+		scale_val = AMP_SCALE_0_5V;
+		val_start = buf + strlen(SCALE_HALF_STR);
+	} else {
+		return -EINVAL;
+	}
+
+	err = kstrtou32(val_start, 16, &amp_val);
+	if (err)
+		return err;
+
+	/*
+	 * all done with parsing, now write the registers.
+	 */
+	amp_val  = (amp_val << AMP_VALUE_SHIFT) & AMP_VALUE_MASK;
+
+	reg = bcm63138_ephy_read(phydev, BCM63138S_AMP_SCALE_REG);
+	reg &= ~AMP_SCALE_MASK;
+	reg |= scale_val;
+	bcm63138_ephy_write(phydev, BCM63138S_AMP_SCALE_REG, reg);
+
+	reg = bcm63138_ephy_read(phydev, BCM63138S_AMP_VALUE_REG);
+	reg &= ~AMP_VALUE_MASK;
+	reg |= amp_val;
+	bcm63138_ephy_write(phydev, BCM63138S_AMP_VALUE_REG, reg);
+
+	return count;
+}
+
+static const struct file_operations fops_serdes_amplitude = {
+	.read = bcm63138s_read_serdes_amplitude,
+	.write = bcm63138s_write_serdes_amplitude,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/*
+ * probe for bcm63138s phy, just create the debugfs entries for serdes
+ * amplitude tuning.
+ *
+ * won't work for more than one 63138s phy in the system.
+ */
+static int bcm63138s_probe(struct phy_device *phydev)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("bcm63138s_phy", NULL);
+	if (IS_ERR(dent) && PTR_ERR(dent) != -EEXIST) {
+		WARN_ON("unable to create debugfs entry for bcm63138s phy");
+		return PTR_ERR(dent);
+	}
+
+	debugfs_create_file("serdes_amplitude",
+			    0600, dent, phydev, &fops_serdes_amplitude);
+
+	return 0;
+}
+
+static int bcm63138s_config_init(struct phy_device *phydev)
+{
+	static const unsigned short cfg_1000x[] = {
+		0x0010, 0x0c2f,
+		0x8182, 0x4000,
+		0x8186, 0x003c,
+		0x8300, 0x015d,
+		0x8301, 0x7,
+		0x0,    0x1140,
+		0x0010, 0x2c2f
+	};
+	int err;
+	size_t i;
+
+	err = genphy_soft_reset(phydev);
+	if (err < 0)
+		return err;
+
+	for (i = 0; i < ARRAY_SIZE(cfg_1000x); i += 2)
+		bcm63138_ephy_write(phydev, cfg_1000x[i], cfg_1000x[i + 1]);
+
+	return 0;
+}
+
+/**
+ * ethtool_adv_to_fiber_adv_t
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_ADV register for fiber link.
+ */
+static inline u32 ethtool_adv_to_fiber_adv_t(unsigned long *adv)
+{
+	u32 result = 0;
+
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, adv))
+		result |= ADVERTISE_1000XFULL;
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, adv))
+		result |= ADVERTISE_1000XFULL;
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, adv))
+		result |= ADVERTISE_1000XHALF;
+
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, adv) &&
+	    linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, adv))
+		result |= ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
+	else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, adv))
+		result |= ADVERTISE_1000XPAUSE;
+
+	return result;
+}
+
+static int bcm63138s_config_aneg(struct phy_device *phydev)
+{
+	int oldadv, adv, err;
+	int changed = 0;
+
+	if (phydev->autoneg != AUTONEG_ENABLE)
+		return genphy_setup_forced(phydev);
+
+	/* Setup fiber advertisement */
+	adv = phy_read(phydev, MII_ADVERTISE);
+	if (adv < 0)
+		return adv;
+
+	oldadv = adv;
+	adv &= ~(ADVERTISE_1000XFULL |
+		 ADVERTISE_1000XHALF |
+		 ADVERTISE_1000XPSE_ASYM |
+		 ADVERTISE_1000XPAUSE);
+	adv |= ethtool_adv_to_fiber_adv_t(phydev->advertising);
+
+	if (adv != oldadv) {
+		err = phy_write(phydev, MII_ADVERTISE, adv);
+		if (err < 0)
+			return err;
+
+		changed = 1;
+	}
+
+	if (changed == 0) {
+		/* Advertisement hasn't changed, but maybe aneg was never on to
+		 * begin with?	Or maybe phy was isolated?
+		 */
+		int ctl = phy_read(phydev, MII_BMCR);
+
+		if (ctl < 0)
+			return ctl;
+
+		if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
+			changed = 1; /* do restart aneg */
+	}
+
+	/* Only restart aneg if we are advertising something different
+	 * than we were before.
+	 */
+	if (changed > 0)
+		changed = genphy_restart_aneg(phydev);
+
+	return changed;
+}
+
+/**
+ * fiber_lpa_to_ethtool_lpa_t
+ * @lpa: value of the MII_LPA register for fiber link
+ *
+ * A small helper function that translates MII_LPA
+ * bits to ethtool LP advertisement settings.
+ */
+static void fiber_lpa_to_ethtool_lpa_t(u32 lpa, unsigned long *res)
+{
+	linkmode_zero(res);
+
+	if (lpa & LPA_1000XHALF)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, res);
+	if (lpa & LPA_1000XFULL)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, res);
+}
+
+static int bcm63138s_read_status_page_an(struct phy_device *phydev)
+{
+	int lpa, adv, common_adv;
+
+	lpa = phy_read(phydev, MII_LPA);
+	if (lpa < 0)
+		return lpa;
+
+	adv = phy_read(phydev, MII_ADVERTISE);
+	if (adv < 0)
+		return adv;
+
+	common_adv = lpa & adv;
+
+	phydev->speed = SPEED_10;
+	phydev->duplex = DUPLEX_HALF;
+	fiber_lpa_to_ethtool_lpa_t(lpa, phydev->lp_advertising);
+
+	phydev->pause = 0;
+	phydev->asym_pause = 0;
+
+	if (common_adv & (LPA_1000XHALF | LPA_1000XFULL)) {
+		phydev->speed = SPEED_1000;
+		if (common_adv & LPA_1000XFULL)
+			phydev->duplex = DUPLEX_FULL;
+	}
+
+	if (phydev->duplex == DUPLEX_FULL) {
+		if (!(lpa & LPA_1000XPAUSE)) {
+			phydev->pause = 0;
+			phydev->asym_pause = 0;
+		} else if ((lpa & LPA_1000XPAUSE_ASYM)) {
+			phydev->pause = 1;
+			phydev->asym_pause = 1;
+		} else {
+			phydev->pause = 1;
+			phydev->asym_pause = 0;
+		}
+	}
+
+	return 0;
+}
+
+static int bcm63138s_read_status_page_fixed(struct phy_device *phydev)
+{
+	int bmcr = phy_read(phydev, MII_BMCR);
+
+	if (bmcr < 0)
+		return bmcr;
+
+	if (bmcr & BMCR_FULLDPLX)
+		phydev->duplex = DUPLEX_FULL;
+	else
+		phydev->duplex = DUPLEX_HALF;
+
+	phydev->speed = SPEED_1000;
+	phydev->pause = 0;
+	phydev->asym_pause = 0;
+
+	return 0;
+}
+
+static int bcm63138s_read_status(struct phy_device *phydev)
+{
+	int err;
+
+	genphy_update_link(phydev);
+
+	if (phydev->autoneg == AUTONEG_ENABLE)
+		err = bcm63138s_read_status_page_an(phydev);
+	else
+		err = bcm63138s_read_status_page_fixed(phydev);
+
+	return err;
+}
+
+#define MISC_ADDR(base, channel)	base, channel
+
+#define AFE_TXCONFIG_0			MISC_ADDR(0x39, 1)
+#define AFE_TXCONFIG_1			MISC_ADDR(0x3a, 2)
+#define AFE_TX_IQ_RX_LP			MISC_ADDR(0x39, 0)
+#define AFE_TEMPSEN_OTHERS		MISC_ADDR(0x3b, 0)
+
+static void r_rc_cal_reset(struct phy_device *phydev)
+{
+	/* Reset R_CAL/RC_CAL Engine */
+	bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
+
+	/* Disable Reset R_AL/RC_CAL Engine */
+	bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
+}
+
+static int bcm63158_config_init(struct phy_device *phydev)
+{
+	/* Turn off AOF */
+	bcm_phy_write_misc(phydev, AFE_TXCONFIG_0, 0x0000);
+
+	/* 1g AB symmetry Iq */
+	bcm_phy_write_misc(phydev, AFE_TXCONFIG_1, 0x0BCC);
+
+	/* LPF BW */
+	bcm_phy_write_misc(phydev, AFE_TX_IQ_RX_LP, 0x233F);
+
+	/* RCAL +6LSB to make impedance from 112 to 100ohm */
+	bcm_phy_write_misc(phydev, AFE_TEMPSEN_OTHERS, 0xAD40);
+
+	/* since rcal make R smaller, make master current -4%  */
+	bcm_phy_write_misc(phydev, DSP_TAP10, 0x091B);
+
+	/* From EEE excel config file for Vitesse fix */
+	/* rx_on_tune 8 -> 0xf */
+	bcm_phy_write_misc(phydev, 0x0021, 0x0002, 0x87F6);
+
+	/* 100tx EEE bandwidth */
+	bcm_phy_write_misc(phydev, 0x0022, 0x0002, 0x017D);
+
+	/* enable ffe zero det for Vitesse interop */
+	bcm_phy_write_misc(phydev, 0x0026, 0x0002, 0x0015);
+
+	/* Reset R_CAL/RC_CAL engine */
+	r_rc_cal_reset(phydev);
+
+	return 0;
+}
+
+static int bcm63158_read_mmd(struct phy_device *phydev,
+			     int devnum, u16 regnum)
+{
+	struct mii_bus *bus = phydev->mdio.bus;
+	int phy_addr = phydev->mdio.addr;
+	int val;
+
+	/* MDIO_MMD_PCS/MDIO_PCS_EEE_ABLE is not set in broadcom PHY,
+	 * so we divert read_mmd to return fake value for this
+	 * register */
+	if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE)
+		return MDIO_EEE_100TX | MDIO_EEE_1000T;
+
+	/* Write the desired MMD Devad */
+	__mdiobus_write(bus, phy_addr, MII_MMD_CTRL, devnum);
+
+	/* Write the desired MMD register address */
+	__mdiobus_write(bus, phy_addr, MII_MMD_DATA, regnum);
+
+	/* Select the Function : DATA with no post increment */
+	__mdiobus_write(bus, phy_addr, MII_MMD_CTRL,
+			devnum | MII_MMD_CTRL_NOINCR);
+
+	/* Read the content of the MMD's selected register */
+	val = __mdiobus_read(bus, phy_addr, MII_MMD_DATA);
+	return val;
+}
+
+
 static struct phy_driver broadcom_drivers[] = {
 {
 	.phy_id		= PHY_ID_BCM5411,
@@ -1132,6 +1604,29 @@
 	.config_intr    = bcm_phy_config_intr,
 	.handle_interrupt = bcm_phy_handle_interrupt,
 	.link_change_notify	= bcm54xx_link_change_notify,
+}, {
+	.phy_id		= PHY_ID_BCM63138S,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Broadcom BCM63138S",
+	.probe		= bcm63138s_probe,
+	.get_features	= bcm63138s_get_features,
+	.config_init	= bcm63138s_config_init,
+	.config_aneg	= bcm63138s_config_aneg,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
+	.read_status	= bcm63138s_read_status,
+}, {
+	.phy_id		= PHY_ID_BCM63158,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Broadcom BCM63158",
+	.features	= PHY_GBIT_FEATURES,
+	.flags		= PHY_IS_INTERNAL,
+	.config_init	= bcm63158_config_init,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
+	.read_mmd	= bcm63158_read_mmd,
 } };
 
 module_phy_driver(broadcom_drivers);
@@ -1157,6 +1652,8 @@
 	{ PHY_ID_BCM53125, 0xfffffff0 },
 	{ PHY_ID_BCM53128, 0xfffffff0 },
 	{ PHY_ID_BCM89610, 0xfffffff0 },
+	{ PHY_ID_BCM63138S, 0xfffffff0 },
+	{ PHY_ID_BCM63158, 0xfffffff0 },
 	{ }
 };
 
diff -ruw linux-6.4/drivers/net/phy/mdio_bus.c linux-6.4-fbx/drivers/net/phy/mdio_bus.c
--- linux-6.4/drivers/net/phy/mdio_bus.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/phy/mdio_bus.c	2023-06-27 11:47:15.859861290 +0200
@@ -650,7 +650,7 @@
 int __mdiobus_register(struct mii_bus *bus, struct module *owner)
 {
 	struct mdio_device *mdiodev;
-	struct gpio_desc *gpiod;
+	struct gpio_descs *gpiod;
 	bool prevent_c45_scan;
 	int i, err;
 
@@ -696,7 +696,7 @@
 	mutex_init(&bus->shared_lock);
 
 	/* assert bus level PHY GPIO reset */
-	gpiod = devm_gpiod_get_optional(&bus->dev, "reset", GPIOD_OUT_HIGH);
+	gpiod = devm_gpiod_get_array_optional(&bus->dev, "reset", GPIOD_OUT_HIGH);
 	if (IS_ERR(gpiod)) {
 		err = dev_err_probe(&bus->dev, PTR_ERR(gpiod),
 				    "mii_bus %s couldn't get reset GPIO\n",
@@ -706,7 +706,8 @@
 	} else	if (gpiod) {
 		bus->reset_gpiod = gpiod;
 		fsleep(bus->reset_delay_us);
-		gpiod_set_value_cansleep(gpiod, 0);
+		for (i = 0; i < gpiod->ndescs; i++)
+			gpiod_set_value_cansleep(gpiod->desc[i], 0);
 		if (bus->reset_post_delay_us > 0)
 			fsleep(bus->reset_post_delay_us);
 	}
@@ -748,8 +749,10 @@
 	}
 error_reset_gpiod:
 	/* Put PHYs in RESET to save power */
-	if (bus->reset_gpiod)
-		gpiod_set_value_cansleep(bus->reset_gpiod, 1);
+	if (bus->reset_gpiod) {
+		for (i = 0; i < bus->reset_gpiod->ndescs; i++)
+			gpiod_set_value_cansleep(bus->reset_gpiod->desc[i], 1);
+	}
 
 	device_del(&bus->dev);
 	return err;
@@ -778,8 +781,10 @@
 	}
 
 	/* Put PHYs in RESET to save power */
-	if (bus->reset_gpiod)
-		gpiod_set_value_cansleep(bus->reset_gpiod, 1);
+	if (bus->reset_gpiod) {
+		for (i = 0; i < bus->reset_gpiod->ndescs; i++)
+			gpiod_set_value_cansleep(bus->reset_gpiod->desc[i], 1);
+	}
 
 	device_del(&bus->dev);
 }
diff -ruw linux-6.4/drivers/net/phy/phy-c45.c linux-6.4-fbx/drivers/net/phy/phy-c45.c
--- linux-6.4/drivers/net/phy/phy-c45.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/phy/phy-c45.c	2023-12-05 17:14:42.299715016 +0100
@@ -748,6 +748,16 @@
 		mii_10base_t1_adv_mod_linkmode_t(adv, val);
 	}
 
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+			      phydev->supported_eee)) {
+		val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV2);
+		if (val < 0)
+			return val;
+
+		linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+				 adv, val & MDIO_EEE_2_5GT);
+	}
+
 	return 0;
 }
 
@@ -784,6 +794,16 @@
 		mii_10base_t1_adv_mod_linkmode_t(lpa, val);
 	}
 
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+			      phydev->supported_eee)) {
+		val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE2);
+		if (val < 0)
+			return val;
+
+		linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+				 lpa, val & MDIO_EEE_2_5GT);
+	}
+
 	return 0;
 }
 
@@ -1477,3 +1497,25 @@
 	.name           = "Generic Clause 45 PHY",
 	.read_status    = genphy_c45_read_status,
 };
+
+static int genphy_broken_c45_get_tunable(struct phy_device *phydev,
+					 struct ethtool_tunable *tuna,
+					 void *data)
+{
+	switch (tuna->id) {
+	case ETHTOOL_PHY_BROKEN:
+		*(u8*)data = 1;
+		return 0;
+	default:
+		return -ENOTSUPP;
+	}
+	return 0;
+}
+
+struct phy_driver genphy_broken_c45_driver = {
+	.phy_id         = 0xffffffff,
+	.phy_id_mask    = 0xffffffff,
+	.name           = "Generic Broken Clause 45 PHY",
+	.read_status    = genphy_c45_read_status,
+	.get_tunable	= genphy_broken_c45_get_tunable,
+};
diff -ruw linux-6.4/drivers/net/phy/phy-core.c linux-6.4-fbx/drivers/net/phy/phy-core.c
--- linux-6.4/drivers/net/phy/phy-core.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/phy/phy-core.c	2023-05-22 20:30:14.541854042 +0200
@@ -13,7 +13,7 @@
  */
 const char *phy_speed_to_str(int speed)
 {
-	BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 102,
+	BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 108,
 		"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
 		"If a speed or mode has been added please update phy_speed_to_str "
 		"and the PHY settings array.\n");
@@ -138,10 +138,19 @@
 	case PHY_INTERFACE_MODE_RXAUI:
 	case PHY_INTERFACE_MODE_XAUI:
 	case PHY_INTERFACE_MODE_1000BASEKX:
+	case PHY_INTERFACE_MODE_1000BASEPX_D:
+	case PHY_INTERFACE_MODE_1000BASEPX_U:
+	case PHY_INTERFACE_MODE_10000BASEPR_D:
+	case PHY_INTERFACE_MODE_10000BASEPR_U:
+	case PHY_INTERFACE_MODE_10000_1000_BASEPRX_D:
+	case PHY_INTERFACE_MODE_10000_1000_BASEPRX_U:
 		return 1;
 	case PHY_INTERFACE_MODE_QSGMII:
 	case PHY_INTERFACE_MODE_QUSGMII:
+	case PHY_INTERFACE_MODE_10G_QXGMII:
 		return 4;
+	case PHY_INTERFACE_MODE_PSGMII:
+		return 5;
 	case PHY_INTERFACE_MODE_MAX:
 		WARN_ONCE(1, "PHY_INTERFACE_MODE_MAX isn't a valid interface mode");
 		return 0;
@@ -230,6 +239,10 @@
 	PHY_SETTING(  20000, FULL,  20000baseKR2_Full		),
 	PHY_SETTING(  20000, FULL,  20000baseMLD2_Full		),
 	/* 10G */
+	PHY_SETTING(  10000, FULL,  10000_1000basePRX_D_Full	),
+	PHY_SETTING(  10000, FULL,  10000_1000basePRX_U_Full	),
+	PHY_SETTING(  10000, FULL,  10000basePR_D_Full		),
+	PHY_SETTING(  10000, FULL,  10000basePR_U_Full		),
 	PHY_SETTING(  10000, FULL,  10000baseCR_Full		),
 	PHY_SETTING(  10000, FULL,  10000baseER_Full		),
 	PHY_SETTING(  10000, FULL,  10000baseKR_Full		),
@@ -245,6 +258,8 @@
 	PHY_SETTING(   2500, FULL,   2500baseT_Full		),
 	PHY_SETTING(   2500, FULL,   2500baseX_Full		),
 	/* 1G */
+	PHY_SETTING(   1000, FULL,   1000basePX_D_Full		),
+	PHY_SETTING(   1000, FULL,   1000basePX_U_Full		),
 	PHY_SETTING(   1000, FULL,   1000baseT_Full		),
 	PHY_SETTING(   1000, HALF,   1000baseT_Half		),
 	PHY_SETTING(   1000, FULL,   1000baseT1_Full		),
diff -ruw linux-6.4/drivers/net/phy/phy.c linux-6.4-fbx/drivers/net/phy/phy.c
--- linux-6.4/drivers/net/phy/phy.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/phy/phy.c	2023-05-22 20:06:42.135797731 +0200
@@ -36,7 +36,7 @@
 #include <net/genetlink.h>
 #include <net/sock.h>
 
-#define PHY_STATE_TIME	HZ
+#define PHY_STATE_TIME	(HZ / 2)
 
 #define PHY_STATE_STR(_state)			\
 	case PHY_##_state:			\
diff -ruw linux-6.4/drivers/net/phy/phy_device.c linux-6.4-fbx/drivers/net/phy/phy_device.c
--- linux-6.4/drivers/net/phy/phy_device.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/phy/phy_device.c	2023-06-27 11:47:15.859861290 +0200
@@ -812,7 +812,8 @@
 			return -EIO;
 	}
 
-	if ((devs_in_pkg & 0x1fffffff) == 0x1fffffff) {
+	if ((devs_in_pkg & 0x1fffffff) == 0x1fffffff ||
+	    !devs_in_pkg) {
 		/* If mostly Fs, there is no device there, then let's probe
 		 * MMD 0, as some 10G PHYs have zero Devices In package,
 		 * e.g. Cortina CS4315/CS4340 PHY.
@@ -822,9 +823,13 @@
 			return -EIO;
 
 		/* no device there, let's get out of here */
-		if ((devs_in_pkg & 0x1fffffff) == 0x1fffffff)
+		if ((devs_in_pkg & 0x1fffffff) == 0x1fffffff ||
+		    !devs_in_pkg) {
+			if (bus->keep_broken_phy)
+				return 0;
 			return -ENODEV;
 	}
+	}
 
 	/* Now probe Device Identifiers for each device present. */
 	for (i = 1; i < num_ids; i++) {
@@ -1454,6 +1459,9 @@
 	 */
 	if (!d->driver) {
 		if (phydev->is_c45)
+			if (!phydev->c45_ids.mmds_present)
+				d->driver = &genphy_broken_c45_driver.mdiodrv.driver;
+			else
 			d->driver = &genphy_c45_driver.mdiodrv.driver;
 		else
 			d->driver = &genphy_driver.mdiodrv.driver;
@@ -3463,9 +3471,15 @@
 	if (rc)
 		goto err_c45;
 
+	rc = phy_driver_register(&genphy_broken_c45_driver, THIS_MODULE);
+	if (rc)
+		goto err_c45_broken;
+
 	rc = phy_driver_register(&genphy_driver, THIS_MODULE);
 	if (rc) {
 		phy_driver_unregister(&genphy_c45_driver);
+err_c45_broken:
+		phy_driver_unregister(&genphy_broken_c45_driver);
 err_c45:
 		mdio_bus_exit();
 	}
@@ -3475,6 +3489,7 @@
 
 static void __exit phy_exit(void)
 {
+	phy_driver_unregister(&genphy_broken_c45_driver);
 	phy_driver_unregister(&genphy_c45_driver);
 	phy_driver_unregister(&genphy_driver);
 	mdio_bus_exit();
diff -ruw linux-6.4/drivers/net/phy/phylink.c linux-6.4-fbx/drivers/net/phy/phylink.c
--- linux-6.4/drivers/net/phy/phylink.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/phy/phylink.c	2023-11-24 15:30:07.781950177 +0100
@@ -191,9 +191,13 @@
 	case PHY_INTERFACE_MODE_QUSGMII:
 	case PHY_INTERFACE_MODE_SGMII:
 	case PHY_INTERFACE_MODE_GMII:
+	case PHY_INTERFACE_MODE_PSGMII:
+	case PHY_INTERFACE_MODE_1000BASEPX_D:
+	case PHY_INTERFACE_MODE_1000BASEPX_U:
 		return SPEED_1000;
 
 	case PHY_INTERFACE_MODE_2500BASEX:
+	case PHY_INTERFACE_MODE_10G_QXGMII:
 		return SPEED_2500;
 
 	case PHY_INTERFACE_MODE_5GBASER:
@@ -205,6 +209,10 @@
 	case PHY_INTERFACE_MODE_10GBASER:
 	case PHY_INTERFACE_MODE_10GKR:
 	case PHY_INTERFACE_MODE_USXGMII:
+	case PHY_INTERFACE_MODE_10000BASEPR_D:
+	case PHY_INTERFACE_MODE_10000BASEPR_U:
+	case PHY_INTERFACE_MODE_10000_1000_BASEPRX_D:
+	case PHY_INTERFACE_MODE_10000_1000_BASEPRX_U:
 		return SPEED_10000;
 
 	case PHY_INTERFACE_MODE_25GBASER:
@@ -213,6 +221,7 @@
 	case PHY_INTERFACE_MODE_XLGMII:
 		return SPEED_40000;
 
+
 	case PHY_INTERFACE_MODE_INTERNAL:
 	case PHY_INTERFACE_MODE_NA:
 	case PHY_INTERFACE_MODE_MAX:
@@ -445,7 +454,11 @@
 
 	switch (interface) {
 	case PHY_INTERFACE_MODE_USXGMII:
-		caps |= MAC_10000FD | MAC_5000FD | MAC_2500FD;
+		caps |= MAC_10000FD | MAC_5000FD;
+		fallthrough;
+
+	case PHY_INTERFACE_MODE_10G_QXGMII:
+		caps |= MAC_2500FD;
 		fallthrough;
 
 	case PHY_INTERFACE_MODE_RGMII_TXID:
@@ -456,6 +469,7 @@
 	case PHY_INTERFACE_MODE_QUSGMII:
 	case PHY_INTERFACE_MODE_SGMII:
 	case PHY_INTERFACE_MODE_GMII:
+	case PHY_INTERFACE_MODE_PSGMII:
 		caps |= MAC_1000HD | MAC_1000FD;
 		fallthrough;
 
@@ -479,6 +493,8 @@
 		fallthrough;
 	case PHY_INTERFACE_MODE_1000BASEKX:
 	case PHY_INTERFACE_MODE_TRGMII:
+	case PHY_INTERFACE_MODE_1000BASEPX_D:
+	case PHY_INTERFACE_MODE_1000BASEPX_U:
 		caps |= MAC_1000FD;
 		break;
 
@@ -495,6 +511,10 @@
 	case PHY_INTERFACE_MODE_XAUI:
 	case PHY_INTERFACE_MODE_10GBASER:
 	case PHY_INTERFACE_MODE_10GKR:
+	case PHY_INTERFACE_MODE_10000BASEPR_D:
+	case PHY_INTERFACE_MODE_10000BASEPR_U:
+	case PHY_INTERFACE_MODE_10000_1000_BASEPRX_D:
+	case PHY_INTERFACE_MODE_10000_1000_BASEPRX_U:
 		caps |= MAC_10000FD;
 		break;
 
@@ -507,10 +527,10 @@
 		break;
 
 	case PHY_INTERFACE_MODE_INTERNAL:
+	case PHY_INTERFACE_MODE_NA:
 		caps |= ~0;
 		break;
 
-	case PHY_INTERFACE_MODE_NA:
 	case PHY_INTERFACE_MODE_MAX:
 		break;
 	}
@@ -846,8 +866,13 @@
 		pl->cfg_link_an_mode = MLO_AN_INBAND;
 
 		switch (pl->link_config.interface) {
+		case PHY_INTERFACE_MODE_10G_QXGMII:
+			phylink_set(pl->supported, 2500baseT_Full);
+			phylink_set(pl->supported, 2500baseX_Full);
+			fallthrough;
 		case PHY_INTERFACE_MODE_SGMII:
 		case PHY_INTERFACE_MODE_QSGMII:
+		case PHY_INTERFACE_MODE_PSGMII:
 		case PHY_INTERFACE_MODE_QUSGMII:
 		case PHY_INTERFACE_MODE_RGMII:
 		case PHY_INTERFACE_MODE_RGMII_ID:
@@ -930,6 +955,25 @@
 			phylink_set(pl->supported, 100000baseDR2_Full);
 			break;
 
+		case PHY_INTERFACE_MODE_1000BASEPX_D:
+			phylink_set(pl->supported, 1000basePX_D_Full);
+			break;
+		case PHY_INTERFACE_MODE_1000BASEPX_U:
+			phylink_set(pl->supported, 1000basePX_U_Full);
+			break;
+		case PHY_INTERFACE_MODE_10000BASEPR_D:
+			phylink_set(pl->supported, 10000basePR_D_Full);
+			break;
+		case PHY_INTERFACE_MODE_10000BASEPR_U:
+			phylink_set(pl->supported, 10000basePR_U_Full);
+			break;
+		case PHY_INTERFACE_MODE_10000_1000_BASEPRX_D:
+			phylink_set(pl->supported, 10000_1000basePRX_D_Full);
+			break;
+		case PHY_INTERFACE_MODE_10000_1000_BASEPRX_U:
+			phylink_set(pl->supported, 10000_1000basePRX_U_Full);
+			break;
+
 		default:
 			phylink_err(pl,
 				    "incorrect link mode %s for in-band status\n",
@@ -1916,12 +1960,13 @@
  * desired link mode(s) and negotiation style. This should be called from the
  * network device driver's &struct net_device_ops ndo_open() method.
  */
-void phylink_start(struct phylink *pl)
+static void __phylink_start(struct phylink *pl, bool silent)
 {
 	bool poll = false;
 
 	ASSERT_RTNL();
 
+	if (!silent)
 	phylink_info(pl, "configuring for %s/%s link mode\n",
 		     phylink_an_mode_str(pl->cur_link_an_mode),
 		     phy_modes(pl->link_config.interface));
@@ -1976,6 +2021,18 @@
 }
 EXPORT_SYMBOL_GPL(phylink_start);
 
+void phylink_start(struct phylink *pl)
+{
+	return __phylink_start(pl, false);
+}
+
+void phylink_start_silent(struct phylink *pl)
+{
+	return __phylink_start(pl, true);
+}
+
+EXPORT_SYMBOL_GPL(phylink_start_silent);
+
 /**
  * phylink_stop() - stop a phylink instance
  * @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -2946,10 +3003,10 @@
 	phy_interface_t iface;
 	int ret;
 
-	linkmode_copy(support, phy->supported);
+	linkmode_copy(support, pl->sfp_support);
 
 	memset(&config, 0, sizeof(config));
-	linkmode_copy(config.advertising, phy->advertising);
+	linkmode_copy(config.advertising, pl->sfp_support);
 	config.interface = PHY_INTERFACE_MODE_NA;
 	config.speed = SPEED_UNKNOWN;
 	config.duplex = DUPLEX_UNKNOWN;
@@ -3370,6 +3427,7 @@
 
 	case PHY_INTERFACE_MODE_SGMII:
 	case PHY_INTERFACE_MODE_QSGMII:
+	case PHY_INTERFACE_MODE_PSGMII:
 		phylink_decode_sgmii_word(state, lpa);
 		break;
 	case PHY_INTERFACE_MODE_QUSGMII:
@@ -3551,6 +3609,121 @@
 }
 EXPORT_SYMBOL_GPL(phylink_mii_c45_pcs_get_state);
 
+/*
+ * designed to be called from userland to override current link
+ * interface, used for both testing and to handle SFP from userland.
+ *
+ * While it could theoritically be used on phylink instance with a
+ * phy, phylink_of_phy_connect() or equivalent is called at netdevice
+ * probe time, so it's too late to override phy_interface, thus we
+ * restrict this to instances without phydev.
+ *
+ * For the same reasons, we don't allow this to be set on instance
+ * attached to an SFP bus, since the kernel will do the right thing
+ * when an SFP is plugged.
+ *
+ * The an_enabled flag has to be given because some devices either do
+ * not support autoneg, or only support it for some interfaces, so we
+ * cannot rely on a further ethtool call to enable/disable it; both the
+ * interface and autoneg have to be changed atomically.
+ */
+int phylink_set_interface(struct phylink *pl,
+			  phy_interface_t interface,
+			  bool an_enabled)
+{
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported1);
+	struct phylink_link_state config;
+	bool changed, changed_intf;
+	int ret;
+
+	if (pl->phydev)
+		return -ENOTSUPP;
+
+	if (pl->sfp_bus && pl->sfp_port)
+		return -ENOTSUPP;
+
+	memset(&config, 0, sizeof(config));
+	config.interface = PHY_INTERFACE_MODE_NA;
+	config.speed = SPEED_UNKNOWN;
+	config.duplex = DUPLEX_UNKNOWN;
+	config.pause = MLO_PAUSE_AN;
+	bitmap_fill(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+	linkmode_copy(config.advertising, supported);
+
+	if (!an_enabled)
+		phylink_clear(config.advertising, Autoneg);
+
+	phylink_validate(pl, supported, &config);
+
+	config.interface = interface;
+	linkmode_copy(supported1, supported);
+
+	ret = phylink_validate(pl, supported1, &config);
+	if (ret) {
+		phylink_err(pl,
+			    "validation of %s/%s with support %*pb failed: %d\n",
+			    phylink_an_mode_str(pl->cfg_link_an_mode),
+			    phy_modes(config.interface),
+			    __ETHTOOL_LINK_MODE_MASK_NBITS, supported, ret);
+		return ret;
+	}
+
+	changed = !linkmode_equal(pl->supported, supported1) ||
+		!linkmode_equal(pl->link_config.advertising,
+				config.advertising);
+
+	if (changed) {
+		linkmode_copy(pl->supported, supported1);
+		linkmode_copy(pl->link_config.advertising, config.advertising);
+	}
+
+	changed_intf = (pl->link_config.interface != config.interface);
+
+	if (changed || changed_intf) {
+		if (pl->old_link_state) {
+			phylink_link_down(pl);
+			pl->old_link_state = false;
+		}
+	}
+
+	if (changed_intf) {
+		pl->link_config.interface = config.interface;
+		phylink_info(pl, "switched to %s/%s link mode (userland)\n",
+			     phylink_an_mode_str(pl->cur_link_an_mode),
+			     phy_modes(pl->link_config.interface));
+	}
+
+	if ((changed || changed_intf) &&
+	    !test_bit(PHYLINK_DISABLE_STOPPED,
+		      &pl->phylink_disable_state)) {
+		cancel_work_sync(&pl->resolve);
+		phylink_mac_initial_config(pl, false);
+		phylink_run_resolve(pl);
+	}
+
+	return 0;
+
+}
+
+EXPORT_SYMBOL_GPL(phylink_set_interface);
+
+/*
+ * retrieve current interface & mode
+ */
+void phylink_get_interface(struct phylink *pl,
+			   phy_interface_t *interface,
+			   int *an_en,
+			   int *mode)
+{
+	*interface = pl->link_config.interface;
+	*an_en = linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+				   pl->link_config.advertising);
+	*mode = pl->cfg_link_an_mode;
+}
+
+EXPORT_SYMBOL_GPL(phylink_get_interface);
+
 static int __init phylink_init(void)
 {
 	for (int i = 0; i < ARRAY_SIZE(phylink_sfp_interface_preference); ++i)
diff -ruw linux-6.4/drivers/net/phy/realtek.c linux-6.4-fbx/drivers/net/phy/realtek.c
--- linux-6.4/drivers/net/phy/realtek.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/phy/realtek.c	2023-05-22 20:06:42.135797731 +0200
@@ -12,6 +12,10 @@
 #include <linux/phy.h>
 #include <linux/module.h>
 #include <linux/delay.h>
+#include <linux/of.h>
+#include <dt-bindings/net/realtek-phy-rtl8211f.h>
+
+#include "realtek.h"
 
 #define RTL821x_PHYSR				0x11
 #define RTL821x_PHYSR_DUPLEX			BIT(13)
@@ -72,6 +76,75 @@
 #define RTL_GENERIC_PHYID			0x001cc800
 #define RTL_8211FVD_PHYID			0x001cc878
 
+#define RTL8221B_SERDES_OPT_REG				0x697a
+#define  RTL8221B_SERDES_OPT_MODE_MASK			0x3f
+#define  RTL8221B_SERDES_OPT_MODE_2G5_SGMII		0x00
+#define  RTL8221B_SERDES_OPT_MODE_HISGMII_SGMII		0x01
+#define  RTL8221B_SERDES_OPT_MODE_2G5_ONLY		0x02
+#define  RTL8221B_SERDES_OPT_MODE_HISGMII_ONLY		0x03
+
+#define RTL8221B_SERDES_CTRL3_REG			0x7580
+#define  RTL8221B_SERDES_CTRL3_MODE_MASK		0x1f
+#define  RTL8221B_SERDES_CTRL3_MODE_SGMII		0x02
+#define  RTL8221B_SERDES_CTRL3_MODE_HISGMII		0x12
+#define  RTL8221B_SERDES_CTRL3_MODE_2G5BX		0x16
+#define  RTL8221B_SERDES_CTRL3_MODE_OFF			0x1f
+
+#define RTL8221B_SERDES_CTRL5_REG			0x7582
+#define  RTL8221B_SERDES_CTRL5_FDPX			(1 << 2)
+#define  RTL8221B_SERDES_CTRL5_SPEED_MASK		0x3003
+#define  RTL8221B_SERDES_CTRL5_SPEED_10			0x0000
+#define  RTL8221B_SERDES_CTRL5_SPEED_100		0x0001
+#define  RTL8221B_SERDES_CTRL5_SPEED_1000		0x0002
+#define  RTL8221B_SERDES_CTRL5_SPEED_2G5		0x1001
+#define  RTL8221B_SERDES_CTRL5_SPEED_2G5LITE		0x1003
+
+#define RTL8221B_FEDCR_REG				0xa400
+#define  RTL8221B_FEDCR_PCS_LB_EN			(1 << 14)
+
+#define RTL8221B_GBCR_REG				0xa412
+#define  RTL8221B_GBCR_ADV_1000BaseT			(1 << 9)
+
+#define RTL8221B_GANLPAR_REG				0xa414
+#define  RTL8221B_GANLPAR_LP_1000BaseTFD		(1 << 11)
+#define  RTL8221B_GANLPAR_LP_1000BaseTHD		(1 << 10)
+
+#define RTL8221B_LCR4_REG				0xd036
+#define  LCR4_SELECT_2G5				(1 << 5)
+#define  LCR4_SELECT_1G					(1 << 2)
+#define  LCR4_SELECT_100M				(1 << 1)
+#define  LCR4_SELECT_10M				(1 << 0)
+
+#define RTL8221B_LCR6_REG				0xd040
+#define  LCR6_BLINK_FREQ_MASK				(0x3 << 8)
+#define  LCR6_BLINK_FREQ_20MS				(0x0 << 8)
+#define  LCR6_BLINK_FREQ_40MS				(0x1 << 8)
+#define  LCR6_BLINK_FREQ_60MS				(0x2 << 8)
+#define  LCR6_BLINK_PATTERN_MODE_B			(0x1 << 5)
+
+#define RTL8221B_LCR7_REG				0xd044
+#define  LCR7_LED_EN_MASK(led)				(1 << (4 + (led)))
+#define  LCR7_LED_POL_HI_MASK(led)			(1 << (led))
+
+#define RTL8221B_RX_STATS_SELECT_REG			0xc800
+#define  RX_STATS_SELECT_UTP				0x5a02
+#define  RX_STATS_SELECT_SERDES				0x5a06
+
+#define RTL8221B_RX_STATS_CLEAR_REG			0xc802
+#define  RX_STATS_CLEAR_VAL				0x0073
+
+#define RTL8221B_RX_STATS_GOOD_PKT_LSB			0xc810
+#define RTL8221B_RX_STATS_GOOD_PKT_MSB			0xc812
+#define RTL8221B_RX_STATS_CRC_PKT			0xc814
+
+#define RTL8221B_UNKOWN_0xc40a				0xc40a
+#define RTL8221B_UNKOWN_0xc466				0xc466
+
+#define RTL8211F_LCR_PAGE			0xd04
+#define RTL8211F_LCR_REG			0x10
+#define RTL8211F_LED_MODE_MASK(num)		(0x1b << ((num) * 5))
+#define RTL8211F_LED_MODE_SEL(num, mode)	((mode) << ((num) * 5))
+
 MODULE_DESCRIPTION("Realtek PHY driver");
 MODULE_AUTHOR("Johnson Leung");
 MODULE_LICENSE("GPL");
@@ -80,6 +153,8 @@
 	u16 phycr1;
 	u16 phycr2;
 	bool has_phycr2;
+#define MAX_LEDS 3
+	u8 leds_mode[MAX_LEDS];
 };
 
 static int rtl821x_read_page(struct phy_device *phydev)
@@ -335,12 +410,27 @@
 			    CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
 }
 
+static void rtl8211f_config_led(struct phy_device *phydev)
+{
+	struct rtl821x_priv *priv = phydev->priv;
+	size_t i;
+
+	/* Configure led */
+	for (i = 0; i < MAX_LEDS; ++i) {
+		phy_modify_paged(phydev, RTL8211F_LCR_PAGE, RTL8211F_LCR_REG,
+				RTL8211F_LED_MODE_MASK(i),
+				RTL8211F_LED_MODE_SEL(i, priv->leds_mode[i]));
+	}
+}
+
 static int rtl8211f_config_init(struct phy_device *phydev)
 {
 	struct rtl821x_priv *priv = phydev->priv;
 	struct device *dev = &phydev->mdio.dev;
-	u16 val_txdly, val_rxdly;
 	int ret;
+	u16 val_txdly, val_rxdly;
+
+	rtl8211f_config_led(phydev);
 
 	ret = phy_modify_paged_changed(phydev, 0xa43, RTL8211F_PHYCR1,
 				       RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF,
@@ -432,6 +522,70 @@
 	return 0;
 }
 
+#ifdef CONFIG_OF_MDIO
+static int rtl8211f_dt_led_modes_get(struct phy_device *phydev)
+{
+	struct device *dev = &phydev->mdio.dev;
+	struct device_node *of_node = dev->of_node;
+	struct rtl821x_priv *priv = phydev->priv;
+	int nr, i, ret;
+	char *led_dt_prop = "rtl8211f,led-mode";
+	uint8_t mode[MAX_LEDS << 1];
+
+	if (!of_node)
+		return -ENODEV;
+
+	nr = of_property_read_variable_u8_array(of_node, led_dt_prop, mode, 0,
+			ARRAY_SIZE(mode));
+
+	/* nr should be even */
+	if (nr & 0x1)
+		return -EINVAL;
+
+	ret = -EINVAL;
+	for (i = 0; i < nr; i += 2) {
+		if (mode[i] >= MAX_LEDS)
+			goto out;
+		if ((mode[i + 1] & ~RTL8211F_LED_MODE_MASK(0)) != 0)
+			goto out;
+		priv->leds_mode[mode[i]] = mode[i + 1];
+	}
+
+	ret = 0;
+
+out:
+	return ret;
+}
+
+
+#else
+static int rtl8211f_dt_led_modes_get(struct phy_device *phydev)
+{
+	return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
+static int rtl8211f_probe(struct phy_device *phydev)
+{
+	struct rtl821x_priv *priv;
+	int error;
+	u8 default_mode[MAX_LEDS] = {
+		[0] = RTL8211F_LED_MODE_10M | RTL8211F_LED_MODE_100M |
+			RTL8211F_LED_MODE_1000M | RTL8211F_LED_MODE_ACT,
+		[1] = RTL8211F_LED_MODE_10M | RTL8211F_LED_MODE_100M |
+			RTL8211F_LED_MODE_1000M,
+		[2] = RTL8211F_LED_MODE_1000M | RTL8211F_LED_MODE_ACT,
+	};
+
+	error = rtl821x_probe(phydev);
+	if (error)
+		return error;
+
+	priv = phydev->priv;
+	memcpy(priv->leds_mode, default_mode, sizeof(priv->leds_mode));
+	return rtl8211f_dt_led_modes_get(phydev);
+}
+
 static int rtl8211e_config_init(struct phy_device *phydev)
 {
 	int ret = 0, oldpage;
@@ -849,6 +1003,311 @@
 	return IRQ_HANDLED;
 }
 
+enum {
+	RTL8221B_SERDES_STATS_RX_GOOD_PACKETS,
+	RTL8221B_SERDES_STATS_RX_BAD_CRC_PACKETS,
+
+	RTL8221B_SERDES_STATS_COUNT,
+};
+
+struct rtl8221b_stat {
+	u64 v;
+};
+
+struct rtl8221b_priv {
+	struct rtl8221b_stat stats[RTL8221B_SERDES_STATS_COUNT];
+};
+
+static int rtl8221b_get_sset_count(struct phy_device *phydev)
+{
+	return RTL8221B_SERDES_STATS_COUNT;
+}
+
+struct rtl8221b_stat_desc {
+	const char *name;
+	int reg;
+	int size;
+};
+
+enum {
+	E_STAT_SIZE_U16,
+	E_STAT_SIZE_U32,
+};
+
+struct rtl8221b_stat_desc rtl8221b_stats[RTL8221B_SERDES_STATS_COUNT] = {
+	{
+		.name = "sgmii_rx_good_frames",
+		.reg = RTL8221B_RX_STATS_GOOD_PKT_LSB,
+		.size = E_STAT_SIZE_U32
+	},
+	{
+		.name = "sgmii_rx_crc_frames",
+		.reg = RTL8221B_RX_STATS_CRC_PKT,
+		.size = E_STAT_SIZE_U16
+	},
+};
+
+static void rtl8221b_get_strings(struct phy_device *phydev, u8 *data)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(rtl8221b_stats); ++i) {
+		strscpy(data + i * ETH_GSTRING_LEN, rtl8221b_stats[i].name,
+			ETH_GSTRING_LEN);
+	}
+}
+
+static void rtl8221b_get_stats(struct phy_device *phydev,
+			       struct ethtool_stats *stats,
+			       u64 *data)
+{
+	struct rtl8221b_priv *priv = phydev->priv;
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(rtl8221b_stats); ++i) {
+		struct rtl8221b_stat_desc *st_desc = &rtl8221b_stats[i];
+		struct rtl8221b_stat *st = &priv->stats[i];
+		u32 v;
+
+		switch (st_desc->size) {
+		case E_STAT_SIZE_U16:
+		default:
+			v = phy_read_mmd(phydev, MDIO_MMD_VEND2, st_desc->reg);
+			break;
+		case E_STAT_SIZE_U32:
+			v = phy_read_mmd(phydev, MDIO_MMD_VEND2, st_desc->reg) |
+				(phy_read_mmd(phydev, MDIO_MMD_VEND2,
+					      st_desc->reg + 2) << 16);
+			break;
+		}
+
+		st->v += v;
+		data[i] = st->v;
+	}
+
+	/*
+	 * FIXME: ask for an autoclear feature for the counters; clear
+	 * counters explicitly in the meantime.
+	 */
+	phy_write_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_RX_STATS_CLEAR_REG,
+		      RX_STATS_CLEAR_VAL);
+}
+
+static int rtl8221b_probe(struct phy_device *phydev)
+{
+	phydev->priv = devm_kzalloc(&phydev->mdio.dev,
+				    sizeof (struct rtl8221b_priv), GFP_KERNEL);
+	if (!phydev->priv)
+		return -ENOMEM;
+
+	return realtek_hwmon_probe(phydev);
+}
+
+static int rtl8221b_config_init(struct phy_device *phydev)
+{
+	/*
+	 * reset autoneg and PMA/PMD MMD.
+	 */
+	phy_set_bits_mmd(phydev, MDIO_MMD_AN, MII_BMCR, BMCR_RESET);
+	phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MII_BMCR, BMCR_RESET);
+
+
+	/*
+	 * configure serdes side of the phy so that its speed is
+	 * chosen depending on the copper side autonegotiation.
+	 */
+	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, 0x75f3, BIT(0));
+	phy_modify_mmd(phydev, MDIO_MMD_VEND1, RTL8221B_SERDES_OPT_REG,
+		       RTL8221B_SERDES_OPT_MODE_MASK,
+		       RTL8221B_SERDES_OPT_MODE_2G5_SGMII);
+
+	/*
+	 * serdes mode change will be considered once copper link goes
+	 * down. make it go down by briefly activating the PCS
+	 * loopback feature.
+	 */
+	phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_FEDCR_REG,
+			 RTL8221B_FEDCR_PCS_LB_EN);
+	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_FEDCR_REG,
+			   RTL8221B_FEDCR_PCS_LB_EN);
+
+	/*
+	 * led configuration: enable only LED 2, active low.
+	 */
+	phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_LCR7_REG,
+			 LCR7_LED_EN_MASK(2));
+	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_LCR7_REG,
+			   LCR7_LED_POL_HI_MASK(2));
+	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_LCR7_REG,
+			   LCR7_LED_EN_MASK(1) | LCR7_LED_EN_MASK(0));
+
+	/*
+	 * led blink frequency: 60 ms.
+	 */
+	phy_modify_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_LCR6_REG,
+		       LCR6_BLINK_FREQ_MASK, LCR6_BLINK_FREQ_60MS);
+
+	/*
+	 * led2 selects all possible speeds
+	 */
+	phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_LCR4_REG,
+			 LCR4_SELECT_10M | LCR4_SELECT_100M | LCR4_SELECT_1G |
+			 LCR4_SELECT_2G5);
+
+	/*
+	 * select SerDes side for RX stats.
+	 */
+	phy_write_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_RX_STATS_SELECT_REG,
+		      RX_STATS_SELECT_SERDES);
+	phy_write_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_RX_STATS_CLEAR_REG,
+		      RX_STATS_CLEAR_VAL);
+
+	return 0;
+}
+
+static int rtl8221b_get_features(struct phy_device *phydev)
+{
+	int err;
+
+	err = genphy_c45_pma_read_abilities(phydev);
+	if (err)
+		return err;
+
+	phy_set_max_speed(phydev, SPEED_2500);
+
+	return 0;
+}
+
+static int rtl8221b_config_aneg(struct phy_device *phydev)
+{
+	bool changed = false;
+	int err;
+	u16 v;
+
+	if (phydev->autoneg == AUTONEG_DISABLE)
+		return genphy_c45_pma_setup_forced(phydev);
+
+	err = genphy_c45_an_config_aneg(phydev);
+	if (err < 0)
+		return err;
+	changed = (err > 0) || changed;
+
+	if (err > 0)
+		changed = true;
+
+	/*
+	 * 802.3-C45 doesn't provide a standardised way of advertising
+	 * 1000Base-T support, we have to use vendor registers for
+	 * this.
+	 */
+	v = 0;
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+			      phydev->advertising) ||
+	    linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+			      phydev->advertising))
+		v |= RTL8221B_GBCR_ADV_1000BaseT;
+
+	err = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, RTL8221B_GBCR_REG,
+				     RTL8221B_GBCR_ADV_1000BaseT, v);
+	if (err < 0)
+		return err;
+	changed = (err > 0) || changed;
+
+	return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+
+static int rtl8221b_read_status(struct phy_device *phydev)
+{
+	int err;
+	int v;
+	bool fdpx;
+
+	if (phydev->autoneg == AUTONEG_ENABLE) {
+		/*
+		 * 802.3-C45 doesn't provide a standardised way of
+		 * getting LPA 1000Base-T support, we have to use
+		 * vendor registers for this.
+		 */
+		v = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_GANLPAR_REG);
+		if (v < 0)
+			return v;
+	}
+
+	linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+			 phydev->lp_advertising,
+			 v & RTL8221B_GANLPAR_LP_1000BaseTFD);
+	linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+			 phydev->lp_advertising,
+			 v & RTL8221B_GANLPAR_LP_1000BaseTHD);
+
+
+	err = genphy_c45_read_status(phydev);
+	if (err)
+		return err;
+
+
+	if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
+		return 0;
+
+	/*
+	 * sort out phydev interface depending on the current mode of
+	 * the SERDES on the PHY side.
+	 */
+	v = phy_read_mmd(phydev, MDIO_MMD_VEND1, RTL8221B_SERDES_CTRL3_REG);
+	v &= RTL8221B_SERDES_CTRL3_MODE_MASK;
+
+	switch (v) {
+	case RTL8221B_SERDES_CTRL3_MODE_SGMII:
+		phydev->interface = PHY_INTERFACE_MODE_SGMII;
+		break;
+	case RTL8221B_SERDES_CTRL3_MODE_2G5BX:
+		phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+		break;
+	default:
+		phydev_warn(phydev, "invalid SERDES mode: %u\n", v);
+		return -EIO;
+	}
+
+	v = phy_read_mmd(phydev, MDIO_MMD_VEND1, RTL8221B_SERDES_CTRL5_REG);
+	fdpx = !!(v & RTL8221B_SERDES_CTRL5_FDPX);
+	v &= RTL8221B_SERDES_CTRL5_SPEED_MASK;
+
+	switch (v) {
+	case RTL8221B_SERDES_CTRL5_SPEED_2G5:
+		phydev->speed = SPEED_2500;
+		break;
+	case RTL8221B_SERDES_CTRL5_SPEED_1000:
+		phydev->speed = SPEED_1000;
+		break;
+	case RTL8221B_SERDES_CTRL5_SPEED_100:
+		phydev->speed = SPEED_100;
+		break;
+	case RTL8221B_SERDES_CTRL5_SPEED_10:
+		phydev->speed = SPEED_10;
+		break;
+	default:
+		phydev_warn(phydev, "invalid device speed: %04x\n", v);
+	}
+
+	/*
+	 * XXX: as recommended by realtek for SerDes RX stats.
+	 */
+	if (phydev->speed == 2500) {
+		phy_write_mmd(phydev, MDIO_MMD_VEND2,
+			      RTL8221B_UNKOWN_0xc40a, 0x0);
+		phy_write_mmd(phydev, MDIO_MMD_VEND2,
+			      RTL8221B_UNKOWN_0xc466, 0x2);
+	} else {
+		phy_write_mmd(phydev, MDIO_MMD_VEND2,
+			      RTL8221B_UNKOWN_0xc40a, 0x0);
+		phy_write_mmd(phydev, MDIO_MMD_VEND2,
+			      RTL8221B_UNKOWN_0xc466, 0x0);
+	}
+
+	phydev->duplex = fdpx ? DUPLEX_FULL : DUPLEX_HALF;
+	return 0;
+}
+
 static struct phy_driver realtek_drvs[] = {
 	{
 		PHY_ID_MATCH_EXACT(0x00008201),
@@ -922,7 +1381,7 @@
 	}, {
 		PHY_ID_MATCH_EXACT(0x001cc916),
 		.name		= "RTL8211F Gigabit Ethernet",
-		.probe		= rtl821x_probe,
+		.probe		= &rtl8211f_probe,
 		.config_init	= &rtl8211f_config_init,
 		.read_status	= rtlgen_read_status,
 		.config_intr	= &rtl8211f_config_intr,
@@ -998,6 +1457,7 @@
 		.read_page      = rtl821x_read_page,
 		.write_page     = rtl821x_write_page,
 	}, {
+#if 0
 		PHY_ID_MATCH_EXACT(0x001cc849),
 		.name           = "RTL8221B-VB-CG 2.5Gbps PHY",
 		.get_features   = rtl822x_get_features,
@@ -1008,6 +1468,7 @@
 		.read_page      = rtl821x_read_page,
 		.write_page     = rtl821x_write_page,
 	}, {
+#endif
 		PHY_ID_MATCH_EXACT(0x001cc84a),
 		.name           = "RTL8221B-VM-CG 2.5Gbps PHY",
 		.get_features   = rtl822x_get_features,
@@ -1051,6 +1512,19 @@
 		.handle_interrupt = genphy_handle_interrupt_no_ack,
 		.suspend	= genphy_suspend,
 		.resume		= genphy_resume,
+	}, {
+		PHY_ID_MATCH_EXACT(0x001cc849),
+		.name		= "RTL8221B 2.5 Gigabit Ethernet",
+		.probe		= rtl8221b_probe,
+		.config_init	= rtl8221b_config_init,
+		.suspend	= genphy_suspend,
+		.resume		= genphy_resume,
+		.config_aneg	= rtl8221b_config_aneg,
+		.read_status	= rtl8221b_read_status,
+		.get_features	= rtl8221b_get_features,
+		.get_sset_count	= rtl8221b_get_sset_count,
+		.get_strings	= rtl8221b_get_strings,
+		.get_stats	= rtl8221b_get_stats,
 	},
 };
 
diff -ruw linux-6.4/drivers/net/phy/sfp-bus.c linux-6.4-fbx/drivers/net/phy/sfp-bus.c
--- linux-6.4/drivers/net/phy/sfp-bus.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/phy/sfp-bus.c	2023-05-22 20:06:42.135797731 +0200
@@ -515,6 +515,15 @@
 }
 EXPORT_SYMBOL_GPL(sfp_get_module_eeprom);
 
+/*
+ * sfp_get_sfp_state - report the SFP cage state (FSM state, presence,
+ * LOS/TX-fault inputs, pwren/txdis outputs) via the socket ops.
+ */
+int sfp_get_sfp_state(struct sfp_bus *bus, struct ethtool_sfp_state *st)
+{
+	return bus->socket_ops->get_sfp_state(bus->sfp, st);
+}
+EXPORT_SYMBOL_GPL(sfp_get_sfp_state);
+
 /**
  * sfp_get_module_eeprom_by_page() - Read a page from the SFP module EEPROM
  * @bus: a pointer to the &struct sfp_bus structure for the sfp module
@@ -606,11 +615,6 @@
 	else if (ret < 0)
 		return ERR_PTR(ret);
 
-	if (!fwnode_device_is_available(ref.fwnode)) {
-		fwnode_handle_put(ref.fwnode);
-		return NULL;
-	}
-
 	bus = sfp_bus_get(ref.fwnode);
 	fwnode_handle_put(ref.fwnode);
 	if (!bus)
diff -ruw linux-6.4/drivers/net/phy/sfp.c linux-6.4-fbx/drivers/net/phy/sfp.c
--- linux-6.4/drivers/net/phy/sfp.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/phy/sfp.c	2023-05-22 20:06:42.139797837 +0200
@@ -25,6 +25,7 @@
 	GPIO_TX_FAULT,
 	GPIO_TX_DISABLE,
 	GPIO_RATE_SELECT,
+	GPIO_POWER_ENABLE,
 	GPIO_MAX,
 
 	SFP_F_PRESENT = BIT(GPIO_MODDEF0),
@@ -32,6 +33,7 @@
 	SFP_F_TX_FAULT = BIT(GPIO_TX_FAULT),
 	SFP_F_TX_DISABLE = BIT(GPIO_TX_DISABLE),
 	SFP_F_RATE_SELECT = BIT(GPIO_RATE_SELECT),
+	SFP_F_POWER_ENABLE = BIT(GPIO_POWER_ENABLE),
 
 	SFP_E_INSERT = 0,
 	SFP_E_REMOVE,
@@ -148,6 +150,7 @@
 	"tx-fault",
 	"tx-disable",
 	"rate-select0",
+	"pwr-enable",
 };
 
 static const enum gpiod_flags gpio_flags[] = {
@@ -156,6 +159,7 @@
 	GPIOD_IN,
 	GPIOD_ASIS,
 	GPIOD_ASIS,
+	GPIOD_ASIS,
 };
 
 /* t_start_up (SFF-8431) or t_init (SFF-8472) is the time required for a
@@ -231,6 +235,8 @@
 	const struct sff_data *type;
 	size_t i2c_block_size;
 	u32 max_power_mW;
+	bool force_1000baset_as_1000basex;
+	bool skip_10g_t_phy_detect;
 
 	unsigned int (*get_state)(struct sfp *);
 	void (*set_state)(struct sfp *, unsigned int);
@@ -312,7 +318,7 @@
 
 static const struct sff_data sfp_data = {
 	.gpios = SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT |
-		 SFP_F_TX_DISABLE | SFP_F_RATE_SELECT,
+		 SFP_F_TX_DISABLE | SFP_F_RATE_SELECT | SFP_F_POWER_ENABLE,
 	.module_supported = sfp_module_supported,
 };
 
@@ -502,6 +508,8 @@
 {
 	if (state & SFP_F_PRESENT) {
 		/* If the module is present, drive the signals */
+		if (sfp->gpio[GPIO_POWER_ENABLE])
+			gpiod_direction_output(sfp->gpio[GPIO_POWER_ENABLE], 1);
 		if (sfp->gpio[GPIO_TX_DISABLE])
 			gpiod_direction_output(sfp->gpio[GPIO_TX_DISABLE],
 					       state & SFP_F_TX_DISABLE);
@@ -510,6 +518,8 @@
 					       state & SFP_F_RATE_SELECT);
 	} else {
 		/* Otherwise, let them float to the pull-ups */
+		if (sfp->gpio[GPIO_POWER_ENABLE])
+			gpiod_direction_output(sfp->gpio[GPIO_POWER_ENABLE], 0);
 		if (sfp->gpio[GPIO_TX_DISABLE])
 			gpiod_direction_input(sfp->gpio[GPIO_TX_DISABLE]);
 		if (state & SFP_F_RATE_SELECT)
@@ -1776,6 +1786,7 @@
 		break;
 
 	case MDIO_I2C_C45:
+		if (!sfp->skip_10g_t_phy_detect) /* DT flag set => do NOT probe */
 		err = sfp_sm_probe_phy(sfp, SFP_PHY_ADDR, true);
 		break;
 
@@ -2064,6 +2075,11 @@
 		}
 	}
 
+	if (sfp->force_1000baset_as_1000basex && id.base.e1000_base_t) {
+		id.base.e1000_base_t = 0;
+		id.base.e1000_base_sx = 1;
+	}
+
 	sfp->id = id;
 
 	dev_info(sfp->dev, "module %.*s %.*s rev %.*s sn %.*s dc %.*s\n",
@@ -2190,6 +2206,7 @@
 	switch (sfp->sm_mod_state) {
 	default:
 		if (event == SFP_E_INSERT) {
+			sfp_set_state(sfp, sfp->state);
 			sfp_sm_mod_next(sfp, SFP_MOD_PROBE, T_SERIAL);
 			sfp->sm_mod_tries_init = R_PROBE_RETRY_INIT;
 			sfp->sm_mod_tries = R_PROBE_RETRY_SLOW;
@@ -2573,6 +2590,55 @@
 			page->data, page->length);
 };
 
+static inline u32 to_ethtool_sfp_state(int my_state)
+{
+	switch (my_state) {
+	case SFP_S_DOWN:
+		return ETHTOOL_SFP_S_DOWN;
+	case SFP_S_FAIL:
+		return ETHTOOL_SFP_S_FAIL;
+	case SFP_S_WAIT:
+		return ETHTOOL_SFP_S_WAIT;
+	case SFP_S_INIT:
+		return ETHTOOL_SFP_S_INIT;
+	case SFP_S_INIT_PHY:
+		return ETHTOOL_SFP_S_INIT_PHY;
+	case SFP_S_INIT_TX_FAULT:
+		return ETHTOOL_SFP_S_INIT_TX_FAULT;
+	case SFP_S_WAIT_LOS:
+		return ETHTOOL_SFP_S_WAIT_LOS;
+	case SFP_S_LINK_UP:
+		return ETHTOOL_SFP_S_LINK_UP;
+	case SFP_S_TX_FAULT:
+		return ETHTOOL_SFP_S_TX_FAULT;
+	case SFP_S_REINIT:
+		return ETHTOOL_SFP_S_REINIT;
+	case SFP_S_TX_DISABLE:
+		return ETHTOOL_SFP_S_TX_DISABLE;
+	default:
+		return 0xffffffff;
+	}
+}
+
+static int sfp_state(struct sfp *sfp, struct ethtool_sfp_state *st)
+{
+	memset(st, 0, sizeof (*st));
+
+	st->fsm_state = to_ethtool_sfp_state(sfp->sm_state);
+
+	st->i_presence = !(sfp->state & SFP_F_PRESENT);
+	st->i_rxlos = !!(sfp->state & SFP_F_LOS);
+	st->i_txfault = !!(sfp->state & SFP_F_TX_FAULT);
+
+	/*
+	 * pwren is unconditionally driven whenever an SFP is present.
+	 */
+	st->o_pwren = (sfp->state & SFP_F_PRESENT);
+	st->o_txdis = !!(sfp->state & SFP_F_TX_DISABLE);
+
+	return 0;
+}
+
 static const struct sfp_socket_ops sfp_module_ops = {
 	.attach = sfp_attach,
 	.detach = sfp_detach,
@@ -2581,6 +2647,7 @@
 	.module_info = sfp_module_info,
 	.module_eeprom = sfp_module_eeprom,
 	.module_eeprom_by_page = sfp_module_eeprom_by_page,
+	.get_sfp_state = sfp_state,
 };
 
 static void sfp_timeout(struct work_struct *work)
@@ -2768,6 +2835,12 @@
 	dev_info(sfp->dev, "Host maximum power %u.%uW\n",
 		 sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10);
 
+	sfp->force_1000baset_as_1000basex =
+		device_property_present(&pdev->dev, "force-1000baset-as-1000basex");
+
+	sfp->skip_10g_t_phy_detect =
+		device_property_present(&pdev->dev, "skip-10g-t-phy-detect");
+
 	/* Get the initial state, and always signal TX disable,
 	 * since the network interface will not be up.
 	 */
diff -ruw linux-6.4/drivers/net/phy/sfp.h linux-6.4-fbx/drivers/net/phy/sfp.h
--- linux-6.4/drivers/net/phy/sfp.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/phy/sfp.h	2023-05-22 20:06:42.139797837 +0200
@@ -25,6 +25,7 @@
 	int (*module_eeprom_by_page)(struct sfp *sfp,
 				     const struct ethtool_module_eeprom *page,
 				     struct netlink_ext_ack *extack);
+	int (*get_sfp_state)(struct sfp *sfp, struct ethtool_sfp_state *st);
 };
 
 int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev);
diff -ruw linux-6.4/drivers/net/phy/swphy.c linux-6.4-fbx/drivers/net/phy/swphy.c
--- linux-6.4/drivers/net/phy/swphy.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/phy/swphy.c	2023-03-09 15:06:11.372234439 +0100
@@ -29,6 +29,7 @@
 	SWMII_SPEED_10 = 0,
 	SWMII_SPEED_100,
 	SWMII_SPEED_1000,
+	SWMII_SPEED_2500,
 	SWMII_DUPLEX_HALF = 0,
 	SWMII_DUPLEX_FULL,
 };
@@ -51,6 +52,10 @@
 		.lpagb = LPA_1000FULL | LPA_1000HALF,
 		.estat = ESTATUS_1000_TFULL | ESTATUS_1000_THALF,
 	},
+	[SWMII_SPEED_2500] = {
+		.bmsr  = BMSR_ESTATEN,
+		.lpagb = LPA_1000FULL | LPA_1000HALF,
+	},
 };
 
 static const struct swmii_regs duplex[] = {
@@ -71,6 +76,8 @@
 static int swphy_decode_speed(int speed)
 {
 	switch (speed) {
+	case 2500:
+		return SWMII_SPEED_2500;
 	case 1000:
 		return SWMII_SPEED_1000;
 	case 100:
diff -ruw linux-6.4/drivers/net/ppp/ppp_generic.c linux-6.4-fbx/drivers/net/ppp/ppp_generic.c
--- linux-6.4/drivers/net/ppp/ppp_generic.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/ppp/ppp_generic.c	2023-05-22 20:06:42.139797837 +0200
@@ -178,6 +178,7 @@
 	netns_tracker	ns_tracker;
 	struct list_head clist;		/* link in list of channels per unit */
 	rwlock_t	upl;		/* protects `ppp' and 'bridge' */
+	int		stopped;	/* channel is stopped */
 	struct channel __rcu *bridge;	/* "bridged" ppp channel */
 #ifdef CONFIG_PPP_MULTILINK
 	u8		avail;		/* flag used in multilink stuff */
@@ -1646,10 +1647,28 @@
 			ppp_send_frame(ppp, skb);
 		/* If there's no work left to do, tell the core net
 		   code that we can accept some more. */
-		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
+		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) {
+			/* only re-enable the net queue if at least one
+			 * channel is not stopped */
+			struct list_head *list;
+			struct channel *pch;
+			bool need_wake;
+
+			list = &ppp->channels;
+			need_wake = false;
+			while ((list = list->next) != &ppp->channels) {
+				pch = list_entry(list, struct channel, clist);
+				if (!pch->stopped) {
+					need_wake = true;
+					break;
+				}
+			}
+
+			if (need_wake)
 			netif_wake_queue(ppp->dev);
 		else
 			netif_stop_queue(ppp->dev);
+		}
 	} else {
 		kfree_skb(skb);
 	}
@@ -3005,10 +3024,24 @@
 
 	if (!pch)
 		return;
+	pch->stopped = 0;
 	ppp_channel_push(pch);
 }
 
 /*
+ * Callback from a channel when it wants to prevent further transmit on it
+ */
+void
+ppp_output_stop(struct ppp_channel *chan)
+{
+	struct channel *pch = chan->ppp;
+
+	if (!pch)	/* channel not attached to a unit: nothing to stop */
+		return;
+	pch->stopped = 1;
+}
+
+/*
  * Compression control.
  */
 
@@ -3602,6 +3635,7 @@
 EXPORT_SYMBOL(ppp_input);
 EXPORT_SYMBOL(ppp_input_error);
 EXPORT_SYMBOL(ppp_output_wakeup);
+EXPORT_SYMBOL(ppp_output_stop);
 EXPORT_SYMBOL(ppp_register_compressor);
 EXPORT_SYMBOL(ppp_unregister_compressor);
 MODULE_LICENSE("GPL");
diff -ruw linux-6.4/drivers/net/ppp/pptp.c linux-6.4-fbx/drivers/net/ppp/pptp.c
--- linux-6.4/drivers/net/ppp/pptp.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/ppp/pptp.c	2023-02-27 19:50:24.224303473 +0100
@@ -356,6 +356,7 @@
 	po = lookup_chan(ntohs(header->call_id), iph->saddr);
 	if (po) {
 		skb_dst_drop(skb);
+		skb->mark = 0;
 		nf_reset_ct(skb);
 		return sk_receive_skb(sk_pppox(po), skb, 0);
 	}
diff -ruw linux-6.4/drivers/net/tun.c linux-6.4-fbx/drivers/net/tun.c
--- linux-6.4/drivers/net/tun.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/tun.c	2023-05-22 20:06:42.143797943 +0200
@@ -77,6 +77,10 @@
 #include <net/ax25.h>
 #include <net/rose.h>
 #include <net/6lowpan.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <net/ip.h>
 
 #include <linux/uaccess.h>
 #include <linux/proc_fs.h>
@@ -163,6 +167,31 @@
 	unsigned long updated ____cacheline_aligned_in_smp;
 };
 
+/*
+ * smalltun definitions
+ */
+#define SMALLTUN_MAGIC			0x6660
+#define SMALLTUN_VERSION		0x1
+
+#define TYPE_MASK			0xf
+#define TYPE_CLT			(1 << 3)
+
+#define TYPE_TRIGGER			0x0
+#define TYPE_CHALLENGE			0x1
+#define TYPE_CLIENT_HELLO		0x2
+#define TYPE_SERVER_HELLO		0x3
+
+#define TYPE_CLT_DATA			(TYPE_CLT | 0x0)
+#define TYPE_CLT_GET_PARAMS		(TYPE_CLT | 0x1)
+#define TYPE_CLT_PARAMS			(TYPE_CLT | 0x2)
+
+struct smalltun_pkt_hdr {
+	u16		magic;
+	u8		version;
+	u8		flag_type;
+	u8		data[0];
+};
+
 #define TUN_NUM_FLOW_ENTRIES 1024
 #define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
 
@@ -182,6 +211,11 @@
 	kuid_t			owner;
 	kgid_t			group;
 
+	struct smalltun_fp	smalltun_fps[4];
+	unsigned int		smalltun_valid_count;
+	unsigned int		smalltun_valid[4];
+	struct rtable		*smalltun_rt_cache[4];
+
 	struct net_device	*dev;
 	netdev_features_t	set_features;
 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
@@ -1035,6 +1069,184 @@
 	return 0;
 }
 
+static int smalltun_is_fastpath(struct tun_struct *tun,
+				struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	const struct smalltun_fp *fp;
+	struct rtable **prt_cache, *rt_cache;
+	struct flowi4 fl;
+	bool match;
+	size_t i;
+
+	if (!tun->smalltun_valid_count)
+		return 0;
+
+	if (skb->protocol != htons(ETH_P_IP))
+		return 0;
+
+	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+		return 0;
+
+	iph = ip_hdr(skb);
+
+	/* lookup smalltun fastpath */
+	fp = NULL;
+	rt_cache = NULL;
+	for (i = 0; i < ARRAY_SIZE(tun->smalltun_fps); i++) {
+		if (!tun->smalltun_valid[i])
+			continue;
+
+		if (iph->daddr == tun->smalltun_fps[i].inner_dst) {
+			fp = &tun->smalltun_fps[i];
+			prt_cache = &tun->smalltun_rt_cache[i];
+			break;
+		}
+	}
+
+	if (!fp)
+		return 0;
+
+	if (fp->af != AF_INET) {
+		/* FIXME: implement IPv6 transport */
+		return 0;
+	}
+
+	if (!pskb_may_pull(skb, iph->ihl * 4))
+		return 0;
+	iph = ip_hdr(skb); /* pull may reallocate skb head; refetch. NOTE(review): port reads below still assume 4 extra linear bytes — confirm */
+	match = false;
+	for (i = 0; i < fp->rule_count; i++) {
+		const struct smalltun_rule *r = &fp->rules[i];
+		unsigned int sport, dport;
+
+		if (iph->protocol != r->proto)
+			continue;
+
+		switch (iph->protocol) {
+		case IPPROTO_UDP:
+		{
+			const struct udphdr *udp;
+			udp = (struct udphdr *)((u8 *)iph + (iph->ihl << 2));
+			sport = ntohs(udp->source);
+	                dport = ntohs(udp->dest);
+			break;
+		}
+		case IPPROTO_TCP:
+		{
+			const struct tcphdr *tcp;
+			tcp = (struct tcphdr *)((u8 *)iph + (iph->ihl << 2));
+			sport = ntohs(tcp->source);
+			dport = ntohs(tcp->dest);
+			break;
+		}
+		default:
+			match = true;
+			break;
+		}
+
+		if (match)
+			break;
+
+		if (r->src_port_start && r->src_port_end) {
+			if (sport < ntohs(r->src_port_start) ||
+			    sport > ntohs(r->src_port_end))
+				continue;
+		}
+
+		if (r->dst_port_start && r->dst_port_end) {
+			if (dport < ntohs(r->dst_port_start) ||
+			    dport > ntohs(r->dst_port_end))
+				continue;
+		}
+		match = true;
+	}
+
+	if (!match)
+		return 0;
+
+	if (fp->af == AF_INET) {
+		struct iphdr *oiph;
+		struct udphdr *oudph;
+		struct smalltun_pkt_hdr *pkt;
+		unsigned int payload_len;
+
+		payload_len = skb->len;
+
+		if (skb_cow_head(skb,
+				 sizeof (struct iphdr) +
+				 sizeof (struct udphdr) +
+				 sizeof (struct smalltun_pkt_hdr)))
+			return 0;
+
+		pkt = skb_push(skb, sizeof (struct smalltun_pkt_hdr));
+		oudph = skb_push(skb, sizeof (struct udphdr));
+		skb_reset_transport_header(skb);
+		oiph = skb_push(skb, sizeof (struct iphdr));
+		skb_reset_network_header(skb);
+
+		/* ip */
+		oiph->version = 4;
+		oiph->tos = 0;
+		oiph->id = 0;
+		oiph->ihl = 5;
+		oiph->frag_off = 0;
+		oiph->ttl = 64;
+		oiph->protocol = IPPROTO_UDP;
+		memcpy(&oiph->saddr, fp->outer_src, 4);
+		memcpy(&oiph->daddr, fp->outer_dst, 4);
+
+		/* udp */
+		oudph->source = fp->outer_src_port;
+		oudph->dest = fp->outer_dst_port;
+		oudph->len = htons(payload_len + sizeof (*oudph) +
+				   sizeof (*pkt));
+		oudph->check = 0;
+
+		/* smalltun */
+		pkt->magic = htons(SMALLTUN_MAGIC);
+		pkt->version = SMALLTUN_VERSION;
+		pkt->flag_type = TYPE_CLT_DATA;
+
+		memset(&fl, 0x00, sizeof (fl));
+		memcpy(&fl.saddr, fp->outer_src, 4);
+		memcpy(&fl.daddr, fp->outer_dst, 4);
+
+		if (*prt_cache && (*prt_cache)->dst.obsolete > 0) {
+			rt_cache = *prt_cache;
+			*prt_cache = NULL;
+			ip_rt_put(rt_cache);
+		}
+
+		rt_cache = *prt_cache;
+		if (!rt_cache) {
+			rt_cache = ip_route_output_key(&init_net, &fl);
+			if (IS_ERR(rt_cache)) {
+				pr_err("ip_route_output_key(%pI4): %li\n",
+				       &fl.daddr, PTR_ERR(rt_cache));
+				return 0;
+			}
+
+			if (!rt_cache->dst.dev) {
+				pr_err("ip_route_output_key(%pI4): no dev\n",
+				       &fl.daddr);
+				return 0;
+			}
+
+			*prt_cache = rt_cache;
+		}
+
+		skb_dst_set(skb, dst_clone(&rt_cache->dst));
+		skb->dev = skb_dst(skb)->dev;
+		ip_local_out(&init_net, NULL, skb);
+		return 1;
+	}
+
+	/* find route */
+
+	return 0;
+}
+
 /* Net device start xmit */
 static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
 {
@@ -1127,6 +1339,11 @@
 	 */
 	skb_orphan(skb);
 
+	if (smalltun_is_fastpath(tun, skb)) {
+		rcu_read_unlock();
+		return NETDEV_TX_OK;
+	}
+
 	nf_reset_ct(skb);
 
 	if (ptr_ring_produce(&tfile->tx_ring, skb)) {
@@ -3377,6 +3594,104 @@
 		ret = open_related_ns(&net->ns, get_net_ns);
 		break;
 
+	case TUNSMALLTUNSETFP:
+	{
+		struct smalltun_fp fp;
+		unsigned int i;
+		int free_idx;
+
+		ret = -EFAULT;
+		if (copy_from_user(&fp, argp, sizeof(fp)))
+			break;
+
+		/* look for duplicate */
+		ret = 0;
+		free_idx = -1;
+		for (i = 0; i < ARRAY_SIZE(tun->smalltun_fps); i++) {
+			if (!tun->smalltun_valid[i]) {
+				if (free_idx == -1)
+					free_idx = i;
+				continue;
+			}
+
+			if (fp.inner_src == tun->smalltun_fps[i].inner_src &&
+			    fp.inner_dst == tun->smalltun_fps[i].inner_dst) {
+				ret = -EEXIST;
+				break;
+			}
+		}
+
+		if (ret)
+			break;
+
+		if (free_idx == -1) {
+			ret = -ENOSPC;
+			break;
+		}
+
+		memcpy(&tun->smalltun_fps[free_idx], &fp, sizeof (fp));
+		tun->smalltun_valid[free_idx] = 1;
+		tun->smalltun_valid_count++;
+		netif_info(tun, tx_queued, tun->dev,
+			   "new fp rule for %pI4 <=> %pI4 (%u rules)\n",
+			   &fp.inner_src,
+			   &fp.inner_dst,
+			   fp.rule_count);
+
+		if (fp.af == AF_INET) {
+			netif_info(tun, tx_queued, tun->dev,
+				   "outer %pI4:%u <=> %pI4:%u\n",
+				   fp.outer_src,
+				   ntohs(fp.outer_src_port),
+				   fp.outer_dst,
+				   ntohs(fp.outer_dst_port));
+		} else {
+			netif_info(tun, tx_queued, tun->dev,
+				   "outer %pI6:%u <=> %pI6:%u\n",
+				   fp.outer_src,
+				   ntohs(fp.outer_src_port),
+				   fp.outer_dst,
+				   ntohs(fp.outer_dst_port));
+		}
+		break;
+	}
+
+	case TUNSMALLTUNDELFP:
+	{
+		struct smalltun_fp fp;
+		unsigned int i;
+
+		ret = -EFAULT;
+		if (copy_from_user(&fp, argp, sizeof(fp)))
+			break;
+
+		/* lookup: only valid slots, else valid_count underflows */
+		ret = -ENOENT;
+		for (i = 0; i < ARRAY_SIZE(tun->smalltun_fps); i++) {
+			if (tun->smalltun_valid[i] && fp.inner_src == tun->smalltun_fps[i].inner_src &&
+			    fp.inner_dst == tun->smalltun_fps[i].inner_dst) {
+				ret = 0;
+				break;
+			}
+		}
+
+		if (ret)
+			break;
+
+		tun->smalltun_valid[i] = 0;
+		tun->smalltun_valid_count--;
+		if (tun->smalltun_rt_cache[i]) {
+			ip_rt_put(tun->smalltun_rt_cache[i]);
+			tun->smalltun_rt_cache[i] = NULL;
+		}
+
+		netif_info(tun, tx_queued, tun->dev,
+			   "removed fp rule for %pI4 <=> %pI4\n",
+			   &fp.inner_src,
+			   &fp.inner_dst);
+		break;
+	}
+
 	default:
 		ret = -EINVAL;
 		break;
diff -ruw linux-6.4/drivers/net/wireguard/device.c linux-6.4-fbx/drivers/net/wireguard/device.c
--- linux-6.4/drivers/net/wireguard/device.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireguard/device.c	2023-11-07 13:38:43.994254834 +0100
@@ -20,6 +20,7 @@
 #include <linux/icmp.h>
 #include <linux/suspend.h>
 #include <net/dst_metadata.h>
+#include <net/gso.h>
 #include <net/icmp.h>
 #include <net/rtnetlink.h>
 #include <net/ip_tunnels.h>
diff -ruw linux-6.4/drivers/net/wireless/ath/Kconfig linux-6.4-fbx/drivers/net/wireless/ath/Kconfig
--- linux-6.4/drivers/net/wireless/ath/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/Kconfig	2023-05-22 20:06:42.179798901 +0200
@@ -37,6 +37,9 @@
 	  This option enables tracepoints for atheros wireless drivers.
 	  Currently, ath9k makes use of this facility.
 
+config ATH_REG_IGNORE
+	bool "ignore all eeprom regulation"
+
 config ATH_REG_DYNAMIC_USER_REG_HINTS
 	bool "Atheros dynamic user regulatory hints"
 	depends on CFG80211_CERTIFICATION_ONUS
diff -ruw linux-6.4/drivers/net/wireless/ath/ath.h linux-6.4-fbx/drivers/net/wireless/ath/ath.h
--- linux-6.4/drivers/net/wireless/ath/ath.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath.h	2023-05-22 20:06:42.183799007 +0200
@@ -47,6 +47,7 @@
 	u32 rx_busy;
 	u32 rx_frame;
 	u32 tx_frame;
+	u32 rx_bss_frame;
 };
 
 enum ath_device_state {
@@ -186,6 +187,8 @@
 
 	int last_rssi;
 	struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
+
+	int dfs_pulse_valid_diff_ts;
 };
 
 static inline const struct ath_ps_ops *ath_ps_ops(struct ath_common *common)
diff -ruw linux-6.4/drivers/net/wireless/ath/ath10k/core.c linux-6.4-fbx/drivers/net/wireless/ath/ath10k/core.c
--- linux-6.4/drivers/net/wireless/ath/ath10k/core.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath10k/core.c	2023-05-22 20:06:42.183799007 +0200
@@ -83,8 +83,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -95,6 +94,7 @@
 		.hw_filter_reset_required = true,
 		.fw_diag_ce_download = false,
 		.credit_size_workaround = false,
+		.uart_pin_workaround = true,
 		.tx_stats_over_pktlog = true,
 		.dynamic_sar_support = false,
 		.hw_restart_disconnect = false,
@@ -123,8 +123,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -164,8 +163,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -240,8 +238,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -280,8 +277,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -320,8 +316,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -363,8 +358,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -410,8 +404,7 @@
 		.decap_align_bytes = 1,
 		.spectral_bin_discard = 4,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 11,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -463,8 +456,9 @@
 		/* Can do only 2x2 VHT160 or 80+80. 1560Mbps is 4x4 80Mhz
 		 * or 2x2 160Mhz, long-guard-interval.
 		 */
-		.vht160_mcs_rx_highest = 1560,
-		.vht160_mcs_tx_highest = 1560,
+		.vht_need_ext_nss = true,
+		.vht_over_supp_chan_width = 0,
+		.vht_over_ext_nss_bw = 2,
 		.n_cipher_suites = 11,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -513,8 +507,9 @@
 		/* Can do only 1x1 VHT160 or 80+80. 780Mbps is 2x2 80Mhz or
 		 * 1x1 160Mhz, long-guard-interval.
 		 */
-		.vht160_mcs_rx_highest = 780,
-		.vht160_mcs_tx_highest = 780,
+		.vht_need_ext_nss = true,
+		.vht_over_supp_chan_width = 0,
+		.vht_over_ext_nss_bw = 2,
 		.n_cipher_suites = 11,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -553,8 +548,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -595,8 +589,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -675,8 +668,7 @@
 		.decap_align_bytes = 1,
 		.spectral_bin_discard = 4,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 11,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -1241,6 +1233,7 @@
 static int ath10k_fetch_cal_file(struct ath10k *ar)
 {
 	char filename[100];
+	unsigned int i;
 
 	/* pre-cal-<bus>-<id>.bin */
 	scnprintf(filename, sizeof(filename), "pre-cal-%s-%s.bin",
@@ -1254,6 +1247,11 @@
 	scnprintf(filename, sizeof(filename), "cal-%s-%s.bin",
 		  ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
 
+	for (i = 0; filename[i]; i++) {
+		if (filename[i] == ':')
+			filename[i] = '_';
+	}
+
 	ar->cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
 	if (IS_ERR(ar->cal_file))
 		/* calibration file is optional, don't print any warnings */
@@ -1465,6 +1463,7 @@
 					      const char *boardname,
 					      const char *fallback_boardname1,
 					      const char *fallback_boardname2,
+					      const char *pci_boardname,
 					      const char *filename)
 {
 	size_t len, magic_len;
@@ -1509,7 +1508,11 @@
 	data += magic_len;
 	len -= magic_len;
 
-	/* attempt to find boardname in the IE list */
+	/* attempt to find pci_boardname in the IE list */
+	ret = ath10k_core_search_bd(ar, pci_boardname, data, len);
+
+	/* if we didn't find it, fall back to the regular board name */
+	if (ret == -ENOENT)
 	ret = ath10k_core_search_bd(ar, boardname, data, len);
 
 	/* if we didn't find it and have a fallback name, try that */
@@ -1521,8 +1524,8 @@
 
 	if (ret == -ENOENT) {
 		ath10k_err(ar,
-			   "failed to fetch board data for %s from %s/%s\n",
-			   boardname, ar->hw_params.fw.dir, filename);
+			   "failed to fetch board data for %s or %s from %s/%s\n",
+			   boardname, pci_boardname, ar->hw_params.fw.dir, filename);
 		ret = -ENODATA;
 	}
 
@@ -1538,7 +1541,8 @@
 
 static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
 					 size_t name_len, bool with_variant,
-					 bool with_chip_id)
+					 bool with_chip_id,
+					 bool force_pci_id)
 {
 	/* strlen(',variant=') + strlen(ar->id.bdf_ext) */
 	char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 };
@@ -1547,7 +1551,7 @@
 		scnprintf(variant, sizeof(variant), ",variant=%s",
 			  ar->id.bdf_ext);
 
-	if (ar->id.bmi_ids_valid) {
+	if (ar->id.bmi_ids_valid && !force_pci_id) {
 		scnprintf(name, name_len,
 			  "bus=%s,bmi-chip-id=%d,bmi-board-id=%d%s",
 			  ath10k_bus_str(ar->hif.bus),
@@ -1556,7 +1560,7 @@
 		goto out;
 	}
 
-	if (ar->id.qmi_ids_valid) {
+	if (ar->id.qmi_ids_valid && !force_pci_id) {
 		if (with_chip_id)
 			scnprintf(name, name_len,
 				  "bus=%s,qmi-board-id=%x,qmi-chip-id=%x%s",
@@ -1601,14 +1605,15 @@
 
 int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type)
 {
-	char boardname[100], fallback_boardname1[100], fallback_boardname2[100];
+	char boardname[100], fallback_boardname1[100], fallback_boardname2[100],
+		pci_boardname[100];
 	int ret;
 
 	if (bd_ie_type == ATH10K_BD_IE_BOARD) {
 		/* With variant and chip id */
 		ret = ath10k_core_create_board_name(ar, boardname,
 						    sizeof(boardname), true,
-						    true);
+						    true, false);
 		if (ret) {
 			ath10k_err(ar, "failed to create board name: %d", ret);
 			return ret;
@@ -1617,7 +1622,7 @@
 		/* Without variant and only chip-id */
 		ret = ath10k_core_create_board_name(ar, fallback_boardname1,
 						    sizeof(boardname), false,
-						    true);
+						    true, false);
 		if (ret) {
 			ath10k_err(ar, "failed to create 1st fallback board name: %d",
 				   ret);
@@ -1627,12 +1632,20 @@
 		/* Without variant and without chip-id */
 		ret = ath10k_core_create_board_name(ar, fallback_boardname2,
 						    sizeof(boardname), false,
-						    false);
+						    false, false);
 		if (ret) {
 			ath10k_err(ar, "failed to create 2nd fallback board name: %d",
 				   ret);
 			return ret;
 		}
+
+		ret = ath10k_core_create_board_name(ar, pci_boardname,
+						    sizeof(pci_boardname),
+						    false, false, true);
+		if (ret) {
+			ath10k_err(ar, "failed to create pci board name: %d", ret);
+			return ret;
+		}
 	} else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) {
 		ret = ath10k_core_create_eboard_name(ar, boardname,
 						     sizeof(boardname));
@@ -1646,6 +1659,7 @@
 	ret = ath10k_core_fetch_board_data_api_n(ar, boardname,
 						 fallback_boardname1,
 						 fallback_boardname2,
+						 pci_boardname,
 						 ATH10K_BOARD_API2_FILE);
 	if (!ret)
 		goto success;
@@ -2540,6 +2554,7 @@
 
 	switch (ar->state) {
 	case ATH10K_STATE_ON:
+	case ATH10K_STATE_PRE_ON:
 		ar->state = ATH10K_STATE_RESTARTING;
 		ath10k_halt(ar);
 		ath10k_scan_finish(ar);
diff -ruw linux-6.4/drivers/net/wireless/ath/ath10k/core.h linux-6.4-fbx/drivers/net/wireless/ath/ath10k/core.h
--- linux-6.4/drivers/net/wireless/ath/ath10k/core.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath10k/core.h	2023-05-22 20:06:42.183799007 +0200
@@ -698,10 +698,12 @@
 	void *cal_data;
 	u32 enable_extd_tx_stats;
 	u8 fw_dbglog_mode;
+	u32 burst_dur[4];
 };
 
 enum ath10k_state {
 	ATH10K_STATE_OFF = 0,
+	ATH10K_STATE_PRE_ON,
 	ATH10K_STATE_ON,
 
 	/* When doing firmware recovery the device is first powered down.
@@ -1018,6 +1020,7 @@
 		void *vaddr;
 	} msa;
 	u8 mac_addr[ETH_ALEN];
+	const char *fem_name;
 
 	enum ath10k_hw_rev hw_rev;
 	u16 dev_id;
@@ -1203,6 +1206,8 @@
 	struct work_struct restart_work;
 	struct work_struct bundle_tx_work;
 	struct work_struct tx_complete_work;
+	struct work_struct powerup_work;
+	bool powerup_pending;
 
 	/* cycle count is reported twice for each visited channel during scan.
 	 * access protected by data_lock
diff -ruw linux-6.4/drivers/net/wireless/ath/ath10k/debug.c linux-6.4-fbx/drivers/net/wireless/ath/ath10k/debug.c
--- linux-6.4/drivers/net/wireless/ath/ath10k/debug.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath10k/debug.c	2023-05-22 20:06:42.183799007 +0200
@@ -2513,6 +2513,79 @@
 	.llseek = default_llseek,
 };
 
+static ssize_t ath10k_write_burst_dur(struct file *file, const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	/* parse "AC0 AC1 AC2 AC3" burst durations (usec) from user space */
+	struct ath10k *ar = file->private_data;
+	u32 dur[4];
+	int ret;
+	int ac;
+	char buf[128] = { 0 };	/* zero-fill: sscanf must not see stack garbage */
+
+	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+	/* make sure that buf is null terminated */
+	buf[sizeof(buf) - 1] = 0;
+
+	ret = sscanf(buf, "%u %u %u %u", &dur[0], &dur[1], &dur[2], &dur[3]);
+
+	if (ret != 4)	/* require all four ACs, else dur[] is partly unset */
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON &&
+	    ar->state != ATH10K_STATE_RESTARTED) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	for (ac = 0; ac < 4; ac++) {
+		if (dur[ac] < MIN_BURST_DUR || dur[ac] > MAX_BURST_DUR) {
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->aggr_burst,
+						(SM(ac, ATH10K_AGGR_BURST_AC) |
+						SM(dur[ac], ATH10K_AGGR_BURST_DUR)));
+		if (ret) {
+			ath10k_warn(ar, "failed to set aggr burst duration for ac %d: %d\n", ac, ret);
+			goto exit;
+		}
+		ar->debug.burst_dur[ac] = dur[ac];
+	}
+
+	ret = count;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* report the four cached per-AC burst durations as "AC0 AC1 AC2 AC3\n" */
+static ssize_t ath10k_read_burst_dur(struct file *file, char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	char buf[128];
+	int len;
+
+	len = scnprintf(buf, sizeof(buf), "%u %u %u %u\n",
+			ar->debug.burst_dur[0], ar->debug.burst_dur[1],
+			ar->debug.burst_dur[2], ar->debug.burst_dur[3]);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+/* debugfs "burst_dur": read/write the per-AC aggregation burst durations */
+static const struct file_operations fops_burst_dur = {
+	.read = ath10k_read_burst_dur,
+	.write = ath10k_write_burst_dur,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
 int ath10k_debug_create(struct ath10k *ar)
 {
 	ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
@@ -2600,6 +2673,9 @@
 	debugfs_create_file("ani_enable", 0600, ar->debug.debugfs_phy, ar,
 			    &fops_ani_enable);
 
+	debugfs_create_file("burst_dur", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_burst_dur);
+
 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
 		debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_phy,
 				    ar, &fops_simulate_radar);
diff -ruw linux-6.4/drivers/net/wireless/ath/ath10k/debug.h linux-6.4-fbx/drivers/net/wireless/ath/ath10k/debug.h
--- linux-6.4/drivers/net/wireless/ath/ath10k/debug.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath10k/debug.h	2023-02-27 20:52:16.423384193 +0100
@@ -81,6 +81,15 @@
 __printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...);
 __printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...);
 
+#define ATH10K_AGGR_BURST_AC_MASK  0xff000000
+#define ATH10K_AGGR_BURST_AC_LSB   24
+#define ATH10K_AGGR_BURST_DUR_MASK 0x00ffffff
+#define ATH10K_AGGR_BURST_DUR_LSB  0
+
+/* burst duration in usec */
+#define MIN_BURST_DUR 0
+#define MAX_BURST_DUR 8000
+
 void ath10k_debug_print_hwfw_info(struct ath10k *ar);
 void ath10k_debug_print_board_info(struct ath10k *ar);
 void ath10k_debug_print_boot_info(struct ath10k *ar);
diff -ruw linux-6.4/drivers/net/wireless/ath/ath10k/hw.h linux-6.4-fbx/drivers/net/wireless/ath/ath10k/hw.h
--- linux-6.4/drivers/net/wireless/ath/ath10k/hw.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath10k/hw.h	2023-05-22 20:06:42.187799114 +0200
@@ -579,11 +579,14 @@
 	/* Number of bytes to be discarded for each FFT sample */
 	int spectral_bin_discard;
 
-	/* The board may have a restricted NSS for 160 or 80+80 vs what it
-	 * can do for 80Mhz.
+	/* The board may have a restricted NSS for 160 or 80+80 vs
+	 * what it can do for 80Mhz. To handle this, we have to use
+	 * Extended NSS support and overrides VHT capabilities from
+	 * firmware
 	 */
-	int vht160_mcs_rx_highest;
-	int vht160_mcs_tx_highest;
+	bool vht_need_ext_nss;
+	u32 vht_over_supp_chan_width;
+	u32 vht_over_ext_nss_bw;
 
 	/* Number of ciphers supported (i.e First N) in cipher_suites array */
 	int n_cipher_suites;
diff -ruw linux-6.4/drivers/net/wireless/ath/ath10k/mac.c linux-6.4-fbx/drivers/net/wireless/ath/ath10k/mac.c
--- linux-6.4/drivers/net/wireless/ath/ath10k/mac.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath10k/mac.c	2023-05-22 20:06:42.191799220 +0200
@@ -2524,30 +2524,6 @@
 	return tx_mcs_set;
 }
 
-static u32 get_160mhz_nss_from_maxrate(int rate)
-{
-	u32 nss;
-
-	switch (rate) {
-	case 780:
-		nss = 1;
-		break;
-	case 1560:
-		nss = 2;
-		break;
-	case 2106:
-		nss = 3; /* not support MCS9 from spec*/
-		break;
-	case 3120:
-		nss = 4;
-		break;
-	default:
-		 nss = 1;
-	}
-
-	return nss;
-}
-
 static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
 				    struct ieee80211_vif *vif,
 				    struct ieee80211_sta *sta,
@@ -2555,13 +2531,16 @@
 {
 	const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
-	struct ath10k_hw_params *hw = &ar->hw_params;
+	enum ieee80211_vht_chanwidth bw;
 	struct cfg80211_chan_def def;
 	enum nl80211_band band;
 	const u16 *vht_mcs_mask;
 	u8 ampdu_factor;
-	u8 max_nss, vht_mcs;
-	int i;
+	u8 rx_nss;
+	struct ieee80211_vht_cap ieee_vht_cap = {
+		.vht_cap_info = cpu_to_le32(vht_cap->cap),
+		.supp_mcs = vht_cap->vht_mcs,
+	};
 
 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
 		return;
@@ -2604,15 +2583,20 @@
 	/* Calculate peer NSS capability from VHT capabilities if STA
 	 * supports VHT.
 	 */
-	for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
-		vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
-			  (2 * i) & 3;
-
-		if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) &&
-		    vht_mcs_mask[i])
-			max_nss = i + 1;
+	switch (arg->peer_phymode) {
+	case MODE_11AC_VHT160:
+		bw = IEEE80211_VHT_CHANWIDTH_160MHZ;
+		break;
+	case MODE_11AC_VHT80_80:
+		bw = IEEE80211_VHT_CHANWIDTH_80P80MHZ;
+		break;
+	default:
+		bw = IEEE80211_VHT_CHANWIDTH_80MHZ;
+		break;
 	}
-	arg->peer_num_spatial_streams = min(sta->deflink.rx_nss, max_nss);
+
+	rx_nss = ieee80211_get_vht_max_nss(&ieee_vht_cap, bw, 0, true, 0);
+	arg->peer_num_spatial_streams = rx_nss;
 	arg->peer_vht_rates.rx_max_rate =
 		__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
 	arg->peer_vht_rates.rx_mcs_set =
@@ -2627,20 +2611,6 @@
 	 */
 	if (arg->peer_phymode == MODE_11AC_VHT160 ||
 	    arg->peer_phymode == MODE_11AC_VHT80_80) {
-		u32 rx_nss;
-		u32 max_rate;
-
-		max_rate = arg->peer_vht_rates.rx_max_rate;
-		rx_nss = get_160mhz_nss_from_maxrate(max_rate);
-
-		if (rx_nss == 0)
-			rx_nss = arg->peer_num_spatial_streams;
-		else
-			rx_nss = min(arg->peer_num_spatial_streams, rx_nss);
-
-		max_rate = hw->vht160_mcs_tx_highest;
-		rx_nss = min(rx_nss, get_160mhz_nss_from_maxrate(max_rate));
-
 		arg->peer_bw_rxnss_override =
 			FIELD_PREP(WMI_PEER_NSS_MAP_ENABLE, 1) |
 			FIELD_PREP(WMI_PEER_NSS_160MHZ_MASK, (rx_nss - 1));
@@ -4904,14 +4874,18 @@
 	vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
 
 	/* If we are supporting 160Mhz or 80+80, then the NIC may be able to do
-	 * a restricted NSS for 160 or 80+80 vs what it can do for 80Mhz.  Give
-	 * user-space a clue if that is the case.
+	 * a restricted NSS for 160 or 80+80 vs what it can do for 80Mhz.
 	 */
 	if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) &&
-	    (hw->vht160_mcs_rx_highest != 0 ||
-	     hw->vht160_mcs_tx_highest != 0)) {
-		vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest);
-		vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest);
+	    hw->vht_need_ext_nss) {
+		vht_cap.cap &= ~(IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK |
+				 IEEE80211_VHT_CAP_EXT_NSS_BW_MASK);
+		vht_cap.cap |= hw->vht_over_supp_chan_width <<
+			IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_SHIFT;
+		vht_cap.cap |= hw->vht_over_ext_nss_bw <<
+			IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT;
+		vht_cap.vht_mcs.tx_highest |=
+			cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
 	}
 
 	return vht_cap;
@@ -5064,13 +5038,18 @@
 	const char *fem_name;
 	int ret;
 
+	if (ar->fem_name)
+		fem_name = ar->fem_name;
+	else {
 	node = ar->dev->of_node;
 	if (!node)
 		return -ENOENT;
 
-	ret = of_property_read_string_index(node, "ext-fem-name", 0, &fem_name);
+		ret = of_property_read_string_index(node, "ext-fem-name",
+						    0, &fem_name);
 	if (ret)
 		return -ENOENT;
+	}
 
 	/*
 	 * If external Front End module used in hardware, then default base band timing
@@ -5146,12 +5125,83 @@
 	return 0;
 }
 
+static int ath10k_get_powered(struct ieee80211_hw *hw, bool *up, bool *busy)
+{
+	struct ath10k *ar = hw->priv;
+	*up = (ar->state == ATH10K_STATE_ON ||
+	       ar->state == ATH10K_STATE_PRE_ON);
+	*busy = ar->powerup_pending;
+	return 0;
+}
+
+static int ath10k_set_powered(struct ieee80211_hw *hw)
+{
+	struct ath10k *ar = hw->priv;
+
+	switch (ar->state) {
+	case ATH10K_STATE_OFF:
+	case ATH10K_STATE_PRE_ON:
+		break;
+	default:
+		return 0;
+	}
+
+	if (ar->powerup_pending)
+		return 0;
+
+	queue_work(ar->workqueue, &ar->powerup_work);
+	ar->powerup_pending = true;
+	return 0;
+}
+
+static void ath10k_powerup_work(struct work_struct *work)
+{
+	struct ath10k *ar = container_of(work, struct ath10k, powerup_work);
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_OFF) {
+		mutex_unlock(&ar->conf_mutex);
+		return;
+	}
+
+	ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+	if (ret) {
+		ath10k_err(ar, "Could not init hif: %d\n", ret);
+		goto err_off;
+	}
+
+	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+				&ar->normal_mode_fw);
+	if (ret) {
+		ath10k_err(ar, "Could not init core: %d\n", ret);
+		goto err_power_down;
+	}
+
+	ar->state = ATH10K_STATE_PRE_ON;
+	ar->powerup_pending = false;
+	mutex_unlock(&ar->conf_mutex);
+	return;
+
+err_power_down:
+	ath10k_hif_power_down(ar);
+
+err_off:
+	ar->state = ATH10K_STATE_OFF;
+
+	ar->powerup_pending = false;
+	mutex_unlock(&ar->conf_mutex);
+	return;
+}
+
 static int ath10k_start(struct ieee80211_hw *hw)
 {
 	struct ath10k *ar = hw->priv;
 	u32 param;
 	int ret = 0;
 	struct wmi_bb_timing_cfg_arg bb_timing = {0};
+	bool skip_core_start = false;
 
 	/*
 	 * This makes sense only when restarting hw. It is harmless to call
@@ -5166,6 +5216,10 @@
 	case ATH10K_STATE_OFF:
 		ar->state = ATH10K_STATE_ON;
 		break;
+	case ATH10K_STATE_PRE_ON:
+		skip_core_start = true;
+		ar->state = ATH10K_STATE_ON;
+		break;
 	case ATH10K_STATE_RESTARTING:
 		ar->state = ATH10K_STATE_RESTARTED;
 		break;
@@ -5190,6 +5244,7 @@
 
 	spin_unlock_bh(&ar->data_lock);
 
+	if (!skip_core_start) {
 	ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL);
 	if (ret) {
 		ath10k_err(ar, "Could not init hif: %d\n", ret);
@@ -5202,6 +5257,7 @@
 		ath10k_err(ar, "Could not init core: %d\n", ret);
 		goto err_power_down;
 	}
+	}
 
 	if (ar->sys_cap_info & WMI_TLV_SYS_CAP_INFO_RFKILL) {
 		ret = ath10k_mac_rfkill_config(ar);
@@ -5370,6 +5426,9 @@
 
 	ath10k_drain_tx(ar);
 
+	cancel_work_sync(&ar->powerup_work);
+	ar->powerup_pending = false;
+
 	mutex_lock(&ar->conf_mutex);
 	if (ar->state != ATH10K_STATE_OFF) {
 		if (!ar->hw_rfkill_on) {
@@ -9412,6 +9471,8 @@
 static const struct ieee80211_ops ath10k_ops = {
 	.tx				= ath10k_mac_op_tx,
 	.wake_tx_queue			= ath10k_mac_op_wake_tx_queue,
+	.get_powered			= ath10k_get_powered,
+	.set_powered			= ath10k_set_powered,
 	.start				= ath10k_start,
 	.stop				= ath10k_stop,
 	.config				= ath10k_config,
@@ -9626,6 +9687,7 @@
 		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
 					BIT(NL80211_CHAN_WIDTH_20) |
 					BIT(NL80211_CHAN_WIDTH_40) |
+					BIT(NL80211_CHAN_WIDTH_160) |
 					BIT(NL80211_CHAN_WIDTH_80),
 #endif
 	},
@@ -9809,6 +9871,8 @@
 #define WRD_METHOD "WRDD"
 #define WRDD_WIFI  (0x07)
 
+#define ATH10K_DFS_PULSE_VALID_DIFF_TS 100
+
 static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd)
 {
 	union acpi_object *mcc_pkg;
@@ -10022,11 +10086,13 @@
 	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
 	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
 	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
+	ieee80211_hw_set(ar->hw, APVLAN_NEED_MCAST_TO_UCAST);
 	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
 	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
 	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
 	ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
 	ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
+	ieee80211_hw_set(ar->hw, SUPPORTS_VHT_EXT_NSS_BW);
 
 	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
 		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
@@ -10219,6 +10285,8 @@
 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
 		/* Init ath dfs pattern detector */
 		ar->ath_common.debug_mask = ATH_DBG_DFS;
+		ar->ath_common.dfs_pulse_valid_diff_ts =
+					ATH10K_DFS_PULSE_VALID_DIFF_TS;
 		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
 							     NL80211_DFS_UNSET);
 
@@ -10266,6 +10334,9 @@
 
 	ar->hw->weight_multiplier = ATH10K_AIRTIME_WEIGHT_MULTIPLIER;
 
+	INIT_WORK(&ar->powerup_work, ath10k_powerup_work);
+	ar->powerup_pending = false;
+
 	ret = ieee80211_register_hw(ar->hw);
 	if (ret) {
 		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
@@ -10277,6 +10348,7 @@
 		ar->hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
 	}
 
+#ifndef CONFIG_ATH_REG_IGNORE
 	if (!ath_is_world_regd(&ar->ath_common.reg_world_copy) &&
 	    !ath_is_world_regd(&ar->ath_common.regulatory)) {
 		ret = regulatory_hint(ar->hw->wiphy,
@@ -10284,10 +10356,13 @@
 		if (ret)
 			goto err_unregister;
 	}
+#endif
 
 	return 0;
 
+#ifndef CONFIG_ATH_REG_IGNORE
 err_unregister:
+#endif
 	ieee80211_unregister_hw(ar->hw);
 
 err_dfs_detector_exit:
@@ -10305,6 +10380,7 @@
 void ath10k_mac_unregister(struct ath10k *ar)
 {
 	ieee80211_unregister_hw(ar->hw);
+	cancel_work_sync(&ar->powerup_work);
 
 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
 		ar->dfs_detector->exit(ar->dfs_detector);
diff -ruw linux-6.4/drivers/net/wireless/ath/ath10k/pci.c linux-6.4-fbx/drivers/net/wireless/ath/ath10k/pci.c
--- linux-6.4/drivers/net/wireless/ath/ath10k/pci.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath10k/pci.c	2024-01-19 17:01:19.849846593 +0100
@@ -9,6 +9,7 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/bitops.h>
+#include <linux/delay.h>
 
 #include "core.h"
 #include "debug.h"
@@ -30,6 +31,7 @@
 
 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
 static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
+static char *fem_name;
 
 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
@@ -37,6 +39,9 @@
 module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
 MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
 
+module_param(fem_name, charp, 0660);
+MODULE_PARM_DESC(fem_name, "force FEM type");
+
 /* how long wait to wait for target to initialise, in ms */
 #define ATH10K_PCI_TARGET_WAIT 3000
 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
@@ -714,7 +719,8 @@
 	/* Check if the shared legacy irq is for us */
 	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 				  PCIE_INTR_CAUSE_ADDRESS);
-	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
+	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL) &&
+	    cause != 0xdeadbeef)
 		return true;
 
 	return false;
@@ -2655,12 +2661,6 @@
 	return 0;
 }
 
-static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
-{
-	ath10k_pci_irq_disable(ar);
-	return ath10k_pci_qca99x0_chip_reset(ar);
-}
-
 static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -3524,7 +3524,7 @@
 	.get_num_banks	= ath10k_pci_get_num_banks,
 };
 
-static int ath10k_pci_probe(struct pci_dev *pdev,
+static int __ath10k_pci_probe(struct pci_dev *pdev,
 			    const struct pci_device_id *pci_dev)
 {
 	int ret = 0;
@@ -3565,21 +3565,21 @@
 	case QCA99X0_2_0_DEVICE_ID:
 		hw_rev = ATH10K_HW_QCA99X0;
 		pci_ps = false;
-		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+		pci_soft_reset = NULL;
 		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
 		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
 		break;
 	case QCA9984_1_0_DEVICE_ID:
 		hw_rev = ATH10K_HW_QCA9984;
 		pci_ps = false;
-		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+		pci_soft_reset = NULL;
 		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
 		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
 		break;
 	case QCA9888_2_0_DEVICE_ID:
 		hw_rev = ATH10K_HW_QCA9888;
 		pci_ps = false;
-		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+		pci_soft_reset = NULL;
 		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
 		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
 		break;
@@ -3618,6 +3618,7 @@
 	ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
 	ar->ce_priv = &ar_pci->ce;
 
+	ar->fem_name = fem_name;
 	ar->id.vendor = pdev->vendor;
 	ar->id.device = pdev->device;
 	ar->id.subsystem_vendor = pdev->subsystem_vendor;
@@ -3776,6 +3777,23 @@
 			 ath10k_pci_pm_suspend,
 			 ath10k_pci_pm_resume);
 
+static int ath10k_pci_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *pci_dev)
+{
+	int cnt = 0;
+	int rv;
+	do {
+		rv = __ath10k_pci_probe(pdev, pci_dev);
+		if (rv == 0)
+			return rv;
+
+		pr_err("ath10k: failed to probe PCI : %d, retry-count: %d\n", rv, cnt);
+		mdelay(10); /* let the ath10k firmware gerbil take a small break */
+	} while (cnt++ < 3);
+
+	return rv;
+}
+
 static struct pci_driver ath10k_pci_driver = {
 	.name = "ath10k_pci",
 	.id_table = ath10k_pci_id_table,
diff -ruw linux-6.4/drivers/net/wireless/ath/ath10k/thermal.c linux-6.4-fbx/drivers/net/wireless/ath/ath10k/thermal.c
--- linux-6.4/drivers/net/wireless/ath/ath10k/thermal.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath10k/thermal.c	2023-05-22 20:06:42.195799326 +0200
@@ -160,7 +160,9 @@
 	if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
 		return 0;
 
-	cdev = thermal_cooling_device_register("ath10k_thermal", ar,
+	cdev = thermal_cooling_device_register_with_parent(ar->dev,
+							   "ath10k_thermal",
+							   ar,
 					       &ath10k_thermal_ops);
 
 	if (IS_ERR(cdev)) {
diff -ruw linux-6.4/drivers/net/wireless/ath/ath10k/wmi.c linux-6.4-fbx/drivers/net/wireless/ath/ath10k/wmi.c
--- linux-6.4/drivers/net/wireless/ath/ath10k/wmi.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath10k/wmi.c	2023-05-22 20:06:42.199799433 +0200
@@ -5815,12 +5815,14 @@
 	survey->noise     = noise_floor;
 	survey->time      = div_u64(total, cc_freq_hz);
 	survey->time_busy = div_u64(busy, cc_freq_hz);
-	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
+	survey->time_rx   = div_u64(rx, cc_freq_hz);
 	survey->time_tx   = div_u64(tx, cc_freq_hz);
+	survey->time_bss_rx = div_u64(rx_bss, cc_freq_hz);
 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
 			     SURVEY_INFO_TIME |
 			     SURVEY_INFO_TIME_BUSY |
 			     SURVEY_INFO_TIME_RX |
+			     SURVEY_INFO_TIME_BSS_RX |
 			     SURVEY_INFO_TIME_TX);
 exit:
 	spin_unlock_bh(&ar->data_lock);
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/Kconfig linux-6.4-fbx/drivers/net/wireless/ath/ath11k/Kconfig
--- linux-6.4/drivers/net/wireless/ath/ath11k/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/Kconfig	2023-05-22 20:06:42.199799433 +0200
@@ -57,3 +57,15 @@
 	  Enable ath11k spectral scan support
 
 	  Say Y to enable access to the FFT/spectral data via debugfs.
+
+config ATH11K_SMALL_DP_RINGS
+	bool "ath11k small datapath DMA rings for memory challenged platforms"
+	depends on ATH11K
+	help
+	  Select this to lower the memory requirements for DMA rings
+	  in the datapath code. This can free up to 17 MiB of RAM per
+	  chip.
+
+config ATH11K_QCN9074_FIXED_MEM_REGION
+	bool "QCA ath11k fixed memory region mode on QCN9074"
+	depends on ATH11K
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/core.c linux-6.4-fbx/drivers/net/wireless/ath/ath11k/core.c
--- linux-6.4/drivers/net/wireless/ath/ath11k/core.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/core.c	2024-03-18 14:40:14.839741005 +0100
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -32,6 +32,15 @@
 MODULE_PARM_DESC(frame_mode,
 		 "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
 
+bool ath11k_ftm_mode;
+module_param_named(ftm_mode, ath11k_ftm_mode, bool, 0644);
+MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
+
+static char *ath11k_board_variant;
+module_param_named(board_variant, ath11k_board_variant, charp, 0644);
+MODULE_PARM_DESC(board_variant, "board variant to use for bdf lookup");
+
+
 static const struct ath11k_hw_params ath11k_hw_params[] = {
 	{
 		.hw_rev = ATH11K_HW_IPQ8074,
@@ -51,7 +60,7 @@
 		.host_ce_config = ath11k_host_ce_config_ipq8074,
 		.ce_count = 12,
 		.target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
-		.target_ce_count = 11,
+		.target_ce_count = 12,
 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
 		.svc_to_ce_map_len = 21,
 		.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
@@ -88,6 +97,7 @@
 		.num_vdevs = 16 + 1,
 		.num_peers = 512,
 		.supports_suspend = false,
+		.supports_ap_vlan = true,
 		.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
 		.supports_regdb = false,
 		.fix_l1ss = true,
@@ -116,6 +126,7 @@
 		.tcl_ring_retry = true,
 		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
 		.smp2p_wow_exit = false,
+		.ce_fwlog_enable = false,
 	},
 	{
 		.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -135,7 +146,7 @@
 		.host_ce_config = ath11k_host_ce_config_ipq8074,
 		.ce_count = 12,
 		.target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
-		.target_ce_count = 11,
+		.target_ce_count = 12,
 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018,
 		.svc_to_ce_map_len = 19,
 		.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
@@ -169,6 +180,7 @@
 		.num_vdevs = 16 + 1,
 		.num_peers = 512,
 		.supports_suspend = false,
+		.supports_ap_vlan = true,
 		.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
 		.supports_regdb = false,
 		.fix_l1ss = true,
@@ -198,6 +210,7 @@
 		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
 		.smp2p_wow_exit = false,
 		.support_fw_mac_sequence = false,
+		.ce_fwlog_enable = false,
 	},
 	{
 		.name = "qca6390 hw2.0",
@@ -250,6 +263,7 @@
 		.num_vdevs = 16 + 1,
 		.num_peers = 512,
 		.supports_suspend = true,
+		.supports_ap_vlan = false,
 		.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
 		.supports_regdb = false,
 		.fix_l1ss = true,
@@ -282,6 +296,7 @@
 		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
 		.smp2p_wow_exit = false,
 		.support_fw_mac_sequence = true,
+		.ce_fwlog_enable = false,
 	},
 	{
 		.name = "qcn9074 hw1.0",
@@ -334,6 +349,7 @@
 		.num_vdevs = 8,
 		.num_peers = 128,
 		.supports_suspend = false,
+		.supports_ap_vlan = true,
 		.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
 		.supports_regdb = false,
 		.fix_l1ss = true,
@@ -350,7 +366,7 @@
 		.bios_sar_capa = NULL,
 		.m3_fw_support = true,
 		.fixed_bdf_addr = false,
-		.fixed_mem_region = false,
+		.fixed_mem_region = IS_ENABLED(CONFIG_ATH11K_QCN9074_FIXED_MEM_REGION),
 		.static_window_map = true,
 		.hybrid_bus_type = false,
 		.fixed_fw_mem = false,
@@ -363,6 +379,7 @@
 		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
 		.smp2p_wow_exit = false,
 		.support_fw_mac_sequence = false,
+		.ce_fwlog_enable = true,
 	},
 	{
 		.name = "wcn6855 hw2.0",
@@ -415,6 +432,7 @@
 		.num_vdevs = 16 + 1,
 		.num_peers = 512,
 		.supports_suspend = true,
+		.supports_ap_vlan = false,
 		.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
 		.supports_regdb = true,
 		.fix_l1ss = false,
@@ -497,6 +515,7 @@
 		.num_vdevs = 16 + 1,
 		.num_peers = 512,
 		.supports_suspend = true,
+		.supports_ap_vlan = false,
 		.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
 		.supports_regdb = true,
 		.fix_l1ss = false,
@@ -580,6 +599,7 @@
 		.num_vdevs = 16 + 1,
 		.num_peers = 512,
 		.supports_suspend = false,
+		.supports_ap_vlan = false,
 		.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
 		.supports_regdb = true,
 		.fix_l1ss = false,
@@ -621,6 +641,7 @@
 		.max_radios = MAX_RADIOS_5018,
 		.bdf_addr = 0x4BA00000,
 		/* hal_desc_sz and hw ops are similar to qcn9074 */
+		.supports_ap_vlan = false,
 		.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
 		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
 		.ring_mask = &ath11k_hw_ring_mask_ipq8074,
@@ -966,7 +987,10 @@
 	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
 	char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
 
-	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
+	if (ath11k_board_variant)
+		scnprintf(variant, sizeof(variant), ",variant=%s",
+			  ath11k_board_variant);
+	else if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
 		scnprintf(variant, sizeof(variant), ",variant=%s",
 			  ab->qmi.target.bdf_ext);
 
@@ -1032,9 +1056,14 @@
 
 void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
 {
+	struct ath11k_bid_override *ov, *tmp;
+
 	if (!IS_ERR(bd->fw))
 		release_firmware(bd->fw);
 
+	list_for_each_entry_safe(ov, tmp, &ab->board_id_overrides, next)
+		kfree(ov);
+
 	memset(bd, 0, sizeof(*bd));
 }
 
@@ -1354,6 +1383,11 @@
 {
 	int ret;
 
+	if (ath11k_ftm_mode) {
+		ab->fw_mode = ATH11K_FIRMWARE_MODE_FTM;
+		ath11k_info(ab, "Booting in factory test mode\n");
+	}
+
 	ret = ath11k_qmi_init_service(ab);
 	if (ret) {
 		ath11k_err(ab, "failed to initialize qmi :%d\n", ret);
@@ -1576,11 +1610,15 @@
 	return ret;
 }
 
+static unsigned int ce_fwlog = 1;
+module_param_named(ce_fwlog, ce_fwlog, uint, 0644);
+MODULE_PARM_DESC(ce_fwlog, "Enable/Disable CE based FW logging");
+
 int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
 {
 	int ret;
 
-	ret = ath11k_core_start_firmware(ab, ATH11K_FIRMWARE_MODE_NORMAL);
+	ret = ath11k_core_start_firmware(ab, ab->fw_mode);
 	if (ret) {
 		ath11k_err(ab, "failed to start firmware: %d\n", ret);
 		return ret;
@@ -1628,6 +1666,15 @@
 		goto err_core_stop;
 	}
 	ath11k_hif_irq_enable(ab);
+
+	if (ab->hw_params.ce_fwlog_enable && ce_fwlog) {
+		ret = ath11k_enable_fwlog(ab);
+		if (ret < 0) {
+			ath11k_err(ab, "failed to enable fwlog: %d\n", ret);
+			goto err_core_stop;
+		}
+	}
+
 	mutex_unlock(&ab->core_lock);
 
 	return 0;
@@ -1745,7 +1792,8 @@
 	for (i = 0; i < ab->num_radios; i++) {
 		pdev = &ab->pdevs[i];
 		ar = pdev->ar;
-		if (!ar || ar->state == ATH11K_STATE_OFF)
+		if (!ar || ar->state == ATH11K_STATE_OFF ||
+		    ar->state == ATH11K_STATE_FTM)
 			continue;
 
 		ieee80211_stop_queues(ar->hw);
@@ -1814,6 +1862,10 @@
 			ath11k_warn(ab,
 				    "device is wedged, will not restart radio %d\n", i);
 			break;
+		case ATH11K_STATE_FTM:
+			ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
+				   "fw mode reset done radio %d\n", i);
+			break;
 		}
 		mutex_unlock(&ar->conf_mutex);
 	}
@@ -1932,6 +1984,104 @@
 	return 0;
 }
 
+static int load_board_id_override(struct ath11k_base *ab)
+{
+	struct ath11k_bid_override *ov, *tmp;
+	const struct firmware *fw;
+	const char *p, *end;
+	size_t len;
+	int ret, count;
+
+	fw = ath11k_core_firmware_request(ab, ATH11K_BOARD_OVERRIDE_FILE);
+	if (IS_ERR(fw)) {
+		/* file is optional */
+		if (PTR_ERR(fw) == -ENOENT)
+			return 0;
+		return PTR_ERR(fw);
+	}
+
+	/* format is <pci_path>=<board_id> [...] */
+	p = fw->data;
+	len = fw->size;
+	end = p + len;
+	count = 0;
+
+	while (1) {
+		const char *pstart;
+		char *ppath, *pbid, endc;
+		unsigned int seg, bus, slot, func;
+		u16 board_id;
+
+		while (p != end && isspace(*p))
+			p++;
+		if (p == end)
+			break;
+
+		pstart = p;
+		while (p != end && !isspace(*p))
+			p++;
+
+		if (p == end) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		ppath = kstrndup(pstart, p - pstart, GFP_KERNEL);
+		if (!ppath) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		pbid = strchr(ppath, '=');
+		if (!pbid) {
+			ath11k_err(ab, "bad key=value in override file\n");
+			ret = -EINVAL;
+			kfree(ppath);
+			goto fail;
+		}
+
+		*pbid++ = 0;
+
+		ret = sscanf(ppath, "pci:%x:%x:%x.%x%c", &seg, &bus, &slot,
+			     &func, &endc);
+		if (ret != 4) {
+			ath11k_err(ab, "invalid pci dev in override file\n");
+			kfree(ppath);
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		if (kstrtou16(pbid, 0, &board_id)) {
+			ath11k_err(ab, "invalid board-id in override file\n");
+			kfree(ppath);
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		if (!(ov = kzalloc(sizeof(*ov), GFP_KERNEL))) { ret = -ENOMEM; goto fail; }
+		ov->domain = seg;
+		ov->bus_nr = bus;
+		ov->slot = slot;
+		ov->func = func;
+		ov->board_id = board_id;
+		list_add_tail(&ov->next, &ab->board_id_overrides);
+		count++;
+	}
+
+	ath11k_info(ab, "loaded %d entries from board-id override file\n",
+		    count);
+	release_firmware(fw);
+	return 0;
+
+fail:
+	ath11k_err(ab, "invalid board-id override file content\n");
+	release_firmware(fw);
+	list_for_each_entry_safe(ov, tmp, &ab->board_id_overrides, next)
+		kfree(ov);
+	INIT_LIST_HEAD(&ab->board_id_overrides);
+	return ret;
+}
+
 int ath11k_core_pre_init(struct ath11k_base *ab)
 {
 	int ret;
@@ -1942,6 +2092,10 @@
 		return ret;
 	}
 
+	ret = load_board_id_override(ab);
+	if (ret)
+		return ret;
+
 	return 0;
 }
 EXPORT_SYMBOL(ath11k_core_pre_init);
@@ -2011,6 +2165,7 @@
 	init_completion(&ab->reconfigure_complete);
 	init_completion(&ab->recovery_start);
 
+	INIT_LIST_HEAD(&ab->board_id_overrides);
 	INIT_LIST_HEAD(&ab->peers);
 	init_waitqueue_head(&ab->peer_mapping_wq);
 	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/core.h linux-6.4-fbx/drivers/net/wireless/ath/ath11k/core.h
--- linux-6.4/drivers/net/wireless/ath/ath11k/core.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/core.h	2023-10-25 17:59:16.188036994 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH11K_CORE_H
@@ -157,12 +157,14 @@
 };
 
 extern bool ath11k_cold_boot_cal;
+extern bool ath11k_ftm_mode;
 
 #define ATH11K_IRQ_NUM_MAX 52
 #define ATH11K_EXT_IRQ_NUM_MAX	16
 
 struct ath11k_ext_irq_grp {
 	struct ath11k_base *ab;
+	char *name;
 	u32 irqs[ATH11K_EXT_IRQ_NUM_MAX];
 	u32 num_irq;
 	u32 grp_id;
@@ -277,6 +279,7 @@
 	ATH11K_FLAG_FIXED_MEM_RGN,
 	ATH11K_FLAG_DEVICE_INIT_DONE,
 	ATH11K_FLAG_MULTI_MSI_VECTORS,
+	ATH11K_FLAG_FTM_SEGMENTED,
 };
 
 enum ath11k_monitor_flags {
@@ -367,6 +370,8 @@
 #ifdef CONFIG_ATH11K_DEBUGFS
 	struct dentry *debugfs_twt;
 #endif /* CONFIG_ATH11K_DEBUGFS */
+	u64 tbtt_offset;
+	struct work_struct update_bcn_template_work;
 };
 
 struct ath11k_vif_iter {
@@ -530,6 +535,7 @@
 	ATH11K_STATE_RESTARTING,
 	ATH11K_STATE_RESTARTED,
 	ATH11K_STATE_WEDGED,
+	ATH11K_STATE_FTM,
 	/* Add other states as required */
 };
 
@@ -540,6 +546,12 @@
 
 #define ATH11K_INVALID_RSSI_EMPTY -128
 
+struct ath11k_ftm_event_obj {
+	u32 data_pos;
+	u32 expected_seq;
+	u8 *eventdata;
+};
+
 struct ath11k_fw_stats {
 	struct dentry *debugfs_fwstats;
 	u32 pdev_id;
@@ -572,6 +584,7 @@
 	u32 mem_offset;
 	u32 module_id_bitmap[MAX_MODULE_ID_BITMAP_WORDS];
 	struct ath11k_debug_dbr *dbr_debug[WMI_DIRECT_BUF_MAX];
+	u32 mem_addr;
 };
 
 struct ath11k_per_peer_tx_stats {
@@ -709,6 +722,8 @@
 	u32 last_ppdu_id;
 	u32 cached_ppdu_id;
 	int monitor_vdev_id;
+	struct completion fw_mode_reset;
+	u8 ftm_msgref;
 #ifdef CONFIG_ATH11K_DEBUGFS
 	struct ath11k_debug debug;
 #endif
@@ -835,9 +850,19 @@
 	u16 hw_rev;
 };
 
+struct ath11k_bid_override {
+	unsigned int domain;
+	unsigned int bus_nr;
+	unsigned int slot;
+	unsigned int func;
+	u16 board_id;
+	struct list_head next;
+};
+
 /* Master structure to hold the hw data which may be used in core module */
 struct ath11k_base {
 	enum ath11k_hw_rev hw_rev;
+	enum ath11k_firmware_mode fw_mode;
 	struct platform_device *pdev;
 	struct device *dev;
 	struct ath11k_qmi qmi;
@@ -847,6 +872,7 @@
 	/* HW channel counters frequency value in hertz common to all MACs */
 	u32 cc_freq_hz;
 
+	struct list_head board_id_overrides;
 	struct ath11k_htc htc;
 
 	struct ath11k_dp dp;
@@ -898,10 +924,12 @@
 	bool wmi_ready;
 	u32 wlan_init_status;
 	int irq_num[ATH11K_IRQ_NUM_MAX];
+	char *irq_name[ATH11K_IRQ_NUM_MAX];
 	struct ath11k_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX];
 	struct ath11k_targ_cap target_caps;
 	u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE];
 	bool pdevs_macaddr_valid;
+	bool enable_cold_boot_cal;
 	int bd_api;
 
 	struct ath11k_hw_params hw_params;
@@ -948,6 +976,7 @@
 		u32 fw_crash_counter;
 	} stats;
 	u32 pktlog_defs_checksum;
+	struct ath11k_ftm_event_obj ftm_event_obj;
 
 	struct ath11k_dbring_cap *db_caps;
 	u32 num_db_cap;
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/dbring.c linux-6.4-fbx/drivers/net/wireless/ath/ath11k/dbring.c
--- linux-6.4/drivers/net/wireless/ath/ath11k/dbring.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/dbring.c	2023-05-22 20:06:42.203799539 +0200
@@ -80,6 +80,8 @@
 
 	buff->paddr = paddr;
 
+	dma_sync_single_for_device(ab->dev, paddr, ring->buf_sz, DMA_FROM_DEVICE);
+
 	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
 		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
 
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/debugfs.c linux-6.4-fbx/drivers/net/wireless/ath/ath11k/debugfs.c
--- linux-6.4/drivers/net/wireless/ath/ath11k/debugfs.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/debugfs.c	2023-05-22 20:06:42.203799539 +0200
@@ -15,6 +15,7 @@
 #include "debugfs_htt_stats.h"
 #include "peer.h"
 #include "hif.h"
+#include "qmi.h"
 
 static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = {
 	"REO2SW1_RING",
@@ -556,6 +557,104 @@
 	.llseek = default_llseek,
 };
 
+
+static ssize_t ath11k_athdiag_read(struct file *file,
+				   char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	struct ath11k *ar = file->private_data;
+	u8 *buf;
+	int ret;
+
+	if (*ppos <= 0)
+		return -EINVAL;
+
+	if (!count)
+		return 0;
+
+	mutex_lock(&ar->conf_mutex);
+
+	buf = vmalloc(count);
+	if (!buf) {
+		ret = -ENOMEM;
+		 goto exit;
+	}
+
+	ret = ath11k_qmi_mem_read(ar->ab, *ppos, buf, count);
+	if (ret < 0) {
+		ath11k_warn(ar->ab, "failed to read address 0x%08x via diagnose window from debugfs: %d\n",
+			    (u32)(*ppos), ret);
+		 goto exit;
+	}
+
+	ret = copy_to_user(user_buf, buf, count);
+	if (ret) {
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	count -= ret;
+	*ppos += count;
+	ret = count;
+exit:
+	vfree(buf);
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static ssize_t ath11k_athdiag_write(struct file *file,
+				    const char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	struct ath11k *ar = file->private_data;
+	u8 *buf;
+	int ret;
+
+	if (*ppos <= 0)
+		return -EINVAL;
+
+	if (!count)
+		return 0;
+
+	mutex_lock(&ar->conf_mutex);
+
+	buf = vmalloc(count);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	ret = copy_from_user(buf, user_buf, count);
+	if (ret) {
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	ret = ath11k_qmi_mem_write(ar->ab, *ppos, buf, count);
+	if (ret < 0) {
+		ath11k_warn(ar->ab, "failed to write address 0x%08x via diagnose window from debugfs: %d\n",
+			    (u32)(*ppos), ret);
+		goto exit;
+	}
+
+	*ppos += count;
+	ret = count;
+
+exit:
+	vfree(buf);
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static const struct file_operations fops_athdiag = {
+	.read = ath11k_athdiag_read,
+	.write = ath11k_athdiag_write,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
 static ssize_t ath11k_write_enable_extd_tx_stats(struct file *file,
 						 const char __user *ubuf,
 						 size_t count, loff_t *ppos)
@@ -1648,6 +1747,10 @@
 				    &fops_reset_ps_duration);
 	}
 
+	debugfs_create_file("athdiag", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_athdiag);
+
 	return 0;
 }
 
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/dp.c linux-6.4-fbx/drivers/net/wireless/ath/ath11k/dp.c
--- linux-6.4/drivers/net/wireless/ath/ath11k/dp.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/dp.c	2023-11-07 18:01:55.969707425 +0100
@@ -281,8 +281,18 @@
 	case HAL_RXDMA_MONITOR_STATUS:
 		params.low_threshold = num_entries >> 3;
 		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
-		params.intr_batch_cntr_thres_entries = 0;
 		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+		/* In case of PCI chipsets, we don't have PPDU end interrupts,
+		 * so the MONITOR STATUS ring is reaped by receiving MSI from srng.
+		 * Keep the batch threshold at 4 so that an interrupt is received
+		 * for every 4 frames in the MONITOR_STATUS ring.
+		 */
+		if ((type == HAL_RXDMA_MONITOR_STATUS) &&
+				(params.flags & HAL_SRNG_FLAGS_MSI_INTR))
+			params.intr_batch_cntr_thres_entries = 4;
+		else
+			params.intr_batch_cntr_thres_entries = 0;
+
 		break;
 	case HAL_WBM2SW_RELEASE:
 		if (ring_num < 3) {
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/dp.h linux-6.4-fbx/drivers/net/wireless/ath/ath11k/dp.h
--- linux-6.4/drivers/net/wireless/ath/ath11k/dp.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/dp.h	2023-05-22 20:06:42.207799646 +0200
@@ -205,7 +205,6 @@
 #define DP_WBM_RELEASE_RING_SIZE	64
 #define DP_TCL_DATA_RING_SIZE		512
 #define DP_TCL_DATA_RING_SIZE_WCN6750	2048
-#define DP_TX_COMP_RING_SIZE		32768
 #define DP_TX_IDR_SIZE			DP_TX_COMP_RING_SIZE
 #define DP_TCL_CMD_RING_SIZE		32
 #define DP_TCL_STATUS_RING_SIZE		32
@@ -219,13 +218,22 @@
 #define DP_RXDMA_BUF_RING_SIZE		4096
 #define DP_RXDMA_REFILL_RING_SIZE	2048
 #define DP_RXDMA_ERR_DST_RING_SIZE	1024
-#define DP_RXDMA_MON_STATUS_RING_SIZE	1024
-#define DP_RXDMA_MONITOR_BUF_RING_SIZE	4096
-#define DP_RXDMA_MONITOR_DST_RING_SIZE	2048
 #define DP_RXDMA_MONITOR_DESC_RING_SIZE	4096
 
 #define DP_RX_RELEASE_RING_NUM	3
 
+#ifdef CONFIG_ATH11K_SMALL_DP_RINGS
+# define DP_TX_COMP_RING_SIZE		8192
+# define DP_RXDMA_MON_STATUS_RING_SIZE  512
+# define DP_RXDMA_MONITOR_BUF_RING_SIZE 128
+# define DP_RXDMA_MONITOR_DST_RING_SIZE 128
+#else
+# define DP_TX_COMP_RING_SIZE		32768
+# define DP_RXDMA_MON_STATUS_RING_SIZE	1024
+# define DP_RXDMA_MONITOR_BUF_RING_SIZE	4096
+# define DP_RXDMA_MONITOR_DST_RING_SIZE	2048
+#endif
+
 #define DP_RX_BUFFER_SIZE	2048
 #define	DP_RX_BUFFER_SIZE_LITE  1024
 #define DP_RX_BUFFER_ALIGN_SIZE	128
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/dp_rx.c linux-6.4-fbx/drivers/net/wireless/ath/ath11k/dp_rx.c
--- linux-6.4/drivers/net/wireless/ath/ath11k/dp_rx.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/dp_rx.c	2024-03-27 19:01:35.032950662 +0100
@@ -783,6 +783,7 @@
 	dp->reo_cmd_cache_flush_count++;
 
 	/* Flush and invalidate aged REO desc from HW cache */
+retry:
 	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
 				 list) {
 		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
@@ -795,6 +796,7 @@
 			ath11k_dp_reo_cache_flush(ab, &elem->data);
 			kfree(elem);
 			spin_lock_bh(&dp->reo_cmd_lock);
+			goto retry;
 		}
 	}
 	spin_unlock_bh(&dp->reo_cmd_lock);
@@ -1621,14 +1623,20 @@
 	u8 pdev_id;
 
 	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
+
+	rcu_read_lock();
+
 	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
 	if (!ar) {
 		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
-		return;
+		goto out;
 	}
 
 	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
 				ar->ab->pktlog_defs_checksum);
+
+out:
+	rcu_read_unlock();
 }
 
 static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
@@ -5233,10 +5241,13 @@
 		    pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
 		    hal_status == HAL_TLV_STATUS_PPDU_DONE) {
 			rx_mon_stats->status_ppdu_done++;
+			if (!ab->hw_params.full_monitor_mode) {
 			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
-			ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
+				ath11k_dp_rx_mon_dest_process(ar, mac_id,
+							      budget, napi);
 			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
 		}
+		}
 
 		if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
 		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/dp_rx.h linux-6.4-fbx/drivers/net/wireless/ath/ath11k/dp_rx.h
--- linux-6.4/drivers/net/wireless/ath/ath11k/dp_rx.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/dp_rx.h	2023-03-15 19:52:23.533979623 +0100
@@ -9,7 +9,7 @@
 #include "rx_desc.h"
 #include "debug.h"
 
-#define DP_MAX_NWIFI_HDR_LEN	30
+#define DP_MAX_NWIFI_HDR_LEN	36
 
 #define DP_RX_MPDU_ERR_FCS			BIT(0)
 #define DP_RX_MPDU_ERR_DECRYPT			BIT(1)
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/dp_tx.c linux-6.4-fbx/drivers/net/wireless/ath/ath11k/dp_tx.c
--- linux-6.4/drivers/net/wireless/ath/ath11k/dp_tx.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/dp_tx.c	2023-12-05 17:14:42.303715125 +0100
@@ -79,6 +79,47 @@
 	}
 }
 
+#define HTT_META_DATA_ALIGNMENT    0x8
+
+static int ath11k_dp_metadata_align_skb(struct sk_buff *skb, u8 align_len)
+{
+	int ret;
+
+	ret = skb_cow_head(skb, align_len);
+	if (unlikely(ret))
+		return ret;
+
+	skb_push(skb, align_len);
+	memset(skb->data, 0, align_len);
+	return 0;
+}
+
+static int ath11k_dp_prepare_htt_metadata(struct sk_buff *skb,
+					  u8 *htt_metadata_size)
+{
+	u8 htt_desc_size;
+	/* Size rounded up to a multiple of 8 bytes */
+	u8 htt_desc_size_aligned;
+	struct htt_tx_msdu_desc_ext *desc_ext;
+	int ret;
+
+	htt_desc_size = sizeof(*desc_ext);
+	htt_desc_size_aligned = ALIGN(htt_desc_size, HTT_META_DATA_ALIGNMENT);
+
+	ret = ath11k_dp_metadata_align_skb(skb, htt_desc_size_aligned);
+	if (unlikely(ret))
+		return ret;
+
+	desc_ext = (struct htt_tx_msdu_desc_ext *)skb->data;
+	desc_ext->info0 =
+		__cpu_to_le32(FIELD_PREP(HTT_TX_MSDU_DESC_INFO0_VALID_ENCRYPT_TYPE, 1) |
+			      FIELD_PREP(HTT_TX_MSDU_DESC_INFO0_ENCRYPT_TYPE, 0) |
+			      FIELD_PREP(HTT_TX_MSDU_DESC_INFO0_HOST_TX_DESC_POOL, 1));
+	*htt_metadata_size = htt_desc_size_aligned;
+
+	return 0;
+}
+
 int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
 		 struct ath11k_sta *arsta, struct sk_buff *skb)
 {
@@ -97,9 +138,7 @@
 	u32 ring_selector = 0;
 	u8 ring_map = 0;
 	bool tcl_ring_retry;
-
-	if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
-		return -ESHUTDOWN;
+	u8 align_pad, htt_meta_size = 0;
 
 	if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
 		     !ieee80211_is_data(hdr->frame_control)))
@@ -208,15 +247,42 @@
 		goto fail_remove_idr;
 	}
 
+	/* Add metadata for sw encrypted vlan group traffic */
+	if (!test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
+	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
+	    !info->control.hw_key &&
+	    ieee80211_has_protected(hdr->frame_control)) {
+		/* The HW requires that metadata always points to an 8-byte
+		 * aligned address, so we add an alignment pad at the start of
+		 * the buffer. The HTT metadata itself must be a multiple of
+		 * 8 bytes so the start address stays aligned once align_pad
+		 * has been added. */
+		align_pad = ((unsigned long)skb->data) & (HTT_META_DATA_ALIGNMENT - 1);
+		ret = ath11k_dp_metadata_align_skb(skb, align_pad);
+		if (unlikely(ret))
+			goto fail_remove_idr;
+
+		ti.pkt_offset += align_pad;
+		ret = ath11k_dp_prepare_htt_metadata(skb, &htt_meta_size);
+		if (unlikely(ret))
+			goto fail_pull_skb;
+
+		ti.pkt_offset += htt_meta_size;
+		ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT;
+		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TO_FW, 1);
+		ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
+		ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+	}
+
 	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) {
 		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
 		ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
 		ret = -ENOMEM;
-		goto fail_remove_idr;
+		goto fail_pull_skb;
 	}
 
-	ti.data_len = skb->len;
+	ti.data_len = skb->len - ti.pkt_offset;
 	skb_cb->paddr = ti.paddr;
 	skb_cb->vif = arvif->vif;
 	skb_cb->ar = ar;
@@ -271,6 +337,10 @@
 fail_unmap_dma:
 	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
 
+fail_pull_skb:
+	if (ti.pkt_offset)
+		skb_pull(skb, ti.pkt_offset);
+
 fail_remove_idr:
 	spin_lock_bh(&tx_ring->tx_idr_lock);
 	idr_remove(&tx_ring->txbuf_idr,
@@ -344,7 +414,7 @@
 	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 
 	if (!skb_cb->vif) {
-		dev_kfree_skb_any(msdu);
+		ieee80211_free_txskb(ar->hw, msdu);
 		return;
 	}
 
@@ -369,7 +439,7 @@
 			   "dp_tx: failed to find the peer with peer_id %d\n",
 			    ts->peer_id);
 		spin_unlock_bh(&ab->base_lock);
-		dev_kfree_skb_any(msdu);
+		ieee80211_free_txskb(ar->hw, msdu);
 		return;
 	}
 	spin_unlock_bh(&ab->base_lock);
@@ -566,12 +636,12 @@
 	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 
 	if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) {
-		dev_kfree_skb_any(msdu);
+		ieee80211_free_txskb(ar->hw, msdu);
 		return;
 	}
 
 	if (unlikely(!skb_cb->vif)) {
-		dev_kfree_skb_any(msdu);
+		ieee80211_free_txskb(ar->hw, msdu);
 		return;
 	}
 
@@ -624,7 +694,7 @@
 			   "dp_tx: failed to find the peer with peer_id %d\n",
 			    ts->peer_id);
 		spin_unlock_bh(&ab->base_lock);
-		dev_kfree_skb_any(msdu);
+		ieee80211_free_txskb(ar->hw, msdu);
 		return;
 	}
 	arsta = (struct ath11k_sta *)peer->sta->drv_priv;
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/dp_tx.h linux-6.4-fbx/drivers/net/wireless/ath/ath11k/dp_tx.h
--- linux-6.4/drivers/net/wireless/ath/ath11k/dp_tx.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/dp_tx.h	2023-05-22 20:06:42.211799752 +0200
@@ -16,6 +16,20 @@
 	u16 peer_id;
 };
 
+#define HTT_TX_MSDU_DESC_INFO0_VALID_ENCRYPT_TYPE	BIT(8)
+#define HTT_TX_MSDU_DESC_INFO0_ENCRYPT_TYPE		GENMASK(16, 15)
+#define HTT_TX_MSDU_DESC_INFO0_HOST_TX_DESC_POOL	BIT(31)
+
+struct htt_tx_msdu_desc_ext {
+	__le32 info0;
+	__le32 info1;
+	__le32 info2;
+	__le32 info3;
+	__le32 info4;
+	__le32 info5;
+	__le32 info6;
+} __packed;
+
 void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts);
 int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab);
 int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/hw.c linux-6.4-fbx/drivers/net/wireless/ath/ath11k/hw.c
--- linux-6.4/drivers/net/wireless/ath/ath11k/hw.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/hw.c	2023-05-22 20:06:42.211799752 +0200
@@ -1396,6 +1396,14 @@
 	},
 
 	/* CE11 Not used */
+	{
+		.pipenum = __cpu_to_le32(11),
+		.pipedir = __cpu_to_le32(0),
+		.nentries = __cpu_to_le32(0),
+		.nbytes_max = __cpu_to_le32(0),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.reserved = __cpu_to_le32(0),
+	},
 };
 
 /* Map from service/endpoint to Copy Engine.
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/hw.h linux-6.4-fbx/drivers/net/wireless/ath/ath11k/hw.h
--- linux-6.4/drivers/net/wireless/ath/ath11k/hw.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/hw.h	2023-10-25 17:59:16.188036994 +0200
@@ -74,6 +74,7 @@
 
 #define ATH11K_BOARD_MAGIC		"QCA-ATH11K-BOARD"
 #define ATH11K_BOARD_API2_FILE		"board-2.bin"
+#define ATH11K_BOARD_OVERRIDE_FILE	"board-id-override.txt"
 #define ATH11K_DEFAULT_BOARD_FILE	"board.bin"
 #define ATH11K_DEFAULT_CAL_FILE		"caldata.bin"
 #define ATH11K_AMSS_FILE		"amss.bin"
@@ -192,6 +193,7 @@
 	u32 num_vdevs;
 	u32 num_peers;
 	bool supports_suspend;
+	bool supports_ap_vlan;
 	u32 hal_desc_sz;
 	bool supports_regdb;
 	bool fix_l1ss;
@@ -224,6 +226,7 @@
 	u32 tx_ring_size;
 	bool smp2p_wow_exit;
 	bool support_fw_mac_sequence;
+	bool ce_fwlog_enable;
 };
 
 struct ath11k_hw_ops {
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/mac.c linux-6.4-fbx/drivers/net/wireless/ath/ath11k/mac.c
--- linux-6.4/drivers/net/wireless/ath/ath11k/mac.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/mac.c	2024-03-08 17:37:03.608237373 +0100
@@ -1,10 +1,11 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <net/mac80211.h>
+#include <net/cfg80211.h>
 #include <linux/etherdevice.h>
 #include <linux/bitfield.h>
 #include <linux/inetdevice.h>
@@ -433,7 +434,7 @@
 }
 
 static u32
-ath11k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+ath11k_mac_max_ht_nss(const u8 *ht_mcs_mask)
 {
 	int nss;
 
@@ -445,7 +446,7 @@
 }
 
 static u32
-ath11k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+ath11k_mac_max_vht_nss(const u16 *vht_mcs_mask)
 {
 	int nss;
 
@@ -457,7 +458,7 @@
 }
 
 static u32
-ath11k_mac_max_he_nss(const u16 he_mcs_mask[NL80211_HE_NSS_MAX])
+ath11k_mac_max_he_nss(const u16 *he_mcs_mask)
 {
 	int nss;
 
@@ -643,6 +644,9 @@
 		return NULL;
 
 	for (i = 0; i < ab->num_radios; i++) {
+		if (ab->fw_mode == ATH11K_FIRMWARE_MODE_FTM)
+			pdev = &ab->pdevs[i];
+		else
 		pdev = rcu_dereference(ab->pdevs_active[i]);
 
 		if (pdev && pdev->pdev_id == pdev_id)
@@ -1351,7 +1355,7 @@
 	return ret;
 }
 
-static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
+int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
 {
 	struct ath11k *ar = arvif->ar;
 	struct ath11k_base *ab = ar->ab;
@@ -1362,6 +1366,7 @@
 	struct ieee80211_mgmt *mgmt;
 	u8 *ies;
 	int ret;
+	u64 adjusted_tsf;
 
 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
 		return 0;
@@ -1387,6 +1392,15 @@
 	else
 		arvif->wpaie_present = false;
 
+	/* Make the TSF offset negative so beacons in the same
+	 * staggered batch have the same TSF.
+	 */
+	if (arvif->tbtt_offset) {
+		adjusted_tsf = cpu_to_le64(0ULL - arvif->tbtt_offset);
+		mgmt = (void *)bcn->data;
+		memcpy(&mgmt->u.beacon.timestamp, &adjusted_tsf, sizeof(adjusted_tsf));
+	}
+
 	ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
 
 	kfree_skb(bcn);
@@ -1416,7 +1430,7 @@
 
 	if (vif->bss_conf.color_change_active)
 		ieee80211_beacon_update_cntdwn(vif);
-	ath11k_mac_setup_bcn_tmpl(arvif);
+	ieee80211_queue_work(arvif->ar->hw, &arvif->update_bcn_template_work);
 }
 
 static void ath11k_control_beaconing(struct ath11k_vif *arvif,
@@ -1599,7 +1613,7 @@
 	}
 
 	/* FIXME: base on RSN IE/WPA IE is a correct idea? */
-	if (rsnie || wpaie) {
+	if (ar->supports_6ghz || rsnie || wpaie) {
 		ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
 			   "%s: rsn ie found\n", __func__);
 		arg->need_ptk_4_way = true;
@@ -1658,7 +1672,7 @@
 }
 
 static bool
-ath11k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+ath11k_peer_assoc_h_ht_masked(const u8 *ht_mcs_mask)
 {
 	int nss;
 
@@ -1670,7 +1684,7 @@
 }
 
 static bool
-ath11k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[])
+ath11k_peer_assoc_h_vht_masked(const u16 *vht_mcs_mask)
 {
 	int nss;
 
@@ -2065,7 +2079,7 @@
 }
 
 static bool
-ath11k_peer_assoc_h_he_masked(const u16 he_mcs_mask[NL80211_HE_NSS_MAX])
+ath11k_peer_assoc_h_he_masked(const u16 *he_mcs_mask)
 {
 	int nss;
 
@@ -3068,7 +3082,8 @@
 {
 	u32 bitmap[2], param_id, param_val, pdev_id;
 	int ret;
-	s8 non_srg_th = 0, srg_th = 0;
+	s8 non_srg_th = ATH11K_OBSS_PD_THRESHOLD_DISABLED;
+	s8 srg_th = 0;
 
 	pdev_id = ar->pdev->pdev_id;
 
@@ -3097,8 +3112,6 @@
 		if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
 			non_srg_th = (ATH11K_OBSS_PD_MAX_THRESHOLD +
 				      he_obss_pd->non_srg_max_offset);
-		else
-			non_srg_th = ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD;
 
 		param_val |= ATH11K_OBSS_PD_NON_SRG_EN;
 	}
@@ -3113,6 +3126,7 @@
 		param_val |= ATH11K_OBSS_PD_THRESHOLD_IN_DBM;
 		param_val |= FIELD_PREP(GENMASK(15, 8), srg_th);
 	} else {
+		if ((non_srg_th & 0xff) != ATH11K_OBSS_PD_THRESHOLD_DISABLED)
 		non_srg_th -= ATH11K_DEFAULT_NOISE_FLOOR;
 		/* SRG not supported and threshold in dB */
 		param_val &= ~(ATH11K_OBSS_PD_SRG_EN |
@@ -4078,7 +4092,25 @@
 	 */
 	if (peer && sta && cmd == SET_KEY)
 		ath11k_peer_frags_flush(ar, peer);
+
+	/* Reset peer authorized flag in FW before deleting keys
+	 * to avoid races in FW during encryption of queued packets.
+	 */
+	if (peer && sta && cmd == DISABLE_KEY && peer->is_authorized) {
+		peer->is_authorized = false;
+		spin_unlock_bh(&ab->base_lock);
+		ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+						arvif->vdev_id,
+						WMI_PEER_AUTHORIZE,
+						0);
+		if (ret) {
+			ath11k_warn(ar->ab, "Unable to reset authorize flag for "
+				    "peer (%pM) vdev %d: %d\n",
+				    sta->addr, arvif->vdev_id, ret);
+		}
+	} else {
 	spin_unlock_bh(&ab->base_lock);
+	}
 
 	if (!peer) {
 		if (cmd == SET_KEY) {
@@ -4882,10 +4914,21 @@
 		spin_lock_bh(&ar->ab->base_lock);
 
 		peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
-		if (peer)
+		if (peer) {
 			peer->is_authorized = false;
-
 		spin_unlock_bh(&ar->ab->base_lock);
+			ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+							arvif->vdev_id,
+							WMI_PEER_AUTHORIZE,
+							0);
+			if (ret) {
+				ath11k_warn(ar->ab, "Unable to reset authorize flag for "
+					    "peer (%pM) vdev %d: %d\n",
+					    sta->addr, arvif->vdev_id, ret);
+			}
+		} else {
+			spin_unlock_bh(&ar->ab->base_lock);
+		}
 	} else if (old_state == IEEE80211_STA_ASSOC &&
 		   new_state == IEEE80211_STA_AUTH &&
 		   (vif->type == NL80211_IFTYPE_AP ||
@@ -5920,8 +5963,15 @@
 	struct ieee80211_key_conf *key = info->control.hw_key;
 	struct ath11k_sta *arsta = NULL;
 	u32 info_flags = info->flags;
+	struct ieee80211_mgmt *mgmt;
 	bool is_prb_rsp;
 	int ret;
+	u64 adjusted_tsf;
+
+	if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))) {
+		ieee80211_free_txskb(ar->hw, skb);
+		return;
+	}
 
 	memset(skb_cb, 0, sizeof(*skb_cb));
 	skb_cb->vif = vif;
@@ -5935,6 +5985,12 @@
 		skb_cb->flags |= ATH11K_SKB_HW_80211_ENCAP;
 	} else if (ieee80211_is_mgmt(hdr->frame_control)) {
 		is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
+		if (is_prb_rsp && arvif->tbtt_offset) {
+			mgmt = (struct ieee80211_mgmt *)skb->data;
+			adjusted_tsf = cpu_to_le64(0ULL - arvif->tbtt_offset);
+			memcpy(&mgmt->u.probe_resp.timestamp, &adjusted_tsf,
+			       sizeof(adjusted_tsf));
+		}
 		ret = ath11k_mac_mgmt_tx(ar, skb, is_prb_rsp);
 		if (ret) {
 			ath11k_warn(ar->ab, "failed to queue management frame %d\n",
@@ -6020,6 +6076,11 @@
 	struct ath11k_pdev *pdev = ar->pdev;
 	int ret;
 
+	if (ath11k_ftm_mode) {
+		ath11k_warn(ab, "mac operations not supported in factory test mode\n");
+		return -EOPNOTSUPP;
+	}
+
 	ath11k_mac_drain_tx(ar);
 	mutex_lock(&ar->conf_mutex);
 
@@ -6034,6 +6095,7 @@
 	case ATH11K_STATE_RESTARTED:
 	case ATH11K_STATE_WEDGED:
 	case ATH11K_STATE_ON:
+	case ATH11K_STATE_FTM:
 		WARN_ON(1);
 		ret = -EINVAL;
 		goto err;
@@ -6140,6 +6202,22 @@
 	return ret;
 }
 
+static void ath11k_update_bcn_template_work(struct work_struct *work)
+{
+	struct ath11k_vif *arvif = container_of(work, struct ath11k_vif,
+						update_bcn_template_work);
+	struct ath11k *ar = arvif->ar;
+	int ret = 0;
+
+	mutex_lock(&ar->conf_mutex);
+	if (arvif->is_up)
+		ret = ath11k_mac_setup_bcn_tmpl(arvif);
+	mutex_unlock(&ar->conf_mutex);
+	if (ret)
+		ath11k_warn(ar->ab, "failed to submit beacon template for vdev_id : %d ret : %d\n",
+			    arvif->vdev_id, ret);
+}
+
 static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
 {
 	struct ath11k *ar = hw->priv;
@@ -6455,6 +6533,7 @@
 	INIT_LIST_HEAD(&arvif->list);
 	INIT_DELAYED_WORK(&arvif->connection_loss_work,
 			  ath11k_mac_vif_sta_connection_loss_work);
+	INIT_WORK(&arvif->update_bcn_template_work, ath11k_update_bcn_template_work);
 
 	for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
 		arvif->bitrate_mask.control[i].legacy = 0xffffffff;
@@ -6675,7 +6754,7 @@
 	int i;
 
 	cancel_delayed_work_sync(&arvif->connection_loss_work);
-
+	cancel_work_sync(&arvif->update_bcn_template_work);
 	mutex_lock(&ar->conf_mutex);
 
 	ath11k_dbg(ab, ATH11K_DBG_MAC, "mac remove interface (vdev %d)\n",
@@ -6882,6 +6961,7 @@
 	struct wmi_vdev_start_req_arg arg = {};
 	const struct cfg80211_chan_def *chandef = &ctx->def;
 	int ret = 0;
+	unsigned int dfs_cac_time;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
@@ -6950,20 +7030,21 @@
 	ath11k_dbg(ab, ATH11K_DBG_MAC,  "vdev %pM started, vdev_id %d\n",
 		   arvif->vif->addr, arvif->vdev_id);
 
-	/* Enable CAC Flag in the driver by checking the channel DFS cac time,
-	 * i.e dfs_cac_ms value which will be valid only for radar channels
-	 * and state as NL80211_DFS_USABLE which indicates CAC needs to be
+	/* Enable CAC Flag in the driver by checking all sub-channels' DFS
+	 * state as NL80211_DFS_USABLE which indicates CAC needs to be
 	 * done before channel usage. This flags is used to drop rx packets.
 	 * during CAC.
 	 */
 	/* TODO Set the flag for other interface types as required */
-	if (arvif->vdev_type == WMI_VDEV_TYPE_AP &&
-	    chandef->chan->dfs_cac_ms &&
-	    chandef->chan->dfs_state == NL80211_DFS_USABLE) {
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP && ctx->radar_enabled &&
+	    cfg80211_chandef_dfs_usable(ar->hw->wiphy, chandef)) {
 		set_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
+		dfs_cac_time = cfg80211_chandef_dfs_cac_time(ar->hw->wiphy,
+							     chandef);
 		ath11k_dbg(ab, ATH11K_DBG_MAC,
-			   "CAC Started in chan_freq %d for vdev %d\n",
-			   arg.channel.freq, arg.vdev_id);
+			   "cac started dfs_cac_time %u center_freq %d center_freq1 %d for vdev %d\n",
+			   dfs_cac_time, arg.channel.freq, chandef->center_freq1,
+			   arg.vdev_id);
 	}
 
 	ret = ath11k_mac_set_txbf_conf(arvif);
@@ -8725,6 +8806,14 @@
 	if (ar->state != ATH11K_STATE_ON)
 		goto err_fallback;
 
+	/* Firmware doesn't provide Tx power during CAC hence no need to fetch
+	 * the stats.
+	 */
+	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+		mutex_unlock(&ar->conf_mutex);
+		return -EAGAIN;
+	}
+
 	req_param.pdev_id = ar->pdev->pdev_id;
 	req_param.stats_id = WMI_REQUEST_PDEV_STAT;
 
@@ -8835,6 +8924,28 @@
 	}
 }
 
+#define ATH11k_5_DOT_9_MIN_FREQ	5845
+#define ATH11k_5_DOT_9_MAX_FREQ	5885
+
+static void ath11k_mac_update_5_dot_9_ch_list(struct ath11k *ar,
+				      struct ieee80211_supported_band *band)
+{
+	int i;
+
+	if (test_bit(WMI_TLV_SERVICE_5_DOT_9GHZ_SUPPORT,
+				ar->ab->wmi_ab.svc_map))
+		return;
+
+	if (ar->ab->dfs_region != ATH11K_DFS_REG_FCC)
+		return;
+
+	for (i = 0; i < band->n_channels; i++) {
+		if (band->channels[i].center_freq >= ATH11k_5_DOT_9_MIN_FREQ &&
+		    band->channels[i].center_freq <= ATH11k_5_DOT_9_MAX_FREQ)
+			band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
+	}
+}
+
 static u32 ath11k_get_phy_id(struct ath11k *ar, u32 band)
 {
 	struct ath11k_pdev *pdev = ar->pdev;
@@ -8892,7 +9003,7 @@
 	}
 
 	if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
-		if (reg_cap->high_5ghz_chan >= ATH11K_MAX_6G_FREQ) {
+		if (reg_cap->high_5ghz_chan >= ATH11K_MIN_6G_FREQ) {
 			channels = kmemdup(ath11k_6ghz_channels,
 					   sizeof(ath11k_6ghz_channels), GFP_KERNEL);
 			if (!channels) {
@@ -8945,6 +9056,7 @@
 			ath11k_mac_update_ch_list(ar, band,
 						  temp_reg_cap->low_5ghz_chan,
 						  temp_reg_cap->high_5ghz_chan);
+			ath11k_mac_update_5_dot_9_ch_list(ar, band);
 		}
 	}
 
@@ -9277,6 +9389,11 @@
 		 */
 		ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
 
+	if (ab->hw_params.supports_ap_vlan) {
+		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
+		ar->hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
+	}
+
 	/* Apply the regd received during initialization */
 	ret = ath11k_regd_update(ar);
 	if (ret) {
@@ -9383,6 +9500,7 @@
 	struct ieee80211_hw *hw;
 	struct ath11k *ar;
 	struct ath11k_pdev *pdev;
+	struct ieee80211_ops *ops;
 	int ret;
 	int i;
 
@@ -9390,17 +9508,25 @@
 		return 0;
 
 	for (i = 0; i < ab->num_radios; i++) {
+		ops = kmemdup(&ath11k_ops, sizeof(ath11k_ops), GFP_KERNEL);
+		if (!ops) {
+			ret = -ENOMEM;
+			goto err_free_mac;
+		}
+
 		pdev = &ab->pdevs[i];
-		hw = ieee80211_alloc_hw(sizeof(struct ath11k), &ath11k_ops);
+		hw = ieee80211_alloc_hw(sizeof(struct ath11k), ops);
 		if (!hw) {
 			ath11k_warn(ab, "failed to allocate mac80211 hw device\n");
 			ret = -ENOMEM;
+			kfree(ops);
 			goto err_free_mac;
 		}
 
 		ar = hw->priv;
 		ar->hw = hw;
 		ar->ab = ab;
+		ar->ops = ops;
 		ar->pdev = pdev;
 		ar->pdev_idx = i;
 		ar->lmac_id = ath11k_hw_get_mac_from_pdev_id(&ab->hw_params, i);
@@ -9460,6 +9586,7 @@
 {
 	struct ath11k *ar;
 	struct ath11k_pdev *pdev;
+	struct ieee80211_ops *ops;
 	int i;
 
 	for (i = 0; i < ab->num_radios; i++) {
@@ -9468,7 +9595,9 @@
 		if (!ar)
 			continue;
 
+		ops = ar->ops;
 		ieee80211_free_hw(ar->hw);
+		kfree(ops);
 		pdev->ar = NULL;
 	}
 }
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/mac.h linux-6.4-fbx/drivers/net/wireless/ath/ath11k/mac.h
--- linux-6.4/drivers/net/wireless/ath/ath11k/mac.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/mac.h	2023-05-22 20:06:42.215799858 +0200
@@ -121,7 +121,7 @@
 #define ATH11K_PEER_RX_NSS_80_80MHZ		GENMASK(5, 3)
 
 #define ATH11K_OBSS_PD_MAX_THRESHOLD			-82
-#define ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD		-62
+#define ATH11K_OBSS_PD_THRESHOLD_DISABLED		128
 #define ATH11K_OBSS_PD_THRESHOLD_IN_DBM			BIT(29)
 #define ATH11K_OBSS_PD_SRG_EN				BIT(30)
 #define ATH11K_OBSS_PD_NON_SRG_EN			BIT(31)
@@ -175,4 +175,6 @@
 int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
 				 enum wmi_sta_keepalive_method method,
 				 u32 interval);
+int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif);
+
 #endif
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/mhi.c linux-6.4-fbx/drivers/net/wireless/ath/ath11k/mhi.c
--- linux-6.4/drivers/net/wireless/ath/ath11k/mhi.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/mhi.c	2024-03-22 17:24:19.738851749 +0100
@@ -372,7 +372,7 @@
 	if (ret)
 		return ret;
 
-	mhi_ctrl->iova_start = res.start + 0x1000000;
+	mhi_ctrl->iova_start = res.start;
 	mhi_ctrl->iova_stop = res.end;
 
 	return 0;
@@ -414,7 +414,7 @@
 			goto free_controller;
 	} else {
 		mhi_ctrl->iova_start = 0;
-		mhi_ctrl->iova_stop = 0xFFFFFFFF;
+		mhi_ctrl->iova_stop = ab_pci->dma_mask;
 	}
 
 	mhi_ctrl->rddm_size = RDDM_DUMP_SIZE;
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/pci.c linux-6.4-fbx/drivers/net/wireless/ath/ath11k/pci.c
--- linux-6.4/drivers/net/wireless/ath/ath11k/pci.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/pci.c	2024-03-22 17:24:19.742851858 +0100
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -15,9 +15,12 @@
 #include "mhi.h"
 #include "debug.h"
 #include "pcic.h"
+#include "qmi.h"
+#include <linux/of.h>
 
 #define ATH11K_PCI_BAR_NUM		0
-#define ATH11K_PCI_DMA_MASK		32
+#define ATH11K_PCI_DMA_MASK		36
+#define ATH11K_PCI_COHERENT_DMA_MASK	32
 
 #define TCSR_SOC_HW_VERSION		0x0224
 #define TCSR_SOC_HW_VERSION_MAJOR_MASK	GENMASK(11, 8)
@@ -27,6 +30,8 @@
 #define QCN9074_DEVICE_ID		0x1104
 #define WCN6855_DEVICE_ID		0x1103
 
+#define ATH11K_PCIE_LOCAL_REG_PCIE_LOCAL_RSV0  0x1E03164
+
 static const struct pci_device_id ath11k_pci_id_table[] = {
 	{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
 	{ PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
@@ -173,8 +178,16 @@
 
 static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
 {
+	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
 	u32 val, delay;
 
+	/*
+	 * A global reset reinitialises SoC registers to their default
+	 * values; we must not assume that the register window value
+	 * cached here is still valid after the reset.
+	 */
+	ab_pci->register_window = 0x00;
+
 	val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);
 
 	val |= PCIE_SOC_GLOBAL_RESET_V;
@@ -367,9 +380,14 @@
 	ath11k_mhi_set_mhictrl_reset(ab);
 }
 
+#define ATH11K_QRTR_INSTANCE_PCI_DOMAIN		GENMASK(3, 0)
+#define ATH11K_QRTR_INSTANCE_PCI_BUS_NUM	GENMASK(7, 4)
+
 static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
 {
 	struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+	struct pci_bus *bus = ab_pci->pdev->bus;
 
 	cfg->tgt_ce = ab->hw_params.target_ce_config;
 	cfg->tgt_ce_len = ab->hw_params.target_ce_count;
@@ -380,6 +398,13 @@
 
 	ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2,
 				    &cfg->shadow_reg_v2_len);
+
+	ab_pci->instance_id =
+		FIELD_PREP(ATH11K_QRTR_INSTANCE_PCI_DOMAIN,
+			   pci_domain_nr(bus)) |
+		FIELD_PREP(ATH11K_QRTR_INSTANCE_PCI_BUS_NUM,
+			   bus->number);
+	ab->qmi.service_ins_id += ab_pci->instance_id;
 }
 
 static void ath11k_pci_msi_config(struct ath11k_pci *ab_pci, bool enable)
@@ -525,13 +550,22 @@
 		goto disable_device;
 	}
 
-	ret = dma_set_mask_and_coherent(&pdev->dev,
+	ret = dma_set_mask(&pdev->dev,
 					DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
 	if (ret) {
 		ath11k_err(ab, "failed to set pci dma mask to %d: %d\n",
 			   ATH11K_PCI_DMA_MASK, ret);
 		goto release_region;
 	}
+	ab_pci->dma_mask = DMA_BIT_MASK(ATH11K_PCI_DMA_MASK);
+
+	ret = dma_set_coherent_mask(&pdev->dev,
+				    DMA_BIT_MASK(ATH11K_PCI_COHERENT_DMA_MASK));
+	if (ret) {
+		ath11k_err(ab, "failed to set pci coherent dma mask to %d: %d\n",
+			   ATH11K_PCI_COHERENT_DMA_MASK, ret);
+		goto release_region;
+	}
 
 	pci_set_master(pdev);
 
@@ -594,6 +628,18 @@
 					   ab_pci->link_ctl);
 }
 
+static void ath11k_pci_update_qrtr_node_id(struct ath11k_base *ab)
+{
+	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+	u32 reg;
+
+	reg = ATH11K_PCIE_LOCAL_REG_PCIE_LOCAL_RSV0 & ATH11K_PCI_WINDOW_RANGE_MASK;
+	ath11k_pcic_write32(ab, reg, ab_pci->instance_id);
+
+	ath11k_dbg(ab, ATH11K_DBG_PCI, "pci reg 0x%x instance_id 0x%x read val 0x%x\n",
+		   reg, ab_pci->instance_id, ath11k_pcic_read32(ab, reg));
+}
+
 static int ath11k_pci_power_up(struct ath11k_base *ab)
 {
 	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -610,6 +656,8 @@
 
 	ath11k_pci_msi_enable(ab_pci);
 
+	ath11k_pci_update_qrtr_node_id(ab);
+
 	ret = ath11k_mhi_start(ab_pci);
 	if (ret) {
 		ath11k_err(ab, "failed to start mhi: %d\n", ret);
@@ -745,6 +793,7 @@
 	ab_pci->ab = ab;
 	ab_pci->pdev = pdev;
 	ab->hif.ops = &ath11k_pci_hif_ops;
+	ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
 	pci_set_drvdata(pdev, ab);
 	spin_lock_init(&ab_pci->window_lock);
 
@@ -791,6 +840,7 @@
 	case QCN9074_DEVICE_ID:
 		pci_ops = &ath11k_pci_ops_qcn9074;
 		ab->hw_rev = ATH11K_HW_QCN9074_HW10;
+		ab->enable_cold_boot_cal = ath11k_cold_boot_cal;
 		break;
 	case WCN6855_DEVICE_ID:
 		ab->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD;
@@ -868,7 +918,7 @@
 
 	ath11k_pci_init_qmi_ce_config(ab);
 
-	ret = ath11k_pcic_config_irq(ab);
+	ret = ath11k_pcic_config_irq(ab, pci_name(pdev));
 	if (ret) {
 		ath11k_err(ab, "failed to config irq: %d\n", ret);
 		goto err_ce_free;
@@ -896,6 +946,7 @@
 		ath11k_err(ab, "failed to init core: %d\n", ret);
 		goto err_irq_affinity_cleanup;
 	}
+	ath11k_qmi_fwreset_from_cold_boot(ab);
 	return 0;
 
 err_irq_affinity_cleanup:
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/pci.h linux-6.4-fbx/drivers/net/wireless/ath/ath11k/pci.h
--- linux-6.4/drivers/net/wireless/ath/ath11k/pci.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/pci.h	2024-03-22 17:24:19.742851858 +0100
@@ -72,6 +72,8 @@
 	/* enum ath11k_pci_flags */
 	unsigned long flags;
 	u16 link_ctl;
+	u32 instance_id;
+	u64 dma_mask;
 };
 
 static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/pcic.c linux-6.4-fbx/drivers/net/wireless/ath/ath11k/pcic.c
--- linux-6.4/drivers/net/wireless/ath/ath11k/pcic.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/pcic.c	2023-05-22 20:06:42.215799858 +0200
@@ -8,10 +8,7 @@
 #include "pcic.h"
 #include "debug.h"
 
-static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
-	"bhi",
-	"mhi-er0",
-	"mhi-er1",
+static const char *ce_irq_name[] = {
 	"ce0",
 	"ce1",
 	"ce2",
@@ -24,42 +21,20 @@
 	"ce9",
 	"ce10",
 	"ce11",
-	"host2wbm-desc-feed",
-	"host2reo-re-injection",
-	"host2reo-command",
-	"host2rxdma-monitor-ring3",
-	"host2rxdma-monitor-ring2",
-	"host2rxdma-monitor-ring1",
-	"reo2ost-exception",
-	"wbm2host-rx-release",
-	"reo2host-status",
-	"reo2host-destination-ring4",
-	"reo2host-destination-ring3",
-	"reo2host-destination-ring2",
-	"reo2host-destination-ring1",
-	"rxdma2host-monitor-destination-mac3",
-	"rxdma2host-monitor-destination-mac2",
-	"rxdma2host-monitor-destination-mac1",
-	"ppdu-end-interrupts-mac3",
-	"ppdu-end-interrupts-mac2",
-	"ppdu-end-interrupts-mac1",
-	"rxdma2host-monitor-status-ring-mac3",
-	"rxdma2host-monitor-status-ring-mac2",
-	"rxdma2host-monitor-status-ring-mac1",
-	"host2rxdma-host-buf-ring-mac3",
-	"host2rxdma-host-buf-ring-mac2",
-	"host2rxdma-host-buf-ring-mac1",
-	"rxdma2host-destination-ring-mac3",
-	"rxdma2host-destination-ring-mac2",
-	"rxdma2host-destination-ring-mac1",
-	"host2tcl-input-ring4",
-	"host2tcl-input-ring3",
-	"host2tcl-input-ring2",
-	"host2tcl-input-ring1",
-	"wbm2host-tx-completions-ring3",
-	"wbm2host-tx-completions-ring2",
-	"wbm2host-tx-completions-ring1",
-	"tcl2host-status-ring",
+};
+
+static const char *dp_irq_name[ATH11K_EXT_IRQ_NUM_MAX] = {
+	"wbm2host_tx_completions_ring1",
+	"wbm2host_tx_completions_ring2",
+	"wbm2host_tx_completions_ring3",
+	"lmac_reo_misc_irq",
+	"reo2host_destination_ring1",
+	"reo2host_destination_ring2",
+	"reo2host_destination_ring3",
+	"reo2host_destination_ring4",
+	"dp_res1",
+	"dp_res2",
+	"dp_res3",
 };
 
 static const struct ath11k_msi_config ath11k_msi_config[] = {
@@ -305,6 +280,8 @@
 			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
 
 		netif_napi_del(&irq_grp->napi);
+		kfree(irq_grp->name);
+		irq_grp->name = NULL;
 	}
 }
 
@@ -317,6 +294,8 @@
 			continue;
 		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
 		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
+		kfree(ab->irq_name[i]);
+		ab->irq_name[i] = NULL;
 	}
 
 	ath11k_pcic_free_ext_irq(ab);
@@ -546,7 +525,8 @@
 	return ab->pci.ops->get_msi_irq(ab, vector);
 }
 
-static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
+static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab,
+				      const char *dev_name)
 {
 	int i, j, ret, num_vectors = 0;
 	u32 user_base_data = 0, base_vector = 0;
@@ -583,6 +563,13 @@
 			num_irq = 1;
 		}
 
+		irq_grp->name = kasprintf(GFP_KERNEL,
+					  "%s-%s",
+					  dev_name,
+					  dp_irq_name[i]);
+		if (!irq_grp->name)
+			return -ENOMEM;
+
 		irq_grp->num_irq = num_irq;
 		irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;
 
@@ -601,7 +588,7 @@
 
 			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 			ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler,
-					  irq_flags, "DP_EXT_IRQ", irq_grp);
+					  irq_flags, irq_grp->name, irq_grp);
 			if (ret) {
 				ath11k_err(ab, "failed request irq %d: %d\n",
 					   vector, ret);
@@ -614,7 +601,8 @@
 	return 0;
 }
 
-int ath11k_pcic_config_irq(struct ath11k_base *ab)
+int ath11k_pcic_config_irq(struct ath11k_base *ab,
+			   const char *dev_name)
 {
 	struct ath11k_ce_pipe *ce_pipe;
 	u32 msi_data_start;
@@ -643,6 +631,13 @@
 		if (irq < 0)
 			return irq;
 
+		ab->irq_name[i] = kasprintf(GFP_KERNEL,
+					    "%s-%s",
+					    dev_name,
+					    ce_irq_name[i]);
+		if (!ab->irq_name[i])
+			return -ENOMEM;
+
 		ce_pipe = &ab->ce.ce_pipe[i];
 
 		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
@@ -650,7 +645,7 @@
 		tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet);
 
 		ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler,
-				  irq_flags, irq_name[irq_idx], ce_pipe);
+				  irq_flags, ab->irq_name[i], ce_pipe);
 		if (ret) {
 			ath11k_err(ab, "failed to request irq %d: %d\n",
 				   irq_idx, ret);
@@ -663,7 +658,7 @@
 		ath11k_pcic_ce_irq_disable(ab, i);
 	}
 
-	ret = ath11k_pcic_ext_irq_config(ab);
+	ret = ath11k_pcic_ext_irq_config(ab, dev_name);
 	if (ret)
 		return ret;
 
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/pcic.h linux-6.4-fbx/drivers/net/wireless/ath/ath11k/pcic.h
--- linux-6.4/drivers/net/wireless/ath/ath11k/pcic.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/pcic.h	2023-05-22 20:06:42.215799858 +0200
@@ -35,7 +35,7 @@
 				 u32 *msi_addr_hi);
 void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx);
 void ath11k_pcic_free_irq(struct ath11k_base *ab);
-int ath11k_pcic_config_irq(struct ath11k_base *ab);
+int ath11k_pcic_config_irq(struct ath11k_base *ab, const char *dev_name);
 void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab);
 void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab);
 void ath11k_pcic_stop(struct ath11k_base *ab);
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/qmi.c linux-6.4-fbx/drivers/net/wireless/ath/ath11k/qmi.c
--- linux-6.4/drivers/net/wireless/ath/ath11k/qmi.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/qmi.c	2023-10-25 17:59:16.192037103 +0200
@@ -1,20 +1,23 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/elf.h>
 
 #include "qmi.h"
 #include "core.h"
+#include "pci.h"
 #include "debug.h"
+#include "hif.h"
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/ioport.h>
 #include <linux/firmware.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
+#include <linux/pci.h>
 
 #define SLEEP_CLOCK_SELECT_INTERNAL_BIT	0x02
 #define HOST_CSTATE_BIT			0x04
@@ -29,6 +32,10 @@
 MODULE_PARM_DESC(cold_boot_cal,
 		 "Decrease the channel switch time but increase the driver load time (Default: true)");
 
+bool ath11k_skip_caldata = 0;
+module_param_named(skip_caldata, ath11k_skip_caldata, bool, 0644);
+MODULE_PARM_DESC(ath11k_skip_caldata, "Skip caldata download");
+
 static const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
 	{
 		.data_type	= QMI_OPT_FLAG,
@@ -1704,6 +1711,201 @@
 	},
 };
 
+struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ini_req_msg_v01,
+					   enablefwlog_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ini_req_msg_v01,
+					   enablefwlog),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_ini_resp_msg_v01,
+					   resp),
+		.ei_array       = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info qmi_wlanfw_mem_read_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct qmi_wlanfw_mem_read_req_msg_v01,
+					   offset),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct qmi_wlanfw_mem_read_req_msg_v01,
+					   mem_type),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct qmi_wlanfw_mem_read_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info qmi_wlanfw_mem_read_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_read_resp_msg_v01,
+					   resp),
+		.ei_array       = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_read_resp_msg_v01,
+					   data_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_read_resp_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLANFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(u8),
+		.array_type     = VAR_LEN_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_read_resp_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info qmi_wlanfw_mem_write_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_write_req_msg_v01,
+					   offset),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_write_req_msg_v01,
+					   mem_type),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_write_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLANFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(u8),
+		.array_type     = VAR_LEN_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_write_req_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info qmi_wlanfw_mem_write_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_write_resp_msg_v01,
+					   resp),
+		.ei_array       = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
 static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
 {
 	struct qmi_wlanfw_host_cap_req_msg_v01 req;
@@ -2180,11 +2382,69 @@
 	return ret;
 }
 
+static bool ath11k_pci_has_board_id_override(struct ath11k_base *ab,
+					     u16 *ov_board_id)
+{
+	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+	struct pci_dev *pdev = ab_pci->pdev;
+	const struct ath11k_bid_override *ov;
+
+	list_for_each_entry(ov, &ab->board_id_overrides, next) {
+		if (ov->domain != pci_domain_nr(pdev->bus))
+			continue;
+
+		if (ov->bus_nr != pdev->bus->number)
+			continue;
+
+		if (pdev->devfn != PCI_DEVFN(ov->slot, ov->func))
+			continue;
+
+		*ov_board_id = ov->board_id;
+		return true;
+	}
+	return false;
+}
+
+static int ath11k_override_board_id(struct ath11k_base *ab, u16 *ov_board_id)
+{
+	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+	int ret = 0;
+
+	if (ab->hif.bus != ATH11K_BUS_PCI) {
+		/*
+		 * on AHB: just override to 0xff. we don't know how to
+		 * override otherwise.
+		 */
+		 */
+		ab->qmi.target.board_id = 0xff;
+		return 0;
+	}
+
+	if (ath11k_pci_has_board_id_override(ab, ov_board_id)) {
+		ath11k_info(ab,
+			    "overriding board-id to 0x%x (%d)\n",
+			    *ov_board_id, *ov_board_id);
+	} else {
+		ath11k_err(ab,
+			   "device has invalid board-id, "
+			   "to use this card you need to setup "
+			   "%s/%s/%s file with "
+			   "this line:\n  pci:%s=<board_id>\n",
+			   ATH11K_FW_DIR, ab->hw_params.fw.dir,
+			   ATH11K_BOARD_OVERRIDE_FILE,
+			   pci_name(ab_pci->pdev));
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
 static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
 {
+	struct device *dev = ab->dev;
 	struct qmi_wlanfw_cap_req_msg_v01 req;
 	struct qmi_wlanfw_cap_resp_msg_v01 resp;
 	struct qmi_txn txn;
+	unsigned int board_id;
 	int ret = 0;
 	int r;
 	char *fw_build_id;
@@ -2229,10 +2489,19 @@
 		ab->qmi.target.chip_family = resp.chip_info.chip_family;
 	}
 
-	if (resp.board_info_valid)
+	if (!of_property_read_u32(dev->of_node, "qcom,board_id", &board_id) && board_id != 0xFF) {
+		ab->qmi.target.board_id = board_id;
+	} else if (resp.board_info_valid) {
 		ab->qmi.target.board_id = resp.board_info.board_id;
-	else
-		ab->qmi.target.board_id = 0xFF;
+	} else {
+		u16 ov_board_id;
+
+		ret = ath11k_override_board_id(ab, &ov_board_id);
+		if (ret)
+			goto out;
+
+		ab->qmi.target.board_id = ov_board_id;
+	}
 
 	if (resp.soc_info_valid)
 		ab->qmi.target.soc_id = resp.soc_info.soc_id;
@@ -2441,6 +2710,11 @@
 	if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF || bdf_type == ATH11K_QMI_BDF_TYPE_REGDB)
 		goto out;
 
+	if (ath11k_skip_caldata) {
+		ath11k_warn(ab, "Skipping caldata download\n");
+		goto out;
+	}
+
 	if (ab->qmi.target.eeprom_caldata) {
 		file_type = ATH11K_QMI_FILE_TYPE_EEPROM;
 		tmp = filename;
@@ -2457,11 +2731,18 @@
 
 		fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE);
 		if (IS_ERR(fw_entry)) {
+			/* Caldata may not be present during first-time calibration
+			 * in the factory, hence allow booting without caldata in FTM mode.
+			 */
+			if (ath11k_ftm_mode) {
+				ath11k_info(ab,
+					    "Booting without cal data file in FTM mode\n");
+				return 0;
+			}
 			ret = PTR_ERR(fw_entry);
 			ath11k_warn(ab,
-				    "qmi failed to load CAL data file:%s\n",
-				    filename);
-			goto out;
+				    "qmi failed to load CAL data file:%s booting with minimal performance\n",ATH11K_DEFAULT_CAL_FILE);
+			return 0;
 		}
 success:
 		fw_size = min_t(u32, ab->hw_params.fw.board_size, fw_entry->size);
@@ -2826,6 +3107,33 @@
 	return 0;
 }
 
+int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab)
+{
+	int timeout;
+
+	if (ath11k_cold_boot_cal == 0 || ab->qmi.cal_done ||
+	    ab->hw_params.cold_boot_calib == 0 ||
+	    ab->hw_params.cbcal_restart_fw == 0)
+		return 0;
+
+	ath11k_dbg(ab, ATH11K_DBG_AHB, "wait for cold boot done\n");
+	timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
+				     (ab->qmi.cal_done  == 1),
+				     ATH11K_COLD_BOOT_FW_RESET_DELAY);
+	if (timeout <= 0) {
+		ath11k_cold_boot_cal = 0;
+		ath11k_warn(ab, "Coldboot Calibration failed timed out\n");
+	}
+
+	/* reset the firmware */
+	ath11k_hif_power_down(ab);
+	ath11k_hif_power_up(ab);
+
+	ath11k_dbg(ab, ATH11K_DBG_AHB, "exited from cold boot mode\n");
+	return 0;
+}
+EXPORT_SYMBOL(ath11k_qmi_fwreset_from_cold_boot);
+
 static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab)
 {
 	int timeout;
@@ -3052,6 +3360,130 @@
 	ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware init done\n");
 }
 
+int ath11k_qmi_mem_read(struct ath11k_base *ab, u32 mem_addr, void *mem_value,size_t count)
+{
+	struct qmi_wlanfw_mem_read_req_msg_v01 *req;
+	struct qmi_wlanfw_mem_read_resp_msg_v01 *resp;
+	struct qmi_txn txn = {};
+	int ret = 0;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	req->offset = mem_addr;
+
+	/* Firmware uses mem_type to map to various memory regions.
+	 * If this is set to 0, firmware uses automatic mapping of regions,
+	 * i.e., if a mem address is given and mem_type is 0, firmware will
+	 * determine which memory region that address belongs to.
+	 */
+	req->mem_type = QMI_MEM_REGION_TYPE;
+	req->data_len = count;
+
+	ret = qmi_txn_init(&ab->qmi.handle, &txn,
+			   qmi_wlanfw_mem_read_resp_msg_v01_ei, resp);
+	if (ret < 0)
+		goto out;
+
+	ret =
+	qmi_send_request(&ab->qmi.handle, NULL, &txn,
+			 QMI_WLANFW_MEM_READ_REQ_V01,
+			 QMI_WLANFW_MEM_READ_REQ_MSG_V01_MAX_MSG_LEN,
+			 qmi_wlanfw_mem_read_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		ath11k_warn(ab, "Failed to send mem read request, err %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+	if (ret < 0)
+		goto out;
+
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		ath11k_warn(ab, "qmi mem read req failed, result: %d, err: %d\n",
+			    resp->resp.result, resp->resp.error);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!resp->data_valid || resp->data_len != req->data_len) {
+		ath11k_warn(ab, "qmi mem read is invalid\n");
+		ret = -EINVAL;
+		goto out;
+	}
+	memcpy(mem_value, resp->data, resp->data_len);
+
+out:
+	kfree(req);
+	kfree(resp);
+	return ret;
+}
+
+int ath11k_qmi_mem_write(struct ath11k_base *ab, u32 mem_addr, void* mem_value, size_t count)
+{
+	struct qmi_wlanfw_mem_write_req_msg_v01 *req;
+	struct qmi_wlanfw_mem_write_resp_msg_v01 *resp;
+	struct qmi_txn txn = {};
+	int ret = 0;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	req->offset = mem_addr;
+	req->mem_type = QMI_MEM_REGION_TYPE;
+	req->data_len = count;
+	memcpy(req->data, mem_value, req->data_len);
+
+	ret = qmi_txn_init(&ab->qmi.handle, &txn,
+			   qmi_wlanfw_mem_write_resp_msg_v01_ei, resp);
+	if (ret < 0)
+		goto out;
+
+	ret =
+	qmi_send_request(&ab->qmi.handle, NULL, &txn,
+			 QMI_WLANFW_MEM_WRITE_REQ_V01,
+			 QMI_WLANFW_MEM_WRITE_REQ_MSG_V01_MAX_MSG_LEN,
+			 qmi_wlanfw_mem_write_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		ath11k_warn(ab, "Failed to send mem write request, err %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+	if (ret < 0)
+		goto out;
+
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		ath11k_warn(ab, "qmi mem write req failed, result: %d, err: %d\n",
+			    resp->resp.result, resp->resp.error);
+		ret = -EINVAL;
+		goto out;
+	}
+
+out:
+	kfree(req);
+	kfree(resp);
+	return ret;
+}
+
 static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
 	{
 		.type = QMI_INDICATION,
@@ -3196,7 +3628,7 @@
 				break;
 			}
 
-			if (ath11k_cold_boot_cal && ab->qmi.cal_done == 0 &&
+			if (ab->enable_cold_boot_cal && ab->qmi.cal_done == 0 &&
 			    ab->hw_params.cold_boot_calib) {
 				ath11k_qmi_process_coldboot_calibration(ab);
 			} else {
@@ -3240,6 +3672,55 @@
 	spin_unlock(&qmi->event_lock);
 }
 
+int ath11k_enable_fwlog(struct ath11k_base *ab)
+{
+	struct wlfw_ini_req_msg_v01 *req;
+	struct wlfw_ini_resp_msg_v01 resp;
+	struct qmi_txn txn = {};
+	int ret = 0;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	memset(&resp, 0, sizeof(resp));
+
+	req->enablefwlog_valid = 1;
+	req->enablefwlog = 1;
+
+	ret = qmi_txn_init(&ab->qmi.handle, &txn,
+			   wlfw_ini_resp_msg_v01_ei, &resp);
+	if (ret < 0)
+		goto out;
+
+	ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+			       QMI_WLFW_INI_REQ_V01,
+			       WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN,
+			       wlfw_ini_req_msg_v01_ei, req);
+
+	if (ret < 0) {
+		ath11k_warn(ab, "Failed to send init request for enabling fwlog = %d\n", ret);
+		qmi_txn_cancel(&txn);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+	if (ret < 0) {
+		ath11k_warn(ab, "fwlog enable wait for resp failed: %d\n", ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		ath11k_warn(ab, "fwlog enable request failed, result: %d, err: %d\n",
+			    resp.resp.result, resp.resp.error);
+		ret = -EINVAL;
+		goto out;
+	}
+out:
+	kfree(req);
+	return ret;
+}
+
 int ath11k_qmi_init_service(struct ath11k_base *ab)
 {
 	int ret;
@@ -3256,8 +3737,7 @@
 		return ret;
 	}
 
-	ab->qmi.event_wq = alloc_workqueue("ath11k_qmi_driver_event",
-					   WQ_UNBOUND, 1);
+	ab->qmi.event_wq = alloc_ordered_workqueue("ath11k_qmi_driver_event", 0);
 	if (!ab->qmi.event_wq) {
 		ath11k_err(ab, "failed to allocate workqueue\n");
 		return -EFAULT;
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/qmi.h linux-6.4-fbx/drivers/net/wireless/ath/ath11k/qmi.h
--- linux-6.4/drivers/net/wireless/ath/ath11k/qmi.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/qmi.h	2023-05-22 20:06:42.215799858 +0200
@@ -37,7 +37,7 @@
 
 #define QMI_WLANFW_MAX_DATA_SIZE_V01		6144
 #define ATH11K_FIRMWARE_MODE_OFF		4
-#define ATH11K_COLD_BOOT_FW_RESET_DELAY		(40 * HZ)
+#define ATH11K_COLD_BOOT_FW_RESET_DELAY		(60 * HZ)
 
 #define ATH11K_QMI_DEVICE_BAR_SIZE		0x200000
 
@@ -154,6 +154,7 @@
 #define BDF_MEM_REGION_TYPE				0x2
 #define M3_DUMP_REGION_TYPE				0x3
 #define CALDB_MEM_REGION_TYPE				0x4
+#define QMI_MEM_REGION_TYPE				0
 
 struct qmi_wlanfw_host_cap_req_msg_v01 {
 	u8 num_clients_valid;
@@ -228,6 +229,18 @@
 	u64 fw_status;
 };
 
+struct wlfw_ini_req_msg_v01 {
+	u8 enablefwlog_valid;
+	u8 enablefwlog;
+};
+
+struct wlfw_ini_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLFW_INI_REQ_V01 0x002F
+#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
+
 #define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN	1824
 #define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN	888
 #define QMI_WLANFW_RESPOND_MEM_RESP_MSG_V01_MAX_LEN	7
@@ -235,6 +248,11 @@
 #define QMI_WLANFW_RESPOND_MEM_REQ_V01			0x0036
 #define QMI_WLANFW_RESPOND_MEM_RESP_V01			0x0036
 #define QMI_WLANFW_MAX_NUM_MEM_CFG_V01			2
+#define QMI_WLANFW_MAX_STR_LEN_V01                      16
+#define QMI_WLANFW_MEM_WRITE_REQ_V01			0x0031
+#define QMI_WLANFW_MEM_WRITE_REQ_MSG_V01_MAX_MSG_LEN	6163
+#define QMI_WLANFW_MEM_READ_REQ_V01			0x0030
+#define QMI_WLANFW_MEM_READ_REQ_MSG_V01_MAX_MSG_LEN	21
 
 struct qmi_wlanfw_mem_cfg_s_v01 {
 	u64 offset;
@@ -511,6 +529,30 @@
 	struct qmi_response_type_v01 resp;
 };
 
+struct qmi_wlanfw_mem_read_req_msg_v01 {
+	u32 offset;
+	u32 mem_type;
+	u32 data_len;
+};
+
+struct qmi_wlanfw_mem_read_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 data_valid;
+	u32 data_len;
+	u8 data[QMI_WLANFW_MAX_DATA_SIZE_V01];
+};
+
+struct qmi_wlanfw_mem_write_req_msg_v01 {
+	u32 offset;
+	u32 mem_type;
+	u32 data_len;
+	u8 data[QMI_WLANFW_MAX_DATA_SIZE_V01];
+};
+
+struct qmi_wlanfw_mem_write_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
 int ath11k_qmi_firmware_start(struct ath11k_base *ab,
 			      u32 mode);
 void ath11k_qmi_firmware_stop(struct ath11k_base *ab);
@@ -519,5 +561,9 @@
 void ath11k_qmi_deinit_service(struct ath11k_base *ab);
 int ath11k_qmi_init_service(struct ath11k_base *ab);
 void ath11k_qmi_free_resource(struct ath11k_base *ab);
+int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab);
+int ath11k_enable_fwlog(struct ath11k_base *ab);
+int ath11k_qmi_mem_read(struct ath11k_base *ab, u32 mem_addr, void *mem_value, size_t count);
+int ath11k_qmi_mem_write(struct ath11k_base *ab, u32 mem_addr, void* mem_value, size_t count);
 
 #endif
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/testmode.c linux-6.4-fbx/drivers/net/wireless/ath/ath11k/testmode.c
--- linux-6.4/drivers/net/wireless/ath/ath11k/testmode.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/testmode.c	2023-11-07 13:38:44.002255052 +0100
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include "testmode.h"
@@ -9,69 +10,218 @@
 #include "wmi.h"
 #include "hw.h"
 #include "core.h"
-#include "testmode_i.h"
+#include "../testmode_i.h"
 
-static const struct nla_policy ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = {
-	[ATH11K_TM_ATTR_CMD]		= { .type = NLA_U32 },
-	[ATH11K_TM_ATTR_DATA]		= { .type = NLA_BINARY,
-					    .len = ATH11K_TM_DATA_MAX_LEN },
-	[ATH11K_TM_ATTR_WMI_CMDID]	= { .type = NLA_U32 },
-	[ATH11K_TM_ATTR_VERSION_MAJOR]	= { .type = NLA_U32 },
-	[ATH11K_TM_ATTR_VERSION_MINOR]	= { .type = NLA_U32 },
+#define ATH11K_FTM_SEGHDR_CURRENT_SEQ		GENMASK(3, 0)
+#define ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS	GENMASK(7, 4)
+
+#define ATH11K_FWLOG_MAX_LEN		2048
+
+static const struct nla_policy ath11k_tm_policy[ATH_TM_ATTR_MAX + 1] = {
+	[ATH_TM_ATTR_CMD]		= { .type = NLA_U32 },
+	[ATH_TM_ATTR_DATA]		= { .type = NLA_BINARY,
+					    .len = ATH_TM_DATA_MAX_LEN },
+	[ATH_TM_ATTR_WMI_CMDID]	= { .type = NLA_U32 },
+	[ATH_TM_ATTR_VERSION_MAJOR]	= { .type = NLA_U32 },
+	[ATH_TM_ATTR_VERSION_MINOR]	= { .type = NLA_U32 },
+	[ATH_TM_ATTR_FWLOG]		= { .type = NLA_BINARY,
+					    .len = ATH11K_FWLOG_MAX_LEN },
 };
 
-/* Returns true if callee consumes the skb and the skb should be discarded.
- * Returns false if skb is not used. Does not sleep.
- */
-bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb)
+void ath11k_fwlog_write(struct ath11k_base *ab,  u8 *data, int len)
 {
 	struct sk_buff *nl_skb;
-	bool consumed;
-	int ret;
+	int ret, i;
+	struct ath11k *ar = NULL;
+	struct ath11k_pdev *pdev;
 
-	ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
-		   "testmode event wmi cmd_id %d skb %pK skb->len %d\n",
-		   cmd_id, skb, skb->len);
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		if (pdev && pdev->ar) {
+			ar = pdev->ar;
+			break;
+		}
+	}
 
-	ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+	if (!ar)
+		return;
 
-	spin_lock_bh(&ar->data_lock);
+	nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
+						   len, GFP_ATOMIC);
+	if (!nl_skb) {
+		ath11k_warn(ab,
+			    "failed to allocate skb for fwlog event\n");
+		return;
+	}
+
+	ret = nla_put(nl_skb, ATH_TM_ATTR_FWLOG, len, data);
+	if (ret) {
+		ath11k_warn(ab,
+			    "failed to to put fwlog wmi event to nl: %d\n",
+			    ret);
+		kfree_skb(nl_skb);
+		return;
+	}
+
+	cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
 
-	consumed = true;
+static struct ath11k *ath11k_tm_get_ar(struct ath11k_base *ab)
+{
+	struct ath11k_pdev *pdev;
+	struct ath11k *ar = NULL;
+	int i;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		ar = pdev->ar;
+		if (ar) {
+			if (ar->state == ATH11K_STATE_FTM)
+				break;
+		}
+	}
+
+	return ar;
+}
+
+/* This function handles unsegmented events. Data from various events are
+ * aggregated in the application layer; the event is unsegmented from the
+ * host's perspective. */
+void ath11k_tm_wmi_event_unsegmented(struct ath11k_base *ab, u32 cmd_id,
+				     struct sk_buff *skb)
+{
+	struct sk_buff *nl_skb;
+	struct ath11k *ar;
+
+	ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
+		   "testmode event wmi cmd_id %d skb length %d\n",
+		   cmd_id, skb->len);
+	ath11k_dbg_dump(ab, ATH11K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+	ar = ath11k_tm_get_ar(ab);
+	if (!ar) {
+		ath11k_warn(ab, "testmode event not handled due to invalid pdev\n");
+		return;
+	}
+
+	spin_lock_bh(&ar->data_lock);
 
 	nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
-						   2 * sizeof(u32) + skb->len,
+						   2 * nla_total_size(sizeof(u32)) +
+						   nla_total_size(skb->len),
 						   GFP_ATOMIC);
 	if (!nl_skb) {
-		ath11k_warn(ar->ab,
+		ath11k_warn(ab,
 			    "failed to allocate skb for testmode wmi event\n");
 		goto out;
 	}
 
-	ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI);
-	if (ret) {
-		ath11k_warn(ar->ab,
-			    "failed to put testmode wmi event cmd attribute: %d\n",
-			    ret);
+	if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, ATH_TM_CMD_WMI) ||
+	    nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+	    nla_put(nl_skb, ATH_TM_ATTR_DATA, skb->len, skb->data)) {
+		ath11k_warn(ab, "failed to populate testmode unsegmented event\n");
 		kfree_skb(nl_skb);
 		goto out;
 	}
 
-	ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id);
-	if (ret) {
-		ath11k_warn(ar->ab,
-			    "failed to put testmode wmi even cmd_id: %d\n",
-			    ret);
-		kfree_skb(nl_skb);
+	cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+	spin_unlock_bh(&ar->data_lock);
+	return;
+
+out:
+	spin_unlock_bh(&ar->data_lock);
+	ath11k_warn(ab, "Failed to send testmode event to higher layers\n");
+}
+
+/* This function handles segmented events.
+ * Data from the various events received from firmware is aggregated
+ * and sent to the application layer.
+ */
+int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
+			    const struct wmi_ftm_event_msg *ftm_msg,
+			    u16 length)
+{
+	struct sk_buff *nl_skb;
+	int ret = 0;
+	struct ath11k *ar;
+	u8 const *buf_pos;
+	u16 datalen;
+	u8 total_segments, current_seq;
+	u32 data_pos;
+	u32 pdev_id;
+
+	ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
+		   "testmode event wmi cmd_id %d ftm event msg %pK datalen %d\n",
+		   cmd_id, ftm_msg, length);
+	ath11k_dbg_dump(ab, ATH11K_DBG_TESTMODE, NULL, "", ftm_msg, length);
+	pdev_id = DP_HW2SW_MACID(ftm_msg->seg_hdr.pdev_id);
+
+	if (pdev_id >= ab->num_radios) {
+		ath11k_warn(ab, "testmode event not handled due to invalid pdev id\n");
+		return -EINVAL;
+	}
+
+	ar = ab->pdevs[pdev_id].ar;
+	if (!ar) {
+		ath11k_warn(ab, "testmode event not handled due to absence of pdev\n");
+		return -ENODEV;
+	}
+
+	current_seq = FIELD_GET(ATH11K_FTM_SEGHDR_CURRENT_SEQ,
+				ftm_msg->seg_hdr.segmentinfo);
+	total_segments = FIELD_GET(ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS,
+				   ftm_msg->seg_hdr.segmentinfo);
+	datalen = length - (sizeof(struct wmi_ftm_seg_hdr));
+	buf_pos = ftm_msg->data;
+
+	spin_lock_bh(&ar->data_lock);
+	if (current_seq == 0) {
+		ab->ftm_event_obj.expected_seq = 0;
+		ab->ftm_event_obj.data_pos = 0;
+	}
+
+	data_pos = ab->ftm_event_obj.data_pos;
+
+	if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) {
+		ath11k_warn(ab,
+			    "Invalid event length date_pos[%d] datalen[%d]\n",
+			    data_pos, datalen);
+		ret = -EINVAL;
 		goto out;
 	}
 
-	ret = nla_put(nl_skb, ATH11K_TM_ATTR_DATA, skb->len, skb->data);
-	if (ret) {
-		ath11k_warn(ar->ab,
-			    "failed to copy skb to testmode wmi event: %d\n",
-			    ret);
+	memcpy(&ab->ftm_event_obj.eventdata[data_pos], buf_pos, datalen);
+	data_pos += datalen;
+
+	if (++ab->ftm_event_obj.expected_seq != total_segments) {
+		ab->ftm_event_obj.data_pos = data_pos;
+		ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
+			   "partial data received current_seq[%d], total_seg[%d]\n",
+			   current_seq, total_segments);
+		goto out;
+	}
+
+	ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
+		   "total data length[%d] = [%d]\n",
+		   data_pos, ftm_msg->seg_hdr.len);
+	nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
+						   2 * nla_total_size(sizeof(u32)) +
+						   nla_total_size(data_pos),
+						   GFP_ATOMIC);
+	if (!nl_skb) {
+		ath11k_warn(ab,
+			    "failed to allocate skb for testmode wmi event\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, ATH_TM_CMD_WMI_FTM) ||
+	    nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+	    nla_put(nl_skb, ATH_TM_ATTR_DATA, data_pos,
+		    &ab->ftm_event_obj.eventdata[0])) {
+		ath11k_warn(ab, "failed to populate testmode event");
 		kfree_skb(nl_skb);
+		ret = -ENOBUFS;
 		goto out;
 	}
 
@@ -79,74 +229,133 @@
 
 out:
 	spin_unlock_bh(&ar->data_lock);
-
-	return consumed;
+	return ret;
 }
 
 static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[])
 {
 	struct sk_buff *skb;
-	int ret;
 
 	ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
 		   "testmode cmd get version_major %d version_minor %d\n",
-		   ATH11K_TESTMODE_VERSION_MAJOR,
-		   ATH11K_TESTMODE_VERSION_MINOR);
+		   ATH_TESTMODE_VERSION_MAJOR,
+		   ATH_TESTMODE_VERSION_MINOR);
 
 	skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
 						nla_total_size(sizeof(u32)));
 	if (!skb)
 		return -ENOMEM;
 
-	ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MAJOR,
-			  ATH11K_TESTMODE_VERSION_MAJOR);
-	if (ret) {
+	if (nla_put_u32(skb, ATH_TM_ATTR_VERSION_MAJOR,
+			ATH_TESTMODE_VERSION_MAJOR) ||
+	    nla_put_u32(skb, ATH_TM_ATTR_VERSION_MINOR,
+			ATH_TESTMODE_VERSION_MINOR)) {
 		kfree_skb(skb);
-		return ret;
+		return -ENOBUFS;
 	}
 
-	ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MINOR,
-			  ATH11K_TESTMODE_VERSION_MINOR);
-	if (ret) {
-		kfree_skb(skb);
-		return ret;
+	return cfg80211_testmode_reply(skb);
 	}
 
-	return cfg80211_testmode_reply(skb);
+static int ath11k_tm_cmd_testmode_start(struct ath11k *ar, struct nlattr *tb[])
+{
+	int ret;
+
+	ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE, " enter testmode cmd fw start\n");
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state == ATH11K_STATE_FTM) {
+		ret = -EALREADY;
+		goto err;
+	}
+
+	/* start utf only when the driver is not in use  */
+	if (ar->state != ATH11K_STATE_OFF) {
+		ret = -EBUSY;
+		goto err;
+	}
+
+	ar->ab->ftm_event_obj.eventdata =
+		kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH, GFP_KERNEL);
+	if (!ar->ab->ftm_event_obj.eventdata) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ar->state = ATH11K_STATE_FTM;
+	ar->ftm_msgref = 0;
+	mutex_unlock(&ar->conf_mutex);
+	ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE, " enter testmode cmd started\n");
+	return 0;
+err:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
 }
 
-static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[])
+static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[],
+			     struct ieee80211_vif *vif)
 {
 	struct ath11k_pdev_wmi *wmi = ar->wmi;
 	struct sk_buff *skb;
+	struct ath11k_vif *arvif;
 	u32 cmd_id, buf_len;
-	int ret;
+	int ret, tag;
 	void *buf;
+	u32 *ptr;
 
 	mutex_lock(&ar->conf_mutex);
 
-	if (ar->state != ATH11K_STATE_ON) {
-		ret = -ENETDOWN;
+	if (!tb[ATH_TM_ATTR_DATA]) {
+		ret = -EINVAL;
 		goto out;
 	}
 
-	if (!tb[ATH11K_TM_ATTR_DATA]) {
+	if (!tb[ATH_TM_ATTR_WMI_CMDID]) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	if (!tb[ATH11K_TM_ATTR_WMI_CMDID]) {
+	buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+	buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
+	if (!buf_len) {
+		ath11k_warn(ar->ab, "No data present in testmode command\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
-	buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
-	buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
-	cmd_id = nla_get_u32(tb[ATH11K_TM_ATTR_WMI_CMDID]);
+	cmd_id = nla_get_u32(tb[ATH_TM_ATTR_WMI_CMDID]);
+
+	/* Make sure that the buffer length is long enough to
+	 * hold TLV and pdev/vdev id.
+	 */
+	if (buf_len < sizeof(struct wmi_tlv) + sizeof(u32)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ptr = buf;
+	tag = FIELD_GET(WMI_TLV_TAG, *ptr);
+
+	/* pdev/vdev id start after TLV header */
+	ptr++;
+
+	if (tag == WMI_TAG_PDEV_SET_PARAM_CMD)
+		*ptr = ar->pdev->pdev_id;
+
+	if (ar->ab->fw_mode != ATH11K_FIRMWARE_MODE_FTM &&
+	    (tag == WMI_TAG_VDEV_SET_PARAM_CMD || tag == WMI_TAG_UNIT_TEST_CMD)) {
+		if (vif) {
+			arvif = (struct ath11k_vif *)vif->drv_priv;
+			*ptr = arvif->vdev_id;
+		} else {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
 
 	ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
-		   "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
-		   cmd_id, buf, buf_len);
+		   "testmode cmd wmi cmd_id %d buf length %d\n",
+		   cmd_id, buf_len);
 
 	ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", buf, buf_len);
 
@@ -173,26 +382,112 @@
 	return ret;
 }
 
+static int ath11k_tm_cmd_process_ftm(struct ath11k *ar, struct nlattr *tb[])
+{
+	struct ath11k_pdev_wmi *wmi = ar->wmi;
+	struct sk_buff *skb;
+	u32 cmd_id, buf_len, hdr_info;
+	int ret;
+	void *buf;
+	u8 segnumber = 0, seginfo;
+	u16 chunk_len, total_bytes, num_segments;
+	u8 *bufpos;
+	struct wmi_ftm_cmd *ftm_cmd;
+
+	mutex_lock(&ar->conf_mutex);
+	ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE, "ar->state  %d\n", ar->state);
+
+	if (ar->state != ATH11K_STATE_FTM) {
+		ret = -ENETDOWN;
+		goto out;
+	}
+
+	if (!tb[ATH_TM_ATTR_DATA]) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+	buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
+	cmd_id = WMI_PDEV_UTF_CMDID;
+	ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
+		   "testmode cmd wmi cmd_id %d buffer length %d\n",
+		   cmd_id, buf_len);
+	ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", buf, buf_len);
+	bufpos = buf;
+	total_bytes = buf_len;
+	num_segments = total_bytes / MAX_WMI_UTF_LEN;
+
+	if (buf_len - (num_segments * MAX_WMI_UTF_LEN))
+		num_segments++;
+
+	while (buf_len) {
+		if (buf_len > MAX_WMI_UTF_LEN)
+			chunk_len = MAX_WMI_UTF_LEN;    /* MAX message */
+		else
+			chunk_len = buf_len;
+
+		skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, (chunk_len +
+					   sizeof(struct wmi_ftm_cmd)));
+		if (!skb) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		ftm_cmd = (struct wmi_ftm_cmd *)skb->data;
+		hdr_info = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+			   FIELD_PREP(WMI_TLV_LEN, (chunk_len +
+				      sizeof(struct wmi_ftm_seg_hdr)));
+		ftm_cmd->tlv_header = hdr_info;
+		ftm_cmd->seg_hdr.len = total_bytes;
+		ftm_cmd->seg_hdr.msgref = ar->ftm_msgref;
+		seginfo = FIELD_PREP(ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS, num_segments) |
+			  FIELD_PREP(ATH11K_FTM_SEGHDR_CURRENT_SEQ, segnumber);
+		ftm_cmd->seg_hdr.segmentinfo = seginfo;
+		segnumber++;
+		memcpy(&ftm_cmd->data, bufpos, chunk_len);
+		ret = ath11k_wmi_cmd_send(wmi, skb, cmd_id);
+		if (ret) {
+			ath11k_warn(ar->ab, "ftm wmi command fail: %d\n", ret);
+			goto out;
+		}
+
+		buf_len -= chunk_len;
+		bufpos += chunk_len;
+	}
+	++ar->ftm_msgref;
+	ret = 0;
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
 int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		  void *data, int len)
 {
 	struct ath11k *ar = hw->priv;
-	struct nlattr *tb[ATH11K_TM_ATTR_MAX + 1];
+	struct ath11k_base *ab = ar->ab;
+	struct nlattr *tb[ATH_TM_ATTR_MAX + 1];
 	int ret;
 
-	ret = nla_parse(tb, ATH11K_TM_ATTR_MAX, data, len, ath11k_tm_policy,
+	ret = nla_parse(tb, ATH_TM_ATTR_MAX, data, len, ath11k_tm_policy,
 			NULL);
 	if (ret)
 		return ret;
 
-	if (!tb[ATH11K_TM_ATTR_CMD])
+	if (!tb[ATH_TM_ATTR_CMD])
 		return -EINVAL;
 
-	switch (nla_get_u32(tb[ATH11K_TM_ATTR_CMD])) {
-	case ATH11K_TM_CMD_GET_VERSION:
+	switch (nla_get_u32(tb[ATH_TM_ATTR_CMD])) {
+	case ATH_TM_CMD_GET_VERSION:
 		return ath11k_tm_cmd_get_version(ar, tb);
-	case ATH11K_TM_CMD_WMI:
-		return ath11k_tm_cmd_wmi(ar, tb);
+	case ATH_TM_CMD_TESTMODE_START:
+		return ath11k_tm_cmd_testmode_start(ar, tb);
+	case ATH_TM_CMD_WMI:
+		return ath11k_tm_cmd_wmi(ar, tb, vif);
+	case ATH_TM_CMD_WMI_FTM:
+		set_bit(ATH11K_FLAG_FTM_SEGMENTED, &ab->dev_flags);
+		return ath11k_tm_cmd_process_ftm(ar, tb);
 	default:
 		return -EOPNOTSUPP;
 	}
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/testmode.h linux-6.4-fbx/drivers/net/wireless/ath/ath11k/testmode.h
--- linux-6.4/drivers/net/wireless/ath/ath11k/testmode.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/testmode.h	2023-05-22 20:06:42.219799965 +0200
@@ -1,22 +1,35 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include "core.h"
 
 #ifdef CONFIG_NL80211_TESTMODE
 
-bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb);
+void ath11k_tm_wmi_event_unsegmented(struct ath11k_base *ab, u32 cmd_id,
+				     struct sk_buff *skb);
+int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
+			    const struct wmi_ftm_event_msg *ftm_msg,
+			    u16 length);
 int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		  void *data, int len);
 
+void ath11k_fwlog_write(struct ath11k_base *ab,  u8 *data, int len);
 #else
 
-static inline bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id,
+static inline void ath11k_tm_wmi_event_unsegmented(struct ath11k_base *ab,
+						   u32 cmd_id,
 				       struct sk_buff *skb)
 {
-	return false;
+}
+
+static inline int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
+					  const struct wmi_ftm_event_msg *msg,
+					  u16 length)
+{
+	return 0;
 }
 
 static inline int ath11k_tm_cmd(struct ieee80211_hw *hw,
@@ -26,4 +39,9 @@
 	return 0;
 }
 
+static inline void ath11k_fwlog_write(struct ath11k_base *ab,  u8 *data,
+				     int len)
+{
+
+}
 #endif
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/wmi.c linux-6.4-fbx/drivers/net/wireless/ath/ath11k/wmi.c
--- linux-6.4/drivers/net/wireless/ath/ath11k/wmi.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/wmi.c	2023-12-05 17:14:42.307715234 +0100
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #include <linux/skbuff.h>
 #include <linux/ctype.h>
@@ -19,6 +19,7 @@
 #include "mac.h"
 #include "hw.h"
 #include "peer.h"
+#include "testmode.h"
 
 struct wmi_tlv_policy {
 	size_t min_len;
@@ -3871,7 +3872,7 @@
 	switch (ev->evt_type) {
 	case WMI_BSS_COLOR_COLLISION_DETECTION:
 		ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
-						      GFP_KERNEL);
+						      GFP_ATOMIC);
 		ath11k_dbg(ab, ATH11K_DBG_WMI,
 			   "OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
 			   ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
@@ -7917,12 +7918,14 @@
 	survey->noise     = bss_ch_info_ev.noise_floor;
 	survey->time      = div_u64(total, cc_freq_hz);
 	survey->time_busy = div_u64(busy, cc_freq_hz);
-	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
+	survey->time_rx   = div_u64(rx, cc_freq_hz);
 	survey->time_tx   = div_u64(tx, cc_freq_hz);
+	survey->time_bss_rx   = div_u64(rx_bss, cc_freq_hz);
 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
 			     SURVEY_INFO_TIME |
 			     SURVEY_INFO_TIME_BUSY |
 			     SURVEY_INFO_TIME_RX |
+			     SURVEY_INFO_TIME_BSS_RX |
 			     SURVEY_INFO_TIME_TX);
 exit:
 	spin_unlock_bh(&ar->data_lock);
@@ -8237,6 +8240,8 @@
 		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
 		   ev->freq_offset, ev->sidx);
 
+	rcu_read_lock();
+
 	ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
 
 	if (!ar) {
@@ -8254,6 +8259,39 @@
 		ieee80211_radar_detected(ar->hw);
 
 exit:
+	rcu_read_unlock();
+
+	kfree(tb);
+}
+
+static void
+ath11k_wmi_tm_event_segmented(struct ath11k_base *ab, u32 cmd_id,
+			      struct sk_buff *skb)
+{
+	const void **tb;
+	const struct wmi_ftm_event_msg *ev;
+	u16 length;
+	int ret;
+
+	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath11k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
+		return;
+	}
+
+	ev = tb[WMI_TAG_ARRAY_BYTE];
+	if (!ev) {
+		ath11k_warn(ab, "failed to fetch ftm msg\n");
+		kfree(tb);
+		return;
+	}
+
+	length = skb->len - TLV_HDR_SIZE;
+	ret = ath11k_tm_process_event(ab, cmd_id, ev, length);
+	if (ret)
+		ath11k_warn(ab, "Failed to process ftm event\n");
+
 	kfree(tb);
 }
 
@@ -8283,15 +8321,19 @@
 	ath11k_dbg(ab, ATH11K_DBG_WMI,
 		   "pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id);
 
+	rcu_read_lock();
+
 	ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
 	if (!ar) {
 		ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
-		kfree(tb);
-		return;
+		goto exit;
 	}
 
 	ath11k_thermal_event_temperature(ar, ev->temp);
 
+exit:
+	rcu_read_unlock();
+
 	kfree(tb);
 }
 
@@ -8410,13 +8452,6 @@
 	complete(&ab->wow.wakeup_completed);
 }
 
-static void
-ath11k_wmi_diag_event(struct ath11k_base *ab,
-		      struct sk_buff *skb)
-{
-	trace_ath11k_wmi_diag(ab, skb->data, skb->len);
-}
-
 static const char *ath11k_wmi_twt_add_dialog_event_status(u32 status)
 {
 	switch (status) {
@@ -8445,6 +8480,130 @@
 	}
 }
 
+static int ath11k_wmi_tbtt_offset_subtlv_parser(struct ath11k_base *ab, u16 tag,
+						u16 len, const void *ptr,
+						void *data)
+{
+	int ret = 0;
+	struct ath11k *ar;
+	u64 tx_delay = 0;
+	struct wmi_tbtt_offset_info *tbtt_offset_info;
+	struct ieee80211_chanctx_conf *conf;
+	struct ath11k_vif *arvif;
+
+	tbtt_offset_info = (struct wmi_tbtt_offset_info *)ptr;
+
+	rcu_read_lock();
+	ar = ath11k_mac_get_ar_by_vdev_id(ab, tbtt_offset_info->vdev_id);
+	if (!ar) {
+		ath11k_warn(ab, "ar not found, vdev_id %d\n", tbtt_offset_info->vdev_id);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	arvif = ath11k_mac_get_arvif(ar, tbtt_offset_info->vdev_id);
+	if (!arvif) {
+		ath11k_warn(ab, "arvif not found, vdev_id %d\n",
+			    tbtt_offset_info->vdev_id);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+		ret = 0;
+		goto exit;
+	}
+
+	arvif->tbtt_offset = tbtt_offset_info->tbtt_offset;
+
+	conf = rcu_dereference(arvif->vif->bss_conf.chanctx_conf);
+	if (conf && conf->def.chan->band == NL80211_BAND_2GHZ) {
+		/* 1Mbps Beacon: */
+		/* 144 us ( LPREAMBLE) + 48 (PLCP Header)
+		 * + 192 (1Mbps, 24 ytes)
+		 * = 384 us + 2us(MAC/BB DELAY
+		 */
+		tx_delay = 386;
+	} else if (conf && (conf->def.chan->band == NL80211_BAND_5GHZ ||
+			    conf->def.chan->band == NL80211_BAND_6GHZ)) {
+		/* 6Mbps Beacon: */
+		/*20(lsig)+2(service)+32(6mbps, 24 bytes)
+		 *= 54us + 2us(MAC/BB DELAY)
+		 */
+		tx_delay = 56;
+	}
+	arvif->tbtt_offset -= tx_delay;
+
+	ieee80211_queue_work(ar->hw, &arvif->update_bcn_template_work);
+exit:
+	rcu_read_unlock();
+	return ret;
+}
+
+static int ath11k_wmi_tbtt_offset_event_parser(struct ath11k_base *ab,
+					       u16 tag, u16 len,
+					       const void *ptr, void *data)
+{
+	int ret = 0;
+
+	ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi tbtt offset event tag 0x%x of len %d rcvd\n",
+		   tag, len);
+
+	switch (tag) {
+	case WMI_TAG_TBTT_OFFSET_EXT_EVENT:
+		break;
+	case WMI_TAG_ARRAY_STRUCT:
+		ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+					  ath11k_wmi_tbtt_offset_subtlv_parser,
+					  data);
+		break;
+	default:
+		ath11k_warn(ab, "Received invalid tag for wmi tbtt offset event\n");
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int ath11k_wmi_pull_tbtt_offset(struct ath11k_base *ab, struct sk_buff *skb,
+				       struct wmi_tbtt_offset_ev_arg *arg)
+{
+	struct wmi_tbtt_offset_event *ev = NULL;
+	struct wmi_tbtt_offset_info tbtt_offset_info = {0};
+	struct wmi_tlv *tlv;
+	int ret;
+	u8 *ptr;
+	u16 tlv_tag;
+
+	ptr = skb->data;
+
+	if (skb->len < (sizeof(*ev) + TLV_HDR_SIZE)) {
+		ath11k_warn(ab, "wmi_tbtt_offset event size invalid\n");
+		return -EINVAL;
+	}
+
+	tlv = (struct wmi_tlv *)ptr;
+	tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
+	ptr += sizeof(*tlv);
+
+	if (tlv_tag == WMI_TAG_TBTT_OFFSET_EXT_EVENT) {
+		ev = (struct wmi_tbtt_offset_event *)ptr;
+	} else {
+		ath11k_warn(ab, "tbtt event received with invalid tag\n");
+		return -EINVAL;
+	}
+
+	ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+				  ath11k_wmi_tbtt_offset_event_parser,
+				  &tbtt_offset_info);
+	if (ret) {
+		ath11k_warn(ab, "failed to parse tbtt tlv %d\n", ret);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
 					    struct sk_buff *skb)
 {
@@ -8501,12 +8660,13 @@
 		return;
 	}
 
+	rcu_read_lock();
+
 	arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
 	if (!arvif) {
 		ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
 			    ev->vdev_id);
-		kfree(tb);
-		return;
+		goto exit;
 	}
 
 	ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi gtk offload event refresh_cnt %d\n",
@@ -8523,10 +8683,45 @@
 
 	ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
 				   (void *)&replay_ctr_be, GFP_ATOMIC);
+exit:
+	rcu_read_unlock();
 
 	kfree(tb);
 }
 
+static void ath11k_wmi_diag_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+	const struct wmi_tlv *tlv;
+	u16 tlv_tag, tlv_len;
+	uint32_t *dev_id;
+	u8 *data;
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
+	tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
+
+	if (tlv_tag == WMI_TAG_ARRAY_BYTE) {
+		data = skb->data + sizeof(struct wmi_tlv);
+		dev_id = (uint32_t *)data;
+		*dev_id = ab->hw_params.hw_rev;
+	} else {
+		ath11k_warn(ab, "WMI Diag Event missing required tlv\n");
+		return;
+	}
+
+	ath11k_fwlog_write(ab,data, tlv_len);
+}
+
+void ath11k_wmi_event_tbttoffset_update(struct ath11k_base *ab, struct sk_buff *skb)
+{
+	struct wmi_tbtt_offset_ev_arg arg = {};
+	int ret;
+
+	ret = ath11k_wmi_pull_tbtt_offset(ab, skb, &arg);
+	if (ret)
+		ath11k_warn(ab, "failed to parse tbtt offset event: %d\n", ret);
+}
+
 static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
 {
 	struct wmi_cmd_hdr *cmd_hdr;
@@ -8612,6 +8807,12 @@
 	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
 		ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb);
 		break;
+	case WMI_PDEV_UTF_EVENTID:
+		if (test_bit(ATH11K_FLAG_FTM_SEGMENTED, &ab->dev_flags))
+			ath11k_wmi_tm_event_segmented(ab, id, skb);
+		else
+			ath11k_tm_wmi_event_unsegmented(ab, id, skb);
+		break;
 	case WMI_PDEV_TEMPERATURE_EVENTID:
 		ath11k_wmi_pdev_temperature_event(ab, skb);
 		break;
@@ -8630,8 +8831,10 @@
 	case WMI_TWT_ADD_DIALOG_EVENTID:
 		ath11k_wmi_twt_add_dialog_event(ab, skb);
 		break;
-	/* add Unsupported events here */
 	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
+		ath11k_wmi_event_tbttoffset_update(ab, skb);
+		break;
+	/* add Unsupported events here */
 	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
 	case WMI_TWT_ENABLE_EVENTID:
 	case WMI_TWT_DISABLE_EVENTID:
diff -ruw linux-6.4/drivers/net/wireless/ath/ath11k/wmi.h linux-6.4-fbx/drivers/net/wireless/ath/ath11k/wmi.h
--- linux-6.4/drivers/net/wireless/ath/ath11k/wmi.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath11k/wmi.h	2023-05-22 20:06:42.223800071 +0200
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH11K_WMI_H
@@ -68,6 +69,7 @@
 
 #define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
 
+#define MAX_WMI_UTF_LEN 252
 #define WMI_BA_MODE_BUFFER_SIZE_256  3
 /*
  * HW mode config type replicated from FW header
@@ -2095,6 +2097,7 @@
 	WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
 	WMI_TLV_SERVICE_EXT2_MSG = 220,
 	WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT = 246,
+	WMI_TLV_SERVICE_5_DOT_9GHZ_SUPPORT = 247,
 	WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT = 249,
 	WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE = 263,
 
@@ -3541,6 +3544,24 @@
 	u32 pdev_id;
 } __packed;
 
+struct wmi_ftm_seg_hdr {
+	u32 len;
+	u32 msgref;
+	u32 segmentinfo;
+	u32 pdev_id;
+} __packed;
+
+struct wmi_ftm_cmd {
+	u32 tlv_header;
+	struct wmi_ftm_seg_hdr seg_hdr;
+	u8 data[];
+} __packed;
+
+struct wmi_ftm_event_msg {
+	struct wmi_ftm_seg_hdr seg_hdr;
+	u8 data[];
+} __packed;
+
 #define WMI_BEACON_TX_BUFFER_SIZE	512
 
 struct wmi_bcn_tmpl_cmd {
@@ -4395,6 +4416,24 @@
 	u32  flag_info;
 };
 
+struct wmi_tbtt_offset_info {
+	u32 vdev_id;
+	u32 tbtt_offset;
+	u32 tbtt_qtime_low_us;
+	u32 tbtt_qtime_high_us;
+} __packed;
+
+struct wmi_tbtt_offset_event {
+	u32 num_vdevs;
+} __packed;
+
+struct wmi_tbtt_offset_ev_arg {
+	u32 vdev_id;
+	u32 tbtt_offset;
+	u32 tbtt_qtime_low_us;
+	u32 tbtt_qtime_high_us;
+} __packed;
+
 #define WMI_REG_CLIENT_MAX 4
 
 struct wmi_reg_chan_list_cc_ext_event {
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/Kconfig linux-6.4-fbx/drivers/net/wireless/ath/ath12k/Kconfig
--- linux-6.4/drivers/net/wireless/ath/ath12k/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/Kconfig	2024-04-04 13:46:08.942952695 +0200
@@ -32,3 +32,11 @@
 
 	  If unsure, say Y to make it easier to debug problems. But if
 	  you want optimal performance choose N.
+
+config ATH12K_DEBUGFS
+	bool "ath12k custom debugfs support"
+	depends on ATH12K
+
+config ATH12K_MEM_PROFILE_512M
+	bool "ath12k low memory profile"
+	depends on ATH12K
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/Makefile linux-6.4-fbx/drivers/net/wireless/ath/ath12k/Makefile
--- linux-6.4/drivers/net/wireless/ath/ath12k/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/Makefile	2024-01-19 17:01:19.849846593 +0100
@@ -18,10 +18,23 @@
 	    dbring.o \
 	    hw.o \
 	    mhi.o \
+	    pcic.o \
 	    pci.o \
-	    dp_mon.o
+	    dp_mon.o \
+	    vendor.o \
+	    umac_reset.o
 
+ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
+ath12k-$(CONFIG_NL80211_TESTMODE) += testmode.o
 ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
+ath12k-$(CONFIG_THERMAL) += thermal.o
+ath12k-$(CONFIG_ATH12K_SPECTRAL) += spectral.o
+ath12k-$(CONFIG_WANT_DEV_COREDUMP) += coredump.o
+ath12k-$(CONFIG_ATH12K_PKTLOG) += pktlog.o
+ath12k-$(CONFIG_ATH12K_AHB) += ahb.o
+ath12k-$(CONFIG_ATH12K_PPE_DS_SUPPORT) += ppe.o
+ath12k-$(CONFIG_ATH12K_BONDED_DS_SUPPORT) += bondif.o
+ath12k-$(CONFIG_ATH12K_SAWF) += sawf.o telemetry.o telemetry_agent_if.o
 
 # for tracing framework to find trace.h
 CFLAGS_trace.o := -I$(src)
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/ce.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/ce.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/ce.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/ce.c	2024-01-19 17:01:19.853846702 +0100
@@ -41,6 +41,7 @@
 		.src_nentries = 32,
 		.src_sz_max = 2048,
 		.dest_nentries = 0,
+		.send_cb = ath12k_htc_tx_completion_handler,
 	},
 
 	/* CE4: host->target HTT */
@@ -74,6 +75,7 @@
 		.src_nentries = 32,
 		.src_sz_max = 2048,
 		.dest_nentries = 0,
+		.send_cb = ath12k_htc_tx_completion_handler,
 	},
 
 	/* CE8: target autonomous hif_memcpy */
@@ -140,6 +142,14 @@
 		.src_sz_max = 0,
 		.dest_nentries = 0,
 	},
+
+	/* CE15: reserved for customer use */
+	{
+		.flags = (CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+        },
 };
 
 const struct ce_attr ath12k_host_ce_config_wcn7850[] = {
@@ -219,6 +229,98 @@
 
 };
 
+const struct ce_attr ath12k_host_ce_config_ipq5332[] = {
+	/* CE0: host->target HTC control and raw streams */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 16,
+		.src_sz_max = 2048,
+		.dest_nentries = 0,
+	},
+	/* CE1: target->host HTT + HTC control */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 512,
+		.recv_cb = ath12k_htc_rx_completion_handler,
+	},
+	/* CE2: target->host WMI */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 128,
+		.recv_cb = ath12k_htc_rx_completion_handler,
+	},
+	/* CE3: host->target WMI */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 32,
+		.src_sz_max = 2048,
+		.dest_nentries = 0,
+		.send_cb = ath12k_htc_tx_completion_handler,
+	},
+	/* CE4: host->target HTT */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 2048,
+		.src_sz_max = 256,
+		.dest_nentries = 0,
+	},
+	/* CE5: target -> host PKTLOG */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 512,
+		.recv_cb = ath12k_dp_htt_htc_t2h_msg_handler,
+	},
+	/* CE6: Target autonomous HIF_memcpy */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+	/* CE7: CV Prefetch */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+		.send_cb = ath12k_htc_tx_completion_handler,
+	},
+	/* CE8: Target HIF memcpy (Generic HIF memcypy) */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+	/* CE9: WMI logging/CFR/Spectral/Radar */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 128,
+	},
+	/* CE10: Unused */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+	/* CE11: Unused */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+};
+
 static int ath12k_ce_rx_buf_enqueue_pipe(struct ath12k_ce_pipe *pipe,
 					 struct sk_buff *skb, dma_addr_t paddr)
 {
@@ -447,18 +549,31 @@
 	return skb;
 }
 
-static void ath12k_ce_send_done_cb(struct ath12k_ce_pipe *pipe)
+static void ath12k_ce_tx_process_cb(struct ath12k_ce_pipe *pipe)
 {
 	struct ath12k_base *ab = pipe->ab;
 	struct sk_buff *skb;
+	struct sk_buff_head list;
 
+	__skb_queue_head_init(&list);
 	while (!IS_ERR(skb = ath12k_ce_completed_send_next(pipe))) {
 		if (!skb)
 			continue;
 
 		dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr, skb->len,
 				 DMA_TO_DEVICE);
+		if ((!pipe->send_cb) || ab->hw_params->credit_flow) {
 		dev_kfree_skb_any(skb);
+			continue;
+		}
+
+		__skb_queue_tail(&list, skb);
+	}
+
+	while ((skb = __skb_dequeue(&list))) {
+		ath12k_dbg(ab, ATH12K_DBG_PCI, "tx ce pipe %d len %d\n",
+			   pipe->pipe_num, skb->len);
+		pipe->send_cb(ab, skb);
 	}
 }
 
@@ -484,6 +599,9 @@
 
 	ring_params->msi_addr = addr_lo;
 	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
+	if (ab->hif.bus == ATH12K_BUS_HYBRID)
+		ring_params->msi_data = ab->ipci.ce_msi_data[ce_id];
+	else
 	ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
 	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
 }
@@ -528,7 +646,7 @@
 
 	/* TODO: Init other params needed by HAL to init the ring */
 
-	ret = ath12k_hal_srng_setup(ab, type, ce_id, 0, &params);
+	ret = ath12k_hal_srng_setup_idx(ab, type, ce_id, 0, &params, 0);
 	if (ret < 0) {
 		ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
 			    ret, ce_id);
@@ -588,7 +706,7 @@
 	pipe->attr_flags = attr->flags;
 
 	if (attr->src_nentries) {
-		pipe->send_cb = ath12k_ce_send_done_cb;
+		pipe->send_cb = attr->send_cb;
 		nentries = roundup_pow_of_two(attr->src_nentries);
 		desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
 		ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
@@ -619,9 +737,10 @@
 void ath12k_ce_per_engine_service(struct ath12k_base *ab, u16 ce_id)
 {
 	struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+	const struct ce_attr *attr = &ab->hw_params->host_ce_config[ce_id];
 
-	if (pipe->send_cb)
-		pipe->send_cb(pipe);
+	if (attr->src_nentries)
+		ath12k_ce_tx_process_cb(pipe);
 
 	if (pipe->recv_cb)
 		ath12k_ce_recv_process_cb(pipe);
@@ -630,9 +749,33 @@
 void ath12k_ce_poll_send_completed(struct ath12k_base *ab, u8 pipe_id)
 {
 	struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+	const struct ce_attr *attr =  &ab->hw_params->host_ce_config[pipe_id];
 
-	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
-		pipe->send_cb(pipe);
+	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries)
+		ath12k_ce_tx_process_cb(pipe);
+}
+
+#define CE_RING_FULL_THRESHOLD_TIME_MS 500
+#define ATH12K_MAX_CE_MANUAL_RETRY	3
+/* Ths function is called from ce_send path. Returns true If there is no buffer
+ * to send packet via HTC, then check if interrupts are not processed from that
+ * CE for last 500ms. If so, poll manually to reap available entries.
+ */
+static bool ath12k_is_manual_ce_poll_needed(struct ath12k_base *ab, struct ath12k_ce_pipe *pipe, struct hal_srng *srng)
+{
+	if (!ab->hw_params->support_ce_manual_poll)
+		return false;
+
+	if (time_after
+	    (jiffies, pipe->timestamp + msecs_to_jiffies(CE_RING_FULL_THRESHOLD_TIME_MS)) &&
+	    (srng->u.src_ring.hp == srng->u.src_ring.reap_hp) &&
+		 (srng->u.src_ring.reap_hp == *srng->u.src_ring.tp_addr)) {
+		pipe->ce_manual_poll_count++;
+		pipe->last_ce_manual_poll_ts = jiffies;
+		return true;
+	}
+
+	return false;
 }
 
 int ath12k_ce_send(struct ath12k_base *ab, struct sk_buff *skb, u8 pipe_id,
@@ -645,7 +788,7 @@
 	unsigned int nentries_mask;
 	int ret = 0;
 	u8 byte_swap_data = 0;
-	int num_used;
+	int num_used, retry = 0;
 
 	/* Check if some entries could be regained by handling tx completion if
 	 * the CE has interrupts disabled and the used entries is more than the
@@ -671,7 +814,7 @@
 
 	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
 		return -ESHUTDOWN;
-
+retry:
 	spin_lock_bh(&ab->ce.ce_lock);
 
 	write_index = pipe->src_ring->write_index;
@@ -692,9 +835,18 @@
 	desc = ath12k_hal_srng_src_get_next_reaped(ab, srng);
 	if (!desc) {
 		ath12k_hal_srng_access_end(ab, srng);
+		if (retry++ < ATH12K_MAX_CE_MANUAL_RETRY &&
+		    ath12k_is_manual_ce_poll_needed(ab, pipe, srng)) {
+			spin_unlock_bh(&srng->lock);
+			spin_unlock_bh(&ab->ce.ce_lock);
+
+			ath12k_ce_tx_process_cb(pipe);
+			goto retry;
+		} else {
 		ret = -ENOBUFS;
 		goto unlock;
 	}
+	}
 
 	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
 		byte_swap_data = 1;
@@ -880,6 +1032,8 @@
 		}
 	}
 
+	ab->ce_pipe_init_done = true;
+
 	return 0;
 }
 
@@ -926,6 +1080,8 @@
 			pipe->status_ring = NULL;
 		}
 	}
+
+	ab->ce_pipe_init_done = false;
 }
 
 int ath12k_ce_alloc_pipes(struct ath12k_base *ab)
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/ce.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/ce.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/ce.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/ce.h	2024-01-19 17:01:19.853846702 +0100
@@ -8,6 +8,8 @@
 #define ATH12K_CE_H
 
 #define CE_COUNT_MAX 16
+#define ATH12K_QCN6432_CE_COUNT 12
+#define ATH12K_QCN6432_EXT_IRQ_GRP_NUM_MAX 12
 
 /* Byte swap data words */
 #define CE_ATTR_BYTE_SWAP_DATA 2
@@ -39,8 +41,8 @@
 #define PIPEDIR_INOUT_H2H	4 /* bidirectional, host to host */
 
 /* CE address/mask */
-#define CE_HOST_IE_ADDRESS	0x00A1803C
-#define CE_HOST_IE_2_ADDRESS	0x00A18040
+#define CE_HOST_IE_ADDRESS	0x75804c
+#define CE_HOST_IE_2_ADDRESS	0x758050
 #define CE_HOST_IE_3_ADDRESS	CE_HOST_IE_ADDRESS
 
 #define CE_HOST_IE_3_SHIFT	0xC
@@ -76,6 +78,17 @@
 	__le32 reserved;
 };
 
+struct ce_ie_addr {
+	u32 ie1_reg_addr;
+	u32 ie2_reg_addr;
+	u32 ie3_reg_addr;
+};
+
+struct ce_remap {
+	u32 base;
+	u32 size;
+};
+
 struct ce_attr {
 	/* CE_ATTR_* values */
 	unsigned int flags;
@@ -92,6 +105,8 @@
 	unsigned int dest_nentries;
 
 	void (*recv_cb)(struct ath12k_base *ab, struct sk_buff *skb);
+
+	void (*send_cb)(struct ath12k_base *, struct sk_buff *);
 };
 
 #define CE_DESC_RING_ALIGN 8
@@ -145,14 +160,16 @@
 	unsigned int buf_sz;
 	unsigned int rx_buf_needed;
 
-	void (*send_cb)(struct ath12k_ce_pipe *pipe);
+	void (*send_cb)(struct ath12k_base *, struct sk_buff *);
 	void (*recv_cb)(struct ath12k_base *ab, struct sk_buff *skb);
 
 	struct tasklet_struct intr_tq;
 	struct ath12k_ce_ring *src_ring;
 	struct ath12k_ce_ring *dest_ring;
 	struct ath12k_ce_ring *status_ring;
-	u64 timestamp;
+	unsigned long timestamp;
+	u32 ce_manual_poll_count;
+	u64 last_ce_manual_poll_ts;
 };
 
 struct ath12k_ce {
@@ -164,6 +181,7 @@
 
 extern const struct ce_attr ath12k_host_ce_config_qcn9274[];
 extern const struct ce_attr ath12k_host_ce_config_wcn7850[];
+extern const struct ce_attr ath12k_host_ce_config_ipq5332[];
 
 void ath12k_ce_cleanup_pipes(struct ath12k_base *ab);
 void ath12k_ce_rx_replenish_retry(struct timer_list *t);
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/core.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/core.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/core.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/core.c	2024-03-18 14:40:14.843741115 +0100
@@ -8,17 +8,364 @@
 #include <linux/slab.h>
 #include <linux/remoteproc.h>
 #include <linux/firmware.h>
+#include <linux/panic_notifier.h>
 #include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include "peer.h"
 #include "core.h"
+#include "coredump.h"
 #include "dp_tx.h"
 #include "dp_rx.h"
 #include "debug.h"
 #include "hif.h"
+#include "sawf.h"
+#include "ppe.h"
+#include "ahb.h"
+#include "telemetry.h"
+#include "peer.h"
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+#include <linux/rtnetlink.h>
+#include "bondif.h"
+#endif
 
 unsigned int ath12k_debug_mask;
 module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
 MODULE_PARM_DESC(debug_mask, "Debugging mask");
 
+static unsigned int ath12k_crypto_mode;
+module_param_named(crypto_mode, ath12k_crypto_mode, uint, 0644);
+MODULE_PARM_DESC(crypto_mode, "crypto mode: 0-hardware, 1-software");
+
+/* frame mode values are mapped as per enum ath12k_hw_txrx_mode */
+unsigned int ath12k_frame_mode = ATH12K_HW_TXRX_ETHERNET;
+module_param_named(frame_mode, ath12k_frame_mode, uint, 0644);
+MODULE_PARM_DESC(frame_mode,
+		 "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
+
+unsigned int ath12k_ftm_mode;
+module_param_named(ftm_mode, ath12k_ftm_mode, uint, 0444);
+MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
+
+unsigned int ath12k_mlo_capable = 0;
+module_param_named(mlo_capable, ath12k_mlo_capable, uint, 0644);
+MODULE_PARM_DESC(mlo_capable, "mlo capable: 0-disable, 1-enable");
+
+static unsigned int ath12k_en_fwlog = true;
+module_param_named(en_fwlog, ath12k_en_fwlog, uint, 0644);
+MODULE_PARM_DESC(en_fwlog, "fwlog: 0-disable, 1-enable");
+
+unsigned int ath12k_ppe_ds_enabled = false;
+module_param_named(ppe_ds_enable, ath12k_ppe_ds_enabled, uint, 0644);
+MODULE_PARM_DESC(ppe_ds_enable, "ppe_ds_enable: 0-disable, 1-enable");
+
+unsigned int ath12k_ssr_failsafe_mode = true;
+module_param_named(ssr_failsafe_mode, ath12k_ssr_failsafe_mode, uint, 0644);
+MODULE_PARM_DESC(ssr_failsafe_mode, "ssr failsafe mode: 0-disable, 1-enable");
+
+bool ath12k_mgmt_rx_reordering = false;
+module_param_named(mgmt_rx_reorder, ath12k_mgmt_rx_reordering, bool, 0644);
+MODULE_PARM_DESC(mgmt_rx_reorder, "Mgmt Rx Re-Ordering (0 - disable, 1 - enable)");
+
+bool ath12k_debug_critical = false;
+module_param_named(debug_critical, ath12k_debug_critical, bool, 0644);
+MODULE_PARM_DESC(debug_critical, "Debug critical issue (0 - disable, 1 - enable)");
+
+static char *ath12k_board_variant;
+module_param_named(board_variant, ath12k_board_variant, charp, 0444);
+MODULE_PARM_DESC(board_variant, "board variant to use for bdf lookup");
+
+bool ath12k_en_shutdown;
+module_param_named(en_shutdown, ath12k_en_shutdown, bool, 0644);
+MODULE_PARM_DESC(en_shutdown, "enable pcie shutdown callback");
+
+bool ath12k_stats_disable;
+module_param_named(stats_disable, ath12k_stats_disable, bool, 0644);
+MODULE_PARM_DESC(stats_disable, "disable all HTT stats");
+
+static DEFINE_MUTEX(ath12k_hw_lock);
+static struct list_head ath12k_hw_groups = LIST_HEAD_INIT(ath12k_hw_groups);
+
+extern struct ath12k_coredump_info ath12k_coredump_ram_info;
+
+/* This function needs to be used only when dt has multi chip grouping information */
+static struct ath12k_hw_group *ath12k_core_hw_group_find_by_id(u8 group_id)
+{
+	struct ath12k_hw_group *ag;
+
+	lockdep_assert_held(&ath12k_hw_lock);
+
+	/* group ids will be unique only for multi chip group */
+	list_for_each_entry(ag, &ath12k_hw_groups, list) {
+		if (group_id == ag->id && ag->num_chip > 1)
+			return ag;
+	}
+
+	return NULL;
+}
+
+static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
+{
+	mutex_lock(&ath12k_hw_lock);
+
+	list_del(&ag->list);
+	kfree(ag);
+
+	mutex_unlock(&ath12k_hw_lock);
+}
+
+void ath12k_core_put_hw_group(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	u8 chip_id = ab->chip_id;
+	int num_probed;
+
+	if (!ag)
+		return;
+
+	mutex_lock(&ag->mutex_lock);
+
+	if (chip_id >= ag->num_chip) {
+		ath12k_err(ab, "failed to put Invalid chip id %d in the group id %d max chip %d\n",
+			   chip_id, ag->id, ag->num_chip);
+
+		return;
+	}
+
+	if (ag->ab[chip_id] != ab) {
+		ath12k_err(ab, "failed to put chip id %d in the group id %d\n",
+			   chip_id, ag->id);
+
+		return;
+	}
+
+	ag->ab[chip_id] = NULL;
+	ab->ag = NULL;
+	ab->chip_id = ATH12K_INVALID_CHIP_ID;
+
+	if (ag->num_probed)
+		ag->num_probed--;
+
+	num_probed = ag->num_probed;
+
+	mutex_unlock(&ag->mutex_lock);
+
+	if (!num_probed)
+		ath12k_core_hw_group_free(ag);
+
+}
+
+static void ath12k_core_hw_group_reset(struct work_struct *work)
+{
+	struct ath12k_hw_group *ag = container_of(work, struct ath12k_hw_group, reset_work);
+	struct ath12k_base *ab;
+	int i;
+
+	if (ag->recovery_mode != ATH12K_MLO_RECOVERY_MODE0)
+		return;
+
+	for (i = 0; i < ag->num_chip; i++) {
+		ab = ag->ab[i];
+		if (ab->hif.bus == ATH12K_BUS_PCI) {
+			ag->crash_type = ATH12K_RPROC_ROOTPD_AHB_CRASH;
+			continue;
+		}
+
+		if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->ag->dev_flags))) {
+			set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
+			set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+			queue_work(ab->workqueue_aux, &ab->reset_work);
+		}
+
+		ath12k_hal_dump_srng_stats(ab);
+	}
+}
+
+static struct ath12k_hw_group *
+ath12k_core_hw_group_alloc(u8 id, u8 max_chip)
+{
+	struct ath12k_hw_group *ag = NULL;
+
+	lockdep_assert_held(&ath12k_hw_lock);
+
+	ag = kzalloc(sizeof(*ag), GFP_KERNEL);
+	if (!ag)
+		return NULL;
+
+	ag->id = id;
+	ag->num_chip = max_chip;
+	ag->mlo_capable = !!ath12k_mlo_capable;
+	atomic_set(&ag->num_dp_tx_pending, 0);
+	list_add(&ag->list, &ath12k_hw_groups);
+	mutex_init(&ag->mutex_lock);
+	mutex_init(&ag->mlomem_arena.mutex_lock);
+	spin_lock_init(&ag->mlo_umac_reset.lock);
+	ag->hw_queues_stopped = false;
+	ag->recovery_mode = ATH12K_MLO_RECOVERY_MODE0;
+	INIT_WORK(&ag->reset_work, ath12k_core_hw_group_reset);
+
+	return ag;
+}
+
+static struct ath12k_hw_group *ath12k_core_get_hw_group(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = NULL;
+	struct device_node *mlo;
+	struct device *dev = ab->dev;
+	u32 group_id, num_chips;
+	int ret;
+
+	lockdep_assert_held(&ath12k_hw_lock);
+
+	/* If there is no grouping info in the dt, we don't enable
+	 * multi soc hw grouping and registration, rather the single
+	 * soc is added to hw group and based on the number of links
+	 * within the soc, all links are combined into a single wiphy.
+	 */
+	mlo = of_parse_phandle(dev->of_node, "qcom,wsi", 0);
+	if (mlo && ath12k_mlo_capable && !ath12k_ftm_mode) {
+		ret = of_property_read_u32(mlo, "id", &group_id);
+		if (ret)
+			group_id = ATH12K_INVALID_GRP_ID;
+	} else {
+		group_id = ATH12K_INVALID_GRP_ID;
+	}
+
+again:
+	if (group_id == ATH12K_INVALID_GRP_ID) {
+		ath12k_info(ab, "Grouping of multiple SoC not done\n");
+
+		ag = ath12k_core_hw_group_alloc(group_id, 1);
+
+		if (!ag) {
+			ath12k_warn(ab, "unable to create new hw group\n");
+			return NULL;
+		}
+
+		if (ab->hw_params->support_umac_reset)
+			spin_lock_init(&ag->mlo_umac_reset.lock);
+	} else {
+		if (of_property_read_u32(mlo, "num_chip", &num_chips)) {
+			ath12k_err(ab, "num_chip is not configured\n");
+			group_id = ATH12K_INVALID_GRP_ID;
+			goto again;
+		}
+
+		if (num_chips > ATH12K_MAX_SOCS) {
+			ath12k_warn(ab, "num_chip advertised %d is more than limit %d\n",
+				    num_chips, ATH12K_MAX_SOCS);
+			group_id = ATH12K_INVALID_GRP_ID;
+			goto again;
+		}
+
+		/* currently only one group of multiple socs are supported,
+		 * since we use group id ATH12K_INVALID_GRP_ID for single
+		 * chip group which didn't have dt entry, there could be many
+		 * groups with same group id, i.e ATH12K_INVALID_GRP_ID. So
+		 * default group id of ATH12K_INVALID_GRP_ID combined with
+		 * num chips in ath12k_hw_group determines if the group is
+		 * multisoc or single soc group
+		 */
+		ag = ath12k_core_hw_group_find_by_id(group_id);
+		if (!ag) {
+			ag = ath12k_core_hw_group_alloc(group_id, num_chips);
+
+			if (!ag) {
+				ath12k_warn(ab, "unable to create new hw group\n");
+				return NULL;
+			}
+
+			if (ab->hw_params->support_umac_reset)
+				spin_lock_init(&ag->mlo_umac_reset.lock);
+
+		} else if (test_bit(ATH12K_FLAG_UNREGISTERING, &ag->dev_flags)) {
+			ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
+				   ag->id);
+			group_id = ATH12K_INVALID_GRP_ID;
+			goto again;
+		}
+	}
+
+	if (ag->num_probed >= ag->num_chip) {
+		ath12k_warn(ab, "unable to add new chip to group, max limit reached\n");
+		group_id = ATH12K_INVALID_GRP_ID;
+		goto again;
+	}
+
+	ab->chip_id = ag->num_probed;
+	ag->ab[ag->num_probed++] = ab;
+	ab->ag = ag;
+	return ag;
+}
+
+static inline
+bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
+{
+	lockdep_assert_held(&ag->mutex_lock);
+
+	return (ag->num_probed == ag->num_chip);
+}
+
+static inline
+bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
+{
+	lockdep_assert_held(&ag->mutex_lock);
+
+	return (ag->num_started == ag->num_chip);
+}
+
+void ath12k_fw_stats_pdevs_free(struct list_head *head)
+{
+	struct ath12k_fw_stats_pdev *i, *tmp;
+
+	list_for_each_entry_safe(i, tmp, head, list) {
+		list_del(&i->list);
+		kfree(i);
+	}
+}
+
+static void ath12k_fw_stats_vdevs_free(struct list_head *head)
+{
+	struct ath12k_fw_stats_vdev *i, *tmp;
+
+	list_for_each_entry_safe(i, tmp, head, list) {
+		list_del(&i->list);
+		kfree(i);
+	}
+}
+
+void ath12k_fw_stats_bcn_free(struct list_head *head)
+{
+	struct ath12k_fw_stats_bcn *i, *tmp;
+
+	list_for_each_entry_safe(i, tmp, head, list) {
+		list_del(&i->list);
+		kfree(i);
+	}
+}
+
+void ath12k_fw_stats_init(struct ath12k *ar)
+{
+	INIT_LIST_HEAD(&ar->fw_stats.vdevs);
+	INIT_LIST_HEAD(&ar->fw_stats.pdevs);
+	INIT_LIST_HEAD(&ar->fw_stats.bcn);
+	init_completion(&ar->fw_stats_complete);
+}
+
+void ath12k_fw_stats_reset(struct ath12k *ar)
+{
+	ath12k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+	ath12k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
+}
+
+void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
+{
+	ath12k_fw_stats_pdevs_free(&stats->pdevs);
+	ath12k_fw_stats_vdevs_free(&stats->vdevs);
+	ath12k_fw_stats_bcn_free(&stats->bcn);
+}
+
 int ath12k_core_suspend(struct ath12k_base *ab)
 {
 	int ret;
@@ -83,23 +430,87 @@
 	return 0;
 }
 
-static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
-					 size_t name_len)
+int ath12k_core_check_dt(struct ath12k_base *ab)
+{
+	size_t max_len = sizeof(ab->qmi.target.bdf_ext);
+	const char *variant = NULL;
+	struct device_node *node;
+
+	node = ab->dev->of_node;
+	if (!node)
+		return -ENOENT;
+
+	of_property_read_string(node, "qcom,ath12k-calibration-variant",
+				&variant);
+	if (!variant)
+		return -ENODATA;
+
+	if (strscpy(ab->qmi.target.bdf_ext, variant, max_len) < 0)
+		ath12k_dbg(ab, ATH12K_DBG_BOOT,
+			   "bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
+			    variant);
+
+	return 0;
+}
+
+static int ath12k_core_create_board_name(struct ath12k_base *ab,
+					 char *boardname,
+					 char *defaultboardname)
 {
 	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
 	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
 
-	if (ab->qmi.target.bdf_ext[0] != '\0')
+	if (ath12k_board_variant)
+		scnprintf(variant, sizeof(variant), ",variant=%s",
+			  ath12k_board_variant);
+	else if (ab->qmi.target.bdf_ext[0] != '\0')
 		scnprintf(variant, sizeof(variant), ",variant=%s",
 			  ab->qmi.target.bdf_ext);
+	switch (ab->id.bdf_search) {
+	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
+		snprintf(boardname, BOARD_NAME_SIZE,
+			 "bus=%s,vendor=%04x,device=%04x,"
+			 "subsystem-vendor=%04x,"
+			 "subsystem-device=%04x,"
+			 "qmi-chip-id=%d,qmi-board-id=%d%s",
+			 ath12k_bus_str(ab->hif.bus),
+			 ab->id.vendor, ab->id.device,
+			 ab->id.subsystem_vendor,
+			 ab->id.subsystem_device,
+			 ab->qmi.target.chip_id,
+			 ab->qmi.target.board_id, variant);
 
-	scnprintf(name, name_len,
+		snprintf(defaultboardname, BOARD_NAME_SIZE,
+			 "bus=%s,vendor=%04x,device=%04x,"
+			 "subsystem-vendor=%04x,"
+			 "subsystem-device=%04x,"
+			 "qmi-chip-id=%d,qmi-board-id=%d%s",
+			 ath12k_bus_str(ab->hif.bus),
+			 ab->id.vendor, ab->id.device,
+			 ab->id.subsystem_vendor,
+			 ab->id.subsystem_device,
+			 ab->qmi.target.chip_id,
+			 ab->qmi.target.board_id,
+			 variant);
+		break;
+	default:
+		snprintf(boardname, BOARD_NAME_SIZE,
 		  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
 		  ath12k_bus_str(ab->hif.bus),
 		  ab->qmi.target.chip_id,
 		  ab->qmi.target.board_id, variant);
 
-	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);
+		snprintf(defaultboardname, BOARD_NAME_SIZE,
+			 "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
+			 ath12k_bus_str(ab->hif.bus),
+			 ab->qmi.target.chip_id,
+			 ATH12K_DEFAULT_ID, variant);
+		break;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name : '%s'\t"
+		   "default boardname : '%s'\n", boardname,
+		   defaultboardname);
 
 	return 0;
 }
@@ -128,17 +539,15 @@
 
 void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
 {
-	if (!IS_ERR(bd->fw))
+	if (!IS_ERR(bd->fw)) {
 		release_firmware(bd->fw);
-
-	memset(bd, 0, sizeof(*bd));
+		bd->fw = NULL;
+	}
 }
 
 static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
 					 struct ath12k_board_data *bd,
-					 const void *buf, size_t buf_len,
-					 const char *boardname,
-					 int bd_ie_type)
+					 const void *buf, size_t buf_len)
 {
 	const struct ath12k_fw_ie *hdr;
 	bool name_match_found;
@@ -148,7 +557,7 @@
 
 	name_match_found = false;
 
-	/* go through ATH12K_BD_IE_BOARD_ elements */
+	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
 	while (buf_len > sizeof(struct ath12k_fw_ie)) {
 		hdr = buf;
 		board_ie_id = le32_to_cpu(hdr->id);
@@ -159,48 +568,51 @@
 		buf += sizeof(*hdr);
 
 		if (buf_len < ALIGN(board_ie_len, 4)) {
-			ath12k_err(ab, "invalid ATH12K_BD_IE_BOARD length: %zu < %zu\n",
+			ath12k_err(ab, "invalid %s length: %zu < %zu\n",
+				   ath12k_bd_ie_type_str(bd->ie_id),
 				   buf_len, ALIGN(board_ie_len, 4));
 			ret = -EINVAL;
 			goto out;
 		}
 
-		switch (board_ie_id) {
-		case ATH12K_BD_IE_BOARD_NAME:
+		if (board_ie_id == bd->name_id) {
 			ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
 					board_ie_data, board_ie_len);
 
-			if (board_ie_len != strlen(boardname))
-				break;
+			if (board_ie_len != strlen(bd->boardname))
+				goto next;
 
-			ret = memcmp(board_ie_data, boardname, strlen(boardname));
+			ret = memcmp(board_ie_data, bd->boardname, strlen(bd->boardname));
 			if (ret)
-				break;
+				goto next;
 
 			name_match_found = true;
 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
-				   "boot found match for name '%s'",
-				   boardname);
-			break;
-		case ATH12K_BD_IE_BOARD_DATA:
+				   "boot found match %s for name '%s'",
+				   ath12k_bd_ie_type_str(bd->ie_id),
+				   bd->boardname);
+		} else if (board_ie_id == bd->data_id) {
 			if (!name_match_found)
 				/* no match found */
-				break;
+				goto next;
 
 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
-				   "boot found board data for '%s'", boardname);
+				   "boot found %s for '%s'",
+				   ath12k_bd_ie_type_str(bd->ie_id),
+				   bd->boardname);
 
 			bd->data = board_ie_data;
 			bd->len = board_ie_len;
 
 			ret = 0;
 			goto out;
-		default:
-			ath12k_warn(ab, "unknown ATH12K_BD_IE_BOARD found: %d\n",
+		} else {
+			ath12k_warn(ab, "unknown %s id found: %d\n",
+				    ath12k_bd_ie_type_str(bd->ie_id),
 				    board_ie_id);
-			break;
 		}
 
+next:
 		/* jump over the padding */
 		board_ie_len = ALIGN(board_ie_len, 4);
 
@@ -216,8 +628,7 @@
 }
 
 static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
-					      struct ath12k_board_data *bd,
-					      const char *boardname)
+					      struct ath12k_board_data *bd)
 {
 	size_t len, magic_len;
 	const u8 *data;
@@ -282,15 +693,12 @@
 			goto err;
 		}
 
-		switch (ie_id) {
-		case ATH12K_BD_IE_BOARD:
+		if (ie_id == bd->ie_id) {
 			ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
-							    ie_len,
-							    boardname,
-							    ATH12K_BD_IE_BOARD);
+							    ie_len);
 			if (ret == -ENOENT)
 				/* no match found, continue */
-				break;
+				goto next;
 			else if (ret)
 				/* there was an error, bail out */
 				goto err;
@@ -298,6 +706,7 @@
 			goto out;
 		}
 
+next:
 		/* jump over the padding */
 		ie_len = ALIGN(ie_len, 4);
 
@@ -307,9 +716,10 @@
 
 out:
 	if (!bd->data || !bd->len) {
-		ath12k_err(ab,
-			   "failed to fetch board data for %s from %s\n",
-			   boardname, filepath);
+		ath12k_dbg(ab, ATH12K_DBG_BOOT,
+			   "failed to fetch %s for %s from %s\n",
+			   ath12k_bd_ie_type_str(bd->ie_id),
+			   bd->boardname, filepath);
 		ret = -ENODATA;
 		goto err;
 	}
@@ -326,6 +736,7 @@
 				       char *filename)
 {
 	bd->fw = ath12k_core_firmware_request(ab, filename);
+
 	if (IS_ERR(bd->fw))
 		return PTR_ERR(bd->fw);
 
@@ -335,41 +746,179 @@
 	return 0;
 }
 
-#define BOARD_NAME_SIZE 100
 int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
 {
-	char boardname[BOARD_NAME_SIZE];
+	char boardname[BOARD_NAME_SIZE] = {};
+	char defaultboardname[BOARD_NAME_SIZE] = {};
+	u8 bd_api;
 	int ret;
 
-	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+	ret = ath12k_core_create_board_name(ab, boardname, defaultboardname);
 	if (ret) {
 		ath12k_err(ab, "failed to create board name: %d", ret);
 		return ret;
 	}
 
-	ab->bd_api = 2;
-	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname);
+	bd_api = 2;
+	bd->ie_id = ATH12K_BD_IE_BOARD;
+	bd->name_id = ATH12K_BD_IE_BOARD_NAME;
+	bd->data_id = ATH12K_BD_IE_BOARD_DATA;
+	memcpy(bd->boardname, boardname, BOARD_NAME_SIZE);
+
+	ret = ath12k_core_fetch_board_data_api_n(ab, bd);
 	if (!ret)
 		goto success;
 
-	ab->bd_api = 1;
+	memcpy(bd->boardname, defaultboardname, BOARD_NAME_SIZE);
+
+	ret = ath12k_core_fetch_board_data_api_n(ab, bd);
+	if (!ret)
+		goto success;
+
+	bd_api = 1;
 	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
 	if (ret) {
-		ath12k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n",
+		ath12k_err(ab, "failed to fetch board-2.bin from %s\n",
+			   ab->hw_params->fw.dir);
+	} else {
+		ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_BOARD_API2_FILE);
+		if (ret)
+			ath12k_err(ab, "failed to fetch board-2.bin from %s\n",
 			   ab->hw_params->fw.dir);
 		return ret;
 	}
 
 success:
-	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", ab->bd_api);
+	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %hhu\n", bd_api);
+	return 0;
+}
+
+int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
+{
+	char regdbname[BOARD_NAME_SIZE] = {};
+	char defaultregdbname[BOARD_NAME_SIZE] = {};
+	u8 bd_api;
+	int ret;
+
+	ret = ath12k_core_create_board_name(ab, regdbname, defaultregdbname);
+	if (ret) {
+		ath12k_err(ab, "failed to create regdb name: %d", ret);
+		return ret;
+	}
+
+	bd_api = 2;
+	bd->ie_id = ATH12K_BD_IE_REGDB;
+	bd->name_id = ATH12K_BD_IE_REGDB_NAME;
+	bd->data_id = ATH12K_BD_IE_REGDB_DATA;
+	memcpy(bd->boardname, regdbname, BOARD_NAME_SIZE);
+
+	ret = ath12k_core_fetch_board_data_api_n(ab, bd);
+	if (!ret)
+		goto success;
+
+	memcpy(bd->boardname, defaultregdbname, BOARD_NAME_SIZE);
+
+	ret = ath12k_core_fetch_board_data_api_n(ab, bd);
+	if (!ret)
+		goto success;
+
+	bd_api = 1;
+	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
+	if (ret) {
+		ath12k_err(ab, "failed to fetch %s file from %s\n",
+			   ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);
+		return ret;
+	}
+
+success:
+	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %hhu\n", bd_api);
+	return 0;
+}
+
+int ath12k_core_fetch_fw_cfg(struct ath12k_base *ab,
+			     struct ath12k_board_data *bd)
+{
+	int ret;
+
+	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_FW_CFG_FILE);
+	if (ret) {
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "failed to fetch %s from %s\n",
+			   ATH12K_FW_CFG_FILE, ab->hw_params->fw.dir);
+		return -ENOENT;
+	}
+
+	ath12k_info(ab, "fetching %s from %s\n", ATH12K_FW_CFG_FILE,
+		    ab->hw_params->fw.dir);
+
+	return 0;
+}
+
+int ath12k_core_fetch_rxgainlut(struct ath12k_base *ab, struct ath12k_board_data *bd)
+{
+	char rxgainlutname[BOARD_NAME_SIZE] = {};
+	char rxgainlutdefaultname[BOARD_NAME_SIZE] = {};
+	int ret;
+	u8 bd_api;
+
+	ret = ath12k_core_create_board_name(ab, rxgainlutname,
+					    rxgainlutdefaultname);
+	if (ret) {
+		ath12k_err(ab, "failed to create rxgainlut name: %d", ret);
+		return ret;
+	}
+
+	bd_api = 2;
+	bd->ie_id = ATH12K_BD_IE_RXGAINLUT;
+	bd->name_id = ATH12K_BD_IE_RXGAINLUT_NAME;
+	bd->data_id = ATH12K_BD_IE_RXGAINLUT_DATA;
+	memcpy(bd->boardname, rxgainlutname, BOARD_NAME_SIZE);
+
+	ret = ath12k_core_fetch_board_data_api_n(ab, bd);
+
+	if (!ret)
+		goto success;
+
+	memcpy(bd->boardname, rxgainlutdefaultname, BOARD_NAME_SIZE);
+
+	ret = ath12k_core_fetch_board_data_api_n(ab, bd);
+
+	if (!ret)
+		goto success;
+
+	bd_api = 1;
+	snprintf(rxgainlutname, sizeof(rxgainlutname), "%s%04x",
+		 ATH12K_RXGAINLUT_FILE_PREFIX, ab->qmi.target.board_id);
+
+	ret = ath12k_core_fetch_board_data_api_1(ab, bd, rxgainlutname);
+	if (ret) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
+			   rxgainlutname, ab->hw_params->fw.dir);
+
+		ret = ath12k_core_fetch_board_data_api_1(ab, bd,
+							 ATH12K_RXGAINLUT_FILE);
+		if (ret) {
+			ath12k_warn(ab, "failed to fetch default %s from %s\n",
+				    ATH12K_RXGAINLUT_FILE, ab->hw_params->fw.dir);
+			return -ENOENT;
+		}
+	}
+
+success:
+	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %hhu\n", bd_api);
 	return 0;
 }
 
 static void ath12k_core_stop(struct ath12k_base *ab)
 {
+	lockdep_assert_held(&ab->ag->mutex_lock);
+	lockdep_assert_held(&ab->core_lock);
+
+	ab->ag->num_started--;
+
 	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
 		ath12k_qmi_firmware_stop(ab);
 
+	ath12k_mgmt_rx_reo_deinit_context(ab);
 	ath12k_hif_stop(ab);
 	ath12k_wmi_detach(ab);
 	ath12k_dp_rx_pdev_reo_cleanup(ab);
@@ -381,6 +930,11 @@
 {
 	int ret;
 
+	if (ath12k_ftm_mode) {
+		ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
+		ath12k_info(ab, "Booting in ftm mode\n");
+	}
+
 	ret = ath12k_qmi_init_service(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
@@ -402,40 +956,397 @@
 
 static void ath12k_core_soc_destroy(struct ath12k_base *ab)
 {
-	ath12k_dp_free(ab);
-	ath12k_reg_free(ab);
 	ath12k_qmi_deinit_service(ab);
 }
 
+/**
+ * ath12k_core_mgmt_rx_reo_init_ss_params() - Initialize a given snapshot
+ * params object
+ * @snapshot_params: Pointer to snapshot params object
+ *
+ * Return: void
+ */
+static void
+ath12k_core_mgmt_rx_reo_init_ss_params(
+	    struct ath12k_mgmt_rx_reo_snapshot_params *snapshot_params)
+{
+	snapshot_params->valid = false;
+	snapshot_params->mgmt_pkt_ctr = 0;
+	snapshot_params->global_timestamp = 0;
+}
+
+/**
+ * ath12k_core_rx_reo_init_ss_value() - Initialize management Rx reorder
+ * snapshot values for a given pdev
+ * @pdev: pointer to pdev object
+ *
+ * Return: 0 for success, non-zero for failure
+ */
+static int
+ath12k_core_rx_reo_init_ss_value(struct ath12k *ar)
+{
+	enum ath12k_mgmt_rx_reo_shared_snapshot_id snapshot_id;
+	struct ath12k_mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
+
+	mgmt_rx_reo_pdev_ctx = &ar->rx_reo_pdev_ctx;
+	snapshot_id = 0;
+	while (snapshot_id < ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
+		ath12k_core_mgmt_rx_reo_init_ss_params
+			(&mgmt_rx_reo_pdev_ctx->last_valid_shared_snapshot
+			 [snapshot_id]);
+		snapshot_id++;
+	}
+
+	/* Initialize Host snapshot params */
+	ath12k_core_mgmt_rx_reo_init_ss_params
+		(&mgmt_rx_reo_pdev_ctx->host_snapshot);
+
+	return 0;
+}
+
+void *ath12k_core_mgmt_rx_reo_get_ss_address(
+		struct ath12k_base *ab,
+	u8 link_id,
+	enum ath12k_mgmt_rx_reo_shared_snapshot_id snapshot_id)
+{
+	struct ath12k_host_mlo_mem_arena *mlomem_arena_ctx;
+	struct ath12k_host_mlo_glb_rx_reo_snapshot_info *snapshot_info;
+	struct ath12k_host_mlo_glb_rx_reo_per_link_info *snapshot_link_info;
+	u8 link;
+
+	if (snapshot_id >= ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
+		ath12k_err(ab, "Invalid snapshot ID: %d\n", snapshot_id);
+		return NULL;
+	}
+
+	mlomem_arena_ctx = &ab->ag->mlomem_arena;
+	snapshot_info = &mlomem_arena_ctx->rx_reo_snapshot_info;
+
+	for (link = 0; link < snapshot_info->num_links; ++link) {
+		snapshot_link_info = &snapshot_info->link_info[link];
+
+		if (link_id == snapshot_link_info->link_id)
+			break;
+	}
+
+	if (link == snapshot_info->num_links) {
+		ath12k_err(ab, "Couldn't find the snapshot link info corresponding to the link %d\n",
+			   link_id);
+		return NULL;
+	}
+
+	switch (snapshot_id) {
+	case ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW:
+		return snapshot_link_info->hw_forwarded;
+
+	case ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED:
+		return snapshot_link_info->fw_consumed;
+
+	case ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED:
+		return snapshot_link_info->fw_forwarded;
+
+	default:
+		WARN_ON(1);
+	}
+
+	return NULL;
+}
+
+static int ath12k_mgmt_rx_reo_get_snapshot_version(struct ath12k_base *ab,
+						   enum ath12k_mgmt_rx_reo_shared_snapshot_id id)
+{
+	struct ath12k_host_mlo_mem_arena *mlomem_arena_ctx;
+	struct ath12k_host_mlo_glb_rx_reo_snapshot_info *snapshot_info;
+	int snapshot_version;
+
+	if (id >= ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
+		ath12k_err(ab, "Invalid snapshot ID: %d\n", id);
+		return ATH12K_MGMT_RX_REO_INVALID_SNAPSHOT_VERSION;
+	}
+
+	mlomem_arena_ctx = &ab->ag->mlomem_arena;
+	snapshot_info = &mlomem_arena_ctx->rx_reo_snapshot_info;
+
+	switch (id) {
+	case ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW:
+		snapshot_version = snapshot_info->hw_forwarded_snapshot_ver;
+		break;
+
+	case ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED:
+		snapshot_version = snapshot_info->fw_consumed_snapshot_ver;
+		break;
+
+	case ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED:
+		snapshot_version = snapshot_info->fw_forwarded_snapshot_ver;
+		break;
+
+	default:
+		snapshot_version = ATH12K_MGMT_RX_REO_INVALID_SNAPSHOT_VERSION;
+		break;
+	}
+
+	return snapshot_version;
+}
+
+static int
+ath12k_core_mgmt_rx_reo_get_snapshot_info
+	    (struct ath12k *ar,
+	     enum ath12k_mgmt_rx_reo_shared_snapshot_id id,
+	     struct ath12k_mgmt_rx_reo_snapshot_info *snapshot_info)
+{
+	u8 link_id;
+	u8 snapshot_version;
+	struct ath12k_base *ab;
+
+	ab = ar->ab;
+
+	if (id >= ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
+		ath12k_err(ab, "Mgmt RX REO snapshot id invalid %d\n", id);
+		return -EINVAL;
+	}
+
+	if (!snapshot_info) {
+		ath12k_err(ab, "Ref to mgmt RX REO snapshot info is null\n");
+		return -EINVAL;
+	}
+
+	link_id = ar->pdev->hw_link_id;
+
+	snapshot_info->address =
+		ath12k_core_mgmt_rx_reo_get_ss_address(ab, link_id, id);
+	if (!snapshot_info->address) {
+		ath12k_err(ab, "NULL snapshot address\n");
+		return -EINVAL;
+	}
+
+	snapshot_version = ath12k_mgmt_rx_reo_get_snapshot_version(ab, id);
+	if (snapshot_version < 0) {
+		ath12k_err(ab, "Invalid snapshot version %d\n",
+			   snapshot_version);
+		return -EINVAL;
+	}
+
+	snapshot_info->version = snapshot_version;
+
+	return 0;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_initialize_snapshot_address() - Initialize management Rx reorder
+ * snapshot addresses for a given pdev
+ * @pdev: pointer to pdev object
+ *
+ * Return: 0 for success, non-zero for failure
+ */
+static int
+ath12k_core_mgmt_rx_reo_init_ss_address(struct ath12k *ar)
+{
+	enum ath12k_mgmt_rx_reo_shared_snapshot_id snapshot_id;
+	struct ath12k_mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
+	int status;
+
+	mgmt_rx_reo_pdev_ctx = &ar->rx_reo_pdev_ctx;
+	snapshot_id = 0;
+	while (snapshot_id < ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
+		struct ath12k_mgmt_rx_reo_snapshot_info *snapshot_info;
+
+		snapshot_info =
+			&mgmt_rx_reo_pdev_ctx->host_target_shared_snapshot_info
+			[snapshot_id];
+		status = ath12k_core_mgmt_rx_reo_get_snapshot_info
+			(ar, snapshot_id, snapshot_info);
+		if (status) {
+			ath12k_err(ar->ab, "Get snapshot info failed, id = %u\n",
+				   snapshot_id);
+			return status;
+		}
+
+		snapshot_id++;
+	}
+
+	return 0;
+}
+
+static int ath12k_core_mgmt_rx_reordering_init(struct ath12k_base *ab)
+{
+	int i, ret;
+	struct ath12k *ar;
+	struct ath12k_pdev *pdev;
+	struct ath12k_hw_group *ag = ab->ag;
+
+	if (!(ag->mlo_mem.is_mlo_mem_avail && ag->mgmt_rx_reorder))
+		return 0;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		ar = pdev->ar;
+		if (!ar)
+			continue;
+		ret = ath12k_core_rx_reo_init_ss_value(ar);
+		if (ret) {
+			ath12k_err(ab, "Failed to initialize snapshot value\n");
+			return ret;
+		}
+
+		ret = ath12k_core_mgmt_rx_reo_init_ss_address(ar);
+		if (ret) {
+			ath12k_err(ab, "Failed to initialize snapshot address\n");
+			return ret;
+		}
+
+		ar->rx_reo_pdev_ctx.init_complete = true;
+	}
+
+	return 0;
+}
+
+static int ath12k_core_mlo_shmem_per_chip_crash_info_addresses(
+		struct ath12k_base *ab,
+		struct ath12k_host_mlo_glb_chip_crash_info *global_chip_crash_info,
+		int chip_id)
+{
+	int i;
+	struct ath12k_host_mlo_glb_per_chip_crash_info *per_chip_crash_info = NULL;
+
+	for (i = 0; i < global_chip_crash_info->no_of_chips; i++)
+	{
+		per_chip_crash_info = &global_chip_crash_info->per_chip_crash_info[i];
+
+		if (!per_chip_crash_info)
+			return -EINVAL;
+
+		if (chip_id == per_chip_crash_info->chip_id)
+			break;
+	}
+
+	if (i >= global_chip_crash_info->no_of_chips) {
+		ath12k_err(ab, "error in chip id:%d\n", chip_id);
+		return 0;
+	}
+
+	if (!per_chip_crash_info ||
+	    !per_chip_crash_info->crash_reason ||
+	    !per_chip_crash_info->recovery_mode) {
+		ath12k_err(ab, "crash_reason address is null\n");
+		return 0;
+	}
+
+	ab->crash_info_address = per_chip_crash_info->crash_reason;
+	ab->recovery_mode_address = per_chip_crash_info->recovery_mode;
+
+	return 0;
+}
+
+static int ath12k_core_mlo_shmem_crash_info_init(struct ath12k_base *ab, int index)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_host_mlo_mem_arena *mlomem_arena_ctx;
+	struct ath12k_host_mlo_glb_chip_crash_info *global_chip_crash_info;
+
+	mlomem_arena_ctx = &ab->ag->mlomem_arena;
+
+	if (!(ag->mlo_mem.is_mlo_mem_avail))
+		return 0;
+
+	global_chip_crash_info = &mlomem_arena_ctx->global_chip_crash_info;
+
+	if (ath12k_core_mlo_shmem_per_chip_crash_info_addresses(ab,
+			global_chip_crash_info,
+			index) < 0) {
+		ath12k_warn(ab, "per_chip_crash_info is not set\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ath12k_core_pdev_init(struct ath12k_base *ab)
+{
+	int ret;
+
+	ret = ath12k_thermal_register(ab);
+	if (ret) {
+		ath12k_err(ab, "could not register thermal device: %d\n",
+			   ret);
+		return ret;
+	}
+
+	ret = ath12k_spectral_init(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to init spectral %d\n", ret);
+		goto err_thermal_unregister;
+	}
+
+	ret = ath12k_core_mgmt_rx_reordering_init(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to rx reo reordering %d\n", ret);
+		goto err_spectral_deinit;
+	}
+
+	ath12k_sawf_init(ab);
+	ath12k_telemetry_init(ab);
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	ret = ath12k_dp_ppeds_start(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to start DP PPEDS \n");
+		goto err_dp_ppeds_stop;
+	}
+#endif
+	return 0;
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+err_dp_ppeds_stop:
+	ath12k_dp_ppeds_stop(ab);
+#endif
+err_spectral_deinit:
+	ath12k_spectral_deinit(ab);
+err_thermal_unregister:
+	ath12k_thermal_unregister(ab);
+	return ret;
+}
+
+static void ath12k_core_pdev_deinit(struct ath12k_base *ab)
+{
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	ath12k_dp_ppeds_stop(ab);
+#endif
+	ath12k_spectral_deinit(ab);
+	ath12k_thermal_unregister(ab);
+	ath12k_telemetry_deinit(ab);
+	ath12k_sawf_deinit(ab);
+}
+
 static int ath12k_core_pdev_create(struct ath12k_base *ab)
 {
 	int ret;
 
-	ret = ath12k_mac_register(ab);
+	ret = ath12k_debugfs_pdev_create(ab);
 	if (ret) {
-		ath12k_err(ab, "failed register the radio with mac80211: %d\n", ret);
+		ath12k_err(ab, "failed to create core pdev debugfs: %d\n", ret);
+		/* Free any previously allocated ab info */
 		return ret;
 	}
 
 	ret = ath12k_dp_pdev_alloc(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
-		goto err_mac_unregister;
+		/* Free current debugfs and any previously allocated info */
+		goto err_debugfs_pdev;
 	}
 
 	return 0;
 
-err_mac_unregister:
-	ath12k_mac_unregister(ab);
+err_debugfs_pdev:
+	ath12k_debugfs_pdev_destroy(ab);
 
 	return ret;
 }
 
 static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
 {
-	ath12k_mac_unregister(ab);
-	ath12k_hif_irq_disable(ab);
 	ath12k_dp_pdev_free(ab);
+	ath12k_debugfs_pdev_destroy(ab);
 }
 
 static int ath12k_core_start(struct ath12k_base *ab,
@@ -443,6 +1354,9 @@
 {
 	int ret;
 
+	lockdep_assert_held(&ab->ag->mutex_lock);
+	lockdep_assert_held(&ab->core_lock);
+
 	ret = ath12k_wmi_attach(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
@@ -492,21 +1406,12 @@
 		goto err_hif_stop;
 	}
 
-	ret = ath12k_mac_allocate(ab);
-	if (ret) {
-		ath12k_err(ab, "failed to create new hw device with mac80211 :%d\n",
-			   ret);
-		goto err_hif_stop;
-	}
-
 	ath12k_dp_cc_config(ab);
 
-	ath12k_dp_pdev_pre_alloc(ab);
-
 	ret = ath12k_dp_rx_pdev_reo_setup(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
-		goto err_mac_destroy;
+		goto err_hif_stop;
 	}
 
 	ret = ath12k_wmi_cmd_init(ab);
@@ -522,6 +1427,8 @@
 		goto err_reo_cleanup;
 	}
 
+	WARN_ON(test_bit(ATH12K_FLAG_WMI_INIT_DONE, &ab->dev_flags));
+	set_bit(ATH12K_FLAG_WMI_INIT_DONE, &ab->dev_flags);
 	/* put hardware to DBS mode */
 	if (ab->hw_params->single_pdev_only) {
 		ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
@@ -538,12 +1445,13 @@
 		goto err_reo_cleanup;
 	}
 
+	/* Indicate the core start in the appropriate group */
+	ab->ag->num_started++;
+
 	return 0;
 
 err_reo_cleanup:
 	ath12k_dp_rx_pdev_reo_cleanup(ab);
-err_mac_destroy:
-	ath12k_mac_destroy(ab);
 err_hif_stop:
 	ath12k_hif_stop(ab);
 err_wmi_detach:
@@ -551,6 +1459,225 @@
 	return ret;
 }
 
+static int ath12k_core_mlo_teardown(struct ath12k_hw_group *ag)
+{
+	struct ath12k_hw *ah;
+	int ret = 0;
+	int i;
+
+	if (!ag->mlo_capable)
+		return 0;
+
+	for (i = ag->num_hw - 1; i >= 0; i--) {
+		ah = ag->ah[i];
+		if (!ah)
+			continue;
+
+		ret = ath12k_mac_mlo_teardown(ah);
+		if (ret)
+			goto out;
+	}
+
+out:
+	return ret;
+}
+
+static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
+{
+	struct ath12k_hw *ah;
+	int ret;
+	int i;
+
+	if (!ag->mlo_capable)
+		return 0;
+
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ag->ah[i];
+		if (!ah)
+			continue;
+
+		ret = ath12k_mac_mlo_setup(ah);
+		if (ret)
+			goto err_setup;
+	}
+
+	for (i = 0; i < ag->num_chip; i++)
+		ath12k_dp_partner_cc_init(ag->ab[i]);
+
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ag->ah[i];
+		if (!ah)
+			continue;
+
+		ret = ath12k_mac_mlo_ready(ah);
+		if (ret)
+			goto err_ready;
+	}
+
+	return 0;
+
+err_ready:
+	i = ag->num_hw;
+err_setup:
+	for (i = i - 1; i >= 0; i--) {
+		ah = ag->ah[i];
+		if (!ah)
+			continue;
+
+		ath12k_mac_mlo_teardown(ah);
+	}
+
+	return ret;
+}
+
+static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab = ag->ab[0];
+	int ret, i;
+	bool is_registered = false;
+
+	lockdep_assert_held(&ag->mutex_lock);
+
+	/* Check If already registered or not, since same flow
+	 * execute for HW restart case.
+	 */
+	if (test_bit(ATH12K_FLAG_REGISTERED, &ag->dev_flags))
+		is_registered = true;
+
+	if (!is_registered) {
+		ret = ath12k_mac_allocate(ag);
+		if (ret) {
+			ath12k_err(ab, "failed to create new hw device with mac80211 :%d\n",
+				   ret);
+			return ret;
+		}
+	}
+
+	for (i = 0; i < ag->num_chip; i++) {
+		ab = ag->ab[i];
+
+		/* pdev create needs to be done only for recovered
+		 * ab during Mode1 scenario
+		 */
+		if (ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE1 && !ab->recovery_start)
+			continue;
+
+		mutex_lock(&ab->core_lock);
+		ret = ath12k_core_pdev_create(ab);
+		if (ret) {
+			mutex_unlock(&ab->core_lock);
+			ath12k_err(ab, "failed to create pdev core %d\n", ret);
+			goto err_pdev_destroy;
+		}
+		mutex_unlock(&ab->core_lock);
+	}
+
+	ret = ath12k_core_mlo_setup(ag);
+	if (ret) {
+		ath12k_err(ab, "failed to setup MLO: %d\n", ret);
+		goto err_pdev_destroy;
+	}
+
+	if (!is_registered) {
+		ret = ath12k_mac_register(ag);
+		if (ret) {
+			ath12k_err(ab, "failed register the radio with mac80211: %d\n", ret);
+			goto err_mlo_teardown;
+		}
+	}
+
+	/* Setup pdev features for all ab */
+	for (i = 0; i < ag->num_chip; i++) {
+		ab = ag->ab[i];
+
+		/* pdev init needs to be done only for recovered
+		 * ab during Mode1 scenario
+		 */
+		if (ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE1 && !ab->recovery_start)
+			continue;
+
+		mutex_lock(&ab->core_lock);
+
+		ret = ath12k_core_pdev_init(ab);
+		if (ret) {
+			mutex_unlock(&ab->core_lock);
+			ath12k_err(ab, "failed to init pdev features\n");
+			goto pdev_cleanup;
+		}
+		ath12k_hif_irq_enable(ab);
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+		ath12k_hif_ppeds_irq_enable(ab, PPEDS_IRQ_PPE2TCL);
+		ath12k_hif_ppeds_irq_enable(ab, PPEDS_IRQ_REO2PPE);
+		ath12k_hif_ppeds_irq_enable(ab, PPEDS_IRQ_PPE_WBM2SW_REL);
+#endif
+		if (ab->hw_params->en_qdsslog) {
+			ath12k_info(ab, "QDSS trace enabled\n");
+			ath12k_config_qdss(ab);
+		}
+
+		if (ath12k_en_fwlog) {
+			ret = ath12k_enable_fwlog(ab);
+			if (ret < 0) {
+				mutex_unlock(&ab->core_lock);
+				ath12k_err(ab, "failed to enable fwlog: %d\n", ret);
+				goto pdev_cleanup;
+			}
+		}
+
+		ret = ath12k_dp_umac_reset_init(ab);
+		if (ret) {
+			mutex_unlock(&ab->core_lock);
+			ath12k_warn(ab, "Failed to initialize UMAC RESET: %d\n", ret);
+			goto pdev_cleanup;
+		}
+
+		ret = ath12k_core_mlo_shmem_crash_info_init(ab, i);
+		if (ret) {
+			ath12k_err(ab, "failed to parse crash info %d\n", ret);
+		}
+
+		set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
+		mutex_unlock(&ab->core_lock);
+	}
+
+	if (!is_registered) {
+		set_bit(ATH12K_FLAG_REGISTERED, &ag->dev_flags);
+		init_completion(&ag->umac_reset_complete);
+	}
+
+	return 0;
+
+pdev_cleanup:
+	for (i = i - 1; i >= 0; i--) {
+		ab = ag->ab[i];
+
+		mutex_lock(&ab->core_lock);
+
+		ath12k_core_pdev_deinit(ab);
+		ath12k_hif_irq_disable(ab);
+		ath12k_dp_umac_reset_deinit(ab);
+
+		mutex_unlock(&ab->core_lock);
+	}
+
+	ath12k_mac_unregister(ag);
+err_mlo_teardown:
+	ath12k_core_mlo_teardown(ag);
+	i = ag->num_chip;
+err_pdev_destroy:
+	for (i = i - 1; i >= 0; i--) {
+		ab = ag->ab[i];
+
+		mutex_lock(&ab->core_lock);
+		ath12k_core_pdev_destroy(ab);
+		mutex_unlock(&ab->core_lock);
+	}
+
+	ath12k_mac_destroy(ag);
+	return ret;
+}
+
 static int ath12k_core_start_firmware(struct ath12k_base *ab,
 				      enum ath12k_firmware_mode mode)
 {
@@ -568,11 +1695,37 @@
 	return ret;
 }
 
+static void ath12k_core_trigger_partner(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_base *partner_ab;
+	int i;
+	bool found = false;
+
+	lockdep_assert_held(&ag->mutex_lock);
+
+	for (i = 0; i < ag->num_chip; i++) {
+		partner_ab = ag->ab[i];
+		if (!partner_ab)
+			continue;
+
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "QMI Trigger Host Cap AB:%p Partner AB:%p Found:%d Num Radios: %d ag->num_chip:%d\n",
+				ab, partner_ab, found,
+				ab->qmi.num_radios, ag->num_chip);
+
+		if (found)
+			ath12k_qmi_trigger_host_cap(partner_ab);
+
+		found = (partner_ab == ab) ? true : false;
+	}
+}
+
 int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
 {
+	struct ath12k_hw_group *ag;
 	int ret;
 
-	ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
+	ret = ath12k_core_start_firmware(ab, ab->fw_mode);
 	if (ret) {
 		ath12k_err(ab, "failed to start firmware: %d\n", ret);
 		return ret;
@@ -584,47 +1737,329 @@
 		goto err_firmware_stop;
 	}
 
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	if (ath12k_ppe_ds_enabled) {
+		if (ath12k_frame_mode != ATH12K_HW_TXRX_ETHERNET) {
+			ath12k_warn(ab,
+				    "Force enabling Ethernet frame mode in PPE DS for" \
+				    " AP and STA modes.\n");
+			/* MESH and WDS VAPs will still use NATIVE_WIFI mode
+			 * @ath12k_mac_update_vif_offload()
+			 * TODO: add device capability check
+			 */
+			ath12k_frame_mode = ATH12K_HW_TXRX_ETHERNET;
+		}
+		if (ab->hif.bus == ATH12K_BUS_PCI || ab->hif.bus == ATH12K_BUS_HYBRID)
+			set_bit(ATH12K_FLAG_PPE_DS_ENABLED, &ab->dev_flags);
+	}
+#endif
+
 	ret = ath12k_dp_alloc(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to init DP: %d\n", ret);
 		goto err_firmware_stop;
 	}
 
+	ag = ab->ag;
+
+	switch (ath12k_crypto_mode) {
+	case ATH12K_CRYPT_MODE_SW:
+		set_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ag->dev_flags);
+		set_bit(ATH12K_FLAG_RAW_MODE, &ag->dev_flags);
+		break;
+	case ATH12K_CRYPT_MODE_HW:
+		clear_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ag->dev_flags);
+		clear_bit(ATH12K_FLAG_RAW_MODE, &ag->dev_flags);
+		break;
+	default:
+		ath12k_info(ab, "invalid crypto_mode: %d\n", ath12k_crypto_mode);
+		return -EINVAL;
+	}
+
+	if (ath12k_frame_mode == ATH12K_HW_TXRX_RAW)
+		set_bit(ATH12K_FLAG_RAW_MODE, &ag->dev_flags);
+
+	mutex_lock(&ag->mutex_lock);
 	mutex_lock(&ab->core_lock);
-	ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
+
+	/* This should be the last function, add core related
+	 * initializations within this function
+	 */
+	ret = ath12k_core_start(ab, ab->fw_mode);
 	if (ret) {
 		ath12k_err(ab, "failed to start core: %d\n", ret);
 		goto err_dp_free;
 	}
 
-	ret = ath12k_core_pdev_create(ab);
+	set_bit(ATH12K_FLAG_CORE_REGISTERED, &ab->dev_flags);
+	mutex_unlock(&ab->core_lock);
+
+	if (ath12k_core_hw_group_start_ready(ag)) {
+
+		/* initialize the mgmt rx re-order after
+		 * mlo mem is available
+		 */
+		ath12k_mgmt_rx_reo_init_context(ab);
+		ath12k_qmi_mlo_global_snapshot_mem_init(ab);
+
+		ret = ath12k_core_hw_group_start(ag);
 	if (ret) {
-		ath12k_err(ab, "failed to create pdev core: %d\n", ret);
+			ath12k_warn(ab, "unable to start hw group\n");
 		goto err_core_stop;
 	}
-	ath12k_hif_irq_enable(ab);
-	mutex_unlock(&ab->core_lock);
 
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
+	} else {
+		ath12k_core_trigger_partner(ab);
+	}
+
+	mutex_unlock(&ag->mutex_lock);
+
+	/* Add code here carefully */
 	return 0;
 
 err_core_stop:
+	mutex_lock(&ab->core_lock);
 	ath12k_core_stop(ab);
-	ath12k_mac_destroy(ab);
+	clear_bit(ATH12K_FLAG_CORE_REGISTERED, &ab->dev_flags);
 err_dp_free:
 	ath12k_dp_free(ab);
 	mutex_unlock(&ab->core_lock);
+	mutex_unlock(&ag->mutex_lock);
 err_firmware_stop:
 	ath12k_qmi_firmware_stop(ab);
 
 	return ret;
 }
 
+#define ATH12K_COLLECT_DUMP_TIMEOUT	(120 * HZ)
+
+void ath12k_core_wait_dump_collect(struct ath12k_base *ab)
+{
+	int timeout;
+
+	if (ab->collect_dump) {
+		timeout = wait_event_timeout(ab->ssr_dump_wq,
+					     (ab->collect_dump == false),
+					     ATH12K_COLLECT_DUMP_TIMEOUT);
+		if (timeout <= 0)
+			ath12k_warn(ab, "dump collection timed out\n");
+	}
+	return;
+}
+EXPORT_SYMBOL(ath12k_core_wait_dump_collect);
+
+void ath12k_core_issue_bug_on(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+
+	if (ab->in_panic)
+		goto out;
+
+	/* set in_panic to true to avoid multiple rddm download during
+	 * firmware crash
+	 */
+	ab->in_panic = true;
+
+	if (!ag->mlo_capable)
+		BUG_ON(1);
+
+	if (atomic_read(&ath12k_coredump_ram_info.num_chip) >= ab->ag->num_started)
+		BUG_ON(1);
+	else
+		goto out;
+
+out:
+	ath12k_info(ab,
+		    "%d chip dump collected and waiting for partner chips\n",
+		    atomic_read(&ath12k_coredump_ram_info.num_chip));
+
+}
+
+static void ath12k_coredump_download_ahb(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	int dump_count;
+	struct ath12k_ahb *ab_ahb = ath12k_ahb_priv(ab);
+	struct ath12k_base *partner_ab;
+	int i;
+
+	/* Crash the system once all the stats are dumped */
+	if (ab->fw_recovery_support && !ab->in_panic)
+		return;
+
+	if (ag->mlo_capable) {
+		dump_count = atomic_read(&ath12k_coredump_ram_info.num_chip);
+		if (dump_count >= ATH12K_MAX_SOCS) {
+			ath12k_err(ab, "invalid chip number %d\n",
+				   dump_count);
+			return;
+		}
+	}
+	if (ab_ahb->tgt_rrproc->state == RPROC_CRASHED && ab->multi_pd_arch) {
+		for (i = 0; i < ag->num_chip; i++) {
+			partner_ab = ag->ab[i];
+			if (partner_ab->multi_pd_arch)
+				atomic_inc(&ath12k_coredump_ram_info.num_chip);
+		}
+	} else {
+		atomic_inc(&ath12k_coredump_ram_info.num_chip);
+	}
+
+	ath12k_core_issue_bug_on(ab);
+}
+
+/* Print the driver stats and crash the system on receiving this notification */
+static int ath12k_core_ssr_notifier_cb(struct notifier_block *nb,
+				       const unsigned long event,
+				       void *data)
+{
+	struct ath12k_base *ab = container_of(nb, struct ath12k_base, ssr_nb);
+
+	if (ab->collect_dump && event == ATH12K_SSR_POWERUP) {
+		ab->collect_dump = false;
+		wake_up(&ab->ssr_dump_wq);
+		return NOTIFY_OK;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int ath12k_core_rpd_ssr_notifier_cb(struct notifier_block *nb,
+					   const unsigned long event,
+					   void *data)
+{
+	struct ath12k_base *ab = container_of(nb, struct ath12k_base, rpd_ssr_nb);
+
+	if (ab->collect_dump && event == ATH12K_SSR_POWERUP) {
+		ab->collect_dump = false;
+		wake_up(&ab->ssr_dump_wq);
+		return NOTIFY_OK;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int ath12k_core_check_ssr_notifier_as_expected(struct ath12k_base *ab,
+						      void *data,
+						      const char *node)
+{
+	phandle rproc_phandle;
+	struct device *dev = ab->dev;
+	struct platform_device *pdev;
+	struct device_node *rproc_node;
+	struct platform_device *ssr_pdev = data;
+
+	if (of_property_read_u32(dev->of_node, node, &rproc_phandle))
+		return -EINVAL;
+
+	rproc_node = of_find_node_by_phandle(rproc_phandle);
+	if (!rproc_node) {
+		ath12k_warn(ab, "ssr notification failed to get rproc_node\n");
+		return -EINVAL;
+	}
+
+	pdev = of_find_device_by_node(rproc_node);
+	if (!pdev) {
+		ath12k_warn(ab, "Failed to get pdev from device node\n");
+		return -EINVAL;
+	}
+
+	if (!ab->in_panic && (strcmp(ssr_pdev->name, pdev->name) != 0)) {
+		ath12k_warn(ab, "SSR notification mismatch %s  pdev name:%s\n",
+			    ssr_pdev->name, pdev->name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ath12k_core_atomic_ssr_notifier_cb(struct notifier_block *nb,
+					      const unsigned long event,
+					      void *data)
+{
+	struct ath12k_base *ab = container_of(nb, struct ath12k_base, atomic_ssr_nb);
+	struct ath12k_ahb *ab_ahb = ath12k_ahb_priv(ab);
+	struct platform_device *ssr_pdev = data;
+	int ret;
+
+	if (event != ATH12K_SSR_PREPARE_SHUTDOWN)
+		return NOTIFY_DONE;
+
+	if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
+		return NOTIFY_DONE;
+
+	ab->collect_dump = true;
+
+	ret = ath12k_core_check_ssr_notifier_as_expected(ab, data, "qcom,rproc");
+	if (ret)
+		return NOTIFY_DONE;
+
+	if (strcmp(ssr_pdev->name, ab_ahb->tgt_rproc->name) == 0) {
+		ab->ag->crash_type = ATH12K_RPROC_USERPD_HYBRID_CRASH;
+		ab_ahb->tgt_rproc->state = RPROC_CRASHED;
+	}
+
+	if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->ag->dev_flags))) {
+		set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
+		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+		queue_work(ab->workqueue_aux, &ab->reset_work);
+	}
+
+	ath12k_hal_dump_srng_stats(ab);
+
+	return NOTIFY_OK;
+}
+
+/* Print the driver stats and crash the system on receiving this notification */
+static int ath12k_core_rpd_atomic_ssr_notifier_cb(struct notifier_block *nb,
+						  const unsigned long event,
+						  void *data)
+{
+	struct ath12k_base *ab = container_of(nb, struct ath12k_base, rpd_atomic_ssr_nb);
+	struct ath12k_hw_group *ag = ab->ag;
+	struct platform_device *ssr_pdev = data;
+	struct ath12k_ahb *ab_ahb = ath12k_ahb_priv(ab);
+	int ret;
+
+	if (event != ATH12K_SSR_PREPARE_SHUTDOWN)
+		return NOTIFY_DONE;
+
+	if (!test_bit(ATH12K_FLAG_REGISTERED, &ag->dev_flags))
+		return NOTIFY_DONE;
+
+	ab->collect_dump = true;
+
+	ret = ath12k_core_check_ssr_notifier_as_expected(ab, data, "qcom,rproc_rpd");
+	if (ret)
+		return NOTIFY_DONE;
+
+	/* Changing the rrpoc->state to crashed disabled the
+	 * remoteproc module to do the recovery process.
+	 * This module will take care of the rrproc recovery process.
+	 */
+	if (strcmp(ssr_pdev->name, ab_ahb->tgt_rrproc->name) == 0) {
+		ag->crash_type = ATH12K_RPROC_ROOTPD_HYBRID_CRASH;
+		ab_ahb->tgt_rrproc->state = RPROC_CRASHED;
+	}
+
+	if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ag->dev_flags))) {
+		set_bit(ATH12K_FLAG_RECOVERY, &ag->dev_flags);
+		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ag->dev_flags);
+		queue_work(ab->workqueue_aux, &ag->reset_work);
+	}
+
+	/* TODO Add more driver stats */
+
+	return NOTIFY_OK;
+}
+
 static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
 {
 	int ret;
 
 	mutex_lock(&ab->core_lock);
-	ath12k_hif_irq_disable(ab);
+	ath12k_core_pdev_deinit(ab);
 	ath12k_dp_pdev_free(ab);
 	ath12k_hif_stop(ab);
 	ath12k_wmi_detach(ab);
@@ -633,21 +2068,20 @@
 
 	ath12k_dp_free(ab);
 	ath12k_hal_srng_deinit(ab);
+	ath12k_dp_umac_reset_deinit(ab);
+	ath12k_umac_reset_completion(ab);
 
 	ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+	ab->num_db_cap = 0;
 
 	ret = ath12k_hal_srng_init(ab);
 	if (ret)
 		return ret;
 
-	clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
-
 	ret = ath12k_core_qmi_firmware_ready(ab);
 	if (ret)
 		goto err_hal_srng_deinit;
 
-	clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
-
 	return 0;
 
 err_hal_srng_deinit:
@@ -655,61 +2089,99 @@
 	return ret;
 }
 
-void ath12k_core_halt(struct ath12k *ar)
+static void ath12k_core_mlo_hw_queues_stop(struct ath12k_hw_group *ag,
+					   struct ath12k_base *block_ab)
 {
-	struct ath12k_base *ab = ar->ab;
+	struct ath12k_base *ab;
+	struct ath12k_hw *ah;
+	int i;
 
-	lockdep_assert_held(&ar->conf_mutex);
+	lockdep_assert_held(&ag->mutex_lock);
 
-	ar->num_created_vdevs = 0;
-	ar->allocated_vdev_map = 0;
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ag->ah[i];
+		if (!ah)
+			continue;
 
-	ath12k_mac_scan_finish(ar);
-	ath12k_mac_peer_cleanup_all(ar);
-	cancel_delayed_work_sync(&ar->scan.timeout);
-	cancel_work_sync(&ar->regd_update_work);
+		ieee80211_stop_queues(ah->hw);
+	}
+	ag->hw_queues_stopped = true;
 
-	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
-	synchronize_rcu();
-	INIT_LIST_HEAD(&ar->arvifs);
-	idr_init(&ar->txmgmt_idr);
+	if (ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE1) {
+		block_ab->qmi.num_radios = ATH12K_QMI_INVALID_RADIO;
+		clear_bit(ATH12K_FLAG_QMI_HOST_CAP_SENT, &block_ab->dev_flags);
+		return;
+	}
+
+	for (i = 0; i < ag->num_chip; i++) {
+		ab = ag->ab[i];
+
+		/*
+		* In Mode0, partner chips are expected to be asserted, hence reset
+		* qmi.num_radios count to ensure avoiding duplicate host cap triggers.
+		*/
+		if (ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE0) {
+			ab->qmi.num_radios = ATH12K_QMI_INVALID_RADIO;
+			clear_bit(ATH12K_FLAG_QMI_HOST_CAP_SENT, &ab->dev_flags);
+		}
+	}
 }
 
 static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
 {
 	struct ath12k *ar;
 	struct ath12k_pdev *pdev;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_mac_tx_mgmt_free_arg arg;
 	int i;
 
 	spin_lock_bh(&ab->base_lock);
 	ab->stats.fw_crash_counter++;
 	spin_unlock_bh(&ab->base_lock);
 
+	if (ab->is_reset)
+		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+
 	for (i = 0; i < ab->num_radios; i++) {
 		pdev = &ab->pdevs[i];
 		ar = pdev->ar;
 		if (!ar || ar->state == ATH12K_STATE_OFF)
 			continue;
 
-		ieee80211_stop_queues(ar->hw);
-		ath12k_mac_drain_tx(ar);
+		list_for_each_entry(arvif, &ar->arvifs, list)
+			if (arvif->is_started)
+				ath12k_debugfs_remove_interface(arvif);
+
+		ath12k_mac_radio_drain_tx(ar);
 		complete(&ar->scan.started);
 		complete(&ar->scan.completed);
+		complete(&ar->scan.on_channel);
 		complete(&ar->peer_assoc_done);
 		complete(&ar->peer_delete_done);
+		if (!list_empty(&ab->neighbor_peers))
+			ath12k_debugfs_nrp_cleanup_all(ar);
 		complete(&ar->install_key_done);
 		complete(&ar->vdev_setup_done);
 		complete(&ar->vdev_delete_done);
 		complete(&ar->bss_survey_done);
+		complete(&ar->thermal.wmi_sync);
 
-		wake_up(&ar->dp.tx_empty_waitq);
+		memset(&arg, 0, sizeof(arg));
+		arg.ar = ar;
+		arg.type = u8_encode_bits(true, ATH12K_MAC_TX_MGMT_FREE_TYPE_PDEV);
 		idr_for_each(&ar->txmgmt_idr,
-			     ath12k_mac_tx_mgmt_pending_free, ar);
+			     ath12k_mac_tx_mgmt_pending_free, &arg);
 		idr_destroy(&ar->txmgmt_idr);
+		wake_up(&ar->tx_empty_waitq);
+
+		ar->monitor_vdev_id = -1;
+		clear_bit(MONITOR_VDEV_STARTED, &ar->monitor_flags);
+		clear_bit(MONITOR_VDEV_CREATED, &ar->monitor_flags);
 	}
 
 	wake_up(&ab->wmi_ab.tx_credits_wq);
 	wake_up(&ab->peer_mapping_wq);
+
 }
 
 static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
@@ -721,7 +2193,8 @@
 	for (i = 0; i < ab->num_radios; i++) {
 		pdev = &ab->pdevs[i];
 		ar = pdev->ar;
-		if (!ar || ar->state == ATH12K_STATE_OFF)
+		if (!ar || ar->state == ATH12K_STATE_OFF ||
+		    ar->state == ATH12K_STATE_TM)
 			continue;
 
 		mutex_lock(&ar->conf_mutex);
@@ -729,8 +2202,15 @@
 		switch (ar->state) {
 		case ATH12K_STATE_ON:
 			ar->state = ATH12K_STATE_RESTARTING;
+				if (ar->scan.state == ATH12K_SCAN_RUNNING ||
+						ar->scan.state == ATH12K_SCAN_STARTING)
+					ar->scan.state = ATH12K_SCAN_ABORTING;
+				ath12k_mac_scan_finish(ar);
+				mutex_unlock(&ar->conf_mutex);
+				cancel_delayed_work_sync(&ar->scan.timeout);
+				cancel_work_sync(&ar->scan.vdev_del_wk);
+				mutex_lock(&ar->conf_mutex);
 			ath12k_core_halt(ar);
-			ieee80211_restart_hw(ar->hw);
 			break;
 		case ATH12K_STATE_OFF:
 			ath12k_warn(ab,
@@ -738,6 +2218,7 @@
 				    i);
 			break;
 		case ATH12K_STATE_RESTARTING:
+				ar->state = ATH12K_STATE_RESTARTED;
 			break;
 		case ATH12K_STATE_RESTARTED:
 			ar->state = ATH12K_STATE_WEDGED;
@@ -746,16 +2227,901 @@
 			ath12k_warn(ab,
 				    "device is wedged, will not restart radio %d\n", i);
 			break;
+			case ATH12K_STATE_TM:
+				ath12k_warn(ab, "fw mode reset done radio %d\n", i);
+				break;
 		}
 		mutex_unlock(&ar->conf_mutex);
 	}
-	complete(&ab->driver_recovery);
+}
+
+void ath12k_core_halt(struct ath12k *ar)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_hw_group *ag = ab->ag;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ar->num_created_vdevs = 0;
+	ar->allocated_vdev_map = 0;
+
+	ath12k_mac_peer_cleanup_all(ar);
+	cancel_work_sync(&ar->regd_update_work);
+	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
+
+	if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->ag->dev_flags)))
+		synchronize_rcu();
+
+	if (ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE0)
+		INIT_LIST_HEAD(&ar->arvifs);
+
+	idr_init(&ar->txmgmt_idr);
+}
+
+static void ath12k_core_mode1_recovery_sta_list(void *data,
+					       struct ieee80211_sta *sta)
+{
+	struct ath12k_link_sta *arsta;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_link_vif *arvif = (struct ath12k_link_vif *)data;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_vif *vif = ahvif->vif;
+	struct ath12k *ar = arvif->ar;
+	struct ath12k_base *ab = arvif->ab;
+	struct ath12k_key_conf *key_conf = NULL;
+	struct ath12k_peer *peer;
+	struct ieee80211_key_conf *key;
+	int ret = -1, key_idx;
+	u8 link_id = arvif->link_id;
+	enum ieee80211_sta_state state, prev_state;
+	bool sta_added = false;
+
+	if (ahsta->ahvif != arvif->ahvif)
+		return;
+
+	/* Check if there is a link sta in the vif link */
+	if (!(BIT(link_id) & ahsta->links_map))
+		return;
+
+	/* From iterator, rcu_read_lock is acquired. Will be revisited
+	 * later to use local list
+	 */
+	arsta = ahsta->link[link_id];
+
+	key_conf = container_of((void *)sta, struct ath12k_key_conf, sta);
+
+	if (vif->type != NL80211_IFTYPE_AP &&
+	    vif->type != NL80211_IFTYPE_AP_VLAN &&
+	    vif->type != NL80211_IFTYPE_STATION &&
+	    vif->type != NL80211_IFTYPE_MESH_POINT)
+		return;
+
+	peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+	if (peer) {
+		sta_added = true;
+		goto key_add;
+	}
+
+	prev_state = arsta->state;
+	for (state = IEEE80211_STA_NOTEXIST;
+	     state < prev_state; state++) {
+		/* all station set case */
+		/* TODO: Iterator API is called with rcu lock
+		 * hence need for this unlock/lock statement.
+		 * Need to revisit in next version
+		 */
+		rcu_read_unlock();
+		ath12k_mac_update_sta_state(ar->ah->hw, arvif->ahvif->vif, sta,
+					    state, (state + 1));
+		rcu_read_lock();
+		sta_added = true;
+	}
+
+key_add:
+	if (sta_added)
+	for (key_idx = 0; key_idx < WMI_MAX_KEY_INDEX; key_idx++) {
+		key = arsta->keys[key_idx];
+
+		if (key) {
+			/* BIP needs to be done in software */
+			if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+			    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+			    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
+			    key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) {
+				ret = 1;
+				goto out;
+			}
+
+			if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->ag->dev_flags)) {
+				ret = 1;
+				goto out;
+			}
+
+			if (!arvif->is_created) {
+				key_conf = kzalloc(sizeof(*key_conf), GFP_ATOMIC);
+
+				if (!key_conf) {
+					goto out;
+				}
+
+				key_conf->cmd = SET_KEY;
+				key_conf->sta = sta;
+				key_conf->key = key;
+
+				list_add_tail(&key_conf->list,
+					      &ahvif->cache[link_id].key_conf.list);
+
+				ath12k_info(ab, "set key param cached since vif not assign to radio\n");
+				goto out;
+			}
+
+			if (sta->mlo) {
+				unsigned long links = sta->valid_links;
+				for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+					arvif = ahvif->link[link_id];
+					arsta = ahsta->link[link_id];
+					if (WARN_ON(!arvif || !arsta))
+						continue;
+
+					/* TODO: Iterator API is called with rcu lock
+					 * hence need for this unlock/lock statement.
+					 * Need to revisit in next version
+					 */
+					rcu_read_unlock();
+					ret = ath12k_mac_set_key(arvif->ar, SET_KEY, arvif, arsta, key);
+					rcu_read_lock();
+					if (ret)
+						break;
+					arsta->keys[key->keyidx] = key;
+				}
+			} else {
+				arsta = &ahsta->deflink;
+				arvif = arsta->arvif;
+				if (WARN_ON(!arvif))
+					goto out;
+
+				/* TODO: Iterator API is called with rcu lock
+				 * hence need for this unlock/lock statement.
+				 * Need to revisit in next version
+				 */
+				rcu_read_unlock();
+				ret = ath12k_mac_set_key(arvif->ar, SET_KEY, arvif, arsta, key);
+				rcu_read_lock();
+				arsta->keys[key->keyidx] = key;
+			}
+		}
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_MODE1_RECOVERY,
+		   "Recovered sta:%pM link_id:%d, num_sta:%d\n",
+		   arsta->addr, arsta->link_id, arvif->ar->num_stations);
+
+	ret = 0;
+out:
+	return;
+}
+
+static void ath12k_core_iterate_sta_list(struct ath12k *ar,
+					 struct ath12k_link_vif *arvif)
+{
+	/* unlock ar mutex here as the iterator will be called
+	 * within rcu lock.
+	 */
+	mutex_unlock(&ar->conf_mutex);
+	ieee80211_iterate_stations_atomic(ar->ah->hw,
+					  ath12k_core_mode1_recovery_sta_list,
+					  arvif);
+	mutex_lock(&ar->conf_mutex);
+}
+
+static void ath12k_core_ml_sta_add(struct ath12k *ar)
+{
+	struct ath12k_link_vif *arvif, *tmp;
+	struct ieee80211_bss_conf *info;
+	struct ath12k_vif *ahvif;
+	struct ieee80211_vif *vif;
+
+	mutex_lock(&ar->conf_mutex);
+	list_for_each_entry_safe_reverse(arvif, tmp, &ar->arvifs, list) {
+		ahvif = arvif->ahvif;
+
+		if (!ahvif)
+			continue;
+
+		vif = ahvif->vif;
+		if (ahvif->vdev_type != WMI_VDEV_TYPE_STA)
+			continue;
+
+		if (!vif->valid_links)
+			continue;
+
+		ath12k_core_iterate_sta_list(ar, arvif);
+
+		info = vif->link_conf[arvif->link_id];
+		/* Set is_up to false as we will do
+		 * recovery for that vif in the
+		 * upcoming executions
+		 */
+		arvif->is_up = false;
+		if (vif->cfg.assoc && info)
+			ath12k_bss_assoc(ar, arvif, info);
+		else
+			ath12k_bss_disassoc(ar, arvif, false);
+		ath12k_dbg(ar->ab, ATH12K_DBG_MODE1_RECOVERY,
+			   "station vif:%pM recovered\n",
+			   arvif->addr);
+	}
+	mutex_unlock(&ar->conf_mutex);
+}
+
+/* API to recovery station VIF enabled in non-asserted links */
+static void ath12k_core_mlo_recover_station(struct ath12k_hw_group *ag,
+					    struct ath12k_base *assert_ab)
+{
+	struct ath12k_base *ab;
+	struct ath12k_pdev *pdev;
+	struct ath12k *ar;
+	int i, j;
+
+	for (i = 0; i < ag->num_chip; i++) {
+		ab = ag->ab[i];
+
+		if (ab == assert_ab)
+			continue;
+
+		for (j = 0; j < ab->num_radios; j++) {
+			pdev = &ab->pdevs[j];
+			ar = pdev->ar;
+
+			if (!ar)
+				continue;
+
+			if (list_empty(&ar->arvifs))
+				continue;
+
+			/* Re-add all MLD station VIF which are
+			 * in non-asserted link
+			 */
+			ath12k_core_ml_sta_add(ar);
+		}
+	}
+}
+
+static int ath12k_mlo_recovery_link_vif_reconfig(struct ath12k *ar,
+						 struct ath12k_vif *ahvif,
+						 struct ath12k_link_vif *arvif,
+						 struct ieee80211_vif *vif,
+						 struct ieee80211_bss_conf *link_conf)
+{
+	int i;
+	int link_id = arvif->link_id;
+	struct ath12k_hw *ah = ar->ah;
+	struct ieee80211_tx_queue_params params;
+	struct wmi_wmm_params_arg *p = NULL;
+	struct ieee80211_bss_conf *info;
+	u64 changed = 0;
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_MONITOR:
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		fallthrough;
+	default:
+		ieee80211_iterate_stations_atomic(ar->ah->hw,
+						  ath12k_core_mode1_recovery_sta_list,
+						  arvif);
+		fallthrough;
+	case NL80211_IFTYPE_AP: /* AP stations are handled later */
+		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+
+			if ((vif->active_links &&
+			    !(vif->active_links & BIT(link_id))) ||
+			    link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+				break;
+
+			switch (i) {
+			case IEEE80211_AC_VO:
+				p = &arvif->wmm_params.ac_vo;
+				break;
+			case IEEE80211_AC_VI:
+				p = &arvif->wmm_params.ac_vi;
+				break;
+			case IEEE80211_AC_BE:
+				p = &arvif->wmm_params.ac_be;
+				break;
+			case IEEE80211_AC_BK:
+				p = &arvif->wmm_params.ac_bk;
+				break;
+			}
+
+			params.cw_min = p->cwmin;
+			params.cw_max = p->cwmax;
+			params.aifs = p->aifs;
+			params.txop = p->txop;
+
+			mutex_lock(&ar->conf_mutex);
+			ath12k_mac_conf_tx(ar, arvif, i, &params);
+			mutex_unlock(&ar->conf_mutex);
+		}
+		break;
+	}
+
+	mutex_lock(&ah->conf_mutex);
+
+	/* common change flags for all interface types */
+	changed = BSS_CHANGED_ERP_CTS_PROT |
+		  BSS_CHANGED_ERP_PREAMBLE |
+		  BSS_CHANGED_ERP_SLOT |
+		  BSS_CHANGED_HT |
+		  BSS_CHANGED_BASIC_RATES |
+		  BSS_CHANGED_BEACON_INT |
+		  BSS_CHANGED_BSSID |
+		  BSS_CHANGED_CQM |
+		  BSS_CHANGED_QOS |
+		  BSS_CHANGED_TXPOWER |
+		  BSS_CHANGED_MCAST_RATE;
+
+	if (link_conf->mu_mimo_owner)
+		changed |= BSS_CHANGED_MU_GROUPS;
+
+	switch (vif->type) {
+#ifdef CONFIG_MAC80211_BONDED_SUPPORT
+	case NL80211_IFTYPE_DUMMY:
+		mutex_unlock(&ah->conf_mutex);
+		return 0;
+#endif
+	case NL80211_IFTYPE_STATION:
+		if (!vif->valid_links) {
+			/* Set this only for legacy stations */
+			changed |= BSS_CHANGED_ASSOC |
+				   BSS_CHANGED_ARP_FILTER |
+				   BSS_CHANGED_PS;
+
+			/* Assume re-send beacon info report to the driver */
+			changed |= BSS_CHANGED_BEACON_INFO;
+
+			if (link_conf->max_idle_period ||
+				link_conf->protected_keep_alive)
+				changed |= BSS_CHANGED_KEEP_ALIVE;
+
+			if (!arvif->is_created) {
+				ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+					    "bss info parameter changes %llx cached to apply after vdev create on channel assign\n",
+					    changed);
+				ahvif->cache[link_id].bss_conf_changed |= changed;
+
+				mutex_unlock(&ah->conf_mutex);
+				return 0;
+			}
+		}
+
+		mutex_lock(&ar->conf_mutex);
+		/* Set is_up to false as we will do
+		 * recovery for that vif in the
+		 * upcoming executions
+		 */
+		arvif->is_up = false;
+		ath12k_mac_bss_info_changed(ar, arvif, link_conf, changed);
+		if (vif->valid_links) {
+			info = vif->link_conf[link_id];
+			if (vif->cfg.assoc) {
+				if (info)
+					ath12k_bss_assoc(ar, arvif, info);
+			} else {
+				ath12k_bss_disassoc(ar, arvif, false);
+			}
+		}
+		mutex_unlock(&ar->conf_mutex);
+		break;
+	case NL80211_IFTYPE_OCB:
+		changed |= BSS_CHANGED_OCB;
+
+		mutex_lock(&ar->conf_mutex);
+		ath12k_mac_bss_info_changed(ar, arvif, link_conf, changed);
+		mutex_unlock(&ar->conf_mutex);
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		changed |= BSS_CHANGED_IBSS;
+		fallthrough;
+	case NL80211_IFTYPE_AP:
+		changed |= BSS_CHANGED_P2P_PS;
+
+		if (vif->type == NL80211_IFTYPE_AP) {
+			changed |= BSS_CHANGED_AP_PROBE_RESP;
+			ahvif->u.ap.ssid_len = vif->cfg.ssid_len;
+			if (vif->cfg.ssid_len)
+				memcpy(ahvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len);
+		}
+		fallthrough;
+	case NL80211_IFTYPE_MESH_POINT:
+		if (link_conf->enable_beacon) {
+			changed |= BSS_CHANGED_BEACON |
+				   BSS_CHANGED_BEACON_ENABLED;
+
+			mutex_lock(&ar->conf_mutex);
+			ath12k_mac_bss_info_changed(ar, arvif, link_conf,
+					changed & ~BSS_CHANGED_IDLE);
+			mutex_unlock(&ar->conf_mutex);
+
+		}
+		break;
+	case NL80211_IFTYPE_NAN:
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_P2P_DEVICE:
+		/* nothing to do */
+		break;
+	case NL80211_IFTYPE_UNSPECIFIED:
+	case NUM_NL80211_IFTYPES:
+	case NL80211_IFTYPE_P2P_CLIENT:
+	case NL80211_IFTYPE_P2P_GO:
+	case NL80211_IFTYPE_WDS:
+		WARN_ON(1);
+		break;
+	}
+
+	mutex_unlock(&ah->conf_mutex);
+	ath12k_dbg(ar->ab, ATH12K_DBG_MODE1_RECOVERY,
+		   "Reconfig link vif done:type:%d\n", vif->type);
+
+	return 0;
+}
+
+static int ath12k_mlo_core_recovery_reconfig_link_bss(struct ath12k *ar,
+						      struct ieee80211_bss_conf *link_conf,
+						      struct ath12k_vif *ahvif,
+						      struct ath12k_link_vif *arvif)
+{
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_hw *ah = ar->ah;
+	enum ieee80211_ap_reg_power power_type;
+	struct ath12k_wmi_peer_create_arg param;
+	struct ieee80211_chanctx_conf *ctx = &arvif->chanctx;
+	int ret = -1;
+	u8 link_id = link_conf->link_id;
+
+	ath12k_dbg(ab, ATH12K_DBG_MODE1_RECOVERY,
+		   "Recovering: link_id:%d [vif->link_id:%d] type:%d\n",
+		   link_id, arvif->link_id, vif->type);
+
+	mutex_lock(&ah->conf_mutex);
+
+	if (vif->type == NL80211_IFTYPE_AP &&
+	    ar->num_peers > (ar->max_num_peers - 1)) {
+		ath12k_err(ab, "Error in peers:%d\n",
+			   ar->num_peers);
+		goto exit;
+	}
+
+	if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
+		ath12k_err(ab, "failed to create vdev, reached max vdev limit %d[%d]\n",
+			   ar->num_created_vdevs,
+			   TARGET_NUM_VDEVS);
+		goto exit;
+	}
+
+	mutex_lock(&ar->conf_mutex);
+
+	ret = ath12k_mac_vdev_create(ar, arvif);
+	if (!ret)
+		ath12k_mac_vif_cache_flush(ar, vif, arvif->link_id);
+
+	if (ar->supports_6ghz && ctx->def.chan->band == NL80211_BAND_6GHZ &&
+	    (ahvif->vdev_type == WMI_VDEV_TYPE_STA ||
+	    ahvif->vdev_type == WMI_VDEV_TYPE_AP)) {
+		power_type = link_conf->power_type;
+		ath12k_dbg(ab, ATH12K_DBG_MAC, "mac chanctx power type %d\n",
+			   power_type);
+		if (power_type == IEEE80211_REG_UNSET_AP)
+			power_type = IEEE80211_REG_LPI_AP;
+
+		/* TODO: Transmit Power Envelope specification for 320 is not
+		 * available yet. Need to add TPE 320 support when spec is ready
+		 */
+		if (ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+		    ctx->def.width != NL80211_CHAN_WIDTH_320) {
+			ath12k_mac_parse_tx_pwr_env(ar, arvif, ctx);
+		}
+	}
+
+	/* for some targets bss peer must be created before vdev_start */
+	if (ab->hw_params->vdev_start_delay &&
+	    ahvif->vdev_type != WMI_VDEV_TYPE_AP &&
+	    ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+	    !ath12k_peer_find_by_vdev_id(ab, arvif->vdev_id)) {
+		ret = 0;
+		goto exit;
+	}
+
+	if (ab->hw_params->vdev_start_delay &&
+	    (ahvif->vdev_type == WMI_VDEV_TYPE_AP ||
+	     ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
+		param.vdev_id = arvif->vdev_id;
+		param.peer_type = WMI_PEER_TYPE_DEFAULT;
+		param.peer_addr = ar->mac_addr;
+
+		ret = ath12k_peer_create(ar, arvif, NULL, &param);
+		if (ret) {
+			ath12k_warn(ab, "failed to create peer after vdev start delay: %d",
+				    ret);
+			goto exit;
+		}
+	}
+
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+		ret = ath12k_mac_monitor_start(ar);
+		if (ret)
+			goto exit;
+		arvif->is_started = true;
+		goto exit;
+	}
+
+	ret = ath12k_mac_vdev_start(arvif, &ctx->def, ctx->radar_enabled);
+	if (ret) {
+		ath12k_err(ab, "vdev start failed during recovery\n");
+		goto exit;
+	}
+
+	if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+	    test_bit(MONITOR_VDEV_CREATED, &ar->monitor_flags))
+		ath12k_mac_monitor_start(ar);
+
+	arvif->is_started = true;
+	arvif->is_created = true;
+
+	ret = 0;
+exit:
+	ath12k_dbg(ab, ATH12K_DBG_MODE1_RECOVERY,
+		   "ret:%d No. of vdev created:%d, links_map:%lu, flag:%d\n",
+		   ret,
+		   ahvif->num_vdev_created,
+		   ahvif->links_map,
+		   arvif->is_created);
+
+	mutex_unlock(&ar->conf_mutex);
+	mutex_unlock(&ah->conf_mutex);
+
+	return ret;
+}
+
+static void ath12k_core_peer_disassoc(struct ath12k_hw_group *ag,
+				      struct ath12k_base *assert_ab)
+{
+	struct ath12k_base *ab;
+	struct ath12k_peer *peer, *tmp;
+	struct ath12k_sta *ahsta;
+	struct ieee80211_sta *sta;
+	int i;
+
+	for (i = 0; i < ag->num_chip; i++) {
+		ab = ag->ab[i];
+
+		list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+			if (!peer->sta || !peer->vif)
+				continue;
+
+			/* Allow sending disassoc to legacy peer
+			 * only for asserted radio
+			 */
+			if (!peer->mlo && ab != assert_ab)
+				continue;
+
+			sta = peer->sta;
+			ahsta = (struct ath12k_sta *)sta->drv_priv;
+
+			/* Send low ack to disassoc the MLD station
+			 * Need to check on the sequence as FW has
+			 * discarded the management packet at this
+			 * sequence.
+			 */
+			if (!ahsta->low_ack_sent) {
+				ath12k_dbg(ab, ATH12K_DBG_MODE1_RECOVERY,
+					   "sending low ack for/disassoc:%pM\n",
+					   sta->addr);
+				/* set num of packets to maximum so
+				 * that we distinguish in the hostapd
+				 * to send disassoc irrespective of
+				 * hostapd conf
+				 */
+				ieee80211_report_low_ack(sta, 0xFFFF);
+				/* Using this flag to avoid certain
+				 * known warnings which will be
+				 * triggered when umac reset is
+				 * happening
+				 */
+				ahsta->low_ack_sent = true;
+			}
+		}
+	}
+}
+
+/* Wrapper function for recovery after crash */
+int ath12k_mode1_recovery_reconfig(struct ath12k_base *ab)
+{
+	struct ath12k *ar = NULL;
+	struct ath12k_pdev *pdev;
+	struct ath12k_link_vif *arvif, *tmp;
+	struct ath12k_vif *ahvif;
+	struct ieee80211_bss_conf *link;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_base *partner_ab;
+	struct ath12k_hw *ah;
+	struct ieee80211_key_conf *key;
+	int i, j, key_idx;
+	int ret = -EINVAL;
+	bool started = false;
+
+
+	for (j = 0; j < ab->num_radios; j++) {
+		pdev = &ab->pdevs[j];
+		ar = pdev->ar;
+		if (WARN_ON(!ar))
+			continue;
+
+		if (!ath12k_ftm_mode) {
+			if (ath12k_mac_radio_start(ar)) {
+				ath12k_err(ab, "mac radio start failed\n");
+				return ret;
+			}
+			ar->ah->in_recovery = true;
+			started = true;
+		}
+
+		if (list_empty(&ar->arvifs))
+			continue;
+
+		/* Decrement number of vdev created for all the arvif
+		 * under a ar
+		 */
+		list_for_each_entry_safe_reverse(arvif, tmp, &ar->arvifs, list) {
+			arvif->ahvif->num_vdev_created--;
+		}
+	}
+
+	/* add chanctx/hw_config/filter part */
+	for (j = 0; j < ab->num_radios; j++) {
+		pdev = &ab->pdevs[j];
+		ar = pdev->ar;
+
+		if (!ar)
+			continue;
+
+		mutex_lock(&ar->conf_mutex);
+
+		list_for_each_entry_safe_reverse(arvif, tmp, &ar->arvifs, list) {
+			ahvif = arvif->ahvif;
+
+			if (!ahvif)
+				continue;
+
+			arvif->is_started = false;
+			arvif->is_created = false;
+
+			rcu_read_lock();
+			link = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
+
+			/* Not expected */
+			if (WARN_ON(!link)) {
+				rcu_read_unlock();
+				continue;
+			}
+			rcu_read_unlock();
+			spin_lock_bh(&ar->data_lock);
+
+			ar->rx_channel = link->chandef.chan;
+			spin_unlock_bh(&ar->data_lock);
+
+			/* hw_config */
+			if (ar->ah->hw->conf.flags & IEEE80211_CONF_MONITOR) {
+				set_bit(MONITOR_CONF_ENABLED, &ar->monitor_flags);
+
+				if (test_bit(MONITOR_VDEV_CREATED,
+					     &ar->monitor_flags)) {
+					ret = ath12k_mac_monitor_vdev_create(ar);
+					if (ret) {
+						ath12k_err(ab, "unable to create monitor vdev\n");
+					} else if (ath12k_mac_monitor_start(ar)) {
+						ath12k_err(ab, "unable to start monitor vdev\n");
+						ath12k_mac_monitor_vdev_delete(ar);
+					}
+				}
+			} else {
+				clear_bit(MONITOR_CONF_ENABLED, &ar->monitor_flags);
+				if (test_bit(MONITOR_VDEV_CREATED,
+					     &ar->monitor_flags)) {
+					ret = ath12k_mac_monitor_stop(ar);
+					if (ret)
+						ath12k_err(ab, "failed to stop monitor\n");
+					else if (ath12k_mac_monitor_vdev_delete(ar))
+						ath12k_err(ab, "monitor vdev delete failed\n");
+				}
+			}
+
+
+			/* configure filter - we can use the same flag*/
+		}
+		mutex_unlock(&ar->conf_mutex);
+	}
+
+	/* assign chanctx part */
+	for (j = 0; j < ab->num_radios; j++) {
+		pdev = &ab->pdevs[j];
+		ar = pdev->ar;
+
+		if (!ar)
+			continue;
+
+		mutex_lock(&ar->conf_mutex);
+
+		list_for_each_entry_safe_reverse(arvif, tmp, &ar->arvifs, list) {
+			ahvif = arvif->ahvif;
+
+			if (!ahvif)
+				continue;
+
+			rcu_read_lock();
+			link = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
+
+			/* Not expected */
+			if (WARN_ON(!link)) {
+				rcu_read_unlock();
+				continue;
+			}
+			rcu_read_unlock();
+
+			/* unlock ar mutex here since we will take ah mutex in
+			 * the reconfig link API thus maintaining same order
+			 * as we use in other places.
+			 */
+			mutex_unlock(&ar->conf_mutex);
+			ret = ath12k_mlo_core_recovery_reconfig_link_bss(ar, link, ahvif, arvif);
+			if (ret) {
+				ath12k_err(ab, "ERROR in reconfig link:%d\n",
+					   ret);
+				return ret;
+			}
+			mutex_lock(&ar->conf_mutex);
+			ath12k_dbg(ab, ATH12K_DBG_MODE1_RECOVERY,
+				   "vdev_created getting incremented:%d\n",
+				   ahvif->num_vdev_created);
+		}
+		mutex_unlock(&ar->conf_mutex);
+		ath12k_dbg(ab, ATH12K_DBG_MODE1_RECOVERY, "assign chanctx is completed\n");
+	}
+
+	for (i = 0; i < ag->num_chip; i++) {
+		partner_ab = ag->ab[i];
+
+		clear_bit(ATH12K_FLAG_UMAC_RECOVERY_START, &partner_ab->dev_flags);
+	}
+
+	/* reconfig_link_bss */
+	for (j = 0; j < ab->num_radios; j++) {
+		pdev = &ab->pdevs[j];
+		ar = pdev->ar;
+
+		if (!ar)
+			continue;
+
+		mutex_lock(&ar->conf_mutex);
+
+		list_for_each_entry_safe_reverse(arvif, tmp, &ar->arvifs, list) {
+			ahvif = arvif->ahvif;
+
+			if (!ahvif)
+				continue;
+
+			rcu_read_lock();
+			link = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
+
+			rcu_read_unlock();
+
+			/* Not expected */
+			if (WARN_ON(!link))
+				continue;
+
+			/* unlock ar mutex here since we will take ah mutex in
+			 * the reconfig link API thus maintaining same order
+			 * as we use in other places.
+			 */
+			mutex_unlock(&ar->conf_mutex);
+			ret = ath12k_mlo_recovery_link_vif_reconfig(ar, ahvif,
+								    arvif,
+								    arvif->ahvif->vif,
+								    link);
+			if (ret) {
+				ath12k_err(ab, "Failed to update reconfig_bss\n");
+				return ret;
+			}
+			mutex_lock(&ar->conf_mutex);
+		}
+		mutex_unlock(&ar->conf_mutex);
+	}
+
+	/* recover station VIF enabled in non-asserted links */
+	ath12k_core_mlo_recover_station(ag, ab);
+
+	/* sta state part */
+	for (i = 0; i < ag->num_chip; i++) {
+		partner_ab = ag->ab[i];
+
+		for (j = 0; j < partner_ab->num_radios; j++) {
+			pdev = &partner_ab->pdevs[j];
+			ar = pdev->ar;
+
+			if (!ar)
+				continue;
+
+			mutex_lock(&ar->conf_mutex);
+
+			if (list_empty(&ar->arvifs)) {
+				mutex_unlock(&ar->conf_mutex);
+				continue;
+			}
+			list_for_each_entry_safe_reverse(arvif, tmp, &ar->arvifs, list) {
+				ahvif = arvif->ahvif;
+
+				if (!ahvif)
+					continue;
+
+				/* unlock ar mutex here as the iterator will be called
+				 * within rcu lock.
+				 */
+				if (ahvif->vdev_type != WMI_VDEV_TYPE_STA) {
+					ath12k_core_iterate_sta_list(ar, arvif);
+				}
+
+				for (key_idx = 0; key_idx < WMI_MAX_KEY_INDEX; key_idx++) {
+					key = arvif->keys[key_idx];
+					if (key) {
+						ath12k_dbg(ab, ATH12K_DBG_MODE1_RECOVERY,
+							   "key:%p cipher:%d idx:%d flags:%d\n",
+							   key, key->cipher, key->keyidx, key->flags);
+						ret = ath12k_mac_set_key(arvif->ar, SET_KEY, arvif, NULL, key);
+					}
+				}
+			}
+			mutex_unlock(&ar->conf_mutex);
+			ar->ah->in_recovery = false;
+		}
+	}
+
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ag->ah[i];
+		if (!ah)
+			continue;
+
+		for (j = 0; j < ab->num_radios; j++) {
+			pdev = &ab->pdevs[j];
+			ar = pdev->ar;
+
+			if (!ar)
+				continue;
+
+			ath12k_mac_reconfig_complete(ah->hw,
+						     IEEE80211_RECONFIG_TYPE_RESTART,
+						     ar);
+		}
+	}
+
+	/* Send disassoc to MLD STA */
+	ath12k_core_peer_disassoc(ag, ab);
+	ab->recovery_start = false;
+	ag->recovery_mode = ATH12K_MLO_RECOVERY_MODE0;
+	ath12k_dbg(ab, ATH12K_DBG_MODE1_RECOVERY, "Mode1 recovery completed\n");
+	return ret;
 }
 
 static void ath12k_core_restart(struct work_struct *work)
 {
 	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
+	struct ath12k_hw_group *ag = ab->ag;
 	int ret;
+	struct ath12k_hw *ah;
+	struct ath12k *ar;
+	int i;
 
 	if (!ab->is_reset)
 		ath12k_core_pre_reconfigure_recovery(ab);
@@ -763,38 +3129,402 @@
 	ret = ath12k_core_reconfigure_on_crash(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
+		/*
+		 * If for any reason, reconfiguration fails, issue bug on for
+		 * Mode 0
+		 */
+		if (ath12k_ssr_failsafe_mode && ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE0)
+			BUG();
 		return;
 	}
 
-	if (ab->is_reset)
-		complete_all(&ab->reconfigure_complete);
+	ath12k_dbg(ab, ATH12K_DBG_MODE1_RECOVERY,
+			"ab->is_reset[%d]\n", ab->is_reset);
+	ar = ab->pdevs[0].ar;
 
 	if (!ab->is_reset)
 		ath12k_core_post_reconfigure_recovery(ab);
+
+	if (ath12k_core_hw_group_start_ready(ag) &&
+	    ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE0) {
+		for (i = 0; i < ag->num_hw; i++) {
+			ah = ag->ah[i];
+			if (!ah)
+				continue;
+
+			mutex_lock(&ah->conf_mutex);
+			ieee80211_restart_hw(ah->hw);
+			mutex_unlock(&ah->conf_mutex);
+		}
+	}
+#ifdef CONFIG_ATH12K_SAWF
+	if (ath12k_sawf_reconfigure_on_crash(ab))
+		ath12k_warn(ab, "SAWF SLA reconfiguring failed\n");
+#endif /* CONFIG_ATH12K_SAWF */
+
+	if (ath12k_core_hw_group_start_ready(ag) &&
+	    ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE1) {
+		queue_work(ab->workqueue_aux, &ab->recovery_work);
+	}
+}
+
+static void ath12k_core_mode1_recovery_work(struct work_struct *work)
+{
+	struct ath12k_base *ab = container_of(work, struct ath12k_base, recovery_work);
+
+	ath12k_dbg(ab, ATH12K_DBG_MODE1_RECOVERY,
+		   "queued recovery work\n");
+	ath12k_mode1_recovery_reconfig(ab);
+}
+
+static int ath12k_core_rproc_stop(struct rproc *rproc)
+{
+	int ret = 0;
+
+	if (!rproc)
+		return ret;
+
+	if (rproc->state != RPROC_OFFLINE) {
+		ret = rproc_shutdown(rproc);
+		if (ret < 0) {
+			ath12k_err(NULL, "rproc:%s stop failed:%d\n",
+				   rproc->name, ret);
+		}
+	}
+	return ret;
 }
 
+static int ath12k_core_rproc_start(struct rproc *rproc)
+{
+	const struct firmware *firmware_p = NULL;
+	struct device *dev;
+	int ret = 0;
+
+	if (rproc && rproc->state != RPROC_RUNNING) {
+		dev = &rproc->dev;
+		ret = request_firmware(&firmware_p, rproc->firmware, dev);
+		if (ret < 0) {
+			ath12k_err(NULL, "rproc:%s request_firmware failed: %d\n",
+				   rproc->name, ret);
+			return ret;
+		}
+		ret = rproc_boot(rproc);
+		if (ret < 0) {
+			ath12k_err(NULL, "rproc:%s start failed: %d\n",
+				   rproc->name, ret);
+		}
+		release_firmware(firmware_p);
+	}
+	return ret;
+}
+
+static void ath12k_core_rproc_coredump(struct ath12k_base *ab,
+				       struct rproc *rproc)
+{
+	if (rproc) {
+		ath12k_info(ab, "WLAN target is restarting");
+		rproc->ops->coredump(rproc);
+	}
+}
+
+/* Asserted target's reboot handling for crash type ATH12K_RPROC_ROOTPD_AHB_CRASH */
+static void ath12k_core_rproc_boot_recovery(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_ahb *ab_ahb = ath12k_ahb_priv(ab);
+
+	if (ag->crash_type != ATH12K_RPROC_ROOTPD_AHB_CRASH)
+		return;
+
+	/*
+	 * Stop user pd
+	 * Collect coredump using user pd if enabled
+	 * Stop root pd
+	 * Collect coredump using root pd
+	 * Free Target memory chunk
+	 * Start root pd, then start user pd
+	 */
+
+	ab_ahb->tgt_rproc->state = RPROC_CRASHED;
+	ath12k_core_rproc_stop(ab_ahb->tgt_rproc);
+
+	ab_ahb->tgt_rproc->state = RPROC_SUSPENDED;
+	ath12k_core_rproc_coredump(ab, ab_ahb->tgt_rproc);
+
+	ab_ahb->tgt_rrproc->state = RPROC_RUNNING;
+	ath12k_core_rproc_stop(ab_ahb->tgt_rrproc);
+	ath12k_core_rproc_coredump(ab, ab_ahb->tgt_rrproc);
+
+	ath12k_qmi_free_target_mem_chunk(ab);
+
+	ath12k_core_rproc_start(ab_ahb->tgt_rrproc);
+	ath12k_core_rproc_start(ab_ahb->tgt_rproc);
+	ag->crash_type = ATH12K_NO_CRASH;
+	ath12k_core_wait_dump_collect(ab);
+}
+
+/* Asserted target's reboot handling for crash type ATH12K_RPROC_USERPD_HYBRID_CRASH */
+static void ath12k_core_upd_rproc_boot_recovery(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_ahb *ab_ahb = ath12k_ahb_priv(ab);
+
+	if (ag->crash_type != ATH12K_RPROC_USERPD_HYBRID_CRASH)
+		return;
+
+	/*
+	 * Stop user pd
+	 * Collect coredump using user pd
+	 * Free Target memory chunk
+	 * Start root pd
+	 */
+	ath12k_core_rproc_stop(ab_ahb->tgt_rproc);
+	ath12k_core_rproc_coredump(ab, ab_ahb->tgt_rproc);
+	ath12k_qmi_free_target_mem_chunk(ab);
+	ath12k_core_rproc_start(ab_ahb->tgt_rproc);
+	ag->crash_type = ATH12K_NO_CRASH;
+	ath12k_core_wait_dump_collect(ab);
+}
+
+/* Asserted target's reboot handling for crash type ATH12K_RPROC_ROOTPD_HYBRID_CRASH */
+static void ath12k_core_rpd_rproc_boot_recovery(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_ahb *ab_ahb = ath12k_ahb_priv(ab);
+	struct rproc *rootpd_rproc = ab_ahb->tgt_rrproc;
+	struct rproc *txtpd_rproc = ab_ahb->tgt_text_rproc;
+	struct rproc *usrpd_rproc = ab_ahb->tgt_rproc;
+	struct ath12k_base *tmp_ab;
+	int user_pd, ret;
+
+	if (ag->crash_type != ATH12K_RPROC_ROOTPD_HYBRID_CRASH)
+		return;
+
+	if (!rootpd_rproc || !usrpd_rproc) {
+		ath12k_err(ab, "rproc(s) are null\n");
+		return;
+	}
+
+	if (rootpd_rproc->state == RPROC_OFFLINE) {
+		ath12k_err(ab, "rootpd rproc:%s is already stopped.\n",
+			   rootpd_rproc->name);
+		return;
+	}
+
+	if (usrpd_rproc->state == RPROC_OFFLINE) {
+		ath12k_err(ab, "userpd rproc:%s is already stopped.\n",
+			   usrpd_rproc->name);
+		return;
+	}
+
+	usrpd_rproc->state = RPROC_CRASHED;
+	ret = ath12k_core_rproc_stop(usrpd_rproc);
+	if (ret)
+		ath12k_err(ab, "failed to stop user_pd:%s ret:%d\n",
+			   usrpd_rproc->name, ret);
+
+	/* Reboot Sequence of all remoteproc pds
+	 * 1. Stop all user pds
+	 * 2. In case of text device present, stop text pd
+	 * 3. Stop the root pd
+	 * 4. Do the coredump using root pd handle once all user pds and root
+	 *    pd are stopped:
+	 *    Root pd handle coredump will take care of all the userpd data
+	 *    collection as part of the coredump
+	 * 5. Free Target memory chunk
+	 * 6. Power up the remote processor again as per below sequence
+	 *    Start the rootpd device
+	 *    Start textpd device if present
+	 *    Start all userpd devices
+	 */
+	if (!ag->num_started) {
+		if (txtpd_rproc && txtpd_rproc->state != RPROC_OFFLINE) {
+			txtpd_rproc->state = RPROC_CRASHED;
+			ret = ath12k_core_rproc_stop(txtpd_rproc);
+			if (ret)
+				ath12k_err(ab, "failed to stop text_pd:%s ret:%d\n",
+					   txtpd_rproc->name, ret);
+		}
+		rootpd_rproc->state = RPROC_RUNNING;
+		ret = ath12k_core_rproc_stop(rootpd_rproc);
+		if (ret)
+			ath12k_err(ab, "failed to stop root_pd:%s ret:%d\n",
+				   rootpd_rproc->name, ret);
+
+		ath12k_core_rproc_coredump(ab, rootpd_rproc);
+	}
+
+	ath12k_qmi_free_target_mem_chunk(ab);
+
+	if (!ag->num_started) {
+		ret = ath12k_core_rproc_start(rootpd_rproc);
+		if (ret)
+			ath12k_err(ab, "failed to start root_pd:%s ret:%d\n",
+				   rootpd_rproc->name, ret);
+
+		ath12k_core_wait_dump_collect(ab);
+
+		if (txtpd_rproc) {
+			ret = ath12k_core_rproc_start(txtpd_rproc);
+			if (ret)
+				ath12k_err(ab, "failed to start text_pd:%s ret:%d\n",
+					   txtpd_rproc->name, ret);
+		}
+
+		for (user_pd = 0; user_pd < ag->num_chip; user_pd++) {
+			tmp_ab = ag->ab[user_pd];
+			if (tmp_ab->hif.bus == ATH12K_BUS_PCI)
+				continue;
+
+			ab_ahb = ath12k_ahb_priv(tmp_ab);
+			usrpd_rproc = ab_ahb->tgt_rproc;
+			ret = ath12k_core_rproc_start(usrpd_rproc);
+			if (ret) {
+				ath12k_err(tmp_ab, "failed to start user_pd:%s ret:%d\n",
+					   usrpd_rproc->name, ret);
+			}
+		}
+
+		ag->crash_type = ATH12K_NO_CRASH;
+	}
+}
+
+/*
+ * Trigger umac_reset with umac_reset flag set. This is a
+ * waiting function which will return only after UMAC reset
+ * is complete on non-asserted chip set. UMAC reset completion
+ * is identified by waiting for MLO Teardown complete for all
+ * chipsets
+ */
+static int ath12k_core_trigger_umac_reset(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	long time_left;
+	int ret = 0;
+
+	reinit_completion(&ag->umac_reset_complete);
+
+	ath12k_mac_mlo_teardown_with_umac_reset(ab);
+
+	time_left = wait_for_completion_timeout(&ag->umac_reset_complete,
+						msecs_to_jiffies(ATH12K_UMAC_RESET_TIMEOUT_IN_MS));
+
+	if (!time_left) {
+		ath12k_warn(ab, "UMAC reset didn't get completed within 200 ms\n");
+		ret = -ETIMEDOUT;
+	}
+
+	ag->trigger_umac_reset = false;
+	return ret;
+}
+
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+static void ath12k_core_disable_ppe_rtnl_bond_release(struct ath12k_base *ab)
+{
+	struct ath12k *ar;
+	struct ath12k_pdev *pdev;
+	struct ath12k_link_vif *arvif;
+	struct net_device *link_ndev;
+	int i;
+
+	rtnl_lock();
+
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		ar = pdev->ar;
+
+		if (!ar || ar->state == ATH12K_STATE_OFF)
+			continue;
+
+		mutex_lock(&ar->conf_mutex);
+		list_for_each_entry(arvif, &ar->arvifs, list) {
+			if (arvif->ndev_pvt) {
+				link_ndev = arvif->ndev_pvt->link_ndev;
+				if (arvif->ndev_pvt->bond_dev &&
+				    arvif->ndev_pvt->bond_dev->flags & IFF_UP) {
+					netif_tx_stop_all_queues(arvif->ndev_pvt->bond_dev);
+					arvif->ndev_pvt->bond_dev->flags &= ~IFF_UP;
+				}
+
+				if (link_ndev) {
+					netif_tx_stop_all_queues(link_ndev);
+					if (ab->ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE0) {
+						ath12k_bond_link_release(arvif);
+						ath12k_disable_ppe_for_link_netdev(ab,
+										   arvif,
+										   link_ndev);
+					}
+				}
+			}
+		}
+		mutex_unlock(&ar->conf_mutex);
+	}
+
+	rtnl_unlock();
+}
+#endif
+
 static void ath12k_core_reset(struct work_struct *work)
 {
 	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
-	int reset_count, fail_cont_count;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_hw *ah;
+	struct ath12k_base *partner_ab;
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+	struct ath12k_pdev *pdev;
+	struct ath12k *ar;
+	int j;
+#endif
+	int reset_count, fail_cont_count, i;
 	long time_left;
+	bool teardown = false;
 
-	if (!(test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))) {
-		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
-		return;
+	if (ab->recovery_mode_address) {
+		switch (*ab->recovery_mode_address) {
+		case ATH12K_MLO_RECOVERY_MODE1:
+			ag->recovery_mode = ATH12K_MLO_RECOVERY_MODE1;
+			break;
+		case ATH12K_MLO_RECOVERY_MODE0:
+			fallthrough;
+		default:
+			ag->recovery_mode = ATH12K_MLO_RECOVERY_MODE0;
 	}
+		ath12k_dbg(ab, ATH12K_DBG_MODE1_RECOVERY,
+			   "mode:%d\n", ag->recovery_mode);
+	} else {
+		ag->recovery_mode = ATH12K_MLO_RECOVERY_MODE0;
+	}
+	ath12k_info(ab, "Recovery is initiated with Mode%s\n",
+		    (ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE0 ? "0" : "1"));
+
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+	if (!ab->is_reset) {
+		ath12k_hif_ppeds_irq_disable(ab, PPEDS_IRQ_PPE2TCL);
+		ath12k_hif_ppeds_irq_disable(ab, PPEDS_IRQ_REO2PPE);
+		ath12k_hif_ppeds_irq_disable(ab, PPEDS_IRQ_PPE_WBM2SW_REL);
+		ath12k_core_disable_ppe_rtnl_bond_release(ab);
+	}
+#endif
 
 	/* Sometimes the recovery will fail and then the next all recovery fail,
-	 * this is to avoid infinite recovery since it can not recovery success
+	 * this is to avoid infinite recovery since it can not recovery success.
 	 */
 	fail_cont_count = atomic_read(&ab->fail_cont_count);
 
-	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
+	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL) {
+		ath12k_warn(ab, "Recovery Failed, Fail count:%d MAX_FAIL_COUNT final:%d\n",
+				fail_cont_count,
+				ATH12K_RESET_MAX_FAIL_COUNT_FINAL);
 		return;
+	}
 
 	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
-	    time_before(jiffies, ab->reset_fail_timeout))
+	    time_before(jiffies, ab->reset_fail_timeout)) {
+		ath12k_warn(ab, "Recovery Failed, Fail count:%d MAX_FAIL_COUNT first:%d\n",
+				fail_cont_count,
+				ATH12K_RESET_MAX_FAIL_COUNT_FIRST);
 		return;
+	}
 
 	reset_count = atomic_inc_return(&ab->reset_count);
 
@@ -805,6 +3535,9 @@
 		 */
 		ath12k_warn(ab, "already resetting count %d\n", reset_count);
 
+		if (ath12k_ssr_failsafe_mode && ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE0)
+			BUG();
+
 		reinit_completion(&ab->reset_complete);
 		time_left = wait_for_completion_timeout(&ab->reset_complete,
 							ATH12K_RESET_TIMEOUT_HZ);
@@ -816,32 +3549,286 @@
 
 		ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
 		/* Record the continuous recovery fail count when recovery failed*/
-		fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
+		atomic_inc(&ab->fail_cont_count);
 	}
 
 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");
+	ab->recovery_start_time = jiffies;
+
+	mutex_lock(&ag->mutex_lock);
+
+	if (!ab->is_reset)
+		ath12k_hif_irq_disable(ab);
 
 	ab->is_reset = true;
 	atomic_set(&ab->recovery_count, 0);
 
+	/* Incase recovery fails and FW asserts again, this is to prevent invalid operation. */
+	if (ag->num_started && ab->fw_recovery_support)
+		ag->num_started--;
+
+	if (!ag->hw_queues_stopped) {
+		ath12k_core_mlo_hw_queues_stop(ag, ab);
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+		if (ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE1) {
+			for (i = 0; i < ag->num_chip; i++) {
+				partner_ab = ag->ab[i];
+				if (ab == partner_ab)
+					continue;
+				for (j = 0; j < partner_ab->num_radios; j++) {
+					pdev = &partner_ab->pdevs[j];
+					ar = pdev->ar;
+					if (!ar)
+						continue;
+					ath12k_ppeds_partner_link_stop_queues(ar);
+				}
+			}
+		}
+#endif
+	}
+
+
+	for (i = 0; i < ag->num_chip; i++) {
+		partner_ab = ag->ab[i];
+		if (ab == partner_ab)
+			continue;
+
+		teardown = false;
+		/* Need to check partner_ab flag to select recovery mode
+		 * as Mode0, if continuous reset has happened
+		 */
+
+		if (ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE1) {
+			if (test_bit(ATH12K_FLAG_UMAC_RECOVERY_START, &partner_ab->dev_flags) ||
+			    test_bit(ATH12K_FLAG_RECOVERY, &partner_ab->dev_flags)) {
+				/* On receiving MHI Interrupt for pdev which is
+				 * already in UMAC Recovery, then fallback to
+				 * MODE0
+				 */
+				ag->recovery_mode = ATH12K_MLO_RECOVERY_MODE0;
+			} else if ((!test_bit(ATH12K_FLAG_RECOVERY, &partner_ab->dev_flags)) &&
+			    (!(test_bit(ATH12K_FLAG_UMAC_RECOVERY_START, &partner_ab->dev_flags)))) {
+				/* Set dev flags to UMAC recovery START
+				 * and set flag to send teardown later
+				 */
+				ath12k_dbg(ab, ATH12K_DBG_MODE1_RECOVERY,
+					   "setting teardown to true\n");
+				set_bit(ATH12K_FLAG_UMAC_RECOVERY_START, &partner_ab->dev_flags);
+				teardown = true;
+				continue;
+			} else {
+				/* if one of the ab is already in recovery, or
+				 * in UMAC recovery start phase
+				 * then fall back to MODE0
+				 */
+				ag->recovery_mode = ATH12K_MLO_RECOVERY_MODE0;
+			}
+			ath12k_info(ab, "Recovery is falling back to Mode0 as one of the partner chip is already in recovery\n");
+		}
+	}
+
+	if (teardown) {
+		if (ath12k_core_trigger_umac_reset(ab) ||
+		    ath12k_mac_partner_peer_cleanup(ab)) {
+			/* Fallback to Mode0 if umac reset/peer_cleanup is
+			 * failed
+			 */
+			ag->recovery_mode = ATH12K_MLO_RECOVERY_MODE0;
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+			ath12k_core_disable_ppe_rtnl_bond_release(ab);
+#endif
+			ath12k_info(ab, "Recovery is falling back to Mode0\n");
+		} else {
+			/* wake queues here as ping should continue for
+			 * legacy clients in non-asserted chipsets
+			 */
+			for (i = 0; i < ag->num_hw; i++) {
+				ah = ag->ah[i];
+				if (!ah)
+					continue;
+
+				ieee80211_wake_queues(ah->hw);
+			}
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+			for (i = 0; i < ag->num_chip; i++) {
+				partner_ab = ag->ab[i];
+				if (ab == partner_ab)
+					continue;
+				for (j = 0; j < partner_ab->num_radios; j++) {
+					pdev = &partner_ab->pdevs[j];
+					ar = pdev->ar;
+
+					if (!ar)
+						continue;
+					ath12k_ppeds_partner_link_start_queues(ar);
+				}
+			}
+#endif
+			ath12k_dbg(ab, ATH12K_DBG_MODE1_RECOVERY,
+				   "Queues are started as umac reset is completed for partner chipset\n");
+		}
+	}
+
+	for (i = 0; i < ag->num_chip; i++) {
+		partner_ab = ag->ab[i];
+		if (ab == partner_ab)
+			continue;
+
+		/* issue FW Hang command on partner chips for Mode0. This is a fool proof
+		 * method to ensure recovery of all partner chips in MODE0 instead of
+		 * relying on firmware to crash partner chips
+		 */
+		if (ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE0 &&
+		    !test_bit(ATH12K_FLAG_RECOVERY, &partner_ab->dev_flags)) {
+			ath12k_info(ab, "sending fw_hang cmd to partner chipset(s)\n");
+			ath12k_wmi_force_fw_hang_cmd(partner_ab->pdevs[0].ar,
+						     ATH12K_WMI_FW_HANG_ASSERT_TYPE,
+						     ATH12K_WMI_FW_HANG_DELAY, true);
+		}
+	}
+
 	ath12k_core_pre_reconfigure_recovery(ab);
 
-	reinit_completion(&ab->reconfigure_complete);
 	ath12k_core_post_reconfigure_recovery(ab);
 
-	reinit_completion(&ab->recovery_start);
-	atomic_set(&ab->recovery_start_count, 0);
-
-	ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");
+	/* reset host fixed mem off to zero */
+	ab->host_ddr_fixed_mem_off = 0;
 
-	time_left = wait_for_completion_timeout(&ab->recovery_start,
-						ATH12K_RECOVER_START_TIMEOUT_HZ);
+	/* prepare coredump when bus is PCI
+	 * In case of AHB, the coredump occurs in rproc module
+	 */
+	if (ab->hif.bus == ATH12K_BUS_PCI) {
+		ath12k_coredump_download_rddm(ab);
+	} else if (ab->hif.bus == ATH12K_BUS_AHB || ab->hif.bus == ATH12K_BUS_HYBRID) {
+		ath12k_coredump_download_ahb(ab);
+	}
+	if (ab->is_qdss_tracing)
+		ab->is_qdss_tracing = false;
 
+	if (ab->fw_recovery_support) {
+		if (ab->hif.bus == ATH12K_BUS_PCI) {
 	ath12k_hif_power_down(ab);
 	ath12k_hif_power_up(ab);
-
+		} else if (ab->hif.bus == ATH12K_BUS_AHB ||
+			   ab->hif.bus == ATH12K_BUS_HYBRID) {
+			if (IS_ENABLED(CONFIG_REMOTEPROC)) {
+				/* When crash type ATH12K_RPROC_USERPD_HYBRID_CRASH is set */
+				ath12k_core_upd_rproc_boot_recovery(ab);
+				/* When crash type ATH12K_RPROC_ROOTPD_AHB_CRASH is set */
+				ath12k_core_rproc_boot_recovery(ab);
+				/* When crash type ATH12K_RPROC_ROOTPD_HYBRID_CRASH is set */
+				ath12k_core_rpd_rproc_boot_recovery(ab);
+			}
+		}
 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
 }
+	if (ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE1)
+		ab->recovery_start = true;
+
+	ab->recovery_mode_address = NULL;
+	ab->crash_info_address = NULL;
+
+	mutex_unlock(&ag->mutex_lock);
+}
+
+static int load_board_id_override(struct ath12k_base *ab)
+{
+	struct ath12k_bid_override *ov, *tmp;
+	const struct firmware *fw;
+	const char *p, *end;
+	size_t len;
+	int ret, count;
+
+	fw = ath12k_core_firmware_request(ab, ATH12K_BOARD_OVERRIDE_FILE);
+	if (IS_ERR(fw)) {
+		/* file is optional */
+		if (PTR_ERR(fw) == -ENOENT)
+			return 0;
+		return PTR_ERR(fw);
+	}
+
+	/* format is <pci_path>=<board_id> [...] */
+	p = fw->data;
+	len = fw->size;
+	end = p + len;
+	count = 0;
+
+	while (1) {
+		const char *pstart;
+		char *ppath, *pbid, endc;
+		unsigned int seg, bus, slot, func;
+		u16 board_id;
+
+		while (p != end && isspace(*p))
+			p++;
+		if (p == end)
+			break;
+
+		pstart = p;
+		while (p != end && !isspace(*p))
+			p++;
+
+		if (p == end) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		ppath = kstrndup(pstart, p - pstart, GFP_KERNEL);
+		if (!ppath) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		pbid = strchr(ppath, '=');
+		if (!pbid) {
+			ath12k_err(ab, "bad key=value in override file\n");
+			ret = -EINVAL;
+			kfree(ppath);
+			goto fail;
+		}
+
+		*pbid++ = 0;
+
+		ret = sscanf(ppath, "pci:%x:%x:%x.%x%c", &seg, &bus, &slot,
+			     &func, &endc);
+		if (ret != 4) {
+			ath12k_err(ab, "invalid pci dev in override file\n");
+			kfree(ppath);
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		if (kstrtou16(pbid, 0, &board_id)) {
+			ath12k_err(ab, "invalid board-id in override file\n");
+			kfree(ppath);
+			ret = -EINVAL;
+			goto fail;
+		}
+		kfree(ppath);
+		ov = kzalloc(sizeof(*ov), GFP_KERNEL);
+		ov->domain = seg;
+		ov->bus_nr = bus;
+		ov->slot = slot;
+		ov->func = func;
+		ov->board_id = board_id;
+		list_add_tail(&ov->next, &ab->board_id_overrides);
+		count++;
+	}
+
+	if (count)
+		ath12k_info(ab, "loaded %d entries from board-id "
+			    "override file\n", count);
+	release_firmware(fw);
+	return 0;
+
+fail:
+	ath12k_err(ab, "invalid board-id override file content\n");
+	release_firmware(fw);
+	list_for_each_entry_safe(ov, tmp, &ab->board_id_overrides, next)
+		kfree(ov);
+	INIT_LIST_HEAD(&ab->board_id_overrides);
+	return ret;
+}
 
 int ath12k_core_pre_init(struct ath12k_base *ab)
 {
@@ -853,45 +3840,289 @@
 		return ret;
 	}
 
+	ret = load_board_id_override(ab);
+	if (ret)
+		return ret;
+
 	return 0;
 }
 
-int ath12k_core_init(struct ath12k_base *ab)
+/* Copy every probed device of @ag into the @ab array (capacity @max_ab)
+ * and report the count through @num_ab.
+ *
+ * Returns -EINVAL while the group is only partially probed, -ENOMEM if
+ * @max_ab cannot hold all probed devices.  Caller must hold
+ * ag->mutex_lock (asserted below).
+ */
+static int ath12k_core_get_chip(struct ath12k_hw_group *ag,
+				struct ath12k_base **ab,
+				u8 max_ab, u8 *num_ab)
 {
-	int ret;
+	int i;
+
+	lockdep_assert_held(&ag->mutex_lock);
+
+	if (ag->num_chip != ag->num_probed)
+		return -EINVAL;
+
+	if (max_ab < ag->num_probed)
+		return -ENOMEM;
+
+	*num_ab = 0;
+	for (i = 0; i < ag->num_probed; i++) {
+		ab[i] = ag->ab[i];
+		*num_ab = *num_ab + 1;
+	}
+
+	return 0;
+}
+
+/* Bring up the SoC core of each device in @ab_group and mark it as
+ * attached to its hw group, kicking a cold-boot FW reset per device.
+ * Stops at the first failure; already-attached devices are left as-is
+ * (presumably the caller's error path unwinds them via
+ * ath12k_core_put_hw_group() -- TODO confirm against callers).
+ */
+static int ath12k_core_hw_group_create(struct ath12k_base **ab_group, u8 num_ab)
+{
+	int i, ret = 0;
+	struct ath12k_base *ab;
+
+	for(i = 0; i < num_ab; i++) {
+		ab = ab_group[i];
 
+		mutex_lock(&ab->core_lock);
 	ret = ath12k_core_soc_create(ab);
 	if (ret) {
+			mutex_unlock(&ab->core_lock);
 		ath12k_err(ab, "failed to create soc core: %d\n", ret);
+			goto out;
+ 		}
+		set_bit(ATH12K_FLAG_HW_GROUP_ATTACHED, &ab->dev_flags);
+		mutex_unlock(&ab->core_lock);
+
+		ath12k_qmi_fwreset_from_cold_boot(ab);
+
+	}
+
+out:
 		return ret;
 	}
 
+/* Per-device init entry point: registers SSR notifiers and debugfs,
+ * joins the device to its hw group, and once every chip of the group
+ * has probed, creates the whole group.
+ */
+int ath12k_core_init(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag;
+	struct ath12k_base *ab_group[ATH12K_MAX_SOCS];
+	bool is_ready = false;
+	u8 num_ab;
+	int ret;
+
+	/* Register a notifier after core init
+	 * to be called on fw crash
+	 */
+	init_waitqueue_head(&ab->ipci.gic_msi_waitq);
+	ab->ssr_nb.notifier_call = ath12k_core_ssr_notifier_cb;
+	ab->atomic_ssr_nb.notifier_call = ath12k_core_atomic_ssr_notifier_cb;
+	ab->rpd_ssr_nb.notifier_call = ath12k_core_rpd_ssr_notifier_cb;
+	ab->rpd_atomic_ssr_nb.notifier_call = ath12k_core_rpd_atomic_ssr_notifier_cb;
+
+	ret = ath12k_hif_ssr_notifier_reg(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to register ssr notifier callback\n");
+		return ret;
+	}
+	ab->multi_pd_arch = of_property_read_bool(ab->dev->of_node, "qcom,multipd_arch");
+	ret = ath12k_debugfs_soc_create(ab);
+	if (ret) {
+		ath12k_err(ab, "failed to create debugfs\n");
+		return ret;
+	}
+
+	mutex_lock(&ath12k_hw_lock);
+
+	ag = ath12k_core_get_hw_group(ab);
+	if (!ag) {
+		mutex_unlock(&ath12k_hw_lock);
+		ath12k_err(ab, "unable to get hw group\n");
+		/* ret is still 0 from the successful debugfs call above;
+		 * without this the failure was reported as success.
+		 */
+		ret = -ENODEV;
+		goto err_debugfs;
+	}
+	mutex_unlock(&ath12k_hw_lock);
+
+	mutex_lock(&ag->mutex_lock);
+
+	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num chips in group %d, num probed %d\n",
+		    ag->num_chip, ag->num_probed);
+
+	/* group creation only proceeds once every chip has probed */
+	is_ready = ath12k_core_hw_group_create_ready(ag);
+	if (is_ready) {
+		ret = ath12k_core_get_chip(ag, ab_group, ATH12K_MAX_SOCS, &num_ab);
+		if (ret) {
+			mutex_unlock(&ag->mutex_lock);
+			ath12k_err(ab, "unable to get chip from hw group %d\n", ret);
+			goto err_debugfs;
+		}
+	}
+
+	mutex_unlock(&ag->mutex_lock);
+
+	if (is_ready) {
+		ret = ath12k_core_hw_group_create(ab_group, num_ab);
+		if (ret) {
+			ath12k_warn(ab, "unable to create hw group\n");
+			goto err_hw_group;
+		}
+	}
+
+	ag->mgmt_rx_reorder = ath12k_mgmt_rx_reordering;
+
 	return 0;
+
+err_hw_group:
+	ath12k_core_put_hw_group(ab);
+err_debugfs:
+	ath12k_debugfs_soc_destroy(ab);
+	return ret;
+}
 
-void ath12k_core_deinit(struct ath12k_base *ab)
+/* Inverse of ath12k_core_hw_group_create(): detach each chip of @ag and
+ * free its regulatory, DP and SoC-core state.  Safe on a partially
+ * attached group -- the ATTACHED flag test skips untouched chips.
+ * Caller must hold ag->mutex_lock (asserted below).
+ */
+static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab;
+	int i;
+
+	lockdep_assert_held(&ag->mutex_lock);
+
+	for(i = 0; i < ag->num_chip; i++) {
+		ab = ag->ab[i];
+
+		mutex_lock(&ab->core_lock);
+		if (test_bit(ATH12K_FLAG_HW_GROUP_ATTACHED, &ab->dev_flags)) {
+			clear_bit(ATH12K_FLAG_HW_GROUP_ATTACHED, &ab->dev_flags);
+			ath12k_reg_free(ab);
+			ath12k_dp_free(ab);
+			ath12k_core_soc_destroy(ab);
+		}
+		mutex_unlock(&ab->core_lock);
+	}
+}
+
+/* Tear down a fully-registered hw group: pdevs, mac, MLO, DP state and
+ * per-chip power.  Must be called with ag->mutex_lock held.
+ */
+static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
 {
+	struct ath12k_base *ab;
+	struct ath12k_dp *dp;
+	u32 address;
+	int i;
+
+	lockdep_assert_held(&ag->mutex_lock);
+
+	if (!test_bit(ATH12K_FLAG_REGISTERED, &ag->dev_flags))
+		return;
+
+	clear_bit(ATH12K_FLAG_REGISTERED, &ag->dev_flags);
+
+	for (i = ag->num_chip - 1; i >= 0; i--) {
+		ab = ag->ab[i];
+
+		mutex_lock(&ab->core_lock);
+
+		ath12k_core_pdev_deinit(ab);
+
+		mutex_unlock(&ab->core_lock);
+	}
+
+	ath12k_mac_unregister(ag);
+
+	ath12k_core_mlo_teardown(ag);
+
+	for (i = ag->num_chip - 1; i >= 0; i--) {
+		ab = ag->ab[i];
+
 	mutex_lock(&ab->core_lock);
 
+		ath12k_hif_irq_disable(ab);
 	ath12k_core_pdev_destroy(ab);
 	ath12k_core_stop(ab);
+		ath12k_dp_umac_reset_deinit(ab);
+		clear_bit(ATH12K_FLAG_CORE_REGISTERED, &ab->dev_flags);
+
+		mutex_unlock(&ab->core_lock);
+	}
+
+	for (i = ag->num_chip - 1; i >= 0; i--) {
+		ab = ag->ab[i];
+		dp = &ab->dp;
+
+		mutex_lock(&ab->core_lock);
+
+		/* Clear the REO queue LUT base registers where the hw
+		 * supports and allocated them.  Never break out of this
+		 * loop early: the old code did and left core_lock held
+		 * and the remaining chips powered up.
+		 */
+		if (ab->hw_params->reoq_lut_support && dp->reoq_lut.vaddr) {
+			address = HAL_SEQ_WCSS_UMAC_REO_REG +
+				  HAL_REO1_QDESC_LUT_BASE0(ab);
+			ath12k_hif_write32(ab, address, 0);
+
+			if (dp->ml_reoq_lut.vaddr) {
+				address = HAL_SEQ_WCSS_UMAC_REO_REG +
+					  HAL_REO1_QDESC_LUT_BASE1(ab);
+				ath12k_hif_write32(ab, address, 0);
+			}
+		}
+
+		ath12k_hif_power_down(ab);
 
 	mutex_unlock(&ab->core_lock);
+	}
+	ath12k_mac_destroy(ag);
+
+	ath12k_core_hw_group_destroy(ag);
+}
+
+/* Device teardown entry point.  If the whole group is registered the
+ * group-wide stop path runs; otherwise only this device's own core
+ * state is unwound.
+ */
+void ath12k_core_deinit(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	int ret;
+
+	mutex_lock(&ag->mutex_lock);
 
+	if (test_bit(ATH12K_FLAG_REGISTERED, &ag->dev_flags)) {
+		ath12k_core_hw_group_stop(ag);
+	} else if (test_bit(ATH12K_FLAG_HW_GROUP_ATTACHED, &ab->dev_flags)) {
+		mutex_lock(&ab->core_lock);
+
+		clear_bit(ATH12K_FLAG_HW_GROUP_ATTACHED, &ab->dev_flags);
+		if (test_bit(ATH12K_FLAG_CORE_REGISTERED, &ab->dev_flags)) {
+			ath12k_core_stop(ab);
+			ath12k_reg_free(ab);
+			ath12k_dp_free(ab);
+			clear_bit(ATH12K_FLAG_CORE_REGISTERED, &ab->dev_flags);
+		}
 	ath12k_hif_power_down(ab);
-	ath12k_mac_destroy(ab);
 	ath12k_core_soc_destroy(ab);
+
+		mutex_unlock(&ab->core_lock);
+	}
+
+	ath12k_debugfs_soc_destroy(ab);
+
+	/* Unregister the ssr notifier as we are not interested
+	 * in receiving these notifications after mac is unregistered.
+	 */
+	ret = ath12k_hif_ssr_notifier_unreg(ab);
+	if (ret)
+		ath12k_err(ab, "failed to un-register ssr notifier callback\n");
+
+	mutex_unlock(&ag->mutex_lock);
+
+	ath12k_core_put_hw_group(ab);
+}
 
 void ath12k_core_free(struct ath12k_base *ab)
 {
+	struct ath12k_bid_override *ov, *tmp;
+	/* stop deferred rx-replenish work before destroying the queues */
+	del_timer_sync(&ab->rx_replenish_retry);
+	flush_workqueue(ab->workqueue_aux);
 	destroy_workqueue(ab->workqueue_aux);
+
+	flush_workqueue(ab->workqueue);
 	destroy_workqueue(ab->workqueue);
+	/* entries are not list_del()'d: the whole ab is freed right below */
+	list_for_each_entry_safe(ov, tmp, &ab->board_id_overrides, next)
+		kfree(ov);
 	kfree(ab);
 }
 
 struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
-				      enum ath12k_bus bus)
+				      enum ath12k_bus bus,
+				      const struct ath12k_bus_params *bus_params)
 {
 	struct ath12k_base *ab;
 
@@ -899,8 +4130,6 @@
 	if (!ab)
 		return NULL;
 
-	init_completion(&ab->driver_recovery);
-
 	ab->workqueue = create_singlethread_workqueue("ath12k_wq");
 	if (!ab->workqueue)
 		goto err_sc_free;
@@ -909,23 +4138,34 @@
 	if (!ab->workqueue_aux)
 		goto err_free_wq;
 
+	ab->stats_disable = ath12k_stats_disable;
 	mutex_init(&ab->core_lock);
+	mutex_init(&ab->tbl_mtx_lock);
 	spin_lock_init(&ab->base_lock);
 	init_completion(&ab->reset_complete);
-	init_completion(&ab->reconfigure_complete);
-	init_completion(&ab->recovery_start);
 
+	INIT_LIST_HEAD(&ab->board_id_overrides);
 	INIT_LIST_HEAD(&ab->peers);
+	INIT_LIST_HEAD(&ab->neighbor_peers);
 	init_waitqueue_head(&ab->peer_mapping_wq);
 	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
+	init_waitqueue_head(&ab->qmi.cold_boot_waitq);
+	init_waitqueue_head(&ab->ssr_dump_wq);
 	INIT_WORK(&ab->restart_work, ath12k_core_restart);
 	INIT_WORK(&ab->reset_work, ath12k_core_reset);
+	INIT_WORK(&ab->recovery_work, ath12k_core_mode1_recovery_work);
 	timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
 	init_completion(&ab->htc_suspend);
+	init_completion(&ab->rddm_reset_done);
 
 	ab->dev = dev;
+	ab->bus_params = *bus_params;
 	ab->hif.bus = bus;
+	ab->qmi.num_radios = ATH12K_QMI_INVALID_RADIO;
 
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	ab->ppeds_node_idx = -1;
+#endif
 	return ab;
 
 err_free_wq:
@@ -935,5 +4175,174 @@
 	return NULL;
 }
 
-MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11be wireless LAN cards.");
+#ifdef CONFIG_ATH12K_SAWF
+
+/* Push a SAWF service-class config to the first radio of every probed
+ * SoC in every hw group.  Returns 0 or the first WMI send error.
+ */
+int ath12k_send_sawf_configs_soc(struct ath12k_sawf_svc_params *new_param)
+{
+	struct ath12k_hw_group *ag;
+	struct ath12k_base *ab;
+	struct ath12k *ar;
+	int soc, ret = 0;
+
+	mutex_lock(&ath12k_hw_lock);
+	/* note: a list_for_each_entry() cursor is never NULL, so the old
+	 * "if (!ag)" check inside the loop was dead code and is gone
+	 */
+	list_for_each_entry(ag, &ath12k_hw_groups, list) {
+		for (soc = ag->num_probed; soc > 0; soc--) {
+			ab = ag->ab[soc - 1];
+			ar = ab->pdevs[0].ar;
+			if (!ar) {
+				/* Control should not reach here */
+				ath12k_info(ab, "Radio not initialized in the SOC\n");
+				continue;
+			}
+			ret = ath12k_wmi_svc_config_send(ar, new_param);
+			if (ret) {
+				mutex_unlock(&ath12k_hw_lock);
+				return ret;
+			}
+		}
+	}
+	mutex_unlock(&ath12k_hw_lock);
+	return ret;
+}
+
+/* Disable SAWF service class @svc_id on the first radio of every probed
+ * SoC in every hw group.  Returns 0 or the first WMI send error.
+ */
+int ath12k_sawf_send_disable_soc(u8 svc_id)
+{
+	struct ath12k_hw_group *ag;
+	struct ath12k_base *ab;
+	struct ath12k *ar;
+	int soc, ret = 0;
+
+	mutex_lock(&ath12k_hw_lock);
+	/* note: a list_for_each_entry() cursor is never NULL, so the old
+	 * "if (!ag)" check inside the loop was dead code and is gone
+	 */
+	list_for_each_entry(ag, &ath12k_hw_groups, list) {
+		for (soc = ag->num_probed; soc > 0; soc--) {
+			ab = ag->ab[soc - 1];
+			ar = ab->pdevs[0].ar;
+			if (!ar) {
+				/* Control should not reach here */
+				ath12k_info(ab, "Radio not initialized in the SOC\n");
+				continue;
+			}
+			ret = ath12k_wmi_svc_send_disable(ar, svc_id);
+			if (ret) {
+				mutex_unlock(&ath12k_hw_lock);
+				return ret;
+			}
+		}
+	}
+	mutex_unlock(&ath12k_hw_lock);
+	return ret;
+}
+
+/* Apply an uplink latency configuration for the peer named in
+ * @latency_info on whichever SoC owns @dev.  Returns -EINVAL on a NULL
+ * config, -EOPNOTSUPP when no SoC claims the peer, otherwise the WMI
+ * result.
+ */
+int ath12k_core_sawf_ul_config(struct net_device *dev, struct ath12k_sawf_wmi_peer_latency_param *latency_info)
+{
+	struct ath12k_base *ab;
+	u16 peer_id;
+
+	if (!latency_info)
+		return -EINVAL;
+
+	ab = ath12k_sawf_get_ab_from_netdev(dev, latency_info->peer_mac,
+					    &peer_id);
+	if (!ab)
+		return -EOPNOTSUPP;
+
+	return ath12k_sawf_wmi_config_ul(ab, latency_info);
+}
+
+/* Look up a peer by MAC address across every registered hw group.
+ * On success returns the peer and stores its SoC in @ab_ref; returns
+ * NULL when no SoC knows the peer.  NOTE(review): the peer pointer is
+ * returned after base_lock is dropped, so it may go stale -- callers
+ * must revalidate under their own locking (pre-existing behaviour).
+ *
+ * The old "if (!ag) return -ENODEV" inside list_for_each_entry() was
+ * removed: the cursor is never NULL, and returning an int from a
+ * void * function was invalid anyway.
+ */
+void *ath12k_get_ab_nd_peer_from_peer_mac(u8 *peer_mac, struct ath12k_base **ab_ref)
+{
+	struct ath12k_hw_group *ag;
+	struct ath12k_base *ab;
+	void *peer = NULL;
+	int soc;
+
+	mutex_lock(&ath12k_hw_lock);
+	list_for_each_entry(ag, &ath12k_hw_groups, list) {
+		for (soc = ag->num_probed; soc > 0; soc--) {
+			ab = ag->ab[soc - 1];
+			if (!ab) {
+				/* Control should not reach here */
+				ath12k_info(NULL, "SOC not initialized\n");
+				continue;
+			}
+			spin_lock_bh(&ab->base_lock);
+			peer = ath12k_peer_find_by_addr(ab, peer_mac);
+			spin_unlock_bh(&ab->base_lock);
+			if (peer) {
+				*ab_ref = ab;
+				goto out;
+			}
+		}
+	}
+out:
+	mutex_unlock(&ath12k_hw_lock);
+	return peer;
+}
+
+#endif /* CONFIG_ATH12K_SAWF */
+
+/* Module entry point: debugfs root, then the AHB and PCI bus glue.
+ * Unwinds in reverse order on failure.
+ */
+static int ath12k_init(void)
+{
+	int ret;
+
+	ret = ath12k_debugfs_create();
+	if (ret) {
+		pr_err("Failed to register debugfs ath12k driver: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath12k_ahb_init();
+	if (ret) {
+		/* log the real cause before normalizing to -ENODEV --
+		 * the old code overwrote ret first and always printed -19
+		 */
+		pr_err("Failed to initialize ath12k AHB device: %d\n", ret);
+		ret = -ENODEV;
+		goto error;
+	}
+
+	ret = ath12k_pci_init();
+	if (ret) {
+		pr_err("Failed to initialize ath12k PCI device: %d\n", ret);
+		ret = -ENODEV;
+		goto error_ahb;
+	}
+
+	return 0;
+
+error_ahb:
+	ath12k_ahb_exit();
+error:
+	ath12k_debugfs_destroy();
+
+	return ret;
+}
+
+/* Module exit: tear down in the reverse order of ath12k_init(). */
+static void ath12k_exit(void)
+{
+	ath12k_pci_exit();
+	ath12k_ahb_exit();
+	ath12k_debugfs_destroy();
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+	ath12k_bond_disable_ppe_ds();
+#endif
+}
+
+module_init(ath12k_init)
+module_exit(ath12k_exit)
+
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN PCIe/AHB devices");
 MODULE_LICENSE("Dual BSD/GPL");
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/core.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/core.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/core.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/core.h	2024-04-19 16:04:28.953735667 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH12K_CORE_H
@@ -11,6 +11,8 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/bitfield.h>
+#include <linux/average.h>
+#include <linux/rhashtable.h>
 #include "qmi.h"
 #include "htc.h"
 #include "wmi.h"
@@ -21,10 +23,22 @@
 #include "hw.h"
 #include "hal_rx.h"
 #include "reg.h"
+#include "thermal.h"
 #include "dbring.h"
+#include "spectral.h"
+#include "pktlog.h"
+#include "sawf.h"
+#include "vendor.h"
+#include "telemetry_agent_if.h"
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+#include <ppe_ds_wlan.h>
+#include <ppe_vp_public.h>
+#endif
 
 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
 
+#define ATH12K_MAX_NUM_PWR_LEVEL	16
+
 #define ATH12K_TX_MGMT_NUM_PENDING_MAX	512
 
 #define ATH12K_TX_MGMT_TARGET_MAX_SUPPORT_WMI 64
@@ -36,13 +50,31 @@
 #define	ATH12K_RX_RATE_TABLE_NUM	320
 #define	ATH12K_RX_RATE_TABLE_11AX_NUM	576
 
+extern unsigned int ath12k_frame_mode;
+
+
+extern bool ath12k_debug_critical;
 #define ATH12K_MON_TIMER_INTERVAL  10
-#define ATH12K_RESET_TIMEOUT_HZ			(20 * HZ)
+#define ATH12K_RESET_TIMEOUT_HZ			(180 * HZ)
 #define ATH12K_RESET_MAX_FAIL_COUNT_FIRST	3
 #define ATH12K_RESET_MAX_FAIL_COUNT_FINAL	5
 #define ATH12K_RESET_FAIL_TIMEOUT_HZ		(20 * HZ)
 #define ATH12K_RECONFIGURE_TIMEOUT_HZ		(10 * HZ)
 #define ATH12K_RECOVER_START_TIMEOUT_HZ		(20 * HZ)
+#define ATH12K_UMAC_RESET_TIMEOUT_IN_MS		200
+
+#define INVALID_CIPHER 0xFFFFFFFF
+
+#define ATH12K_MAX_MLO_PEERS		256
+#define ATH12K_MLO_PEER_ID_INVALID	0xFFFF
+
+#define ATH12K_SSR_POWERUP QCOM_SSR_AFTER_POWERUP
+#define ATH12K_SSR_PREPARE_SHUTDOWN 5
+
+/* Board-data-file lookup strategy. */
+enum ath12k_bdf_search {
+	ATH12K_BDF_SEARCH_DEFAULT,
+	ATH12K_BDF_SEARCH_BUS_AND_BOARD,
+};
 
 enum wme_ac {
 	WME_AC_BE,
@@ -53,8 +85,10 @@
 };
 
 #define ATH12K_HT_MCS_MAX	7
-#define ATH12K_VHT_MCS_MAX	9
+#define ATH12K_VHT_MCS_MAX	11
 #define ATH12K_HE_MCS_MAX	11
+#define ATH12K_HE_EXTRA_MCS_MAX 13
+#define ATH12K_EHT_MCS_MAX	15
 
 enum ath12k_crypt_mode {
 	/* Only use hardware crypto engine */
@@ -74,15 +108,19 @@
 enum ath12k_skb_flags {
 	ATH12K_SKB_HW_80211_ENCAP = BIT(0),
 	ATH12K_SKB_CIPHER_SET = BIT(1),
+	ATH12K_SKB_TX_STATUS = BIT(2),
+	ATH12K_SKB_MGMT_LINK_AGNOSTIC = BIT(3),
 };
 
 struct ath12k_skb_cb {
 	dma_addr_t paddr;
-	struct ath12k *ar;
+	u8 eid;
 	struct ieee80211_vif *vif;
 	dma_addr_t paddr_ext_desc;
 	u32 cipher;
 	u8 flags;
+	u8 link_id;
+	u16 pkt_offset;
 };
 
 struct ath12k_skb_rxcb {
@@ -92,32 +130,44 @@
 	bool is_continuation;
 	bool is_mcbc;
 	bool is_eapol;
+	bool is_end_of_ppdu;
 	struct hal_rx_desc *rx_desc;
 	u8 err_rel_src;
 	u8 err_code;
-	u8 mac_id;
+	u8 hw_link_id;
 	u8 unmapped;
 	u8 is_frag;
 	u8 tid;
 	u16 peer_id;
+	struct napi_struct *napi;
 };
 
 enum ath12k_hw_rev {
-	ATH12K_HW_QCN9274_HW10,
+	ATH12K_HW_QCN9274_HW10 = 0,
 	ATH12K_HW_QCN9274_HW20,
-	ATH12K_HW_WCN7850_HW20
+	ATH12K_HW_WCN7850_HW20,
+	ATH12K_HW_IPQ5332_HW10,
+	ATH12K_HW_QCN6432_HW10,
 };
 
+#define ATH12K_DIAG_HW_ID_OFFSET 16
+
 enum ath12k_firmware_mode {
 	/* the default mode, standard 802.11 functionality */
 	ATH12K_FIRMWARE_MODE_NORMAL,
 
 	/* factory tests etc */
 	ATH12K_FIRMWARE_MODE_FTM,
+
+	/* Cold boot calibration */
+	ATH12K_FIRMWARE_MODE_COLD_BOOT = 7,
 };
 
+extern bool ath12k_cold_boot_cal;
+
 #define ATH12K_IRQ_NUM_MAX 57
 #define ATH12K_EXT_IRQ_NUM_MAX	16
+#define ATH12K_MAX_TCL_RING_NUM 3
 
 struct ath12k_ext_irq_grp {
 	struct ath12k_base *ab;
@@ -125,6 +175,7 @@
 	u32 num_irq;
 	u32 grp_id;
 	u64 timestamp;
+	bool napi_enabled;
 	struct napi_struct napi;
 	struct net_device napi_ndev;
 };
@@ -137,6 +188,11 @@
 
 #define HE_PPET16_PPET8_SIZE            8
 
+enum ath12k_msi_supported_hw {
+	ATH12K_MSI_CONFIG_PCI,
+	ATH12K_MSI_CONFIG_IPCI,
+};
+
 /* 802.11ax PPE (PPDU packet Extension) threshold */
 struct he_ppe_threshold {
 	u32 numss_m1;
@@ -154,6 +210,11 @@
 };
 
 #define MAX_RADIOS 3
+#define ATH12K_MAX_SOCS        3
+#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_SOCS * MAX_RADIOS)
+#define ATH12K_INVALID_GRP_ID  0xFF
+#define ATH12K_INVALID_CHIP_ID 0xFF
+#define ATH12K_MAX_MLO_PEER    0x100
 
 enum {
 	WMI_HOST_TP_SCALE_MAX   = 0,
@@ -173,40 +234,238 @@
 
 enum ath12k_dev_flags {
 	ATH12K_CAC_RUNNING,
+	ATH12K_FLAG_CORE_REGISTERED,
 	ATH12K_FLAG_CRASH_FLUSH,
 	ATH12K_FLAG_RAW_MODE,
 	ATH12K_FLAG_HW_CRYPTO_DISABLED,
+	ATH12K_FLAG_BTCOEX,
 	ATH12K_FLAG_RECOVERY,
 	ATH12K_FLAG_UNREGISTERING,
 	ATH12K_FLAG_REGISTERED,
 	ATH12K_FLAG_QMI_FAIL,
 	ATH12K_FLAG_HTC_SUSPEND_COMPLETE,
+	ATH12K_FLAG_QMI_FW_READY_COMPLETE,
+	ATH12K_FLAG_QMI_HOST_CAP_SENT,
+	ATH12K_FLAG_HW_GROUP_ATTACHED,
+	ATH12K_FLAG_FTM_SEGMENTED,
+	ATH12K_FLAG_PPE_DS_ENABLED,
+	ATH12K_FLAG_WMI_INIT_DONE,
+	ATH12K_FLAG_UMAC_PRERESET_START,
+	ATH12K_FLAG_UMAC_RESET_COMPLETE,
+	ATH12K_FLAG_UMAC_RECOVERY_START,
 };
 
 enum ath12k_monitor_flags {
 	ATH12K_FLAG_MONITOR_ENABLED,
+	MONITOR_VDEV_CREATED,
+	MONITOR_VDEV_STARTED,
+	MONITOR_CONF_ENABLED,
+};
+
+struct ath12k_fw_vdev_ol_stats {
+	u64 rx_msdu_byte_cnt;
+	u64 rx_msdu_pkt_cnt;
+	u64 tx_msdu_byte_cnt;
+	u64 tx_msdu_pkt_cnt;
+	u64 tx_retry_byte_cnt;
+	u64 tx_retry_pkt_cnt;
+	u64 tx_drop_byte_cnt;
+	u64 tx_drop_pkt_cnt;
+	u64 tx_msdu_ttl_byte_cnt;
+	u64 tx_msdu_ttl_pkt_cnt;
+};
+
+/**
+ * struct chan_power_info - TPE containing power info per channel chunk
+ * @chan_cfreq: channel center freq (MHz)
+ * e.g.
+ * channel 37/20MHz,  it is 6135
+ * channel 37/40MHz,  it is 6125
+ * channel 37/80MHz,  it is 6145
+ * channel 37/160MHz, it is 6185
+ * @tx_power: transmit power (dBm)
+ */
+struct chan_power_info {
+        u16 chan_cfreq;
+        s8 tx_power;
 };
 
-struct ath12k_vif {
+/**
+ * struct reg_tpc_power_info - regulatory TPC power info
+ * @is_psd_power: is PSD power or not
+ * @eirp_power: Maximum EIRP power (dBm), valid only if power is PSD
+ * @power_type_6g: type of power (SP/LPI/VLP)
+ * @num_pwr_levels: number of power levels
+ * @reg_max: Array of maximum TX power (dBm) per PSD value
+ * @ap_constraint_power: AP constraint power (dBm)
+ * @tpe: TPE values processed from TPE IE
+ * @chan_power_info: power info to send to FW
+ */
+struct ath12k_reg_tpc_power_info {
+	bool is_psd_power;
+	u8 eirp_power;
+	enum wmi_reg_6g_ap_type power_type_6g;
+	u8 num_pwr_levels;
+	u8 reg_max[ATH12K_MAX_NUM_PWR_LEVEL];
+	u8 ap_constraint_power;
+	s8 tpe[ATH12K_MAX_NUM_PWR_LEVEL];
+	struct chan_power_info chan_power_info[ATH12K_MAX_NUM_PWR_LEVEL];
+};
+
+#define ATH12K_STATS_MGMT_FRM_TYPE_MAX 16
+
+struct ath12k_mgmt_frame_stats {
+	u32 tx_succ_cnt[ATH12K_STATS_MGMT_FRM_TYPE_MAX];
+	u32 tx_fail_cnt[ATH12K_STATS_MGMT_FRM_TYPE_MAX];
+	u32 rx_cnt[ATH12K_STATS_MGMT_FRM_TYPE_MAX];
+	u32 tx_compl_succ[ATH12K_STATS_MGMT_FRM_TYPE_MAX];
+	u32 tx_compl_fail[ATH12K_STATS_MGMT_FRM_TYPE_MAX];
+};
+
+struct ath12k_tx_conf {
+	bool changed;
+	u16 ac;
+	struct ieee80211_tx_queue_params tx_queue_params;
+};
+
+struct ath12k_key_conf {
+	enum set_key_cmd cmd;
+	struct list_head list;
+	struct ieee80211_sta *sta;
+	struct ieee80211_key_conf *key;
+};
+
+struct ath12k_recovery_cache {
+	struct ath12k_tx_conf tx_conf;
+	struct ath12k_key_conf key_conf;
+	struct list_head recovery_sta_list;
+	u64 bss_conf_changed;
+};
+
+struct ath12k_vif_cache {
+	struct ath12k_tx_conf tx_conf;
+	struct ath12k_key_conf key_conf;
+	u64 bss_conf_changed;
+};
+
+struct ath12k_vif_tcl_desc_template {
+	u32 info0;
+	u32 info1;
+	u32 info2;
+	u32 info3;
+	u32 info4;
+};
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+struct ath12k_link_vif_pvt {
+	struct ath12k_base *ab;
+	struct ath12k_link_vif *arvif;
+	struct net_device *link_ndev;
+	struct net_device *bond_dev;
+	struct ieee80211_hw *hw;
+	bool is_started;
+	bool is_bond_enslaved;
+	int ppe_vp_profile_idx;
+	int vp_num;
+};
+#endif
+
+struct ath12k_peer_ch_width_switch_data {
+	int count;
+	struct wmi_chan_width_peer_arg peer_arg[];
+};
+
+struct ath12k_prb_resp_tmpl_ml_info {
+	u32 hw_link_id;
+	u32 cu_vdev_map_cat1_lo;
+	u32 cu_vdev_map_cat1_hi;
+	u32 cu_vdev_map_cat2_lo;
+	u32 cu_vdev_map_cat2_hi;
+};
+
+struct ath12k_link_vif {
+	struct ath12k_base *ab;
+	struct ath12k_dp *dp;
+	struct ath12k_vif_tcl_desc_template desc;
+	struct ieee80211_vif *vif;
+	struct device *dev;
+	struct ath12k *ar;
+	bool assoc_link;
+	bool vdev_stop_notify_done;
+	u8 pdev_idx;
 	u32 vdev_id;
-	enum wmi_vdev_type vdev_type;
-	enum wmi_vdev_subtype vdev_subtype;
 	u32 beacon_interval;
 	u32 dtim_period;
+	u8 addr[ETH_ALEN];
 	u16 ast_hash;
 	u16 ast_idx;
 	u16 tcl_metadata;
 	u8 hal_addr_search_flags;
 	u8 search_type;
-
-	struct ath12k *ar;
-	struct ieee80211_vif *vif;
-
 	int bank_id;
 	u8 vdev_id_check_en;
+	bool beacon_prot;
 
 	struct wmi_wmm_params_all_arg wmm_params;
 	struct list_head list;
+
+	bool is_created;
+	bool is_started;
+	bool is_scan_vif;
+	bool is_up;
+	bool pending_up;
+	bool pending_stop;
+	bool spectral_enabled;
+	u8 bssid[ETH_ALEN];
+	struct cfg80211_bitrate_mask bitrate_mask;
+	int num_legacy_stations;
+	int rtscts_prot_mode;
+	int txpower;
+	struct ieee80211_chanctx_conf chanctx;
+	struct ath12k_reg_tpc_power_info reg_tpc_info;
+	u8 vdev_stats_id;
+	struct ath12k_fw_vdev_ol_stats vdev_stats;
+	struct host_link_stats link_stats;
+	bool bcca_zero_sent;
+	bool do_not_send_tmpl;
+	u32 vht_cap;
+#ifdef CONFIG_ATH12K_DEBUGFS
+	struct dentry *debugfs_twt;
+	struct dentry *debugfs_rtsthreshold;
+#endif /* CONFIG_ATH12K_DEBUGFS */
+	struct work_struct update_bcn_template_work;
+	u64 obss_color_bitmap;
+	struct work_struct update_obss_color_notify_work;
+	u32 tx_vdev_id;
+    u8 link_id;
+    struct ath12k_vif *ahvif;
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	struct ath12k_link_vif_pvt *ndev_pvt;
+	struct net_device *link_ndev;
+	int ppe_vp_type;
+	int ppe_vp_num;
+#endif
+
+	bool mvr_processing;
+	bool pending_csa_up;
+	int num_stations;
+
+	struct completion peer_ch_width_switch_send;
+	struct work_struct peer_ch_width_switch_work;
+	struct ath12k_peer_ch_width_switch_data *peer_ch_width_switch_data;
+	struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+	struct ath12k_prb_resp_tmpl_ml_info ml_info;
+};
+
+struct ath12k_vif {
+	enum wmi_vdev_type vdev_type;
+	enum wmi_vdev_subtype vdev_subtype;
+	struct ieee80211_vif *vif;
+	struct ath12k_hw *ah;
+	bool rsnie_present;
+	bool wpaie_present;
+	u8 tx_encap_type;
+	u32 key_cipher;
 	union {
 		struct {
 			u32 uapsd;
@@ -224,25 +483,30 @@
 		} ap;
 	} u;
 
-	bool is_started;
-	bool is_up;
+	struct ath12k_link_vif deflink;
+	struct ath12k_link_vif *link[ATH12K_NUM_MAX_LINKS];
+	/* indicates bitmap of link vdev created in FW */
+	unsigned long links_map;
 	u32 aid;
-	u8 bssid[ETH_ALEN];
-	struct cfg80211_bitrate_mask bitrate_mask;
-	int num_legacy_stations;
-	int rtscts_prot_mode;
-	int txpower;
-	bool rsnie_present;
-	bool wpaie_present;
-	struct ieee80211_chanctx_conf chanctx;
-	u32 key_cipher;
-	u8 tx_encap_type;
-	u8 vdev_stats_id;
+	struct ath12k_mgmt_frame_stats mgmt_stats;
+	u8 num_vdev_created;
+	u16 mcbc_gsn;
+	spinlock_t mcbc_gsn_lock; /* Protect mcbc_gsn */
+	struct ath12k_vif_cache cache[IEEE80211_MLD_MAX_NUM_LINKS];
+	u8 last_scan_link;
+	u8 primary_link_id;
+#ifdef CONFIG_ATH12K_DEBUGFS
+	struct dentry *debugfs_linkstats;
+	struct dentry *debugfs_primary_link;
+#endif /* CONFIG_ATH12K_DEBUGFS */
+	bool mcast_to_ucast_en;
+	bool chanctx_peer_del_done;
 };
 
 struct ath12k_vif_iter {
 	u32 vdev_id;
-	struct ath12k_vif *arvif;
+	struct ath12k *ar;
+	struct ath12k_link_vif *arvif;
 };
 
 #define HAL_AST_IDX_INVALID	0xFFFF
@@ -250,20 +514,23 @@
 #define HAL_RX_MAX_MCS_HT	31
 #define HAL_RX_MAX_MCS_VHT	9
 #define HAL_RX_MAX_MCS_HE	11
+#define HAL_RX_MAX_MCS_BE	15
 #define HAL_RX_MAX_NSS		8
 #define HAL_RX_MAX_NUM_LEGACY_RATES 12
 #define ATH12K_RX_RATE_TABLE_11AX_NUM	576
 #define ATH12K_RX_RATE_TABLE_NUM 320
+#define ATH12K_STATS_MGMT_FRM_TYPE_MAX	16
 
 struct ath12k_rx_peer_rate_stats {
 	u64 ht_mcs_count[HAL_RX_MAX_MCS_HT + 1];
 	u64 vht_mcs_count[HAL_RX_MAX_MCS_VHT + 1];
 	u64 he_mcs_count[HAL_RX_MAX_MCS_HE + 1];
+	u64 be_mcs_count[HAL_RX_MAX_MCS_BE + 1];
 	u64 nss_count[HAL_RX_MAX_NSS];
 	u64 bw_count[HAL_RX_BW_MAX];
 	u64 gi_count[HAL_RX_GI_MAX];
 	u64 legacy_count[HAL_RX_MAX_NUM_LEGACY_RATES];
-	u64 rx_rate[ATH12K_RX_RATE_TABLE_11AX_NUM];
+	u64 rx_rate[HAL_RX_BW_MAX][HAL_RX_GI_MAX][HAL_RX_MAX_NSS][HAL_RX_MAX_MCS_HT + 1];
 };
 
 struct ath12k_rx_peer_stats {
@@ -293,6 +560,7 @@
 };
 
 #define ATH12K_HE_MCS_NUM       12
+#define ATH12K_EHT_MCS_NUM	16
 #define ATH12K_VHT_MCS_NUM      10
 #define ATH12K_BW_NUM           5
 #define ATH12K_NSS_NUM          4
@@ -347,6 +615,7 @@
 	u64 ht[ATH12K_COUNTER_TYPE_MAX][ATH12K_HT_MCS_NUM];
 	u64 vht[ATH12K_COUNTER_TYPE_MAX][ATH12K_VHT_MCS_NUM];
 	u64 he[ATH12K_COUNTER_TYPE_MAX][ATH12K_HE_MCS_NUM];
+	u64 eht[ATH12K_COUNTER_TYPE_MAX][ATH12K_EHT_MCS_NUM];
 	u64 bw[ATH12K_COUNTER_TYPE_MAX][ATH12K_BW_NUM];
 	u64 nss[ATH12K_COUNTER_TYPE_MAX][ATH12K_NSS_NUM];
 	u64 gi[ATH12K_COUNTER_TYPE_MAX][ATH12K_GI_NUM];
@@ -373,36 +642,112 @@
 	u32 retry_bytes;
 };
 
+DECLARE_EWMA(avg_rssi, 10, 8)
+
 struct ath12k_wbm_tx_stats {
 	u64 wbm_tx_comp_stats[HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX];
 };
 
-struct ath12k_sta {
-	struct ath12k_vif *arvif;
+struct ath12k_driver_tx_pkts_flow {
+	atomic_t pkts_in;
+	atomic_t pkts_out;
+};
+
+struct ath12k_driver_rx_pkts_flow {
+	atomic_t pkts_frm_hw;
+	atomic_t pkts_out;
+};
+
+struct ath12k_link_sta {
+	struct ath12k_link_vif *arvif;
+	struct ath12k_sta *ahsta;
+	/* link address similar to ieee80211_link_sta */
+	u8 addr[ETH_ALEN];
 
 	/* the following are protected by ar->data_lock */
 	u32 changed; /* IEEE80211_RC_* */
 	u32 bw;
 	u32 nss;
 	u32 smps;
-	enum hal_pn_type pn_type;
 
 	struct work_struct update_wk;
 	struct rate_info txrate;
+	u32 peer_nss;
 	struct rate_info last_txrate;
 	u64 rx_duration;
 	u64 tx_duration;
 	u8 rssi_comb;
+	struct ewma_avg_rssi avg_rssi;
+
+	struct ath12k_htt_tx_stats *tx_stats;
 	struct ath12k_rx_peer_stats *rx_stats;
+	struct ath12k_driver_tx_pkts_flow drv_tx_pkts;
+	struct ath12k_driver_rx_pkts_flow drv_rx_pkts;
 	struct ath12k_wbm_tx_stats *wbm_tx_stats;
 	u32 bw_prev;
+
+	u16 tcl_metadata;
+	u8 link_id; /* IEEE link id */
+	u8 link_idx; /* for fw use only */
+	u32 last_tx_pkt_bw;
+
+	/* For now the assoc link will be considered primary */
+	bool is_assoc_link;
+
+	/* For check disable fixed rate check for peer */
+	bool disable_fixed_rate;
+	u8 sawf_svc_id;
+	/* will be saved to use during recovery */
+	struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+	enum ieee80211_sta_state state;
+};
+
+struct ath12k_sta {
+	struct ath12k_vif *ahvif;
+ #ifdef CONFIG_MAC80211_DEBUGFS
+	/* protected by conf_mutex */
+	bool aggr_mode;
+ #endif
+	bool use_4addr_set;
+	struct work_struct set_4addr_wk;
+	enum hal_pn_type pn_type;
+
+	struct ath12k_link_sta deflink;
+	struct ath12k_link_sta *link[IEEE80211_MLD_MAX_NUM_LINKS];
+	/* indicates bitmap of link sta created in FW */
+	unsigned long links_map;
+	u16 ml_peer_id;
+	u8 assoc_link_id;
+	u8 primary_link_id;
+	u8 num_peer;
+	/* Indicates whether disassoc is sent after recovery
+	 * ONLY used currently in recovery case
+	 */
+	bool low_ack_sent;
 };
 
-#define ATH12K_MIN_5G_FREQ 4150
-#define ATH12K_MIN_6G_FREQ 5945
-#define ATH12K_MAX_6G_FREQ 7115
-#define ATH12K_NUM_CHANS 100
-#define ATH12K_MAX_5G_CHAN 173
+#define ATH12K_HALF_20MHZ_BW 10
+#define ATH12K_2G_MIN_CENTER 2412
+#define ATH12K_2G_MAX_CENTER 2484
+#define ATH12K_5G_MIN_CENTER 4900
+#define ATH12K_5G_MAX_CENTER 5920
+#define ATH12K_6G_MIN_CENTER 5935
+#define ATH12K_6G_MAX_CENTER 7115
+#define ATH12K_MIN_2G_FREQ (ATH12K_2G_MIN_CENTER - ATH12K_HALF_20MHZ_BW - 1)
+#define ATH12K_MAX_2G_FREQ (ATH12K_2G_MAX_CENTER + ATH12K_HALF_20MHZ_BW + 1)
+#define ATH12K_MIN_5G_FREQ (ATH12K_5G_MIN_CENTER - ATH12K_HALF_20MHZ_BW)
+#define ATH12K_MAX_5G_FREQ (ATH12K_5G_MAX_CENTER + ATH12K_HALF_20MHZ_BW)
+#define ATH12K_MIN_6G_FREQ (ATH12K_6G_MIN_CENTER - ATH12K_HALF_20MHZ_BW)
+#define ATH12K_MAX_6G_FREQ (ATH12K_6G_MAX_CENTER + ATH12K_HALF_20MHZ_BW)
+
+#define ATH12K_MAX_5G_LOW_BAND_FREQ  5330
+#define ATH12K_MIN_5G_HIGH_BAND_FREQ 5490
+
+#define ATH12K_NUM_CHANS 102
+#define ATH12K_MIN_5G_CHAN 36
+#define ATH12K_MAX_5G_CHAN 177
+#define ATH12K_MIN_2G_CHAN 1
+#define ATH12K_MAX_2G_CHAN 11
 
 enum ath12k_state {
 	ATH12K_STATE_OFF,
@@ -410,20 +755,69 @@
 	ATH12K_STATE_RESTARTING,
 	ATH12K_STATE_RESTARTED,
 	ATH12K_STATE_WEDGED,
+	ATH12K_STATE_TM,
 	/* Add other states as required */
 };
 
 /* Antenna noise floor */
 #define ATH12K_DEFAULT_NOISE_FLOOR -95
 
+struct ath12k_ftm_event_obj {
+	u32 data_pos;
+	u32 expected_seq;
+	u8 *eventdata;
+};
+
 struct ath12k_fw_stats {
+	struct dentry *debugfs_fwstats;
 	u32 pdev_id;
 	u32 stats_id;
 	struct list_head pdevs;
 	struct list_head vdevs;
 	struct list_head bcn;
+	bool en_vdev_stats_ol;
 };
 
+struct ath12k_dbg_htt_stats {
+	u32 type;
+	u32 cfg_param[4];
+	u8 reset;
+	struct debug_htt_stats_req *stats_req;
+	/* protects shared stats req buffer */
+	spinlock_t lock;
+};
+
+#define ATH12K_MAX_COEX_PRIORITY_LEVEL  3
+
+struct ath12k_debug {
+	struct dentry *debugfs_pdev;
+	struct ath12k_dbg_htt_stats htt_stats;
+	u32 extd_tx_stats;
+	u32 extd_rx_stats;
+#ifdef CONFIG_ATH12K_SAWF
+	u32 sawf_stats;
+#endif
+	u32 pktlog_filter;
+	u32 pktlog_mode;
+	u32 pktlog_peer_valid;
+	u8 pktlog_peer_addr[ETH_ALEN];
+#ifdef CONFIG_ATH12K_PKTLOG
+	struct dentry *debugfs_pktlog;
+	struct ath12k_pktlog pktlog;
+	bool is_pkt_logging;
+#endif
+	u32 rx_filter;
+	struct list_head wmi_list;
+	struct completion wmi_ctrl_path_stats_rcvd;
+	u32 wmi_ctrl_path_stats_tagid;
+	struct dentry *debugfs_nrp;
+	u32 coex_priority_level[ATH12K_MAX_COEX_PRIORITY_LEVEL];
+};
+
+int ath12k_pktlog_rx_filter_setting(struct ath12k *ar,
+                                    struct htt_rx_ring_tlv_filter
+                                    *tlv_filter);
+
 struct ath12k_per_peer_tx_stats {
 	u32 succ_bytes;
 	u32 retry_bytes;
@@ -441,17 +835,73 @@
 	bool is_ampdu;
 };
 
+/* Bluetooth coexistence (PTA) configuration snapshot. */
+struct ath12k_btcoex_info {
+	bool coex_support;
+	u32 pta_num;
+	u32 coex_mode;
+	u32 bt_active_time_slot;
+	u32 bt_priority_time_slot;
+	u32 coex_algo_type;
+	u32 pta_priority;
+	u32 pta_algorithm;
+	u32 wlan_prio_mask;
+	u32 wlan_weight;
+	u32 bt_weight;
+	u32 duty_cycle;
+	u32 wlan_duration;
+	u32 wlan_pkt_type;
+	u32 wlan_pkt_type_continued;
+};
+
+enum btcoex_algo {
+	COEX_ALGO_UNCONS_FREERUN = 0,
+	COEX_ALGO_FREERUN,
+	COEX_ALGO_OCS,
+	COEX_ALGO_MAX_SUPPORTED,
+};
+
+enum ath12k_ap_ps_state {
+	ATH12K_AP_PS_STATE_OFF,
+	ATH12K_AP_PS_STATE_ON,
+};
+
+enum ath12k_fw_recovery_option {
+	 ATH12K_FW_RECOVERY_DISABLE = 0,
+	 ATH12K_FW_RECOVERY_ENABLE_AUTO, /* Automatically recover after FW assert */
+	 ATH12K_FW_RECOVERY_ENABLE_MODE1_AUTO, /* Automatically recover after FW assert through Mode1 */
+	 /* Enable only recovery. Send MPD SSR WMI */
+	 /* command to unlink UserPD assert from RootPD */
+	 ATH12K_FW_RECOVERY_ENABLE_SSR_ONLY,
+};
+
+struct ath12k_chan_info {
+	u32 low_freq;
+	u32 high_freq;
+	u32 num_channels;
+};
+
+struct ath12k_wmm_stats {
+	int tx_type;
+	int rx_type;
+	u64 total_wmm_tx_pkts[WME_NUM_AC];
+	u64 total_wmm_rx_pkts[WME_NUM_AC];
+	u64 total_wmm_tx_drop[WME_NUM_AC];
+	u64 total_wmm_rx_drop[WME_NUM_AC];
+};
+
+#define ATH12K_DSCP_PRIORITY 7
+
 #define ATH12K_FLUSH_TIMEOUT (5 * HZ)
 #define ATH12K_VDEV_DELETE_TIMEOUT_HZ (5 * HZ)
 
 struct ath12k {
 	struct ath12k_base *ab;
+	struct ath12k_hw *ah;
 	struct ath12k_pdev *pdev;
-	struct ieee80211_hw *hw;
-	struct ieee80211_ops *ops;
 	struct ath12k_wmi_pdev *wmi;
 	struct ath12k_pdev_dp dp;
 	u8 mac_addr[ETH_ALEN];
+	struct ath12k_chan_info chan_info;
 	u32 ht_cap_info;
 	u32 vht_cap_info;
 	struct ath12k_he ar_he;
@@ -467,6 +917,7 @@
 		int vdev_id;
 		int roc_freq;
 		bool roc_notify;
+		struct work_struct vdev_del_wk;
 	} scan;
 
 	struct {
@@ -482,6 +933,7 @@
 	u32 max_tx_power;
 	u32 txpower_limit_2g;
 	u32 txpower_limit_5g;
+	u32 txpower_limit_6g;
 	u32 txpower_scale;
 	u32 power_scale;
 	u32 chan_tx_pwr;
@@ -512,6 +964,8 @@
 	/* pdev_idx starts from 0 whereas pdev->pdev_id starts with 1 */
 	u8 pdev_idx;
 	u8 lmac_id;
+	/* link_idx unique across the group of hw used to lookup in ah */
+	u8 link_idx;
 
 	struct completion peer_assoc_done;
 	struct completion peer_delete_done;
@@ -522,6 +976,7 @@
 	int last_wmi_vdev_start_status;
 	struct completion vdev_setup_done;
 	struct completion vdev_delete_done;
+	struct ath12k_wmm_stats wmm_stats;
 
 	int num_peers;
 	int max_num_peers;
@@ -532,7 +987,9 @@
 	struct idr txmgmt_idr;
 	/* protects txmgmt_idr data */
 	spinlock_t txmgmt_idr_lock;
+	atomic_t flush_request;
 	atomic_t num_pending_mgmt_tx;
+	wait_queue_head_t tx_empty_waitq;
 
 	/* cycle count is reported twice for each visited channel during scan.
 	 * access protected by data_lock
@@ -562,12 +1019,93 @@
 	struct ath12k_per_peer_tx_stats cached_stats;
 	u32 last_ppdu_id;
 	u32 cached_ppdu_id;
-
+#ifdef CONFIG_ATH12K_DEBUGFS
+	struct ath12k_debug debug;
+	struct dentry *wmi_ctrl_stat;
+	/* To protect wmi_list manipulation */
+	struct mutex wmi_ctrl_path_stats_lock;
+	/* TODO: Add mac_filter, ampdu_aggr_size and wbm_tx_completion_stats stats*/
+#endif
+	struct ath12k_btcoex_info coex;
+
+#ifdef CONFIG_ATH12K_SPECTRAL
+	struct ath12k_spectral spectral;
+#endif
 	bool dfs_block_radar_events;
-	bool monitor_conf_enabled;
-	bool monitor_vdev_created;
-	bool monitor_started;
+	struct ath12k_thermal thermal;
 	int monitor_vdev_id;
+	u8 twt_enabled;
+	s8 max_allowed_tx_power;
+	struct completion fw_mode_reset;
+	u8 ftm_msgref;
+	bool ap_ps_enabled;
+	enum ath12k_ap_ps_state ap_ps_state;
+
+	struct cfg80211_chan_def awgn_chandef;
+	u32 chan_bw_interference_bitmap;
+	bool awgn_intf_handling_in_prog;
+
+	struct wmi_rssi_dbm_conv_offsets rssi_offsets;
+
+	u8 tpc_stats_type;
+	/* tpc_stats ptr is protected by data lock */
+	struct wmi_tpc_stats_event *tpc_stats;
+	struct completion tpc_complete;
+	bool tpc_request;
+
+	struct completion mlo_setup_done;
+	u32 mlo_setup_status;	
+
+	struct ath12k_fw_stats fw_stats;
+	struct completion fw_stats_complete;
+	bool fw_stats_done;
+
+	unsigned long last_tx_power_update;
+
+	struct completion mvr_complete;
+	struct cfg80211_chan_def agile_chandef;
+	struct ath12k_mgmt_rx_reo_pdev_info rx_reo_pdev_ctx;
+
+	u64 delta_tsf2;
+	u64 delta_tqm;
+
+	bool mlo_complete_event;
+
+	bool ani_enabled;
+	u32 ani_poll_period;
+	u32 ani_listen_period;
+	int ani_ofdm_level;
+	struct completion ani_ofdm_event;
+};
+
+struct ath12k_hw {
+	struct ieee80211_hw *hw;
+	const struct ieee80211_ops *ops;
+	struct ath12k_hw_group *ag;
+
+	/* To synchronize concurrent synchronous mac80211 callback operations,
+	 * concurrent debugfs configuration and concurrent FW statistics events.
+	 */
+	struct mutex conf_mutex;
+	bool regd_updated;
+	u8 supported_band_mask;
+	u8 num_radio;
+	DECLARE_BITMAP(free_ml_peer_id_map, ATH12K_MAX_MLO_PEERS);
+
+	/* Used for protecting objects used at ah level, ex. ath12k_ml_peer */
+	spinlock_t data_lock;
+
+	/* protected by ah->data_lock */
+	struct list_head ml_peers;
+	bool in_recovery;
+	struct ath12k radio[0] __aligned(sizeof(void *));
+};
+
+struct ath12k_mlo_memory {
+	struct target_mem_chunk chunk[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+	struct reserved_mem *rsv;
+	bool init_done;
+	bool is_mlo_mem_avail;
 };
 
 struct ath12k_band_cap {
@@ -579,6 +1117,14 @@
 	u32 he_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE];
 	struct ath12k_wmi_ppe_threshold_arg he_ppet;
 	u16 he_6ghz_capa;
+	u32 eht_cap_mac_info[PSOC_HOST_MAX_MAC_SIZE];
+	u32 eht_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE];
+	u32 eht_mcs_20_only;
+	u32 eht_mcs_80;
+	u32 eht_mcs_160;
+	u32 eht_mcs_320;
+	struct ath12k_ppe_threshold eht_ppet;
+	u32 eht_cap_info_internal;
 };
 
 struct ath12k_pdev_cap {
@@ -591,7 +1137,14 @@
 	u32 rx_chain_mask;
 	u32 tx_chain_mask_shift;
 	u32 rx_chain_mask_shift;
+	u32 chainmask_table_id;
+	unsigned long adfs_chain_mask;
 	struct ath12k_band_cap band[NUM_NL80211_BANDS];
+	bool nss_ratio_enabled;
+	u8 nss_ratio_info;
+	u32 ru_punct_supp_bw;
+	u32 eml_cap;
+	u32 mld_cap;
 };
 
 struct mlo_timestamp {
@@ -607,21 +1160,70 @@
 
 struct ath12k_pdev {
 	struct ath12k *ar;
-	u32 pdev_id;
+	u16 pdev_id;
+	u16 hw_link_id;
 	struct ath12k_pdev_cap cap;
 	u8 mac_addr[ETH_ALEN];
 	struct mlo_timestamp timestamp;
 };
 
+struct pmm_remap {
+	u32 base;
+	u32 size;
+};
+
+struct cmem_remap {
+	u32 base;
+	u32 size;
+};
+
+#define BOARD_NAME_SIZE 100
+
 struct ath12k_board_data {
 	const struct firmware *fw;
 	const void *data;
 	size_t len;
+	char boardname[BOARD_NAME_SIZE];
+	u32 ie_id;
+	u32 name_id;
+	u32 data_id;
+};
+
+struct ath12k_bp_stats {
+	/* Head Pointer reported by the last HTT Backpressure event for the ring */
+	u16 hp;
+
+	/* Tail Pointer reported by the last HTT Backpressure event for the ring */
+	u16 tp;
+
+	/* Number of Backpressure events received for the ring */
+	u32 count;
+
+	/* Last recorded event timestamp */
+	unsigned long jiffies;
+};
+
+struct ath12k_dp_ring_bp_stats {
+	struct ath12k_bp_stats umac_ring_bp_stats[HTT_SW_UMAC_RING_IDX_MAX];
+	struct ath12k_bp_stats lmac_ring_bp_stats[HTT_SW_LMAC_RING_IDX_MAX][MAX_RADIOS];
+};
+
+struct ath12k_bus_params {
+	bool fixed_bdf_addr;
+	bool fixed_mem_region;
 };
 
 struct ath12k_soc_dp_tx_err_stats {
 	/* TCL Ring Descriptor unavailable */
 	u32 desc_na[DP_TCL_NUM_RING_MAX];
+	/* TCL Ring Buffers unavailable */
+	u32 txbuf_na[DP_TCL_NUM_RING_MAX];
+
+	u32 peers_not_present;
+
+	u32 pdev_threshold_limit;
+	u32 group_threshold_limit;
+
 	/* Other failures during dp_tx due to mem allocation failure
 	 * idr unavailable etc.
 	 */
@@ -632,28 +1234,209 @@
 	u32 err_ring_pkts;
 	u32 invalid_rbm;
 	u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
+	u32 rxdma_error_drop[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
 	u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
+	u32 reo_error_drop[HAL_REO_DEST_RING_ERROR_CODE_MAX];
 	u32 hal_reo_error[DP_REO_DST_RING_MAX];
+	u32 mon_drop_desc;
+	u32 hal_reo_cmd_drain;
+	u32 reo_cmd_cache_error;
+	u32 mcast_enqueued;
+	u32 ucast_enqueued;
+	u32 mcast_reinject;
+	u32 tx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX];
+	u32 tqm_rel_reason[MAX_TQM_RELEASE_REASON];
+	u32 fw_tx_status[MAX_FW_TX_STATUS];
+	u32 tx_enqueued[MAX_TCL_RING];
+	u32 tx_completed[MAX_TCL_RING];
+	u32 null_tx_complete[MAX_TCL_RING];
+	u32 bond_tx_ucast_enqueued[MAX_TCL_RING];
+	u32 bond_tx_mcast_enqueued[MAX_TCL_RING];
+	u32 bond_tx_ucast_dropped[MAX_TCL_RING];
+	u32 bond_tx_mcast_dropped[MAX_TCL_RING];
+	/* Number of frames successfully converted to ucast from mcast */
+	u32 bond_mcast2ucast_converted[MAX_TCL_RING];
+	/* Number of mcast frames tried for ucast conversion*/
+	u32 bond_mcast2ucast_tried[MAX_TCL_RING];
+	/* Number of times mcast to ucast conversion failed*/
+	u32 bond_mcast2ucast_drop[MAX_TCL_RING];
+	u32 reo_rx[DP_REO_DST_RING_MAX] [ATH12K_MAX_SOCS];
+	u32 fast_rx[DP_REO_DST_RING_MAX] [ATH12K_MAX_SOCS];
+	u32 non_fast_rx[DP_REO_DST_RING_MAX] [ATH12K_MAX_SOCS];
+	u32 rx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX] [ATH12K_MAX_SOCS];
+
 	struct ath12k_soc_dp_tx_err_stats tx_err;
+	struct ath12k_dp_ring_bp_stats bp_stats;
+	u32 reo_cmd_update_rx_queue_error;
+	u32 first_and_last_msdu_bit_miss;
+	u32 reo_excep_msdu_buf_type;
+};
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+struct ath12k_ppeds_stats {
+	u32 tcl_prod_cnt;
+	u32 tcl_cons_cnt;
+	u32 reo_prod_cnt;
+	u32 reo_cons_cnt;
+	u32 get_tx_desc_cnt;
+	u32 tx_desc_allocated;
+	u32 tx_desc_freed;
+	u32 fw2wbm_pkt_drops;
+	u32 enable_intr_cnt;
+	u32 disable_intr_cnt;
+	u32 release_tx_single_cnt;
+	u32 release_rx_desc_cnt;
+	u32 num_rx_desc_freed;
+	u32 num_rx_desc_realloc;
+};
+
+struct ath12k_ppeds_napi {
+	struct napi_struct napi;
+	struct net_device ndev;
+};
+#endif
+
+struct ath12k_reg_freq {
+         u32 start_freq;
+         u32 end_freq;
+};
+
+struct vendor_info {
+    enum ath12k_bdf_search bdf_search;
+    u32 vendor;
+    u32 device;
+    u32 subsystem_vendor;
+    u32 subsystem_device;
+};
+
+/* Fatal error notification type based on specific platform type */
+enum ath12k_core_crash_type {
+	/* Fatal error notification unknown or fatal error notification
+	 * is honored.
+	 */
+	ATH12K_NO_CRASH,
+
+	/* Fatal error notification from MHI message */
+	ATH12K_MHI_CRASH,
+
+	/* Fatal error notification from remoteproc user pd for ahb based
+	 * internal radio
+	 */
+	ATH12K_RPROC_USERPD_AHB_CRASH,
+
+	/* Fatal error notification from remoteproc user pd for platform with
+	 * ahb based internal radio and pcic based external radios
+	 */
+	ATH12K_RPROC_USERPD_HYBRID_CRASH,
+
+	/* Fatal error notification from remoteproc root pd for ahb based
+	 * internal radio
+	 */
+	ATH12K_RPROC_ROOTPD_AHB_CRASH,
+
+	/* Fatal error notification from remoteproc root pd for platform with
+	 * ahb based internal radio and pcic based external radios
+	 */
+	ATH12K_RPROC_ROOTPD_HYBRID_CRASH
+};
+
+enum ath12k_mlo_recovery_mode {
+	ATH12K_MLO_RECOVERY_MODE0 = 1,
+	ATH12K_MLO_RECOVERY_MODE1 = 2,
+};
+
+struct ath12k_internal_pci {
+	bool gic_enabled;
+	wait_queue_head_t gic_msi_waitq;
+	u32 dp_msi_data[ATH12K_QCN6432_EXT_IRQ_GRP_NUM_MAX];
+	u32 ce_msi_data[ATH12K_QCN6432_CE_COUNT];
+	u32 dp_irq_num[ATH12K_QCN6432_EXT_IRQ_GRP_NUM_MAX];
+};
+
+struct ath12k_mlo_dp_umac_reset {
+        atomic_t response_chip;
+        spinlock_t lock;
+        u8 umac_reset_info;
+        u8 initiator_chip;
+	u8 is_intr_bkup;
+	struct ath12k_hw_ring_mask intr_bkup;
+};
+
+#define ATH12K_UMAC_RESET_IPC	451
+#define ATH12K_IS_UMAC_RESET_IN_PROGRESS	BIT(0)
+
+/* Holds info on the group of SOCs that are registered as a single wiphy
+ * or single SOC where each radio registered as separate wiphy in non-MLO
+ */
+struct ath12k_hw_group {
+	/* Keep this always as first member */
+	struct list_head list;
+	u8 id;
+	u8 num_chip;
+	u8 num_probed;
+	u8 num_started;
+	u8 num_hw;
+	bool mlo_capable;
+	bool hw_queues_stopped;
+	bool mgmt_rx_reorder;
+	unsigned long dev_flags;
+	enum ath12k_mlo_recovery_mode recovery_mode;
+	enum ath12k_core_crash_type crash_type;
+	struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO];
+	struct ath12k_base *ab[ATH12K_MAX_SOCS];
+	struct ath12k __rcu *hw_links[ATH12K_GROUP_MAX_RADIO];
+	struct ath12k_mlo_memory mlo_mem;
+	struct mutex mutex_lock;
+	struct ath12k_mgmt_rx_reo_context rx_reo;
+	struct ath12k_host_mlo_mem_arena mlomem_arena;
+	atomic_t num_dp_tx_pending;
+	u64 mlo_tstamp_offset;
+	struct work_struct reset_work;
+	struct work_struct recovery_work;
+	struct ath12k_mlo_dp_umac_reset mlo_umac_reset;
+	struct completion umac_reset_complete;
+	bool trigger_umac_reset;
+};
+
+struct ath12k_bid_override {
+	unsigned int domain;
+	unsigned int bus_nr;
+	unsigned int slot;
+	unsigned int func;
+	u16 board_id;
+	struct list_head next;
 };
 
 /* Master structure to hold the hw data which may be used in core module */
 struct ath12k_base {
 	enum ath12k_hw_rev hw_rev;
+	enum ath12k_firmware_mode fw_mode;
 	struct platform_device *pdev;
 	struct device *dev;
+	struct ath12k_hw_group *ag;
 	struct ath12k_qmi qmi;
 	struct ath12k_wmi_base wmi_ab;
 	struct completion fw_ready;
 	int num_radios;
+	u8 chip_id;
 	/* HW channel counters frequency value in hertz common to all MACs */
 	u32 cc_freq_hz;
 
+	struct list_head board_id_overrides;
 	struct ath12k_htc htc;
 
 	struct ath12k_dp dp;
 
 	void __iomem *mem;
+	void __iomem *mem_ce;
+	void __iomem *mem_cmem;
+	void __iomem *mem_pmm;
+	u32 ce_remap_base_addr;
+	u32 cmem_remap_base_addr;
+	u32 pmm_remap_base_addr;
+	bool ce_remap;
+	bool cmem_remap;
+	bool pmm_remap;
 	unsigned long mem_len;
 
 	struct {
@@ -673,13 +1456,25 @@
 	struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS];
 	unsigned long long free_vdev_map;
 	unsigned long long free_vdev_stats_id_map;
+
+	/* To synchronize rhash tbl write operation */
+	struct mutex tbl_mtx_lock;
+
+	/* The rhashtable containing struct ath12k_peer keyed by mac addr */
+	struct rhashtable *rhead_peer_addr;
+	struct rhashtable_params rhash_peer_addr_param;
+
+	/* The rhashtable containing struct ath12k_peer keyed by id  */
+	struct rhashtable *rhead_peer_id;
+	struct rhashtable_params rhash_peer_id_param;
+
 	struct list_head peers;
 	wait_queue_head_t peer_mapping_wq;
 	u8 mac_addr[ETH_ALEN];
 	bool wmi_ready;
 	u32 wlan_init_status;
 	int irq_num[ATH12K_IRQ_NUM_MAX];
-	struct ath12k_ext_irq_grp ext_irq_grp[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+	struct ath12k_ext_irq_grp ext_irq_grp[ATH12K_EXT_IRQ_DP_NUM_VECTORS];
 	struct napi_struct *napi;
 	struct ath12k_wmi_target_cap_arg target_caps;
 	u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE];
@@ -687,6 +1482,8 @@
 	int bd_api;
 
 	const struct ath12k_hw_params *hw_params;
+	struct ath12k_bus_params bus_params;
+	bool is_qdss_tracing;
 
 	const struct firmware *cal_file;
 
@@ -698,10 +1495,20 @@
 	/* This regd is set during dynamic country setting
 	 * This may or may not be used during the runtime
 	 */
+	bool regd_change_user_request[MAX_RADIOS];
 	struct ieee80211_regdomain *new_regd[MAX_RADIOS];
 
 	/* Current DFS Regulatory */
 	enum ath12k_dfs_region dfs_region;
+	struct ath12k_reg_freq reg_freq_2g;
+	struct ath12k_reg_freq reg_freq_5g;
+	struct ath12k_reg_freq reg_freq_6g;
+#ifdef CONFIG_ATH12K_DEBUGFS
+	struct dentry *debugfs_soc;
+#endif
+	struct list_head neighbor_peers;
+	int num_nrps;
+
 	struct ath12k_soc_dp_stats soc_stats;
 
 	unsigned long dev_flags;
@@ -710,20 +1517,35 @@
 	struct work_struct restart_work;
 	struct workqueue_struct *workqueue_aux;
 	struct work_struct reset_work;
-	atomic_t reset_count;
+	struct work_struct recovery_work;
 	atomic_t recovery_count;
-	atomic_t recovery_start_count;
+	atomic_t reset_count;
 	bool is_reset;
 	struct completion reset_complete;
+	wait_queue_head_t ssr_dump_wq;
+	bool collect_dump;
+	struct notifier_block ssr_nb;
+	struct notifier_block atomic_ssr_nb;
+	struct notifier_block rpd_ssr_nb;
+	struct notifier_block rpd_atomic_ssr_nb;
+	void *ssr_upd_handle;
+	void *ssr_txt_handle;
+	void *ssr_rpd_handle;
+	void *ssr_atomic_rpd_handle;
+	void *ssr_atomic_txt_handle;
+	void *ssr_atomic_upd_handle;
 	struct completion reconfigure_complete;
-	struct completion recovery_start;
+	unsigned long reset_fail_timeout;
 	/* continuous recovery fail count */
 	atomic_t fail_cont_count;
-	unsigned long reset_fail_timeout;
+	u32 recovery_start_time;
 	struct {
 		/* protected by data_lock */
 		u32 fw_crash_counter;
+		u32 last_recovery_time;
 	} stats;
+	bool ftm_segment_handler;
+	struct ath12k_ftm_event_obj ftm_event_obj;
 	u32 pktlog_defs_checksum;
 
 	struct ath12k_dbring_cap *db_caps;
@@ -733,35 +1555,247 @@
 
 	struct completion htc_suspend;
 
+	enum ath12k_fw_recovery_option fw_recovery_support;
+
+	u32 fw_dbglog_param;
+	u64 fw_dbglog_val;
+
 	u64 fw_soc_drop_count;
 	bool static_window_map;
+	struct completion rddm_reset_done;
+	
+	struct device_node *hremote_node;
+	u32 host_ddr_fixed_mem_off;
+	bool stats_disable;
+	u32 rx_hash_ix2;
+	u32 rx_hash_ix3;
+
+	struct vendor_info id;
+	u32 max_msduq_per_tid;
+	u32 default_msduq_per_tid;
+	bool in_panic;
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	ppe_ds_wlan_handle_t *ppeds_handle;
+	/* used for per node enumeration*/
+	int ppeds_node_idx;
+	int ppe_vp_tbl_registered[PPE_VP_ENTRIES_MAX];
+	int ppe_vp_search_idx_tbl_set[PPE_VP_ENTRIES_MAX];
+	struct ath12k_ppeds_napi ppeds_napi_ctxt;
+	struct mutex ppe_vp_tbl_lock;
+	u8 num_ppe_vp_profiles;
+	u8 num_ppe_vp_search_idx_entries;
+	u8 num_ppe_vp_entries;
+    u8 ppeds_int_mode_enabled;
+	u8 ppeds_stopped;
+	struct ath12k_ppeds_stats ppeds_stats;
+	u16 *ppeds_rx_idx[ATH12K_MAX_SOCS];
+	u16 ppeds_rx_num_elem;
+#endif
+
+	int userpd_id;
+	struct {
+		const struct ath12k_msi_config *config;
+		u32 ep_base_data;
+		u32 irqs[32];
+		u32 addr_lo;
+		u32 addr_hi;
+	} msi;
+
+	struct ath12k_internal_pci ipci;
+	u32 chwidth_num_peer_caps;
+	struct ath12k_dp_umac_reset dp_umac_reset;
+
+	u32 *crash_info_address;
+	u32 *recovery_mode_address;
+
+	/* to identify multipd_architecture */
+	bool multi_pd_arch;
+	/* Mode1 specific flag to identify recovering pdev */
+	bool recovery_start;
+	bool fw_cfg_support;
 
+	bool ce_pipe_init_done;
+	bool rxgainlut_support;
 	/* must be last */
 	u8 drv_priv[] __aligned(sizeof(void *));
 };
 
+struct ath12k_fw_stats_pdev {
+	struct list_head list;
+
+	/* PDEV stats */
+	s32 ch_noise_floor;
+	/* Cycles spent transmitting frames */
+	u32 tx_frame_count;
+	/* Cycles spent receiving frames */
+	u32 rx_frame_count;
+	/* Total channel busy time, evidently */
+	u32 rx_clear_count;
+	/* Total on-channel time */
+	u32 cycle_count;
+	u32 phy_err_count;
+	u32 chan_tx_power;
+	u32 ack_rx_bad;
+	u32 rts_bad;
+	u32 rts_good;
+	u32 fcs_bad;
+	u32 no_beacons;
+	u32 mib_int_count;
+
+	/* PDEV TX stats */
+	/* Num HTT cookies queued to dispatch list */
+	s32 comp_queued;
+	/* Num HTT cookies dispatched */
+	s32 comp_delivered;
+	/* Num MSDU queued to WAL */
+	s32 msdu_enqued;
+	/* Num MPDU queue to WAL */
+	s32 mpdu_enqued;
+	/* Num MSDUs dropped by WMM limit */
+	s32 wmm_drop;
+	/* Num Local frames queued */
+	s32 local_enqued;
+	/* Num Local frames done */
+	s32 local_freed;
+	/* Num queued to HW */
+	s32 hw_queued;
+	/* Num PPDU reaped from HW */
+	s32 hw_reaped;
+	/* Num underruns */
+	s32 underrun;
+	/* Num PPDUs cleaned up in TX abort */
+	s32 tx_abort;
+	/* Num MPDUs requeued by SW */
+	s32 mpdus_requed;
+	/* excessive retries */
+	u32 tx_ko;
+	/* data hw rate code */
+	u32 data_rc;
+	/* Scheduler self triggers */
+	u32 self_triggers;
+	/* frames dropped due to excessive sw retries */
+	u32 sw_retry_failure;
+	/* illegal rate phy errors	*/
+	u32 illgl_rate_phy_err;
+	/* wal pdev continuous xretry */
+	u32 pdev_cont_xretry;
+	/* wal pdev tx timeouts */
+	u32 pdev_tx_timeout;
+	/* wal pdev resets */
+	u32 pdev_resets;
+	/* frames dropped due to non-availability of stateless TIDs */
+	u32 stateless_tid_alloc_failure;
+	/* PhY/BB underrun */
+	u32 phy_underrun;
+	/* MPDU is more than txop limit */
+	u32 txop_ovf;
+
+	/* PDEV RX stats */
+	/* Cnts any change in ring routing mid-ppdu */
+	s32 mid_ppdu_route_change;
+	/* Total number of statuses processed */
+	s32 status_rcvd;
+	/* Extra frags on rings 0-3 */
+	s32 r0_frags;
+	s32 r1_frags;
+	s32 r2_frags;
+	s32 r3_frags;
+	/* MSDUs / MPDUs delivered to HTT */
+	s32 htt_msdus;
+	s32 htt_mpdus;
+	/* MSDUs / MPDUs delivered to local stack */
+	s32 loc_msdus;
+	s32 loc_mpdus;
+	/* AMSDUs that have more MSDUs than the status ring size */
+	s32 oversize_amsdu;
+	/* Number of PHY errors */
+	s32 phy_errs;
+	/* Number of PHY errors drops */
+	s32 phy_err_drop;
+	/* Number of mpdu errors - FCS, MIC, ENC etc. */
+	s32 mpdu_errs;
+};
+
+struct ath12k_fw_stats_vdev {
+	struct list_head list;
+
+	u32 vdev_id;
+	u32 beacon_snr;
+	u32 data_snr;
+	u32 num_tx_frames[WLAN_MAX_AC];
+	u32 num_rx_frames;
+	u32 num_tx_frames_retries[WLAN_MAX_AC];
+	u32 num_tx_frames_failures[WLAN_MAX_AC];
+	u32 num_rts_fail;
+	u32 num_rts_success;
+	u32 num_rx_err;
+	u32 num_rx_discard;
+	u32 num_tx_not_acked;
+	u32 tx_rate_history[MAX_TX_RATE_VALUES];
+	u32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+};
+
+struct ath12k_fw_stats_bcn {
+	struct list_head list;
+
+	u32 vdev_id;
+	u32 tx_bcn_succ_cnt;
+	u32 tx_bcn_outage_cnt;
+};
+
 int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab);
 int ath12k_core_pre_init(struct ath12k_base *ab);
 int ath12k_core_init(struct ath12k_base *ath12k);
 void ath12k_core_deinit(struct ath12k_base *ath12k);
 struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
-				      enum ath12k_bus bus);
+				      enum ath12k_bus bus,
+				      const struct ath12k_bus_params *bus_params);
 void ath12k_core_free(struct ath12k_base *ath12k);
 int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
 				       struct ath12k_board_data *bd,
 				       char *filename);
 int ath12k_core_fetch_bdf(struct ath12k_base *ath12k,
 			  struct ath12k_board_data *bd);
+int ath12k_core_fetch_regdb(struct ath12k_base *ath12k,
+                            struct ath12k_board_data *bd);
+int ath12k_core_fetch_fw_cfg(struct ath12k_base *ath12k,
+			     struct ath12k_board_data *bd);
+int ath12k_core_fetch_rxgainlut(struct ath12k_base *ath12k,
+				struct ath12k_board_data *bd);
 void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd);
 int ath12k_core_check_dt(struct ath12k_base *ath12k);
 
 void ath12k_core_halt(struct ath12k *ar);
 int ath12k_core_resume(struct ath12k_base *ab);
 int ath12k_core_suspend(struct ath12k_base *ab);
+int ath12k_config_qdss(struct ath12k_base *ab);
+
+void ath12k_core_put_hw_group(struct ath12k_base *ab);
 
 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
 						    const char *filename);
 
+void ath12k_core_wait_dump_collect(struct ath12k_base *ab);
+void ath12k_core_issue_bug_on(struct ath12k_base *ab);
+
+void ath12k_fw_stats_init(struct ath12k *ar);
+void ath12k_fw_stats_pdevs_free(struct list_head *head);
+void ath12k_fw_stats_bcn_free(struct list_head *head);
+void ath12k_fw_stats_reset(struct ath12k *ar);
+void ath12k_fw_stats_free(struct ath12k_fw_stats *stats);
+#ifdef CONFIG_ATH12K_SAWF
+int ath12k_send_sawf_configs_soc(struct ath12k_sawf_svc_params *new_param);
+int ath12k_sawf_send_disable_soc(u8 svc_id);
+int ath12k_core_sawf_ul_config(struct net_device *dev, struct ath12k_sawf_wmi_peer_latency_param *latency_info);
+void *ath12k_get_ab_nd_peer_from_peer_mac(u8 *peer_mac, struct ath12k_base **ab_ref);
+#endif /* CONFIG_ATH12K_SAWF */
+void ath12k_dp_umac_reset_handle(struct ath12k_base *ab);
+int ath12k_dp_umac_reset_init(struct ath12k_base *ab);
+void ath12k_dp_umac_reset_deinit(struct ath12k_base *ab);
+void ath12k_umac_reset_completion(struct ath12k_base *ab);
+void ath12k_umac_reset_notify_pre_reset_done(struct ath12k_base *ab);
+
 static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
 {
 	switch (state) {
@@ -791,11 +1825,16 @@
 	return (struct ath12k_skb_rxcb *)skb->cb;
 }
 
-static inline struct ath12k_vif *ath12k_vif_to_arvif(struct ieee80211_vif *vif)
+static inline struct ath12k_vif *ath12k_vif_to_ahvif(struct ieee80211_vif *vif)
 {
 	return (struct ath12k_vif *)vif->drv_priv;
 }
 
+static inline struct ath12k_sta *ath12k_sta_to_ahsta(struct ieee80211_sta *sta)
+{
+	return (struct ath12k_sta *)sta->drv_priv;
+}
+
 static inline struct ath12k *ath12k_ab_to_ar(struct ath12k_base *ab,
 					     int mac_id)
 {
@@ -815,9 +1854,27 @@
 	switch (bus) {
 	case ATH12K_BUS_PCI:
 		return "pci";
+	case ATH12K_BUS_AHB:
+		return "ahb";
+	case ATH12K_BUS_HYBRID:
+		return "ahb";
 	}
 
 	return "unknown";
 }
 
+int ath12k_pci_init(void);
+void ath12k_pci_exit(void);
+#ifdef CONFIG_ATH12K_AHB
+int ath12k_ahb_init(void);
+void ath12k_ahb_exit(void);
+#else
+static inline int ath12k_ahb_init(void)
+{
+	return 0;
+};
+
+static inline void ath12k_ahb_exit(void) {};
+#endif
+
 #endif /* _CORE_H_ */
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/dbring.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dbring.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/dbring.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dbring.c	2024-01-19 17:01:19.853846702 +0100
@@ -7,6 +7,34 @@
 #include "core.h"
 #include "debug.h"
 
+#define ATH12K_DB_MAGIC_VALUE 0xdeadbeaf
+
+int ath12k_dbring_validate_buffer(struct ath12k *ar, void *buffer, u32 size)
+{
+	u32 *temp;
+	int idx;
+	size = size >> 2;
+
+	for (idx = 0, temp = buffer; idx < size; idx++, temp++) {
+		if (*temp == ATH12K_DB_MAGIC_VALUE)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void ath12k_dbring_fill_magic_value(struct ath12k *ar,
+					  void *buffer, u32 size)
+{
+	/* memset32 function fills buffer payload with the ATH12K_DB_MAGIC_VALUE
+	 * and the variable size is expected to be the number of u32 values
+	 * to be stored, not the number of bytes.
+	 */
+	size = size / sizeof(u32);
+
+	memset32(buffer, ATH12K_DB_MAGIC_VALUE, size);
+}
+
 static int ath12k_dbring_bufs_replenish(struct ath12k *ar,
 					struct ath12k_dbring *ring,
 					struct ath12k_dbring_element *buff,
@@ -28,6 +56,7 @@
 
 	ptr_unaligned = buff->payload;
 	ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
+	ath12k_dbring_fill_magic_value(ar, ptr_aligned, ring->buf_sz);
 	paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
 			       DMA_FROM_DEVICE);
 
@@ -51,8 +80,9 @@
 
 	buff->paddr = paddr;
 
-	cookie = u32_encode_bits(ar->pdev_idx, DP_RXDMA_BUF_COOKIE_PDEV_ID) |
-		 u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
+	dma_sync_single_for_device(ab->dev, paddr, ring->buf_sz, DMA_FROM_DEVICE);
+	cookie = u32_encode_bits(ar->pdev_idx, DP_DIR_BUF_COOKIE_PDEV_ID) |
+		 u32_encode_bits(buf_id, DP_DIR_BUF_COOKIE_BUF_ID);
 
 	ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);
 
@@ -178,7 +208,7 @@
 	ring->hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
 	ring->tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);
 
-	ret = ath12k_dbring_fill_bufs(ar, ring, GFP_KERNEL);
+	ret = ath12k_dbring_fill_bufs(ar, ring, GFP_ATOMIC);
 
 	return ret;
 }
@@ -270,6 +300,7 @@
 
 	switch (ev->fixed.module_id) {
 	case WMI_DIRECT_BUF_SPECTRAL:
+		ring = ath12k_spectral_get_dbring(ar);
 		break;
 	default:
 		ring = NULL;
@@ -299,7 +330,7 @@
 
 		ath12k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);
 
-		buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+		buf_id = u32_get_bits(cookie, DP_DIR_BUF_COOKIE_BUF_ID);
 
 		spin_lock_bh(&ring->idr_lock);
 		buff = idr_find(&ring->bufs_idr, buf_id);
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/dbring.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dbring.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/dbring.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dbring.h	2024-01-19 17:01:19.853846702 +0100
@@ -31,6 +31,20 @@
 	u32 num_meta;
 };
 
+struct wmi_pdev_sscan_fw_param_event {
+	struct ath12k_wmi_pdev_sscan_fw_cmd_fixed_param fixed;
+	struct ath12k_wmi_pdev_sscan_fft_bin_index *bin;
+	struct ath12k_wmi_pdev_sscan_chan_info ch_info;
+	struct ath12k_wmi_pdev_sscan_per_detector_info *det_info;
+};
+
+struct wmi_spectral_capabilities_event {
+	struct ath12k_wmi_spectral_scan_bw_capabilities *sscan_bw_caps;
+	struct ath12k_wmi_spectral_fft_size_capabilities *fft_size_caps;
+	u32 num_bw_caps_entry;
+	u32 num_fft_size_caps_entry;
+};
+
 struct ath12k_dbring_cap {
 	u32 pdev_id;
 	enum wmi_direct_buffer_module id;
@@ -77,4 +91,5 @@
 			  struct ath12k_dbring_cap *db_cap);
 void ath12k_dbring_srng_cleanup(struct ath12k *ar, struct ath12k_dbring *ring);
 void ath12k_dbring_buf_cleanup(struct ath12k *ar, struct ath12k_dbring *ring);
+int ath12k_dbring_validate_buffer(struct ath12k *ar, void *data, u32 size);
 #endif /* ATH12K_DBRING_H */
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/debug.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/debug.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/debug.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/debug.c	2024-01-26 16:32:21.055710360 +0100
@@ -4,6 +4,7 @@
  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
+#define DEBUG
 #include <linux/vmalloc.h>
 #include "core.h"
 #include "debug.h"
@@ -17,7 +18,10 @@
 
 	va_start(args, fmt);
 	vaf.va = &args;
+	if (ab)
 	dev_info(ab->dev, "%pV", &vaf);
+	else
+		pr_info("ath12k: %pV", &vaf);
 	/* TODO: Trace the log */
 	va_end(args);
 }
@@ -31,7 +35,10 @@
 
 	va_start(args, fmt);
 	vaf.va = &args;
+	if (ab)
 	dev_err(ab->dev, "%pV", &vaf);
+	else
+		pr_err("ath12k: %pV", &vaf);
 	/* TODO: Trace the log */
 	va_end(args);
 }
@@ -45,7 +52,10 @@
 
 	va_start(args, fmt);
 	vaf.va = &args;
+	if (ab)
 	dev_warn_ratelimited(ab->dev, "%pV", &vaf);
+	else
+		pr_warn_ratelimited("ath12k: %pV", &vaf);
 	/* TODO: Trace the log */
 	va_end(args);
 }
@@ -55,6 +65,10 @@
 void __ath12k_dbg(struct ath12k_base *ab, enum ath12k_debug_mask mask,
 		  const char *fmt, ...)
 {
+#define LEVEL_MASK GENMASK(31,28)
+#define debug_mask GENMASK(27,0)
+        u32 local_mask_level = mask & LEVEL_MASK;
+        u32 global_mask_level = ath12k_debug_mask & LEVEL_MASK;
 	struct va_format vaf;
 	va_list args;
 
@@ -63,8 +77,15 @@
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
-	if (ath12k_debug_mask & mask)
+	if ((mask & debug_mask) & ath12k_debug_mask) {
+		if (((local_mask_level) && (global_mask_level >= local_mask_level)) ||
+		    (!global_mask_level && (local_mask_level == ATH12K_DBG_L0))) {
+			if (ab)
 		dev_dbg(ab->dev, "%pV", &vaf);
+                	else
+                        	pr_devel("ath12k: %pV", &vaf);
+		}
+	}
 
 	/* TODO: trace log */
 
@@ -99,4 +120,35 @@
 	}
 }
 
+void ath12k_err_dump(struct ath12k_base *ab, const char *msg,
+		     const char *prefix, const void *buf,
+		     size_t len, struct hal_srng *srng)
+{
+	char linebuf[512];
+	size_t linebuflen;
+	const void *ptr;
+
+	if (msg)
+		ath12k_err(ab, msg);
+
+	for (ptr = buf; (ptr - buf) < len; ptr += 16) {
+		linebuflen = 0;
+		linebuflen += scnprintf(linebuf + linebuflen,
+					sizeof(linebuf) - linebuflen,
+					"%s%08x: ",
+					(prefix ? prefix : ""),
+					(unsigned int)(ptr - buf));
+		hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
+				   linebuf + linebuflen,
+				   sizeof(linebuf) - linebuflen, true);
+		dev_err(ab->dev, "%s\n", linebuf);
+	}
+
+	if (srng && srng->ring_dir == HAL_SRNG_DIR_DST)
+		dev_err(ab->dev, "ring_base_vaddr=%px tp=0x%X size=0x%X cached_hp=0x%X",
+			srng->ring_base_vaddr,
+			srng->u.dst_ring.tp,
+			srng->ring_size,
+			srng->u.dst_ring.cached_hp);
+}
 #endif /* CONFIG_ATH12K_DEBUG */
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/debug.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/debug.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/debug.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/debug.h	2024-01-19 17:01:19.853846702 +0100
@@ -8,6 +8,7 @@
 #define _ATH12K_DEBUG_H_
 
 #include "trace.h"
+#include "debugfs.h"
 
 enum ath12k_debug_mask {
 	ATH12K_DBG_AHB		= 0x00000001,
@@ -25,14 +26,32 @@
 	ATH12K_DBG_PCI		= 0x00001000,
 	ATH12K_DBG_DP_TX	= 0x00002000,
 	ATH12K_DBG_DP_RX	= 0x00004000,
+	ATH12K_DBG_OFFSET	= 0x00008000,
+	ATH12K_DBG_RX_REO	= 0x00010000,
+	ATH12K_DBG_PEER		= 0x00020000,
+
+	/* keep last*/
+	ATH12K_DBG_SAWF		= 0x00040000,
+	ATH12K_DBG_PPE          = 0x00080000,
+	ATH12K_DBG_DP_UMAC_RESET = 0x00100000,
+	ATH12K_DBG_MODE1_RECOVERY = 0x00200000,
 	ATH12K_DBG_ANY		= 0xffffffff,
 };
 
+enum ath12k_debug_mask_level {
+	ATH12K_DBG_L0		= 0x00000000,
+        ATH12K_DBG_L1		= 0x10000000,
+        ATH12K_DBG_L2		= 0x20000000,
+        ATH12K_DBG_L3		= 0x30000000,
+};
+
 __printf(2, 3) void ath12k_info(struct ath12k_base *ab, const char *fmt, ...);
 __printf(2, 3) void ath12k_err(struct ath12k_base *ab, const char *fmt, ...);
 __printf(2, 3) void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...);
 
 extern unsigned int ath12k_debug_mask;
+extern unsigned int ath12k_ftm_mode;
+extern bool ath12k_en_shutdown;
 
 #ifdef CONFIG_ATH12K_DEBUG
 __printf(3, 4) void __ath12k_dbg(struct ath12k_base *ab,
@@ -42,6 +61,9 @@
 		     enum ath12k_debug_mask mask,
 		     const char *msg, const char *prefix,
 		     const void *buf, size_t len);
+void ath12k_err_dump(struct ath12k_base *ab, const char *msg,
+                     const char *prefix, const void *buf,
+                     size_t len, struct hal_srng *srng);
 #else /* CONFIG_ATH12K_DEBUG */
 static inline void __ath12k_dbg(struct ath12k_base *ab,
 				enum ath12k_debug_mask dbg_mask,
@@ -55,8 +77,16 @@
 				   const void *buf, size_t len)
 {
 }
+
+static inline void ath12k_err_dump(struct ath12k_base *ab, const char *msg,
+                     const char *prefix, const void *buf,
+                     size_t len, struct hal_srng *srng)
+{
+}
 #endif /* CONFIG_ATH12K_DEBUG */
 
+#define ATH12K_DBG_SET(mask, level) ATH12K_DBG_##mask | ATH12K_DBG_##level
+
 #define ath12k_dbg(ar, dbg_mask, fmt, ...)			\
 do {								\
 	typeof(dbg_mask) mask = (dbg_mask);			\
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/dp.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dp.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/dp.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dp.c	2024-03-18 14:40:14.847741224 +0100
@@ -13,6 +13,17 @@
 #include "dp_rx.h"
 #include "peer.h"
 #include "dp_mon.h"
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+#include "ppe.h"
+#endif
+
+#define ATH12K_DP_LMAC_PEER_ID_LEGACY	2
+#define ATH12K_DP_LMAC_PEER_ID_MLO	3
+#define ATH12K_DP_PEER_ROUTING_LMAC_ID_MASK	GENMASK(7,6)
+
+#ifdef CONFIG_MAC80211_BONDED_SUPPORT
+extern int g_bonded_interface_model;
+#endif
 
 static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
 					  struct sk_buff *skb)
@@ -36,63 +47,118 @@
 		return;
 	}
 
+	if (!peer->primary_link) {
+		spin_unlock_bh(&ab->base_lock);
+		return;
+	}
+
 	ath12k_dp_rx_peer_tid_cleanup(ar, peer);
 	crypto_free_shash(peer->tfm_mmic);
+	peer->dp_setup_done = false;
 	spin_unlock_bh(&ab->base_lock);
 }
 
-int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
+int ath12k_dp_peer_default_route_setup(struct ath12k *ar, struct ath12k_link_vif *arvif,
+				       struct ath12k_link_sta *arsta)
 {
 	struct ath12k_base *ab = ar->ab;
-	struct ath12k_peer *peer;
-	u32 reo_dest;
-	int ret = 0, tid;
+	struct ieee80211_sta *sta;
+	u32 reo_dest, param_value;
+	u32 lmac_peer_routing_id = ATH12K_DP_LMAC_PEER_ID_LEGACY;
+	int ret;
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	struct ath12k_vif *ahvif = arvif->ahvif;
+#endif
+
+	sta = container_of((void *)arsta->ahsta, struct ieee80211_sta, drv_priv);
 
 	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
 	reo_dest = ar->dp.mac_id + 1;
-	ret = ath12k_wmi_set_peer_param(ar, addr, vdev_id,
-					WMI_PEER_SET_DEFAULT_ROUTING,
-					DP_RX_HASH_ENABLE | (reo_dest << 1));
+	param_value = (reo_dest << 1 | DP_RX_HASH_ENABLE);
+
+	/* For MLO-supported peers with multi-link capability, use the MLO conf */
+	if (hweight16(sta->valid_links) > 1)
+		lmac_peer_routing_id = ATH12K_DP_LMAC_PEER_ID_MLO;
+
+	param_value |= FIELD_PREP(ATH12K_DP_PEER_ROUTING_LMAC_ID_MASK,
+				  lmac_peer_routing_id);
 
+	ret = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
+					WMI_PEER_SET_DEFAULT_ROUTING, param_value);
 	if (ret) {
 		ath12k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
-			    ret, addr, vdev_id);
+			    ret, arsta->addr, arvif->vdev_id);
 		return ret;
 	}
 
+	ath12k_dbg(ab, ATH12K_DBG_DP_RX, "peer %pM set def route id %d sta_link %d\n",
+		   arsta->addr, lmac_peer_routing_id,
+		   hweight16(sta->valid_links));
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	/* keep last - override any PPE DS specific routing config */
+	if ((ahvif->vdev_type == WMI_VDEV_TYPE_AP) && !((ahvif->vdev_type == WMI_VDEV_TYPE_AP) && (ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_MESH_11S)))
+		ath12k_dp_peer_ppeds_route_setup(ar, arvif, arsta);
+#endif
+
+	return 0;
+}
+
+int ath12k_dp_peer_setup(struct ath12k *ar, struct ath12k_link_vif *arvif,
+			 struct ath12k_link_sta *arsta)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_peer *peer;
+	struct crypto_shash *tfm;
+	int ret = 0, tid;
+
+	tfm = crypto_alloc_shash("michael_mic", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	spin_lock_bh(&ab->base_lock);
+	peer = ath12k_peer_find(ab, arvif->vdev_id, arsta->addr);
+	if (!peer) {
+		ath12k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
+			    arsta->addr, arvif->vdev_id);
+		ret = -ENOENT;
+		goto free_shash;
+	}
+
+	if (peer->mlo && peer->link_id != arsta->ahsta->primary_link_id) {
+		ret = 0;
+		peer->primary_link = false;
+		goto free_shash;
+	}
+
+	peer->primary_link = true;
+
 	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
-		ret = ath12k_dp_rx_peer_tid_setup(ar, addr, vdev_id, tid, 1, 0,
-						  HAL_PN_TYPE_NONE);
+		ret = ath12k_dp_rx_peer_tid_setup(ar, tid, 1, 0, HAL_PN_TYPE_NONE, peer);
 		if (ret) {
 			ath12k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
 				    tid, ret);
-			goto peer_clean;
+			goto peer_tid_clean;
 		}
 	}
 
-	ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id);
+	ret = ath12k_dp_rx_peer_frag_setup(ar, peer, tfm);
 	if (ret) {
 		ath12k_warn(ab, "failed to setup rx defrag context\n");
-		goto peer_clean;
+		goto peer_tid_clean;
 	}
 
 	/* TODO: Setup other peer specific resource used in data path */
 
-	return 0;
-
-peer_clean:
-	spin_lock_bh(&ab->base_lock);
-
-	peer = ath12k_peer_find(ab, vdev_id, addr);
-	if (!peer) {
-		ath12k_warn(ab, "failed to find the peer to del rx tid\n");
 		spin_unlock_bh(&ab->base_lock);
-		return -ENOENT;
-	}
 
-	for (; tid >= 0; tid--)
+	return 0;
+
+peer_tid_clean:
+	for (tid--; tid >= 0; tid--)
 		ath12k_dp_rx_peer_tid_delete(ar, peer, tid);
 
+free_shash:
+	crypto_free_shash(tfm);
 	spin_unlock_bh(&ab->base_lock);
 
 	return ret;
@@ -103,6 +169,9 @@
 	if (!ring->vaddr_unaligned)
 		return;
 
+	if (ring->cached)
+		kfree(ring->vaddr_unaligned);
+	else
 	dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
 			  ring->paddr_unaligned);
 
@@ -114,7 +183,7 @@
 	int ext_group_num;
 	u8 mask = 1 << ring_num;
 
-	for (ext_group_num = 0; ext_group_num < ATH12K_EXT_IRQ_GRP_NUM_MAX;
+	for (ext_group_num = 0; ext_group_num < ATH12K_EXT_IRQ_DP_NUM_VECTORS;
 	     ext_group_num++) {
 		if (mask & grp_mask[ext_group_num])
 			return ext_group_num;
@@ -123,38 +192,76 @@
 	return -ENOENT;
 }
 
+bool ath12k_dp_umac_reset_in_progress(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_mlo_dp_umac_reset *mlo_umac_reset = &ag->mlo_umac_reset;
+	bool umac_in_progress = false;
+
+	if (!ab->hw_params->support_umac_reset)
+		return umac_in_progress;
+
+	spin_lock_bh(&mlo_umac_reset->lock);
+	if (mlo_umac_reset->umac_reset_info &
+	    ATH12K_IS_UMAC_RESET_IN_PROGRESS)
+		umac_in_progress = true;
+	spin_unlock_bh(&mlo_umac_reset->lock);
+
+	return umac_in_progress;
+}
+
 static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
 					      enum hal_ring_type type, int ring_num)
 {
 	const u8 *grp_mask;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_mlo_dp_umac_reset *umac_reset;
+	struct ath12k_hw_ring_mask *ring_mask;
+
+	umac_reset = &ag->mlo_umac_reset;
+	if (ath12k_dp_umac_reset_in_progress(ab) && umac_reset)
+		ring_mask = &umac_reset->intr_bkup;
+	else
+		ring_mask = ab->hw_params->ring_mask;
 
 	switch (type) {
 	case HAL_WBM2SW_RELEASE:
 		if (ring_num == HAL_WBM2SW_REL_ERR_RING_NUM) {
-			grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
+			grp_mask = &ring_mask->rx_wbm_rel[0];
 			ring_num = 0;
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+		} else if (ring_num == HAL_WBM2SW_PPEDS_TX_CMPLN_RING_NUM) {
+			grp_mask = &ring_mask->wbm2sw6_ppeds_tx_cmpln[0];
+			ring_num = 0;
+#endif
 		} else {
-			grp_mask = &ab->hw_params->ring_mask->tx[0];
+			grp_mask = &ring_mask->tx[0];
 		}
 		break;
 	case HAL_REO_EXCEPTION:
-		grp_mask = &ab->hw_params->ring_mask->rx_err[0];
+		grp_mask = &ring_mask->rx_err[0];
 		break;
 	case HAL_REO_DST:
-		grp_mask = &ab->hw_params->ring_mask->rx[0];
+		grp_mask = &ring_mask->rx[0];
 		break;
 	case HAL_REO_STATUS:
-		grp_mask = &ab->hw_params->ring_mask->reo_status[0];
+		grp_mask = &ring_mask->reo_status[0];
 		break;
 	case HAL_RXDMA_MONITOR_STATUS:
 	case HAL_RXDMA_MONITOR_DST:
-		grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0];
+		grp_mask = &ring_mask->rx_mon_dest[0];
 		break;
 	case HAL_TX_MONITOR_DST:
-		grp_mask = &ab->hw_params->ring_mask->tx_mon_dest[0];
+		grp_mask = &ring_mask->tx_mon_dest[0];
 		break;
 	case HAL_RXDMA_BUF:
-		grp_mask = &ab->hw_params->ring_mask->host2rxdma[0];
+		grp_mask = &ring_mask->host2rxdma[0];
+		break;
+	case HAL_PPE2TCL:
+		grp_mask = &ring_mask->ppe2tcl[0];
+		break;
+	case HAL_REO2PPE:
+		grp_mask = &ring_mask->reo2ppe[0];
 		break;
 	case HAL_RXDMA_MONITOR_BUF:
 	case HAL_TCL_DATA:
@@ -181,6 +288,7 @@
 	int msi_group_number, msi_data_count;
 	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
 	int ret;
+	int vector;
 
 	ret = ath12k_hif_get_user_msi_vector(ab, "DP",
 					     &msi_data_count, &msi_data_start,
@@ -209,11 +317,197 @@
 
 	ring_params->msi_addr = addr_lo;
 	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
+	if (ab->hif.bus == ATH12K_BUS_HYBRID)
+		ring_params->msi_data = ab->ipci.dp_msi_data[msi_group_number];
+	else
 	ring_params->msi_data = (msi_group_number % msi_data_count)
 		+ msi_data_start;
 	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
+
+	vector = msi_irq_start  + (msi_group_number % msi_data_count);
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	/* Interrupt will not be freed for non-asserted chips
+	 * during UMAC reset
+	 */
+	if (!ath12k_dp_umac_reset_in_progress(ab))
+		ath12k_hif_ppeds_register_interrupts(ab, type, vector, ring_num);
+#endif
+}
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+static int ath12k_dp_srng_alloc(struct ath12k_base *ab, struct dp_srng *ring,
+				enum hal_ring_type type, int ring_num,
+				int num_entries)
+{
+	int entry_sz = ath12k_hal_srng_get_entrysize(ab, type);
+	int max_entries = ath12k_hal_srng_get_max_entries(ab, type);
+	bool cached = false;
+
+	if (max_entries < 0 || entry_sz < 0)
+		return -EINVAL;
+
+	if (num_entries > max_entries)
+		num_entries = max_entries;
+
+	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
+	if (ab->hw_params->alloc_cacheable_memory) {
+		/* Allocate the reo dst and tx completion rings from cacheable memory */
+		switch (type) {
+		case HAL_REO_DST:
+		case HAL_WBM2SW_RELEASE:
+			cached = true;
+			break;
+		default:
+			cached = false;
+		}
+
+		if (cached) {
+			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
+			ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
+		}
+	}
+	if (!cached)
+		ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
+							   &ring->paddr_unaligned,
+							   GFP_KERNEL);
+	if (!ring->vaddr_unaligned)
+		return -ENOMEM;
+
+	memset(ring->vaddr_unaligned, 0, ring->size);
+	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
+	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
+		      (unsigned long)ring->vaddr_unaligned);
+
+	return 0;
 }
 
+static int ath12k_dp_srng_init_idx(struct ath12k_base *ab, struct dp_srng *ring,
+				   enum hal_ring_type type, int ring_num,
+				   int mac_id,
+				   int num_entries, u32 idx)
+{
+	struct hal_srng_params params = { 0 };
+	bool cached = false;
+	int ret;
+
+	params.ring_base_vaddr = ring->vaddr;
+	params.ring_base_paddr = ring->paddr;
+	params.num_entries = num_entries;
+	ath12k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
+
+	switch (type) {
+	case HAL_REO_DST:
+	case HAL_REO2PPE:
+		params.intr_batch_cntr_thres_entries =
+					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
+		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+		break;
+	case HAL_RXDMA_BUF:
+		params.intr_batch_cntr_thres_entries = 0;
+		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+		break;
+	case HAL_RXDMA_MONITOR_BUF:
+	case HAL_RXDMA_MONITOR_STATUS:
+		if (type == HAL_RXDMA_MONITOR_BUF)
+			params.low_threshold = DP_RX_MONITOR_BUF_LOW_TH;
+		else
+			params.low_threshold = num_entries >> 3;
+
+		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+
+		params.intr_batch_cntr_thres_entries = 0;
+		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+		break;
+	case HAL_TX_MONITOR_BUF:
+	case HAL_TX_MONITOR_DST:
+		params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
+		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+		params.intr_batch_cntr_thres_entries = 0;
+		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+		break;
+	case HAL_WBM2SW_RELEASE:
+		if (ab->hw_params->hw_ops->dp_srng_is_tx_comp_ring(ring_num)) {
+			params.intr_batch_cntr_thres_entries =
+					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
+			params.intr_timer_thres_us =
+					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
+			break;
+		}
+		/* fall through when ring_num != HAL_WBM2SW_REL_ERR_RING_NUM */
+		fallthrough;
+	case HAL_REO_EXCEPTION:
+	case HAL_REO_REINJECT:
+	case HAL_REO_CMD:
+	case HAL_REO_STATUS:
+	case HAL_TCL_DATA:
+	case HAL_TCL_CMD:
+	case HAL_TCL_STATUS:
+	case HAL_WBM_IDLE_LINK:
+	case HAL_SW2WBM_RELEASE:
+	case HAL_RXDMA_DST:
+	case HAL_RXDMA_MONITOR_DST:
+		params.intr_batch_cntr_thres_entries =
+					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
+		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
+		break;
+	case HAL_RXDMA_DIR_BUF:
+		break;
+	case HAL_PPE2TCL:
+		params.intr_batch_cntr_thres_entries =
+					HAL_SRNG_INT_BATCH_THRESHOLD_PPE2TCL;
+		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_PPE2TCL;
+		break;
+	default:
+		ath12k_warn(ab, "Not a valid ring type in dp :%d\n", type);
+		return -EINVAL;
+	}
+
+	if (cached) {
+		params.flags |= HAL_SRNG_FLAGS_CACHED;
+		ring->cached = 1;
+	}
+
+	ret = ath12k_hal_srng_setup_idx(ab, type, ring_num, mac_id, &params, idx);
+	if (ret < 0) {
+		ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
+			    ret, ring_num);
+		return ret;
+	}
+
+	ring->ring_id = ret;
+
+	return 0;
+
+}
+
+int ath12k_ppeds_dp_srng_alloc(struct ath12k_base *ab, struct dp_srng *ring,
+			       enum hal_ring_type type, int ring_num,
+			       int num_entries)
+{
+	int ret;
+
+	ret = ath12k_dp_srng_alloc(ab, ring, type, ring_num, num_entries);
+	if (ret != 0)
+		ath12k_warn(ab, "Failed to allocate dp srng ring.\n");
+
+	return 0;
+}
+
+int ath12k_ppeds_dp_srng_init(struct ath12k_base *ab, struct dp_srng *ring,
+			      enum hal_ring_type type, int ring_num,
+			      int mac_id, int num_entries, u32 idx)
+{
+	int ret;
+
+	ret = ath12k_dp_srng_init_idx(ab, ring, type, ring_num, mac_id, num_entries, idx);
+	if (ret != 0)
+		ath12k_warn(ab, "Failed to initialize dp srng ring.\n");
+
+	return 0;
+}
+#endif
+
 int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
 			 enum hal_ring_type type, int ring_num,
 			 int mac_id, int num_entries)
@@ -222,6 +516,7 @@
 	int entry_sz = ath12k_hal_srng_get_entrysize(ab, type);
 	int max_entries = ath12k_hal_srng_get_max_entries(ab, type);
 	int ret;
+	bool cached = false;
 
 	if (max_entries < 0 || entry_sz < 0)
 		return -EINVAL;
@@ -230,9 +525,30 @@
 		num_entries = max_entries;
 
 	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
+	if (ab->hw_params->alloc_cacheable_memory) {
+		/* Allocate the reo dst and tx completion rings from cacheable memory */
+		switch (type) {
+		case HAL_REO_DST:
+		case HAL_WBM2SW_RELEASE:
+			cached = true;
+			break;
+		default:
+			cached = false;
+		}
+	}
+
+	if (ath12k_dp_umac_reset_in_progress(ab))
+		goto skip_dma_alloc;
+
+	if (cached) {
+		ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
+		ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
+	} else {
 	ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
 						   &ring->paddr_unaligned,
 						   GFP_KERNEL);
+	}
+
 	if (!ring->vaddr_unaligned)
 		return -ENOMEM;
 
@@ -240,6 +556,8 @@
 	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
 		      (unsigned long)ring->vaddr_unaligned);
 
+skip_dma_alloc:
+	memset(ring->vaddr_unaligned, 0, ring->size);
 	params.ring_base_vaddr = ring->vaddr;
 	params.ring_base_paddr = ring->paddr;
 	params.num_entries = num_entries;
@@ -247,18 +565,28 @@
 
 	switch (type) {
 	case HAL_REO_DST:
+	case HAL_REO2PPE:
 		params.intr_batch_cntr_thres_entries =
 					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
 		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 		break;
 	case HAL_RXDMA_BUF:
+		params.intr_batch_cntr_thres_entries = 0;
+		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+		break;
 	case HAL_RXDMA_MONITOR_BUF:
 	case HAL_RXDMA_MONITOR_STATUS:
+		if (type == HAL_RXDMA_MONITOR_BUF)
+			params.low_threshold = DP_RX_MONITOR_BUF_LOW_TH;
+		else
 		params.low_threshold = num_entries >> 3;
+
 		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+
 		params.intr_batch_cntr_thres_entries = 0;
 		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 		break;
+	case HAL_TX_MONITOR_BUF:
 	case HAL_TX_MONITOR_DST:
 		params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
 		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
@@ -272,6 +600,12 @@
 			params.intr_timer_thres_us =
 					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
 			break;
+		} else if (ring_num == HAL_WBM2SW_PPEDS_TX_CMPLN_RING_NUM) {
+			params.intr_batch_cntr_thres_entries =
+					HAL_SRNG_INT_BATCH_THRESHOLD_PPE_WBM2SW_REL;
+			params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_TX;
+
+				break;
 		}
 		/* follow through when ring_num != HAL_WBM2SW_REL_ERR_RING_NUM */
 		fallthrough;
@@ -286,19 +620,28 @@
 	case HAL_SW2WBM_RELEASE:
 	case HAL_RXDMA_DST:
 	case HAL_RXDMA_MONITOR_DST:
-	case HAL_RXDMA_MONITOR_DESC:
 		params.intr_batch_cntr_thres_entries =
 					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
 		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
 		break;
 	case HAL_RXDMA_DIR_BUF:
 		break;
+	case HAL_PPE2TCL:
+		params.intr_batch_cntr_thres_entries =
+					HAL_SRNG_INT_BATCH_THRESHOLD_PPE2TCL;
+		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_PPE2TCL;
+		break;
 	default:
 		ath12k_warn(ab, "Not a valid ring type in dp :%d\n", type);
 		return -EINVAL;
 	}
 
-	ret = ath12k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
+	if (cached) {
+		params.flags |= HAL_SRNG_FLAGS_CACHED;
+		ring->cached = 1;
+	}
+
+	ret = ath12k_hal_srng_setup_idx(ab, type, ring_num, mac_id, &params, 0);
 	if (ret < 0) {
 		ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
 			    ret, ring_num);
@@ -311,26 +654,36 @@
 }
 
 static
-u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif *arvif)
+u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_link_vif *arvif)
 {
+	struct ath12k_vif *ahvif = arvif->ahvif;
 	u32 bank_config = 0;
+	struct ath12k_hw_group *ag = ab->ag;
+	enum hal_encrypt_type encrypt_type = 0;
 
 	/* Only valid for raw frames with HW crypto enabled.
 	 * With SW crypto, mac80211 sets key per packet
 	 */
-	if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
-	    test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
-		bank_config |=
-			u32_encode_bits(ath12k_dp_tx_get_encrypt_type(arvif->key_cipher),
+	if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
+	    test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ag->dev_flags) &&
+	    ahvif->key_cipher != INVALID_CIPHER)
+		encrypt_type = ath12k_dp_tx_get_encrypt_type(ahvif->key_cipher);
+	else
+		encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+
+	bank_config |= u32_encode_bits(ahvif->tx_encap_type,
+					HAL_TX_BANK_CONFIG_ENCAP_TYPE) |
+					u32_encode_bits(encrypt_type,
 					HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);
 
-	bank_config |= u32_encode_bits(arvif->tx_encap_type,
-					HAL_TX_BANK_CONFIG_ENCAP_TYPE);
 	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) |
 			u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) |
 			u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);
 
 	/* only valid if idx_lookup_override is not set in tcl_data_cmd */
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
+		bank_config |= u32_encode_bits(1, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
+	else
 	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
 
 	bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN,
@@ -339,7 +692,7 @@
 					HAL_TX_ADDRY_EN),
 					HAL_TX_BANK_CONFIG_ADDRY_EN);
 
-	bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(arvif->vif) ? 3 : 0,
+	bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(ahvif->vif) ? 3 : 0,
 					HAL_TX_BANK_CONFIG_MESH_EN) |
 			u32_encode_bits(arvif->vdev_id_check_en,
 					HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN);
@@ -349,7 +702,7 @@
 	return bank_config;
 }
 
-static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab, struct ath12k_vif *arvif,
+static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab, struct ath12k_link_vif *arvif,
 					 struct ath12k_dp *dp)
 {
 	int bank_id = DP_INVALID_BANK_ID;
@@ -406,6 +759,20 @@
 	spin_unlock_bh(&dp->tx_bank_lock);
 }
 
+void ath12k_dp_tx_update_bank_profile(struct ath12k_link_vif *arvif)
+{
+	struct ath12k_base *ab = arvif->ar->ab;
+	struct ath12k_dp *dp = &ab->dp;
+
+	ath12k_dp_tx_put_bank_profile(dp, arvif->bank_id);
+	arvif->bank_id = ath12k_dp_tx_get_bank_profile(ab, arvif, dp);
+	arvif->desc.info0 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_BANK_ID,
+				       arvif->bank_id);
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	ath12k_dp_ppeds_update_vp_entry(arvif->ar, arvif);
+#endif
+}
+
 static void ath12k_dp_deinit_bank_profiles(struct ath12k_base *ab)
 {
 	struct ath12k_dp *dp = &ab->dp;
@@ -437,6 +804,49 @@
 	return 0;
 }
 
+static void ath12k_dp_srng_hw_disable(struct ath12k_base *ab, struct dp_srng *ring)
+{
+	struct hal_srng *srng = &ab->hal.srng_list[ring->ring_id];
+	u32 reg_base, val, addr;
+
+	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+		if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK)
+			addr = HAL_SEQ_WCSS_UMAC_WBM_REG +
+			       HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab);
+		else
+			addr = reg_base + HAL_TCL1_RING_MISC_OFFSET(ab);
+		val = ath12k_hif_read32(ab, addr);
+		val &= ~HAL_TCL1_RING_MISC_SRNG_ENABLE;
+		ath12k_hif_write32(ab, addr, val);
+	} else {
+		val = ath12k_hif_read32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET);
+		val &= ~HAL_REO1_RING_MISC_SRNG_ENABLE;
+		ath12k_hif_write32(ab , reg_base + HAL_REO1_RING_MISC_OFFSET, val);
+	}
+}
+
+void ath12k_dp_srng_hw_ring_disable(struct ath12k_base *ab)
+{
+	struct ath12k_dp *dp = &ab->dp;
+	int i;
+
+	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
+		ath12k_dp_srng_hw_disable(ab, &dp->reo_dst_ring[i]);
+	ath12k_dp_srng_hw_disable(ab, &dp->wbm_desc_rel_ring);
+
+	for(i = 0; i < ab->hw_params->max_tx_ring; i++) {
+		ath12k_dp_srng_hw_disable(ab, &dp->tx_ring[i].tcl_data_ring);
+		ath12k_dp_srng_hw_disable(ab, &dp->tx_ring[i].tcl_comp_ring);
+	}
+	ath12k_dp_srng_hw_disable(ab, &dp->reo_reinject_ring);
+	ath12k_dp_srng_hw_disable(ab, &dp->rx_rel_ring);
+	ath12k_dp_srng_hw_disable(ab, &dp->reo_except_ring);
+	ath12k_dp_srng_hw_disable(ab, &dp->reo_cmd_ring);
+	ath12k_dp_srng_hw_disable(ab, &dp->reo_status_ring);
+	ath12k_dp_srng_hw_disable(ab, &dp->wbm_idle_ring);
+}
+
 static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
 {
 	struct ath12k_dp *dp = &ab->dp;
@@ -451,9 +861,11 @@
 		ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
 		ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
 	}
-	ath12k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
-	ath12k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
 	ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	ath12k_dp_srng_ppeds_cleanup(ab);
+#endif
 }
 
 static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
@@ -463,6 +875,7 @@
 	struct hal_srng *srng;
 	int i, ret, tx_comp_ring_num;
 	u32 ring_hash_map;
+	u8 rbm_id;
 
 	ret = ath12k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
 				   HAL_SW2WBM_RELEASE, 0, 0,
@@ -473,23 +886,10 @@
 		goto err;
 	}
 
-	ret = ath12k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
-				   DP_TCL_CMD_RING_SIZE);
-	if (ret) {
-		ath12k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
-		goto err;
-	}
-
-	ret = ath12k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
-				   0, 0, DP_TCL_STATUS_RING_SIZE);
-	if (ret) {
-		ath12k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
-		goto err;
-	}
-
 	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
 		map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
 		tx_comp_ring_num = map[i].wbm_ring_num;
+		rbm_id = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].rbm_id;
 
 		ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
 					   HAL_TCL_DATA, i, 0,
@@ -499,6 +899,7 @@
 				    i, ret);
 			goto err;
 		}
+		ath12k_hal_tx_config_rbm_mapping(ab, i, rbm_id, HAL_TCL_DATA);
 
 		ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
 					   HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0,
@@ -551,6 +952,9 @@
 		goto err;
 	}
 
+	if (ath12k_dp_umac_reset_in_progress(ab))
+		goto skip_reo_setup;
+
 	/* When hash based routing of rx packet is enabled, 32 entries to map
 	 * the hash values to the ring will be configured. Each hash entry uses
 	 * four bits to map to a particular ring. The ring mapping will be
@@ -568,6 +972,15 @@
 
 	ath12k_hal_reo_hw_setup(ab, ring_hash_map);
 
+skip_reo_setup:
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	ret = ath12k_dp_srng_ppeds_setup(ab);
+	if (ret) {
+		ath12k_warn(ab, "failed to set up ppe-ds srngs :%d\n", ret);
+		goto err;
+	}
+#endif
+
 	return 0;
 
 err:
@@ -610,6 +1023,7 @@
 	int i;
 	int ret = 0;
 	u32 end_offset, cookie;
+	u8 rbm_id;
 
 	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
 		ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
@@ -618,6 +1032,7 @@
 	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
 		return -EINVAL;
 
+	if (!ath12k_dp_umac_reset_in_progress(ab)) {
 	for (i = 0; i < num_scatter_buf; i++) {
 		slist[i].vaddr = dma_alloc_coherent(ab->dev,
 						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
@@ -627,10 +1042,12 @@
 			goto err;
 		}
 	}
+	}
 
 	scatter_idx = 0;
 	scatter_buf = slist[scatter_idx].vaddr;
 	rem_entries = n_entries_per_buf;
+	rbm_id = dp->idle_link_rbm_id;
 
 	for (i = 0; i < n_link_desc_bank; i++) {
 		align_bytes = link_desc_banks[i].vaddr -
@@ -640,7 +1057,8 @@
 		paddr = link_desc_banks[i].paddr;
 		while (n_entries) {
 			cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
-			ath12k_hal_set_link_desc_addr(scatter_buf, cookie, paddr);
+			ath12k_hal_set_link_desc_addr(scatter_buf, cookie,
+						      paddr, rbm_id);
 			n_entries--;
 			paddr += HAL_LINK_DESC_SIZE;
 			if (rem_entries) {
@@ -729,12 +1147,9 @@
 				 u32 ring_type, struct dp_srng *ring)
 {
 	ath12k_dp_link_desc_bank_free(ab, desc_bank);
-
-	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
 		ath12k_dp_srng_cleanup(ab, ring);
 		ath12k_dp_scatter_idle_link_desc_cleanup(ab);
 	}
-}
 
 static int ath12k_wbm_idle_ring_setup(struct ath12k_base *ab, u32 *n_link_desc)
 {
@@ -784,6 +1199,7 @@
 	u32 paddr;
 	int i, ret;
 	u32 cookie;
+	u8 rbm_id;
 
 	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
 	tot_mem_sz += HAL_LINK_DESC_ALIGN;
@@ -806,18 +1222,20 @@
 	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
 		return -EINVAL;
 
+	if (!ath12k_dp_umac_reset_in_progress(ab)) {
 	ret = ath12k_dp_link_desc_bank_alloc(ab, link_desc_banks,
 					     n_link_desc_bank, last_bank_sz);
 	if (ret)
 		return ret;
+	}
 
 	/* Setup link desc idle list for HW internal usage */
 	entry_sz = ath12k_hal_srng_get_entrysize(ab, ring_type);
 	tot_mem_sz = entry_sz * n_link_desc;
+	rbm_id = ab->dp.idle_link_rbm_id;
 
 	/* Setup scatter desc list when the total memory requirement is more */
-	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
-	    ring_type != HAL_RXDMA_MONITOR_DESC) {
+	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH) {
 		ret = ath12k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
 							     n_link_desc_bank,
 							     n_link_desc,
@@ -845,7 +1263,7 @@
 		       (desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
 			cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
 			ath12k_hal_set_link_desc_addr(desc,
-						      cookie, paddr);
+						      cookie, paddr, rbm_id);
 			n_entries--;
 			paddr += HAL_LINK_DESC_SIZE;
 		}
@@ -873,16 +1291,35 @@
 	int i = 0, j;
 	int tot_work_done = 0;
 	enum dp_monitor_mode monitor_mode;
-	u8 ring_mask;
+	u8 ring_mask_val;
+	struct ath12k_hw_ring_mask *ring_mask = ab->hw_params->ring_mask;
+	u8 tx_mask = ring_mask->tx[grp_id];
+	u8 rx_err_mask = ring_mask->rx_err[grp_id];
+	u8 rx_wbm_rel_mask = ring_mask->rx_wbm_rel[grp_id];
+	u8 rx_mask = ring_mask->rx[grp_id];
+	u8 reo_status_mask = ring_mask->reo_status[grp_id];
+	u8 host2rxdma_mask = ring_mask->host2rxdma[grp_id];
+	u8 rx_mon_dest_mask = ring_mask->rx_mon_dest[grp_id];
+	u8 tx_mon_dest_mask = ring_mask->tx_mon_dest[grp_id];
+	u8 misc_intr_mask = rx_mon_dest_mask |
+			    tx_mon_dest_mask |
+			    reo_status_mask |
+			    host2rxdma_mask;
 
 	while (i < ab->hw_params->max_tx_ring) {
-		if (ab->hw_params->ring_mask->tx[grp_id] &
-			BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num))
-			ath12k_dp_tx_completion_handler(ab, i);
+		if (tx_mask &
+		    BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num)) {
+			work_done = ath12k_dp_tx_completion_handler(ab, i,
+								    budget);
+			budget -= work_done;
+			tot_work_done += work_done;
+			if (budget <= 0)
+				goto done;
+		}
 		i++;
 	}
 
-	if (ab->hw_params->ring_mask->rx_err[grp_id]) {
+	if (rx_err_mask) {
 		work_done = ath12k_dp_rx_process_err(ab, napi, budget);
 		budget -= work_done;
 		tot_work_done += work_done;
@@ -890,7 +1327,7 @@
 			goto done;
 	}
 
-	if (ab->hw_params->ring_mask->rx_wbm_rel[grp_id]) {
+	if (rx_wbm_rel_mask) {
 		work_done = ath12k_dp_rx_process_wbm_err(ab,
 							 napi,
 							 budget);
@@ -901,8 +1338,9 @@
 			goto done;
 	}
 
-	if (ab->hw_params->ring_mask->rx[grp_id]) {
-		i = fls(ab->hw_params->ring_mask->rx[grp_id]) - 1;
+	while (rx_mask) {
+		i =  fls(rx_mask) - 1;
+		rx_mask ^=  1 << i;
 		work_done = ath12k_dp_rx_process(ab, i, napi,
 						 budget);
 		budget -= work_done;
@@ -911,14 +1349,17 @@
 			goto done;
 	}
 
-	if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
+	if (!misc_intr_mask)
+		goto done;
+
+	if (rx_mon_dest_mask) {
 		monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
-		ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
+		ring_mask_val = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
 		for (i = 0; i < ab->num_radios; i++) {
 			for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
 				int id = i * ab->hw_params->num_rxmda_per_pdev + j;
 
-				if (ring_mask & BIT(id)) {
+				if (ring_mask_val & BIT(id)) {
 					work_done =
 					ath12k_dp_mon_process_ring(ab, id, napi, budget,
 								   monitor_mode);
@@ -932,14 +1373,14 @@
 		}
 	}
 
-	if (ab->hw_params->ring_mask->tx_mon_dest[grp_id]) {
+	if (tx_mon_dest_mask) {
 		monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
-		ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
+		ring_mask_val = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
 		for (i = 0; i < ab->num_radios; i++) {
 			for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
 				int id = i * ab->hw_params->num_rxmda_per_pdev + j;
 
-				if (ring_mask & BIT(id)) {
+				if (ring_mask_val & BIT(id)) {
 					work_done =
 					ath12k_dp_mon_process_ring(ab, id, napi, budget,
 								   monitor_mode);
@@ -953,16 +1394,15 @@
 		}
 	}
 
-	if (ab->hw_params->ring_mask->reo_status[grp_id])
+	if (reo_status_mask)
 		ath12k_dp_rx_process_reo_status(ab);
 
-	if (ab->hw_params->ring_mask->host2rxdma[grp_id]) {
+	if (host2rxdma_mask) {
 		struct ath12k_dp *dp = &ab->dp;
 		struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+		LIST_HEAD(list);
 
-		ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, 0,
-					    ab->hw_params->hal_params->rx_buf_rbm,
-					    true);
+		ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
 	}
 
 	/* TODO: Implement handler for other interrupts */
@@ -973,12 +1413,18 @@
 
 void ath12k_dp_pdev_free(struct ath12k_base *ab)
 {
+	struct ath12k *ar;
 	int i;
 
 	del_timer_sync(&ab->mon_reap_timer);
 
-	for (i = 0; i < ab->num_radios; i++)
+	for (i = 0; i < ab->num_radios; i++) {
+		ar = ab->pdevs[i].ar;
 		ath12k_dp_rx_pdev_free(ab, i);
+		ath12k_dp_rx_pdev_mon_detach(ab, i);
+		ath12k_fw_stats_free(&ar->fw_stats);
+		ath12k_debugfs_unregister(ar);
+	}
 }
 
 void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
@@ -992,7 +1438,7 @@
 		dp = &ar->dp;
 		dp->mac_id = i;
 		atomic_set(&dp->num_tx_pending, 0);
-		init_waitqueue_head(&dp->tx_empty_waitq);
+		atomic_set(&ar->flush_request, 0);
 
 		/* TODO: Add any RXDMA setup required per pdev */
 	}
@@ -1013,11 +1459,38 @@
 
 static void ath12k_dp_mon_reap_timer_init(struct ath12k_base *ab)
 {
+	timer_setup(&ab->mon_reap_timer, ath12k_dp_service_mon_ring, 0);
+
 	if (ab->hw_params->rxdma1_enable)
 		return;
+}
 
-	timer_setup(&ab->mon_reap_timer, ath12k_dp_service_mon_ring, 0);
+#ifdef CONFIG_MAC80211_BONDED_SUPPORT
+static int ath12k_dp_ppe_rxole_rxdma_cfg(struct ath12k_base *ab)
+{
+	struct ath12k_dp_htt_rxdma_ppe_cfg_param param = {0};
+	int ret;
+
+	if (!g_bonded_interface_model)
+		return 0;
+
+	param.override = 1;
+	param.reo_dst_ind = HAL_REO2PPE_DST_IND;
+	param.multi_buffer_msdu_override_en = 0;
+
+	/* Override use_ppe to 0 in RxOLE for the following cases */
+	param.intra_bss_override = 1;
+	param.decap_raw_override = 1;
+	param.decap_nwifi_override = 1;
+	param.ip_frag_override = 1;
+
+	ret = ath12k_dp_rx_htt_rxdma_rxole_ppe_cfg_set(ab, &param);
+	if (ret)
+		ath12k_err(ab, "RxOLE and RxDMA PPE config failed %d\n", ret);
+
+	return ret;
 }
+#endif
 
 int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
 {
@@ -1025,6 +1498,15 @@
 	int ret;
 	int i;
 
+#ifdef CONFIG_MAC80211_BONDED_SUPPORT
+	ret = ath12k_dp_ppe_rxole_rxdma_cfg(ab);
+	if (ret) {
+		ath12k_err(ab, "Failed to send htt RxOLE and RxDMA messages to target :%d\n",
+			   ret);
+		goto out;
+	}
+#endif
+
 	ret = ath12k_dp_rx_htt_setup(ab);
 	if (ret)
 		goto out;
@@ -1077,15 +1559,14 @@
 	return 0;
 }
 
-static void ath12k_dp_update_vdev_search(struct ath12k_vif *arvif)
+static void ath12k_dp_update_vdev_search(struct ath12k_link_vif *arvif)
 {
-	switch (arvif->vdev_type) {
+	struct ath12k_vif *ahvif = arvif->ahvif;
+
+	switch (ahvif->vdev_type) {
 	case WMI_VDEV_TYPE_STA:
-		/* TODO: Verify the search type and flags since ast hash
-		 * is not part of peer mapv3
-		 */
-		arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
-		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+		arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
 		break;
 	case WMI_VDEV_TYPE_AP:
 	case WMI_VDEV_TYPE_IBSS:
@@ -1098,14 +1579,14 @@
 	}
 }
 
-void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif)
+void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif)
 {
 	struct ath12k_base *ab = ar->ab;
 
-	arvif->tcl_metadata |= u32_encode_bits(1, HTT_TCL_META_DATA_TYPE) |
-			       u32_encode_bits(arvif->vdev_id,
+	arvif->tcl_metadata = HTT_TCL_META_DATA_GET(1, HTT_TCL_META_DATA_TYPE) |
+			      HTT_TCL_META_DATA_GET(arvif->vdev_id,
 					       HTT_TCL_META_DATA_VDEV_ID) |
-			       u32_encode_bits(ar->pdev->pdev_id,
+			      HTT_TCL_META_DATA_GET(ar->pdev->pdev_id,
 					       HTT_TCL_META_DATA_PDEV_ID);
 
 	/* set HTT extension valid bit to 0 by default */
@@ -1115,20 +1596,109 @@
 	arvif->vdev_id_check_en = true;
 	arvif->bank_id = ath12k_dp_tx_get_bank_profile(ab, arvif, &ab->dp);
 
+	arvif->desc.info0 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_BANK_ID,
+				       arvif->bank_id);
+	arvif->desc.info1 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_CMD_NUM,
+				       arvif->tcl_metadata);
+	arvif->desc.info3 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_PMAC_ID,
+				       ar->lmac_id) |
+			    FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_VDEV_ID,
+			    	       arvif->vdev_id);
+	arvif->desc.info4 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO4_SEARCH_INDEX,
+				       arvif->ast_idx) |
+			    FIELD_PREP(HAL_TCL_DATA_CMD_INFO4_CACHE_SET_NUM,
+			    	       arvif->ast_hash);
+
 	/* TODO: error path for bank id failure */
 	if (arvif->bank_id == DP_INVALID_BANK_ID) {
 		ath12k_err(ar->ab, "Failed to initialize DP TX Banks");
 		return;
 	}
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	ath12k_dp_ppeds_update_vp_entry(ar, arvif);
+#endif
+}
+
+void ath12k_dp_umac_txrx_desc_cleanup(struct ath12k_base *ab)
+{
+	struct ath12k_rx_desc_info *desc_info;
+	struct ath12k_tx_desc_info *tx_desc_info;
+	struct ath12k_dp *dp = &ab->dp;
+	struct sk_buff *skb;
+	int i, j, k;
+	u32  tx_spt_page;
+
+	/* RX Descriptor cleanup */
+	spin_lock_bh(&dp->rx_desc_lock);
+
+	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
+		desc_info = dp->spt_info->rxbaddr[i];
+
+		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
+			if (!desc_info[j].in_use)
+				continue;
+
+			skb = desc_info[j].skb;
+			desc_info[j].skb = NULL;
+			desc_info[j].paddr = 0;
+			desc_info[j].in_use = false;
+			list_add_tail(&desc_info[j].list, &dp->rx_desc_free_list);
+
+			if (!skb)
+				continue;
+
+			dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
+					 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+			dev_kfree_skb_any(skb);
+		}
+	}
+
+	spin_unlock_bh(&dp->rx_desc_lock);
+
+	/* TX Descriptor cleanup */
+	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
+		spin_lock_bh(&dp->tx_desc_lock[i]);
+
+		for (j = 0; j < ATH12K_TX_SPT_PAGES_PER_POOL; j++) {
+			tx_spt_page = j + i * ATH12K_TX_SPT_PAGES_PER_POOL;
+			tx_desc_info = dp->spt_info->txbaddr[tx_spt_page];
+
+			for (k = 0; k < ATH12K_MAX_SPT_ENTRIES; k++) {
+				if (!tx_desc_info[k].in_use)
+					continue;
+
+				skb = tx_desc_info[k].skb;
+				if (!skb)
+					continue;
+
+				tx_desc_info[k].skb = NULL;
+				tx_desc_info[k].skb_ext_desc = NULL;
+				tx_desc_info[k].in_use = false;
+				list_add_tail(&tx_desc_info[k].list, &dp->tx_desc_free_list[i]);
+				dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
+						 skb->len, DMA_TO_DEVICE);
+				if (tx_desc_info[k].skb_ext_desc) {
+					dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr_ext_desc,
+							 tx_desc_info[k].skb_ext_desc->len, DMA_TO_DEVICE);
+					dev_kfree_skb_any(tx_desc_info[k].skb_ext_desc);
+				}
+				dev_kfree_skb_any(skb);
+			}
+		}
+
+		spin_unlock_bh(&dp->tx_desc_lock[i]);
+	}
 }
 
 static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
 {
-	struct ath12k_rx_desc_info *desc_info, *tmp;
-	struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
+	struct ath12k_rx_desc_info *desc_info;
+	struct ath12k_tx_desc_info *tx_desc_info;
 	struct ath12k_dp *dp = &ab->dp;
 	struct sk_buff *skb;
-	int i;
+	int i, j, k;
+	u32  pool_id, tx_spt_page;
 
 	if (!dp->spt_info)
 		return;
@@ -1136,10 +1706,16 @@
 	/* RX Descriptor cleanup */
 	spin_lock_bh(&dp->rx_desc_lock);
 
-	list_for_each_entry_safe(desc_info, tmp, &dp->rx_desc_used_list, list) {
-		list_del(&desc_info->list);
-		skb = desc_info->skb;
+	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
+		desc_info = dp->spt_info->rxbaddr[i];
 
+		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
+			if (!desc_info[j].in_use) {
+				list_del(&desc_info[j].list);
+				continue;
+			}
+
+			skb = desc_info[j].skb;
 		if (!skb)
 			continue;
 
@@ -1147,6 +1723,14 @@
 				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 	}
+	}
+
+	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
+		if (!dp->spt_info->rxbaddr[i])
+			continue;
+
+		kfree(dp->spt_info->rxbaddr[i]);
+	}
 
 	spin_unlock_bh(&dp->rx_desc_lock);
 
@@ -1154,22 +1738,53 @@
 	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
 		spin_lock_bh(&dp->tx_desc_lock[i]);
 
-		list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i],
-					 list) {
-			list_del(&tx_desc_info->list);
-			skb = tx_desc_info->skb;
+		for (j = 0; j < ATH12K_TX_SPT_PAGES_PER_POOL; j++) {
+			tx_spt_page = j + i * ATH12K_TX_SPT_PAGES_PER_POOL;
+			tx_desc_info = dp->spt_info->txbaddr[tx_spt_page];
+			for (k = 0; k < ATH12K_MAX_SPT_ENTRIES; k++) {
+				if (!tx_desc_info[k].in_use)
+					continue;
+
+				skb = tx_desc_info[k].skb;
 
 			if (!skb)
 				continue;
 
+				tx_desc_info[k].skb = NULL;
 			dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
 					 skb->len, DMA_TO_DEVICE);
+
+				if (tx_desc_info[k].skb_ext_desc) {
+					dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr_ext_desc,
+							 tx_desc_info[k].skb_ext_desc->len, DMA_TO_DEVICE);
+					dev_kfree_skb_any(tx_desc_info[k].skb_ext_desc);
+				}
 			dev_kfree_skb_any(skb);
 		}
+		}
 
 		spin_unlock_bh(&dp->tx_desc_lock[i]);
 	}
 
+	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
+		spin_lock_bh(&dp->tx_desc_lock[pool_id]);
+
+		for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
+			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
+			if (!dp->spt_info->txbaddr[tx_spt_page])
+				continue;
+
+			kfree(dp->spt_info->txbaddr[tx_spt_page]);
+		}
+
+		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+	}
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	if (test_bit(ATH12K_FLAG_PPE_DS_ENABLED, &ab->dev_flags))
+		ath12k_ppeds_detach(ab);
+#endif
+
 	/* unmap SPT pages */
 	for (i = 0; i < dp->num_spt_pages; i++) {
 		if (!dp->spt_info[i].vaddr)
@@ -1193,18 +1808,22 @@
 	if (!dp->reoq_lut.vaddr)
 		return;
 
+	if (dp->reoq_lut.vaddr) {
 	dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
 			  dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
 	dp->reoq_lut.vaddr = NULL;
+	}
 
-	ath12k_hif_write32(ab,
-			   HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+	if (dp->ml_reoq_lut.vaddr) {
+		dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+				  dp->ml_reoq_lut.vaddr, dp->ml_reoq_lut.paddr);
+		dp->ml_reoq_lut.vaddr = NULL;
+	}
 }
 
 void ath12k_dp_free(struct ath12k_base *ab)
 {
 	struct ath12k_dp *dp = &ab->dp;
-	int i;
 
 	ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
 				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
@@ -1216,9 +1835,6 @@
 
 	ath12k_dp_rx_reo_cmd_list_cleanup(ab);
 
-	for (i = 0; i < ab->hw_params->max_tx_ring; i++)
-		kfree(dp->tx_ring[i].tx_status);
-
 	ath12k_dp_rx_free(ab);
 	/* Deinit any SOC level resource */
 }
@@ -1230,6 +1846,9 @@
 	u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG;
 	u32 val = 0;
 
+	if (ath12k_ftm_mode)
+		return;
+
 	ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base);
 
 	val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
@@ -1283,12 +1902,13 @@
 {
 	struct ath12k_dp *dp = &ab->dp;
 
-	return dp->spt_info[ppt_idx].vaddr + spt_idx;
+	return dp->spt_info[ppt_idx].vaddr + (spt_idx * sizeof(u64));
 }
 
 struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
 						  u32 cookie)
 {
+	struct ath12k_dp *dp = &ab->dp;
 	struct ath12k_rx_desc_info **desc_addr_ptr;
 	u16 ppt_idx, spt_idx;
 
@@ -1299,6 +1919,11 @@
 	    spt_idx > ATH12K_MAX_SPT_ENTRIES)
 		return NULL;
 
+	if (WARN_ON(spt_idx < dp->rx_spt_base))
+		return NULL;
+
+	spt_idx = spt_idx - dp->rx_spt_base;
+
 	desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
 
 	return *desc_addr_ptr;
@@ -1323,16 +1948,270 @@
 	return *desc_addr_ptr;
 }
 
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+static u8 *ath12k_dp_cc_find_desc(struct ath12k_base *ab, u32 cookie, bool is_rx)
+{
+	struct ath12k_dp *dp = &ab->dp;
+	u16 spt_page_id, spt_idx;
+	u8 *spt_va;
+	spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT);
+	spt_page_id = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
+	if (is_rx) {
+		if (WARN_ON(spt_page_id < dp->rx_spt_base))
+			return NULL;
+		spt_page_id = spt_page_id - dp->rx_spt_base;
+	}
+	spt_va = (u8 *)dp->spt_info[spt_page_id].vaddr;
+	return (spt_va + spt_idx * sizeof(u64));
+}
+
+struct ath12k_ppeds_tx_desc_info *ath12k_dp_get_ppeds_tx_desc(struct ath12k_base *ab,
+						  u32 desc_id)
+{
+	u8 *desc_addr_ptr;
+
+	desc_addr_ptr = ath12k_dp_cc_find_desc(ab, desc_id, false);
+	return *(struct ath12k_ppeds_tx_desc_info **)desc_addr_ptr;
+}
+#endif
+
+static void ath12k_dp_tx_cmem_init(struct ath12k_base *ab, struct ath12k_dp *dp)
+{
+	u32 cmem_base;
+	int i;
+
+	cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
+
+	for (i = ATH12K_TX_SPT_PAGE_OFFSET;
+	     i < (ATH12K_TX_SPT_PAGE_OFFSET + ATH12K_NUM_TX_SPT_PAGES); i++) {
+		/* Write to PPT in CMEM */
+		if (ab->hif.ops->cmem_write32)
+			ath12k_hif_cmem_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
+						dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
+		else
+			ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
+					   dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
+	}
+}
+
+static void ath12k_dp_rx_cmem_init(struct ath12k_base *ab, struct ath12k_dp *dp)
+{
+	u32 cmem_base;
+	int i;
+
+	cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
+	cmem_base += ATH12K_PPT_ADDR_OFFSET(dp->rx_spt_base);
+
+	for (i = ATH12K_RX_SPT_PAGE_OFFSET;
+	     i < (ATH12K_RX_SPT_PAGE_OFFSET + ATH12K_NUM_RX_SPT_PAGES); i++) {
+		/* Write to PPT in CMEM */
+		if (ab->hif.ops->cmem_write32)
+			ath12k_hif_cmem_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
+					   dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
+		else
+			ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
+					dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
+	}
+}
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+void ath12k_dp_ppeds_tx_cmem_init(struct ath12k_base *ab, struct ath12k_dp *dp)
+{
+	u32 cmem_base;
+	int i;
+
+	cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
+
+	for (i = ATH12K_PPEDS_TX_SPT_PAGE_OFFSET;
+	     i < (ATH12K_PPEDS_TX_SPT_PAGE_OFFSET + ATH12K_NUM_PPEDS_TX_SPT_PAGES); i++) {
+		/* Write to PPT in CMEM */
+		if (ab->hif.ops->cmem_write32)
+			ath12k_hif_cmem_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
+						dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
+		else
+			ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
+					   dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
+	}
+}
+
+static void ath12k_dp_ppeds_tx_desc_cleanup(struct ath12k_base *ab)
+{
+	struct ath12k_ppeds_tx_desc_info *ppeds_tx_desc_info, *tmp2;
+	struct ath12k_dp *dp = &ab->dp;
+	struct sk_buff *skb;
+	int i;
+
+	/* PPEDS TX Descriptor cleanup */
+	for (i = 0; i < ATH12K_HW_MAX_QUEUES_PPEDS; i++) {
+		spin_lock_bh(&dp->ppeds_tx_desc_lock[i]);
+
+		/* clean up used desc list */
+		list_for_each_entry_safe(ppeds_tx_desc_info, tmp2,
+					 &dp->ppeds_tx_desc_used_list[i],
+					 list) {
+			list_move_tail(&ppeds_tx_desc_info->list,
+				       &dp->ppeds_tx_desc_free_list[i]);
+			skb = ppeds_tx_desc_info->skb;
+			ppeds_tx_desc_info->skb = NULL;
+			if (!skb) {
+				WARN_ON_ONCE(1);
+				continue;
+			}
+			dma_unmap_single_attrs(ab->dev, ATH12K_SKB_CB(skb)->paddr,
+					       skb->len, DMA_TO_DEVICE,
+					       DMA_ATTR_SKIP_CPU_SYNC);
+			dev_kfree_skb_any(skb);
+		}
+
+		/* clean up descriptors and skbs from reuse list */
+		list_for_each_entry_safe(ppeds_tx_desc_info, tmp2,
+					 &dp->ppeds_tx_desc_reuse_list[i],
+					 list) {
+			list_move_tail(&ppeds_tx_desc_info->list,
+				       &dp->ppeds_tx_desc_free_list[i]);
+			skb = ppeds_tx_desc_info->skb;
+			ppeds_tx_desc_info->skb = NULL;
+			if (!skb) {
+				WARN_ON_ONCE(1);
+				continue;
+			}
+			dma_unmap_single_attrs(ab->dev, ppeds_tx_desc_info->paddr,
+					       skb->len, DMA_TO_DEVICE,
+					       DMA_ATTR_SKIP_CPU_SYNC);
+			dev_kfree_skb_any(skb);
+		}
+		dp->ppeds_tx_desc_reuse_list_len[i] = 0;
+
+		spin_unlock_bh(&dp->ppeds_tx_desc_lock[i]);
+	}
+}
+
+int ath12k_dp_cc_ppeds_desc_cleanup(struct ath12k_base *ab)
+{
+	struct ath12k_ppeds_tx_desc_info *ppeds_tx_desc_info, *tmp2;
+	struct ath12k_dp *dp = &ab->dp;
+	struct sk_buff *skb;
+	int i;
+	u32  pool_id, ppeds_tx_spt_page;
+
+	if (!dp->spt_info) {
+		ath12k_err(ab,"ath12k_dp_cc_ppeds_desc_cleanup failed");
+		return -EINVAL;
+	}
+
+	/* PPEDS TX Descriptor cleanup */
+	for (i = 0; i < ATH12K_HW_MAX_QUEUES_PPEDS; i++) {
+		spin_lock_bh(&dp->ppeds_tx_desc_lock[i]);
+
+		/* clean up used desc list */
+		list_for_each_entry_safe(ppeds_tx_desc_info, tmp2,
+					 &dp->ppeds_tx_desc_used_list[i],
+					 list) {
+			list_del(&ppeds_tx_desc_info->list);
+			skb = ppeds_tx_desc_info->skb;
+			if (!skb)
+				continue;
+
+			dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
+					 skb->len, DMA_TO_DEVICE);
+			dev_kfree_skb_any(skb);
+		}
+
+		/* clean up descriptors and skbs from reuse list */
+		list_for_each_entry_safe(ppeds_tx_desc_info, tmp2,
+					 &dp->ppeds_tx_desc_reuse_list[i],
+					 list) {
+			list_del(&ppeds_tx_desc_info->list);
+			skb = ppeds_tx_desc_info->skb;
+			if (!skb)
+				continue;
+
+			dma_unmap_single(ab->dev, ppeds_tx_desc_info->paddr,
+					 skb->len, DMA_TO_DEVICE);
+			dev_kfree_skb_any(skb);
+		}
+
+		spin_unlock_bh(&dp->ppeds_tx_desc_lock[i]);
+	}
+
+	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES_PPEDS; pool_id++) {
+		spin_lock_bh(&dp->ppeds_tx_desc_lock[pool_id]);
+
+		for (i = 0; i < ATH12K_PPEDS_TX_SPT_PAGES_PER_POOL; i++) {
+			ppeds_tx_spt_page = i + pool_id * ATH12K_PPEDS_TX_SPT_PAGES_PER_POOL;
+			if (!dp->spt_info->ppedstxbaddr[ppeds_tx_spt_page])
+				continue;
+
+			kfree(dp->spt_info->ppedstxbaddr[ppeds_tx_spt_page]);
+		}
+
+		spin_unlock_bh(&dp->ppeds_tx_desc_lock[pool_id]);
+	}
+	ath12k_dbg(ab, ATH12K_DBG_PPE, "ath12k_dp_cc_ppeds_desc_cleanup success\n");
+
+	return 0;
+}
+
+int ath12k_dp_cc_ppeds_desc_init(struct ath12k_base *ab)
+{
+	struct ath12k_dp *dp = &ab->dp;
+	struct ath12k_ppeds_tx_desc_info *ppeds_tx_descs;
+	struct ath12k_spt_info *ppeds_tx_spt_pages;
+	u32 i, j, pool_id, ppeds_tx_spt_page;
+	u32 ppt_idx;
+
+	/* pointer to start of TX pages */
+	ppeds_tx_spt_pages = &dp->spt_info[ATH12K_PPEDS_TX_SPT_PAGE_OFFSET];
+
+	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES_PPEDS; pool_id++) {
+		spin_lock_bh(&dp->ppeds_tx_desc_lock[pool_id]);
+		for (i = 0; i < ATH12K_PPEDS_TX_SPT_PAGES_PER_POOL; i++) {
+			ppeds_tx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*ppeds_tx_descs),
+					   GFP_ATOMIC);
+
+			if (!ppeds_tx_descs) {
+				spin_unlock_bh(&dp->ppeds_tx_desc_lock[pool_id]);
+				ath12k_dp_cc_ppeds_desc_cleanup(ab);
+				return -ENOMEM;
+			}
+
+			ppeds_tx_spt_page = i + pool_id * ATH12K_PPEDS_TX_SPT_PAGES_PER_POOL;
+			dp->spt_info->ppedstxbaddr[ppeds_tx_spt_page] = &ppeds_tx_descs[0];
+
+			for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
+				ppt_idx = ATH12K_PPEDS_TX_SPT_PAGE_OFFSET + ppeds_tx_spt_page;
+				ppeds_tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
+				ppeds_tx_descs[j].pool_id = pool_id;
+				list_add_tail(&ppeds_tx_descs[j].list,
+					      &dp->ppeds_tx_desc_free_list[pool_id]);
+
+				/* Update descriptor VA in SPT */
+				*(struct ath12k_ppeds_tx_desc_info **)
+					((u8 *)ppeds_tx_spt_pages[ppeds_tx_spt_page].vaddr +
+					 (j * sizeof(u64))) = &ppeds_tx_descs[j];
+			}
+		}
+		spin_unlock_bh(&dp->ppeds_tx_desc_lock[pool_id]);
+	}
+	ath12k_dbg(ab, ATH12K_DBG_PPE, "ath12k_dp_cc_ppeds_desc_init success\n");
+
+	return 0;
+}
+#endif
+
 static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
 {
 	struct ath12k_dp *dp = &ab->dp;
-	struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
-	struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
+	struct ath12k_rx_desc_info *rx_descs;
+	struct ath12k_tx_desc_info *tx_descs;
+	struct ath12k_spt_info *tx_spt_pages, *rx_spt_pages;
 	u32 i, j, pool_id, tx_spt_page;
 	u32 ppt_idx;
 
 	spin_lock_bh(&dp->rx_desc_lock);
 
+	rx_spt_pages = &dp->spt_info[ATH12K_RX_SPT_PAGE_OFFSET];
+
 	/* First ATH12K_NUM_RX_SPT_PAGES of allocated SPT pages are used for RX */
 	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
 		rx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*rx_descs),
@@ -1343,19 +2222,26 @@
 			return -ENOMEM;
 		}
 
+		dp->spt_info->rxbaddr[i] = &rx_descs[0];
+		ppt_idx = dp->rx_spt_base + ATH12K_RX_SPT_PAGE_OFFSET + i;
+
 		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
-			rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(i, j);
+			rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(ppt_idx, j);
 			rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
+			rx_descs[j].chip_id = ab->chip_id;
 			list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
 
 			/* Update descriptor VA in SPT */
-			rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, i, j);
-			*rx_desc_addr = &rx_descs[j];
+			*(struct ath12k_rx_desc_info **)((u8 *)rx_spt_pages[i].vaddr +
+				(j * sizeof(u64))) = &rx_descs[j];
 		}
 	}
 
 	spin_unlock_bh(&dp->rx_desc_lock);
 
+	/* pointer to start of TX pages */
+	tx_spt_pages = &dp->spt_info[ATH12K_TX_SPT_PAGE_OFFSET];
+
 	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
 		spin_lock_bh(&dp->tx_desc_lock[pool_id]);
 		for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
@@ -1368,18 +2254,20 @@
 				return -ENOMEM;
 			}
 
-			for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
 				tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
-				ppt_idx = ATH12K_NUM_RX_SPT_PAGES + tx_spt_page;
+			dp->spt_info->txbaddr[tx_spt_page] = &tx_descs[0];
+
+			for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
+				ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page;
 				tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
 				tx_descs[j].pool_id = pool_id;
 				list_add_tail(&tx_descs[j].list,
 					      &dp->tx_desc_free_list[pool_id]);
 
 				/* Update descriptor VA in SPT */
-				tx_desc_addr =
-					ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
-				*tx_desc_addr = &tx_descs[j];
+				*(struct ath12k_tx_desc_info **)
+					((u8 *)tx_spt_pages[tx_spt_page].vaddr +
+					 (j * sizeof(u64))) = &tx_descs[j];
 			}
 		}
 		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
@@ -1387,6 +2275,23 @@
 	return 0;
 }
 
+void ath12k_dp_partner_cc_init(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_base *partner_ab;
+	struct ath12k_dp *dp;
+	int i;
+
+	for (i = 0; i < ag->num_chip; i++) {
+		partner_ab = ag->ab[i];
+		if (partner_ab == ab)
+			continue;
+
+		dp = &partner_ab->dp;
+		ath12k_dp_rx_cmem_init(ab, dp);
+	}
+}
+
 static int ath12k_dp_cc_init(struct ath12k_base *ab)
 {
 	struct ath12k_dp *dp = &ab->dp;
@@ -1394,7 +2299,6 @@
 	u32 cmem_base;
 
 	INIT_LIST_HEAD(&dp->rx_desc_free_list);
-	INIT_LIST_HEAD(&dp->rx_desc_used_list);
 	spin_lock_init(&dp->rx_desc_lock);
 
 	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
@@ -1415,6 +2319,7 @@
 		return -ENOMEM;
 	}
 
+	dp->rx_spt_base = ab->chip_id * ATH12K_NUM_RX_SPT_PAGES;
 	cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
 
 	for (i = 0; i < dp->num_spt_pages; i++) {
@@ -1433,18 +2338,31 @@
 			ret = -EINVAL;
 			goto free;
 		}
-
-		/* Write to PPT in CMEM */
-		ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
-				   dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
 	}
 
+	ath12k_dp_tx_cmem_init(ab, dp);
+
+	ath12k_dp_rx_cmem_init(ab, dp);
+
 	ret = ath12k_dp_cc_desc_init(ab);
 	if (ret) {
 		ath12k_warn(ab, "HW CC desc init failed %d", ret);
 		goto free;
 	}
 
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	if (test_bit(ATH12K_FLAG_PPE_DS_ENABLED, &ab->dev_flags)) {
+		for (i = 0; i < ATH12K_HW_MAX_QUEUES_PPEDS; i++) {
+			INIT_LIST_HEAD(&dp->ppeds_tx_desc_free_list[i]);
+			INIT_LIST_HEAD(&dp->ppeds_tx_desc_reuse_list[i]);
+			INIT_LIST_HEAD(&dp->ppeds_tx_desc_used_list[i]);
+			spin_lock_init(&dp->ppeds_tx_desc_lock[i]);
+			dp->ppeds_tx_desc_reuse_list_len[i] = 0;
+		}
+		ath12k_ppeds_attach(ab);
+	}
+#endif
+
 	return 0;
 free:
 	ath12k_dp_cc_cleanup(ab);
@@ -1454,6 +2372,7 @@
 static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
 {
 	struct ath12k_dp *dp = &ab->dp;
+	u32 val;
 
 	if (!ab->hw_params->reoq_lut_support)
 		return 0;
@@ -1467,8 +2386,38 @@
 		return -ENOMEM;
 	}
 
+	dp->ml_reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
+						   DP_REOQ_LUT_SIZE,
+						   &dp->ml_reoq_lut.paddr,
+						   GFP_KERNEL);
+
+	if (!dp->ml_reoq_lut.vaddr) {
+		ath12k_warn(ab, "failed to allocate memory for ML reoq table");
+
+		/* cleanup non-ML REOQ LUT allocated above */
+		dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+				  dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
+		dp->reoq_lut.vaddr = NULL;
+		return -ENOMEM;
+	}
+
+	memset(dp->reoq_lut.vaddr, 0, DP_REOQ_LUT_SIZE);
+	memset(dp->ml_reoq_lut.vaddr, 0, DP_REOQ_LUT_SIZE);
+
 	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
-			   dp->reoq_lut.paddr);
+			    dp->reoq_lut.paddr >> 8);
+
+	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE1(ab),
+			   dp->ml_reoq_lut.paddr >> 8);
+
+	val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_ADDR_READ(ab));
+
+	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_ADDR_READ(ab),
+			   val | HAL_REO_QDESC_ADDR_READ_LUT_ENABLE);
+
+	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_MAX_PEERID(ab),
+			   HAL_REO_QDESC_MAX_PEERID);
+
 	return 0;
 }
 
@@ -1480,14 +2429,19 @@
 	u32 n_link_desc = 0;
 	int ret;
 	int i;
+	u8 chip_id;
 
 	dp->ab = ab;
 
 	INIT_LIST_HEAD(&dp->reo_cmd_list);
 	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
+	INIT_LIST_HEAD(&dp->reo_cmd_update_rx_queue_list);
 	spin_lock_init(&dp->reo_cmd_lock);
+	spin_lock_init(&dp->reo_cmd_update_rx_queue_lock);
 
 	dp->reo_cmd_cache_flush_count = 0;
+	chip_id = (ab->ag->mlo_capable) ? ab->chip_id : 0;
+	dp->idle_link_rbm_id = HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST + chip_id;
 
 	ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
 	if (ret) {
@@ -1530,17 +2484,6 @@
 
 	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
 		dp->tx_ring[i].tcl_data_ring_id = i;
-
-		dp->tx_ring[i].tx_status_head = 0;
-		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
-		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
-		if (!dp->tx_ring[i].tx_status) {
-			ret = -ENOMEM;
-			/* FIXME: The allocated tx status is not freed
-			 * properly here
-			 */
-			goto fail_cmn_reoq_cleanup;
-		}
 	}
 
 	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
@@ -1556,8 +2499,6 @@
 
 fail_dp_rx_free:
 	ath12k_dp_rx_free(ab);
-
-fail_cmn_reoq_cleanup:
 	ath12k_dp_reoq_lut_cleanup(ab);
 
 fail_cmn_srng_cleanup:
@@ -1575,3 +2516,81 @@
 
 	return ret;
 }
+
+int ath12k_dp_rxdma_ring_setup(struct ath12k_base *ab)
+{
+	struct ath12k_dp *dp = &ab->dp;
+	int ret;
+	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+	LIST_HEAD(list);
+
+	ret = ath12k_dp_srng_setup(ab,
+				   &dp->rx_refill_buf_ring.refill_buf_ring,
+				   HAL_RXDMA_BUF, 0, 0,
+				   DP_RXDMA_BUF_RING_SIZE);
+
+	if (ret) {
+		ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n");
+		return ret;
+	}
+
+	ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
+	return 0;
+}
+
+void ath12k_umac_reset_handle_post_reset_start(struct ath12k_base *ab)
+{
+	struct ath12k_dp *dp = &ab->dp;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_mlo_dp_umac_reset *mlo_umac_reset = &ag->mlo_umac_reset;
+	int i, n_link_desc, ret;
+	struct hal_srng *srng = NULL;
+	unsigned long end;
+
+	ath12k_dp_srng_hw_ring_disable(ab);
+
+	/* Busy wait for 2 ms to make sure the rings are
+	 * in idle state before enabling it
+	 */
+	end = jiffies + msecs_to_jiffies(2);
+	while (time_before(jiffies, end))
+		;
+
+	ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
+
+	if (ret)
+		ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
+
+	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
+
+	ret = ath12k_dp_link_desc_setup(ab, dp->link_desc_banks,
+				  HAL_WBM_IDLE_LINK, srng, n_link_desc);
+	if (ret)
+		ath12k_warn(ab, "failed to setup link desc: %d\n", ret);
+
+	ath12k_dp_srng_common_setup(ab);
+	ath12k_dp_umac_txrx_desc_cleanup(ab);
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	if (ab->ppeds_handle)
+		ath12k_dp_ppeds_tx_desc_cleanup(ab);
+#endif
+	ath12k_dp_rxdma_ring_setup(ab);
+
+	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
+		ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
+					   HAL_REO_DST, i, 0,
+					   DP_REO_DST_RING_SIZE);
+		if (ret)
+			ath12k_warn(ab, "failed to setup reo_dst_ring\n");
+	}
+
+	ath12k_dp_rx_reo_cmd_list_cleanup(ab);
+
+	ath12k_dp_tid_cleanup(ab);
+
+	atomic_inc(&mlo_umac_reset->response_chip);
+	ath12k_umac_reset_notify_target_sync_and_send(ab, ATH12K_UMAC_RESET_TX_CMD_POST_RESET_START_DONE);
+
+	return;
+}
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/dp.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dp.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/dp.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dp.h	2024-03-18 14:40:14.847741224 +0100
@@ -6,7 +6,7 @@
 
 #ifndef ATH12K_DP_H
 #define ATH12K_DP_H
-
+#include "core.h"
 #include "hal_rx.h"
 #include "hw.h"
 
@@ -15,7 +15,8 @@
 struct ath12k_base;
 struct ath12k_peer;
 struct ath12k_dp;
-struct ath12k_vif;
+struct ath12k_link_vif;
+struct ath12k_link_sta;
 struct hal_tcl_status_ring;
 struct ath12k_ext_irq_grp;
 
@@ -29,6 +30,7 @@
 	dma_addr_t paddr;
 	int size;
 	u32 ring_id;
+	u8 cached;
 };
 
 struct dp_rxdma_ring {
@@ -39,15 +41,19 @@
 	int bufs_max;
 };
 
-#define ATH12K_TX_COMPL_NEXT(x)	(((x) + 1) % DP_TX_COMP_RING_SIZE)
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+struct dp_ppeds_tx_comp_ring {
+	struct dp_srng ppe_wbm2sw_ring;
+	struct hal_wbm_completion_ring_tx *tx_status;
+	int tx_status_head;
+	int tx_status_tail;
+};
+#endif
 
 struct dp_tx_ring {
 	u8 tcl_data_ring_id;
 	struct dp_srng tcl_data_ring;
 	struct dp_srng tcl_comp_ring;
-	struct hal_wbm_completion_ring_tx *tx_status;
-	int tx_status_head;
-	int tx_status_tail;
 };
 
 struct ath12k_pdev_mon_stats {
@@ -127,12 +133,11 @@
 struct ath12k_pdev_dp {
 	u32 mac_id;
 	atomic_t num_tx_pending;
-	wait_queue_head_t tx_empty_waitq;
 	struct dp_srng rxdma_mon_dst_ring[MAX_RXDMA_PER_PDEV];
 	struct dp_srng tx_mon_dst_ring[MAX_RXDMA_PER_PDEV];
 
 	struct ieee80211_rx_status rx_status;
-	struct ath12k_mon_data mon_data;
+	struct ath12k_mon_data *mon_data;
 };
 
 #define DP_NUM_CLIENTS_MAX 64
@@ -145,32 +150,51 @@
 
 #define DP_RX_HASH_ENABLE	1 /* Enable hash based Rx steering */
 
-#define DP_BA_WIN_SZ_MAX	256
+#define DP_BA_WIN_SZ_MAX	1024
 
 #define DP_TCL_NUM_RING_MAX	4
 
 #define DP_IDLE_SCATTER_BUFS_MAX 16
 
+#ifdef CONFIG_ATH12K_MEM_PROFILE_512M
+
+#define DP_TX_COMP_RING_SIZE		8192
+#define DP_RXDMA_MON_STATUS_RING_SIZE	512
+#define DP_RXDMA_MONITOR_BUF_RING_SIZE	256
+#define DP_RXDMA_MONITOR_DST_RING_SIZE	512
+#define ATH12K_NUM_POOL_TX_DESC		8192
+#define DP_REO2PPE_RING_SIZE		2048
+/* TODO revisit this count during testing */
+#define ATH12K_RX_DESC_COUNT		(4096)
+
+#else
+#define DP_TX_COMP_RING_SIZE		32768
+#define DP_RXDMA_MON_STATUS_RING_SIZE	2048
+#define DP_RXDMA_MONITOR_BUF_RING_SIZE	4096
+#define DP_RXDMA_MONITOR_DST_RING_SIZE	8192
+#define ATH12K_NUM_POOL_TX_DESC		32768
+#define DP_REO2PPE_RING_SIZE 16384
+/* TODO revisit this count during testing */
+#define ATH12K_RX_DESC_COUNT		(12288)
+#endif
+
 #define DP_WBM_RELEASE_RING_SIZE	64
 #define DP_TCL_DATA_RING_SIZE		512
-#define DP_TX_COMP_RING_SIZE		32768
 #define DP_TX_IDR_SIZE			DP_TX_COMP_RING_SIZE
 #define DP_TCL_CMD_RING_SIZE		32
 #define DP_TCL_STATUS_RING_SIZE		32
-#define DP_REO_DST_RING_MAX		8
+#define DP_REO_DST_RING_MAX		4
 #define DP_REO_DST_RING_SIZE		2048
 #define DP_REO_REINJECT_RING_SIZE	32
-#define DP_RX_RELEASE_RING_SIZE		1024
+#define DP_RX_RELEASE_RING_SIZE		4096
 #define DP_REO_EXCEPTION_RING_SIZE	128
-#define DP_REO_CMD_RING_SIZE		128
+#define DP_REO_CMD_RING_SIZE		256
 #define DP_REO_STATUS_RING_SIZE		2048
-#define DP_RXDMA_BUF_RING_SIZE		4096
+#define DP_RXDMA_BUF_RING_SIZE		8192
 #define DP_RXDMA_REFILL_RING_SIZE	2048
 #define DP_RXDMA_ERR_DST_RING_SIZE	1024
-#define DP_RXDMA_MON_STATUS_RING_SIZE	1024
-#define DP_RXDMA_MONITOR_BUF_RING_SIZE	4096
-#define DP_RXDMA_MONITOR_DST_RING_SIZE	2048
-#define DP_RXDMA_MONITOR_DESC_RING_SIZE	4096
+#define DP_RXDMA_MONITOR_DESC_RING_SIZE	8192
+#define DP_RX_MONITOR_BUF_LOW_TH	32
 #define DP_TX_MONITOR_BUF_RING_SIZE	4096
 #define DP_TX_MONITOR_DEST_RING_SIZE	2048
 
@@ -182,8 +206,14 @@
 #define DP_RX_BUFFER_SIZE_LITE	1024
 #define DP_RX_BUFFER_ALIGN_SIZE	128
 
-#define DP_RXDMA_BUF_COOKIE_BUF_ID	GENMASK(17, 0)
-#define DP_RXDMA_BUF_COOKIE_PDEV_ID	GENMASK(19, 18)
+#define DP_PPE2TCL_RING_SIZE 2048
+#define DP_PPE_WBM2SW_RING_SIZE 8192
+#define HAL_REO2PPE_DST_IND 6
+
+#define DP_DIR_BUF_COOKIE_BUF_ID	GENMASK(17, 0)
+#define DP_DIR_BUF_COOKIE_PDEV_ID	GENMASK(19, 18)
+
+#define DP_RXDMA_BUF_COOKIE_BUF_ID	GENMASK(19, 0)
 
 #define DP_HW2SW_MACID(mac_id) ({ typeof(mac_id) x = (mac_id); x ? x - 1 : 0; })
 #define DP_SW2HW_MACID(mac_id) ((mac_id) + 1)
@@ -195,10 +225,11 @@
 #define ATH12K_SHADOW_DP_TIMER_INTERVAL 20
 #define ATH12K_SHADOW_CTRL_TIMER_INTERVAL 10
 
-#define ATH12K_NUM_POOL_TX_DESC	32768
-
-/* TODO: revisit this count during testing */
-#define ATH12K_RX_DESC_COUNT	(12288)
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+#define ATH12K_NUM_POOL_PPEDS_TX_DESC 0x10000
+#else
+#define ATH12K_NUM_POOL_PPEDS_TX_DESC 0
+#endif
 
 #define ATH12K_PAGE_SIZE	PAGE_SIZE
 
@@ -215,7 +246,19 @@
 #define ATH12K_TX_SPT_PAGES_PER_POOL (ATH12K_NUM_POOL_TX_DESC / \
 					  ATH12K_MAX_SPT_ENTRIES)
 #define ATH12K_NUM_TX_SPT_PAGES	(ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES)
-#define ATH12K_NUM_SPT_PAGES	(ATH12K_NUM_RX_SPT_PAGES + ATH12K_NUM_TX_SPT_PAGES)
+
+#define ATH12K_PPEDS_TX_SPT_PAGE_OFFSET 0
+#define ATH12K_TX_SPT_PAGE_OFFSET ATH12K_NUM_PPEDS_TX_SPT_PAGES
+#define ATH12K_RX_SPT_PAGE_OFFSET ATH12K_NUM_PPEDS_TX_SPT_PAGES + ATH12K_NUM_TX_SPT_PAGES
+
+
+#define ATH12K_PPEDS_TX_SPT_PAGES_PER_POOL (ATH12K_NUM_POOL_PPEDS_TX_DESC / \
+					    ATH12K_MAX_SPT_ENTRIES)
+#define ATH12K_NUM_PPEDS_TX_SPT_PAGES (ATH12K_PPEDS_TX_SPT_PAGES_PER_POOL *\
+				       ATH12K_HW_MAX_QUEUES_PPEDS)
+
+#define ATH12K_NUM_SPT_PAGES	(ATH12K_NUM_TX_SPT_PAGES + ATH12K_NUM_RX_SPT_PAGES + \
+				 ATH12K_NUM_PPEDS_TX_SPT_PAGES)
 
 /* The SPT pages are divided for RX and TX, first block for RX
  * and remaining for TX
@@ -245,6 +288,9 @@
 #define DP_REO_QREF_NUM		GENMASK(31, 16)
 #define DP_MAX_PEER_ID		2047
 
+#define ATH12K_DP_PDEV_TX_LIMIT		24000
+#define ATH12K_DP_GROUP_TX_LIMIT	32000
+
 /* Total size of the LUT is based on 2K peers, each having reference
  * for 17tids, note each entry is of type ath12k_reo_queue_ref
  * hence total size is 2048 * 17 * 8 = 278528
@@ -254,6 +300,10 @@
 /* Invalid TX Bank ID value */
 #define DP_INVALID_BANK_ID -1
 
+#define MAX_TQM_RELEASE_REASON 15
+#define MAX_FW_TX_STATUS 7
+#define MAX_TCL_RING 4
+
 struct ath12k_dp_tx_bank_profile {
 	u8 is_configured;
 	u32 num_users;
@@ -273,22 +323,44 @@
 
 struct ath12k_rx_desc_info {
 	struct list_head list;
-	struct sk_buff *skb;
 	u32 cookie;
+	dma_addr_t paddr;
+	u8 chip_id		: 3,
+	   in_use		: 1;
+	struct sk_buff *skb;
 	u32 magic;
 };
 
 struct ath12k_tx_desc_info {
 	struct list_head list;
 	struct sk_buff *skb;
+	struct sk_buff *skb_ext_desc;
+	u32 desc_id; /* Cookie */
+	u8 mac_id : 5,
+	   in_use : 1;
+	u8 pool_id;
+	u8 recycler_fast_xmit;
+	ktime_t timestamp;
+};
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+struct ath12k_ppeds_tx_desc_info {
+	struct list_head list;
+	struct sk_buff *skb;
+	dma_addr_t paddr;
 	u32 desc_id; /* Cookie */
 	u8 mac_id;
 	u8 pool_id;
+	u8 flags;
 };
+#endif
 
 struct ath12k_spt_info {
 	dma_addr_t paddr;
 	u64 *vaddr;
+	struct ath12k_rx_desc_info *rxbaddr[ATH12K_NUM_RX_SPT_PAGES];
+	struct ath12k_tx_desc_info *txbaddr[ATH12K_NUM_TX_SPT_PAGES];
+	struct ath12k_ppeds_tx_desc_info *ppedstxbaddr[ATH12K_NUM_PPEDS_TX_SPT_PAGES];
 };
 
 struct ath12k_reo_queue_ref {
@@ -301,6 +373,32 @@
 	u32 *vaddr;
 };
 
+struct host_link_stats {
+	u32 tx_enqueued;
+	u32 tx_completed;
+	u32 tx_bcast_mcast;
+	u32 tx_dropped;
+	u32 tx_encap_type[HAL_TCL_ENCAP_TYPE_MAX];
+	u32 tx_encrypt_type[HAL_ENCRYPT_TYPE_MAX];
+	u32 tx_desc_type[HAL_TCL_DESC_TYPE_MAX];
+};
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+#define PPE_VP_ENTRIES_MAX 32
+#define MAX_PPEDS_IRQ_NAME_LEN 20
+#define MAX_PPEDS_IRQS 3
+struct ath12k_dp_ppe_vp_profile {
+	bool is_configured;
+	u8 vp_num;
+	u8 ppe_vp_num_idx;
+	u8 search_idx_reg_num;
+	u8 drop_prec_enable;
+	u8 to_fw;
+	u8 use_ppe_int_pri;
+	struct ath12k_link_vif *arvif;
+};
+#endif
+
 struct ath12k_dp {
 	struct ath12k_base *ab;
 	u8 num_bank_profiles;
@@ -312,14 +410,18 @@
 	u8 htt_tgt_ver_major;
 	u8 htt_tgt_ver_minor;
 	struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+	u8 idle_link_rbm_id;
 	struct dp_srng wbm_idle_ring;
 	struct dp_srng wbm_desc_rel_ring;
-	struct dp_srng tcl_cmd_ring;
-	struct dp_srng tcl_status_ring;
 	struct dp_srng reo_reinject_ring;
 	struct dp_srng rx_rel_ring;
 	struct dp_srng reo_except_ring;
 	struct dp_srng reo_cmd_ring;
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	struct dp_srng reo2ppe_ring;
+	struct dp_srng ppe2tcl_ring;
+	struct dp_ppeds_tx_comp_ring ppeds_comp_ring;
+#endif
 	struct dp_srng reo_status_ring;
 	struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
 	struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
@@ -334,12 +436,19 @@
 	 * - reo_cmd_cache_flush_count
 	 */
 	spinlock_t reo_cmd_lock;
+	struct list_head reo_cmd_update_rx_queue_list;
+	/**
+	 * protects access to the field below:
+	 * - reo_cmd_update_rx_queue_list
+	*/
+	spinlock_t reo_cmd_update_rx_queue_lock;
 	struct ath12k_hp_update_timer reo_cmd_timer;
 	struct ath12k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
 	struct ath12k_spt_info *spt_info;
 	u32 num_spt_pages;
+	u32 rx_spt_base;
 	struct list_head rx_desc_free_list;
-	struct list_head rx_desc_used_list;
+	struct list_head rx_ppeds_reuse_list;
 	/* protects the free and used desc list */
 	spinlock_t rx_desc_lock;
 
@@ -348,30 +457,72 @@
 	/* protects the free and used desc lists */
 	spinlock_t tx_desc_lock[ATH12K_HW_MAX_QUEUES];
 
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	struct list_head ppeds_tx_desc_free_list[ATH12K_HW_MAX_QUEUES_PPEDS];
+	struct list_head ppeds_tx_desc_reuse_list[ATH12K_HW_MAX_QUEUES_PPEDS];
+	struct list_head ppeds_tx_desc_used_list[ATH12K_HW_MAX_QUEUES_PPEDS];
+	int ppeds_tx_desc_reuse_list_len[ATH12K_HW_MAX_QUEUES_PPEDS];
+	/* protects the free and used desc lists */
+	spinlock_t ppeds_tx_desc_lock[ATH12K_HW_MAX_QUEUES_PPEDS];
+
+	struct ath12k_dp_ppe_vp_profile ppe_vp_profile[PPE_VP_ENTRIES_MAX];
+	char ppeds_irq_name[MAX_PPEDS_IRQS][MAX_PPEDS_IRQ_NAME_LEN];
+	int ppeds_irq[MAX_PPEDS_IRQS];
+#endif
+
 	struct dp_rxdma_ring rx_refill_buf_ring;
 	struct dp_srng rx_mac_buf_ring[MAX_RXDMA_PER_PDEV];
 	struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
 	struct dp_rxdma_ring rxdma_mon_buf_ring;
 	struct dp_rxdma_ring tx_mon_buf_ring;
 	struct ath12k_reo_q_addr_lut reoq_lut;
+	struct ath12k_reo_q_addr_lut ml_reoq_lut;
+	unsigned long ppeds_service_running;
 };
 
 /* HTT definitions */
+#define HTT_TAG_TCL_METADATA_VERSION		5
+
+#define HTT_TCL_META_DATA_TYPE_FTM		BIT(0)
+#define HTT_TCL_META_DATA_TYPE_MISSION		GENMASK(1, 0)
 
-#define HTT_TCL_META_DATA_TYPE			BIT(0)
-#define HTT_TCL_META_DATA_VALID_HTT		BIT(1)
+#define HTT_TCL_META_DATA_VALID_HTT_FTM		BIT(1)
+#define HTT_TCL_META_DATA_VALID_HTT_MISSION     BIT(2)
+
+#define HTT_TCL_META_DATA_VALID_HTT		\
+			(ath12k_ftm_mode ? HTT_TCL_META_DATA_VALID_HTT_FTM : \
+					   HTT_TCL_META_DATA_VALID_HTT_MISSION)
 
 /* vdev meta data */
-#define HTT_TCL_META_DATA_VDEV_ID		GENMASK(9, 2)
-#define HTT_TCL_META_DATA_PDEV_ID		GENMASK(11, 10)
-#define HTT_TCL_META_DATA_HOST_INSPECTED	BIT(12)
+#define HTT_TCL_META_DATA_VDEV_ID_FTM		 GENMASK(9, 2)
+#define HTT_TCL_META_DATA_PDEV_ID_FTM		 GENMASK(11, 10)
+#define HTT_TCL_META_DATA_HOST_INSPECTED_FTM	 BIT(12)
+#define HTT_TCL_META_DATA_VDEV_ID_MISSION	 GENMASK(10, 3)
+#define HTT_TCL_META_DATA_PDEV_ID_MISSION	 GENMASK(12, 11)
+#define HTT_TCL_META_DATA_HOST_INSPECTED_MISSION BIT(13)
 
 /* peer meta data */
-#define HTT_TCL_META_DATA_PEER_ID		GENMASK(15, 2)
+#define HTT_TCL_META_DATA_PEER_ID_FTM		GENMASK(15, 2)
+#define HTT_TCL_META_DATA_PEER_ID_MISSION	GENMASK(15, 3)
+
+#define HTT_TCL_META_DATA_GET(_val, _mask)      \
+		(ath12k_ftm_mode ? u32_encode_bits(_val, _mask##_FTM) : \
+				   u32_encode_bits(_val, _mask##_MISSION))
 
 #define HTT_TX_WBM_COMP_STATUS_OFFSET 8
 
-/* HTT tx completion is overlaid in wbm_release_ring */
+/* Global sequence number */
+#define HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM		3
+#define HTT_TCL_META_DATA_GLOBAL_SEQ_HOST_INSPECTED	BIT(2)
+#define HTT_TCL_META_DATA_GLOBAL_SEQ_NUM		GENMASK(14, 3)
+#define HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID	128
+
+/* Service Class meta data */
+#define HTT_TCL_META_DATA_TYPE_SVC_ID_BASED	2
+#define HTT_TCL_META_DATA_SAWF_SVC_ID		GENMASK(10, 3)
+#define HTT_TCL_META_DATA_SAWF_TID_OVERRIDE	BIT(12)
+
+/* HTT tx completion is overlaid in wbm_release_ring v3 version */
 #define HTT_TX_WBM_COMP_INFO0_STATUS		GENMASK(16, 13)
 #define HTT_TX_WBM_COMP_INFO1_REINJECT_REASON	GENMASK(3, 0)
 #define HTT_TX_WBM_COMP_INFO1_EXCEPTION_FRAME	BIT(4)
@@ -379,7 +530,6 @@
 #define HTT_TX_WBM_COMP_INFO2_ACK_RSSI		GENMASK(31, 24)
 
 struct htt_tx_wbm_completion {
-	__le32 rsvd0[2];
 	__le32 info0;
 	__le32 info1;
 	__le32 info2;
@@ -395,14 +545,27 @@
 	HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG	= 0xc,
 	HTT_H2T_MSG_TYPE_EXT_STATS_CFG		= 0x10,
 	HTT_H2T_MSG_TYPE_PPDU_STATS_CFG		= 0x11,
+	HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG	= 0x19,
 	HTT_H2T_MSG_TYPE_VDEV_TXRX_STATS_CFG	= 0x1a,
 	HTT_H2T_MSG_TYPE_TX_MONITOR_CFG		= 0x1b,
+	HTT_H2T_MSG_TYPE_SAWF_DEF_Q_MAP_REQ	= 0x1c,
+	HTT_H2T_MSG_TYPE_SAWF_DEF_Q_UNMAP_REQ	= 0x1d,
+	HTT_H2T_MSG_TYPE_SAWF_DEF_Q_MAP_REPORT_REQ = 0x1e,
+	HTT_H2T_MSG_TYPE_STREAMING_STATS_REQ	= 0x20,
+	HTT_H2T_MSG_TYPE_UMAC_RESET_PREREQUISITE_SETUP = 0x21,
+	HTT_H2T_MSG_TYPE_UMAC_RESET_START_PRE_RESET = 0x22,
 };
 
 #define HTT_VER_REQ_INFO_MSG_ID		GENMASK(7, 0)
+#define HTT_OPTION_TCL_METADATA_VER_V2	2
+#define HTT_OPTION_TAG			GENMASK(7, 0)
+#define HTT_OPTION_LEN			GENMASK(15, 8)
+#define HTT_OPTION_VALUE		GENMASK(31, 16)
+#define HTT_TCL_METADATA_VER_SZ		4
 
 struct htt_ver_req_cmd {
 	__le32 ver_reg_info;
+	__le32 tcl_metadata_version;
 } __packed;
 
 enum htt_srng_ring_type {
@@ -417,11 +580,14 @@
 	HTT_RXDMA_MONITOR_BUF_RING,
 	HTT_RXDMA_MONITOR_DESC_RING,
 	HTT_RXDMA_MONITOR_DEST_RING,
+	HTT_RXDMA_HOST_BUF_RING2,
 	HTT_HOST1_TO_FW_RXBUF_RING,
 	HTT_HOST2_TO_FW_RXBUF_RING,
 	HTT_RXDMA_NON_MONITOR_DEST_RING,
 	HTT_TX_MON_HOST2MON_BUF_RING,
 	HTT_TX_MON_MON2HOST_DEST_RING,
+	HTT_RX_MON_HOST2MON_BUF_RING,
+	HTT_RX_MON_MON2HOST_DEST_RING,
 };
 
 /* host -> target  HTT_SRING_SETUP message
@@ -747,8 +913,15 @@
 #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID	GENMASK(23, 16)
 #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS		BIT(24)
 #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS		BIT(25)
+
 #define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE	GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT	GENMASK(18, 16)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL	GENMASK(21, 19)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA	GENMASK(24, 22)
+
 #define HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID      BIT(26)
+#define HTT_RX_RING_SELECTION_CFG_CMD_DROP_THRES_VAL	BIT(27)
+#define HTT_RX_RING_SELECTION_CFG_CMD_RXMON_GCONF_EN	BIT(28)
 
 #define HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET      GENMASK(15, 0)
 #define HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET      GENMASK(31, 16)
@@ -758,6 +931,11 @@
 #define HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET  GENMASK(31, 16)
 #define HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET   GENMASK(15, 0)
 
+#define HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACTION_ENABLE_SET	BIT(23)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK   GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK   GENMASK(18, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK   GENMASK(16, 0)
+
 enum htt_rx_filter_tlv_flags {
 	HTT_RX_FILTER_TLV_FLAGS_MPDU_START		= BIT(0),
 	HTT_RX_FILTER_TLV_FLAGS_MSDU_START		= BIT(1),
@@ -772,6 +950,7 @@
 	HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS	= BIT(10),
 	HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT	= BIT(11),
 	HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE	= BIT(12),
+	HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO	= BIT(13),
 };
 
 enum htt_rx_mgmt_pkt_filter_tlv_flags0 {
@@ -1060,6 +1239,21 @@
 		HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
 		HTT_RX_FILTER_TLV_FLAGS_ATTENTION)
 
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING \
+		(HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+		 HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \
+		 HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+		 HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \
+		 HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \
+		 HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \
+		 HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
+		 HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+		 HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+		 HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+		 HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+		 HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
+		 HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO)
+
 /* msdu start. mpdu end, attention, rx hdr tlv's are not subscribed */
 #define HTT_RX_TLV_FLAGS_RXDMA_RING \
 		(HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
@@ -1081,8 +1275,25 @@
 	__le32 rx_mpdu_offset;
 	__le32 rx_msdu_offset;
 	__le32 rx_attn_offset;
+	__le32 info2;
+	__le32 reserved[2];
+	__le16 rx_mpdu_start_word_mask;
+	__le16 rx_mpdu_end_word_mask;
+	__le32 rx_msdu_end_word_mask;
+	__le32 info3;
 } __packed;
 
+#define HTT_RX_TLV_FILTER_INFO0_RX_DROP_THRESHOLD		GENMASK(9, 0)
+#define HTT_RX_TLV_FILTER_INFO0_EN_MSDU_MPDU_LOG_MGMT_TYPE	BIT(17)
+#define HTT_RX_TLV_FILTER_INFO0_EN_MSDU_MPDU_LOG_CTRL_TYPE	BIT(18)
+#define HTT_RX_TLV_FILTER_INFO0_EN_MSDU_MPDU_LOG_DATA_TYPE	BIT(19)
+#define HTT_RX_TLV_FILTER_INFO1_EN_RX_TLV_PKT_OFFSET		BIT(0)
+#define HTT_RX_TLV_FILTER_INFO1_RX_PKT_TLV_OFFSET		GENMASK(14, 1)
+
+#define HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE	32
+#define HTT_RX_RING_DEFAULT_DMA_LENGTH		0x7
+#define HTT_RX_RING_PKT_TLV_OFFSET		0x1
+
 struct htt_rx_ring_tlv_filter {
 	u32 rx_filter; /* see htt_rx_filter_tlv_flags */
 	u32 pkt_filter_flags0; /* MGMT */
@@ -1097,6 +1308,17 @@
 	u16 rx_msdu_end_offset;
 	u16 rx_msdu_start_offset;
 	u16 rx_attn_offset;
+	u32 rx_drop_threshold;
+	u32 conf_len_ctrl;
+	u32 conf_len_mgmt;
+	u32 conf_len_data;
+	u32 info0;
+	u32 info1;
+	bool drop_threshold_valid;
+	bool rxmon_disable;
+	u16 rx_mpdu_start_wmask;
+	u16 rx_mpdu_end_wmask;
+	u32 rx_msdu_end_wmask;
 };
 
 #define HTT_STATS_FRAME_CTRL_TYPE_MGMT  0x0
@@ -1195,8 +1417,13 @@
 	HTT_T2H_MSG_TYPE_EXT_STATS_CONF = 0x1c,
 	HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND = 0x24,
 	HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND = 0x28,
+	HTT_T2H_MSG_TYPE_MLO_RX_PEER_MAP = 0x29,
+	HTT_T2H_MSG_TYPE_MLO_RX_PEER_UNMAP = 0x2a,
 	HTT_T2H_MSG_TYPE_PEER_MAP3	= 0x2b,
 	HTT_T2H_MSG_TYPE_VDEV_TXRX_STATS_PERIODIC_IND = 0x2c,
+	HTT_T2H_MSG_TYPE_SAWF_DEF_QUEUES_MAP_REPORT_CONF = 0x2d,
+	HTT_T2H_MSG_TYPE_SAWF_MSDUQ_INFO_IND = 0x2e,
+	HTT_T2H_MSG_TYPE_STREAMING_STATS_IND = 0x2f,
 };
 
 #define HTT_TARGET_VERSION_MAJOR 3
@@ -1214,6 +1441,8 @@
 #define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16	GENMASK(15, 0)
 #define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID	GENMASK(31, 16)
 #define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL	GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID	GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL	GENMASK(31, 16)
 #define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M	BIT(16)
 #define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S	16
 
@@ -1249,8 +1478,7 @@
 	(((u64)__le32_to_cpu(msg_u32) << 32) | (__le32_to_cpu(msg_l32)))
 #define HTT_T2H_VDEV_STATS_PERIODIC_MSG_TYPE		GENMASK(7, 0)
 #define HTT_T2H_VDEV_STATS_PERIODIC_PDEV_ID		GENMASK(15, 8)
-#define HTT_T2H_VDEV_STATS_PERIODIC_NUM_VDEV		GENMASK(23, 16)
-#define HTT_T2H_VDEV_STATS_PERIODIC_PAYLOAD_BYTES	GENMASK(15, 0)
+#define HTT_T2H_VDEV_STATS_PERIODIC_PAYLOAD_BYTES	GENMASK(23, 16)
 #define HTT_VDEV_TXRX_STATS_COMMON_TLV		0
 #define HTT_VDEV_TXRX_STATS_HW_STATS_TLV	1
 
@@ -1283,6 +1511,124 @@
 	__le32 soc_drop_count_hi;
 } __packed;
 
+#define HTT_BACKPRESSURE_EVENT_PDEV_ID_M GENMASK(15, 8)
+#define HTT_BACKPRESSURE_EVENT_RING_TYPE_M GENMASK(23, 16)
+#define HTT_BACKPRESSURE_EVENT_RING_ID_M GENMASK(31, 24)
+
+#define HTT_BACKPRESSURE_EVENT_HP_M GENMASK(15, 0)
+#define HTT_BACKPRESSURE_EVENT_TP_M GENMASK(31, 16)
+
+#define HTT_BACKPRESSURE_UMAC_RING_TYPE	0
+#define HTT_BACKPRESSURE_LMAC_RING_TYPE	1
+
+enum htt_backpressure_umac_ringid {
+	HTT_SW_RING_IDX_REO_REO2SW1_RING,
+	HTT_SW_RING_IDX_REO_REO2SW2_RING,
+	HTT_SW_RING_IDX_REO_REO2SW3_RING,
+	HTT_SW_RING_IDX_REO_REO2SW4_RING,
+	HTT_SW_RING_IDX_REO_WBM2REO_LINK_RING,
+	HTT_SW_RING_IDX_REO_REO2TCL_RING,
+	HTT_SW_RING_IDX_REO_REO2FW_RING,
+	HTT_SW_RING_IDX_REO_REO_RELEASE_RING,
+	HTT_SW_RING_IDX_WBM_PPE_RELEASE_RING,
+	HTT_SW_RING_IDX_TCL_TCL2TQM_RING,
+	HTT_SW_RING_IDX_WBM_TQM_RELEASE_RING,
+	HTT_SW_RING_IDX_WBM_REO_RELEASE_RING,
+	HTT_SW_RING_IDX_WBM_WBM2SW0_RELEASE_RING,
+	HTT_SW_RING_IDX_WBM_WBM2SW1_RELEASE_RING,
+	HTT_SW_RING_IDX_WBM_WBM2SW2_RELEASE_RING,
+	HTT_SW_RING_IDX_WBM_WBM2SW3_RELEASE_RING,
+	HTT_SW_RING_IDX_REO_REO_CMD_RING,
+	HTT_SW_RING_IDX_REO_REO_STATUS_RING,
+	HTT_SW_UMAC_RING_IDX_MAX,
+};
+
+enum htt_backpressure_lmac_ringid {
+	HTT_SW_RING_IDX_FW2RXDMA_BUF_RING,
+	HTT_SW_RING_IDX_FW2RXDMA_STATUS_RING,
+	HTT_SW_RING_IDX_FW2RXDMA_LINK_RING,
+	HTT_SW_RING_IDX_SW2RXDMA_BUF_RING,
+	HTT_SW_RING_IDX_WBM2RXDMA_LINK_RING,
+	HTT_SW_RING_IDX_RXDMA2FW_RING,
+	HTT_SW_RING_IDX_RXDMA2SW_RING,
+	HTT_SW_RING_IDX_RXDMA2RELEASE_RING,
+	HTT_SW_RING_IDX_RXDMA2REO_RING,
+	HTT_SW_RING_IDX_MONITOR_STATUS_RING,
+	HTT_SW_RING_IDX_MONITOR_BUF_RING,
+	HTT_SW_RING_IDX_MONITOR_DESC_RING,
+	HTT_SW_RING_IDX_MONITOR_DEST_RING,
+	HTT_SW_LMAC_RING_IDX_MAX,
+};
+
+/* MSG_TYPE => HTT_T2H_SAWF_MSDUQ_INFO_IND
+ *
+ * @details
+ * When SAWF is enabled and a flow is mapped to a policy during the traffic
+ * flow if the flow is seen the associated service class is conveyed to the
+ * target via TCL Data Command. Target on the other hand internally creates the
+ * MSDUQ. Once the target creates the MSDUQ the target sends the information
+ * of the newly created MSDUQ and some other identifiers to uniquely identify
+ * the newly created MSDUQ
+ *
+ * |31    27|          24|23    16|15|14          11|10|9 8|7     4|3    0|
+ * |------------------------------+------------------------+--------------|
+ * |             peer ID          |         HTT qtype      |   msg type   |
+ * |---------------------------------+--------------+--+---+-------+------|
+ * |            reserved             |AST list index|FO|WC | HLOS  | remap|
+ * |                                 |              |  |   | TID   | TID  |
+ * |---------------------+------------------------------------------------|
+ * |    reserved1        |               tgt_opaque_id                    |
+ * |---------------------+------------------------------------------------|
+ *
+ * Header fields:
+ *
+ * info0 - b'7:0       - msg_type: This will be set to
+ *                        0x2e (HTT_T2H_SAWF_MSDUQ_INFO_IND)
+ *          b'15:8      - HTT qtype
+ *          b'31:16     - peer ID
+ *
+ * info1 - b'3:0       - remap TID, as assigned in firmware
+ *          b'7:4       - HLOS TID, as sent by host in TCL Data Command
+ *                        hlos_tid : Common to Lithium and Beryllium
+ *          b'9:8       - who_classify_info_sel (WC), as sent by host in
+ *                        TCL Data Command : Beryllium
+ *          b10         - flow_override (FO), as sent by host in
+ *                        TCL Data Command: Beryllium
+ *          b11:14      - ast_list_idx
+ *                        Array index into the list of extension AST entries
+ *                        (not the actual AST 16-bit index).
+ *                        The ast_list_idx is one-based, with the following
+ *                        range of values:
+ *                          - legacy targets supporting 16 user-defined
+ *                            MSDU queues: 1-2
+ *                          - legacy targets supporting 48 user-defined
+ *                            MSDU queues: 1-6
+ *                          - new targets: 0 (peer_id is used instead)
+ *                        Note that since ast_list_idx is one-based,
+ *                        the host will need to subtract 1 to use it as an
+ *                        index into a list of extension AST entries.
+ *          b15:31      - reserved
+ *
+ * info2 - b'23:0      - tgt_opaque_id Opaque Tx flow number which is a
+ *                        unique MSDUQ id in firmware
+ *          b'24:31     - reserved1
+ */
+
+#define HTT_T2H_SAWF_MSDUQ_INFO_0_IND_HTT_QTYPE_ID             GENMASK(15, 8)
+#define HTT_T2H_SAWF_MSDUQ_INFO_0_IND_PEER_ID                  GENMASK(31, 16)
+#define HTT_T2H_SAWF_MSDUQ_INFO_1_IND_REMAP_TID_ID             GENMASK(3, 0)
+#define HTT_T2H_SAWF_MSDUQ_INFO_1_IND_HLOS_TID_ID              GENMASK(7, 4)
+#define HTT_T2H_SAWF_MSDUQ_INFO_1_IND_WHO_CLSFY_INFO_SEL_ID    GENMASK(9, 8)
+#define HTT_T2H_SAWF_MSDUQ_INFO_1_IND_FLOW_OVERRIDE_ID         BIT(10)
+#define HTT_T2H_SAWF_MSDUQ_INFO_1_IND_AST_INDEX_ID             GENMASK(14, 11)
+#define HTT_T2H_SAWF_MSDUQ_INFO_2_IND_TGT_OPAQUE_ID            GENMASK(23, 0)
+
+struct htt_t2h_sawf_info_ind {
+	__le32 info0;
+	__le32 info1;
+	__le32 info2;
+} __packed;
+
 /* ppdu stats
  *
  * @details
@@ -1352,6 +1698,8 @@
 	HTT_PPDU_STATS_BANDWIDTH_80MHZ  = 4,
 	HTT_PPDU_STATS_BANDWIDTH_160MHZ = 5, /* includes 80+80 */
 	HTT_PPDU_STATS_BANDWIDTH_DYN    = 6,
+	HTT_PPDU_STATS_BANDWIDTH_DYN_PATTERNS = 7,
+	HTT_PPDU_STATS_BANDWIDTH_320MHZ = 8,
 };
 
 #define HTT_PPDU_STATS_CMN_FLAGS_FRAME_TYPE_M	GENMASK(7, 0)
@@ -1386,6 +1734,7 @@
 
 #define HTT_PPDU_STATS_USER_RATE_INFO0_USER_POS_M	GENMASK(3, 0)
 #define HTT_PPDU_STATS_USER_RATE_INFO0_MU_GROUP_ID_M	GENMASK(11, 4)
+#define HTT_PPDU_STATS_USER_RATE_INFO0_RU_SIZE		GENMASK(15, 12)
 
 enum HTT_PPDU_STATS_PPDU_TYPE {
 	HTT_PPDU_STATS_PPDU_TYPE_SU,
@@ -1400,7 +1749,12 @@
 	HTT_PPDU_STATS_PPDU_TYPE_MAX
 };
 
-#define HTT_PPDU_STATS_USER_RATE_INFO1_RESP_TYPE_VALD_M	BIT(0)
+enum HTT_PPDU_STATS_RESP_PPDU_TYPE {
+	HTT_PPDU_STATS_RESP_PPDU_TYPE_MU_MIMO_UL,
+	HTT_PPDU_STATS_RESP_PPDU_TYPE_MU_OFDMA_UL,
+};
+
+#define HTT_PPDU_STATS_USER_RATE_INFO1_RESP_TYPE_VALID	BIT(0)
 #define HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M	GENMASK(5, 1)
 
 #define HTT_PPDU_STATS_USER_RATE_FLAGS_LTF_SIZE_M	GENMASK(1, 0)
@@ -1427,6 +1781,8 @@
 		le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M)
 #define HTT_USR_RATE_DCM(_val) \
 		le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M)
+#define HTT_USR_RATE_PPDU_TYPE(_val) \
+		u32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M)
 
 #define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LTF_SIZE_M		GENMASK(1, 0)
 #define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_STBC_M		BIT(2)
@@ -1439,6 +1795,10 @@
 #define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_GI_M		GENMASK(27, 24)
 #define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_DCM_M		BIT(28)
 #define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LDPC_M		BIT(29)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_PPDU_TYPE		GENMASK(31, 30)
+
+#define HTT_USR_RESP_RATE_PPDU_TYPE(_val) \
+	u32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_PPDU_TYPE)
 
 struct htt_ppdu_stats_user_rate {
 	u8 tid_num;
@@ -1453,6 +1813,8 @@
 	__le32 rate_flags; /* %HTT_PPDU_STATS_USER_RATE_FLAGS_ */
 	/* Note: resp_rate_info is only valid for if resp_type is UL */
 	__le32 resp_rate_flags; /* %HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_ */
+	__le16 punctured;
+	__le16 reserved1;
 } __packed;
 
 #define HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M		GENMASK(7, 0)
@@ -1519,6 +1881,7 @@
 #define HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM	GENMASK(31, 25)
 
 #define HTT_PPDU_STATS_NON_QOS_TID	16
+#define HTT_PPDU_STATS_PPDU_ID		GENMASK(24, 0)
 
 struct htt_ppdu_stats_usr_cmpltn_ack_ba_status {
 	__le32 ppdu_id;
@@ -1559,6 +1922,44 @@
 	struct list_head list;
 };
 
+/* @brief target -> host packet log message
+ *
+ * @details
+ * The following field definitions describe the format of the packet log
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable number
+ * of 32-bit character values.
+ *
+ * |31                         16|15  12|11   10|9    8|7            0|
+ * |------------------------------------------------------------------|
+ * |        payload_size         | rsvd |pdev_id|mac_id|   msg type   |
+ * |------------------------------------------------------------------|
+ * |                              payload                             |
+ * |------------------------------------------------------------------|
+ *   - MSG_TYPE
+ *     Bits 7:0
+ *     Purpose: identifies this as a pktlog message
+ *     Value: HTT_T2H_MSG_TYPE_PKTLOG
+ *   - mac_id
+ *     Bits 9:8
+ *     Purpose: identifies which MAC/PHY instance generated this pktlog info
+ *     Value: 0-3
+ *   - pdev_id
+ *     Bits 11:10
+ *     Purpose: pdev_id
+ *     Value: 0-3
+ *     0 (for rings at SOC level),
+ *     1/2/3 PDEV -> 0/1/2
+ *   - payload_size
+ *     Bits 31:16
+ *     Purpose: explicitly specify the payload size
+ *     Value: payload size in bytes (payload size is a multiple of 4 bytes)
+ */
+struct htt_pktlog_msg {
+	u32 hdr;
+	u8 payload[0];
+};
+
 /* @brief target -> host MLO offset indiciation message
  *
  * @details
@@ -1756,12 +2157,13 @@
  *   4 bit htt_tx_tid_stats_tlv/htt_tx_tid_stats_v1_tlv
  *   5 bit htt_rx_tid_stats_tlv
  *   6 bit htt_msdu_flow_stats_tlv
+ *   7 bit htt_peer_sched_stats_tlv
  * @config_param2: [Bit31 : Bit0] mac_addr31to0
  * @config_param3: [Bit15 : Bit0] mac_addr47to32
  *                [Bit31 : Bit16] reserved
  */
 #define HTT_STAT_PEER_INFO_MAC_ADDR BIT(0)
-#define HTT_STAT_DEFAULT_PEER_REQ_TYPE 0x7f
+#define HTT_STAT_DEFAULT_PEER_REQ_TYPE 0xff
 
 /* Used to set different configs to the specified stats type.*/
 struct htt_ext_stats_cfg_params {
@@ -1771,10 +2173,128 @@
 	u32 cfg3;
 };
 
-enum vdev_stats_offload_timer_duration {
-	ATH12K_STATS_TIMER_DUR_500MS = 1,
-	ATH12K_STATS_TIMER_DUR_1SEC = 2,
-	ATH12K_STATS_TIMER_DUR_2SEC = 3,
+struct htt_h2t_msg_type_vdev_txrx_stats_req {
+	u32 hdr;
+	u32 vdev_id_lo_bitmask;
+	u32 vdev_id_hi_bitmask;
+};
+
+#define HTT_H2T_RXOLE_PPE_CFG_MSG_TYPE			GENMASK(7, 0)
+#define HTT_H2T_RXOLE_PPE_CFG_OVERRIDE			BIT(8)
+#define HTT_H2T_RXOLE_PPE_CFG_REO_DST_IND		GENMASK(13, 9)
+#define HTT_H2T_RXOLE_PPE_CFG_MULTI_BUF_MSDU_OVRD_EN	BIT(14)
+#define HTT_H2T_RXOLE_PPE_CFG_INTRA_BUS_OVRD		BIT(15)
+#define HTT_H2T_RXOLE_PPE_CFG_DECAP_RAW_OVRD		BIT(16)
+#define HTT_H2T_RXOLE_PPE_CFG_NWIFI_OVRD		BIT(17)
+#define HTT_H2T_RXOLE_PPE_CFG_IP_FRAG_OVRD		BIT(18)
+
+struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg {
+	u32 info0;
+};
+
+#define HTT_H2T_VDEV_TXRX_HDR_MSG_TYPE		GENMASK(7, 0)
+#define HTT_H2T_VDEV_TXRX_HDR_PDEV_ID		GENMASK(15, 8)
+#define HTT_H2T_VDEV_TXRX_HDR_ENABLE		BIT(16)
+#define HTT_H2T_VDEV_TXRX_HDR_INTERVAL		GENMASK(24, 17)
+#define HTT_H2T_VDEV_TXRX_HDR_RESET_STATS	GENMASK(26, 25)
+#define HTT_H2T_VDEV_TXRX_LO_BITMASK		GENMASK(31, 0)
+#define HTT_H2T_VDEV_TXRX_HI_BITMASK		GENMASK_ULL(63, 32)
+
+#define ATH12K_STATS_TIMER_DUR_1SEC		1000
+/* @brief target -> host extended statistics upload
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for, and a single stats can span over multiple HTT stats indication
+ * due to the HTT message size limitation so every HTT ext stats indication
+ * will have tag-length-value stats information elements.
+ * The tag-length header for each HTT stats IND message also includes a
+ * status field, to indicate whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A Done bit set to 1 indicates the end of the stats info elements.
+ *
+ *
+ * |31                         16|15    12|11|10 8|7   5|4       0|
+ * |--------------------------------------------------------------|
+ * |                   reserved                   |    msg type   |
+ * |--------------------------------------------------------------|
+ * |                         cookie LSBs                          |
+ * |--------------------------------------------------------------|
+ * |                         cookie MSBs                          |
+ * |--------------------------------------------------------------|
+ * |      stats entry length     | rsvd   | D|  S |   stat type   |
+ * |--------------------------------------------------------------|
+ * |                   type-specific stats info                   |
+ * |                      (see htt_stats.h)                       |
+ * |--------------------------------------------------------------|
+ * Header fields:
+ *  - MSG_TYPE
+ *    Bits 7:0
+ *    Purpose: Identifies this is an extended statistics upload confirmation
+ *             message.
+ *    Value: 0x1c
+ *  - COOKIE_LSBS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to match a target->host stats confirmation
+ *        message with its preceding host->target stats request message.
+ *    Value: LSBs of the opaque cookie specified by the host-side requestor
+ *  - COOKIE_MSBS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to match a target->host stats confirmation
+ *        message with its preceding host->target stats request message.
+ *    Value: MSBs of the opaque cookie specified by the host-side requestor
+ *
+ * Stats Information Element tag-length header fields:
+ *  - STAT_TYPE
+ *    Bits 7:0
+ *    Purpose: identifies the type of statistics info held in the
+ *        following information element
+ *    Value: htt_dbg_ext_stats_type
+ *  - STATUS
+ *    Bits 10:8
+ *    Purpose: indicate whether the requested stats are present
+ *    Value: htt_dbg_ext_stats_status
+ *  - DONE
+ *    Bits 11
+ *    Purpose:
+ *        Indicates the completion of the stats entry, this will be the last
+ *        stats conf HTT segment for the requested stats type.
+ *    Value:
+ *        0 -> the stats retrieval is ongoing
+ *        1 -> the stats retrieval is complete
+ *  - LENGTH
+ *    Bits 31:16
+ *    Purpose: indicate the stats information size
+ *    Value: This field specifies the number of bytes of stats information
+ *       that follows the element tag-length header.
+ *       It is expected but not required that this length is a multiple of
+ *       4 bytes.
+ */
+
+#define HTT_T2H_EXT_STATS_INFO1_DONE	BIT(11)
+#define HTT_T2H_EXT_STATS_INFO1_LENGTH   GENMASK(31, 16)
+
+struct ath12k_htt_extd_stats_msg {
+	u32 info0;
+	u64 cookie;
+	u32 info1;
+	u8 data[0];
+} __packed;
+
+#define	HTT_MAC_ADDR_L32_0	GENMASK(7, 0)
+#define	HTT_MAC_ADDR_L32_1	GENMASK(15, 8)
+#define	HTT_MAC_ADDR_L32_2	GENMASK(23, 16)
+#define	HTT_MAC_ADDR_L32_3	GENMASK(31, 24)
+#define	HTT_MAC_ADDR_H16_0	GENMASK(7, 0)
+#define	HTT_MAC_ADDR_H16_1	GENMASK(15, 8)
+
+struct htt_mac_addr {
+	u32 mac_addr_l32;
+	u32 mac_addr_h16;
 };
 
 static inline void ath12k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr)
@@ -1783,20 +2303,287 @@
 	memcpy(addr + 4, &addr_h16, ETH_ALEN - 4);
 }
 
+#define ATH12K_ML_PEER_ID		GENMASK(13, 0)
+#define ATH12K_ML_PEER_ID_VALID		BIT(13)
+
+#define ATH12K_PEER_ID_INVALID		0x3FFF
+
+#define ATH12K_HTT_MLO_PEER_MAP_TLV_LINK_INFO_TAG	0
+#define ATH12K_HTT_MAX_MLO_LINKS	3
+#define ATH12K_HTT_MLO_CHIP_ID		GENMASK(2, 0)
+
+struct ath11k_htt_mlo_link_peer_info {
+	struct htt_tlv tlv_hdr;
+	u16 sw_peer_id;
+	u8 vdev_id;
+	u8 chip_id;
+} __packed;
+
+#define ATH12K_HTT_MLO_PEER_MAP_INFO0_PEER_ID		GENMASK(23, 8)
+#define ATH12K_HTT_MLO_PEER_MAP_MAC_ADDR_H16		GENMASK(15, 0)
+
+struct ath11k_htt_mlo_peer_map_msg {
+	u32 info0;
+	struct htt_mac_addr mac_addr;
+	u32 info1;
+	u32 info2;
+	u32 info3;
+	u32 rsvd0;
+	u32 rsvd1;
+	struct ath11k_htt_mlo_link_peer_info link_peer[ATH12K_HTT_MAX_MLO_LINKS];
+} __packed;
+
+#define ATH12K_HTT_MLO_PEER_UNMAP_PEER_ID		GENMASK(23, 8)
+struct ath11k_htt_mlo_peer_unmap_msg {
+	u32 info0;
+} __packed;
+
+/**
+ * struct ath12k_dp_htt_rxdma_ppe_cfg_param - Rx DMA and RxOLE PPE config
+ * @override: RxDMA override to override the reo_destination_indication
+ * @reo_dst_ind: REO destination indication value
+ * @multi_buffer_msdu_override_en: Override the indication for SG
+ * @intra_bss_override: Rx OLE IntraBSS override
+ * @decap_raw_override: Rx Decap Raw override
+ * @decap_nwifi_override: Rx Decap Native WiFi override
+ * @ip_frag_override: IP fragments override
+ */
+struct ath12k_dp_htt_rxdma_ppe_cfg_param {
+	u8 override;
+	u8 reo_dst_ind;
+	u8 multi_buffer_msdu_override_en;
+	u8 intra_bss_override;
+	u8 decap_raw_override;
+	u8 decap_nwifi_override;
+	u8 ip_frag_override;
+};
+
+#define HTT_ATH12K_UMAC_RESET_T2H_DO_PRE_RESET	BIT(0)
+#define HTT_ATH12K_UMAC_RESET_T2H_DO_POST_RESET_START	BIT(1)
+#define HTT_ATH12K_UMAC_RESET_T2H_DO_POST_RESET_COMPLETE	BIT(2)
+#define HTT_ATH12K_UMAC_RESET_T2H_INIT_UMAC_RECOVERY	BIT(3)
+#define HTT_ATH12K_UMAC_RESET_T2H_INIT_TARGET_RECOVERY_SYNC_USING_UMAC	BIT(4)
+
+enum dp_umac_reset_recover_action {
+	ATH12K_UMAC_RESET_RX_EVENT_NONE,
+	ATH12K_UMAC_RESET_INIT_UMAC_RECOVERY,
+	ATH12K_UMAC_RESET_INIT_TARGET_RECOVERY_SYNC_USING_UMAC,
+	ATH12K_UMAC_RESET_DO_PRE_RESET,
+	ATH12K_UMAC_RESET_DO_POST_RESET_START,
+	ATH12K_UMAC_RESET_DO_POST_RESET_COMPLETE,
+};
+
+enum dp_umac_reset_tx_cmd {
+	ATH12K_UMAC_RESET_TX_CMD_TRIGGER_DONE,
+	ATH12K_UMAC_RESET_TX_CMD_PRE_RESET_DONE,
+	ATH12K_UMAC_RESET_TX_CMD_POST_RESET_START_DONE,
+	ATH12K_UMAC_RESET_TX_CMD_POST_RESET_COMPLETE_DONE,
+};
+
+#define ATH12K_HTT_UMAC_RESET_MSG_SHMEM_PRE_RESET_DONE_SET	BIT(0)
+#define ATH12K_HTT_UMAC_RESET_MSG_SHMEM_POST_RESET_START_DONE_SET BIT(1)
+#define ATH12K_HTT_UMAC_RESET_MSG_SHMEM_POST_RESET_COMPLETE_DONE BIT(2)
+
+struct ath12k_dp_htt_umac_reset_recovery_msg_shmem_t {
+	u32 magic_num;
+	union {
+		/*
+		 * BIT [0]        :- T2H msg to do pre-reset
+		 * BIT [1]        :- T2H msg to do post-reset start
+		 * BIT [2]        :- T2H msg to do post-reset complete
+		 * BIT [3]        :- T2H msg to indicate to Host that
+		 *                   a trigger request for MLO UMAC Recovery
+		 *                   is received for UMAC hang.
+		 * BIT [4]        :- T2H msg to indicate to Host that
+		 *                   a trigger request for MLO UMAC Recovery
+		 *                   is received for Mode-1 Target Recovery.
+		 * BIT [31 : 5]   :- reserved
+		 */
+		u32 t2h_msg;
+		u32 recovery_action;
+	};
+	union {
+		/*
+		 * BIT [0]        :- H2T msg to send pre-reset done
+		 * BIT [1]        :- H2T msg to send post-reset start done
+		 * BIT [2]        :- H2T msg to send post-reset complete done
+		 * BIT [3]        :- H2T msg to start pre-reset. This is deprecated.
+		 * BIT [31 : 4]   :- reserved
+		 */
+		u32 h2t_msg;
+		u32 recovery_action_done;
+	};
+};
+
+struct ath12k_umac_reset_ts {
+	u64 trigger_start;
+	u64 trigger_done;
+	u64 pre_reset_start;
+	u64 pre_reset_done;
+	u64 post_reset_start;
+	u64 post_reset_done;
+	u64 post_reset_complete_start;
+	u64 post_reset_complete_done;
+};
+
+struct ath12k_dp_umac_reset {
+	struct ath12k_base *ab;
+	dma_addr_t shmem_paddr_unaligned;
+	void *shmem_vaddr_unaligned;
+	dma_addr_t shmem_paddr_aligned;
+	struct ath12k_dp_htt_umac_reset_recovery_msg_shmem_t *shmem_vaddr_aligned;
+	size_t shmem_size;
+	uint32_t magic_num;
+	int intr_offset;
+	struct tasklet_struct intr_tq;
+	int irq_num;
+	struct ath12k_umac_reset_ts ts;
+	bool umac_pre_reset_in_prog;
+};
+
+struct ath12k_htt_umac_reset_setup_cmd_params {
+	uint32_t msi_data;
+	uint32_t addr_lo;
+	uint32_t addr_hi;
+};
+
+struct htt_h2t_paddr_size {
+	u32 size;
+	u32 addr_lo;
+	u32 addr_hi;
+};
+
+#define HTT_H2T_MSG_TYPE_SET	GENMASK(7, 0)
+#define HTT_H2T_MSG_METHOD	GENMASK(11, 8)
+#define HTT_T2H_MSG_METHOD	GENMASK(15, 12)
+
+#define ATH12K_DP_UMAC_RESET_SHMEM_MAGIC_NUM	0xDEADBEEF
+#define ATH12K_DP_UMAC_RESET_SHMEM_ALIGN 	8
+
+/**
+ * @brief HTT_H2T_MSG_TYPE_UMAC_RESET_PREREQUISITE_SETUP message
+ *
+ * @details
+ *  The HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP message is sent
+ *  by the host to provide prerequisite info to target for the UMAC hang
+ *  recovery feature.
+ *  The info sent in this H2T message are T2H message method, H2T message
+ *  method, T2H MSI interrupt number and physical start address, size of
+ *  the shared memory (refers to the shared memory dedicated for messaging
+ *  between host and target when the DUT is in UMAC hang recovery mode).
+ *  This H2T message is expected to be only sent if the WMI service bit
+ *  WMI_SERVICE_UMAC_HANG_RECOVERY_SUPPORT was firstly indicated by the target.
+ *
+ * |31                           16|15          12|11           8|7          0|
+ * |-------------------------------+--------------+--------------+------------|
+ * |            reserved           |h2t msg method|t2h msg method|  msg_type  |
+ * |--------------------------------------------------------------------------|
+ * |                           t2h msi interrupt number                       |
+ * |--------------------------------------------------------------------------|
+ * |                           shared memory area size                        |
+ * |--------------------------------------------------------------------------|
+ * |                     shared memory area physical address low              |
+ * |--------------------------------------------------------------------------|
+ * |                     shared memory area physical address high             |
+ * |--------------------------------------------------------------------------|
+ * The message is interpreted as follows:
+ * dword0 - b'0:7   - msg_type
+ *                    (HTT_H2T_MSG_TYPE_UMAC_RESET_PREREQUISITE_SETUP)
+ *          b'8:11  - t2h_msg_method: indicates method to be used for
+ *                    T2H communication in UMAC hang recovery mode.
+ *                    Value zero indicates MSI interrupt (default method).
+ *                    Refer to htt_umac_hang_recovery_msg_method enum.
+ *          b'12:15 - h2t_msg_method: indicates method to be used for
+ *                    H2T communication in UMAC hang recovery mode.
+ *                    Value zero indicates polling by target for this h2t msg
+ *                    during UMAC hang recovery mode.
+ *                    Refer to htt_umac_hang_recovery_msg_method enum.
+ *          b'16:31 - reserved.
+ * dword1 - b'0:31  - t2h_msi_data: MSI data to be used for
+ *                    T2H communication in UMAC hang recovery mode.
+ * dword2 - b'0:31  - size: size of shared memory dedicated for messaging
+ *                    only when in UMAC hang recovery mode.
+ *                    This refers to size in bytes.
+ * dword3 - b'0:31  - physical_address_lo: lower 32 bit physical address
+ *                    of the shared memory dedicated for messaging only when
+ *                    in UMAC hang recovery mode.
+ * dword4 - b'0:31  - physical_address_hi: higher 32 bit physical address
+ *                    of the shared memory dedicated for messaging only when
+ *                    in UMAC hang recovery mode.
+ */
+
+struct htt_dp_umac_reset_setup_req_cmd {
+	u32 msg_info;
+	u32 msi_data;
+	struct htt_h2t_paddr_size msg_shared_mem;
+}__packed;
+
+/**
+ * @brief HTT_H2T_MSG_TYPE_UMAC_RESET_START_PRE_RESET message
+ *
+ * @details
+ *  The HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_SOC_START_PRE_RESET is a SOC level
+ *  HTT message sent by the host to indicate that the target needs to start the
+ *  UMAC hang recovery feature from the point of pre-reset routine.
+ *  The purpose of this H2T message is to have host synchronize and trigger
+ *  UMAC recovery across all targets.
+ *  The info sent in this H2T message is the flag to indicate whether the
+ *  target needs to execute UMAC-recovery in context of the Initiator or
+ *  Non-Initiator.
+ *  This H2T message is expected to be sent as response to the
+ *  initiate_umac_recovery indication from the Initiator target attached to
+ *  this same host.
+ *  This H2T message is expected to be only sent if the WMI service bit
+ *  WMI_SERVICE_UMAC_HANG_RECOVERY_SUPPORT was firstly indicated by the target
+ *  and HTT_H2T_MSG_TYPE_UMAC_HANG_RECOVERY_PREREQUISITE_SETUP was sent
+ *  beforehand.
+ *
+ * |31                                    10|9|8|7            0|
+ * |-----------------------------------------------------------|
+ * |                 reserved               |U|I|   msg_type   |
+ * |-----------------------------------------------------------|
+ * Where:
+ *     I = is_initiator
+ *     U = is_umac_hang
+ *
+ * The message is interpreted as follows:
+ * dword0 - b'0:7   - msg_type
+ *                    (HTT_H2T_MSG_TYPE_UMAC_RESET_START_PRE_RESET)
+ *	    b'8     - is_initiator: indicates whether the target needs to
+ *                    execute the UMAC-recovery in context of the Initiator or
+ *                    Non-Initiator.
+ *                    The value zero indicates this target is Non-Initiator.
+ *          b'9     - is_umac_hang: indicates whether MLO UMAC recovery
+ *                    executed in context of UMAC hang or Target recovery.
+ *          b'10:31 - reserved.
+ */
+struct h2t_umac_hang_recovery_start_pre_reset {
+	u8 hdr;
+} __packed;
+
+#define HTT_H2T_UMAC_RESET_MSG_TYPE	GENMASK(7, 0)
+#define HTT_H2T_UMAC_RESET_IS_INITIATOR_SET	BIT(8)
+#define HTT_H2T_UMAC_RESET_IS_TARGET_RECOVERY_SET	BIT(9)
+
 int ath12k_dp_service_srng(struct ath12k_base *ab,
 			   struct ath12k_ext_irq_grp *irq_grp,
 			   int budget);
 int ath12k_dp_htt_connect(struct ath12k_dp *dp);
-void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif);
+void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif);
 void ath12k_dp_free(struct ath12k_base *ab);
 int ath12k_dp_alloc(struct ath12k_base *ab);
 void ath12k_dp_cc_config(struct ath12k_base *ab);
+void ath12k_dp_partner_cc_init(struct ath12k_base *ab);
 int ath12k_dp_pdev_alloc(struct ath12k_base *ab);
 void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab);
 void ath12k_dp_pdev_free(struct ath12k_base *ab);
 int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
 				int mac_id, enum hal_ring_type ring_type);
-int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr);
+int ath12k_dp_peer_default_route_setup(struct ath12k *ar,
+				       struct ath12k_link_vif *arvif,
+				       struct ath12k_link_sta *arsta);
+int ath12k_dp_peer_setup(struct ath12k *ar, struct ath12k_link_vif *arvif,
+			 struct ath12k_link_sta *arsta);
 void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr);
 void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring);
 int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
@@ -1813,4 +2600,24 @@
 						  u32 cookie);
 struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
 						  u32 desc_id);
+void ath12k_dp_tx_update_bank_profile(struct ath12k_link_vif *arvif);
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+int ath12k_ppeds_dp_srng_alloc(struct ath12k_base *ab, struct dp_srng *ring,
+			       enum hal_ring_type type, int ring_num,
+			       int num_entries);
+int ath12k_ppeds_dp_srng_init(struct ath12k_base *ab, struct dp_srng *ring,
+			      enum hal_ring_type type, int ring_num,
+			      int mac_id, int num_entries, u32 idx);
+struct ath12k_ppeds_tx_desc_info *ath12k_dp_get_ppeds_tx_desc(struct ath12k_base *ab,
+						 	      u32 desc_id);
+int ath12k_dp_cc_ppeds_desc_init(struct ath12k_base *ab);
+int ath12k_dp_cc_ppeds_desc_cleanup(struct ath12k_base *ab);
+void ath12k_dp_ppeds_tx_cmem_init(struct ath12k_base *ab, struct ath12k_dp *dp);
+#endif
+void ath12k_umac_reset_handle_post_reset_start(struct ath12k_base *ab);
+void ath12k_umac_reset_notify_target_sync_and_send(struct ath12k_base *ab,
+					       enum dp_umac_reset_tx_cmd tx_event);
+void ath12k_dp_reset_interrupt_mask(struct ath12k_base *ab);
+void ath12k_dp_restore_interrupt_mask(struct ath12k_base *ab);
+bool ath12k_dp_umac_reset_in_progress(struct ath12k_base *ab);
 #endif
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/dp_mon.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dp_mon.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/dp_mon.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dp_mon.c	2024-03-18 14:40:14.851741333 +0100
@@ -10,6 +10,22 @@
 #include "dp_tx.h"
 #include "peer.h"
 
+#define ATH12K_LE32_DEC_ENC(value, decode_bits, encode_bits) \
+	le32_encode_bits(le32_get_bits(value, decode_bits), encode_bits)
+
+#define ATH12K_LE64_DEC_ENC(value, decode_bits, encode_bits) \
+	le64_encode_bits(le64_get_bits(value, decode_bits), encode_bits)
+
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+extern int g_bonded_interface_model;
+#endif
+
+#define LE32_DEC_ENC(value, dec_bits, enc_bits)				\
+        le32_encode_bits(le32_get_bits(value, dec_bits), enc_bits)
+
+#define LE64_DEC_ENC(value, dec_bits, enc_bits)				\
+        le64_encode_bits(le64_get_bits(value, dec_bits), enc_bits)
+
 static void ath12k_dp_mon_rx_handle_ofdma_info(void *rx_tlv,
 					       struct hal_rx_user_status *rx_user_status)
 {
@@ -33,10 +49,10 @@
 
 	rx_user_status->mpdu_ok_byte_count =
 		u32_get_bits(mpdu_ok_byte_count,
-			     HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_OK_BYTE_COUNT);
+			     HAL_RX_PPDU_END_USER_STATS_INFO7_MPDU_OK_BYTE_COUNT);
 	rx_user_status->mpdu_err_byte_count =
 		u32_get_bits(mpdu_err_byte_count,
-			     HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_ERR_BYTE_COUNT);
+			     HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_ERR_BYTE_COUNT);
 }
 
 static void
@@ -107,15 +123,11 @@
 	if (ppdu_info->is_stbc && nsts > 0)
 		nsts = ((nsts + 1) >> 1) - 1;
 
-	ppdu_info->nss = u32_get_bits(nsts, VHT_SIG_SU_NSS_MASK);
+	ppdu_info->nss = u32_get_bits(nsts, VHT_SIG_SU_NSS_MASK) + 1;
 	ppdu_info->bw = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_BW);
 	ppdu_info->beamformed = u32_get_bits(info1,
 					     HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED);
 	group_id = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID);
-	if (group_id == 0 || group_id == 63)
-		ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
-	else
-		ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
 	ppdu_info->vht_flag_values5 = group_id;
 	ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
 					    ppdu_info->nss);
@@ -137,8 +149,7 @@
 	ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_STBC);
 	ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING);
 	ppdu_info->gi = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_GI);
-	ppdu_info->nss = (ppdu_info->mcs >> 3);
-	ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+	ppdu_info->nss = (ppdu_info->mcs >> 3) + 1;
 }
 
 static void ath12k_dp_mon_parse_l_sig_b(u8 *tlv_data,
@@ -172,7 +183,6 @@
 
 	ppdu_info->rate = rate;
 	ppdu_info->cck_flag = 1;
-	ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
 }
 
 static void ath12k_dp_mon_parse_l_sig_a(u8 *tlv_data,
@@ -214,7 +224,6 @@
 	}
 
 	ppdu_info->rate = rate;
-	ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
 }
 
 static void ath12k_dp_mon_parse_he_sig_b2_ofdma(u8 *tlv_data,
@@ -249,10 +258,9 @@
 	value = value << HE_STA_ID_SHIFT;
 	ppdu_info->he_data4 |= value;
 
-	ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS);
+	ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS) + 1;
 	ppdu_info->beamformed = u32_get_bits(info0,
 					     HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF);
-	ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
 }
 
 static void ath12k_dp_mon_parse_he_sig_b2_mu(u8 *tlv_data,
@@ -279,7 +287,7 @@
 	value = value << HE_STA_ID_SHIFT;
 	ppdu_info->he_data4 |= value;
 
-	ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS);
+	ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS) + 1;
 }
 
 static void ath12k_dp_mon_parse_he_sig_b1_mu(u8 *tlv_data,
@@ -292,9 +300,8 @@
 
 	ru_tones = u32_get_bits(info0,
 				HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION);
-	ppdu_info->ru_alloc = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
+	ppdu_info->ru_alloc = ath12k_mac_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
 	ppdu_info->he_RU[0] = ru_tones;
-	ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
 }
 
 static void ath12k_dp_mon_parse_he_sig_mu(u8 *tlv_data,
@@ -429,7 +436,6 @@
 
 	ppdu_info->is_stbc = info1 &
 			     HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC;
-	ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
 }
 
 static void ath12k_dp_mon_parse_he_sig_su(u8 *tlv_data,
@@ -577,18 +583,877 @@
 	ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC);
 	ppdu_info->beamformed = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF);
 	dcm = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM);
-	ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS);
+	ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS) + 1;
 	ppdu_info->dcm = dcm;
+}
+
+static inline u32
+ath12k_dp_mon_hal_rx_parse_u_sig_cmn(u8 *rx_tlv, struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct hal_mon_usig_hdr *usig = (struct hal_mon_usig_hdr *)rx_tlv;
+	struct hal_mon_usig_cmn *usig_1 = &usig->usig_1;
+
+	ppdu_info->usig_common |=
+			IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN |
+			IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN |
+			IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN |
+			IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN |
+			IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN |
+			ATH12K_LE32_DEC_ENC(usig->usig_2.tb.info0,
+				     HAL_RX_USIG_TB_INFO0_RX_INTEG_CHECK_PASS,
+				     IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC) |
+			ATH12K_LE32_DEC_ENC(usig_1->info0,
+				     HAL_RX_USIG_CMN_INFO0_PHY_VERSION,
+				     IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER) |
+			ATH12K_LE32_DEC_ENC(usig_1->info0,
+				     HAL_RX_USIG_CMN_INFO0_BW,
+				     IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW) |
+			ATH12K_LE32_DEC_ENC(usig_1->info0,
+				     HAL_RX_USIG_CMN_INFO0_UL_DL,
+				     IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL) |
+			ATH12K_LE32_DEC_ENC(usig_1->info0,
+				     HAL_RX_USIG_CMN_INFO0_BSS_COLOR,
+				     IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR) |
+			ATH12K_LE32_DEC_ENC(usig_1->info0,
+				     HAL_RX_USIG_CMN_INFO0_TXOP,
+				     IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
+
+	ppdu_info->u_sig_info.bw = le32_get_bits(usig_1->info0,
+						 HAL_RX_USIG_CMN_INFO0_BW);
+
+	ppdu_info->bw = ppdu_info->u_sig_info.bw;
+
+	return HAL_TLV_STATUS_PPDU_NOT_DONE;
+}
+
+
+static inline u32
+ath12k_dp_mon_hal_rx_parse_u_sig_tb(u8 *rx_tlv, struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct hal_mon_usig_hdr *usig = (struct hal_mon_usig_hdr *)rx_tlv;
+	struct hal_mon_usig_tb *usig_tb = &usig->usig_2.tb;
+
+	ppdu_info->usig_mask |=
+			IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD |
+			ATH12K_LE32_DEC_ENC(usig_tb->info0,
+				     HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE,
+				     IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE) |
+			IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE |
+			ATH12K_LE32_DEC_ENC(usig_tb->info0,
+				     HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_1,
+				     IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1) |
+			ATH12K_LE32_DEC_ENC(usig_tb->info0,
+				     HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_2,
+				     IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2) |
+			IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD |
+			ATH12K_LE32_DEC_ENC(usig_tb->info0,
+				     HAL_RX_USIG_TB_INFO0_CRC,
+				     IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC) |
+			ATH12K_LE32_DEC_ENC(usig_tb->info0,
+				     HAL_RX_USIG_TB_INFO0_TAIL,
+				     IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL);
+
+	ppdu_info->u_sig_info.ppdu_type_comp_mode =
+		le32_get_bits(usig_tb->info0,
+			      HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE);
+
+	return HAL_TLV_STATUS_PPDU_NOT_DONE;
+}
+
+static inline uint32_t
+ath12k_dp_mon_hal_rx_parse_u_sig_mu(u8 *rx_tlv, struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct hal_mon_usig_hdr *usig = (struct hal_mon_usig_hdr *)rx_tlv;
+	struct hal_mon_usig_mu *usig_mu = &usig->usig_2.mu;
+
+	ppdu_info->usig_mask |=
+			IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD |
+			IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE |
+			ATH12K_LE32_DEC_ENC(usig_mu->info0,
+				     HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE,
+				     IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE) |
+			IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE |
+			ATH12K_LE32_DEC_ENC(usig_mu->info0,
+				     HAL_RX_USIG_MU_INFO0_PUNC_CH_INFO,
+				     IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO) |
+			IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE |
+			ATH12K_LE32_DEC_ENC(usig_mu->info0,
+				     HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS,
+				     IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS) |
+			ATH12K_LE32_DEC_ENC(usig_mu->info0,
+				     HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM,
+				     IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS) |
+			ATH12K_LE32_DEC_ENC(usig_mu->info0,
+				     HAL_RX_USIG_MU_INFO0_CRC,
+				     IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC) |
+			ATH12K_LE32_DEC_ENC(usig_mu->info0,
+				     HAL_RX_USIG_TB_INFO0_TAIL,
+				     IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL);
+
+	ppdu_info->u_sig_info.ppdu_type_comp_mode =
+		le32_get_bits(usig_mu->info0,
+			      HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE);
+	ppdu_info->u_sig_info.eht_sig_mcs =
+		le32_get_bits(usig_mu->info0,
+			      HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS);
+	ppdu_info->u_sig_info.num_eht_sig_sym =
+		le32_get_bits(usig_mu->info0,
+			      HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM);
+
+	return HAL_TLV_STATUS_PPDU_NOT_DONE;
+}
+
+static inline u32
+ath12k_dp_mon_hal_rx_parse_u_sig_hdr(u8 *rx_tlv, struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct hal_mon_usig_hdr *usig = (struct hal_mon_usig_hdr *)rx_tlv;
+	struct hal_mon_usig_cmn *usig_cmn = &usig->usig_1;
+	u8 comp_mode;
+	bool ul_dl;
+
+        ppdu_info->usig_flags = 1;
+
+	ath12k_dp_mon_hal_rx_parse_u_sig_cmn(rx_tlv, ppdu_info);
+
+	ul_dl = u32_get_bits(__le32_to_cpu(usig_cmn->info0), HAL_RX_USIG_CMN_INFO0_UL_DL);
+
+	comp_mode = u32_get_bits(__le32_to_cpu(usig->usig_2.mu.info0),
+				 HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE);
+
+	if (comp_mode == 0 && ul_dl)
+		return ath12k_dp_mon_hal_rx_parse_u_sig_tb(rx_tlv, ppdu_info);
+	 else
+		return ath12k_dp_mon_hal_rx_parse_u_sig_mu(rx_tlv, ppdu_info);
+}
+
+static enum hal_rx_mon_status
+ath12k_dp_mon_hal_aggr_tlv(struct hal_rx_mon_ppdu_info *ppdu_info,
+				   u16 tlv_len, u8 *tlv_data)
+{
+	if (tlv_len <= HAL_RX_MON_MAX_AGGR_SIZE - ppdu_info->tlv_aggr.cur_len) {
+		memcpy(ppdu_info->tlv_aggr.buf + ppdu_info->tlv_aggr.cur_len,
+		       tlv_data, tlv_len);
+		ppdu_info->tlv_aggr.cur_len += tlv_len;
+	}
+
+	return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
+}
+
+static inline bool
+ath12k_dp_mon_hal_rx_is_mu_mimo_user(struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	if (ppdu_info->u_sig_info.ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_SU &&
+	    ppdu_info->u_sig_info.ul_dl == 1)
+		return true;
+
+	return false;
+}
+
+static inline bool
+ath12k_dp_mon_hal_rx_is_frame_type_ndp(struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	if (ppdu_info->u_sig_info.ppdu_type_comp_mode == 1 &&
+	    ppdu_info->u_sig_info.eht_sig_mcs == 0 &&
+	    ppdu_info->u_sig_info.num_eht_sig_sym == 0)
+		return true;
+
+	return false;
+}
+
+static inline bool
+ath12k_dp_mon_hal_rx_is_non_ofdma(struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	u32 ppdu_type_comp_mode = ppdu_info->u_sig_info.ppdu_type_comp_mode;
+	u32 ul_dl = ppdu_info->u_sig_info.ul_dl;
+
+	if ((ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 0) ||
+	    (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_OFDMA && ul_dl == 0) ||
+	    (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO  && ul_dl == 1))
+		return true;
+
+	return false;
+}
+
+static inline bool
+ath12k_dp_mon_hal_rx_is_ofdma(struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	if (ppdu_info->u_sig_info.ppdu_type_comp_mode == 0 &&
+	    ppdu_info->u_sig_info.ul_dl == 0)
+		return true;
+
+	return false;
+}
+
+static inline u32
+ath12k_dp_mon_hal_rx_parse_eht_sig_ndp(u8 *tlv, struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+        struct hal_eht_sig_ndp_cmn_eb *eht_sig_ndp =
+				(struct hal_eht_sig_ndp_cmn_eb *)tlv;
+
+        ppdu_info->eht_known |=
+		IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE |
+		IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF |
+		IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S |
+		IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S |
+		IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_S |
+		IEEE80211_RADIOTAP_EHT_KNOWN_CRC1 |
+		IEEE80211_RADIOTAP_EHT_KNOWN_TAIL1;
+
+	ppdu_info->eht_data[0] |=
+		ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+			     HAL_RX_EHT_SIG_NDP_CMN_INFO0_SPATIAL_REUSE,
+			     IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);
+	/*
+	 * GI and LTF size are separately indicated in radiotap header
+	 * and hence will be parsed from other TLV
+	 */
+
+
+	ppdu_info->eht_data[0] |=
+		ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+			     HAL_RX_EHT_SIG_NDP_CMN_INFO0_NUM_LTF_SYM,
+			     IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);
+
+	ppdu_info->eht_data[0] |=
+		ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+			     HAL_RX_EHT_SIG_NDP_CMN_INFO0_CRC,
+			     IEEE80211_RADIOTAP_EHT_DATA0_CRC1_O);
+
+	ppdu_info->eht_data[0] |=
+		ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+			     HAL_RX_EHT_SIG_NDP_CMN_INFO0_DISREGARD,
+			     IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_S);
+
+	ppdu_info->eht_data[7] |=
+		ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+			     HAL_RX_EHT_SIG_NDP_CMN_INFO0_NSS,
+			     IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
+
+	ppdu_info->eht_data[7] |=
+		ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+			     HAL_RX_EHT_SIG_NDP_CMN_INFO0_BEAMFORMED,
+			     IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S);
+
+        return HAL_TLV_STATUS_PPDU_NOT_DONE;
+}
+
+static inline u32
+ath12k_dp_mon_hal_rx_parse_usig_overflow(u8 *tlv, struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct hal_eht_sig_cc_usig_overflow *usig_ovflow =
+		(struct hal_eht_sig_cc_usig_overflow *)tlv;
+
+        ppdu_info->eht_known |=
+		IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE |
+		IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF |
+		IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM |
+		IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM |
+		IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM |
+		IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_O;
+
+	ppdu_info->eht_data[0] |=
+		ATH12K_LE32_DEC_ENC(usig_ovflow->info0,
+			     HAL_RX_EHT_SIG_OVERFLOW_INFO0_SPATIAL_REUSE,
+			     IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);
+
+	/*
+	 * GI and LTF size are separately indicated in radiotap header
+	 * and hence will be parsed from other TLV
+	 */
+	ppdu_info->eht_data[0] |=
+		ATH12K_LE32_DEC_ENC(usig_ovflow->info0,
+			     HAL_RX_EHT_SIG_OVERFLOW_INFO0_NUM_LTF_SYM,
+			     IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);
+
+	ppdu_info->eht_data[0] |=
+		ATH12K_LE32_DEC_ENC(usig_ovflow->info0,
+			     HAL_RX_EHT_SIG_OVERFLOW_INFO0_LDPC_EXTA_SYM,
+			     IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM);
+
+	ppdu_info->eht_data[0] |=
+		ATH12K_LE32_DEC_ENC(usig_ovflow->info0,
+			     HAL_RX_EHT_SIG_OVERFLOW_INFO0_PRE_FEC_PAD_FACTOR,
+			     IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM);
+
+	ppdu_info->eht_data[0] |=
+		ATH12K_LE32_DEC_ENC(usig_ovflow->info0,
+			     HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISAMBIGUITY,
+			     IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM);
+
+	ppdu_info->eht_data[0] |=
+		ATH12K_LE32_DEC_ENC(usig_ovflow->info0,
+			     HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISREGARD,
+			     IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_O);
+
+	return HAL_TLV_STATUS_PPDU_NOT_DONE;
+}
+
+static inline u32
+ath12k_dp_mon_hal_rx_parse_ru_allocation(u8 *tlv, struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	u64 *ehtsig_tlv = (u64 *)tlv;
+	struct hal_eht_sig_ofdma_cmn_eb1 *ofdma_cmn_eb1;
+	struct hal_eht_sig_ofdma_cmn_eb2 *ofdma_cmn_eb2;
+
+	ofdma_cmn_eb1 = (struct hal_eht_sig_ofdma_cmn_eb1 *)ehtsig_tlv;
+	ofdma_cmn_eb2 = (struct hal_eht_sig_ofdma_cmn_eb2 *)(ehtsig_tlv + 1);
+
+/*FIX ME to get CC2 values*/
+
+	switch (ppdu_info->u_sig_info.bw) {
+	case HAL_EHT_BW_320_2:
+	case HAL_EHT_BW_320_1:
+		/* CC1 2::3 */
+		ppdu_info->eht_data[4] |=
+			IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3_KNOWN |
+			ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+				     HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_3,
+				     IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3);
+
+		/* CC1 2::4 */
+		ppdu_info->eht_data[5] |=
+			IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4_KNOWN |
+			ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+				     HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_4,
+				     IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4);
+
+		/* CC1 2::5 */
+		ppdu_info->eht_data[5] |=
+			IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5_KNOWN |
+			ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+				     HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_5,
+				     IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5);
+
+		/* CC1 2::6 */
+		ppdu_info->eht_data[6] |=
+			IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6_KNOWN |
+			ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+				     HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_6,
+				     IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6);
+
+	        /* fallthrough */
+		fallthrough;
+
+	case HAL_EHT_BW_160:
+		/* CC1 2::1 */
+		ppdu_info->eht_data[3] |=
+			IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1_KNOWN |
+			ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+				     HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_1,
+				     IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1);
+		/* CC1 2::2 */
+		ppdu_info->eht_data[3] |=
+			IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2_KNOWN |
+			ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+				     HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_2,
+				     IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2);
+
+		/* fallthrough */
+		fallthrough;
+
+	case HAL_EHT_BW_80:
+		/* CC1 1::2 */
+		ppdu_info->eht_data[2] |=
+			IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2_KNOWN |
+			ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0,
+				     HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_2,
+				     IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2);
+
+		/* fallthrough */
+		fallthrough;
+
+	case HAL_EHT_BW_40:
+		fallthrough;
+	case HAL_EHT_BW_20:
+		/* CC1 1::1 */
+		ppdu_info->eht_data[1] |=
+			IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1_KNOWN |
+			ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0,
+				     HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_1,
+				     IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1);
+		break;
+	default:
+	        break;
+        }
+
+	return HAL_TLV_STATUS_PPDU_NOT_DONE;
+}
+
+static inline uint32_t
+ath12k_dp_mon_hal_rx_parse_non_ofdma_users(u8 *tlv, struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct hal_eht_sig_non_ofdma_cmn_eb *non_ofdma_cmn_eb =
+	                        (struct hal_eht_sig_non_ofdma_cmn_eb *)tlv;
+
+	ppdu_info->eht_known |= IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M;
+	ppdu_info->eht_data[7] |=
+		ATH12K_LE32_DEC_ENC(non_ofdma_cmn_eb->info0,
+			     HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_USERS,
+			     IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS);
+
+	return HAL_TLV_STATUS_PPDU_NOT_DONE;
+}
+
+static inline u32
+ath12k_dp_mon_hal_rx_parse_eht_sig_mumimo_user_info(u8 *tlv,
+						    struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct hal_eht_sig_mu_mimo_user_info *user_info;
+	u32 user_idx = ppdu_info->num_eht_user_info_valid;
+
+	if (ppdu_info->num_eht_user_info_valid >= ARRAY_SIZE(ppdu_info->eht_user_info))
+		return HAL_TLV_STATUS_PPDU_NOT_DONE;
+
+	user_info = (struct hal_eht_sig_mu_mimo_user_info *)tlv;
+
+	ppdu_info->eht_user_info[user_idx] |=
+		IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN |
+		IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
+		IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
+		IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_KNOWN_M |
+		ATH12K_LE32_DEC_ENC(user_info->info0,
+			     HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_STA_ID,
+			     IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) |
+		ATH12K_LE32_DEC_ENC(user_info->info0,
+			     HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CODING,
+			     IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) |
+		ATH12K_LE32_DEC_ENC(user_info->info0,
+			     HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS,
+			     IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
+		ATH12K_LE32_DEC_ENC(user_info->info0,
+			     HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_SPATIAL_CODING,
+			     IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_M);
+
+	ppdu_info->mcs = le32_get_bits(user_info->info0,
+				       HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS);
+
+	/* FIXME: get USER_ENCODING_BLOCK_CRC */
+
+	ppdu_info->num_eht_user_info_valid++;
+
+	return HAL_TLV_STATUS_PPDU_NOT_DONE;
+}
+
+static inline u32
+ath12k_dp_mon_hal_rx_parse_eht_sig_non_mumimo_user(u8 *tlv,
+						   struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct hal_eht_sig_non_mu_mimo_user_info *user_info;
+	u32 user_idx = ppdu_info->num_eht_user_info_valid;
+
+	if (ppdu_info->num_eht_user_info_valid >= ARRAY_SIZE(ppdu_info->eht_user_info))
+		return HAL_TLV_STATUS_PPDU_NOT_DONE;
+
+	user_info = (struct hal_eht_sig_non_mu_mimo_user_info *)tlv;
+
+	ppdu_info->eht_user_info[user_idx] |=
+		IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN |
+		IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
+		IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
+		IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O |
+		IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O |
+		ATH12K_LE32_DEC_ENC(user_info->info0,
+			     HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_STA_ID,
+			     IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) |
+		ATH12K_LE32_DEC_ENC(user_info->info0,
+			     HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CODING,
+			     IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) |
+		ATH12K_LE32_DEC_ENC(user_info->info0,
+			     HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS,
+			     IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
+		ATH12K_LE32_DEC_ENC(user_info->info0,
+			     HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS,
+			     IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O) |
+		ATH12K_LE32_DEC_ENC(user_info->info0,
+			     HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_BEAMFORMED,
+			     IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O);
+
+	ppdu_info->mcs = le32_get_bits(user_info->info0,
+				       HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS);
+
+	ppdu_info->nss = le32_get_bits(user_info->info0,
+				       HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS) + 1;
+
+	ppdu_info->num_eht_user_info_valid++;
+
+	return HAL_TLV_STATUS_PPDU_NOT_DONE;
+}
+
+static inline u32
+ath12k_dp_mon_hal_rx_parse_eht_sig_non_ofdma(u8 *tlv, struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	void *user_info = (void *)((u8 *)tlv + HTT_TLV_HDR_LEN);
+
+	ath12k_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info);
+	ath12k_dp_mon_hal_rx_parse_non_ofdma_users(tlv, ppdu_info);
+
+        if (ath12k_dp_mon_hal_rx_is_mu_mimo_user(ppdu_info))
+                ath12k_dp_mon_hal_rx_parse_eht_sig_mumimo_user_info(user_info,
+								    ppdu_info);
+        else
+		ath12k_dp_mon_hal_rx_parse_eht_sig_non_mumimo_user(user_info,
+								   ppdu_info);
+	return HAL_TLV_STATUS_PPDU_NOT_DONE;
+}
+
+static inline u32
+ath12k_dp_mon_hal_rx_parse_eht_sig_ofdma(u8 *tlv, struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	u64 *eht_sig_tlv = (u64 *)tlv;
+	void *user_info = (void *)(eht_sig_tlv + 2);
+
+	ath12k_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info);
+    	ath12k_dp_mon_hal_rx_parse_ru_allocation    (tlv, ppdu_info);
+	ath12k_dp_mon_hal_rx_parse_eht_sig_non_mumimo_user(user_info,
+							   ppdu_info);
+	return HAL_TLV_STATUS_PPDU_NOT_DONE;
+}
+
+static void
+ath12k_dp_mon_parse_eht_sig_hdr(struct hal_rx_mon_ppdu_info *ppdu_info,
+				u8 *tlv_data)
+{
+	ppdu_info->eht_flags = 1;
+
+	if (ath12k_dp_mon_hal_rx_is_frame_type_ndp(ppdu_info))
+		ath12k_dp_mon_hal_rx_parse_eht_sig_ndp(tlv_data, ppdu_info);
+	else if (ath12k_dp_mon_hal_rx_is_non_ofdma(ppdu_info))
+		ath12k_dp_mon_hal_rx_parse_eht_sig_non_ofdma(tlv_data, ppdu_info);
+	else if (ath12k_dp_mon_hal_rx_is_ofdma(ppdu_info))
+		ath12k_dp_mon_hal_rx_parse_eht_sig_ofdma(tlv_data, ppdu_info);
+}
+
+static u32
+ath12k_dp_mon_hal_rx_status_process_aggr_tlv(struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	u32 aggr_tlv_tag = ppdu_info->tlv_aggr.tlv_tag;
+
+	if (aggr_tlv_tag == HAL_PHYRX_GENERIC_EHT_SIG)
+		ath12k_dp_mon_parse_eht_sig_hdr(ppdu_info, ppdu_info->tlv_aggr.buf);
+
+	ppdu_info->tlv_aggr.in_progress = 0;
+	ppdu_info->tlv_aggr.cur_len = 0;
+
+	return HAL_TLV_STATUS_PPDU_NOT_DONE;
+}
+
+static inline enum ath12k_eht_ru_size
+hal_rx_mon_hal_ru_size_to_ath12k_ru_size(u32 hal_ru_size)
+{
+	switch (hal_ru_size) {
+	case HAL_EHT_RU_26:
+		return ATH12K_EHT_RU_26;
+	case HAL_EHT_RU_52:
+		return ATH12K_EHT_RU_52;
+	case HAL_EHT_RU_78:
+		return ATH12K_EHT_RU_52_26;
+	case HAL_EHT_RU_106:
+		return ATH12K_EHT_RU_106;
+	case HAL_EHT_RU_132:
+		return ATH12K_EHT_RU_106_26;
+	case HAL_EHT_RU_242:
+		return ATH12K_EHT_RU_242;
+	case HAL_EHT_RU_484:
+		return ATH12K_EHT_RU_484;
+	case HAL_EHT_RU_726:
+		return ATH12K_EHT_RU_484_242;
+	case HAL_EHT_RU_996:
+		return ATH12K_EHT_RU_996;
+	case HAL_EHT_RU_996x2:
+		return ATH12K_EHT_RU_996x2;
+	case HAL_EHT_RU_996x3:
+		return ATH12K_EHT_RU_996x3;
+	case HAL_EHT_RU_996x4:
+		return ATH12K_EHT_RU_996x4;
+	case HAL_EHT_RU_NONE:
+		return ATH12K_EHT_RU_INVALID;
+	case HAL_EHT_RU_996_484:
+		return ATH12K_EHT_RU_996_484;
+	case HAL_EHT_RU_996x2_484:
+		return ATH12K_EHT_RU_996x2_484;
+	case HAL_EHT_RU_996x3_484:
+		return ATH12K_EHT_RU_996x3_484;
+	case HAL_EHT_RU_996_484_242:
+		return ATH12K_EHT_RU_996_484_242;
+	default:
+		return ATH12K_EHT_RU_INVALID;
+	}
+}
+
+static inline void hal_rx_ul_ofdma_ru_size_to_width(u32 ru_size, u32 *ru_width)
+{
+	uint32_t width = 0;
+
+	switch (ru_size) {
+	case ATH12K_EHT_RU_26:
+		width = RU_26;
+		break;
+	case ATH12K_EHT_RU_52:
+		width = RU_52;
+		break;
+	case ATH12K_EHT_RU_52_26:
+		width = RU_52_26;
+		break;
+	case ATH12K_EHT_RU_106:
+		width = RU_106;
+		break;
+	case ATH12K_EHT_RU_106_26:
+		width = RU_106_26;
+		break;
+	case ATH12K_EHT_RU_242:
+		width = RU_242;
+		break;
+	case ATH12K_EHT_RU_484:
+		width = RU_484;
+		break;
+	case ATH12K_EHT_RU_484_242:
+		width = RU_484_242;
+		break;
+	case ATH12K_EHT_RU_996:
+		width = RU_996;
+		break;
+	case ATH12K_EHT_RU_996_484:
+		width = RU_996_484;
+		break;
+	case ATH12K_EHT_RU_996_484_242:
+		width = RU_996_484_242;
+		break;
+	case ATH12K_EHT_RU_996x2:
+		width = RU_2X996;
+		break;
+	case ATH12K_EHT_RU_996x2_484:
+		width = RU_2X996_484;
+		break;
+	case ATH12K_EHT_RU_996x3:
+		width = RU_3X996;
+		break;
+	case ATH12K_EHT_RU_996x3_484:
+		width = RU_3X996_484;
+		break;
+	case ATH12K_EHT_RU_996x4:
+		width = RU_4X996;
+		break;
+	default:
+		break;
+	}
+
+	*ru_width = width;
+}
+
+static u32
+ath12k_dp_mon_hal_rx_parse_receive_user_info(u8 *tlv, u16 user_id,
+					     struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct hal_receive_user_info *rx_usr_info = (struct hal_receive_user_info *)tlv;
+	struct hal_rx_user_status *mon_rx_user_status = NULL;
+	u64 ru_index_320mhz = 0;
+	u32 ru_index_per80mhz;
+	u32 ru_size = 0, num_80mhz_with_ru = 0, ru_start_index_80_0;
+	u32 ru_index = HAL_EHT_RU_INVALID;
+	u32 rtap_ru_size = ATH12K_EHT_RU_INVALID;
+	u32 ru_width, reception_type, ru_type_80_0;
+	u32 ru_type_80_1, ru_start_index_80_1, ru_type_80_2, ru_start_index_80_2;
+	u32 ru_type_80_3, ru_start_index_80_3;
+
+	reception_type = u32_get_bits(__le32_to_cpu(rx_usr_info->info0),
+				      HAL_RX_USR_INFO0_RECEPTION_TYPE);
+
+	switch (reception_type) {
+	case HAL_RECEPTION_TYPE_SU:
 	ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+	        break;
+	case HAL_RECEPTION_TYPE_DL_MU_MIMO:
+	case HAL_RECEPTION_TYPE_UL_MU_MIMO:
+	        ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+	        break;
+	case HAL_RECEPTION_TYPE_DL_MU_OFMA:
+	case HAL_RECEPTION_TYPE_UL_MU_OFDMA:
+	        ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
+	        break;
+	case HAL_RECEPTION_TYPE_DL_MU_OFDMA_MIMO:
+	case HAL_RECEPTION_TYPE_UL_MU_OFDMA_MIMO:
+	        ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO;
+	}
+
+	ppdu_info->is_stbc = u32_get_bits(__le32_to_cpu(rx_usr_info->info0),
+					  HAL_RX_USR_INFO0_STBC);
+	ppdu_info->ldpc = u32_get_bits(__le32_to_cpu(rx_usr_info->info2),
+				       HAL_RX_USR_INFO2_LDPC);
+	ppdu_info->dcm = u32_get_bits(__le32_to_cpu(rx_usr_info->info2),
+				      HAL_RX_USR_INFO2_STA_DCM);
+	ppdu_info->bw = u32_get_bits(__le32_to_cpu(rx_usr_info->info1),
+				     HAL_RX_USR_INFO1_RX_BW);
+	ppdu_info->mcs = u32_get_bits(__le32_to_cpu(rx_usr_info->info1),
+				      HAL_RX_USR_INFO1_MCS);
+	ppdu_info->nss = u32_get_bits(__le32_to_cpu(rx_usr_info->info2),
+				      HAL_RX_USR_INFO2_NSS) + 1;
+
+	if (user_id < HAL_MAX_UL_MU_USERS) {
+	        mon_rx_user_status =
+	                &ppdu_info->userstats[user_id];
+	        mon_rx_user_status->mcs = ppdu_info->mcs;
+	        mon_rx_user_status->nss = ppdu_info->nss;
+	}
+
+	if (!(ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_MIMO ||
+	      ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA ||
+	      ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO))
+			return HAL_TLV_STATUS_PPDU_NOT_DONE;
+
+	/* RU allocation present only for OFDMA reception */
+	ru_type_80_0 = u32_get_bits(__le32_to_cpu(rx_usr_info->info2),
+				    HAL_RX_USR_INFO2_RU_TYPE_80_0);
+	ru_start_index_80_0 = u32_get_bits(__le32_to_cpu(rx_usr_info->info3),
+					   HAL_RX_USR_INFO3_RU_START_IDX_80_0);
+	if (ru_type_80_0 != HAL_EHT_RU_NONE) {
+	        ru_size += ru_type_80_0;
+	        ru_index = ru_index_per80mhz = ru_start_index_80_0;
+	        HAL_SET_RU_PER80(ru_index_320mhz, ru_type_80_0, ru_index_per80mhz, 0);
+	        num_80mhz_with_ru++;
+	}
+
+	ru_type_80_1 = u32_get_bits(__le32_to_cpu(rx_usr_info->info2),
+				    HAL_RX_USR_INFO2_RU_TYPE_80_1);
+	ru_start_index_80_1 = u32_get_bits(__le32_to_cpu(rx_usr_info->info3),
+					   HAL_RX_USR_INFO3_RU_START_IDX_80_1);
+	if (ru_type_80_1 != HAL_EHT_RU_NONE) {
+	        ru_size += ru_type_80_1;
+	        ru_index = ru_index_per80mhz = ru_start_index_80_1;
+	        HAL_SET_RU_PER80(ru_index_320mhz, ru_type_80_1, ru_index_per80mhz, 1);
+	        num_80mhz_with_ru++;
+	}
+
+	ru_type_80_2 = u32_get_bits(__le32_to_cpu(rx_usr_info->info2),
+				    HAL_RX_USR_INFO2_RU_TYPE_80_2);
+	ru_start_index_80_2 = u32_get_bits(__le32_to_cpu(rx_usr_info->info3),
+					   HAL_RX_USR_INFO3_RU_START_IDX_80_2);
+	if (ru_type_80_2 != HAL_EHT_RU_NONE) {
+	        ru_size += ru_type_80_2;
+	        ru_index = ru_index_per80mhz = ru_start_index_80_2;
+	        HAL_SET_RU_PER80(ru_index_320mhz, ru_type_80_2, ru_index_per80mhz, 2);
+	        num_80mhz_with_ru++;
+	}
+
+	ru_type_80_3 = u32_get_bits(__le32_to_cpu(rx_usr_info->info2),
+				    HAL_RX_USR_INFO2_RU_TYPE_80_3);
+	ru_start_index_80_3 = u32_get_bits(__le32_to_cpu(rx_usr_info->info2),
+					   HAL_RX_USR_INFO3_RU_START_IDX_80_3);
+	if (ru_type_80_3 != HAL_EHT_RU_NONE) {
+	        ru_size += ru_type_80_3;
+	        ru_index = ru_index_per80mhz = ru_start_index_80_3;
+	        HAL_SET_RU_PER80(ru_index_320mhz, ru_type_80_3, ru_index_per80mhz, 3);
+	        num_80mhz_with_ru++;
+	}
+
+	if (num_80mhz_with_ru > 1) {
+	        /* Calculate the MRU index */
+		switch (ru_index_320mhz) {
+		case HAL_EHT_RU_996_484_0:
+		case HAL_EHT_RU_996x2_484_0:
+		case HAL_EHT_RU_996x3_484_0:
+			ru_index = 0;
+			break;
+		case HAL_EHT_RU_996_484_1:
+		case HAL_EHT_RU_996x2_484_1:
+		case HAL_EHT_RU_996x3_484_1:
+			ru_index = 1;
+			break;
+		case HAL_EHT_RU_996_484_2:
+		case HAL_EHT_RU_996x2_484_2:
+		case HAL_EHT_RU_996x3_484_2:
+			ru_index = 2;
+			break;
+		case HAL_EHT_RU_996_484_3:
+		case HAL_EHT_RU_996x2_484_3:
+		case HAL_EHT_RU_996x3_484_3:
+			ru_index = 3;
+			break;
+		case HAL_EHT_RU_996_484_4:
+		case HAL_EHT_RU_996x2_484_4:
+		case HAL_EHT_RU_996x3_484_4:
+			ru_index = 4;
+			break;
+		case HAL_EHT_RU_996_484_5:
+		case HAL_EHT_RU_996x2_484_5:
+		case HAL_EHT_RU_996x3_484_5:
+			ru_index = 5;
+			break;
+		case HAL_EHT_RU_996_484_6:
+		case HAL_EHT_RU_996x2_484_6:
+		case HAL_EHT_RU_996x3_484_6:
+			ru_index = 6;
+			break;
+		case HAL_EHT_RU_996_484_7:
+		case HAL_EHT_RU_996x2_484_7:
+		case HAL_EHT_RU_996x3_484_7:
+			ru_index = 7;
+			break;
+		case HAL_EHT_RU_996x2_484_8:
+			ru_index = 8;
+			break;
+		case HAL_EHT_RU_996x2_484_9:
+			ru_index = 9;
+			break;
+		case HAL_EHT_RU_996x2_484_10:
+		        ru_index = 10;
+		        break;
+		case HAL_EHT_RU_996x2_484_11:
+			ru_index = 11;
+			break;
+		default:
+			ru_index = HAL_EHT_RU_INVALID;
+	        break;
+		}
+
+		ru_size += 4;
+	}
+
+	rtap_ru_size = hal_rx_mon_hal_ru_size_to_ath12k_ru_size(ru_size);
+	if (rtap_ru_size != ATH12K_EHT_RU_INVALID) {
+		ppdu_info->eht_known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_SIZE_OM;
+		ppdu_info->eht_data[1] |=
+			le32_encode_bits(rtap_ru_size,
+					 IEEE80211_RADIOTAP_EHT_DATA1_RU_SIZE);
+	}
+
+	if (ru_index != HAL_EHT_RU_INVALID) {
+		ppdu_info->eht_known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_INDEX_OM;
+		ppdu_info->eht_data[1] |=
+			le32_encode_bits(rtap_ru_size,
+					 IEEE80211_RADIOTAP_EHT_DATA1_RU_INDEX);
+	}
+
+	if (mon_rx_user_status && ru_index != HAL_EHT_RU_INVALID &&
+	    rtap_ru_size != ATH12K_EHT_RU_INVALID) {
+		mon_rx_user_status->ul_ofdma_ru_start_index = ru_index;
+		mon_rx_user_status->ul_ofdma_ru_size = rtap_ru_size;
+		hal_rx_ul_ofdma_ru_size_to_width(rtap_ru_size, &ru_width);
+		mon_rx_user_status->ul_ofdma_ru_width = ru_width;
+		mon_rx_user_status->ofdma_info_valid = 1;
+	}
+
+	return HAL_TLV_STATUS_PPDU_NOT_DONE;
 }
 
 static enum hal_rx_mon_status
 ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
-				  struct ath12k_mon_data *pmon,
-				  u32 tlv_tag, u8 *tlv_data, u32 userid)
+				  struct hal_rx_mon_ppdu_info *ppdu_info,
+				  struct hal_tlv_parsed_hdr *tlv_parsed_hdr)
 {
-	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
 	u32 info[7];
+	u16 tlv_tag = tlv_parsed_hdr->tlv_tag;
+	u16 tlv_len = tlv_parsed_hdr->tlv_len;
+	u16 userid = tlv_parsed_hdr->tlv_userid;
+	u8 *tlv_data = tlv_parsed_hdr->tlv_data;
+
+	if (ppdu_info->tlv_aggr.in_progress) {
+		if (ppdu_info->tlv_aggr.tlv_tag == tlv_tag) {
+			return ath12k_dp_mon_hal_aggr_tlv(ppdu_info,
+							  tlv_len, tlv_data);
+		} else {
+			ath12k_dp_mon_hal_rx_status_process_aggr_tlv(ppdu_info);
+		}
+	}
 
 	switch (tlv_tag) {
 	case HAL_RX_PPDU_START: {
@@ -596,11 +1461,16 @@
 			(struct hal_rx_ppdu_start *)tlv_data;
 
 		info[0] = __le32_to_cpu(ppdu_start->info0);
-
 		ppdu_info->ppdu_id =
 			u32_get_bits(info[0], HAL_RX_PPDU_START_INFO0_PPDU_ID);
-		ppdu_info->chan_num = __le32_to_cpu(ppdu_start->chan_num);
-		ppdu_info->ppdu_ts = __le32_to_cpu(ppdu_start->ppdu_start_ts);
+
+		info[1] = __le32_to_cpu(ppdu_start->info1);
+		ppdu_info->chan_num = u32_get_bits(info[1],
+						   HAL_RX_PPDU_START_INFO1_CHAN_NUM);
+		ppdu_info->freq = u32_get_bits(info[1],
+					       HAL_RX_PPDU_START_INFO1_CHAN_FREQ);
+		ppdu_info->ppdu_ts = ((__le32_to_cpu(ppdu_start->ppdu_start_ts_31_0) |
+				      (__le32_to_cpu(ppdu_start->ppdu_start_ts_63_32) << 31)));
 
 		if (ppdu_info->ppdu_id != ppdu_info->last_ppdu_id) {
 			ppdu_info->last_ppdu_id = ppdu_info->ppdu_id;
@@ -628,8 +1498,8 @@
 			u32_get_bits(info[1], HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID);
 		ppdu_info->tid =
 			ffs(u32_get_bits(info[6],
-					 HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP)
-					 - 1);
+					 HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP))
+					 - 1;
 		ppdu_info->tcp_msdu_count =
 			u32_get_bits(info[4],
 				     HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT);
@@ -651,6 +1521,12 @@
 		ppdu_info->num_mpdu_fcs_err =
 			u32_get_bits(info[0],
 				     HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR);
+		ppdu_info->peer_id =
+			u32_get_bits(info[0], HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID);
+
+		if ((ppdu_info->num_mpdu_fcs_ok | ppdu_info->num_mpdu_fcs_err) > 1)
+			ppdu_info->userstats[userid].ampdu_present = true;
+
 		switch (ppdu_info->preamble_type) {
 		case HAL_RX_PREAMBLE_11N:
 			ppdu_info->ht_flags = 1;
@@ -661,6 +1537,9 @@
 		case HAL_RX_PREAMBLE_11AX:
 			ppdu_info->he_flags = 1;
 			break;
+		case HAL_RX_PREAMBLE_11BE:
+			ppdu_info->eht_flags = 1;
+			break;
 		default:
 			break;
 		}
@@ -728,10 +1607,9 @@
 	case HAL_PHYRX_RSSI_LEGACY: {
 		struct hal_rx_phyrx_rssi_legacy_info *rssi =
 			(struct hal_rx_phyrx_rssi_legacy_info *)tlv_data;
-		u32 reception_type = 0;
-		u32 rssi_legacy_info = __le32_to_cpu(rssi->rsvd[0]);
+		u32 rssi_legacy_info = __le32_to_cpu(rssi->info0);
 
-		info[0] = __le32_to_cpu(rssi->info0);
+		info[0] = __le32_to_cpu(rssi->info1);
 
 		/* TODO: Please note that the combined rssi will not be accurate
 		 * in MU case. Rssi in MU needs to be retrieved from
@@ -739,22 +1617,9 @@
 		 */
 		ppdu_info->rssi_comb =
 			u32_get_bits(info[0],
-				     HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB);
-		reception_type =
-			u32_get_bits(rssi_legacy_info,
-				     HAL_RX_PHYRX_RSSI_LEGACY_INFO_RSVD1_RECEPTION);
-
-		switch (reception_type) {
-		case HAL_RECEPTION_TYPE_ULOFMDA:
-			ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
-			break;
-		case HAL_RECEPTION_TYPE_ULMIMO:
-			ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
-			break;
-		default:
-			ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
-			break;
-		}
+				     HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB);
+		ppdu_info->bw = u32_get_bits(rssi_legacy_info,
+					     HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW);
 		break;
 	}
 	case HAL_RXPCU_PPDU_END_INFO: {
@@ -772,9 +1637,9 @@
 	case HAL_RX_MPDU_START: {
 		struct hal_rx_mpdu_start *mpdu_start =
 			(struct hal_rx_mpdu_start *)tlv_data;
-		struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
 		u16 peer_id;
 
+		info[0] = __le32_to_cpu(mpdu_start->info0);
 		info[1] = __le32_to_cpu(mpdu_start->info1);
 		peer_id = u32_get_bits(info[1], HAL_RX_MPDU_START_INFO1_PEERID);
 		if (peer_id)
@@ -783,73 +1648,52 @@
 		ppdu_info->mpdu_len += u32_get_bits(info[1],
 						    HAL_RX_MPDU_START_INFO2_MPDU_LEN);
 		if (userid < HAL_MAX_UL_MU_USERS) {
-			info[0] = __le32_to_cpu(mpdu_start->info0);
+			info[1] = __le32_to_cpu(mpdu_start->info1);
 			ppdu_info->userid = userid;
-			ppdu_info->ampdu_id[userid] =
-				u32_get_bits(info[0], HAL_RX_MPDU_START_INFO1_PEERID);
+			ppdu_info->userstats[userid].ampdu_id =
+				u32_get_bits(info[0], HAL_RX_MPDU_START_INFO0_PPDU_ID);
 		}
 
-		mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
-		if (!mon_mpdu)
-			return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
-
 		break;
 	}
-	case HAL_RX_MSDU_START:
-		/* TODO: add msdu start parsing logic */
+	case HAL_RX_PPDU_START_USER_INFO: {
+		ath12k_dp_mon_hal_rx_parse_receive_user_info(tlv_data, userid, ppdu_info);
 		break;
-	case HAL_MON_BUF_ADDR: {
-		struct dp_rxdma_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring;
-		struct dp_mon_packet_info *packet_info =
-			(struct dp_mon_packet_info *)tlv_data;
-		int buf_id = u32_get_bits(packet_info->cookie,
-					  DP_RXDMA_BUF_COOKIE_BUF_ID);
-		struct sk_buff *msdu;
-		struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
-		struct ath12k_skb_rxcb *rxcb;
-
-		spin_lock_bh(&buf_ring->idr_lock);
-		msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
-		spin_unlock_bh(&buf_ring->idr_lock);
-
-		if (unlikely(!msdu)) {
-			ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
-				    buf_id);
-			return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
 		}
-
-		rxcb = ATH12K_SKB_RXCB(msdu);
-		dma_unmap_single(ab->dev, rxcb->paddr,
-				 msdu->len + skb_tailroom(msdu),
-				 DMA_FROM_DEVICE);
-
-		if (mon_mpdu->tail)
-			mon_mpdu->tail->next = msdu;
-		else
-			mon_mpdu->tail = msdu;
-
-		ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
-
+	case HAL_PHYRX_OTHER_RECEIVE_INFO: {
+		struct phyrx_common_user_info *cmn_usr_info =
+			(struct phyrx_common_user_info *)tlv_data;
+		ppdu_info->gi = u32_get_bits(__le32_to_cpu(cmn_usr_info->info0),
+					     HAL_RX_PHY_CMN_USER_INFO0_GI);
 		break;
 	}
-	case HAL_RX_MSDU_END: {
-		struct rx_msdu_end_qcn9274 *msdu_end =
-			(struct rx_msdu_end_qcn9274 *)tlv_data;
-		bool is_first_msdu_in_mpdu;
-		u16 msdu_end_info;
-
-		msdu_end_info = __le16_to_cpu(msdu_end->info5);
-		is_first_msdu_in_mpdu = u32_get_bits(msdu_end_info,
-						     RX_MSDU_END_INFO5_FIRST_MSDU);
-		if (is_first_msdu_in_mpdu) {
-			pmon->mon_mpdu->head = pmon->mon_mpdu->tail;
-			pmon->mon_mpdu->tail = NULL;
-		}
+	case HAL_MON_DROP:
+		/* TODO: Drop mpdu counts for the current ppdu can be recorded for
+		 * statistics
+		 */
 		break;
-	}
+	case HAL_MON_BUF_ADDR:
+		return HAL_RX_MON_STATUS_BUF_ADDR;
 	case HAL_RX_MPDU_END:
-		list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list);
+		return HAL_RX_MON_STATUS_MPDU_END;
+	case HAL_PHYRX_GENERIC_U_SIG:
+		ath12k_dp_mon_hal_rx_parse_u_sig_hdr(tlv_data, ppdu_info);
+		break;
+	case HAL_PHYRX_GENERIC_EHT_SIG: {
+		/*
+		 * Handle the case where aggregation is in progress
+		 * or the current TLV is one of the TLVs which should be
+		 * aggregated
+		 */
+		ppdu_info->tlv_aggr.in_progress = 1;
+		ppdu_info->tlv_aggr.tlv_tag = tlv_tag;
+		ppdu_info->tlv_aggr.cur_len = 0;
+		ppdu_info->eht_flags = 1;
+
+		ath12k_dp_mon_hal_aggr_tlv(ppdu_info, tlv_len, tlv_data);
+
 		break;
+	}
 	case HAL_DUMMY:
 		return HAL_RX_MON_STATUS_BUF_DONE;
 	case HAL_RX_PPDU_END_STATUS_DONE:
@@ -862,25 +1706,152 @@
 	return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
 }
 
-static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, struct sk_buff *msdu)
+static void
+ath12k_dp_mon_fill_rx_rate(struct ath12k *ar, struct hal_rx_mon_ppdu_info *ppdu_info,
+			   struct ieee80211_rx_status *rx_status)
+{
+	struct ieee80211_supported_band *sband;
+	enum rx_msdu_start_pkt_type pkt_type;
+	u8 rate_mcs, nss, sgi;
+	bool is_cck;
+
+	pkt_type = ppdu_info->preamble_type;
+	rate_mcs = ppdu_info->rate;
+	nss = ppdu_info->nss;
+	sgi = ppdu_info->gi;
+
+	switch (pkt_type) {
+	case RX_MSDU_START_PKT_TYPE_11A:
+	case RX_MSDU_START_PKT_TYPE_11B:
+		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
+		if (rx_status->band < NUM_NL80211_BANDS) {
+			sband = &ar->mac.sbands[rx_status->band];
+			rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
+									is_cck);
+		}
+		break;
+	case RX_MSDU_START_PKT_TYPE_11N:
+		rx_status->encoding = RX_ENC_HT;
+		if (rate_mcs > ATH12K_HT_MCS_MAX) {
+			ath12k_warn(ar->ab,
+				    "Received with invalid mcs in HT mode %d\n",
+				     rate_mcs);
+			break;
+		}
+		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
+		if (sgi)
+			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+		break;
+	case RX_MSDU_START_PKT_TYPE_11AC:
+		rx_status->encoding = RX_ENC_VHT;
+		rx_status->rate_idx = rate_mcs;
+		if (rate_mcs > ATH12K_VHT_MCS_MAX) {
+			ath12k_warn(ar->ab,
+				    "Received with invalid mcs in VHT mode %d\n",
+				     rate_mcs);
+			break;
+		}
+		if (sgi)
+			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+		break;
+	case RX_MSDU_START_PKT_TYPE_11AX:
+		rx_status->rate_idx = rate_mcs;
+		if (rate_mcs > ATH12K_HE_MCS_MAX) {
+			ath12k_warn(ar->ab,
+				    "Received with invalid mcs in HE mode %d\n",
+				    rate_mcs);
+			break;
+		}
+		rx_status->encoding = RX_ENC_HE;
+		rx_status->he_gi = ath12k_mac_he_gi_to_nl80211_he_gi(sgi);
+		break;
+	case RX_MSDU_START_PKT_TYPE_11BE:
+		rx_status->rate_idx = rate_mcs;
+		if (rate_mcs > ATH12K_EHT_MCS_MAX) {
+			ath12k_warn(ar->ab,
+				    "Received with invalid mcs in EHT mode %d\n",
+				    rate_mcs);
+			break;
+		}
+		rx_status->encoding = RX_ENC_EHT;
+		rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
+		break;
+	default:
+		ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+			   "monitor receives invalid preamble type %d"
+			   ,pkt_type);
+		break;
+	}
+}
+
+static void
+ath12k_dp_mon_fill_rx_stats(struct ath12k *ar, struct hal_rx_mon_ppdu_info *ppdu_info,
+			     struct ieee80211_rx_status *rx_status)
 {
-	u32 rx_pkt_offset, l2_hdr_offset;
+	struct ieee80211_channel *channel;
+	u32 center_freq;
+	u8 channel_num;
+
+	rx_status->freq = ppdu_info->freq;
+	rx_status->bw = ath12k_mac_bw_to_mac80211_bw(ppdu_info->bw);
+	rx_status->nss = ppdu_info->nss;
+	rx_status->rate_idx = 0;
+	rx_status->encoding = RX_ENC_LEGACY;
+
+	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+	channel_num = ppdu_info->chan_num;
+	center_freq = ppdu_info->freq;
+
+	rx_status->band = NUM_NL80211_BANDS;
+
+	if (center_freq >= ATH12K_MIN_6G_FREQ &&
+	    center_freq <= ATH12K_MAX_6G_FREQ) {
+		rx_status->band = NL80211_BAND_6GHZ;
+		rx_status->freq = center_freq;
+	} else if (channel_num >= ATH12K_MIN_2G_CHAN &&
+		  channel_num <= ATH12K_MAX_2G_CHAN) {
+		rx_status->band = NL80211_BAND_2GHZ;
+	} else if (channel_num >= ATH12K_MIN_5G_CHAN &&
+		  channel_num <= ATH12K_MAX_5G_CHAN) {
+		rx_status->band = NL80211_BAND_5GHZ;
+	}
+
+	if (unlikely(rx_status->band == NUM_NL80211_BANDS ||
+		     !ar->ah->hw->wiphy->bands[rx_status->band])) {
+		ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+			   "sband is NULL for status band 1 %d channel_num %d center_freq %d pdev_id %d\n",
+			   rx_status->band, channel_num, center_freq, ar->pdev_idx);
+		spin_lock_bh(&ar->data_lock);
+		channel = ar->rx_channel;
+		if (channel) {
+			rx_status->band = channel->band;
+			channel_num =
+				ieee80211_frequency_to_channel(channel->center_freq);
+		} else {
+			ath12k_err(ar->ab, "unable to determine channel, band for rx packet");
+		}
+		spin_unlock_bh(&ar->data_lock);
+	}
+
+	if (rx_status->band < NUM_NL80211_BANDS)
+		rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+								 rx_status->band);
 
-	rx_pkt_offset = ar->ab->hw_params->hal_desc_sz;
-	l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab,
-					     (struct hal_rx_desc *)msdu->data);
-	skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
+	ath12k_dp_mon_fill_rx_rate(ar, ppdu_info, rx_status);
 }
 
+#define DP_MON_RX_PKT_OFFSET	8
+#define DP_MON_RX_L2_HDR_OFFSET	2
+
 static struct sk_buff *
-ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
-			    u32 mac_id, struct sk_buff *head_msdu,
-			    struct ieee80211_rx_status *rxs, bool *fcs_err)
+ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, u32 mpdu_idx,
+			    struct sk_buff *head_msdu, struct ieee80211_rx_status *rxs,
+			    bool *fcs_err, struct hal_rx_mon_ppdu_info *ppdu_info)
 {
 	struct ath12k_base *ab = ar->ab;
 	struct sk_buff *msdu, *mpdu_buf, *prev_buf;
-	struct hal_rx_desc *rx_desc;
-	u8 *hdr_desc, *dest, decap_format;
+	u8 *dest, *msdu_payload, decap_format;
 	struct ieee80211_hdr_3addr *wh;
 	u32 err_bitmap;
 
@@ -889,24 +1860,23 @@
 	if (!head_msdu)
 		goto err_merge_fail;
 
-	rx_desc = (struct hal_rx_desc *)head_msdu->data;
-	err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
+	err_bitmap = ppdu_info->cmn_mpdu_info.mon_mpdu[mpdu_idx].msdu_info[0].errmap;
 
 	if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
 		*fcs_err = true;
 
-	decap_format = ath12k_dp_rx_h_decap_type(ab, rx_desc);
+	decap_format = ppdu_info->cmn_mpdu_info.mon_mpdu[mpdu_idx].msdu_info[0].decap_format;
 
-	ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+	ath12k_dp_mon_fill_rx_stats(ar, ppdu_info, rxs);
 
 	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
-		ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu);
+		skb_pull(head_msdu, DP_MON_RX_PKT_OFFSET);
 
 		prev_buf = head_msdu;
 		msdu = head_msdu->next;
 
 		while (msdu) {
-			ath12k_dp_mon_rx_msdus_set_payload(ar, msdu);
+			skb_pull(msdu, DP_MON_RX_PKT_OFFSET);
 
 			prev_buf = msdu;
 			msdu = msdu->next;
@@ -914,15 +1884,14 @@
 
 		prev_buf->next = NULL;
 
-		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
+		skb_trim(prev_buf, prev_buf->len);
 	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
 		u8 qos_pkt = 0;
 
-		rx_desc = (struct hal_rx_desc *)head_msdu->data;
-		hdr_desc = ab->hw_params->hal_ops->rx_desc_get_msdu_payload(rx_desc);
+		msdu_payload = head_msdu->data;
 
 		/* Base size */
-		wh = (struct ieee80211_hdr_3addr *)hdr_desc;
+		wh = (struct ieee80211_hdr_3addr *)msdu_payload;
 
 		if (ieee80211_is_data_qos(wh->frame_control))
 			qos_pkt = 1;
@@ -930,12 +1899,13 @@
 		msdu = head_msdu;
 
 		while (msdu) {
-			ath12k_dp_mon_rx_msdus_set_payload(ar, msdu);
+			skb_pull(msdu, DP_MON_RX_L2_HDR_OFFSET);
 			if (qos_pkt) {
 				dest = skb_push(msdu, sizeof(__le16));
 				if (!dest)
 					goto err_merge_fail;
-				memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
+				memcpy(dest, msdu_payload,
+				       sizeof(struct ieee80211_qos_hdr));
 			}
 			prev_buf = msdu;
 			msdu = msdu->next;
@@ -1014,6 +1984,88 @@
 	rtap_buf[rtap_len] = rx_status->he_RU[3];
 }
 
+static enum nl80211_eht_ru_alloc
+ath12k_dp_mon_rx_mon_ru_size_to_nl_ru_size(u32 ru_size)
+{
+	switch(ru_size) {
+	case ATH12K_EHT_RU_26:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_26;
+	case ATH12K_EHT_RU_52:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_52;
+	case ATH12K_EHT_RU_52_26:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_52P26;
+	case ATH12K_EHT_RU_106:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_106;
+	case ATH12K_EHT_RU_106_26:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_106P26;
+	case ATH12K_EHT_RU_242:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_242;
+	case ATH12K_EHT_RU_484:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_484;
+	case ATH12K_EHT_RU_484_242:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_484P242;
+	case ATH12K_EHT_RU_996:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_996;
+	case ATH12K_EHT_RU_996x2:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_2x996;
+	case ATH12K_EHT_RU_996x3:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_3x996;
+	case ATH12K_EHT_RU_996x4:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_4x996;
+	case ATH12K_EHT_RU_996_484:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_996P484;
+	case ATH12K_EHT_RU_996x2_484:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484;
+	case ATH12K_EHT_RU_996x3_484:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484;
+	case ATH12K_EHT_RU_996_484_242:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242;
+	default:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_26;
+	}
+}
+
+static void
+ath12k_dp_mon_rx_update_radiotap_u_sig(struct hal_rx_mon_ppdu_info *rx_status,
+				       u8 *rtap_buf)
+{
+	u32 rtap_len = 0;
+
+	put_unaligned_le32(rx_status->usig_common, &rtap_buf[rtap_len]);
+	rtap_len += 4;
+
+	put_unaligned_le32(rx_status->usig_value, &rtap_buf[rtap_len]);
+	rtap_len += 4;
+
+	put_unaligned_le32(rx_status->usig_mask, &rtap_buf[rtap_len]);
+}
+
+static void
+ath12k_dp_mon_rx_update_radiotap_eht(struct hal_rx_mon_ppdu_info *rx_status,
+				     u8 *rtap_buf)
+{
+	u32 rtap_len = 0, user, i;
+
+	put_unaligned_le32(rx_status->eht_known, &rtap_buf[rtap_len]);
+	rtap_len += 4;
+
+	for (i = 0; i < ARRAY_SIZE(rx_status->eht_data); i++) {
+		put_unaligned_le32(rx_status->eht_data[i],
+				   &rtap_buf[rtap_len]);
+		rtap_len += 4;
+	}
+
+	if (rx_status->num_eht_user_info_valid > EHT_MAX_USER_INFO)
+		return;
+
+	for (user = 0; user < rx_status->num_eht_user_info_valid; user++) {
+		put_unaligned_le32(rx_status->eht_user_info[user],
+				   &rtap_buf[rtap_len]);
+		rtap_len += 4;
+	}
+}
+
+
 static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
 					  struct hal_rx_mon_ppdu_info *ppduinfo,
 					  struct sk_buff *mon_skb,
@@ -1022,24 +2074,67 @@
 	struct ieee80211_supported_band *sband;
 	u8 *ptr = NULL;
 	u16 ampdu_id = ppduinfo->ampdu_id[ppduinfo->userid];
+	u32 ru_size;
+	bool ampdu_present = ppduinfo->userstats[ppduinfo->userid].ampdu_present;
 
 	rxs->flag |= RX_FLAG_MACTIME_START;
-	rxs->signal = ppduinfo->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR;
+	rxs->signal = ppduinfo->rssi_comb + ar->rssi_offsets.rssi_offset;
 	rxs->nss = ppduinfo->nss + 1;
 
-	if (ampdu_id) {
+	if (ampdu_present) {
 		rxs->flag |= RX_FLAG_AMPDU_DETAILS;
 		rxs->ampdu_reference = ampdu_id;
 	}
 
-	if (ppduinfo->he_mu_flags) {
+	if (ppduinfo->usig_flags) {
+		struct ieee80211_radiotap_tlv *tlv;
+		struct ieee80211_radiotap_eht_usig *usig;
+		int usig_len = sizeof (*usig);
+		int len;
+
+		if (!(rxs->flag & RX_FLAG_RADIOTAP_TLV_AT_END))
+			skb_reset_mac_header(mon_skb);
+		rxs->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
+		rxs->encoding = RX_ENC_EHT;
+		len = sizeof(*tlv) + ALIGN(usig_len, 4);
+		tlv = skb_push(mon_skb, len);
+		memset(tlv, 0, len);
+		tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT_USIG);
+		tlv->len = cpu_to_le16(usig_len);
+		ptr = tlv->data;
+		ath12k_dp_mon_rx_update_radiotap_u_sig(ppduinfo, ptr);
+	}
+
+	if (ppduinfo->eht_flags) {
+		struct ieee80211_radiotap_tlv *tlv;
+		struct ieee80211_radiotap_eht *eht;
+		int eht_len = struct_size(eht, user_info,
+					  ppduinfo->num_eht_user_info_valid);
+		int len;
+
+		rxs->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
+		rxs->encoding = RX_ENC_EHT;
+
+		len = sizeof(*tlv) + ALIGN(eht_len, 4);
+		skb_reset_mac_header(mon_skb);
+		tlv = skb_push(mon_skb, len);
+		memset(tlv, 0, len);
+		tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT);
+		tlv->len = cpu_to_le16(eht_len);
+		ptr = tlv->data;
+		ath12k_dp_mon_rx_update_radiotap_eht(ppduinfo, ptr);
+	} else if (ppduinfo->he_mu_flags) {
 		rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
 		rxs->encoding = RX_ENC_HE;
-		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
+		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu))
+;
 		ath12k_dp_mon_rx_update_radiotap_he_mu(ppduinfo, ptr);
+		rxs->ampdu_reference = ampdu_id;
 	} else if (ppduinfo->he_flags) {
 		rxs->flag |= RX_FLAG_RADIOTAP_HE;
 		rxs->encoding = RX_ENC_HE;
+		ru_size = ppduinfo->userstats[ppduinfo->userid].ul_ofdma_ru_size;
+		rxs->eht.ru = ath12k_dp_mon_rx_mon_ru_size_to_nl_ru_size(ru_size);
 		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
 		ath12k_dp_mon_rx_update_radiotap_he(ppduinfo, ptr);
 		rxs->rate_idx = ppduinfo->rate;
@@ -1060,15 +2155,16 @@
 }
 
 static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
-					  struct sk_buff *msdu,
-					  struct ieee80211_rx_status *status)
+					  struct sk_buff *msdu, u32 mpdu_idx,
+					  struct hal_rx_mon_ppdu_info *ppduinfo,
+					  struct ath12k_dp_rx_info *rx_info)
 {
 	static const struct ieee80211_radiotap_he known = {
 		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
 				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
 		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
 	};
-	struct ieee80211_rx_status *rx_status;
+	struct ieee80211_rx_status *rx_status, *status = rx_info->rx_status;
 	struct ieee80211_radiotap_he *he = NULL;
 	struct ieee80211_sta *pubsta = NULL;
 	struct ath12k_peer *peer;
@@ -1084,16 +2180,24 @@
 		status->flag |= RX_FLAG_RADIOTAP_HE;
 	}
 
+	status->link_valid = 0;
+	status->link_id = 0;
+
 	if (!(status->flag & RX_FLAG_ONLY_MONITOR))
-		decap = ath12k_dp_rx_h_decap_type(ar->ab, rxcb->rx_desc);
+		decap = ppduinfo->cmn_mpdu_info.mon_mpdu[mpdu_idx].msdu_info[0].decap_format;
 	spin_lock_bh(&ar->ab->base_lock);
-	peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
-	if (peer && peer->sta)
+	peer = ath12k_peer_find_by_id(ar->ab, ppduinfo->peer_id);
+	if (peer && peer->sta) {
 		pubsta = peer->sta;
+		if (pubsta->valid_links) {
+			status->link_valid = 1;
+			status->link_id = peer->link_id;
+		}
+	}
 	spin_unlock_bh(&ar->ab->base_lock);
 
 	ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
-		   "rx skb %pK len %u peer %pM %u %s %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+		   "rx skb %pK len %u peer %pM %u %s %s%s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
 		   msdu,
 		   msdu->len,
 		   peer ? peer->addr : NULL,
@@ -1106,6 +2210,7 @@
 		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
 		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
 		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
+		   (status->bw == RATE_INFO_BW_320) ? "320" : "",
 		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
 		   status->rate_idx,
 		   status->nss,
@@ -1131,10 +2236,10 @@
 	    !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
 		rx_status->flag |= RX_FLAG_8023;
 
-	ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
+	ieee80211_rx_napi(ar->ah->hw, pubsta, msdu, napi);
 }
 
-static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
+static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mpdu_idx,
 				    struct sk_buff *head_msdu,
 				    struct hal_rx_mon_ppdu_info *ppduinfo,
 				    struct napi_struct *napi)
@@ -1142,10 +2247,14 @@
 	struct ath12k_pdev_dp *dp = &ar->dp;
 	struct sk_buff *mon_skb, *skb_next, *header;
 	struct ieee80211_rx_status *rxs = &dp->rx_status;
+	struct ath12k_dp_rx_info rx_info;
 	bool fcs_err = false;
 
-	mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id, head_msdu,
-					      rxs, &fcs_err);
+	rx_info.filled = 0;
+	rx_info.rx_status = rxs;
+
+	mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mpdu_idx, head_msdu,
+					      rxs, &fcs_err, ppduinfo);
 	if (!mon_skb)
 		goto mon_deliver_fail;
 
@@ -1170,7 +2279,8 @@
 		}
 		rxs->flag |= RX_FLAG_ONLY_MONITOR;
 		ath12k_dp_mon_update_radiotap(ar, ppduinfo, mon_skb, rxs);
-		ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs);
+		ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, mpdu_idx,
+					      ppduinfo, &rx_info);
 		mon_skb = skb_next;
 	} while (mon_skb);
 	rxs->flag = 0;
@@ -1187,24 +2297,159 @@
 	return -EINVAL;
 }
 
+static int ath12k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
+{
+	if (skb->len > len) {
+		skb_trim(skb, len);
+	} else {
+		if (skb_tailroom(skb) < len - skb->len) {
+			if ((pskb_expand_head(skb, 0,
+					      len - skb->len - skb_tailroom(skb),
+					      GFP_ATOMIC))) {
+				dev_kfree_skb_any(skb);
+				return -ENOMEM;
+			}
+		}
+
+		skb_put(skb, (len - skb->len));
+	}
+
+	return 0;
+}
+
+static void
+ath12k_dp_mon_parse_rx_msdu_end(u8 *tlv_data, struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct rx_msdu_end_qcn9274 *msdu_end =
+				(struct rx_msdu_end_qcn9274 *)tlv_data;
+	u32 info = __le32_to_cpu(msdu_end->info13);
+	u32 errmap = 0, mpdu_idx, msdu_idx, decap_format;
+
+	if (info & RX_MSDU_END_INFO13_FCS_ERR)
+		errmap |= HAL_RX_MPDU_ERR_FCS;
+
+	if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
+		errmap |= HAL_RX_MPDU_ERR_DECRYPT;
+
+	if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
+		errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
+
+	if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
+		errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
+
+	if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
+		errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
+
+	if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
+		errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
+
+	if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
+		errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
+
+	decap_format = u32_get_bits(info, RX_MSDU_END_INFO11_DECAP_FORMAT);
+
+	mpdu_idx = ppdu_info->cmn_mpdu_info.mpdu_count;
+	msdu_idx = ppdu_info->cmn_mpdu_info.mon_mpdu[mpdu_idx].msdu_count;
+	ppdu_info->cmn_mpdu_info.mon_mpdu[mpdu_idx].msdu_info[msdu_idx].errmap = errmap;
+	ppdu_info->cmn_mpdu_info.mon_mpdu[mpdu_idx].msdu_info[msdu_idx].decap_format = decap_format;
+}
+
+static void
+ath12k_dp_mon_parse_rx_dest_tlv(struct ath12k *ar,
+				struct hal_rx_mon_ppdu_info *ppdu_info,
+				enum hal_rx_mon_status hal_status, u8 *tlv_data)
+{
+	int offset;
+
+	if (ppdu_info->cmn_mpdu_info.mpdu_count > HAL_RX_MAX_MPDU) {
+		ath12k_warn(ar->ab, "MPDU count reached max limit\n");
+		return;
+	}
+
+	switch (hal_status) {
+	case HAL_RX_MON_STATUS_BUF_ADDR: {
+		struct dp_mon_packet_info *packet_info =
+				(struct dp_mon_packet_info *)tlv_data;
+		int buf_id = u32_get_bits(packet_info->cookie,
+					  DP_RXDMA_BUF_COOKIE_BUF_ID);
+		struct sk_buff *msdu;
+		struct ath12k_skb_rxcb *rxcb;
+		struct dp_rxdma_ring *buf_ring;
+		struct ath12k_dp *dp = &ar->ab->dp;
+		u16 msdu_count, mpdu_count;
+
+		buf_ring = &dp->rxdma_mon_buf_ring;
+		spin_lock_bh(&buf_ring->idr_lock);
+		msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
+		spin_unlock_bh(&buf_ring->idr_lock);
+
+		if (unlikely(!msdu)) {
+			ath12k_warn(ar->ab,
+				    "mon buf_addr: dest desc with inval buf_id %d\n",
+				    buf_id);
+			return;
+		}
+
+		rxcb = ATH12K_SKB_RXCB(msdu);
+		dma_unmap_single(ar->ab->dev, rxcb->paddr,
+				 msdu->len + skb_tailroom(msdu),
+				 DMA_FROM_DEVICE);
+
+		offset = packet_info->dma_length + ATH12K_WIFIRX_DOT11_OFFSET;
+
+		mpdu_count = ppdu_info->cmn_mpdu_info.mpdu_count;
+		msdu_count = ppdu_info->cmn_mpdu_info.mon_mpdu[mpdu_count].msdu_count;
+		if (ath12k_dp_pkt_set_pktlen(msdu, offset) ||
+		    msdu_count >= HAL_RX_MAX_MSDU) {
+			dev_kfree_skb_any(msdu);
+			return;
+		}
+
+		if (!msdu_count)
+			ppdu_info->cmn_mpdu_info.mon_mpdu[mpdu_count].head_msdu = msdu;
+		else
+			ppdu_info->cmn_mpdu_info.mon_mpdu[mpdu_count].tail_msdu->next = msdu;
+
+		ppdu_info->cmn_mpdu_info.mon_mpdu[mpdu_count].tail_msdu = msdu;
+		ppdu_info->cmn_mpdu_info.mon_mpdu[mpdu_count].msdu_count++;
+
+		ath12k_dp_mon_buf_replenish(ar->ab, buf_ring, 1);
+		break;
+	}
+	case HAL_RX_MON_STATUS_MPDU_END: {
+		u16 mpdu_count = ppdu_info->cmn_mpdu_info.mpdu_count;
+
+		if (ppdu_info->cmn_mpdu_info.mon_mpdu[mpdu_count].msdu_count)
+			ppdu_info->cmn_mpdu_info.mon_mpdu[mpdu_count].tail_msdu->next = NULL;
+		ppdu_info->cmn_mpdu_info.mpdu_count++;
+		break;
+	}
+	case HAL_RX_MON_STATUS_MSDU_END:
+		ath12k_dp_mon_parse_rx_msdu_end(tlv_data, ppdu_info);
+	default:
+		break;
+	}
+}
+
 static enum hal_rx_mon_status
-ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon,
+ath12k_dp_mon_parse_rx_dest(struct ath12k *ar, struct hal_rx_mon_ppdu_info *ppdu_info,
 			    struct sk_buff *skb)
 {
-	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
-	struct hal_tlv_hdr *tlv;
-	enum hal_rx_mon_status hal_status;
+	struct hal_tlv_64_hdr *tlv;
+	struct hal_tlv_parsed_hdr tlv_parsed_hdr = {0};
+	struct ath12k_skb_rxcb *rxcb;
+	enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
 	u32 tlv_userid = 0;
 	u16 tlv_tag, tlv_len;
 	u8 *ptr = skb->data;
 
-	memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
+	bool montior_started = test_bit(MONITOR_VDEV_STARTED, &ar->monitor_flags);
 
 	do {
-		tlv = (struct hal_tlv_hdr *)ptr;
-		tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
-		tlv_len = le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN);
-		tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID);
+		tlv = (struct hal_tlv_64_hdr *)ptr;
+		tlv_tag = u64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG);
+		tlv_len = u64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
+		tlv_userid = u64_get_bits(tlv->tl, HAL_TLV_USR_ID);
 		ptr += sizeof(*tlv);
 
 		/* The actual length of PPDU_END is the combined length of many PHY
@@ -1216,46 +2461,62 @@
 		if (tlv_tag == HAL_RX_PPDU_END)
 			tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
 
-		hal_status = ath12k_dp_mon_rx_parse_status_tlv(ab, pmon,
-							       tlv_tag, ptr, tlv_userid);
+		tlv_parsed_hdr.tlv_tag = tlv_tag;
+		tlv_parsed_hdr.tlv_len = tlv_len;
+		tlv_parsed_hdr.tlv_userid = tlv_userid;
+		tlv_parsed_hdr.tlv_data = ptr;
+
+		hal_status = ath12k_dp_mon_rx_parse_status_tlv(ar->ab, ppdu_info,
+							       &tlv_parsed_hdr);
+
+		if (montior_started)
+			ath12k_dp_mon_parse_rx_dest_tlv(ar, ppdu_info,
+							hal_status, ptr);
+
 		ptr += tlv_len;
-		ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
+		ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN);
 
-		if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
+		if ((ptr - skb->data) >= skb->len + 1)
 			break;
 
-	} while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE);
+	} while ((hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE) ||
+		 (hal_status == HAL_RX_MON_STATUS_BUF_ADDR) ||
+		 (hal_status == HAL_RX_MON_STATUS_MPDU_END) ||
+		 (hal_status == HAL_RX_MON_STATUS_MSDU_END));
+
+	rxcb = ATH12K_SKB_RXCB(skb);
+	if (rxcb->is_end_of_ppdu)
+		hal_status = HAL_RX_MON_STATUS_PPDU_DONE;
 
 	return hal_status;
 }
 
 enum hal_rx_mon_status
 ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
-				  struct ath12k_mon_data *pmon,
+				  struct hal_rx_mon_ppdu_info *ppdu_info,
 				  int mac_id,
 				  struct sk_buff *skb,
 				  struct napi_struct *napi)
 {
-	struct ath12k_base *ab = ar->ab;
-	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
-	struct dp_mon_mpdu *tmp;
-	struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
-	struct sk_buff *head_msdu, *tail_msdu;
+	struct mon_mpdu_data *mon_mpdu;
+	struct sk_buff *head_msdu;
 	enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
+	int i;
 
-	ath12k_dp_mon_parse_rx_dest(ab, pmon, skb);
+	hal_status = ath12k_dp_mon_parse_rx_dest(ar, ppdu_info, skb);
+	if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE)
+		return hal_status;
 
-	list_for_each_entry_safe(mon_mpdu, tmp, &pmon->dp_rx_mon_mpdu_list, list) {
-		list_del(&mon_mpdu->list);
-		head_msdu = mon_mpdu->head;
-		tail_msdu = mon_mpdu->tail;
+	for (i = 0; i < ppdu_info->cmn_mpdu_info.mpdu_count; i++) {
+		mon_mpdu = &ppdu_info->cmn_mpdu_info.mon_mpdu[i];
+		if (!mon_mpdu)
+			continue;
 
-		if (head_msdu && tail_msdu) {
-			ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu,
-						 ppdu_info, napi);
-		}
+		head_msdu = mon_mpdu->head_msdu;
 
-		kfree(mon_mpdu);
+		if (head_msdu)
+			ath12k_dp_mon_rx_deliver(ar, i, head_msdu,
+						 ppdu_info, napi);
 	}
 	return hal_status;
 }
@@ -1312,6 +2573,7 @@
 		mon_buf->paddr_lo = cpu_to_le32(lower_32_bits(paddr));
 		mon_buf->paddr_hi = cpu_to_le32(upper_32_bits(paddr));
 		mon_buf->cookie = cpu_to_le64(cookie);
+		mon_buf->magic = DP_MON_MAGIC_VALUE;
 
 		req_entries--;
 	}
@@ -1604,6 +2866,9 @@
 
 	tx_ppdu_info = ath12k_dp_mon_hal_tx_ppdu_info(pmon, tlv_tag);
 
+	if (tlv_tag == HAL_MON_BUF_ADDR && !ab->hw_params->supports_tx_monitor)
+		return status;
+
 	switch (tlv_tag) {
 	case HAL_TX_FES_SETUP: {
 		struct hal_tx_fes_setup *tx_fes_setup =
@@ -2056,163 +3321,122 @@
 	return tlv_status;
 }
 
-int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget,
-			       enum dp_monitor_mode monitor_mode,
-			       struct napi_struct *napi)
+static void
+ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_stats,
+					      struct hal_rx_mon_ppdu_info *ppdu_info,
+					      struct hal_rx_user_status *user_stats,
+					      u32 num_msdu)
 {
-	struct hal_mon_dest_desc *mon_dst_desc;
-	struct ath12k_pdev_dp *pdev_dp = &ar->dp;
-	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data;
-	struct ath12k_base *ab = ar->ab;
-	struct ath12k_dp *dp = &ab->dp;
-	struct sk_buff *skb;
-	struct ath12k_skb_rxcb *rxcb;
-	struct dp_srng *mon_dst_ring;
-	struct hal_srng *srng;
-	struct dp_rxdma_ring *buf_ring;
-	u64 cookie;
-	u32 ppdu_id;
-	int num_buffs_reaped = 0, srng_id, buf_id;
-	u8 dest_idx = 0, i;
-	bool end_of_ppdu;
-	struct hal_rx_mon_ppdu_info *ppdu_info;
-	struct ath12k_peer *peer = NULL;
+	u32 mcs_idx = (user_stats) ? user_stats->mcs : ppdu_info->mcs;
+	u32 nss_idx = (user_stats) ? user_stats->nss - 1 : ppdu_info->nss - 1;
+	u32 bw_idx = ppdu_info->bw;
+	u32 gi_idx = ppdu_info->gi;
 
-	ppdu_info = &pmon->mon_ppdu_info;
-	memset(ppdu_info, 0, sizeof(*ppdu_info));
-	ppdu_info->peer_id = HAL_INVALID_PEERID;
+	if (mcs_idx > HAL_RX_MAX_MCS_BE || nss_idx >= HAL_RX_MAX_NSS ||
+	    bw_idx >= HAL_RX_BW_MAX || gi_idx >= HAL_RX_GI_MAX) {
+		return;
+	}
 
-	srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
+	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX ||
+	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11BE)
+		gi_idx = ath12k_he_gi_to_nl80211_he_gi(ppdu_info->gi);
 
-	if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE) {
-		mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
-		buf_ring = &dp->rxdma_mon_buf_ring;
-	} else {
-		mon_dst_ring = &pdev_dp->tx_mon_dst_ring[srng_id];
-		buf_ring = &dp->tx_mon_buf_ring;
+	rx_stats->pkt_stats.rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += num_msdu;
+	if (user_stats)
+		rx_stats->byte_stats.rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += user_stats->mpdu_ok_byte_count;
+	else
+		rx_stats->byte_stats.rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += ppdu_info->mpdu_len;
 	}
 
-	srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
-
-	spin_lock_bh(&srng->lock);
-	ath12k_hal_srng_access_begin(ab, srng);
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+static void ath12k_dp_mon_rx_update_peer_stats_bonded(struct ath12k *ar,
+						      struct ath12k_link_sta *arsta,
+						      struct hal_rx_mon_ppdu_info *ppdu_info,
+						      struct ieee80211_rx_status *rx_status)
+{
+	struct ieee80211_rx_status status;
+	struct ieee80211_sta *sta;
+	u32 uid;
 
-	while (likely(*budget)) {
-		*budget -= 1;
-		mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
-		if (unlikely(!mon_dst_desc))
-			break;
+	if (ar->ab->stats_disable || !g_bonded_interface_model ||
+	    !test_bit(ATH12K_FLAG_PPE_DS_ENABLED, &ar->ab->dev_flags))
+		return;
 
-		cookie = le32_to_cpu(mon_dst_desc->cookie);
-		buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+	memset(&status, 0 , sizeof(status));
 
-		spin_lock_bh(&buf_ring->idr_lock);
-		skb = idr_remove(&buf_ring->bufs_idr, buf_id);
-		spin_unlock_bh(&buf_ring->idr_lock);
+	lockdep_assert_held(&ar->ab->base_lock);
+	if (arsta) { // SU stats
+		if (arsta->arvif && (arsta->arvif->ppe_vp_num == -1 ||
+		    (arsta->arvif->ahvif &&
+		     hweight16(arsta->arvif->ahvif->links_map) <= 1)))
+			return;
 
-		if (unlikely(!skb)) {
-			ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
-				    buf_id);
-			goto move_next;
+		if (!rx_status) {
+			ath12k_dp_mon_fill_rx_stats(ar, ppdu_info, &status);
+			rx_status = &status;
 		}
 
-		rxcb = ATH12K_SKB_RXCB(skb);
-		dma_unmap_single(ab->dev, rxcb->paddr,
-				 skb->len + skb_tailroom(skb),
-				 DMA_FROM_DEVICE);
-
-		pmon->dest_skb_q[dest_idx] = skb;
-		dest_idx++;
-		ppdu_id = le32_to_cpu(mon_dst_desc->ppdu_id);
-		end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
-					    HAL_MON_DEST_INFO0_END_OF_PPDU);
-		if (!end_of_ppdu)
-			continue;
+		sta = container_of((void *)arsta->ahsta, struct ieee80211_sta, drv_priv);
+		ieee80211_rx_update_stats(ar->ah->hw, sta, arsta->link_id,
+					  ppdu_info->mpdu_len, rx_status);
+		return;
+	}
 
-		for (i = 0; i < dest_idx; i++) {
-			skb = pmon->dest_skb_q[i];
+	for (uid = 0; uid < ppdu_info->num_users; uid++) { // MU stats
+		struct ath12k_peer *peer;
 
-			if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
-				ath12k_dp_mon_rx_parse_mon_status(ar, pmon, mac_id,
-								  skb, napi);
-			else
-				ath12k_dp_mon_tx_parse_mon_status(ar, pmon, mac_id,
-								  skb, napi, ppdu_id);
+		if (uid == HAL_MAX_UL_MU_USERS)
+			break;
 
-			peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
+		if (ppdu_info->peer_id == HAL_INVALID_PEERID)
+			return;
+		peer = ath12k_peer_find_by_id(ar->ab, ppdu_info->peer_id);
 
-			if (!peer || !peer->sta) {
-				ath12k_dbg(ab, ATH12K_DBG_DATA,
-					   "failed to find the peer with peer_id %d\n",
+		if (!peer) {
+			ath12k_warn(ar->ab, "peer with peer id %d can't be found\n",
 					   ppdu_info->peer_id);
-				dev_kfree_skb_any(skb);
 				continue;
 			}
 
-			dev_kfree_skb_any(skb);
-			pmon->dest_skb_q[i] = NULL;
-		}
-
-		dest_idx = 0;
-move_next:
-		ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
-		ath12k_hal_srng_src_get_next_entry(ab, srng);
-		num_buffs_reaped++;
-	}
-
-	ath12k_hal_srng_access_end(ab, srng);
-	spin_unlock_bh(&srng->lock);
-
-	return num_buffs_reaped;
+		arsta = ath12k_peer_get_link_sta(ar->ab, peer);
+		if (!arsta) {
+			ath12k_warn(ar->ab, "link sta not found on peer %pM id %d\n",
+				    peer->addr, peer->peer_id);
+			continue;
 }
 
-static void
-ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_stats,
-					      struct hal_rx_mon_ppdu_info *ppdu_info,
-					      struct hal_rx_user_status *user_stats,
-					      u32 num_msdu)
-{
-	u32 rate_idx = 0;
-	u32 mcs_idx = (user_stats) ? user_stats->mcs : ppdu_info->mcs;
-	u32 nss_idx = (user_stats) ? user_stats->nss - 1 : ppdu_info->nss - 1;
-	u32 bw_idx = ppdu_info->bw;
-	u32 gi_idx = ppdu_info->gi;
+		if (arsta->arvif && (arsta->arvif->ppe_vp_num == -1 ||
+		    (arsta->arvif->ahvif &&
+		     hweight16(arsta->arvif->ahvif->links_map) <= 1)))
+			continue;
 
-	if ((mcs_idx > HAL_RX_MAX_MCS_HE) || (nss_idx >= HAL_RX_MAX_NSS) ||
-	    (bw_idx >= HAL_RX_BW_MAX) || (gi_idx >= HAL_RX_GI_MAX)) {
-		return;
+		if (!rx_status) {
+			ath12k_dp_mon_fill_rx_stats(ar, ppdu_info, &status);
+			rx_status = &status;
 	}
 
-	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11N ||
-	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AC) {
-		rate_idx = mcs_idx * 8 + 8 * 10 * nss_idx;
-		rate_idx += bw_idx * 2 + gi_idx;
-	} else if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX) {
-		gi_idx = ath12k_he_gi_to_nl80211_he_gi(ppdu_info->gi);
-		rate_idx = mcs_idx * 12 + 12 * 12 * nss_idx;
-		rate_idx += bw_idx * 3 + gi_idx;
-	} else {
-		return;
+		sta = container_of((void *)arsta->ahsta, struct ieee80211_sta, drv_priv);
+		ieee80211_rx_update_stats(ar->ah->hw, sta, arsta->link_id,
+					  ppdu_info->mpdu_len, rx_status);
 	}
-
-	rx_stats->pkt_stats.rx_rate[rate_idx] += num_msdu;
-	if (user_stats)
-		rx_stats->byte_stats.rx_rate[rate_idx] += user_stats->mpdu_ok_byte_count;
-	else
-		rx_stats->byte_stats.rx_rate[rate_idx] += ppdu_info->mpdu_len;
 }
+#endif /* CONFIG_ATH12K_BONDED_DS_SUPPORT */
 
 static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
-						  struct ath12k_sta *arsta,
+						  struct ath12k_link_sta *arsta,
 						  struct hal_rx_mon_ppdu_info *ppdu_info)
 {
 	struct ath12k_rx_peer_stats *rx_stats = arsta->rx_stats;
 	u32 num_msdu;
+	u32 bw_offset;
 
-	if (!rx_stats)
-		return;
-
+	arsta->last_tx_pkt_bw = ppdu_info->bw;
+	bw_offset = arsta->last_tx_pkt_bw * 3;
 	arsta->rssi_comb = ppdu_info->rssi_comb;
+	ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb + bw_offset);
+
+	if (!ath12k_debugfs_is_extd_rx_stats_enabled(ar) || !rx_stats)
+		return;
 
 	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
 		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
@@ -2285,6 +3509,12 @@
 		rx_stats->byte_stats.he_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
 	}
 
+	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11BE &&
+	    ppdu_info->mcs <= HAL_RX_MAX_MCS_BE) {
+		rx_stats->pkt_stats.be_mcs_count[ppdu_info->mcs] += num_msdu;
+		rx_stats->byte_stats.be_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
+	}
+
 	if ((ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
 	     ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) &&
 	     ppdu_info->rate < HAL_RX_LEGACY_RATE_INVALID) {
@@ -2359,26 +3589,31 @@
 				   struct hal_rx_mon_ppdu_info *ppdu_info,
 				   u32 uid)
 {
-	struct ath12k_sta *arsta = NULL;
+	struct ath12k_link_sta *arsta;
 	struct ath12k_rx_peer_stats *rx_stats = NULL;
 	struct hal_rx_user_status *user_stats = &ppdu_info->userstats[uid];
 	struct ath12k_peer *peer;
 	u32 num_msdu;
 
-	if (user_stats->ast_index == 0 || user_stats->ast_index == 0xFFFF)
+	if (ppdu_info->peer_id == HAL_INVALID_PEERID)
 		return;
 
-	peer = ath12k_peer_find_by_ast(ar->ab, user_stats->ast_index);
+	peer = ath12k_peer_find_by_id(ar->ab, ppdu_info->peer_id);
 
 	if (!peer) {
-		ath12k_warn(ar->ab, "peer ast idx %d can't be found\n",
-			    user_stats->ast_index);
+		ath12k_warn(ar->ab, "peer with peer id %d can't be found\n",
+			    ppdu_info->peer_id);
 		return;
 	}
 
-	arsta = (struct ath12k_sta *)peer->sta->drv_priv;
-	rx_stats = arsta->rx_stats;
+	arsta = ath12k_peer_get_link_sta(ar->ab, peer);
+	if (!arsta) {
+		ath12k_warn(ar->ab, "link sta not found on peer %pM id %d\n",
+			    peer->addr, peer->peer_id);
+		return;
+	}
 
+	rx_stats = arsta->rx_stats;
 	if (!rx_stats)
 		return;
 
@@ -2461,6 +3696,9 @@
 {
 	u32 num_users, i;
 
+	if (!ath12k_debugfs_is_extd_rx_stats_enabled(ar))
+		return;
+
 	num_users = ppdu_info->num_users;
 	if (num_users > HAL_MAX_UL_MU_USERS)
 		num_users = HAL_MAX_UL_MU_USERS;
@@ -2469,12 +3707,213 @@
 		ath12k_dp_mon_rx_update_user_stats(ar, ppdu_info, i);
 }
 
+static inline void
+ath12k_dp_mon_rx_memset_ppdu_info(struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	int i, len;
+
+	len = (sizeof(struct hal_rx_mon_ppdu_info) -
+	       sizeof(struct hal_rx_mon_cmn_mpdu_info));
+
+	memset(ppdu_info, 0, len);
+	for (i = 0; i < ppdu_info->cmn_mpdu_info.mpdu_count; i++) {
+		ppdu_info->cmn_mpdu_info.mon_mpdu[i].msdu_count = 0;
+		ppdu_info->cmn_mpdu_info.mon_mpdu[i].head_msdu = NULL;
+		ppdu_info->cmn_mpdu_info.mon_mpdu[i].tail_msdu = NULL;
+	}
+	ppdu_info->cmn_mpdu_info.mpdu_count = 0;
+}
+
+int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget,
+			       enum dp_monitor_mode dp_mon_mode, struct napi_struct *napi)
+{
+	struct hal_mon_dest_desc *mon_dst_desc;
+	struct ath12k_pdev_dp *pdev_dp = &ar->dp;
+	struct ath12k_mon_data *pmon = pdev_dp->mon_data;
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_dp *dp = &ab->dp;
+	struct sk_buff *skb;
+	struct ath12k_skb_rxcb *rxcb;
+	struct dp_srng *mon_dst_ring;
+	struct hal_srng *srng;
+	struct dp_rxdma_ring *buf_ring;
+	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+	struct sk_buff_head skb_list;
+	struct ath12k_peer *peer = NULL;
+	struct ath12k_link_sta *arsta;
+	u64 cookie;
+	u32 hal_status, end_reason, ppdu_id, end_offset;
+	int num_buffs_reaped = 0, srng_id, buf_id;
+	char buf[64] = {0};
+
+	__skb_queue_head_init(&skb_list);
+
+	srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
+
+	if (dp_mon_mode == ATH12K_DP_RX_MONITOR_MODE) {
+		mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
+		buf_ring = &dp->rxdma_mon_buf_ring;
+	} else {
+		if (!ab->hw_params->supports_tx_monitor)
+			return 0;
+
+		mon_dst_ring = &pdev_dp->tx_mon_dst_ring[srng_id];
+		buf_ring = &dp->tx_mon_buf_ring;
+	}
+
+	srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
+	spin_lock_bh(&srng->lock);
+	ath12k_hal_srng_access_begin(ab, srng);
+
+	while (likely(*budget)) {
+		mon_dst_desc = (struct hal_mon_dest_desc *)
+				ath12k_hal_srng_dst_peek(ab, srng);
+		if (unlikely(!mon_dst_desc))
+			break;
+
+		if (u32_get_bits(mon_dst_desc->info0, HAL_MON_DEST_INFO0_EMPTY_DESC)) {
+			ab->soc_stats.mon_drop_desc++;
+			goto move_next;
+		}
+
+		if (unlikely(mon_dst_desc->magic != DP_MON_MAGIC_VALUE)) {
+			scnprintf(buf, sizeof(buf), "invalid mon dest desc in mon srng process\n");
+			ath12k_err_dump(ab, buf, "mon dest desc: ", mon_dst_desc,
+					sizeof(*mon_dst_desc), srng);
+			BUG_ON(1);
+		}
+
+		cookie = mon_dst_desc->cookie;
+		ppdu_id = mon_dst_desc->ppdu_id;
+		buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+		spin_lock_bh(&buf_ring->idr_lock);
+		skb = idr_remove(&buf_ring->bufs_idr, buf_id);
+		spin_unlock_bh(&buf_ring->idr_lock);
+
+		if (unlikely(!skb)) {
+			ath12k_warn(ab, "mon: dest desc with inval buf_id %d\n",
+				    buf_id);
+			goto move_next;
+		}
+
+		rxcb = ATH12K_SKB_RXCB(skb);
+		dma_unmap_single(ab->dev, rxcb->paddr,
+				 skb->len + skb_tailroom(skb),
+				 DMA_FROM_DEVICE);
+		end_reason = u32_get_bits(mon_dst_desc->info0,
+					  HAL_MON_DEST_INFO0_END_REASON);
+		if ((end_reason == HAL_MON_FLUSH_DETECTED) ||
+		    (end_reason == HAL_MON_PPDU_TRUNCATED)) {
+			ath12k_dbg(ab, ATH12K_DBG_DATA,
+				   "Monitor dest descriptor end reason %d", end_reason);
+			dev_kfree_skb_any(skb);
+			goto move_next;
+		}
+
+		if (end_reason == HAL_MON_END_OF_PPDU) {
+			*budget -= 1;
+			rxcb->is_end_of_ppdu = true;
+		}
+
+		end_offset = u32_get_bits(mon_dst_desc->info0,
+					  HAL_MON_DEST_INFO0_END_OFFSET);
+
+		if (likely(end_offset < DP_RX_BUFFER_SIZE)) {
+			skb_put(skb, end_offset);
+		} else {
+			ath12k_warn(ab, "invalid offset received from mon dest %u\n",
+				    end_offset);
+			skb_put(skb, DP_RX_BUFFER_SIZE);
+		}
+
+		__skb_queue_tail(&skb_list, skb);
+move_next:
+		ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+		ath12k_hal_srng_dst_get_next_entry(ab, srng);
+		num_buffs_reaped++;
+	}
+
+	ath12k_hal_srng_access_end(ab, srng);
+	spin_unlock_bh(&srng->lock);
+
+	if (!num_buffs_reaped)
+		return 0;
+
+	if (!ppdu_info->ppdu_continuation)
+		ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info);
+
+	while ((skb = __skb_dequeue(&skb_list))) {
+		if (dp_mon_mode == ATH12K_DP_RX_MONITOR_MODE)
+			hal_status =
+				ath12k_dp_mon_rx_parse_mon_status(ar, ppdu_info, mac_id,
+								  skb, napi);
+		else
+			hal_status =
+				ath12k_dp_mon_tx_parse_mon_status(ar, pmon, mac_id,
+								  skb, napi, ppdu_id);
+
+		if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+			ppdu_info->ppdu_continuation = true;
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+
+		if (ppdu_info->peer_id == HAL_INVALID_PEERID)
+			goto free_skb;
+
+		rcu_read_lock();
+		spin_lock_bh(&ab->base_lock);
+		peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
+		if (!peer || !peer->sta) {
+			ath12k_dbg(ab, ATH12K_DBG_DATA,
+				   "failed to find the peer with link id %d peer_id %d\n",
+				   peer ? peer->link_id : -1, ppdu_info->peer_id);
+			goto next_skb;
+		}
+
+		if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
+			arsta = ath12k_peer_get_link_sta(ar->ab, peer);
+			if (!arsta) {
+				ath12k_warn(ar->ab, "link sta not found on peer %pM id %d\n",
+					    peer->addr, peer->peer_id);
+				goto next_skb;
+			}
+
+			ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta,
+							      ppdu_info);
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+			ath12k_dp_mon_rx_update_peer_stats_bonded(ar, arsta, ppdu_info,
+								  &ar->dp.rx_status);
+#endif /* CONFIG_ATH12K_BONDED_DS_SUPPORT */
+		} else if ((ppdu_info->fc_valid) &&
+			   (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) {
+			ath12k_dp_mon_rx_process_ulofdma(ppdu_info);
+			ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info);
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+			ath12k_dp_mon_rx_update_peer_stats_bonded(ar, NULL, ppdu_info,
+								  &ar->dp.rx_status);
+#endif /* CONFIG_ATH12K_BONDED_DS_SUPPORT */
+		}
+
+next_skb:
+		spin_unlock_bh(&ab->base_lock);
+		rcu_read_unlock();
+free_skb:
+		dev_kfree_skb_any(skb);
+		ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info);
+		ppdu_info->peer_id = HAL_INVALID_PEERID;
+	}
+
+	return num_buffs_reaped;
+}
+
 int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
 				   struct napi_struct *napi, int *budget)
 {
 	struct ath12k_base *ab = ar->ab;
 	struct ath12k_pdev_dp *pdev_dp = &ar->dp;
-	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data;
+	struct ath12k_mon_data *pmon = pdev_dp->mon_data;
 	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
 	struct ath12k_dp *dp = &ab->dp;
 	struct hal_mon_dest_desc *mon_dst_desc;
@@ -2483,13 +3922,17 @@
 	struct dp_srng *mon_dst_ring;
 	struct hal_srng *srng;
 	struct dp_rxdma_ring *buf_ring;
-	struct ath12k_sta *arsta = NULL;
+	struct ath12k_link_sta *arsta = NULL;
 	struct ath12k_peer *peer;
+	struct sk_buff_head skb_list;
+	struct ath12k_neighbor_peer *nrp, *tmp;
 	u64 cookie;
 	int num_buffs_reaped = 0, srng_id, buf_id;
-	u8 dest_idx = 0, i;
-	bool end_of_ppdu;
-	u32 hal_status;
+	u32 hal_status, end_reason, end_offset, rx_buf_sz;
+	char buf[64] = {0};
+	u16 log_type = 0;
+
+	__skb_queue_head_init(&skb_list);
 
 	srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
 	mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
@@ -2500,10 +3943,22 @@
 	ath12k_hal_srng_access_begin(ab, srng);
 
 	while (likely(*budget)) {
-		*budget -= 1;
 		mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
 		if (unlikely(!mon_dst_desc))
 			break;
+
+		if (u32_get_bits(mon_dst_desc->info0, HAL_MON_DEST_INFO0_EMPTY_DESC)) {
+			ab->soc_stats.mon_drop_desc++;
+			goto move_next;
+		}
+
+		if (unlikely(mon_dst_desc->magic != DP_MON_MAGIC_VALUE)) {
+			scnprintf(buf, sizeof(buf), "invalid mon dest desc in mon stats process\n");
+			ath12k_err_dump(ab, buf, "mon dest desc: ", mon_dst_desc,
+					sizeof(*mon_dst_desc), srng);
+			BUG_ON(1);
+		}
+
 		cookie = le32_to_cpu(mon_dst_desc->cookie);
 		buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
 
@@ -2512,7 +3967,7 @@
 		spin_unlock_bh(&buf_ring->idr_lock);
 
 		if (unlikely(!skb)) {
-			ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
+			ath12k_warn(ab, "mon stats: dest ring with inval buf_id %d\n",
 				    buf_id);
 			goto move_next;
 		}
@@ -2521,62 +3976,132 @@
 		dma_unmap_single(ab->dev, rxcb->paddr,
 				 skb->len + skb_tailroom(skb),
 				 DMA_FROM_DEVICE);
-		pmon->dest_skb_q[dest_idx] = skb;
-		dest_idx++;
-		end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
-					    HAL_MON_DEST_INFO0_END_OF_PPDU);
-		if (!end_of_ppdu)
-			continue;
+		end_reason = u32_get_bits(mon_dst_desc->info0,
+					  HAL_MON_DEST_INFO0_END_REASON);
+		if ((end_reason == HAL_MON_FLUSH_DETECTED) ||
+		    (end_reason == HAL_MON_PPDU_TRUNCATED)) {
+			ath12k_dbg(ab, ATH12K_DBG_DATA,
+				   "Monitor dest descriptor end reason %d", end_reason);
+			dev_kfree_skb_any(skb);
+			goto move_next;
+		}
 
-		for (i = 0; i < dest_idx; i++) {
-			skb = pmon->dest_skb_q[i];
-			hal_status = ath12k_dp_mon_parse_rx_dest(ab, pmon, skb);
+		if (end_reason == HAL_MON_END_OF_PPDU)
+			*budget -= 1;
 
-			if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
-			    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+		end_offset = u32_get_bits(mon_dst_desc->info0,
+					  HAL_MON_DEST_INFO0_END_OFFSET);
+
+		if (likely(end_offset < DP_RX_BUFFER_SIZE)) {
+			skb_put(skb, end_offset);
+		} else {
+			ath12k_warn(ab, "invalid offset received on mon stats dest %u\n",
+				    end_offset);
+			skb_put(skb, DP_RX_BUFFER_SIZE);
+		}
+
+		__skb_queue_tail(&skb_list, skb);
+move_next:
+		ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+		ath12k_hal_srng_dst_get_next_entry(ab, srng);
+		num_buffs_reaped++;
+	}
+
+	ath12k_hal_srng_access_end(ab, srng);
+	spin_unlock_bh(&srng->lock);
+	if (!num_buffs_reaped)
+		return 0;
+
+	if (!ppdu_info->ppdu_continuation)
+		ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info);
+
+	while ((skb = __skb_dequeue(&skb_list))) {
+		if (ath12k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
+			log_type = ATH12K_PKTLOG_TYPE_LITE_RX;
+			rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
+		} else if (ath12k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
+			log_type = ATH12K_PKTLOG_TYPE_RX_STATBUF;
+			rx_buf_sz = DP_RX_BUFFER_SIZE;
+			ath12k_rx_stats_buf_pktlog_process(ar, skb->data, log_type,
+			                                   rx_buf_sz);
+		}
+		if (log_type)
+			trace_ath12k_htt_rxdesc(ar, skb->data, log_type,
+			                        rx_buf_sz);
+
+		hal_status = ath12k_dp_mon_parse_rx_dest(ar, ppdu_info, skb);
+		if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+			ppdu_info->ppdu_continuation = true;
 				dev_kfree_skb_any(skb);
 				continue;
 			}
 
+		if (ppdu_info->peer_id == HAL_INVALID_PEERID)
+			goto free_skb;
+
 			rcu_read_lock();
 			spin_lock_bh(&ab->base_lock);
 			peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
+		if (!list_empty(&ab->neighbor_peers)) {
+			if (peer && !peer->sta) {
+				list_for_each_entry_safe(nrp, tmp, &ab->neighbor_peers, list) {
+					if (nrp->is_filter_on && ether_addr_equal(nrp->addr, peer->addr)) {
+						nrp->rssi = ppdu_info->rssi_comb +
+							    ar->rssi_offsets.rssi_offset;
+						nrp->timestamp = ktime_to_ms(ktime_get_real());
+						complete(&nrp->filter_done);
+					}
+				}
+				goto next_skb;
+			}
+		}
+
 			if (!peer || !peer->sta) {
 				ath12k_dbg(ab, ATH12K_DBG_DATA,
-					   "failed to find the peer with peer_id %d\n",
-					   ppdu_info->peer_id);
-				spin_unlock_bh(&ab->base_lock);
-				rcu_read_unlock();
-				dev_kfree_skb_any(skb);
-				continue;
+				   "failed to find the peer with monitor link id %d peer_id %d\n",
+				   peer ? peer->link_id : -1, ppdu_info->peer_id);
+			goto next_skb;
 			}
 
 			if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
-				arsta = (struct ath12k_sta *)peer->sta->drv_priv;
+			arsta = ath12k_peer_get_link_sta(ar->ab, peer);
+			if (!arsta) {
+				ath12k_warn(ab, "link sta not found on peer %pM id %d\n",
+					    peer->addr, peer->peer_id);
+				goto next_skb;
+			}
+
 				ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta,
 								      ppdu_info);
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+			ath12k_dp_mon_rx_update_peer_stats_bonded(ar, arsta, ppdu_info,
+								  NULL);
+#endif /* CONFIG_ATH12K_BONDED_DS_SUPPORT */
 			} else if ((ppdu_info->fc_valid) &&
 				   (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) {
 				ath12k_dp_mon_rx_process_ulofdma(ppdu_info);
 				ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info);
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+			ath12k_dp_mon_rx_update_peer_stats_bonded(ar, NULL, ppdu_info,
+								  NULL);
+#endif /* CONFIG_ATH12K_BONDED_DS_SUPPORT */
+		}
+
+		if (ath12k_debugfs_is_pktlog_peer_valid(ar, peer->addr)) {
+			trace_ath12k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+			ath12k_rx_stats_buf_pktlog_process(ar, skb->data, log_type,
+			                                   rx_buf_sz);
 			}
 
+next_skb:
 			spin_unlock_bh(&ab->base_lock);
 			rcu_read_unlock();
+free_skb:
 			dev_kfree_skb_any(skb);
-			memset(ppdu_info, 0, sizeof(*ppdu_info));
+		ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info);
 			ppdu_info->peer_id = HAL_INVALID_PEERID;
 		}
 
-		dest_idx = 0;
-move_next:
-		ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
-		ath12k_hal_srng_src_get_next_entry(ab, srng);
-		num_buffs_reaped++;
-	}
-
-	ath12k_hal_srng_access_end(ab, srng);
-	spin_unlock_bh(&srng->lock);
 	return num_buffs_reaped;
 }
 
@@ -2587,8 +4112,9 @@
 	struct ath12k *ar = ath12k_ab_to_ar(ab, mac_id);
 	int num_buffs_reaped = 0;
 
-	if (!ar->monitor_started)
-		ath12k_dp_mon_rx_process_stats(ar, mac_id, napi, &budget);
+	if (!test_bit(MONITOR_VDEV_STARTED, &ar->monitor_flags))
+		num_buffs_reaped = ath12k_dp_mon_rx_process_stats(ar, mac_id,
+								  napi, &budget);
 	else
 		num_buffs_reaped = ath12k_dp_mon_srng_process(ar, mac_id, &budget,
 							      monitor_mode, napi);
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/dp_mon.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dp_mon.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/dp_mon.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dp_mon.h	2024-01-19 17:01:19.861846921 +0100
@@ -8,12 +8,17 @@
 #define ATH12K_DP_MON_H
 
 #include "core.h"
+#include "debugfs_htt_stats.h"
 
 enum dp_monitor_mode {
 	ATH12K_DP_TX_MONITOR_MODE,
 	ATH12K_DP_RX_MONITOR_MODE
 };
 
+#define ATH12K_WIFIRX_DOT11_OFFSET	5
+
+#define DP_MON_MAGIC_VALUE	0xDECAFEED
+
 enum dp_mon_tx_ppdu_info_type {
 	DP_MON_TX_PROT_PPDU_INFO,
 	DP_MON_TX_DATA_PPDU_INFO
@@ -76,7 +81,7 @@
 
 enum hal_rx_mon_status
 ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
-				  struct ath12k_mon_data *pmon,
+				  struct hal_rx_mon_ppdu_info *ppdu_info,
 				  int mac_id, struct sk_buff *skb,
 				  struct napi_struct *napi);
 int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/dp_rx.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dp_rx.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/dp_rx.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dp_rx.c	2024-03-18 14:40:14.851741333 +0100
@@ -1,15 +1,18 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/ieee80211.h>
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
 #include <crypto/hash.h>
+#include <asm/cacheflush.h>
 #include "core.h"
 #include "debug.h"
+#include "debugfs_htt_stats.h"
+#include "debugfs_sta.h"
 #include "hal_desc.h"
 #include "hw.h"
 #include "dp_rx.h"
@@ -17,9 +20,19 @@
 #include "dp_tx.h"
 #include "peer.h"
 #include "dp_mon.h"
+#include "hif.h"
+#include "sawf.h"
+#ifdef CONFIG_MAC80211_PPE_SUPPORT
+#include <ppe_vp_public.h>
+#include <ppe_vp_tx.h>
+#endif
+
 
 #define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
 
+static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
+				      enum hal_reo_cmd_status status);
+
 static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
 						    struct hal_rx_desc *desc)
 {
@@ -83,18 +96,6 @@
 	return ab->hw_params->hal_ops->dp_rx_h_msdu_done(desc);
 }
 
-static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
-					 struct hal_rx_desc *desc)
-{
-	return ab->hw_params->hal_ops->dp_rx_h_l4_cksum_fail(desc);
-}
-
-static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
-					 struct hal_rx_desc *desc)
-{
-	return ab->hw_params->hal_ops->dp_rx_h_ip_cksum_fail(desc);
-}
-
 static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
 					struct hal_rx_desc *desc)
 {
@@ -113,42 +114,6 @@
 	return ab->hw_params->hal_ops->rx_desc_get_msdu_len(desc);
 }
 
-static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
-			     struct hal_rx_desc *desc)
-{
-	return ab->hw_params->hal_ops->rx_desc_get_msdu_sgi(desc);
-}
-
-static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
-				  struct hal_rx_desc *desc)
-{
-	return ab->hw_params->hal_ops->rx_desc_get_msdu_rate_mcs(desc);
-}
-
-static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
-			       struct hal_rx_desc *desc)
-{
-	return ab->hw_params->hal_ops->rx_desc_get_msdu_rx_bw(desc);
-}
-
-static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
-			       struct hal_rx_desc *desc)
-{
-	return ab->hw_params->hal_ops->rx_desc_get_msdu_freq(desc);
-}
-
-static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
-				  struct hal_rx_desc *desc)
-{
-	return ab->hw_params->hal_ops->rx_desc_get_msdu_pkt_type(desc);
-}
-
-static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
-			     struct hal_rx_desc *desc)
-{
-	return hweight8(ab->hw_params->hal_ops->rx_desc_get_msdu_nss(desc));
-}
-
 static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
 			     struct hal_rx_desc *desc)
 {
@@ -193,17 +158,11 @@
 	ab->hw_params->hal_ops->rx_desc_set_msdu_len(desc, len);
 }
 
-static bool ath12k_dp_rx_h_is_mcbc(struct ath12k_base *ab,
-				   struct hal_rx_desc *desc)
-{
-	return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
-		ab->hw_params->hal_ops->rx_desc_is_mcbc(desc));
-}
-
 static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
 					     struct hal_rx_desc *desc)
 {
-	return ab->hw_params->hal_ops->rx_desc_mac_addr2_valid(desc);
+	return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
+		ab->hw_params->hal_ops->rx_desc_mac_addr2_valid(desc));
 }
 
 static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
@@ -227,10 +186,20 @@
 	ab->hw_params->hal_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
 }
 
-static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
+static inline u8 ath12k_dp_rx_h_msdu_end_ip_valid(struct ath12k_base *ab,
+                                                    struct hal_rx_desc *desc)
+{
+       return ab->hw_params->hal_ops->rx_desc_get_ip_valid(desc);
+}
+
+static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
 						struct hal_rx_desc *desc)
 {
-	return ab->hw_params->hal_ops->rx_desc_get_mpdu_frame_ctl(desc);
+	/* If the ops unassigned, return the first pdev idx */
+	if (!ab->hw_params->hal_ops->rx_desc_get_msdu_src_link_id)
+		return 0;
+
+	return ab->hw_params->hal_ops->rx_desc_get_msdu_src_link_id(desc);
 }
 
 static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
@@ -255,23 +224,70 @@
 	return -ETIMEDOUT;
 }
 
+static size_t ath12k_dp_list_cut_nodes(struct list_head *list,
+				       struct list_head *head,
+				       size_t count)
+{
+	struct list_head *cur;
+	struct ath12k_rx_desc_info *rx_desc;
+	size_t nodes = 0;
+
+	if (!count) {
+		INIT_LIST_HEAD(list);
+		goto out;
+	}
+
+	list_for_each(cur, head) {
+		if (!count)
+			break;
+
+		rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list);
+		rx_desc->in_use = true;
+
+		count--;
+		nodes++;
+	}
+
+	list_cut_before(list, head, cur);
+out:
+	return nodes;
+}
+
+static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp,
+				      struct list_head *used_list)
+{
+	struct list_head *cur;
+	struct ath12k_rx_desc_info *rx_desc;
+
+	spin_lock_bh(&dp->rx_desc_lock);
+
+	/* Reset the use flag */
+	list_for_each(cur, used_list) {
+		rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list);
+		rx_desc->in_use = false;
+	}
+
+	list_splice_tail(used_list, &dp->rx_desc_free_list);
+
+	spin_unlock_bh(&dp->rx_desc_lock);
+}
+
 /* Returns number of Rx buffers replenished */
-int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id,
+int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
 				struct dp_rxdma_ring *rx_ring,
-				int req_entries,
-				enum hal_rx_buf_return_buf_manager mgr,
-				bool hw_cc)
+				struct list_head *used_list,
+				int req_entries)
 {
 	struct ath12k_buffer_addr *desc;
 	struct hal_srng *srng;
 	struct sk_buff *skb;
 	int num_free;
 	int num_remain;
-	int buf_id;
 	u32 cookie;
 	dma_addr_t paddr;
 	struct ath12k_dp *dp = &ab->dp;
 	struct ath12k_rx_desc_info *rx_desc;
+	enum hal_rx_buf_return_buf_manager mgr = ab->hw_params->hal_params->rx_buf_rbm;
 
 	req_entries = min(req_entries, rx_ring->bufs_max);
 
@@ -288,10 +304,23 @@
 	req_entries = min(num_free, req_entries);
 	num_remain = req_entries;
 
+	if (!num_remain)
+		goto skip_replenish;
+
+	/* Get the descriptor from free list */
+	if (list_empty(used_list)) {
+		spin_lock_bh(&dp->rx_desc_lock);
+		req_entries = ath12k_dp_list_cut_nodes(used_list,
+						       &dp->rx_desc_free_list,
+						       num_remain);
+		spin_unlock_bh(&dp->rx_desc_lock);
+		num_remain = req_entries;
+	}
+
 	while (num_remain > 0) {
 		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
 				    DP_RX_BUFFER_ALIGN_SIZE);
-		if (!skb)
+		if (unlikely(!skb))
 			break;
 
 		if (!IS_ALIGNED((unsigned long)skb->data,
@@ -301,52 +330,27 @@
 				 skb->data);
 		}
 
-		paddr = dma_map_single(ab->dev, skb->data,
+		paddr = dma_map_single_attrs(ab->dev, skb->data,
 				       skb->len + skb_tailroom(skb),
-				       DMA_FROM_DEVICE);
+					     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 		if (dma_mapping_error(ab->dev, paddr))
 			goto fail_free_skb;
 
-		if (hw_cc) {
-			spin_lock_bh(&dp->rx_desc_lock);
-
-			/* Get desc from free list and store in used list
-			 * for cleanup purposes
-			 *
-			 * TODO: pass the removed descs rather than
-			 * add/read to optimize
-			 */
-			rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,
+		rx_desc = list_first_entry_or_null(used_list,
 							   struct ath12k_rx_desc_info,
 							   list);
-			if (!rx_desc) {
-				spin_unlock_bh(&dp->rx_desc_lock);
+		if (!rx_desc)
 				goto fail_dma_unmap;
-			}
 
 			rx_desc->skb = skb;
+		rx_desc->paddr = paddr;
 			cookie = rx_desc->cookie;
-			list_del(&rx_desc->list);
-			list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);
-
-			spin_unlock_bh(&dp->rx_desc_lock);
-		} else {
-			spin_lock_bh(&rx_ring->idr_lock);
-			buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
-					   rx_ring->bufs_max * 3, GFP_ATOMIC);
-			spin_unlock_bh(&rx_ring->idr_lock);
-			if (buf_id < 0)
-				goto fail_dma_unmap;
-			cookie = u32_encode_bits(mac_id,
-						 DP_RXDMA_BUF_COOKIE_PDEV_ID) |
-				 u32_encode_bits(buf_id,
-						 DP_RXDMA_BUF_COOKIE_BUF_ID);
-		}
 
 		desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
 		if (!desc)
-			goto fail_buf_unassign;
+			goto fail_dma_unmap;
 
+		list_del(&rx_desc->list);
 		ATH12K_SKB_RXCB(skb)->paddr = paddr;
 
 		num_remain--;
@@ -354,32 +358,27 @@
 		ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
 	}
 
+skip_replenish:
 	ath12k_hal_srng_access_end(ab, srng);
 
+	if (!list_empty(used_list))
+		ath12k_dp_rx_enqueue_free(dp, used_list);
+
 	spin_unlock_bh(&srng->lock);
 
 	return req_entries - num_remain;
 
-fail_buf_unassign:
-	if (hw_cc) {
-		spin_lock_bh(&dp->rx_desc_lock);
-		list_del(&rx_desc->list);
-		list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);
-		rx_desc->skb = NULL;
-		spin_unlock_bh(&dp->rx_desc_lock);
-	} else {
-		spin_lock_bh(&rx_ring->idr_lock);
-		idr_remove(&rx_ring->bufs_idr, buf_id);
-		spin_unlock_bh(&rx_ring->idr_lock);
-	}
 fail_dma_unmap:
-	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
-			 DMA_FROM_DEVICE);
+	dma_unmap_single_attrs(ab->dev, paddr, skb->len + skb_tailroom(skb),
+			       DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 fail_free_skb:
 	dev_kfree_skb_any(skb);
 
 	ath12k_hal_srng_access_end(ab, srng);
 
+	if (!list_empty(used_list))
+		ath12k_dp_rx_enqueue_free(dp, used_list);
+
 	spin_unlock_bh(&srng->lock);
 
 	return req_entries - num_remain;
@@ -418,8 +417,10 @@
 	rx_ring = &dp->rxdma_mon_buf_ring;
 	ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);
 
+	if (ab->hw_params->supports_tx_monitor) {
 	rx_ring = &dp->tx_mon_buf_ring;
 	ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);
+	}
 
 	return 0;
 }
@@ -428,6 +429,7 @@
 					  struct dp_rxdma_ring *rx_ring,
 					  u32 ringtype)
 {
+	LIST_HEAD(list);
 	int num_entries;
 
 	num_entries = rx_ring->refill_buf_ring.size /
@@ -437,9 +439,7 @@
 	if ((ringtype == HAL_RXDMA_MONITOR_BUF) || (ringtype == HAL_TX_MONITOR_BUF))
 		ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
 	else
-		ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_entries,
-					    ab->hw_params->hal_params->rx_buf_rbm,
-					    ringtype == HAL_RXDMA_BUF);
+		ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
 	return 0;
 }
 
@@ -467,6 +467,7 @@
 			return ret;
 		}
 
+		if (ab->hw_params->supports_tx_monitor) {
 		rx_ring = &dp->tx_mon_buf_ring;
 		ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
 						     HAL_TX_MONITOR_BUF);
@@ -476,6 +477,7 @@
 			return ret;
 		}
 	}
+	}
 
 	return 0;
 }
@@ -488,6 +490,7 @@
 
 	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
 		ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
+		if (ab->hw_params->supports_tx_monitor)
 		ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]);
 	}
 }
@@ -545,6 +548,7 @@
 			return ret;
 		}
 
+		if (ab->hw_params->supports_tx_monitor) {
 		ret = ath12k_dp_srng_setup(ar->ab,
 					   &dp->tx_mon_dst_ring[i],
 					   HAL_TX_MONITOR_DST,
@@ -556,50 +560,56 @@
 			return ret;
 		}
 	}
+	}
 
 	return 0;
 }
 
-void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
+static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
 {
+	struct ath12k_reo_queue_ref *qref;
 	struct ath12k_dp *dp = &ab->dp;
-	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
-	struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;
+	bool ml_peer = false;
 
-	spin_lock_bh(&dp->reo_cmd_lock);
-	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
-		list_del(&cmd->list);
-		dma_unmap_single(ab->dev, cmd->data.paddr,
-				 cmd->data.size, DMA_BIDIRECTIONAL);
-		kfree(cmd->data.vaddr);
-		kfree(cmd);
-	}
+	if (!ab->hw_params->reoq_lut_support)
+		return;
 
-	list_for_each_entry_safe(cmd_cache, tmp_cache,
-				 &dp->reo_cmd_cache_flush_list, list) {
-		list_del(&cmd_cache->list);
-		dp->reo_cmd_cache_flush_count--;
-		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
-				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
-		kfree(cmd_cache->data.vaddr);
-		kfree(cmd_cache);
+	if (peer_id & ATH12K_ML_PEER_ID_VALID) {
+		peer_id &= ~ATH12K_ML_PEER_ID_VALID;
+		ml_peer = true;
 	}
-	spin_unlock_bh(&dp->reo_cmd_lock);
+
+	if (ml_peer)
+		qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+				(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+	else
+		qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+				(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+
+	qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
+	qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR);
 }
 
-static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
-				   enum hal_reo_cmd_status status)
+void ath12k_dp_tid_cleanup(struct ath12k_base *ab)
 {
-	struct ath12k_dp_rx_tid *rx_tid = ctx;
-
-	if (status != HAL_REO_CMD_SUCCESS)
-		ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
-			    rx_tid->tid, status);
+	struct ath12k_peer *peer;
+	struct ath12k_dp_rx_tid *rx_tid;
+	int tid;
+	void *vaddr;
+	u32 *addr_aligned;
 
-	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
-			 DMA_BIDIRECTIONAL);
-	kfree(rx_tid->vaddr);
-	rx_tid->vaddr = NULL;
+	spin_lock_bh(&ab->base_lock);
+	list_for_each_entry(peer, &ab->peers, list) {
+		for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
+			rx_tid = &peer->rx_tid[tid];
+			if (rx_tid->active) {
+				vaddr = rx_tid->vaddr;
+				addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
+				ath12k_dp_reset_rx_reo_tid_q(addr_aligned, rx_tid->ba_win_sz, tid);
+			}
+		}
+	}
+	spin_unlock_bh(&ab->base_lock);
 }
 
 static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
@@ -613,6 +623,10 @@
 	struct hal_srng *cmd_ring;
 	int cmd_num;
 
+	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags) ||
+	    test_bit(ATH12K_FLAG_UMAC_PRERESET_START, &ab->dev_flags))
+		return -ESHUTDOWN;
+
 	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
 	cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
 
@@ -647,54 +661,137 @@
 	return 0;
 }
 
-static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
-				      struct ath12k_dp_rx_tid *rx_tid)
+static int ath12k_peer_rx_tid_delete_handler(struct ath12k_base *ab,
+					      struct ath12k_dp_rx_tid *rx_tid, u8 tid)
 {
 	struct ath12k_hal_reo_cmd cmd = {0};
-	unsigned long tot_desc_sz, desc_sz;
-	int ret;
+	struct ath12k_dp *dp = &ab->dp;
 
-	tot_desc_sz = rx_tid->size;
-	desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
+	lockdep_assert_held(&dp->reo_cmd_update_rx_queue_lock);
 
-	while (tot_desc_sz > desc_sz) {
-		tot_desc_sz -= desc_sz;
-		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
+	rx_tid->active = false;
+	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
 		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
-		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
-					     HAL_REO_CMD_FLUSH_CACHE, &cmd,
-					     NULL);
-		if (ret)
-			ath12k_warn(ab,
-				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
-				    rx_tid->tid, ret);
+	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
+	cmd.upd0 |= HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
+	cmd.ba_window_size = (tid == HAL_DESC_REO_NON_QOS_TID) ?
+			      rx_tid->ba_win_sz : DP_BA_WIN_SZ_MAX;
+	cmd.upd1 |= HAL_REO_CMD_UPD1_VLD;
+
+	return ath12k_dp_reo_cmd_send(ab, rx_tid,
+				      HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+				      ath12k_dp_rx_tid_del_func);
+}
+
+void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
+{
+	struct ath12k_dp *dp = &ab->dp;
+	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
+	struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;
+	struct dp_reo_update_rx_queue_elem *cmd_queue, *tmp_queue;
+	struct ath12k_dp_rx_tid *rx_tid;
+
+	spin_lock_bh(&dp->reo_cmd_update_rx_queue_lock);
+	list_for_each_entry_safe(cmd_queue, tmp_queue, &dp->reo_cmd_update_rx_queue_list,
+			list) {
+		list_del(&cmd_queue->list);
+		rx_tid = &cmd_queue->data;
+		if (rx_tid->vaddr) {
+			dma_unmap_single(ab->dev, rx_tid->paddr,
+					 rx_tid->size, DMA_BIDIRECTIONAL);
+			kfree(rx_tid->vaddr);
+			rx_tid->vaddr = NULL;
+		}
+		kfree(cmd_queue);
+	}
+	spin_unlock_bh(&dp->reo_cmd_update_rx_queue_lock);
+
+	spin_lock_bh(&dp->reo_cmd_lock);
+	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+		list_del(&cmd->list);
+		rx_tid = &cmd->data;
+		if (rx_tid->vaddr) {
+			dma_unmap_single(ab->dev, rx_tid->paddr,
+					 rx_tid->size, DMA_BIDIRECTIONAL);
+			kfree(rx_tid->vaddr);
+			rx_tid->vaddr = NULL;
+		}
+		kfree(cmd);
+	}
+
+	list_for_each_entry_safe(cmd_cache, tmp_cache,
+				 &dp->reo_cmd_cache_flush_list, list) {
+		list_del(&cmd_cache->list);
+		dp->reo_cmd_cache_flush_count--;
+		rx_tid = &cmd_cache->data;
+		if (rx_tid->vaddr) {
+			dma_unmap_single(ab->dev, rx_tid->paddr,
+					 rx_tid->size, DMA_BIDIRECTIONAL);
+			kfree(rx_tid->vaddr);
+			rx_tid->vaddr = NULL;
+		}
+		kfree(cmd_cache);
 	}
+	spin_unlock_bh(&dp->reo_cmd_lock);
+}
+
+static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
+				   enum hal_reo_cmd_status status)
+{
+	struct ath12k_dp_rx_tid *rx_tid = ctx;
+
+	if (status != HAL_REO_CMD_SUCCESS)
+		ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
+			    rx_tid->tid, status);
+
+	ath12k_hal_reo_shared_qaddr_cache_clear(dp->ab);
+
+	if (rx_tid->vaddr) {
+		dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
+			 DMA_BIDIRECTIONAL);
+		kfree(rx_tid->vaddr);
+		rx_tid->vaddr = NULL;
+	}
+}
+
+static int ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
+				      struct ath12k_dp_rx_tid *rx_tid)
+{
+	struct ath12k_hal_reo_cmd cmd = {0};
+	int ret;
 
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
 	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
-	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS |
+		    HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS;
+
+	/* For all NON_QOS tid, driver allocates max window
+	 * size of 1024. For this, driver can send flush 1K Desc
+	 * in one command instead of sending 11 cmd for
+	 * single NON_QOS tid(s).
+	 */
+	if (rx_tid->tid != HAL_DESC_REO_NON_QOS_TID)
+		cmd.flag |= HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC;
+
 	ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
 				     HAL_REO_CMD_FLUSH_CACHE,
 				     &cmd, ath12k_dp_reo_cmd_free);
-	if (ret) {
-		ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
-			   rx_tid->tid, ret);
-		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
-				 DMA_BIDIRECTIONAL);
-		kfree(rx_tid->vaddr);
-		rx_tid->vaddr = NULL;
-	}
+
+	return ret;
 }
 
 static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
 				      enum hal_reo_cmd_status status)
 {
 	struct ath12k_base *ab = dp->ab;
-	struct ath12k_dp_rx_tid *rx_tid = ctx;
+	struct ath12k_dp_rx_tid *rx_tid = ctx, *update_rx_tid;
 	struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;
+	struct dp_reo_update_rx_queue_elem *qelem, *qtmp;
 
 	if (status == HAL_REO_CMD_DRAIN) {
+		ab->soc_stats.hal_reo_cmd_drain++;
 		goto free_desc;
 	} else if (status != HAL_REO_CMD_SUCCESS) {
 		/* Shouldn't happen! Cleanup in case of other failure? */
@@ -703,10 +800,39 @@
 		return;
 	}
 
+	/* Check if there is any pending rx_queue, if yes then update it */
+	spin_lock_bh(&dp->reo_cmd_update_rx_queue_lock);
+	list_for_each_entry_safe(qelem, qtmp, &dp->reo_cmd_update_rx_queue_list,
+			list) {
+		if (qelem->reo_cmd_update_rx_queue_resend_flag &&
+				qelem->data.active) {
+			update_rx_tid = &qelem->data;
+
+			if (ath12k_peer_rx_tid_delete_handler(ab, update_rx_tid, qelem->tid)) {
+				update_rx_tid->active = true;
+				break;
+			}
+			ath12k_peer_rx_tid_qref_reset(ab,
+						      qelem->is_ml_peer ? qelem->ml_peer_id : qelem->peer_id,
+						      qelem->tid);
+			ath12k_hal_reo_shared_qaddr_cache_clear(ab);
+			update_rx_tid->vaddr = NULL;
+			update_rx_tid->paddr = 0;
+			update_rx_tid->size = 0;
+			update_rx_tid->pending_desc_size = 0;
+
+			list_del(&qelem->list);
+			kfree(qelem);
+		}
+	}
+	spin_unlock_bh(&dp->reo_cmd_update_rx_queue_lock);
+
 	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
-	if (!elem)
+	if (!elem) {
+		ath12k_warn(ab, "failed to alloc reo_cache_flush_elem, rx tid %d\n",
+			    rx_tid->tid);
 		goto free_desc;
-
+	}
 	elem->ts = jiffies;
 	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
 
@@ -720,9 +846,6 @@
 		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
 		    time_after(jiffies, elem->ts +
 			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
-			list_del(&elem->list);
-			dp->reo_cmd_cache_flush_count--;
-
 			/* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
 			 * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
 			 * is used in only two contexts, one is in this function called
@@ -732,10 +855,18 @@
 			 * delete to this list. Hence unlock-lock is safe here.
 			 */
 			spin_unlock_bh(&dp->reo_cmd_lock);
-
-			ath12k_dp_reo_cache_flush(ab, &elem->data);
-			kfree(elem);
+			if (ath12k_dp_reo_cache_flush(ab, &elem->data)) {
+				ab->soc_stats.reo_cmd_cache_error++;
+				/* In failure case, just update the timestamp
+				 * for flush cache elem and continue */
+				spin_lock_bh(&dp->reo_cmd_lock);
+				elem->ts = jiffies;
+				break;
+			}
 			spin_lock_bh(&dp->reo_cmd_lock);
+			list_del(&elem->list);
+			dp->reo_cmd_cache_flush_count--;
+			kfree(elem);
 		}
 	}
 	spin_unlock_bh(&dp->reo_cmd_lock);
@@ -753,13 +884,20 @@
 {
 	struct ath12k_reo_queue_ref *qref;
 	struct ath12k_dp *dp = &ab->dp;
+	bool ml_peer = false;
 
 	if (!ab->hw_params->reoq_lut_support)
 		return;
 
-	/* TODO: based on ML peer or not, select the LUT. below assumes non
-	 * ML peer
-	 */
+	if (peer_id & ATH12K_ML_PEER_ID_VALID) {
+		peer_id &= ~ATH12K_ML_PEER_ID_VALID;
+		ml_peer = true;
+	}
+
+	if (ml_peer)
+		qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+				(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+	else
 	qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
 			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
 
@@ -768,56 +906,63 @@
 	qref->info1 = u32_encode_bits(upper_32_bits(paddr),
 				      BUFFER_ADDR_INFO1_ADDR) |
 		      u32_encode_bits(tid, DP_REO_QREF_NUM);
-}
-
-static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
-{
-	struct ath12k_reo_queue_ref *qref;
-	struct ath12k_dp *dp = &ab->dp;
 
-	if (!ab->hw_params->reoq_lut_support)
-		return;
-
-	/* TODO: based on ML peer or not, select the LUT. below assumes non
-	 * ML peer
-	 */
-	qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
-			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
-
-	qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
-	qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
-		      u32_encode_bits(tid, DP_REO_QREF_NUM);
+	ath12k_hal_reo_shared_qaddr_cache_clear(ab);
 }
 
 void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
 				  struct ath12k_peer *peer, u8 tid)
 {
-	struct ath12k_hal_reo_cmd cmd = {0};
 	struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
-	int ret;
+	struct dp_reo_update_rx_queue_elem *elem, *tmp;
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_dp *dp = &ab->dp;
 
 	if (!rx_tid->active)
 		return;
 
-	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
-	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
-	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
-	cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
-	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
-				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
-				     ath12k_dp_rx_tid_del_func);
-	if (ret) {
-		ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
-			   tid, ret);
-		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
-				 DMA_BIDIRECTIONAL);
-		kfree(rx_tid->vaddr);
-		rx_tid->vaddr = NULL;
+	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
+	if (!elem) {
+		ath12k_warn(ar->ab, "failed to alloc reo_update_rx_queue_elem, rx tid %d\n",
+				rx_tid->tid);
+		return;
 	}
+	elem->reo_cmd_update_rx_queue_resend_flag = false;
+	elem->peer_id = peer->peer_id;
+	elem->tid = tid;
+	elem->is_ml_peer = peer->mlo ? true : false;
+	elem->ml_peer_id = peer->ml_peer_id;
+
+	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
 
-	ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
+	spin_lock_bh(&dp->reo_cmd_update_rx_queue_lock);
+	list_add_tail(&elem->list, &dp->reo_cmd_update_rx_queue_list);
 
-	rx_tid->active = false;
+	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_update_rx_queue_list,
+			list) {
+		rx_tid = &elem->data;
+
+		if (ath12k_peer_rx_tid_delete_handler(ab, rx_tid, elem->tid)) {
+			rx_tid->active = true;
+			ab->soc_stats.reo_cmd_update_rx_queue_error++;
+			elem->reo_cmd_update_rx_queue_resend_flag = true;
+			break;
+		}
+		ath12k_peer_rx_tid_qref_reset(ab,
+					      elem->is_ml_peer ? elem->ml_peer_id : elem->peer_id,
+					      elem->tid);
+		ath12k_hal_reo_shared_qaddr_cache_clear(ab);
+		rx_tid->vaddr = NULL;
+		rx_tid->paddr = 0;
+		rx_tid->size = 0;
+		rx_tid->pending_desc_size = 0;
+
+		list_del(&elem->list);
+		kfree(elem);
+	}
+	spin_unlock_bh(&dp->reo_cmd_update_rx_queue_lock);
+
+	return;
 }
 
 /* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted
@@ -889,14 +1034,13 @@
 
 		ath12k_dp_rx_peer_tid_delete(ar, peer, i);
 		ath12k_dp_rx_frags_cleanup(rx_tid, true);
-
 		spin_unlock_bh(&ar->ab->base_lock);
 		del_timer_sync(&rx_tid->frag_timer);
 		spin_lock_bh(&ar->ab->base_lock);
 	}
 }
 
-static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
+static int ath12k_peer_rx_tid_reo_update(struct ath12k_base *ab,
 					 struct ath12k_peer *peer,
 					 struct ath12k_dp_rx_tid *rx_tid,
 					 u32 ba_win_sz, u16 ssn,
@@ -916,11 +1060,11 @@
 		cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
 	}
 
-	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
+	ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
 				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
 				     NULL);
 	if (ret) {
-		ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
+		ath12k_warn(ab, "failed to update rx tid queue, tid %d (%d)\n",
 			    rx_tid->tid, ret);
 		return ret;
 	}
@@ -930,31 +1074,79 @@
 	return 0;
 }
 
-int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
-				u8 tid, u32 ba_win_sz, u16 ssn,
-				enum hal_pn_type pn_type)
+void ath12k_dp_peer_reo_tid_setup(struct ath12k_base *ab,
+				  struct ath12k_link_sta *arsta)
+{
+	struct ath12k_dp_rx_tid *rx_tid;
+	struct ath12k_peer *peer;
+	u8 tid;
+	int ret;
+
+	spin_lock_bh(&ab->base_lock);
+	peer = ath12k_peer_find_by_addr(ab, arsta->addr);
+	if (peer) {
+		for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
+			rx_tid = &peer->rx_tid[tid];
+			if (rx_tid->active) {
+				ret = ath12k_peer_rx_tid_reo_update(ab, peer, rx_tid,
+								    rx_tid->ba_win_sz, 0, false);
+				if (ret) {
+					ath12k_warn(ab, "failed to update reo for peer %pM rx tid %d\n",
+						    peer->addr, tid);
+				}
+			}
+		}
+	}
+	spin_unlock_bh(&ab->base_lock);
+}
+
+void ath12k_dp_tid_setup(void *data, struct ieee80211_sta *sta)
+{
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_base *ab = data;
+	struct ath12k_link_sta *arsta;
+	struct ath12k_link_vif *arvif;
+	u8 link_id;
+
+	if (sta->mlo)
+		return;
+
+	for_each_set_bit(link_id, &ahsta->links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arsta = ahsta->link[link_id];
+		if (!arsta)
+			continue;
+		arvif = arsta->arvif;
+		if (arvif->ab == ab)
+			ath12k_dp_peer_reo_tid_setup(ab, arsta);
+	}
+}
+
+void ath12k_dp_peer_tid_setup(struct ath12k_base *ab)
+{
+	struct ath12k *ar;
+	int i;
+
+	for (i = 0; i <  ab->num_radios; i++) {
+		ar = ab->pdevs[i].ar;
+		ieee80211_iterate_stations_atomic(ar->ah->hw,
+						  ath12k_dp_tid_setup,
+						  ab);
+	}
+}
+
+int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, u8 tid, u32 ba_win_sz, u16 ssn,
+				enum hal_pn_type pn_type, struct ath12k_peer *peer)
 {
 	struct ath12k_base *ab = ar->ab;
 	struct ath12k_dp *dp = &ab->dp;
 	struct hal_rx_reo_queue *addr_aligned;
-	struct ath12k_peer *peer;
 	struct ath12k_dp_rx_tid *rx_tid;
 	u32 hw_desc_sz;
 	void *vaddr;
 	dma_addr_t paddr;
 	int ret;
 
-	spin_lock_bh(&ab->base_lock);
-
-	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
-	if (!peer) {
-		spin_unlock_bh(&ab->base_lock);
-		ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
-		return -ENOENT;
-	}
-
-	if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
-		spin_unlock_bh(&ab->base_lock);
+	if (ab->hw_params->reoq_lut_support && (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
 		ath12k_warn(ab, "reo qref table is not setup\n");
 		return -EINVAL;
 	}
@@ -962,7 +1154,6 @@
 	if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
 		ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
 			    peer->peer_id, tid);
-		spin_unlock_bh(&ab->base_lock);
 		return -EINVAL;
 	}
 
@@ -970,16 +1161,28 @@
 	/* Update the tid queue if it is already setup */
 	if (rx_tid->active) {
 		paddr = rx_tid->paddr;
-		ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
+		ret = ath12k_peer_rx_tid_reo_update(ab, peer, rx_tid,
 						    ba_win_sz, ssn, true);
-		spin_unlock_bh(&ab->base_lock);
 		if (ret) {
-			ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
+			ath12k_warn(ab, "failed to update reo for peer %pM rx tid %d\n",
+				    peer->addr, tid);
 			return ret;
 		}
 
+		if (!ab->hw_params->reoq_lut_support) {
+			ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, peer->vdev_id,
+								     peer->addr,
+								     paddr, tid, 1,
+								     ba_win_sz);
+			if (ret) {
+				ath12k_warn(ab, "failed to setup peer rx reorder queuefor tid %d: %d\n",
+					    tid, ret);
 		return ret;
 	}
+		}
+
+		return 0;
+	}
 
 	rx_tid->tid = tid;
 
@@ -995,7 +1198,6 @@
 
 	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
 	if (!vaddr) {
-		spin_unlock_bh(&ab->base_lock);
 		return -ENOMEM;
 	}
 
@@ -1009,7 +1211,8 @@
 
 	ret = dma_mapping_error(ab->dev, paddr);
 	if (ret) {
-		spin_unlock_bh(&ab->base_lock);
+		ath12k_warn(ab, "failed to dma map for peer %pM rx tid :%d setup\n",
+				peer->addr, tid);
 		goto err_mem_free;
 	}
 
@@ -1022,52 +1225,87 @@
 		/* Update the REO queue LUT at the corresponding peer id
 		 * and tid with qaddr.
 		 */
+		if (peer->mlo)
+			ath12k_peer_rx_tid_qref_setup(ab, peer->ml_peer_id, tid, paddr);
+		else
 		ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
-		spin_unlock_bh(&ab->base_lock);
 	} else {
 		spin_unlock_bh(&ab->base_lock);
-		ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
+		ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, peer->vdev_id, peer->addr,
 							     paddr, tid, 1, ba_win_sz);
+		spin_lock_bh(&ab->base_lock);
 	}
 
 	return ret;
 
 err_mem_free:
-	kfree(vaddr);
-
+	 kfree(rx_tid->vaddr);
+	 rx_tid->vaddr = NULL;
 	return ret;
 }
 
 int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
-			     struct ieee80211_ampdu_params *params)
+			     struct ieee80211_ampdu_params *params,
+			     u8 link_id)
 {
 	struct ath12k_base *ab = ar->ab;
-	struct ath12k_sta *arsta = (void *)params->sta->drv_priv;
-	int vdev_id = arsta->arvif->vdev_id;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
+	struct ath12k_link_sta *arsta;
+	struct ath12k_peer *peer;
+	int vdev_id;
 	int ret;
 
-	ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
-					  params->tid, params->buf_size,
-					  params->ssn, arsta->pn_type);
+	arsta = ahsta->link[link_id];
+
+	if (!arsta)
+		return -ENOENT;
+
+	vdev_id = arsta->arvif->vdev_id;
+
+	spin_lock_bh(&ab->base_lock);
+	peer = ath12k_peer_find(ab, vdev_id, arsta->addr);
+	if (!peer) {
+		ath12k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
+			     arsta->addr, vdev_id);
+		spin_unlock_bh(&ab->base_lock);
+		return -ENOENT;
+	}
+
+	if (!peer->primary_link) {
+		spin_unlock_bh(&ab->base_lock);
+		return 0;
+	}
+
+	ret = ath12k_dp_rx_peer_tid_setup(ar, params->tid, params->buf_size,
+					  params->ssn, ahsta->pn_type, peer);
 	if (ret)
 		ath12k_warn(ab, "failed to setup rx tid %d\n", ret);
 
+	spin_unlock_bh(&ab->base_lock);
 	return ret;
 }
 
 int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
-			    struct ieee80211_ampdu_params *params)
+			    struct ieee80211_ampdu_params *params,
+			    u8 link_id)
 {
 	struct ath12k_base *ab = ar->ab;
 	struct ath12k_peer *peer;
-	struct ath12k_sta *arsta = (void *)params->sta->drv_priv;
-	int vdev_id = arsta->arvif->vdev_id;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
+	struct ath12k_link_sta *arsta;
+	int vdev_id;
 	bool active;
 	int ret;
 
+	arsta = ahsta->link[link_id];
+
+	if (!arsta)
+		return -ENOENT;
+
+	vdev_id = arsta->arvif->vdev_id;
 	spin_lock_bh(&ab->base_lock);
 
-	peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
+	peer = ath12k_peer_find(ab, vdev_id, arsta->addr);
 	if (!peer) {
 		spin_unlock_bh(&ab->base_lock);
 		ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
@@ -1081,7 +1319,7 @@
 		return 0;
 	}
 
-	ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
+	ret = ath12k_peer_rx_tid_reo_update(ab, peer, peer->rx_tid, 1, 0, false);
 	spin_unlock_bh(&ab->base_lock);
 	if (ret) {
 		ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
@@ -1092,7 +1330,7 @@
 	return ret;
 }
 
-int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
+int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
 				       const u8 *peer_addr,
 				       enum set_key_cmd key_cmd,
 				       struct ieee80211_key_conf *key)
@@ -1182,6 +1420,26 @@
 	return -EINVAL;
 }
 
+static void ath12k_htt_pktlog(struct ath12k_base *ab, struct sk_buff *skb)
+{
+	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
+	struct ath12k_pktlog_hdr *hdr = (struct ath12k_pktlog_hdr *)data;
+	struct ath12k *ar;
+	u8 pdev_id;
+
+	pdev_id = u32_get_bits(data->hdr, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+	if (!ar) {
+		ath12k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
+		return;
+	}
+	hdr->m_timestamp = ar->pdev->timestamp;
+
+	trace_ath12k_htt_pktlog(ar, data->payload, hdr->size,
+			ar->ab->pktlog_defs_checksum);
+	ath12k_htt_pktlog_process(ar, (u8 *)data->payload);
+}
+
 static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
 					   u16 tag, u16 len, const void *ptr,
 					   void *data)
@@ -1193,6 +1451,7 @@
 	struct htt_ppdu_user_stats *user_stats;
 	int cur_user;
 	u16 peer_id;
+	u32 ppdu_id;
 
 	ppdu_info = data;
 
@@ -1223,7 +1482,7 @@
 		user_stats->is_valid_peer_id = true;
 		memcpy(&user_stats->rate, ptr,
 		       sizeof(struct htt_ppdu_stats_user_rate));
-		user_stats->tlv_flags |= BIT(tag);
+		ppdu_info->tlv_bitmap |= BIT(tag);
 		break;
 	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
 		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
@@ -1243,7 +1502,7 @@
 		user_stats->is_valid_peer_id = true;
 		memcpy(&user_stats->cmpltn_cmn, ptr,
 		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
-		user_stats->tlv_flags |= BIT(tag);
+		ppdu_info->tlv_bitmap |= BIT(tag);
 		break;
 	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
 		if (len <
@@ -1254,6 +1513,8 @@
 		}
 
 		ba_status = ptr;
+		ppdu_id =
+		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->ppdu_id;
 		peer_id = le16_to_cpu(ba_status->sw_peer_id);
 		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
 						      peer_id);
@@ -1262,24 +1523,34 @@
 		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
 		user_stats->peer_id = peer_id;
 		user_stats->is_valid_peer_id = true;
+		ppdu_info->ppdu_id = FIELD_GET(HTT_PPDU_STATS_PPDU_ID, ppdu_id);
 		memcpy(&user_stats->ack_ba, ptr,
 		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
-		user_stats->tlv_flags |= BIT(tag);
+		ppdu_info->tlv_bitmap |= BIT(tag);
+		break;
+	case HTT_PPDU_STATS_TAG_SCH_CMD_STATUS:
+		ppdu_info->tlv_bitmap |= BIT(tag);
 		break;
 	}
 	return 0;
 }
 
-static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
 				  int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
 					      const void *ptr, void *data),
 				  void *data)
 {
+	struct htt_ppdu_stats_info *ppdu_info = NULL;
 	const struct htt_tlv *tlv;
 	const void *begin = ptr;
 	u16 tlv_tag, tlv_len;
 	int ret = -EINVAL;
 
+	if (!data)
+		return ret;
+
+	ppdu_info = (struct htt_ppdu_stats_info *)data;
+
 	while (len > 0) {
 		if (len < sizeof(*tlv)) {
 			ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
@@ -1297,7 +1568,7 @@
 				   tlv_tag, ptr - begin, len, tlv_len);
 			return -EINVAL;
 		}
-		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
+		ret = iter(ab, tlv_tag, tlv_len, ptr, ppdu_info);
 		if (ret == -ENOMEM)
 			return ret;
 
@@ -1307,68 +1578,162 @@
 	return 0;
 }
 
+static u32 ath12k_dp_rx_ru_alloc_from_ru_size(u16 ru_size)
+{
+	u32 width = 0;
+
+	switch (ru_size) {
+	case HTT_PPDU_STATS_RU_26:
+		width = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+		break;
+	case HTT_PPDU_STATS_RU_52:
+		width = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+		break;
+	case HTT_PPDU_STATS_RU_106:
+		width = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+		break;
+	case HTT_PPDU_STATS_RU_242:
+		width = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+		break;
+	case HTT_PPDU_STATS_RU_484:
+		width = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+		break;
+	case HTT_PPDU_STATS_RU_996:
+		width = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+		break;
+	default:
+		width = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+		break;
+	}
+
+	return width;
+}
+
+/* Align bw value as per host data structures */
+static u8 ath12k_htt_bw_to_mac_bw(u32 rate_flags)
+{
+	u8 bw = HTT_USR_RATE_BW(rate_flags);
+
+	switch (bw) {
+	case HTT_PPDU_STATS_BANDWIDTH_320MHZ:
+		bw = ATH12K_BW_320;
+		break;
+	case HTT_PPDU_STATS_BANDWIDTH_160MHZ:
+		bw = ATH12K_BW_160;
+		break;
+	case HTT_PPDU_STATS_BANDWIDTH_80MHZ:
+		bw = ATH12K_BW_80;
+		break;
+	case HTT_PPDU_STATS_BANDWIDTH_40MHZ:
+		bw = ATH12K_BW_40;
+		break;
+	default:
+		bw = ATH12K_BW_20;
+	break;
+	}
+
+	return bw;
+}
+
 static void
 ath12k_update_per_peer_tx_stats(struct ath12k *ar,
-				struct htt_ppdu_stats *ppdu_stats, u8 user)
+				struct htt_ppdu_stats_info *ppdu_info, u8 user)
 {
 	struct ath12k_base *ab = ar->ab;
 	struct ath12k_peer *peer;
 	struct ieee80211_sta *sta;
-	struct ath12k_sta *arsta;
+	struct ath12k_link_sta *arsta;
 	struct htt_ppdu_stats_user_rate *user_rate;
+	struct htt_ppdu_stats *ppdu_stats = &ppdu_info->ppdu_stats;
 	struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
 	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
 	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
 	int ret;
 	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
-	u32 v, succ_bytes = 0;
-	u16 tones, rate = 0, succ_pkts = 0;
-	u32 tx_duration = 0;
+	u32 v, succ_bytes = 0, ppdu_type;
+	u16 rate = 0, succ_pkts = 0, ru_start, ru_end;
+	u32 tx_duration = 0, ru_tones, ru_format, tlv_bitmap, rate_flags;
+	bool is_ampdu = false, resp_type_valid;
+
 	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
-	bool is_ampdu = false;
 
 	if (!usr_stats)
 		return;
 
-	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
+	tlv_bitmap = ppdu_info->tlv_bitmap;
+
+	if (!(tlv_bitmap & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
 		return;
 
-	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
+	if (tlv_bitmap & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) {
 		is_ampdu =
 			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
+		if (tlv_bitmap & BIT(HTT_PPDU_STATS_TAG_SCH_CMD_STATUS)) {
+			succ_pkts = usr_stats->cmpltn_cmn.mpdu_success;
+			tid = usr_stats->cmpltn_cmn.tid_num;
+		}
+	}
 
-	if (usr_stats->tlv_flags &
-	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
+	if (tlv_bitmap & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
 		succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
 		succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
 					  HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
 		tid = le32_get_bits(usr_stats->ack_ba.info,
 				    HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
+		ar->wmm_stats.tx_type = ath12k_tid_to_ac(tid > ATH12K_DSCP_PRIORITY ? 0: tid);
+		ar->wmm_stats.total_wmm_tx_pkts[ar->wmm_stats.tx_type]++;
 	}
 
 	if (common->fes_duration_us)
 		tx_duration = le32_to_cpu(common->fes_duration_us);
 
 	user_rate = &usr_stats->rate;
-	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
-	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
-	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
-	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
-	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
-	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
+	ppdu_type = HTT_USR_RATE_PPDU_TYPE(user_rate->info1);
+
+	resp_type_valid = u32_get_bits(user_rate->info1,
+				       HTT_PPDU_STATS_USER_RATE_INFO1_RESP_TYPE_VALID);
+	if (resp_type_valid) {
+		rate_flags = user_rate->resp_rate_flags;
+		ru_start = user_rate->resp_ru_start;
+		ru_end = user_rate->ru_end;
+		ppdu_type = HTT_USR_RESP_RATE_PPDU_TYPE(user_rate->resp_rate_flags);
+		if (ppdu_type == HTT_PPDU_STATS_RESP_PPDU_TYPE_MU_OFDMA_UL)
+			ppdu_type = HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA;
+		else
+			ppdu_type = HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO;
+	} else {
+		rate_flags = user_rate->rate_flags;
+		ru_start = user_rate->ru_start;
+		ru_end = user_rate->ru_end;
+	}
+
+	flags = HTT_USR_RATE_PREAMBLE(rate_flags);
+	bw = ath12k_htt_bw_to_mac_bw(rate_flags);
+	nss = HTT_USR_RATE_NSS(rate_flags) + 1;
+	mcs = HTT_USR_RATE_MCS(rate_flags);
+	sgi = HTT_USR_RATE_GI(rate_flags);
+	dcm = HTT_USR_RATE_DCM(rate_flags);
+	ru_format = FIELD_GET(HTT_PPDU_STATS_USER_RATE_INFO0_RU_SIZE,
+			      user_rate->info0);
+	if (ru_format == 1)
+		ru_tones = ath12k_dp_rx_ru_alloc_from_ru_size(ru_start);
+	else if (!ru_format)
+		ru_tones = ru_end - ru_start + 1;
+	else
+		ru_tones = ath12k_dp_rx_ru_alloc_from_ru_size(HTT_PPDU_STATS_RU_26);
 
 	/* Note: If host configured fixed rates and in some other special
 	 * cases, the broadcast/management frames are sent in different rates.
 	 * Firmware rate's control to be skipped for this?
 	 */
 
-	if (flags == WMI_RATE_PREAMBLE_HE && mcs > 11) {
+	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
 		ath12k_warn(ab, "Invalid HE mcs %d peer stats",  mcs);
 		return;
 	}
 
-	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
-		ath12k_warn(ab, "Invalid HE mcs %d peer stats",  mcs);
+	if (flags == WMI_RATE_PREAMBLE_EHT && mcs > ATH12K_EHT_MCS_MAX) {
+		ath12k_warn(ab, "Invalid EHT mcs %d peer stats",  mcs);
 		return;
 	}
 
@@ -1403,7 +1768,12 @@
 	}
 
 	sta = peer->sta;
-	arsta = (struct ath12k_sta *)sta->drv_priv;
+	arsta = ath12k_peer_get_link_sta(ab, peer);
+	if (!arsta) {
+		spin_unlock_bh(&ab->base_lock);
+		rcu_read_unlock();
+		return;
+	}
 
 	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
 
@@ -1431,10 +1801,21 @@
 		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
 		arsta->txrate.he_dcm = dcm;
 		arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
-		tones = le16_to_cpu(user_rate->ru_end) -
-			le16_to_cpu(user_rate->ru_start) + 1;
-		v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
-		arsta->txrate.he_ru_alloc = v;
+		arsta->txrate.he_ru_alloc = ru_tones;
+		peer_stats->ru_tones = arsta->txrate.he_ru_alloc;
+		break;
+	case WMI_RATE_PREAMBLE_EHT:
+		arsta->txrate.mcs = mcs;
+		arsta->txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
+		arsta->txrate.he_dcm = dcm;
+		arsta->txrate.eht_gi = ath12k_eht_gi_to_nl80211_eht_gi(sgi);
+		v = ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc((ru_end -
+								ru_start) + 1);
+		arsta->txrate.eht_ru_alloc = v;
+		ru_tones = v;
+		peer_stats->ru_tones = arsta->txrate.eht_ru_alloc;
+		break;
+	default:
 		break;
 	}
 
@@ -1452,9 +1833,13 @@
 		peer_stats->succ_bytes = succ_bytes;
 		peer_stats->is_ampdu = is_ampdu;
 		peer_stats->duration = tx_duration;
+		peer_stats->ru_tones = ru_tones;
 		peer_stats->ba_fails =
 			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
 			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
+		peer_stats->ppdu_type = ppdu_type;
+		if (ath12k_debugfs_is_extd_tx_stats_enabled(ar))
+			ath12k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
 	}
 
 	spin_unlock_bh(&ab->base_lock);
@@ -1462,12 +1847,12 @@
 }
 
 static void ath12k_htt_update_ppdu_stats(struct ath12k *ar,
-					 struct htt_ppdu_stats *ppdu_stats)
+					 struct htt_ppdu_stats_info *ppdu_info)
 {
 	u8 user;
 
 	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
-		ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user);
+		ath12k_update_per_peer_tx_stats(ar, ppdu_info, user);
 }
 
 static
@@ -1488,7 +1873,7 @@
 						     typeof(*ppdu_info), list);
 			list_del(&ppdu_info->list);
 			ar->ppdu_stat_list_depth--;
-			ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
+			ath12k_htt_update_ppdu_stats(ar, ppdu_info);
 			kfree(ppdu_info);
 		}
 	}
@@ -1558,6 +1943,11 @@
 		goto exit;
 	}
 
+	if (ath12k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
+		trace_ath12k_htt_ppdu_stats(ar, skb->data, len);
+		ath12k_htt_ppdu_pktlog_process(ar, (u8 *)skb->data, DP_RX_BUFFER_SIZE);
+	}
+
 	spin_lock_bh(&ar->data_lock);
 	ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id);
 	if (!ppdu_info) {
@@ -1623,23 +2013,72 @@
 	return ret;
 }
 
+static void ath12k_htt_backpressure_event_handler(struct ath12k_base *ab,
+						  struct sk_buff *skb)
+{
+	u32 *data = (u32 *)skb->data;
+	u8 pdev_id, ring_type, ring_id, pdev_idx;
+	u16 hp, tp;
+	u32 backpressure_time;
+	struct ath12k_bp_stats *bp_stats;
+
+	pdev_id = u32_get_bits(*data, HTT_BACKPRESSURE_EVENT_PDEV_ID_M);
+	ring_type = u32_get_bits(*data, HTT_BACKPRESSURE_EVENT_RING_TYPE_M);
+	ring_id = u32_get_bits(*data, HTT_BACKPRESSURE_EVENT_RING_ID_M);
+	++data;
+
+	hp = u32_get_bits(*data, HTT_BACKPRESSURE_EVENT_HP_M);
+	tp = u32_get_bits(*data, HTT_BACKPRESSURE_EVENT_TP_M);
+	++data;
+
+	backpressure_time = *data;
+
+	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
+		   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
+
+	if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
+		if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
+			return;
+
+		bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
+	} else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
+		pdev_idx = DP_HW2SW_MACID(pdev_id);
+
+		if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
+			return;
+
+		bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
+	} else {
+		ath12k_warn(ab, "unknown ring type received in htt bp event %d\n",
+			    ring_type);
+		return;
+	}
+
+	spin_lock_bh(&ab->base_lock);
+	bp_stats->hp = hp;
+	bp_stats->tp = tp;
+	bp_stats->count++;
+	bp_stats->jiffies = jiffies;
+	spin_unlock_bh(&ab->base_lock);
+}
+
 static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
 						struct sk_buff *skb)
 {
 	struct ath12k_htt_mlo_offset_msg *msg;
+	struct ath12k_hw_group *ag = ab->ag;
 	struct ath12k_pdev *pdev;
 	struct ath12k *ar;
 	u8 pdev_id;
+	int i, j;
 
 	msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
 	pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
 			       HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);
 	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
 
-	if (!ar) {
-		ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
+	if (!ar)
 		return;
-	}
 
 	spin_lock_bh(&ar->data_lock);
 	pdev = ar->pdev;
@@ -1653,9 +2092,160 @@
 	pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
 	pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);
 
+	ag->mlo_tstamp_offset =  ((u64)pdev->timestamp.mlo_offset_hi << 32 | pdev->timestamp.mlo_offset_lo);
+
+	/* The MLO timestamp offset is common to all chips, so also
+	 * fetch delta_tsf2 for every radio of every chip on this event.
+	 */
+	for (i = 0; i < ag->num_chip; i++) {
+		struct ath12k_base *tmp_ab = ag->ab[i];
+
+		for (j = 0; j < tmp_ab->num_radios; j++) {
+			struct ath12k *tmp_ar;
+
+			pdev = &tmp_ab->pdevs[j];
+			tmp_ar = pdev->ar;
+			if (!tmp_ar)
+				continue;
+
+			if (tmp_ab->hw_params->hal_ops->hal_get_tsf2_scratch_reg)
+				tmp_ab->hw_params->hal_ops->hal_get_tsf2_scratch_reg(tmp_ab, tmp_ar->lmac_id,
+										     &tmp_ar->delta_tsf2);
+		}
+	}
+
+
 	spin_unlock_bh(&ar->data_lock);
 }
 
+static void ath12k_htt_vdev_txrx_stats_handler(struct ath12k_base *ab,
+					       struct sk_buff *skb)
+{
+	struct htt_t2h_vdev_txrx_stats_ind *vdev_tlv;
+	struct htt_t2h_vdev_common_stats_tlv *soc_tlv;
+	struct ath12k_link_vif *arvif;
+	u32 *data = (u32 *)skb->data;
+	u32 vdev_id;
+	u16 payload_bytes, tlv_tag, tlv_len;
+
+	payload_bytes = u32_get_bits(*data, HTT_T2H_VDEV_STATS_PERIODIC_PAYLOAD_BYTES);
+	data += 3;
+
+	if (payload_bytes > skb->len)
+		return;
+
+	while (payload_bytes > 0) {
+		tlv_tag = u32_get_bits(*data, HAL_TLV_HDR_TAG);
+		tlv_len = u32_get_bits(*data, HAL_TLV_HDR_LEN);
+		data++;
+
+		if (tlv_tag == HTT_VDEV_TXRX_STATS_HW_STATS_TLV) {
+			vdev_tlv = (struct htt_t2h_vdev_txrx_stats_ind *)data;
+			vdev_id = __le32_to_cpu(vdev_tlv->vdev_id);
+			arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id);
+
+			arvif->vdev_stats.rx_msdu_byte_cnt =
+				HTT_VDEV_GET_STATS_U64(vdev_tlv->rx_msdu_byte_cnt_lo,
+						       vdev_tlv->rx_msdu_byte_cnt_hi);
+			arvif->vdev_stats.rx_msdu_pkt_cnt =
+				HTT_VDEV_GET_STATS_U64(vdev_tlv->rx_msdu_cnt_lo,
+						       vdev_tlv->rx_msdu_cnt_hi);
+			arvif->vdev_stats.tx_msdu_byte_cnt =
+				HTT_VDEV_GET_STATS_U64(vdev_tlv->tx_msdu_byte_cnt_lo,
+						       vdev_tlv->tx_msdu_byte_cnt_hi);
+			arvif->vdev_stats.tx_msdu_pkt_cnt =
+				HTT_VDEV_GET_STATS_U64(vdev_tlv->tx_msdu_cnt_lo,
+						       vdev_tlv->tx_msdu_cnt_hi);
+			arvif->vdev_stats.tx_retry_byte_cnt =
+				HTT_VDEV_GET_STATS_U64(vdev_tlv->tx_retry_byte_cnt_lo,
+						       vdev_tlv->tx_retry_byte_cnt_hi);
+			arvif->vdev_stats.tx_retry_pkt_cnt =
+				HTT_VDEV_GET_STATS_U64(vdev_tlv->tx_retry_cnt_lo,
+						       vdev_tlv->tx_retry_cnt_hi);
+			arvif->vdev_stats.tx_drop_byte_cnt =
+				HTT_VDEV_GET_STATS_U64(vdev_tlv->tx_drop_byte_cnt_lo,
+						       vdev_tlv->tx_drop_byte_cnt_hi);
+			arvif->vdev_stats.tx_msdu_ttl_byte_cnt =
+				HTT_VDEV_GET_STATS_U64(vdev_tlv->msdu_ttl_byte_cnt_lo,
+						       vdev_tlv->msdu_ttl_byte_cnt_hi);
+			arvif->vdev_stats.tx_msdu_ttl_pkt_cnt =
+				HTT_VDEV_GET_STATS_U64(vdev_tlv->msdu_ttl_cnt_lo,
+						       vdev_tlv->msdu_ttl_cnt_hi);
+		}
+
+		if (tlv_tag == HTT_VDEV_TXRX_STATS_COMMON_TLV) {
+			soc_tlv = (struct htt_t2h_vdev_common_stats_tlv *)data;
+			ab->fw_soc_drop_count =
+				HTT_VDEV_GET_STATS_U64(soc_tlv->soc_drop_count_lo,
+						       soc_tlv->soc_drop_count_hi);
+		}
+		data += tlv_len >> 2;
+		payload_bytes -= tlv_len;
+	}
+}
+
+#ifdef CONFIG_ATH12K_SAWF
+static void ath12k_htt_sawf_info_ind_handler(struct ath12k_base *ab,
+					     struct sk_buff *skb)
+{
+	struct htt_t2h_sawf_info_ind *resp = (struct htt_t2h_sawf_info_ind *)skb->data;
+	struct ath12k_peer *peer = NULL;
+	u32 htt_qtype, remapped_tid, peer_id, default_msduq_per_tid, default_msduq_max, sawf_msduq_per_tid;
+	u32 hlos_tid, flow_or, ast_idx, who_cl, tgt_opaque_id, sawf_msduq_max;
+	u8 msduq_index;
+
+	htt_qtype = u32_get_bits(__le32_to_cpu(resp->info0),
+				 HTT_T2H_SAWF_MSDUQ_INFO_0_IND_HTT_QTYPE_ID);
+	peer_id = u32_get_bits(__le32_to_cpu(resp->info0),
+			       HTT_T2H_SAWF_MSDUQ_INFO_0_IND_PEER_ID);
+
+	remapped_tid = u32_get_bits(__le32_to_cpu(resp->info1),
+			       HTT_T2H_SAWF_MSDUQ_INFO_1_IND_REMAP_TID_ID);
+	hlos_tid = u32_get_bits(__le32_to_cpu(resp->info1),
+			       HTT_T2H_SAWF_MSDUQ_INFO_1_IND_HLOS_TID_ID);
+	who_cl = u32_get_bits(__le32_to_cpu(resp->info1),
+			       HTT_T2H_SAWF_MSDUQ_INFO_1_IND_WHO_CLSFY_INFO_SEL_ID);
+	flow_or = u32_get_bits(__le32_to_cpu(resp->info1),
+			       HTT_T2H_SAWF_MSDUQ_INFO_1_IND_FLOW_OVERRIDE_ID);
+	ast_idx = u32_get_bits(__le32_to_cpu(resp->info1),
+			       HTT_T2H_SAWF_MSDUQ_INFO_1_IND_AST_INDEX_ID);
+
+	tgt_opaque_id = u32_get_bits(__le32_to_cpu(resp->info2),
+			       HTT_T2H_SAWF_MSDUQ_INFO_2_IND_TGT_OPAQUE_ID);
+
+	ath12k_dbg(ab, ATH12K_DBG_SAWF, "Sawf Info ind:\n");
+	ath12k_dbg(ab, ATH12K_DBG_SAWF,
+		   "htt_qtype[0x%x]Peer_Id[0x%x]Remp_Tid[0x%x]Hlos_Tid[0x%x]\n",
+		   htt_qtype,
+		   peer_id,
+		   remapped_tid,
+		   hlos_tid);
+	ath12k_dbg(ab, ATH12K_DBG_SAWF,
+		   "who_cl[0x%x]flow_or[0x%x]Ast[0x%x]Op[0x%x]\n",
+		   who_cl,
+		   flow_or,
+		   ast_idx,
+		   tgt_opaque_id);
+	spin_lock_bh(&ab->base_lock);
+
+	default_msduq_per_tid = ab->default_msduq_per_tid;
+	sawf_msduq_per_tid = ab->max_msduq_per_tid - ab->default_msduq_per_tid;
+	default_msduq_max = default_msduq_per_tid * ATH12K_SAWF_MAX_TID_SUPPORT;
+	sawf_msduq_max = sawf_msduq_per_tid * ATH12K_SAWF_MAX_TID_SUPPORT;
+	msduq_index = ((who_cl * default_msduq_max) +
+		      (flow_or * ATH12K_SAWF_MAX_TID_SUPPORT) + hlos_tid) -
+		      default_msduq_max;
+
+	peer = ath12k_peer_find_by_id(ab, peer_id);
+	if ((msduq_index < sawf_msduq_max) && peer) {
+		if (peer->sawf_ctx_peer.telemetry_peer_ctx)
+			ath12k_telemetry_update_tid_msduq(peer->sawf_ctx_peer.telemetry_peer_ctx,
+							  msduq_index ,remapped_tid,
+							  (htt_qtype - default_msduq_per_tid));
+	}
+	spin_unlock_bh(&ab->base_lock);
+}
+#endif /* CONFIG_ATH12K_SAWF */
 void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
 				       struct sk_buff *skb)
 {
@@ -1691,6 +2281,8 @@
 					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
 		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
 				       peer_mac_h16, mac_addr);
+		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
+				     resp->peer_map_ev.info2);
 		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
 		break;
 	case HTT_T2H_MSG_TYPE_PEER_MAP2:
@@ -1718,8 +2310,12 @@
 					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
 		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
 				       peer_mac_h16, mac_addr);
+		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL,
+				     resp->peer_map_ev.info2);
+		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID,
+				resp->peer_map_ev.info2);
 		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
-				      peer_id);
+				      hw_peer_id);
 		break;
 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
 	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
@@ -1727,14 +2323,41 @@
 					HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
 		ath12k_peer_unmap_event(ab, peer_id);
 		break;
+	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_MAP:
+		ath12k_peer_mlo_map_event(ab, skb);
+		break;
+	case HTT_T2H_MSG_TYPE_MLO_RX_PEER_UNMAP:
+		ath12k_peer_mlo_unmap_event(ab, skb);
+		break;
 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
 		ath12k_htt_pull_ppdu_stats(ab, skb);
 		break;
 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
+		ath12k_debugfs_htt_ext_stats_handler(ab, skb);
+		break;
+	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
+		ath12k_htt_backpressure_event_handler(ab, skb);
+		break;
+	case HTT_T2H_MSG_TYPE_PKTLOG:
+		ath12k_htt_pktlog(ab, skb);
 		break;
 	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
 		ath12k_htt_mlo_offset_event_handler(ab, skb);
 		break;
+	case HTT_T2H_MSG_TYPE_VDEV_TXRX_STATS_PERIODIC_IND:
+		ath12k_htt_vdev_txrx_stats_handler(ab, skb);
+		break;
+#ifdef CONFIG_ATH12K_SAWF
+	case HTT_T2H_MSG_TYPE_SAWF_MSDUQ_INFO_IND:
+		ath12k_htt_sawf_info_ind_handler(ab, skb);
+		break;
+	case HTT_T2H_MSG_TYPE_SAWF_DEF_QUEUES_MAP_REPORT_CONF:
+		ath12k_htt_sawf_def_q_map_report_handler(ab, skb);
+		break;
+	case HTT_T2H_MSG_TYPE_STREAMING_STATS_IND:
+		ath12k_htt_sawf_streaming_stats_ind_handler(ab, skb);
+		break;
+#endif /* CONFIG_ATH12K_SAWF */
 	default:
 		ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
 			   type);
@@ -1747,7 +2370,8 @@
 static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
 				      struct sk_buff_head *msdu_list,
 				      struct sk_buff *first, struct sk_buff *last,
-				      u8 l3pad_bytes, int msdu_len)
+				      u8 l3pad_bytes, int msdu_len,
+				      struct ath12k_dp_rx_info *rx_info)
 {
 	struct ath12k_base *ab = ar->ab;
 	struct sk_buff *skb;
@@ -1756,6 +2380,7 @@
 	struct hal_rx_desc *ldesc;
 	int space_extra, rem_len, buf_len;
 	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+	bool is_continuation;
 
 	/* As the msdu is spread across multiple rx buffers,
 	 * find the offset to the start of msdu for computing
@@ -1771,8 +2396,9 @@
 	}
 
 	ldesc = (struct hal_rx_desc *)last->data;
-	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
-	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);
+	ab->hw_params->hal_ops->rx_desc_get_first_last_msdu(ldesc, rx_info);
+	rxcb->is_first_msdu = rx_info->is_first_msdu;
+	rxcb->is_last_msdu = rx_info->is_last_msdu;
 
 	/* MSDU spans over multiple buffers because the length of the MSDU
 	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
@@ -1804,7 +2430,8 @@
 	rem_len = msdu_len - buf_first_len;
 	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
 		rxcb = ATH12K_SKB_RXCB(skb);
-		if (rxcb->is_continuation)
+		is_continuation = rxcb->is_continuation;
+		if (is_continuation)
 			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
 		else
 			buf_len = rem_len;
@@ -1822,8 +2449,15 @@
 		dev_kfree_skb_any(skb);
 
 		rem_len -= buf_len;
-		if (!rxcb->is_continuation)
+		if (!is_continuation)
 			break;
+
+		skb = msdu_list->next;
+		if (likely(skb)) {
+			prefetch(skb);
+			prefetch(skb->data + 64);
+			prefetch(skb->data + 128);
+		}
 	}
 
 	return 0;
@@ -1847,14 +2481,13 @@
 	return NULL;
 }
 
-static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu)
+static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu,
+					struct ath12k_dp_rx_info *rx_info)
 {
-	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
-	struct ath12k_base *ab = ar->ab;
 	bool ip_csum_fail, l4_csum_fail;
 
-	ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rxcb->rx_desc);
-	l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rxcb->rx_desc);
+	ip_csum_fail = rx_info->ip_csum_fail;
+	l4_csum_fail = rx_info->l4_csum_fail;
 
 	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
 			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
@@ -2000,7 +2633,9 @@
 
 	if (!rxcb->is_first_msdu ||
 	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
-		WARN_ON_ONCE(1);
+		/* TODO: Change below stats increment back to WARN_ON_ONCE(1)
+		*/
+		ar->ab->soc_stats.first_and_last_msdu_bit_miss++;
 		return;
 	}
 
@@ -2055,10 +2690,9 @@
 	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
 	struct ath12k_base *ab = ar->ab;
 	size_t hdr_len, crypto_len;
-	struct ieee80211_hdr *hdr;
+	struct ieee80211_hdr hdr;
+	u8 *crypto_hdr, mesh_ctrl;
 	u16 qos_ctl;
-	__le16 fc;
-	u8 *crypto_hdr;
 
 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
 		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
@@ -2066,26 +2700,25 @@
 		ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
 	}
 
-	fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc));
-	hdr_len = ieee80211_hdrlen(fc);
-	skb_push(msdu, hdr_len);
-	hdr = (struct ieee80211_hdr *)msdu->data;
-	hdr->frame_control = fc;
-
 	/* Get wifi header from rx_desc */
-	ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr);
+	ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, &hdr);
+
+	hdr_len = ieee80211_hdrlen(hdr.frame_control);
+	mesh_ctrl = ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc);
+	skb_push(msdu, hdr_len);
+	memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr)));
 
 	if (rxcb->is_mcbc)
 		status->flag &= ~RX_FLAG_PN_VALIDATED;
 
 	/* Add QOS header */
-	if (ieee80211_is_data_qos(hdr->frame_control)) {
+	if (ieee80211_is_data_qos(hdr.frame_control)) {
 		qos_ctl = rxcb->tid;
-		if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc))
+		if (mesh_ctrl)
 			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
 
 		/* TODO: Add other QoS ctl fields when required */
-		memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN),
+		memcpy(ieee80211_get_qos_ctl((struct ieee80211_hdr *)msdu->data),
 		       &qos_ctl, IEEE80211_QOS_CTL_LEN);
 	}
 }
@@ -2143,7 +2776,8 @@
 		ehdr = (struct ethhdr *)msdu->data;
 
 		/* mac80211 allows fast path only for authorized STA */
-		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
+		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE) ||
+		    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) {
 			ATH12K_SKB_RXCB(msdu)->is_eapol = true;
 			ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
 			break;
@@ -2159,10 +2793,12 @@
 		/* TODO: Handle undecap for these formats */
 		break;
 	}
+	ar->wmm_stats.total_wmm_rx_pkts[ar->wmm_stats.rx_type]++;
 }
 
 struct ath12k_peer *
-ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
+ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu,
+			 struct ath12k_dp_rx_info *rx_info)
 {
 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
 	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
@@ -2176,19 +2812,83 @@
 	if (peer)
 		return peer;
 
-	if (!rx_desc || !(ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
-		return NULL;
-
-	peer = ath12k_peer_find_by_addr(ab,
+	if (!(rx_info->filled & BIT_ULL(ATH12K_RX_INFO_ADDR2))) {
+		if (rx_desc && ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)) {
+			ether_addr_copy(rx_info->addr2,
 					ath12k_dp_rxdesc_get_mpdu_start_addr2(ab,
 									      rx_desc));
+			rx_info->filled |= BIT_ULL(ATH12K_RX_INFO_ADDR2);
+		} else {
+			return NULL;
+		}
+	}
+
+	peer = ath12k_peer_find_by_addr(ab, rx_info->addr2);
 	return peer;
 }
 
+static bool ath12k_dp_rx_check_fast_rx(struct ath12k *ar,
+                                       struct sk_buff *msdu,
+                                       struct hal_rx_desc *rx_desc,
+                                       struct ath12k_peer *peer,
+				       struct ath12k_dp_rx_info *rx_info)
+{
+	struct ethhdr *ehdr;
+	struct ath12k_peer *f_peer;
+	struct ath12k_skb_rxcb *rxcb;
+	u8 decap;
+	bool ip_is_valid;
+
+	lockdep_assert_held(&ar->ab->base_lock);
+
+	decap = rx_info->decap_type;
+	ip_is_valid = rx_info->ip_is_valid;
+
+	rxcb = ATH12K_SKB_RXCB(msdu);
+
+	if (unlikely(!ar->ab->stats_disable ||
+		decap != DP_RX_DECAP_TYPE_ETHERNET2_DIX ||
+		peer->vif->type != NL80211_IFTYPE_AP))
+		return false;
+
+	/* mcbc packets go through mac80211 for PN validation */
+	if (unlikely(rxcb->is_mcbc))
+		return false;
+
+	if (unlikely(!peer->is_authorized))
+		return false;
+
+	if (unlikely(!ip_is_valid))
+		return false;
+
+	/* fast rx is supported only on ethernet decap, so
+	 * we can directly gfet the ethernet header
+	 */
+	ehdr = (struct ethhdr *)msdu->data;
+
+	/* requires rebroadcast from mac80211 */
+	if (is_multicast_ether_addr(ehdr->h_dest))
+	        return false;
+
+	/* TODO: make use of hw assisted intra-bss bit in msdu_end
+	 * to skip peer lookup and forward frames directly.
+	 */
+
+	/* check if the msdu needs to be bridged to our connected peer */
+	f_peer = ath12k_peer_find_by_addr(ar->ab, ehdr->h_dest);
+
+	if (f_peer && f_peer != peer)
+		return false;
+
+	/* allow direct rx */
+	return true;
+}
+
 static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
 				struct sk_buff *msdu,
 				struct hal_rx_desc *rx_desc,
-				struct ieee80211_rx_status *rx_status)
+				struct ath12k_dp_rx_info *rx_info,
+				bool *fast_rx)
 {
 	bool  fill_crypto_hdr;
 	struct ath12k_base *ab = ar->ab;
@@ -2197,19 +2897,62 @@
 	bool is_decrypted = false;
 	struct ieee80211_hdr *hdr;
 	struct ath12k_peer *peer;
+	struct ieee80211_rx_status *rx_status = rx_info->rx_status;
 	u32 err_bitmap;
+	struct ath12k_dp_rx_tid *rx_tid;
+	u8 tid;
+#if defined(CONFIG_MAC80211_PPE_SUPPORT) || defined(CONFIG_ATH12K_BONDED_DS_SUPPORT)
+	int vp;
+#endif
+	struct wireless_dev *wdev = NULL;
 
 	/* PN for multicast packets will be checked in mac80211 */
 	rxcb = ATH12K_SKB_RXCB(msdu);
-	fill_crypto_hdr = ath12k_dp_rx_h_is_mcbc(ar->ab, rx_desc);
+	fill_crypto_hdr = rx_info->is_mcbc;
 	rxcb->is_mcbc = fill_crypto_hdr;
 
 	if (rxcb->is_mcbc)
-		rxcb->peer_id = ath12k_dp_rx_h_peer_id(ar->ab, rx_desc);
+		rxcb->peer_id = rx_info->peer_id;
 
 	spin_lock_bh(&ar->ab->base_lock);
-	peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
+	peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, rx_info);
 	if (peer) {
+		/* If the pkt is a valid IP packet and peer supports
+		 * fast rx, deliver directly to net, also note that
+		 * pkts with crypto error are not expected to arrive in this
+		 * path, so its safe to skip checking errors here */
+		if (*fast_rx &&
+		    ath12k_dp_rx_check_fast_rx(ar, msdu, rx_desc, peer, rx_info)) {
+			wdev = ieee80211_vif_to_wdev(peer->vif);
+		        if (wdev) {
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+				vp = peer->ppe_vp_num;
+				if (unlikely(wdev->bond_netdev))
+					msdu->dev = wdev->bond_netdev;
+				else
+#endif
+					msdu->dev = wdev->netdev;
+
+				spin_unlock_bh(&ar->ab->base_lock);
+				ath12k_dp_rx_h_csum_offload(ar, msdu, rx_info);
+#if defined(CONFIG_MAC80211_PPE_SUPPORT) || defined(CONFIG_ATH12K_BONDED_DS_SUPPORT)
+				if (vp > 0) {
+					if (likely(ppe_vp_tx_to_ppe(vp, msdu)))
+						return;
+				}
+#endif
+				msdu->protocol = eth_type_trans(msdu, msdu->dev);
+				netif_receive_skb(msdu);
+				return;
+		        }
+		}
+
+		tid = rx_info->tid;
+		rx_tid = &peer->rx_tid[tid];
+		ar->wmm_stats.rx_type =
+			ath12k_tid_to_ac(rx_tid->tid > ATH12K_DSCP_PRIORITY ? 0: rx_tid->tid);
+		ar->wmm_stats.total_wmm_rx_pkts[ar->wmm_stats.rx_type]++;
+
 		if (rxcb->is_mcbc)
 			enctype = peer->sec_type_grp;
 		else
@@ -2219,6 +2962,16 @@
 	}
 	spin_unlock_bh(&ar->ab->base_lock);
 
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+	if (peer) {
+		wdev = ieee80211_vif_to_wdev(peer->vif);
+		if (wdev && wdev->bond_netdev)
+			msdu->dev = wdev->bond_netdev;
+	}
+#endif
+
+	*fast_rx = false;
+
 	err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
 	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
 		is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc);
@@ -2246,36 +2999,43 @@
 					   RX_FLAG_PN_VALIDATED;
 	}
 
-	ath12k_dp_rx_h_csum_offload(ar, msdu);
+	ath12k_dp_rx_h_csum_offload(ar, msdu, rx_info);
 	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
 			       enctype, rx_status, is_decrypted);
 
 	if (!is_decrypted || fill_crypto_hdr)
 		return;
 
-	if (ath12k_dp_rx_h_decap_type(ar->ab, rx_desc) !=
-	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
+	if (rx_info->decap_type != DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
 		hdr = (void *)msdu->data;
 		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
 	}
 }
 
-static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
-				struct ieee80211_rx_status *rx_status)
+static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
 {
-	struct ath12k_base *ab = ar->ab;
+	struct ieee80211_rx_status *rx_status = rx_info->rx_status;
 	struct ieee80211_supported_band *sband;
-	enum rx_msdu_start_pkt_type pkt_type;
-	u8 bw;
-	u8 rate_mcs, nss;
-	u8 sgi;
+	enum rx_msdu_start_pkt_type pkt_type = RX_MSDU_START_PKT_TYPE_11A;
+	u8 bw = 0;
+	u8 rate_mcs = 0, nss = 0;
+	u8 sgi = 0;
 	bool is_cck;
 
-	pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
-	bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
-	rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
-	nss = ath12k_dp_rx_h_nss(ab, rx_desc);
-	sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
+	if (rx_info->filled & BIT_ULL(ATH12K_RX_INFO_PKT_TYPE))
+		pkt_type = rx_info->pkt_type;
+
+	if (rx_info->filled & BIT_ULL(ATH12K_RX_INFO_BW))
+		bw = rx_info->bw;
+
+	if (rx_info->filled & BIT_ULL(ATH12K_RX_INFO_RATE_MCS))
+		rate_mcs = rx_info->rate_mcs;
+
+	if (rx_info->filled & BIT_ULL(ATH12K_RX_INFO_NSS))
+		nss = rx_info->nss;
+
+	if (rx_info->filled & BIT_ULL(ATH12K_RX_INFO_SGI))
+		sgi = rx_info->sgi;
 
 	switch (pkt_type) {
 	case RX_MSDU_START_PKT_TYPE_11A:
@@ -2322,18 +3082,55 @@
 		}
 		rx_status->encoding = RX_ENC_HE;
 		rx_status->nss = nss;
+		rx_status->he_gi = ath12k_mac_he_gi_to_nl80211_he_gi(sgi);
+		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
+		break;
+	case RX_MSDU_START_PKT_TYPE_11BE:
+		rx_status->rate_idx = rate_mcs;
+		if (rate_mcs > ATH12K_EHT_MCS_MAX) {
+			ath12k_warn(ar->ab,
+				    "Received with invalid mcs in EHT mode %d\n",
+				    rate_mcs);
+			break;
+		}
+		rx_status->encoding = RX_ENC_EHT;
+		rx_status->nss = nss;
 		rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
 		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
 		break;
+	default:
+		break;
 	}
 }
 
-void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
-			 struct ieee80211_rx_status *rx_status)
+static void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab,
+				      struct hal_rx_desc *rx_desc,
+				      struct ath12k_dp_rx_info *rx_info)
 {
-	struct ath12k_base *ab = ar->ab;
+	ab->hw_params->hal_ops->rx_get_desc_info(rx_desc, rx_info);
+	rx_info->filled |= BIT_ULL(ATH12K_RX_INFO_PHY_META_DATA) |
+			   BIT_ULL(ATH12K_RX_INFO_DECAP_TYPE) |
+			   BIT_ULL(ATH12K_RX_INFO_PKT_TYPE) |
+			   BIT_ULL(ATH12K_RX_INFO_SGI) |
+			   BIT_ULL(ATH12K_RX_INFO_RATE_MCS) |
+			   BIT_ULL(ATH12K_RX_INFO_BW) |
+			   BIT_ULL(ATH12K_RX_INFO_NSS);
+
+	if (ab->stats_disable)
+		return;
+
+	if (ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)) {
+		ether_addr_copy(rx_info->addr2,
+				ath12k_dp_rxdesc_get_mpdu_start_addr2(ab, rx_desc));
+		rx_info->filled |= BIT_ULL(ATH12K_RX_INFO_ADDR2);
+	}
+}
+
+void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
+{
+	struct ieee80211_rx_status *rx_status = rx_info->rx_status;
 	u8 channel_num;
-	u32 center_freq, meta_data;
+	u32 center_freq, meta_data = 0;
 	struct ieee80211_channel *channel;
 
 	rx_status->freq = 0;
@@ -2345,38 +3142,52 @@
 
 	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
 
-	meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
+	if (rx_info->filled & BIT_ULL(ATH12K_RX_INFO_PHY_META_DATA))
+		meta_data = rx_info->phy_meta_data;
+
 	channel_num = meta_data;
 	center_freq = meta_data >> 16;
 
-	if (center_freq >= 5935 && center_freq <= 7105) {
+	rx_status->band = NUM_NL80211_BANDS;
+
+	if (center_freq >= ATH12K_MIN_6G_FREQ &&
+	    center_freq <= ATH12K_MAX_6G_FREQ) {
 		rx_status->band = NL80211_BAND_6GHZ;
-	} else if (channel_num >= 1 && channel_num <= 14) {
+		rx_status->freq = center_freq;
+	} else if (channel_num >= ATH12K_MIN_2G_CHAN &&
+		  channel_num <= ATH12K_MAX_2G_CHAN) {
 		rx_status->band = NL80211_BAND_2GHZ;
-	} else if (channel_num >= 36 && channel_num <= 173) {
+	} else if (channel_num >= ATH12K_MIN_5G_CHAN &&
+		  channel_num <= ATH12K_MAX_5G_CHAN) {
 		rx_status->band = NL80211_BAND_5GHZ;
-	} else {
+	}
+
+	if (unlikely(rx_status->band == NUM_NL80211_BANDS || !ar->ah->hw->wiphy->bands[rx_status->band])) {
+        ath12k_err(ar->ab, "sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n",
+                    rx_status->band, channel_num, center_freq, ar->pdev_idx);
+
 		spin_lock_bh(&ar->data_lock);
 		channel = ar->rx_channel;
 		if (channel) {
 			rx_status->band = channel->band;
 			channel_num =
 				ieee80211_frequency_to_channel(channel->center_freq);
+		} else {
+			ath12k_err(ar->ab, "unable to determine channel, band for rx packet");
 		}
 		spin_unlock_bh(&ar->data_lock);
-		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
-				rx_desc, sizeof(*rx_desc));
 	}
 
+	if (rx_status->band != NL80211_BAND_6GHZ)
 	rx_status->freq = ieee80211_channel_to_frequency(channel_num,
 							 rx_status->band);
 
-	ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
+	ath12k_dp_rx_h_rate(ar, rx_info);
 }
 
 static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
 				      struct sk_buff *msdu,
-				      struct ieee80211_rx_status *status)
+				      struct ath12k_dp_rx_info *rx_info)
 {
 	struct ath12k_base *ab = ar->ab;
 	static const struct ieee80211_radiotap_he known = {
@@ -2386,12 +3197,14 @@
 	};
 	struct ieee80211_radiotap_he *he;
 	struct ieee80211_rx_status *rx_status;
-	struct ieee80211_sta *pubsta;
-	struct ath12k_peer *peer;
+	struct ieee80211_sta *pubsta = NULL;
+	struct ath12k_peer *peer = NULL;
 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
 	u8 decap = DP_RX_DECAP_TYPE_RAW;
 	bool is_mcbc = rxcb->is_mcbc;
 	bool is_eapol = rxcb->is_eapol;
+	struct ath12k_link_sta *arsta = NULL;
+	struct ieee80211_rx_status *status = rx_info->rx_status;
 
 	if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
 	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
@@ -2400,31 +3213,42 @@
 		status->flag |= RX_FLAG_RADIOTAP_HE;
 	}
 
-	if (!(status->flag & RX_FLAG_ONLY_MONITOR))
-		decap = ath12k_dp_rx_h_decap_type(ab, rxcb->rx_desc);
+	if (!(status->flag & RX_FLAG_ONLY_MONITOR) &&
+	    rx_info->filled & BIT_ULL(ATH12K_RX_INFO_DECAP_TYPE))
+		decap = rx_info->decap_type;
 
 	spin_lock_bh(&ab->base_lock);
-	peer = ath12k_dp_rx_h_find_peer(ab, msdu);
+	if (rxcb->peer_id)
+		peer = ath12k_peer_find_by_id(ab, rxcb->peer_id);
+	if (!peer && (rx_info->filled & BIT_ULL(ATH12K_RX_INFO_ADDR2)))
+		peer = ath12k_peer_find_by_addr(ab, rx_info->addr2);
 
-	pubsta = peer ? peer->sta : NULL;
+	if (peer && peer->sta) {
+		pubsta = peer->sta;
+		if (pubsta->valid_links) {
+			status->link_valid = 1;
+			status->link_id = peer->link_id;
+		}
+	}
 
 	spin_unlock_bh(&ab->base_lock);
 
 	ath12k_dbg(ab, ATH12K_DBG_DATA,
-		   "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+		   "rx skb %pK len %u peer %pM %d %s %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
 		   msdu,
 		   msdu->len,
 		   peer ? peer->addr : NULL,
 		   rxcb->tid,
 		   is_mcbc ? "mcast" : "ucast",
-		   ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc),
 		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
 		   (status->encoding == RX_ENC_HT) ? "ht" : "",
 		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
 		   (status->encoding == RX_ENC_HE) ? "he" : "",
+		   (status->encoding == RX_ENC_EHT) ? "eht" : "",
 		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
 		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
 		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
+		   (status->bw == RATE_INFO_BW_320) ? "320" : "",
 		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
 		   status->rate_idx,
 		   status->nss,
@@ -2451,13 +3275,47 @@
 	    !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
 		rx_status->flag |= RX_FLAG_8023;
 
-	ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
+	ieee80211_rx_napi(ar->ah->hw, pubsta, msdu, napi);
+
+	if (ath12k_debugfs_is_extd_rx_stats_enabled(ar)) {
+		if (!(status->flag & RX_FLAG_ONLY_MONITOR)) {
+			spin_lock_bh(&ar->ab->base_lock);
+			if (peer && peer->sta)
+				arsta = ath12k_peer_get_link_sta(ar->ab, peer);
+			spin_unlock_bh(&ar->ab->base_lock);
+			if (arsta)
+				atomic_inc(&arsta->drv_rx_pkts.pkts_out);
+		}
+	}
+}
+
+static bool ath12k_dp_rx_check_max_nwifi_hdr_len(struct ath12k_base *ab,
+						 struct hal_rx_desc *rx_desc,
+						 struct sk_buff *msdu,
+						 const char *msg)
+{
+	struct ieee80211_hdr *hdr;
+	u32 hdr_len;
+
+	hdr = (struct ieee80211_hdr *)msdu->data;
+	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+	if (unlikely(hdr_len > DP_MAX_NWIFI_HDR_LEN)) {
+		ab->soc_stats.invalid_rbm++;
+		ath12k_err_dump(ab, msg, "msdu_data: ", msdu->data, msdu->len,
+				NULL);
+		ath12k_err_dump(ab, NULL, "rx_desc: ", rx_desc, sizeof(*rx_desc),
+				NULL);
+		return true;
+	}
+
+	return false;
 }
 
 static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
 				     struct sk_buff *msdu,
 				     struct sk_buff_head *msdu_list,
-				     struct ieee80211_rx_status *rx_status)
+				     struct ath12k_dp_rx_info *rx_info,
+				     bool *fast_rx)
 {
 	struct ath12k_base *ab = ar->ab;
 	struct hal_rx_desc *rx_desc, *lrx_desc;
@@ -2478,7 +3336,8 @@
 
 	rx_desc = (struct hal_rx_desc *)msdu->data;
 	lrx_desc = (struct hal_rx_desc *)last_buf->data;
-	if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) {
+	ab->hw_params->hal_ops->rx_get_desc_msdulen_l3pad(lrx_desc, rx_info);
+	if (!rx_info->msdu_done) {
 		ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n");
 		ret = -EIO;
 		goto free_out;
@@ -2486,8 +3345,8 @@
 
 	rxcb = ATH12K_SKB_RXCB(msdu);
 	rxcb->rx_desc = rx_desc;
-	msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc);
-	l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc);
+	msdu_len = rx_info->msdu_len;
+	l3_pad_bytes = rx_info->l3_pad_bytes;
 
 	if (rxcb->is_frag) {
 		skb_pull(msdu, hal_rx_desc_sz);
@@ -2504,7 +3363,8 @@
 	} else {
 		ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list,
 						 msdu, last_buf,
-						 l3_pad_bytes, msdu_len);
+						 l3_pad_bytes, msdu_len,
+						 rx_info);
 		if (ret) {
 			ath12k_warn(ab,
 				    "failed to coalesce msdu rx buffer%d\n", ret);
@@ -2512,10 +3372,23 @@
 		}
 	}
 
-	ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
-	ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
+	ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
+
+	if (rx_info->decap_type == DP_RX_DECAP_TYPE_NATIVE_WIFI &&
+	    ath12k_dp_rx_check_max_nwifi_hdr_len(ab, rx_desc, msdu,
+						 "Invalid len in Rx\n")) {
+		WARN_ON_ONCE(1);
+		ret = -EINVAL;
+		goto free_out;
+	}
+
+	ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_info, fast_rx);
+	if (*fast_rx)
+		return 0;
+
+	ath12k_dp_rx_h_ppdu(ar, rx_info);
 
-	rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
+	rx_info->rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
 
 	return 0;
 
@@ -2530,131 +3403,255 @@
 {
 	struct ieee80211_rx_status rx_status = {0};
 	struct ath12k_skb_rxcb *rxcb;
+	struct ath12k_dp_rx_info rx_info;
 	struct sk_buff *msdu;
 	struct ath12k *ar;
-	u8 mac_id;
+	u8 mac_id, hw_link_id;
 	int ret;
+	bool fast_rx;
+	struct sk_buff *skb;
 
 	if (skb_queue_empty(msdu_list))
 		return;
 
+	rx_info.filled = 0;
+	rx_info.rx_status = &rx_status;
+
 	rcu_read_lock();
 
 	while ((msdu = __skb_dequeue(msdu_list))) {
+		skb = msdu_list->next;
+		if (likely(skb)) {
+			prefetch(skb);
+			prefetch(&skb->protocol);
+			prefetch(&skb->data);
+			prefetch(skb->data);
+			prefetch(skb->data + 64);
+			prefetch(skb->data + 128);
+		}
 		rxcb = ATH12K_SKB_RXCB(msdu);
-		mac_id = rxcb->mac_id;
-		ar = ab->pdevs[mac_id].ar;
-		if (!rcu_dereference(ab->pdevs_active[mac_id])) {
+		/* Enable fast rx by default, the value will cahnge based on peer cap
+                * and packet type */
+		fast_rx = true;
+		rxcb->napi = napi;
+
+		hw_link_id = rxcb->hw_link_id;
+		if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
 			dev_kfree_skb_any(msdu);
+			ath12k_warn(ab, "invalid hw link id %d\n", hw_link_id);
 			continue;
 		}
 
-		if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+		ar = rcu_dereference(ab->ag->hw_links[hw_link_id]);
+		if (!ar) {
+			dev_kfree_skb_any(msdu);
+			ath12k_warn(ab, "invalid pdev for hw link id %d\n", hw_link_id);
+			continue;
+		}
+
+		mac_id = ar->pdev_idx;
+		if (!rcu_dereference(ar->ab->pdevs_active[mac_id])) {
+			dev_kfree_skb_any(msdu);
+			continue;
+		}
+
+		if (unlikely(test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags))) {
 			dev_kfree_skb_any(msdu);
 			continue;
 		}
 
-		ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
+		ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_info,
+						&fast_rx);
 		if (ret) {
 			ath12k_dbg(ab, ATH12K_DBG_DATA,
-				   "Unable to process msdu %d", ret);
+				   "Unable to process %s chip_id %d msdu %d",
+				    (ab != ar->ab) ? "partner" : "own",
+				    ar->ab->chip_id, ret);
 			dev_kfree_skb_any(msdu);
 			continue;
 		}
 
-		ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
+		if (!fast_rx) {
+			ab->soc_stats.non_fast_rx[ring_id][ar->ab->chip_id]++;
+			ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
+		} else {
+			ab->soc_stats.fast_rx[ring_id][ar->ab->chip_id]++;
+		}
 	}
 
 	rcu_read_unlock();
 }
 
+/* Sends WMI config to filter packets to route packets to WBM release ring */
+int ath12k_dp_rx_pkt_type_filter(struct ath12k *ar,
+				 enum ath12k_routing_pkt_type pkt_type,
+				 u32 meta_data)
+{
+	struct ath12k_wmi_pkt_route_param param;
+	int ret;
+
+	/* Routing Eapol/ARP packets to CCE is only allowed now */
+	if (pkt_type != ATH12K_PKT_TYPE_EAP &&
+	    pkt_type != ATH12K_PKT_TYPE_ARP_IPV4)
+		return -EINVAL;
+
+	param.opcode = ATH12K_WMI_PKTROUTE_ADD;
+	param.meta_data = meta_data;
+	param.dst_ring = ATH12K_ROUTE_WBM_RELEASE(ar->ab);
+	param.dst_ring_handler = ATH12K_WMI_PKTROUTE_USE_CCE;
+	param.route_type_bmap = 1 << pkt_type;
+
+	ret = ath12k_wmi_send_pdev_pkt_route(ar, &param);
+	if (ret)
+		ath12k_warn(ar->ab, "failed to configure pkt route %d", ret);
+
+	return ret;
+}
+
 int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
 			 struct napi_struct *napi, int budget)
 {
+	struct ath12k_base *src_ab;
 	struct ath12k_rx_desc_info *desc_info;
 	struct ath12k_dp *dp = &ab->dp;
 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
-	struct hal_reo_dest_ring *desc;
-	int num_buffs_reaped = 0;
+	int i, num_buffs_reaped[ATH12K_MAX_SOCS] = { };
 	struct sk_buff_head msdu_list;
 	struct ath12k_skb_rxcb *rxcb;
 	int total_msdu_reaped = 0;
 	struct hal_srng *srng;
 	struct sk_buff *msdu;
+	u32 *rx_desc;
 	bool done = false;
-	int mac_id;
 	u64 desc_va;
+	struct ath12k_link_sta *arsta = NULL;
+	struct ath12k_peer *peer = NULL;
+	struct ath12k *ar;
+	u8 hw_link_id, chip_id;
+	int valid_entries;
+	struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
 
 	__skb_queue_head_init(&msdu_list);
 
+	for (i = 0; i < ATH12K_MAX_SOCS; i++)
+		INIT_LIST_HEAD(&rx_desc_used_list[i]);
+
 	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
 
 	spin_lock_bh(&srng->lock);
 
 try_again:
 	ath12k_hal_srng_access_begin(ab, srng);
+	valid_entries = ath12k_hal_srng_dst_num_free(ab, srng, false);
+	if (unlikely(!valid_entries)) {
+		ath12k_hal_srng_access_end(ab, srng);
+		spin_unlock_bh(&srng->lock);
+		return -EINVAL;
+	}
+	ath12k_hal_srng_dst_invalidate_entry(ab, srng, valid_entries);
 
-	while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+	while (likely((rx_desc = ath12k_hal_srng_dst_get_next_cache_entry(ab, srng)))) {
+		struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc;
 		enum hal_reo_dest_ring_push_reason push_reason;
 		u32 cookie;
 
-		cookie = le32_get_bits(desc->buf_addr_info.info1,
+		cookie = le32_get_bits(desc.buf_addr_info.info1,
 				       BUFFER_ADDR_INFO1_SW_COOKIE);
 
-		mac_id = le32_get_bits(desc->info0,
-				       HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
 
-		desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
-			   le32_to_cpu(desc->buf_va_lo));
+		hw_link_id = le32_get_bits(desc.info0,
+					  HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+		desc_va = ((u64)le32_to_cpu(desc.buf_va_hi) << 32 |
+			   le32_to_cpu(desc.buf_va_lo));
 		desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
+		rcu_read_lock();
+
+		if (hw_link_id < ATH12K_GROUP_MAX_RADIO)
+			ar = rcu_dereference(ab->ag->hw_links[hw_link_id]);
+		else
+			ar = NULL;
+
+		if (!ar) {
+			rcu_read_unlock();
+
+			ab->soc_stats.hal_reo_error[ring_id]++;
+			ath12k_warn(ab, "Rx with invalid/inactive hw_link_id %d cookie 0x%x\n", hw_link_id, cookie);
+
+			if (desc_info) {
+				dev_kfree_skb_any(desc_info->skb);
+				desc_info->skb = NULL;
+			}
+			continue;
+		}
+
+		src_ab = ar->ab;
+		rcu_read_unlock();
+
 
 		/* retry manual desc retrieval */
+		if (unlikely(!desc_info)) {
+			desc_info = ath12k_dp_get_rx_desc(src_ab, cookie);
 		if (!desc_info) {
-			desc_info = ath12k_dp_get_rx_desc(ab, cookie);
-			if (!desc_info) {
-				ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+				ath12k_warn(ab, "Invalid cookie in manual desc retrieval cookie 0x%x", cookie);
 				continue;
 			}
 		}
 
-		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
+		if (unlikely(desc_info->magic != ATH12K_DP_RX_DESC_MAGIC))
 			ath12k_warn(ab, "Check HW CC implementation");
 
+		chip_id = src_ab->chip_id;
+
 		msdu = desc_info->skb;
 		desc_info->skb = NULL;
+		desc_info->paddr = 0;
 
-		spin_lock_bh(&dp->rx_desc_lock);
-		list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
-		spin_unlock_bh(&dp->rx_desc_lock);
+		list_add_tail(&desc_info->list, &rx_desc_used_list[chip_id]);
 
 		rxcb = ATH12K_SKB_RXCB(msdu);
-		dma_unmap_single(ab->dev, rxcb->paddr,
+		dmac_inv_range_no_dsb(msdu->data, msdu->data + (msdu->len + skb_tailroom(msdu)));
+		dma_unmap_single_attrs(src_ab->dev, rxcb->paddr,
 				 msdu->len + skb_tailroom(msdu),
-				 DMA_FROM_DEVICE);
+				       DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 
-		num_buffs_reaped++;
+		num_buffs_reaped[chip_id]++;
+		ab->soc_stats.reo_rx[ring_id][chip_id]++;
 
-		push_reason = le32_get_bits(desc->info0,
+		push_reason = le32_get_bits(desc.info0,
 					    HAL_REO_DEST_RING_INFO0_PUSH_REASON);
-		if (push_reason !=
-		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+		if (unlikely(push_reason !=
+		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
 			dev_kfree_skb_any(msdu);
-			ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
+			ab->soc_stats.hal_reo_error[ring_id]++;
 			continue;
 		}
 
-		rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+		rxcb->is_first_msdu = !!(le32_to_cpu(desc.rx_msdu_info.info0) &
 					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
-		rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+		rxcb->is_last_msdu = !!(le32_to_cpu(desc.rx_msdu_info.info0) &
 					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
-		rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+		rxcb->is_continuation = !!(le32_to_cpu(desc.rx_msdu_info.info0) &
 					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
-		rxcb->mac_id = mac_id;
-		rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data,
+		rxcb->hw_link_id = hw_link_id;
+		rxcb->peer_id = le32_get_bits(desc.rx_mpdu_info.peer_meta_data,
 					      RX_MPDU_DESC_META_DATA_PEER_ID);
-		rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0,
+		rxcb->tid = le32_get_bits(desc.rx_mpdu_info.info0,
 					  RX_MPDU_DESC_INFO0_TID);
 
+		ar->wmm_stats.rx_type = ath12k_tid_to_ac(rxcb->tid > ATH12K_DSCP_PRIORITY ? 0: rxcb->tid);
+
+		if (ath12k_debugfs_is_extd_rx_stats_enabled(ar) && rxcb->peer_id) {
+			rcu_read_lock();
+			spin_lock_bh(&src_ab->base_lock);
+			peer = ath12k_peer_find_by_id(src_ab, rxcb->peer_id);
+			if (peer && peer->sta)
+				arsta = ath12k_peer_get_link_sta(src_ab, peer);
+			spin_unlock_bh(&src_ab->base_lock);
+			if (arsta)
+				atomic_inc(&arsta->drv_rx_pkts.pkts_frm_hw);
+			rcu_read_unlock();
+		}
+
 		__skb_queue_tail(&msdu_list, msdu);
 
 		if (!rxcb->is_continuation) {
@@ -2683,12 +3680,23 @@
 
 	spin_unlock_bh(&srng->lock);
 
-	if (!total_msdu_reaped)
+	if (unlikely(!total_msdu_reaped))
 		goto exit;
 
-	/* TODO: Move to implicit BM? */
-	ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,
-				    ab->hw_params->hal_params->rx_buf_rbm, true);
+	for (i = 0; i < ab->ag->num_chip; i++) {
+		if (!num_buffs_reaped[i])
+			continue;
+
+		src_ab = ab->ag->ab[i];
+		if (!src_ab)
+			continue;
+
+		rx_ring = &src_ab->dp.rx_refill_buf_ring;
+
+		ath12k_dp_rx_bufs_replenish(src_ab, rx_ring,
+					    &rx_desc_used_list[i],
+					    num_buffs_reaped[i]);
+	}
 
 	ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
 					      ring_id);
@@ -2711,27 +3719,13 @@
 	spin_unlock_bh(&rx_tid->ab->base_lock);
 }
 
-int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id)
+int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, struct ath12k_peer *peer,
+				 struct crypto_shash *tfm)
 {
 	struct ath12k_base *ab = ar->ab;
-	struct crypto_shash *tfm;
-	struct ath12k_peer *peer;
 	struct ath12k_dp_rx_tid *rx_tid;
 	int i;
 
-	tfm = crypto_alloc_shash("michael_mic", 0, 0);
-	if (IS_ERR(tfm))
-		return PTR_ERR(tfm);
-
-	spin_lock_bh(&ab->base_lock);
-
-	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
-	if (!peer) {
-		spin_unlock_bh(&ab->base_lock);
-		ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
-		return -ENOENT;
-	}
-
 	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
 		rx_tid = &peer->rx_tid[i];
 		rx_tid->ab = ab;
@@ -2740,7 +3734,7 @@
 	}
 
 	peer->tfm_mmic = tfm;
-	spin_unlock_bh(&ab->base_lock);
+	peer->dp_setup_done = true;
 
 	return 0;
 }
@@ -2795,6 +3789,7 @@
 	struct ieee80211_key_conf *key_conf;
 	struct ieee80211_hdr *hdr;
 	u8 mic[IEEE80211_CCMP_MIC_LEN];
+	struct ath12k_dp_rx_info rx_info;
 	int head_len, tail_len, ret;
 	size_t data_len;
 	u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
@@ -2804,6 +3799,9 @@
 	if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
 		return 0;
 
+	rx_info.filled = 0;
+	rx_info.rx_status = rxs;
+
 	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
 	head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
@@ -2830,14 +3828,23 @@
 	(ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
 	(ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;
 
+	ath12k_dp_rx_h_fetch_info(ab, rx_desc, &rx_info);
+
 	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
 		    RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
 	skb_pull(msdu, hal_rx_desc_sz);
 
-	ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+	if (rx_info.decap_type == DP_RX_DECAP_TYPE_NATIVE_WIFI &&
+	    ath12k_dp_rx_check_max_nwifi_hdr_len(ab, rx_desc, msdu,
+						 "Invalid len in verify tkip\n")) {
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+
+	ath12k_dp_rx_h_ppdu(ar, &rx_info);
 	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
 			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
-	ieee80211_rx(ar->hw, msdu);
+	ieee80211_rx(ar->ah->hw, msdu);
 	return -EINVAL;
 }
 
@@ -2992,7 +3999,7 @@
 
 	buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
 				   defrag_skb->len + skb_tailroom(defrag_skb),
-				   DMA_FROM_DEVICE);
+				   DMA_TO_DEVICE);
 	if (dma_mapping_error(ab->dev, buf_paddr))
 		return -ENOMEM;
 
@@ -3008,9 +4015,9 @@
 	}
 
 	desc_info->skb = defrag_skb;
+	desc_info->in_use = true;
 
 	list_del(&desc_info->list);
-	list_add_tail(&desc_info->list, &dp->rx_desc_used_list);
 	spin_unlock_bh(&dp->rx_desc_lock);
 
 	ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
@@ -3036,7 +4043,7 @@
 
 	ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
 					cookie,
-					HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST);
+					dp->idle_link_rbm_id);
 
 	mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
 		    u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
@@ -3072,13 +4079,13 @@
 
 err_free_desc:
 	spin_lock_bh(&dp->rx_desc_lock);
-	list_del(&desc_info->list);
-	list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
 	desc_info->skb = NULL;
+	desc_info->in_use = false;
+	list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
 	spin_unlock_bh(&dp->rx_desc_lock);
 err_unmap_dma:
 	dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
-			 DMA_FROM_DEVICE);
+			 DMA_TO_DEVICE);
 	return ret;
 }
 
@@ -3206,6 +4213,14 @@
 		ret = -ENOENT;
 		goto out_unlock;
 	}
+
+	if (!peer->dp_setup_done) {
+		ath12k_warn(ab, "The peer %pM [%d] has not initialized its datapath\n",
+				peer->addr, peer_id);
+		ret = -ENOENT;
+		goto out_unlock;
+	}
+
 	rx_tid = &peer->rx_tid[tid];
 
 	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
@@ -3221,7 +4236,7 @@
 		goto out_unlock;
 	}
 
-	if (frag_no > __fls(rx_tid->rx_frag_bitmap))
+	if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap)))
 		__skb_queue_tail(&rx_tid->rx_frags, msdu);
 	else
 		ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);
@@ -3283,7 +4298,8 @@
 
 static int
 ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
-			     bool drop, u32 cookie)
+			     bool drop, u32 cookie,
+			     struct list_head *list)
 {
 	struct ath12k_base *ab = ar->ab;
 	struct sk_buff *msdu;
@@ -3302,7 +4318,7 @@
 	if (!desc_info) {
 		desc_info = ath12k_dp_get_rx_desc(ab, cookie);
 		if (!desc_info) {
-			ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+			ath12k_warn(ab, "Invalid cookie in manual desc retrieval cookie 0x%x", cookie);
 			return -EINVAL;
 		}
 	}
@@ -3312,9 +4328,7 @@
 
 	msdu = desc_info->skb;
 	desc_info->skb = NULL;
-	spin_lock_bh(&ab->dp.rx_desc_lock);
-	list_move_tail(&desc_info->list, &ab->dp.rx_desc_free_list);
-	spin_unlock_bh(&ab->dp.rx_desc_lock);
+	list_add_tail(&desc_info->list, list);
 
 	rxcb = ATH12K_SKB_RXCB(msdu);
 	dma_unmap_single(ar->ab->dev, rxcb->paddr,
@@ -3359,11 +4373,46 @@
 	return 0;
 }
 
+static int ath12k_dp_h_msdu_buffer_type(struct ath12k_base *ab,
+					struct hal_reo_dest_ring *desc,
+					struct list_head *list)
+{
+	struct ath12k_rx_desc_info *desc_info;
+	struct sk_buff *msdu;
+	struct ath12k_skb_rxcb *rxcb;
+	u64 desc_va;
+
+	ab->soc_stats.reo_excep_msdu_buf_type++;
+
+	desc_va = ((u64)desc->buf_va_hi << 32 | desc->buf_va_lo);
+	desc_info = (struct ath12k_rx_desc_info *)(uintptr_t)desc_va;
+	if (!desc_info) {
+		ath12k_warn(ab, " rx exception, hw cookie conversion failed");
+		return -EINVAL;
+	}
+
+	if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) {
+		ath12k_warn(ab, " rx exception, magic check failed");
+		return -EINVAL;
+	}
+
+	msdu = desc_info->skb;
+	desc_info->skb = NULL;
+	list_add_tail(&desc_info->list, list);
+	rxcb = ATH12K_SKB_RXCB(msdu);
+	dma_unmap_single(ab->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu),
+			 DMA_FROM_DEVICE);
+	dev_kfree_skb_any(msdu);
+
+	return 0;
+}
+
 int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
 			     int budget)
 {
 	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
 	struct dp_link_desc_bank *link_desc_banks;
+	struct ath12k_base *src_ab;
 	enum hal_rx_buf_return_buf_manager rbm;
 	struct hal_rx_msdu_link *link_desc_va;
 	int tot_n_bufs_reaped, quota, ret, i;
@@ -3373,18 +4422,22 @@
 	u32 desc_bank, num_msdus;
 	struct hal_srng *srng;
 	struct ath12k_dp *dp;
-	int mac_id;
-	struct ath12k *ar;
+	struct ath12k *ar = NULL;
 	dma_addr_t paddr;
-	bool is_frag;
-	bool drop = false;
+	bool is_frag, drop;
+	char buf[64] = {0};
+	u8 hw_link_id, chip_id;
+	int num_buffs_reaped[ATH12K_MAX_SOCS] = { };
+	struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
 
 	tot_n_bufs_reaped = 0;
 	quota = budget;
 
 	dp = &ab->dp;
 	reo_except = &dp->reo_except_ring;
-	link_desc_banks = dp->link_desc_banks;
+
+	for (i = 0; i < ATH12K_MAX_SOCS; i++)
+		INIT_LIST_HEAD(&rx_desc_used_list[i]);
 
 	srng = &ab->hal.srng_list[reo_except->ring_id];
 
@@ -3398,20 +4451,61 @@
 		ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
 						    &desc_bank);
 		if (ret) {
-			ath12k_warn(ab, "failed to parse error reo desc %d\n",
-				    ret);
-			continue;
+			scnprintf(buf, sizeof(buf), "failed to parse error reo desc %d\n", ret);
+			ath12k_err_dump(ab, buf, "rx err desc: ", reo_desc,
+					sizeof(*reo_desc), srng);
+			BUG_ON(1);
 		}
+
+                hw_link_id = u32_get_bits(reo_desc->info0,
+                                          HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+
+                rcu_read_lock();
+                if (hw_link_id < ATH12K_GROUP_MAX_RADIO)
+                        ar = rcu_dereference(ab->ag->hw_links[hw_link_id]);
+		else
+			ar = NULL;
+                rcu_read_unlock();
+
+                if (!ar) {
+                        ath12k_err(ab, "invalid src link id %d on chip id %d err process\n",
+                                   hw_link_id, ab->chip_id);
+
+                        ath12k_err_dump(ab, NULL, "rx err desc: ", reo_desc,
+                                        sizeof(*reo_desc), srng);
+                        BUG_ON(1);
+                }
+
+		src_ab = ar->ab;
+		chip_id = src_ab->chip_id;
+
+		/* Below case is added to handle data packets from un-associated clients.
+		 * As it is expected that AST lookup will fail for
+		 * un-associated station's data packets.
+		 */
+		if (u32_get_bits(reo_desc->info0, HAL_REO_DEST_RING_INFO0_BUFFER_TYPE) ==
+			HAL_REO_DEST_RING_BUFFER_TYPE_MSDU) {
+			if (!ath12k_dp_h_msdu_buffer_type(src_ab, reo_desc,
+							  &rx_desc_used_list[chip_id])) {
+				num_buffs_reaped[chip_id]++;
+				tot_n_bufs_reaped++;
+			}
+
+			goto end_loop;
+		}
+
+		dp = &src_ab->dp;
+		link_desc_banks = dp->link_desc_banks;
 		link_desc_va = link_desc_banks[desc_bank].vaddr +
 			       (paddr - link_desc_banks[desc_bank].paddr);
 		ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
 						 &rbm);
-		if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST &&
+		if (rbm != dp->idle_link_rbm_id &&
 		    rbm != HAL_RX_BUF_RBM_SW3_BM &&
-		    rbm != ab->hw_params->hal_params->rx_buf_rbm) {
+		    rbm != src_ab->hw_params->hal_params->rx_buf_rbm) {
 			ab->soc_stats.invalid_rbm++;
 			ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
-			ath12k_dp_rx_link_desc_return(ab, reo_desc,
+			ath12k_dp_rx_link_desc_return(src_ab, reo_desc,
 						      HAL_WBM_REL_BM_ACT_REL_MSDU);
 			continue;
 		}
@@ -3421,25 +4515,31 @@
 
 		/* Process only rx fragments with one msdu per link desc below, and drop
 		 * msdu's indicated due to error reasons.
+		 * Dynamic fragmentation not supported in Multi-link client, so drop.
 		 */
-		if (!is_frag || num_msdus > 1) {
+		if (!is_frag || num_msdus > 1 || src_ab != ab)
 			drop = true;
+		else
+			drop = false;
+
+		if (drop) {
 			/* Return the link desc back to wbm idle list */
-			ath12k_dp_rx_link_desc_return(ab, reo_desc,
+			ath12k_dp_rx_link_desc_return(src_ab, reo_desc,
 						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+
+			ar->wmm_stats.total_wmm_rx_drop[ar->wmm_stats.rx_type] += num_msdus;
 		}
 
 		for (i = 0; i < num_msdus; i++) {
-			mac_id = le32_get_bits(reo_desc->info0,
-					       HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
-
-			ar = ab->pdevs[mac_id].ar;
-
 			if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop,
-							  msdu_cookies[i]))
+							  msdu_cookies[i],
+							  &rx_desc_used_list[chip_id])) {
+				num_buffs_reaped[chip_id]++;
 				tot_n_bufs_reaped++;
 		}
+		}
 
+end_loop:
 		if (tot_n_bufs_reaped >= quota) {
 			tot_n_bufs_reaped = quota;
 			goto exit;
@@ -3453,10 +4553,20 @@
 
 	spin_unlock_bh(&srng->lock);
 
-	rx_ring = &dp->rx_refill_buf_ring;
+	for (i = 0; i < ab->ag->num_chip; i++) {
+		if (!num_buffs_reaped[i])
+			continue;
 
-	ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, tot_n_bufs_reaped,
-				    ab->hw_params->hal_params->rx_buf_rbm, true);
+		src_ab = ab->ag->ab[i];
+		if (!src_ab)
+			continue;
+
+		rx_ring = &src_ab->dp.rx_refill_buf_ring;
+
+		ath12k_dp_rx_bufs_replenish(src_ab, rx_ring,
+					    &rx_desc_used_list[i],
+					    num_buffs_reaped[i]);
+	}
 
 	return tot_n_bufs_reaped;
 }
@@ -3486,7 +4596,7 @@
 }
 
 static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
-				      struct ieee80211_rx_status *status,
+				      struct ath12k_dp_rx_info *rx_info,
 				      struct sk_buff_head *msdu_list)
 {
 	struct ath12k_base *ab = ar->ab;
@@ -3495,19 +4605,13 @@
 	u8 l3pad_bytes;
 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
 	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+	bool fast_rx;
+	struct ath12k_link_sta *arsta = NULL;
+	struct ath12k_peer *peer = NULL;
 
 	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
 	peer_id = ath12k_dp_rx_h_peer_id(ab, desc);
 
-	spin_lock(&ab->base_lock);
-	if (!ath12k_peer_find_by_id(ab, peer_id)) {
-		spin_unlock(&ab->base_lock);
-		ath12k_dbg(ab, ATH12K_DBG_DATA, "invalid peer id received in wbm err pkt%d\n",
-			   peer_id);
-		return -EINVAL;
-	}
-	spin_unlock(&ab->base_lock);
-
 	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
 		/* First buffer will be freed by the caller, so deduct it's length */
 		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
@@ -3549,9 +4653,19 @@
 		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
 		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
 	}
-	ath12k_dp_rx_h_ppdu(ar, desc, status);
 
-	ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);
+	ath12k_dp_rx_h_fetch_info(ab, desc, rx_info);
+
+	if (rx_info->decap_type == DP_RX_DECAP_TYPE_NATIVE_WIFI &&
+	    ath12k_dp_rx_check_max_nwifi_hdr_len(ab, desc, msdu,
+						 "Invalid len in Null queue\n")) {
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+
+	ath12k_dp_rx_h_ppdu(ar, rx_info);
+	fast_rx = false;
+	ath12k_dp_rx_h_mpdu(ar, msdu, desc, rx_info, &fast_rx);
 
 	rxcb->tid = ath12k_dp_rx_h_tid(ab, desc);
 
@@ -3559,21 +4673,31 @@
 	 * rx with mac80211. Need not worry about cleaning up amsdu_list.
 	 */
 
+	if (ath12k_debugfs_is_extd_rx_stats_enabled(ar)) {
+		peer_id = ath12k_dp_rx_h_peer_id(ar->ab, desc);
+		spin_lock_bh(&ar->ab->base_lock);
+		if (peer_id)
+			peer = ath12k_peer_find_by_id(ar->ab, rxcb->peer_id);
+		if (peer && peer->sta)
+			arsta = ath12k_peer_get_link_sta(ar->ab, peer);
+		spin_unlock_bh(&ar->ab->base_lock);
+		if (arsta)
+			atomic_inc(&arsta->drv_rx_pkts.pkts_frm_hw);
+	}
+
 	return 0;
 }
 
 static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
-				   struct ieee80211_rx_status *status,
+				   struct ath12k_dp_rx_info *rx_info,
 				   struct sk_buff_head *msdu_list)
 {
 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
 	bool drop = false;
 
-	ar->ab->soc_stats.reo_error[rxcb->err_code]++;
-
 	switch (rxcb->err_code) {
 	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
-		if (ath12k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
+		if (ath12k_dp_rx_h_null_q_desc(ar, msdu, rx_info, msdu_list))
 			drop = true;
 		break;
 	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
@@ -3590,11 +4714,16 @@
 		break;
 	}
 
+	if (drop)
+		ar->ab->soc_stats.reo_error_drop[rxcb->err_code]++;
+	else
+		ar->ab->soc_stats.reo_error[rxcb->err_code]++;
+
 	return drop;
 }
 
-static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
-					struct ieee80211_rx_status *status)
+static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
+					struct ath12k_dp_rx_info *rx_info)
 {
 	struct ath12k_base *ab = ar->ab;
 	u16 msdu_len;
@@ -3608,20 +4737,94 @@
 
 	l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
 	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
+
+	if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) {
+		ath12k_warn(ab, "invalid msdu len in tkip mic err %u\n", msdu_len);
+		ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc,
+				sizeof(struct hal_rx_desc));
+		return true;
+	}
+
 	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
 	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
 
-	ath12k_dp_rx_h_ppdu(ar, desc, status);
+	if (rx_info->decap_type == DP_RX_DECAP_TYPE_NATIVE_WIFI &&
+	    ath12k_dp_rx_check_max_nwifi_hdr_len(ab, desc, msdu,
+						 "Invalid len in tkip mic err\n")) {
+		WARN_ON_ONCE(1);
+		return true;
+	}
+
+	ath12k_dp_rx_h_ppdu(ar, rx_info);
 
-	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
+	rx_info->rx_status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
 			 RX_FLAG_DECRYPTED);
 
 	ath12k_dp_rx_h_undecap(ar, msdu, desc,
-			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
+			       HAL_ENCRYPT_TYPE_TKIP_MIC, rx_info->rx_status, false);
+	return false;
+}
+
+static bool ath12k_dp_rx_h_4addr_null_frame_handler(struct ath12k *ar,
+						    struct sk_buff *msdu,
+						    struct ath12k_dp_rx_info *rx_info)
+{
+	struct ath12k_base *ab = ar->ab;
+	u16 msdu_len, peer_id;
+	u8 l3pad_bytes;
+	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+	struct hal_rx_desc *rx_desc = (struct hal_rx_desc*)msdu->data;
+	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+	struct ieee80211_hdr *hdr;
+	size_t hdr_len;
+	struct ath12k_dp_rx_rfc1042_hdr *llc;
+	bool drop = false;
+	bool fast_rx = false;
+
+	msdu_len = ath12k_dp_rx_h_msdu_len(ab, rx_desc);
+	peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
+
+	spin_lock_bh(&ab->base_lock);
+	if(!ath12k_peer_find_by_id(ab, peer_id)) {
+		spin_unlock_bh(&ab->base_lock);
+		ath12k_dbg(ab, ATH12K_DBG_DATA, "invalid peer id received in wbm err pkt%d\n",
+			   peer_id);
+		return -EINVAL;
+	}
+	spin_unlock_bh(&ab->base_lock);
+	l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, rx_desc);
+
+	if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
+		return -EINVAL;
+
+	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
+	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
+
+	if (rx_info->decap_type == DP_RX_DECAP_TYPE_NATIVE_WIFI &&
+	    ath12k_dp_rx_check_max_nwifi_hdr_len(ab, rx_desc, msdu,
+						 "Invalid len in 4addr Null frame\n")) {
+		WARN_ON_ONCE(1);
+		return true;
 }
 
+	ath12k_dp_rx_h_ppdu(ar, rx_info);
+
+	ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_info, &fast_rx);
+
+	rxcb->tid = ath12k_dp_rx_h_tid(ab, rx_desc);
+
+	hdr = (struct ieee80211_hdr *)msdu->data;
+	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+	llc = (struct ath12k_dp_rx_rfc1042_hdr *)(msdu->data + hdr_len);
+
+	if (!(llc->snap_type == cpu_to_be16(ETH_P_PAE) ||
+	      ieee80211_is_qos_nullfunc(hdr->frame_control)))
+		drop = true;
+
+	return drop;
+}
 static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar,  struct sk_buff *msdu,
-				     struct ieee80211_rx_status *status)
+				     struct ath12k_dp_rx_info *rx_info)
 {
 	struct ath12k_base *ab = ar->ab;
 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
@@ -3629,14 +4832,19 @@
 	bool drop = false;
 	u32 err_bitmap;
 
-	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
-
 	switch (rxcb->err_code) {
+	case HAL_REO_ENTR_RING_RXDMA_ECODE_UNAUTH_WDS_ERR:
+		ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
+
+		drop = ath12k_dp_rx_h_4addr_null_frame_handler(ar, msdu, rx_info);
+		break;
 	case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
 	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
 		err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
 		if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
-			ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
+			ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
+
+			drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, rx_info);
 			break;
 		}
 		fallthrough;
@@ -3648,6 +4856,11 @@
 		break;
 	}
 
+	if (drop)
+		ar->ab->soc_stats.rxdma_error_drop[rxcb->err_code]++;
+	else
+		ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
+
 	return drop;
 }
 
@@ -3658,14 +4871,18 @@
 {
 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
 	struct ieee80211_rx_status rxs = {0};
+	struct ath12k_dp_rx_info rx_info;
 	bool drop = true;
 
+	rx_info.filled = 0;
+	rx_info.rx_status = &rxs;
+
 	switch (rxcb->err_rel_src) {
 	case HAL_WBM_REL_SRC_MODULE_REO:
-		drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
+		drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rx_info, msdu_list);
 		break;
 	case HAL_WBM_REL_SRC_MODULE_RXDMA:
-		drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
+		drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rx_info);
 		break;
 	default:
 		/* msdu will get freed */
@@ -3677,31 +4894,38 @@
 		return;
 	}
 
-	ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
+	ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
 }
 
 int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
 				 struct napi_struct *napi, int budget)
 {
 	struct ath12k *ar;
+	struct ath12k_base *src_ab;
 	struct ath12k_dp *dp = &ab->dp;
 	struct dp_rxdma_ring *rx_ring;
 	struct hal_rx_wbm_rel_info err_info;
 	struct hal_srng *srng;
 	struct sk_buff *msdu;
-	struct sk_buff_head msdu_list[MAX_RADIOS];
+	struct sk_buff_head msdu_list, scatter_msdu_list;
 	struct ath12k_skb_rxcb *rxcb;
 	void *rx_desc;
-	int mac_id;
-	int num_buffs_reaped = 0;
+	int i, num_buffs_reaped[ATH12K_MAX_SOCS] = { 0 };
 	struct ath12k_rx_desc_info *desc_info;
-	int ret, i;
+	struct ath12k_soc_dp_stats *soc_stats = &ab->soc_stats;
+	int total_num_buffs_reaped = 0;
+	int ret;
+	u8 hw_link_id, chip_id;
+	char buf[64] = {0};
+	struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
+
+	__skb_queue_head_init(&msdu_list);
+	__skb_queue_head_init(&scatter_msdu_list);
 
-	for (i = 0; i < ab->num_radios; i++)
-		__skb_queue_head_init(&msdu_list[i]);
+	for (i = 0; i < ATH12K_MAX_SOCS; i++)
+		INIT_LIST_HEAD(&rx_desc_used_list[i]);
 
 	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
-	rx_ring = &dp->rx_refill_buf_ring;
 
 	spin_lock_bh(&srng->lock);
 
@@ -3714,10 +4938,10 @@
 
 		ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
 		if (ret) {
-			ath12k_warn(ab,
-				    "failed to parse rx error in wbm_rel ring desc %d\n",
-				    ret);
-			continue;
+			scnprintf(buf, sizeof(buf), "failed to parse rx error in wbm_rel ring desc %d\n", ret);
+			ath12k_err_dump(ab, buf, "wbm err desc: ", rx_desc,
+					sizeof(struct hal_wbm_release_ring), srng);
+			BUG_ON(1);
 		}
 
 		desc_info = (struct ath12k_rx_desc_info *)err_info.rx_desc;
@@ -3726,32 +4950,29 @@
 		if (!desc_info) {
 			desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
 			if (!desc_info) {
-				ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+				ath12k_warn(ab, "Invalid cookie in manual desc retrieval cookie 0x%x", err_info.cookie);
 				continue;
 			}
 		}
 
-		/* FIXME: Extract mac id correctly. Since descs are not tied
-		 * to mac, we can extract from vdev id in ring desc.
-		 */
-		mac_id = 0;
-
 		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
 			ath12k_warn(ab, "WBM RX err, Check HW CC implementation");
 
 		msdu = desc_info->skb;
 		desc_info->skb = NULL;
+		src_ab = ab->ag->ab[desc_info->chip_id];
+		dp = &src_ab->dp;
+		chip_id = src_ab->chip_id;
 
-		spin_lock_bh(&dp->rx_desc_lock);
-		list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
-		spin_unlock_bh(&dp->rx_desc_lock);
+		list_add_tail(&desc_info->list, &rx_desc_used_list[chip_id]);
 
 		rxcb = ATH12K_SKB_RXCB(msdu);
-		dma_unmap_single(ab->dev, rxcb->paddr,
+		dma_unmap_single(src_ab->dev, rxcb->paddr,
 				 msdu->len + skb_tailroom(msdu),
 				 DMA_FROM_DEVICE);
 
-		num_buffs_reaped++;
+		num_buffs_reaped[chip_id]++;
+		total_num_buffs_reaped++;
 
 		if (!err_info.continuation)
 			budget--;
@@ -3764,44 +4985,101 @@
 
 		rxcb->err_rel_src = err_info.err_rel_src;
 		rxcb->err_code = err_info.err_code;
-		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
-		__skb_queue_tail(&msdu_list[mac_id], msdu);
-
 		rxcb->is_first_msdu = err_info.first_msdu;
 		rxcb->is_last_msdu = err_info.last_msdu;
 		rxcb->is_continuation = err_info.continuation;
+		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
+
+		if (!err_info.continuation) {
+			hw_link_id = ath12k_dp_rx_get_msdu_src_link(src_ab, (struct hal_rx_desc *)msdu->data);
+			if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
+				if (ath12k_debug_critical) {
+					ath12k_err(ab, "WBM Err: invalid hw link id %d\n",
+						   hw_link_id);
+					ath12k_err_dump(ab, NULL, "WBM err desc: ", rx_desc,
+							sizeof(struct hal_wbm_release_ring_cc_rx), srng);
+					ath12k_err_dump(ab, NULL, "WBM Rx TLV: ", msdu->data,
+							sizeof(struct hal_rx_desc), srng);
+					dev_kfree_skb_any(msdu);
+					BUG_ON(1);
+				} else {
+					dev_kfree_skb_any(msdu);
+				}
+				continue;
+			}
+
+			if (!skb_queue_empty(&scatter_msdu_list)) {
+				__skb_queue_tail(&scatter_msdu_list, msdu);
+
+				skb_queue_walk(&scatter_msdu_list, msdu) {
+					rxcb = ATH12K_SKB_RXCB(msdu);
+					rxcb->hw_link_id = hw_link_id;
+				}
+
+				skb_queue_splice_tail_init(&scatter_msdu_list, &msdu_list);
+			} else {
+				rxcb->hw_link_id = hw_link_id;
+				__skb_queue_tail(&msdu_list, msdu);
+			}
+		} else {
+			__skb_queue_tail(&scatter_msdu_list, msdu);
+		}
 	}
 
 	ath12k_hal_srng_access_end(ab, srng);
 
 	spin_unlock_bh(&srng->lock);
 
-	if (!num_buffs_reaped)
+	if (!total_num_buffs_reaped)
 		goto done;
 
-	ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,
-				    ab->hw_params->hal_params->rx_buf_rbm, true);
+	for (i = 0; i < ab->ag->num_chip; i++) {
+		if (!num_buffs_reaped[i])
+			continue;
+
+		src_ab = ab->ag->ab[i];
+		if (!src_ab)
+			continue;
+
+		rx_ring = &src_ab->dp.rx_refill_buf_ring;
+
+		ath12k_dp_rx_bufs_replenish(src_ab, rx_ring,
+					    &rx_desc_used_list[i],
+					    num_buffs_reaped[i]);
+	}
 
 	rcu_read_lock();
-	for (i = 0; i <  ab->num_radios; i++) {
-		if (!rcu_dereference(ab->pdevs_active[i])) {
-			__skb_queue_purge(&msdu_list[i]);
+	while ((msdu = __skb_dequeue(&msdu_list)) != NULL) {
+
+		rxcb = ATH12K_SKB_RXCB(msdu);
+		hw_link_id = rxcb->hw_link_id;
+
+		ar = rcu_dereference(ab->ag->hw_links[hw_link_id]);
+		if (!ar) {
+			dev_kfree_skb_any(msdu);
+			ath12k_warn(ab, "WBM Rx: invalid pdev for hw link id %d\n",
+				    hw_link_id);
 			continue;
 		}
 
-		ar = ab->pdevs[i].ar;
+		if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
+			dev_kfree_skb_any(msdu);
+			continue;
+		}
 
 		if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
-			__skb_queue_purge(&msdu_list[i]);
+			dev_kfree_skb_any(msdu);
 			continue;
 		}
 
-		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
-			ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
+		if (rxcb->err_rel_src < HAL_WBM_REL_SRC_MODULE_MAX)
+			soc_stats->rx_wbm_rel_source[rxcb->err_rel_src][ar->ab->chip_id]++;
+
+		ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
 	}
 	rcu_read_unlock();
 done:
-	return num_buffs_reaped;
+	return total_num_buffs_reaped;
 }
 
 void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
@@ -3899,6 +5177,8 @@
 		ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
 
 	ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
+
+	if (ab->hw_params->supports_tx_monitor)
 	ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring);
 
 	ath12k_dp_rxdma_buf_free(ab);
@@ -3934,10 +5214,18 @@
 	tlv_filter.rx_msdu_end_offset =
 		ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
 
-	/* TODO: Selectively subscribe to required qwords within msdu_end
-	 * and mpdu_start and setup the mask in below msg
-	 * and modify the rx_desc struct
-	 */
+	if (ab->hw_params->compact_rx_tlv) {
+		if (ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start)
+			tlv_filter.rx_mpdu_start_wmask =
+				ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start();
+		if (ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end)
+			tlv_filter.rx_msdu_end_wmask =
+				ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end();
+		ath12k_dbg(ab, ATH12K_DBG_DATA,
+				"Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n",
+				tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask);
+	}
+
 	ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
 					       HAL_RXDMA_BUF,
 					       DP_RXDMA_REFILL_RING_SIZE,
@@ -3988,6 +5276,55 @@
 	return ret;
 }
 
+int
+ath12k_dp_rx_htt_rxdma_rxole_ppe_cfg_set(struct ath12k_base *ab,
+					 struct ath12k_dp_htt_rxdma_ppe_cfg_param *param)
+{
+	struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg *cmd;
+	struct ath12k_dp *dp = &ab->dp;
+	struct sk_buff *skb;
+	int len = sizeof(*cmd), ret, val;
+
+	skb = ath12k_htc_alloc_skb(ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, len);
+
+	cmd = (struct htt_h2t_msg_type_rxdma_rxole_ppe_cfg *)skb->data;
+	memset(cmd, 0, sizeof(*cmd));
+
+	cmd->info0 =
+		u32_encode_bits(HTT_H2T_MSG_TYPE_RXDMA_RXOLE_PPE_CFG,
+				HTT_H2T_RXOLE_PPE_CFG_MSG_TYPE) |
+		u32_encode_bits(param->override, HTT_H2T_RXOLE_PPE_CFG_OVERRIDE) |
+		u32_encode_bits(param->reo_dst_ind,
+				HTT_H2T_RXOLE_PPE_CFG_REO_DST_IND) |
+		u32_encode_bits(param->multi_buffer_msdu_override_en,
+				HTT_H2T_RXOLE_PPE_CFG_MULTI_BUF_MSDU_OVRD_EN) |
+		u32_encode_bits(param->intra_bss_override,
+				HTT_H2T_RXOLE_PPE_CFG_INTRA_BUS_OVRD) |
+		u32_encode_bits(param->decap_raw_override,
+				HTT_H2T_RXOLE_PPE_CFG_DECAP_RAW_OVRD) |
+		u32_encode_bits(param->decap_nwifi_override,
+				HTT_H2T_RXOLE_PPE_CFG_NWIFI_OVRD) |
+		u32_encode_bits(param->ip_frag_override,
+				HTT_H2T_RXOLE_PPE_CFG_IP_FRAG_OVRD);
+
+	val = cmd->info0;
+	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
+	if(ret) {
+		ath12k_warn(ab, "failed to send htt type H2T rx ole ppe config request: %d",
+			ret);
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_PPE, "RXOLE ppe config request sent val 0x%x\n", val);
+
+	return 0;
+}
+
 int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
 {
 	struct ath12k_dp *dp = &ab->dp;
@@ -4037,6 +5374,7 @@
 			return ret;
 		}
 
+		if (ab->hw_params->supports_tx_monitor) {
 		ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
 		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
 						  0, HAL_TX_MONITOR_BUF);
@@ -4046,6 +5384,7 @@
 			return ret;
 		}
 	}
+	}
 
 	ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
 	if (ret) {
@@ -4067,8 +5406,10 @@
 	idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
 	spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
 
+	if (ab->hw_params->supports_tx_monitor) {
 	idr_init(&dp->tx_mon_buf_ring.bufs_idr);
 	spin_lock_init(&dp->tx_mon_buf_ring.idr_lock);
+	}
 
 	ret = ath12k_dp_srng_setup(ab,
 				   &dp->rx_refill_buf_ring.refill_buf_ring,
@@ -4113,6 +5454,7 @@
 			return ret;
 		}
 
+		if (ab->hw_params->supports_tx_monitor) {
 		ret = ath12k_dp_srng_setup(ab,
 					   &dp->tx_mon_buf_ring.refill_buf_ring,
 					   HAL_TX_MONITOR_BUF, 0, 0,
@@ -4122,6 +5464,7 @@
 			return ret;
 		}
 	}
+	}
 
 	ret = ath12k_dp_rxdma_buf_setup(ab);
 	if (ret) {
@@ -4161,6 +5504,7 @@
 			return ret;
 		}
 
+		if (ab->hw_params->supports_tx_monitor) {
 		ring_id = dp->tx_mon_dst_ring[i].ring_id;
 		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
 						  mac_id + i,
@@ -4172,14 +5516,39 @@
 			return ret;
 		}
 	}
+	}
 out:
 	return 0;
 }
 
+static int ath12k_dp_rx_pdev_mon_status_detach(struct ath12k *ar)
+{
+	struct ath12k_pdev_dp *dp = &ar->dp;
+	struct ath12k_mon_data *pmon = dp->mon_data;
+
+	if (!pmon) {
+		ath12k_err(ar->ab, "pmon is NULL\n");
+		return -EINVAL;
+	}
+
+	vfree(pmon);
+	dp->mon_data = NULL;
+
+	return 0;
+}
+
 static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
 {
 	struct ath12k_pdev_dp *dp = &ar->dp;
-	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;
+	struct ath12k_mon_data *pmon;
+
+	pmon = vzalloc(sizeof(*pmon));
+	if (!pmon) {
+		ath12k_warn(ar->ab, "pmon allocation failed\n");
+		return -ENOMEM;
+	}
+
+	dp->mon_data = pmon;
 
 	skb_queue_head_init(&pmon->rx_status_q);
 
@@ -4193,7 +5562,6 @@
 int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
 {
 	struct ath12k_pdev_dp *dp = &ar->dp;
-	struct ath12k_mon_data *pmon = &dp->mon_data;
 	int ret = 0;
 
 	ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
@@ -4208,13 +5576,23 @@
 	if (!ar->ab->hw_params->rxdma1_enable)
 		return 0;
 
-	pmon->mon_last_linkdesc_paddr = 0;
-	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
-	spin_lock_init(&pmon->mon_lock);
+	dp->mon_data->mon_last_linkdesc_paddr = 0;
+	dp->mon_data->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
+	spin_lock_init(&dp->mon_data->mon_lock);
 
 	return 0;
 }
 
+void ath12k_dp_rx_pdev_mon_detach(struct ath12k_base *ab, const int pdev_idx)
+{
+	struct ath12k *ar = ab->pdevs[pdev_idx].ar;
+	int ret;
+
+	ret = ath12k_dp_rx_pdev_mon_status_detach(ar);
+	if (ret)
+		ath12k_warn(ar->ab, "pdev_mon_status_detach() failed %d\n", ret);
+}
+
 int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab)
 {
 	/* start reap timer */
@@ -4240,3 +5618,4 @@
 
 	return 0;
 }
+
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/dp_rx.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dp_rx.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/dp_rx.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dp_rx.h	2024-03-18 14:40:14.851741333 +0100
@@ -9,14 +9,47 @@
 #include "core.h"
 #include "rx_desc.h"
 #include "debug.h"
+#include "crypto/hash.h"
 
 #define DP_MAX_NWIFI_HDR_LEN	30
 
+
+/* different supported pkt types for routing */
+enum ath12k_routing_pkt_type {
+	ATH12K_PKT_TYPE_ARP_IPV4,
+	ATH12K_PKT_TYPE_NS_IPV6,
+	ATH12K_PKT_TYPE_IGMP_IPV4,
+	ATH12K_PKT_TYPE_MLD_IPV6,
+	ATH12K_PKT_TYPE_DHCP_IPV4,
+	ATH12K_PKT_TYPE_DHCP_IPV6,
+	ATH12K_PKT_TYPE_DNS_TCP_IPV4,
+	ATH12K_PKT_TYPE_DNS_TCP_IPV6,
+	ATH12K_PKT_TYPE_DNS_UDP_IPV4,
+	ATH12K_PKT_TYPE_DNS_UDP_IPV6,
+	ATH12K_PKT_TYPE_ICMP_IPV4,
+	ATH12K_PKT_TYPE_ICMP_IPV6,
+	ATH12K_PKT_TYPE_TCP_IPV4,
+	ATH12K_PKT_TYPE_TCP_IPV6,
+	ATH12K_PKT_TYPE_UDP_IPV4,
+	ATH12K_PKT_TYPE_UDP_IPV6,
+	ATH12K_PKT_TYPE_IPV4,
+	ATH12K_PKT_TYPE_IPV6,
+	ATH12K_PKT_TYPE_EAP,
+	ATH12K_PKT_TYPE_MAX
+};
+
+#define ATH12K_RX_PROTOCOL_TAG_START_OFFSET  128
+#define ATH12K_ROUTE_WBM_RELEASE(ab) \
+	((ab)->hw_params->route_wbm_release)
+#define ATH12K_ROUTE_EAP_METADATA       (ATH12K_RX_PROTOCOL_TAG_START_OFFSET + ATH12K_PKT_TYPE_EAP)
+#define ATH12K_ROUTE_ARP_METADATA       (ATH12K_RX_PROTOCOL_TAG_START_OFFSET + ATH12K_PKT_TYPE_ARP_IPV4)
+
 struct ath12k_dp_rx_tid {
 	u8 tid;
 	u32 *vaddr;
 	dma_addr_t paddr;
 	u32 size;
+	u32 pending_desc_size;
 	u32 ba_win_sz;
 	bool active;
 
@@ -39,6 +72,16 @@
 	unsigned long ts;
 };
 
+struct dp_reo_update_rx_queue_elem {
+	struct list_head list;
+	struct ath12k_dp_rx_tid data;
+	int peer_id;
+	u8 tid;
+	bool reo_cmd_update_rx_queue_resend_flag;
+	bool is_ml_peer;
+	u16 ml_peer_id;
+};
+
 struct ath12k_dp_rx_reo_cmd {
 	struct list_head list;
 	struct ath12k_dp_rx_tid data;
@@ -65,6 +108,45 @@
 	__be16 snap_type;
 } __packed;
 
+enum ath12k_dp_rx_info_type {
+	ATH12K_RX_INFO_DECAP_TYPE,
+	ATH12K_RX_INFO_PKT_TYPE,
+	ATH12K_RX_INFO_SGI,
+	ATH12K_RX_INFO_RATE_MCS,
+	ATH12K_RX_INFO_BW,
+	ATH12K_RX_INFO_NSS,
+	ATH12K_RX_INFO_PHY_META_DATA,
+	ATH12K_RX_INFO_ADDR2,
+
+	/* keep last */
+	ATH12K_RX_INFO_MAX
+};
+
+struct ath12k_dp_rx_info {
+	u64 filled;
+	u8 decap_type;
+	u8 pkt_type;
+	u8 sgi;
+	u8 rate_mcs;
+	u8 bw;
+	u8 nss;
+	u32 phy_meta_data;
+	u8 addr2[ETH_ALEN];
+	bool ip_csum_fail;
+	bool l4_csum_fail;
+	u8 ip_is_valid;
+	u8 decap;
+	u16 peer_id;
+	bool is_mcbc;
+	u8 l3_pad_bytes;
+	u16 msdu_len;
+	u8 tid;
+	bool msdu_done;
+	bool is_first_msdu;
+	bool is_last_msdu;
+	struct ieee80211_rx_status *rx_status;
+};
+
 static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi)
 {
 	u32 ret = 0;
@@ -84,20 +166,40 @@
 	return ret;
 }
 
+static inline u32 ath12k_eht_gi_to_nl80211_eht_gi(u8 sgi)
+{
+	u32 ret = 0;
+
+	switch (sgi) {
+	case RX_MSDU_START_SGI_0_8_US:
+		ret = NL80211_RATE_INFO_EHT_GI_0_8;
+		break;
+	case RX_MSDU_START_SGI_1_6_US:
+		ret = NL80211_RATE_INFO_EHT_GI_1_6;
+		break;
+	case RX_MSDU_START_SGI_3_2_US:
+		ret = NL80211_RATE_INFO_EHT_GI_3_2;
+		break;
+	}
+
+	return ret;
+}
+
 int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
-			     struct ieee80211_ampdu_params *params);
+			     struct ieee80211_ampdu_params *params,
+			     u8 link_id);
 int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
-			    struct ieee80211_ampdu_params *params);
-int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
+			    struct ieee80211_ampdu_params *params,
+			    u8 link_id);
+int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
 				       const u8 *peer_addr,
 				       enum set_key_cmd key_cmd,
 				       struct ieee80211_key_conf *key);
 void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer);
 void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
 				  struct ath12k_peer *peer, u8 tid);
-int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
-				u8 tid, u32 ba_win_sz, u16 ssn,
-				enum hal_pn_type pn_type);
+int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, u8 tid, u32 ba_win_sz, u16 ssn,
+				enum hal_pn_type pn_type, struct ath12k_peer *peer);
 void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
 				       struct sk_buff *skb);
 int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab);
@@ -116,30 +218,43 @@
 int ath12k_dp_rx_process(struct ath12k_base *ab, int mac_id,
 			 struct napi_struct *napi,
 			 int budget);
-int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id,
+int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
 				struct dp_rxdma_ring *rx_ring,
-				int req_entries,
-				enum hal_rx_buf_return_buf_manager mgr,
-				bool hw_cc);
+				struct list_head *used_list,
+				int req_entries);
 int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar);
-int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id);
-
+void ath12k_dp_rx_pdev_mon_detach(struct ath12k_base *ab, const int pdev_idx);
+int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, struct ath12k_peer *peer,
+				 struct crypto_shash *tfm);
 int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab);
 int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer);
 u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
 			struct hal_rx_desc *desc);
 struct ath12k_peer *
-ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu);
+ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu,
+			 struct ath12k_dp_rx_info *rx_info);
 u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
 			     struct hal_rx_desc *desc);
 u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
 			    struct hal_rx_desc *desc);
-void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
-			 struct ieee80211_rx_status *rx_status);
-struct ath12k_peer *
-ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu);
+void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info);
 
 int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab);
 int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab);
-
+int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+				  int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
+					      const void *ptr, void *data),
+				  void *data);
+int
+ath12k_dp_rx_htt_rxdma_rxole_ppe_cfg_set(struct ath12k_base *ab,
+					 struct ath12k_dp_htt_rxdma_ppe_cfg_param *param);
+int ath12k_dp_rx_pkt_type_filter(struct ath12k *ar,
+				 enum ath12k_routing_pkt_type pkt_type,
+				 u32 meta_data);
+void ath12k_dp_tid_cleanup(struct ath12k_base *ab);
+void ath12k_dp_peer_tid_setup(struct ath12k_base *ab);
+void ath12k_dp_peer_reo_tid_setup(struct ath12k_base *ab, struct ath12k_link_sta *arsta);
+void ath12k_dp_tid_setup(void *data, struct ieee80211_sta *sta);
+void ath12k_dp_reset_rx_reo_tid_q(void *vaddr, u32 ba_window_size,
+				  u8 tid);
 #endif /* ATH12K_DP_RX_H */
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/dp_tx.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dp_tx.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/dp_tx.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dp_tx.c	2024-04-19 16:04:28.953735667 +0200
@@ -7,15 +7,52 @@
 #include "core.h"
 #include "dp_tx.h"
 #include "debug.h"
+#include "debugfs_sta.h"
 #include "hw.h"
+#include "peer.h"
+#include "ppe.h"
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
 
+#ifdef CONFIG_ATH12K_SAWF
+static inline u32 ath12k_sawf_get_tcl_metadata_update(u32 sk_buff_mark)
+{
+	u32 tcl_metadata = 0;
+	u32 svc_id = u32_get_bits(sk_buff_mark, SAWF_SERVICE_CLASS_ID);
+
+	tcl_metadata = u32_encode_bits(HTT_TCL_META_DATA_TYPE_SVC_ID_BASED,
+				       HTT_TCL_META_DATA_TYPE_MISSION) |
+			u32_encode_bits(1, HTT_TCL_META_DATA_SAWF_TID_OVERRIDE) |
+			u32_encode_bits(svc_id - 1, HTT_TCL_META_DATA_SAWF_SVC_ID);
+	return tcl_metadata;
+}
+
+static inline u32 ath12k_sawf_get_tcl_cmd_info3_update(u32 msduq_id)
+{
+	u32 tid, flow_override, who_classify_info_sel, update = 0;
+
+	tid = u32_get_bits(msduq_id, TID_FROM_Q_ID);
+	flow_override = u32_get_bits(msduq_id, FLOW_OVERRIDE_FROM_Q_ID);
+	who_classify_info_sel = u32_get_bits(msduq_id, WHO_CLASSIFY_INFO_FROM_Q_ID);
+
+	update = u32_encode_bits(tid, HAL_TCL_DATA_CMD_INFO3_TID) |
+		 u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE) |
+		 u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO3_FLOW_OVERRIDE_EN) |
+		 u32_encode_bits(who_classify_info_sel,
+				 HAL_TCL_DATA_CMD_INFO3_CLASSIFY_INFO_SEL) |
+		 u32_encode_bits(flow_override,
+				 HAL_TCL_DATA_CMD_INFO3_FLOW_OVERRIDE);
+
+	return update;
+}
+
+#endif /* CONFIG_ATH12K_SAWF */
 static enum hal_tcl_encap_type
-ath12k_dp_tx_get_encap_type(struct ath12k_vif *arvif, struct sk_buff *skb)
+ath12k_dp_tx_get_encap_type(struct ath12k_base *ab, struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-	struct ath12k_base *ab = arvif->ar->ab;
 
-	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
+	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->ag->dev_flags))
 		return HAL_TCL_ENCAP_TYPE_RAW;
 
 	if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
@@ -41,19 +78,6 @@
 	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
 }
 
-static u8 ath12k_dp_tx_get_tid(struct sk_buff *skb)
-{
-	struct ieee80211_hdr *hdr = (void *)skb->data;
-	struct ath12k_skb_cb *cb = ATH12K_SKB_CB(skb);
-
-	if (cb->flags & ATH12K_SKB_HW_80211_ENCAP)
-		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
-	else if (!ieee80211_is_data_qos(hdr->frame_control))
-		return HAL_DESC_REO_NON_QOS_TID;
-	else
-		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
-}
-
 enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher)
 {
 	switch (cipher) {
@@ -76,31 +100,104 @@
 	}
 }
 
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+#define ATH12K_PPEDS_HOTLIST_LEN_MAX 1024
+struct sk_buff *
+ath12k_dp_ppeds_tx_release_desc_nolock(struct ath12k_dp *dp,
+				struct ath12k_ppeds_tx_desc_info *tx_desc,
+				u8 ring_id)
+{
+	struct sk_buff *skb = NULL;
+
+	lockdep_assert_held(&dp->ppeds_tx_desc_lock[ATH12K_PPEDS_DEFAULT_POOL_ID]);
+	if (dp->ppeds_tx_desc_reuse_list_len[ring_id] < ATH12K_PPEDS_HOTLIST_LEN_MAX &&
+	    tx_desc->skb) {
+		list_move_tail(&tx_desc->list, &dp->ppeds_tx_desc_reuse_list[ring_id]);
+		dp->ppeds_tx_desc_reuse_list_len[ring_id]++;
+	} else {
+		skb = tx_desc->skb;
+		tx_desc->skb = NULL;
+		list_move_tail(&tx_desc->list, &dp->ppeds_tx_desc_free_list[ring_id]);
+	}
+
+	return skb;
+}
+
+struct ath12k_ppeds_tx_desc_info *
+ath12k_dp_ppeds_tx_assign_desc_nolock(struct ath12k_dp *dp,
+				      u8 ring_id)
+{
+	struct ath12k_ppeds_tx_desc_info *desc, *next;
+
+	lockdep_assert_held(&dp->ppeds_tx_desc_lock[ATH12K_PPEDS_DEFAULT_POOL_ID]);
+	/* first try to fetch a descriptor from the hotlist; if it is empty, use the free list */
+	desc = list_first_entry_or_null(&dp->ppeds_tx_desc_reuse_list[ring_id],
+					struct ath12k_ppeds_tx_desc_info,
+					list);
+	if (desc) {
+		list_move_tail(&desc->list, &dp->ppeds_tx_desc_used_list[ring_id]);
+		dp->ppeds_tx_desc_reuse_list_len[ring_id]--;
+		/* Prefetch next hotlist descriptor */
+		if (dp->ppeds_tx_desc_reuse_list_len[ring_id])
+			next = list_first_entry_or_null(&dp->ppeds_tx_desc_reuse_list[ring_id],
+							struct ath12k_ppeds_tx_desc_info,
+							list);
+		else
+			next = list_first_entry_or_null(&dp->ppeds_tx_desc_free_list[ring_id],
+							struct ath12k_ppeds_tx_desc_info,
+							list);
+		prefetch(next);
+
+		return desc;
+	}
+
+	/* Fetch desc from Freelist if hotlist is empty */
+	desc = list_first_entry_or_null(&dp->ppeds_tx_desc_free_list[ring_id],
+					struct ath12k_ppeds_tx_desc_info,
+					list);
+	if (unlikely(!desc)) {
+		ath12k_warn(dp->ab, "failed to allocate data Tx buffer\n");
+		return NULL;
+	}
+
+	list_move_tail(&desc->list, &dp->ppeds_tx_desc_used_list[ring_id]);
+
+	return desc;
+}
+
+#endif
+
 static void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp,
 				       struct ath12k_tx_desc_info *tx_desc,
-				       u8 pool_id)
+				       u8 ring_id)
 {
-	spin_lock_bh(&dp->tx_desc_lock[pool_id]);
-	list_move_tail(&tx_desc->list, &dp->tx_desc_free_list[pool_id]);
-	spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+	tx_desc->skb = NULL;
+	tx_desc->skb_ext_desc = NULL;
+	spin_lock_bh(&dp->tx_desc_lock[ring_id]);
+	tx_desc->in_use = false;
+	list_add_tail(&tx_desc->list, &dp->tx_desc_free_list[ring_id]);
+	spin_unlock_bh(&dp->tx_desc_lock[ring_id]);
 }
 
-static struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp *dp,
+static inline
+struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp *dp,
 							      u8 pool_id)
 {
-	struct ath12k_tx_desc_info *desc;
+	struct ath12k_tx_desc_info *desc = NULL;
 
 	spin_lock_bh(&dp->tx_desc_lock[pool_id]);
 	desc = list_first_entry_or_null(&dp->tx_desc_free_list[pool_id],
 					struct ath12k_tx_desc_info,
 					list);
 	if (!desc) {
+		ath12k_dbg(dp->ab, ATH12K_DBG_DP_TX, "failed to allocate data Tx desc\n");
 		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
-		ath12k_warn(dp->ab, "failed to allocate data Tx buffer\n");
 		return NULL;
 	}
 
-	list_move_tail(&desc->list, &dp->tx_desc_used_list[pool_id]);
+	prefetch(desc);
+	list_del(&desc->list);
+	desc->in_use = true;
 	spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
 
 	return desc;
@@ -118,21 +215,222 @@
 			       le32_encode_bits(ti->data_len,
 						HAL_TX_MSDU_EXT_INFO1_BUF_LEN);
 
-	tcl_ext_cmd->info1 = le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
+	tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
 				le32_encode_bits(ti->encap_type,
 						 HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
 				le32_encode_bits(ti->encrypt_type,
 						 HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE);
 }
 
-int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
-		 struct sk_buff *skb)
+#define HTT_META_DATA_ALIGNMENT	0x8
+
+static void *ath12k_dp_metadata_align_skb(struct sk_buff *skb, u8 tail_len)
+{
+	struct sk_buff *tail;
+	void *metadata;
+
+	if (unlikely(skb_cow_data(skb, tail_len, &tail) < 0))
+		return NULL;
+
+	metadata = pskb_put(skb, tail, tail_len);
+	memset(metadata, 0, tail_len);
+	return metadata;
+}
+
+/* Prepare HTT metadata for use with an ext MSDU */
+static int ath12k_dp_prepare_htt_metadata(struct sk_buff *skb)
+{
+	struct htt_tx_msdu_desc_ext *desc_ext;
+	void *metadata;
+	u8 htt_desc_size;
+	/* Size rounded up to a multiple of 8 bytes */
+	u8 htt_desc_size_aligned;
+
+
+	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext);
+	htt_desc_size_aligned = ALIGN(htt_desc_size, HTT_META_DATA_ALIGNMENT);
+
+	metadata = ath12k_dp_metadata_align_skb(skb, htt_desc_size_aligned);
+	if (!metadata)
+		return -ENOMEM;
+
+	desc_ext = (struct htt_tx_msdu_desc_ext *)metadata;
+	desc_ext->valid_encrypt_type = 1;
+	desc_ext->encrypt_type = HAL_ENCRYPT_TYPE_WEP_40;
+	desc_ext->host_tx_desc_pool = 1;
+
+	return 0;
+}
+
+
+int ath12k_dp_tx_direct(struct ath12k_link_vif *arvif, struct sk_buff *skb)
 {
+	struct ath12k *ar = arvif->ar;
+	struct ath12k_base *ab = arvif->ab;
+	struct ath12k_dp *dp = arvif->dp;
+	struct ath12k_tx_desc_info *tx_desc = NULL;
+	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
+	struct hal_srng *tcl_ring;
+	struct dp_tx_ring *tx_ring;
+	struct device *dev = arvif->dev;
+	void *hal_tcl_desc;
+	struct hal_tcl_data_cmd tcl_desc;
+	u8 hal_ring_id, ring_id;
+#ifdef CONFIG_ATH12K_SAWF
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ath12k_peer *peer;
+#endif
+	dma_addr_t paddr;
+	int ret;
+#ifdef CONFIG_MAC80211_SFE_SUPPORT
+	int len = skb->fast_xmit ? 256 : skb->len;
+#elif defined(CONFIG_IP_FFN)
+	int len = skb->ffn_ff_done ? skb->ffn_ff_dirty_len : skb->len;
+#else
+	int len = skb->len;
+#endif
+
+	if (unlikely(test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
+		return -ESHUTDOWN;
+
+	ring_id = smp_processor_id();
+
+	tx_ring = &dp->tx_ring[ring_id];
+
+	tx_desc = ath12k_dp_tx_assign_buffer(dp, ring_id);
+	if (unlikely(!tx_desc)) {
+		ab->soc_stats.tx_err.txbuf_na[ring_id]++;
+		return -ENOSPC;
+	}
+
+	dmac_clean_range_no_dsb(skb->data, skb->data + len);
+	paddr = dma_map_single_attrs(dev, skb->data, len,
+				     DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+	if (unlikely(dma_mapping_error(dev, paddr))) {
+		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+		ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
+		ret = -ENOMEM;
+		goto fail_remove_tx_buf;
+	}
+
+	tx_desc->skb = skb;
+	tx_desc->mac_id = arvif->pdev_idx;
+	tx_desc->recycler_fast_xmit = 0;
+	tx_desc->timestamp = ktime_get_real();
+
+#ifdef CONFIG_MAC80211_SFE_SUPPORT
+	/* the edma driver uses this flag to optimize cache invalidation */
+	skb->fast_recycled = 1;
+	if (skb->is_from_recycler)
+		tx_desc->recycler_fast_xmit = 1;
+#elif defined(CONFIG_IP_FFN)
+	/* mark it so page pool recycler will remember this */
+	if (skb->ffn_ff_done)
+		skb->ffn_ff_done |= BIT(1);
+#endif
+
+	skb_cb->vif = arvif->ahvif->vif;
+	skb_cb->paddr =  paddr;
+
+	hal_ring_id = ring_id + HAL_SRNG_RING_ID_SW2TCL1;
+	tcl_ring = &ab->hal.srng_list[hal_ring_id];
+	spin_lock_bh(&tcl_ring->lock);
+	ath12k_hal_srng_access_src_ring_begin_nolock(tcl_ring);
+
+	hal_tcl_desc = (void *)ath12k_hal_srng_src_get_next_entry_nolock(ab, tcl_ring);
+	if (unlikely(!hal_tcl_desc)) {
+		/* NOTE: It is highly unlikely we'll be running out of tcl_ring
+		 * desc because the desc is directly enqueued onto hw queue.
+		 */
+		ath12k_hal_srng_access_umac_src_ring_end_nolock(tcl_ring);
+		spin_unlock_bh(&tcl_ring->lock);
+		ab->soc_stats.tx_err.desc_na[ring_id]++;
+		ret = -ENOMEM;
+		goto fail_unmap_dma;
+	}
+
+	ab->soc_stats.tx_enqueued[ring_id]++;
+
+	tcl_desc.buf_addr_info.info0 = (u32)paddr;
+	tcl_desc.buf_addr_info.info1 = (tx_desc->desc_id << 12);
+	tcl_desc.info0 =  arvif->desc.info0;
+	tcl_desc.info1 =  arvif->desc.info1;
+	tcl_desc.info2 =  skb->len;
+
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+		tcl_desc.info2 |= TX_IP_CHECKSUM;
+
+	tcl_desc.info3 = arvif->desc.info3;
+	tcl_desc.info4 =  arvif->desc.info4;
+	tcl_desc.info5 = 0;
+#ifdef CONFIG_ATH12K_SAWF
+	/* SAWF */
+	if (u32_get_bits(skb->mark, SAWF_TAG_ID) == SAWF_VALID_TAG) {
+		u32 msduq_id = u32_get_bits(skb->mark, SAWF_MSDUQ_ID);
+
+		if (msduq_id < (ab->max_msduq_per_tid * ATH12K_SAWF_MAX_TID_SUPPORT)) {
+			u32 meta_data_flags;
+			tcl_desc.info3 |= ath12k_sawf_get_tcl_cmd_info3_update(msduq_id);
+			meta_data_flags =
+					ath12k_sawf_get_tcl_metadata_update(skb->mark);
+			tcl_desc.info1 = u32_encode_bits(meta_data_flags,
+							 HAL_TCL_DATA_CMD_INFO1_CMD_NUM);
+		}
+
+		if (unlikely(ath12k_debugfs_is_sawf_stats_enabled(ar) & ATH12K_SAWF_STATS_BASIC)) {
+			u16 peer_id = u32_get_bits(skb->mark, SAWF_PEER_ID);
+			u32 len = skb_headlen(skb);
+			spin_lock_bh(&ab->base_lock);
+			peer = ath12k_peer_find_by_id(ab, peer_id);
+			if (unlikely(!peer || !peer->sta))
+				ath12k_dbg(ab, ATH12K_DBG_SAWF,
+					   "peer_id %u not found \n", peer_id);
+			else
+				ath12k_sawf_tx_enqueue_peer_stats(ab, peer, msduq_id, len);
+			spin_unlock_bh(&ab->base_lock);
+		}
+		/* Store the NWDELAY in skb->mark so that it can be fetched
+		 * during tx completion
+		 */
+		if (info->sawf.nw_delay > SAWF_NW_DELAY_MAX)
+			info->sawf.nw_delay = SAWF_NW_DELAY_MAX;
+		skb->mark = (SAWF_VALID_TAG << SAWF_TAG_SHIFT) | (info->sawf.nw_delay << SAWF_NW_DELAY_SHIFT) | msduq_id;
+	}
+#endif
+	memcpy(hal_tcl_desc, &tcl_desc, sizeof(tcl_desc));
+	dsb(st);
+	ath12k_hal_srng_access_umac_src_ring_end_nolock(tcl_ring);
+	spin_unlock_bh(&tcl_ring->lock);
+
+	atomic_inc(&ar->dp.num_tx_pending);
+	atomic_inc(&ab->ag->num_dp_tx_pending);
+
+	return 0;
+
+fail_unmap_dma:
+	dma_unmap_single(ab->dev, skb_cb->paddr, 256, DMA_TO_DEVICE);
+
+fail_remove_tx_buf:
+	ath12k_dp_tx_release_txbuf(dp, tx_desc, ring_id);
+
+	return ret;
+}
+EXPORT_SYMBOL(ath12k_dp_tx_direct);
+
+int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
+		 struct ath12k_sta *ahsta, struct sk_buff *skb,
+		 bool gsn_valid, int mcbc_gsn)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
 	struct ath12k_base *ab = ar->ab;
+	struct ath12k_hw_group *ag = ab->ag;
 	struct ath12k_dp *dp = &ab->dp;
+	struct ath12k_peer *peer;
 	struct hal_tx_info ti = {0};
 	struct ath12k_tx_desc_info *tx_desc;
+#ifdef CONFIG_ATH12K_SAWF
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+#endif
 	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
 	struct hal_tcl_data_cmd *hal_tcl_desc;
 	struct hal_tx_msdu_ext_desc *msg;
@@ -140,92 +438,107 @@
 	struct hal_srng *tcl_ring;
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	struct dp_tx_ring *tx_ring;
-	u8 pool_id;
+	struct hal_tcl_data_cmd *tcl_cmd;
 	u8 hal_ring_id;
 	int ret;
-	u8 ring_selector, ring_map = 0;
-	bool tcl_ring_retry;
+	u16 peer_id;
 	bool msdu_ext_desc = false;
+	bool is_diff_encap = false, is_qos_null = false;
+	bool add_htt_metadata = false;
 
-	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+	if (unlikely(test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) ||
+	    unlikely(test_bit(ATH12K_FLAG_UMAC_PRERESET_START, &ab->dev_flags)))
 		return -ESHUTDOWN;
 
-	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
-	    !ieee80211_is_data(hdr->frame_control))
+	if (unlikely(!(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
+	    !ieee80211_is_data(hdr->frame_control)))
 		return -ENOTSUPP;
 
-	pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1);
-
-	/* Let the default ring selection be based on current processor
-	 * number, where one of the 3 tcl rings are selected based on
-	 * the smp_processor_id(). In case that ring
-	 * is full/busy, we resort to other available rings.
-	 * If all rings are full, we drop the packet.
-	 * TODO: Add throttling logic when all rings are full
-	 */
-	ring_selector = ab->hw_params->hw_ops->get_ring_selector(skb);
-
-tcl_ring_sel:
-	tcl_ring_retry = false;
-	ti.ring_id = ring_selector % ab->hw_params->max_tx_ring;
-
-	ring_map |= BIT(ti.ring_id);
-	ti.rbm_id = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[ti.ring_id].rbm_id;
+	ti.ring_id =
+		ab->hw_params->hw_ops->get_ring_selector(skb);
 
 	tx_ring = &dp->tx_ring[ti.ring_id];
 
-	tx_desc = ath12k_dp_tx_assign_buffer(dp, pool_id);
-	if (!tx_desc)
+	tx_desc = ath12k_dp_tx_assign_buffer(dp, ti.ring_id);
+	if (unlikely(!tx_desc)) {
+		ab->soc_stats.tx_err.txbuf_na[ti.ring_id]++;
 		return -ENOMEM;
+	}
 
 	ti.bank_id = arvif->bank_id;
-	ti.meta_data_flags = arvif->tcl_metadata;
 
-	if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
-	    test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) {
-		if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) {
-			ti.encrypt_type =
-				ath12k_dp_tx_get_encrypt_type(skb_cb->cipher);
+	if (ieee80211_has_a4(hdr->frame_control) &&
+	    is_multicast_ether_addr(hdr->addr3) && ahsta &&
+	    ahsta->use_4addr_set) {
+		if (unlikely(!ahsta->link[ahsta->primary_link_id])) {
+			ath12k_err(ab, "arsta not found on primary link");
+			ret = -EINVAL;
+			goto fail_remove_tx_buf;
+		}
+		ti.meta_data_flags = ahsta->link[ahsta->primary_link_id]->tcl_metadata;
+		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TO_FW, 1);
+		spin_lock_bh(&ab->base_lock);
+		peer_id = FIELD_GET(HTT_TCL_META_DATA_PEER_ID_MISSION,
+				    ti.meta_data_flags);
+		peer = ath12k_peer_find_by_id(ab, peer_id);
+		if (!peer || !peer->sta) {
+			spin_unlock_bh(&ab->base_lock);
+			ath12k_dbg(ab, ATH12K_DBG_DP_TX,
+				   "Dropped packet with non existent peer id %u\n", peer_id);
+			ret = -EINVAL;
+			goto fail_remove_tx_buf;
+		}
+		spin_unlock_bh(&ab->base_lock);
 
-			if (ieee80211_has_protected(hdr->frame_control))
-				skb_put(skb, IEEE80211_CCMP_MIC_LEN);
 		} else {
-			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+		ti.meta_data_flags = arvif->tcl_metadata;
 		}
 
-		msdu_ext_desc = true;
+	/* TCL Meta data flags are shared for both SAWF and Global Seq Number updates.
+	 * SAWF and Global Seq Number are mutually exclusive.
+	 * Global Seq Number - Multicast, SAWF - Unicast
+	 */
+#ifdef CONFIG_ATH12K_SAWF
+	if (u32_get_bits(skb->mark, SAWF_TAG_ID) == SAWF_VALID_TAG) {
+		u32 msduq_id = u32_get_bits(skb->mark, SAWF_MSDUQ_ID);
+		if (msduq_id < (ab->max_msduq_per_tid * ATH12K_SAWF_MAX_TID_SUPPORT)) {
+			ti.meta_data_flags =
+				ath12k_sawf_get_tcl_metadata_update(skb->mark);
+		}
+	} else if (gsn_valid) {
+#else
+	if (gsn_valid) {
+#endif /* CONFIG_ATH12K_SAWF */
+		ti.meta_data_flags = u32_encode_bits(HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM,
+						     HTT_TCL_META_DATA_TYPE_MISSION) |
+				     u32_encode_bits(mcbc_gsn, HTT_TCL_META_DATA_GLOBAL_SEQ_NUM);
+		ti.vdev_id = arvif->vdev_id +
+			     HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID;
+	} else {
+		ti.vdev_id = arvif->vdev_id;
 	}
 
-	ti.encap_type = ath12k_dp_tx_get_encap_type(arvif, skb);
+	ti.encap_type = ath12k_dp_tx_get_encap_type(ab, skb);
 	ti.addr_search_flags = arvif->hal_addr_search_flags;
 	ti.search_type = arvif->search_type;
 	ti.type = HAL_TCL_DESC_TYPE_BUFFER;
-	ti.pkt_offset = 0;
-	ti.lmac_id = ar->lmac_id;
-	ti.vdev_id = arvif->vdev_id;
-	ti.bss_ast_hash = arvif->ast_hash;
-	ti.bss_ast_idx = arvif->ast_idx;
-	ti.dscp_tid_tbl_idx = 0;
-
-	if (skb->ip_summed == CHECKSUM_PARTIAL &&
-	    ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
-		ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_IP4_CKSUM_EN) |
-			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP4_CKSUM_EN) |
-			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP6_CKSUM_EN) |
-			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP4_CKSUM_EN) |
-			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP6_CKSUM_EN);
-	}
 
-	ti.flags1 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE);
-
-	ti.tid = ath12k_dp_tx_get_tid(skb);
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
+		   ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
+		ti.flags0 |= TX_IP_CHECKSUM;
+	}
 
 	switch (ti.encap_type) {
 	case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
+		if ((ahvif->vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED) &&
+		    (skb->protocol == cpu_to_be16(ETH_P_PAE) ||
+		     (is_qos_null = ieee80211_is_qos_nullfunc(hdr->frame_control))))
+			is_diff_encap = true;
+		else
 		ath12k_dp_tx_encap_nwifi(skb);
 		break;
 	case HAL_TCL_ENCAP_TYPE_RAW:
-		if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
+		if (!test_bit(ATH12K_FLAG_RAW_MODE, &ag->dev_flags)) {
 			ret = -EINVAL;
 			goto fail_remove_tx_buf;
 		}
@@ -241,8 +554,55 @@
 		goto fail_remove_tx_buf;
 	}
 
+	if (unlikely((ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_ETHERNET &&
+	     !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP)))) {
+		msdu_ext_desc = true;
+
+		if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+			ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
+			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+		}
+	}
+
+	if (unlikely(ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW)) {
+		if (skb->protocol == cpu_to_be16(ETH_P_ARP)) {
+			ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
+			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+			msdu_ext_desc = true;
+		}
+
+		if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) {
+			ti.encrypt_type =
+				ath12k_dp_tx_get_encrypt_type(skb_cb->cipher);
+
+			if (ieee80211_has_protected(hdr->frame_control))
+				skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+		} else {
+			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+		}
+	}
+
+	if ((!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->ag->dev_flags) &&
+	     !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
+	     !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
+	     ieee80211_has_protected(hdr->frame_control)) ||
+	     is_diff_encap) {
+		if (is_qos_null && msdu_ext_desc)
+			goto skip_htt_meta;
+
+		/* Add metadata for sw encrypted vlan group traffic */
+		add_htt_metadata = true;
+		msdu_ext_desc = true;
+
+		ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT;
+skip_htt_meta:
+		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TO_FW, 1);
+		ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
+		ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+	}
+
 	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
-	if (dma_mapping_error(ab->dev, ti.paddr)) {
+	if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) {
 		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
 		ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
 		ret = -ENOMEM;
@@ -251,13 +611,14 @@
 
 	tx_desc->skb = skb;
 	tx_desc->mac_id = ar->pdev_idx;
+	tx_desc->recycler_fast_xmit = 0;
+	tx_desc->timestamp = ktime_get_real();
 	ti.desc_id = tx_desc->desc_id;
 	ti.data_len = skb->len;
 	skb_cb->paddr = ti.paddr;
-	skb_cb->vif = arvif->vif;
-	skb_cb->ar = ar;
+	skb_cb->vif = ahvif->vif;
 
-	if (msdu_ext_desc) {
+	if (unlikely(msdu_ext_desc)) {
 		skb_ext_desc = dev_alloc_skb(sizeof(struct hal_tx_msdu_ext_desc));
 		if (!skb_ext_desc) {
 			ret = -ENOMEM;
@@ -270,11 +631,22 @@
 		msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data;
 		ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti);
 
+		if (add_htt_metadata) {
+			ret = ath12k_dp_prepare_htt_metadata(skb_ext_desc);
+			if (ret < 0) {
+				ath12k_dbg(ab, ATH12K_DBG_DP_TX,
+					"Failed to add HTT meta data, dropping packet\n");
+				goto fail_unmap_dma;
+			}
+		}
+
 		ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
 					  skb_ext_desc->len, DMA_TO_DEVICE);
 		ret = dma_mapping_error(ab->dev, ti.paddr);
 		if (ret) {
-			kfree_skb(skb_ext_desc);
+			atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+			ath12k_warn(ab, "Failed to DMA map data Tx buffer\n");
+			dev_kfree_skb_any(skb_ext_desc);
 			goto fail_unmap_dma;
 		}
 
@@ -282,115 +654,270 @@
 		ti.type = HAL_TCL_DESC_TYPE_EXT_DESC;
 
 		skb_cb->paddr_ext_desc = ti.paddr;
+		tx_desc->skb_ext_desc = skb_ext_desc;
 	}
 
 	hal_ring_id = tx_ring->tcl_data_ring.ring_id;
 	tcl_ring = &ab->hal.srng_list[hal_ring_id];
-
 	spin_lock_bh(&tcl_ring->lock);
-
-	ath12k_hal_srng_access_begin(ab, tcl_ring);
-
+	ath12k_hal_srng_access_src_ring_begin_nolock(tcl_ring);
 	hal_tcl_desc = ath12k_hal_srng_src_get_next_entry(ab, tcl_ring);
-	if (!hal_tcl_desc) {
+	if (unlikely(!hal_tcl_desc)) {
 		/* NOTE: It is highly unlikely we'll be running out of tcl_ring
 		 * desc because the desc is directly enqueued onto hw queue.
 		 */
-		ath12k_hal_srng_access_end(ab, tcl_ring);
-		ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
+		ath12k_hal_srng_access_umac_src_ring_end_nolock(tcl_ring);
 		spin_unlock_bh(&tcl_ring->lock);
+		ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
 		ret = -ENOMEM;
 
-		/* Checking for available tcl descritors in another ring in
-		 * case of failure due to full tcl ring now, is better than
-		 * checking this ring earlier for each pkt tx.
-		 * Restart ring selection if some rings are not checked yet.
-		 */
-		if (ring_map != (BIT(ab->hw_params->max_tx_ring) - 1) &&
-		    ab->hw_params->tcl_ring_retry) {
-			tcl_ring_retry = true;
-			ring_selector++;
-		}
-
-		goto fail_unmap_dma;
+		goto fail_unmap_dma_ext_desc;
 	}
 
-	ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);
+	ab->soc_stats.tx_enqueued[ti.ring_id]++;
+	arvif->link_stats.tx_encap_type[ti.encap_type]++;
+	arvif->link_stats.tx_encrypt_type[ti.encrypt_type]++;
+	arvif->link_stats.tx_desc_type[ti.type]++;
+	tcl_cmd = (struct hal_tcl_data_cmd *)hal_tcl_desc;
+
+	tcl_cmd->buf_addr_info.info0 = (u32)ti.paddr;
+	/* TODO: Copy the upper 8 bits here */
+	tcl_cmd->buf_addr_info.info1 = (ti.desc_id << 12);
+	tcl_cmd->info0 = (ti.type << 1) | arvif->desc.info0;
+	tcl_cmd->info1 = ti.meta_data_flags << 16;
+	tcl_cmd->info2 = ti.flags0 | ti.data_len;
 
-	ath12k_hal_srng_access_end(ab, tcl_ring);
+	/* In tcl_cmd->info3, Bit 24 to 31 represents vdev_id
+	 * LSH 24 times to add updated vdev_id to info3
+	 */
+	tcl_cmd->info3 = (ti.vdev_id << 24) | arvif->desc.info3;
+	tcl_cmd->info4 = arvif->desc.info4;
+	tcl_cmd->info5 = 0;
+
+#ifdef CONFIG_ATH12K_SAWF
+	/* SAWF */
+	if (u32_get_bits(skb->mark, SAWF_TAG_ID) == SAWF_VALID_TAG) {
+		u32 msduq_id = u32_get_bits(skb->mark, SAWF_MSDUQ_ID);
+
+		if (msduq_id < (ab->max_msduq_per_tid * ATH12K_SAWF_MAX_TID_SUPPORT)) {
+			tcl_cmd->info3 |=
+				ath12k_sawf_get_tcl_cmd_info3_update(msduq_id);
+		}
+		if (unlikely(ath12k_debugfs_is_sawf_stats_enabled(ar) & ATH12K_SAWF_STATS_BASIC)) {
+			peer_id = u32_get_bits(skb->mark, SAWF_PEER_ID);
+			spin_lock_bh(&ab->base_lock);
+			peer = ath12k_peer_find_by_id(ab, peer_id);
+			if (unlikely(!peer || !peer->sta))
+				ath12k_dbg(ab, ATH12K_DBG_SAWF,
+					   "peer_id %u not found\n", peer_id);
+			else
+				ath12k_sawf_tx_enqueue_peer_stats(ab, peer, msduq_id, ti.data_len);
+			spin_unlock_bh(&ab->base_lock);
+		}
+		/* Store the NWDELAY to skb->mark which can be fetched
+		 * during tx completion
+		 */
+		if (info->sawf.nw_delay > SAWF_NW_DELAY_MAX)
+			info->sawf.nw_delay = SAWF_NW_DELAY_MAX;
+		skb->mark = (SAWF_VALID_TAG << SAWF_TAG_SHIFT) | (info->sawf.nw_delay << SAWF_NW_DELAY_SHIFT) | msduq_id;
+	}
+#endif /* CONFIG_ATH12K_SAWF */
 
+	dsb(st);
+	ath12k_hal_srng_access_umac_src_ring_end_nolock(tcl_ring);
 	spin_unlock_bh(&tcl_ring->lock);
 
 	ath12k_dbg_dump(ab, ATH12K_DBG_DP_TX, NULL, "dp tx msdu: ",
 			skb->data, skb->len);
 
+	if (gsn_valid)
+		arvif->link_stats.tx_bcast_mcast++;
+	else
+		arvif->link_stats.tx_enqueued++;
+
 	atomic_inc(&ar->dp.num_tx_pending);
+	atomic_inc(&ab->ag->num_dp_tx_pending);
 
 	return 0;
 
+fail_unmap_dma_ext_desc:
+	if (unlikely(msdu_ext_desc)) {
+		dma_unmap_single(ab->dev, ti.paddr,
+				 skb_ext_desc->len, DMA_TO_DEVICE);
+		dev_kfree_skb_any(skb_ext_desc);
+	}
+
 fail_unmap_dma:
-	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
-	dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
-			 sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+	dma_unmap_single(ab->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
 
 fail_remove_tx_buf:
-	ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
-	if (tcl_ring_retry)
-		goto tcl_ring_sel;
+	ath12k_dp_tx_release_txbuf(dp, tx_desc, ti.ring_id);
+
+	arvif->link_stats.tx_dropped++;
 
 	return ret;
 }
 
+static inline void ath12k_dp_tx_decrement(struct ath12k *ar)
+{
+	atomic_dec(&ar->ab->ag->num_dp_tx_pending);
+
+	if (atomic_read(&ar->flush_request)) {
+		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+			wake_up(&ar->tx_empty_waitq);
+	} else {
+		atomic_dec(&ar->dp.num_tx_pending);
+	}
+}
+
 static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
 				    struct sk_buff *msdu, u8 mac_id,
-				    struct dp_tx_ring *tx_ring)
+				    struct dp_tx_ring *tx_ring,
+				    struct sk_buff *skb_ext_desc)
 {
-	struct ath12k *ar;
 	struct ath12k_skb_cb *skb_cb;
+	struct ath12k *ar;
 
 	skb_cb = ATH12K_SKB_CB(msdu);
+	ar = ab->pdevs[mac_id].ar;
 
-	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
-	if (skb_cb->paddr_ext_desc)
-		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
-				 sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+	dma_unmap_single_attrs(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE,
+			     DMA_ATTR_SKIP_CPU_SYNC);
+	if (unlikely(skb_ext_desc)) {
+		dma_unmap_single_attrs(ab->dev, skb_cb->paddr_ext_desc,
+				       skb_ext_desc->len,
+				       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+		dev_kfree_skb_any(skb_ext_desc);
+	}
 
-	dev_kfree_skb_any(msdu);
+	ieee80211_free_txskb(ar->ah->hw, msdu);
 
-	ar = ab->pdevs[mac_id].ar;
-	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
-		wake_up(&ar->dp.tx_empty_waitq);
+	ath12k_dp_tx_decrement(ar);
+}
+
+static inline void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
+                                             struct hal_wbm_completion_ring_tx *desc,
+                                             struct hal_tx_status *ts)
+{
+	ts->buf_rel_source =
+		FIELD_GET(HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE, desc->info0);
+	if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
+	    ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
+		return;
+
+	if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
+		return;
+
+	ts->status = FIELD_GET(HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON,
+			       desc->info0);
+	ts->ppdu_id = FIELD_GET(HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER,
+				desc->info1);
+	ts->ack_rssi = FIELD_GET(HAL_WBM_COMPL_TX_INFO2_ACK_FRAME_RSSI,
+				 desc->info2);
+	if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
+		ts->rate_stats = desc->rate_stats.info0;
+	else
+		ts->rate_stats = 0;
+	ts->tid = FIELD_GET(HAL_WBM_RELEASE_TX_INFO3_TID, desc->info3);
+
+	ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_TX_INFO3_PEER_ID, desc->info3);
+	ts->flags = FIELD_GET(HAL_WBM_RELEASE_TX_INFO2_FIRST_MSDU, desc->info2) |
+		    FIELD_GET(HAL_WBM_RELEASE_TX_INFO2_LAST_MSDU, desc->info2);
+	ts->buffer_timestamp = FIELD_GET(HAL_WBM_RELEASE_TX_INFO2_BUFFER_TIMESTAMP,
+					 desc->info2);
+	ts->tsf = desc->rate_stats.tsf;
 }
 
 static void
 ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
 				 struct sk_buff *msdu,
 				 struct dp_tx_ring *tx_ring,
-				 struct ath12k_dp_htt_wbm_tx_status *ts)
+				 struct ath12k_dp_htt_wbm_tx_status *ts,
+				 struct sk_buff *skb_ext_desc, u8 mac_id,
+				 void *desc, ktime_t timestamp)
 {
 	struct ieee80211_tx_info *info;
 	struct ath12k_skb_cb *skb_cb;
 	struct ath12k *ar;
+	struct ieee80211_vif *vif;
+	struct ath12k_vif *ahvif;
+	struct ath12k_peer *peer;
+	struct hal_tx_status ts_status = { 0 };
+	unsigned long attrs = 0;
+	u8 flags = 0;
+	bool drop_disable = false;
 
 	skb_cb = ATH12K_SKB_CB(msdu);
 	info = IEEE80211_SKB_CB(msdu);
 
-	ar = skb_cb->ar;
+	ar = ab->pdevs[mac_id].ar;
+	ab->soc_stats.tx_completed[tx_ring->tcl_data_ring_id]++;
 
-	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
-		wake_up(&ar->dp.tx_empty_waitq);
+	ath12k_dp_tx_decrement(ar);
+
+	flags = skb_cb->flags;
+	drop_disable = (ab->stats_disable && !(flags & ATH12K_SKB_TX_STATUS));
+
+	/* If drop disable case, avoid cpu_sync since we are not passed
+	 * the skb to mac80211
+	 */
+	if (drop_disable)
+		attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
+	dma_unmap_single_attrs(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE, attrs);
 
-	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
-	if (skb_cb->paddr_ext_desc)
-		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
-				 sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+	if (unlikely(skb_ext_desc)) {
+		dma_unmap_single_attrs(ab->dev, skb_cb->paddr_ext_desc,
+				       skb_ext_desc->len, DMA_TO_DEVICE, attrs);
+		dev_kfree_skb_any(skb_ext_desc);
+	}
+
+	/* Free skb here if stats is disabled */
+	if (drop_disable) {
+		if (skb_cb->vif) {
+			ahvif = (void *)skb_cb->vif->drv_priv;
+			if (ahvif->links_map & BIT(skb_cb->link_id)) {
+				if (ahvif->link[skb_cb->link_id])
+					ahvif->link[skb_cb->link_id]->link_stats.tx_completed++;
+			} else {
+				ath12k_warn(ab,
+					    "invalid linkid 0x%X in htt tx complete buf linkmap %lu\n",
+					    skb_cb->link_id,
+					    ahvif->links_map);
+			}
+		}
+
+		if (msdu->destructor) {
+			msdu->wifi_acked_valid = 1;
+			msdu->wifi_acked = ts->acked;
+		}
+		ieee80211_free_txskb(ar->ah->hw, msdu);
+		return;
+	}
+
+	if (unlikely(!skb_cb->vif)) {
+		ieee80211_free_txskb(ar->ah->hw, msdu);
+		return;
+	}
+
+	vif = skb_cb->vif;
+	ahvif = (void *)vif->drv_priv;
+	if (ahvif->links_map & BIT(skb_cb->link_id)) {
+		if (ahvif->link[skb_cb->link_id])
+			ahvif->link[skb_cb->link_id]->link_stats.tx_completed++;
+	} else {
+		ath12k_warn(ab,
+			    "invalid linkid 0x%X in htt tx complete buf linkmap %lu\n",
+			    skb_cb->link_id,
+			    ahvif->links_map);
+	}
 
 	memset(&info->status, 0, sizeof(info->status));
 
 	if (ts->acked) {
 		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
 			info->flags |= IEEE80211_TX_STAT_ACK;
-			info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
+			info->status.ack_signal = ar->rssi_offsets.rssi_offset +
 						  ts->ack_rssi;
 			info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
 		} else {
@@ -398,36 +925,58 @@
 		}
 	}
 
-	ieee80211_tx_status(ar->hw, msdu);
+	if (unlikely(ath12k_debugfs_is_sawf_stats_enabled(ar))) {
+		spin_lock_bh(&ab->base_lock);
+		ath12k_dp_tx_status_parse(ab, desc, &ts_status);
+		peer = ath12k_peer_find_by_id(ab, ts_status.peer_id);
+		if (unlikely(!peer || !peer->sta))
+			ath12k_dbg(ab, ATH12K_DBG_DATA,
+				   "dp_tx: failed to find the peer with peer_id %d\n",
+				   ts_status.peer_id);
+		else
+			ath12k_sawf_stats_update(ar, msdu, &ts_status, peer, timestamp);
+		spin_unlock_bh(&ab->base_lock);
+	}
+
+	if (flags & ATH12K_SKB_HW_80211_ENCAP)
+		ieee80211_tx_status_8023(ar->ah->hw, vif, msdu);
+	else
+		ieee80211_tx_status(ar->ah->hw, msdu);
 }
 
 static void
 ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
 				     void *desc, u8 mac_id,
 				     struct sk_buff *msdu,
-				     struct dp_tx_ring *tx_ring)
+				     struct dp_tx_ring *tx_ring,
+				     struct sk_buff *skb_ext_desc,
+				     ktime_t timestamp)
 {
 	struct htt_tx_wbm_completion *status_desc;
 	struct ath12k_dp_htt_wbm_tx_status ts = {0};
-	enum hal_wbm_htt_tx_comp_status wbm_status;
+	int htt_status;
 
 	status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
 
-	wbm_status = le32_get_bits(status_desc->info0,
+	htt_status = le32_get_bits(status_desc->info0,
 				   HTT_TX_WBM_COMP_INFO0_STATUS);
 
-	switch (wbm_status) {
+	ab->soc_stats.fw_tx_status[htt_status]++;
+
+	switch (htt_status) {
 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
-	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
-	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
-		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
+		ts.acked = (htt_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
 		ts.ack_rssi = le32_get_bits(status_desc->info2,
 					    HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
-		ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts);
+		ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts, skb_ext_desc, mac_id, desc, timestamp);
+
 		break;
+	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
+	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
-		ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring);
+	case HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH:
+		ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring, skb_ext_desc);
 		break;
 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
 		/* This event is to be handled only when the driver decides to
@@ -435,41 +984,195 @@
 		 */
 		break;
 	default:
-		ath12k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
+		ath12k_warn(ab, "Unknown htt tx status %d\n", htt_status);
 		break;
 	}
 }
 
-static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
+static void ath12k_dp_tx_cache_peer_stats(struct ath12k *ar,
 				       struct sk_buff *msdu,
 				       struct hal_tx_status *ts)
 {
+	struct ath12k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
+
+	if (ts->try_cnt > 1) {
+		peer_stats->retry_pkts += ts->try_cnt - 1;
+		peer_stats->retry_bytes += (ts->try_cnt - 1) * msdu->len;
+
+		if (ts->status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) {
+			peer_stats->failed_pkts += 1;
+			peer_stats->failed_bytes += msdu->len;
+		}
+	}
+}
+
+void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status *ts)
+{
 	struct ath12k_base *ab = ar->ab;
-	struct ieee80211_tx_info *info;
-	struct ath12k_skb_cb *skb_cb;
+	struct ath12k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
+	enum hal_tx_rate_stats_pkt_type pkt_type;
+	enum hal_tx_rate_stats_sgi sgi;
+	enum hal_tx_rate_stats_bw bw;
+	struct ath12k_peer *peer;
+	struct ath12k_link_sta *arsta;
+	u16 rate, ru_tones;
+	u8 mcs, rate_idx, ofdma;
+	int ret;
 
-	if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
-		/* Must not happen */
-		return;
+	spin_lock_bh(&ab->base_lock);
+	peer = ath12k_peer_find_by_id(ab, ts->peer_id);
+	if (!peer || !peer->sta) {
+		ath12k_dbg(ab, ATH12K_DBG_DP_TX,
+			   "failed to find the peer by id %u\n", ts->peer_id);
+		goto err_out;
+	}
+
+	arsta = ath12k_peer_get_link_sta(ab, peer);
+	if (!arsta) {
+		ath12k_warn(ab, "link sta not found on peer %pM id %d\n",
+			    peer->addr, peer->peer_id);
+		goto err_out;
+	}
+
+	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+	pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE,
+			     ts->rate_stats);
+	mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS,
+			ts->rate_stats);
+	sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI,
+			ts->rate_stats);
+	bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats);
+	ru_tones = FIELD_GET(HAL_TX_RATE_STATS_INFO0_TONES_IN_RU, ts->rate_stats);
+	ofdma = FIELD_GET(HAL_TX_RATE_STATS_INFO0_OFDMA_TX, ts->rate_stats);
+
+	/* This is to prefer choose the real NSS value arsta->last_txrate.nss,
+	 * if it is invalid, then choose the NSS value while assoc.
+	 */
+	if (arsta->last_txrate.nss)
+		arsta->txrate.nss = arsta->last_txrate.nss;
+	else
+		arsta->txrate.nss = arsta->peer_nss;
+
+	if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A ||
+	    pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) {
+		ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
+							    pkt_type,
+							    &rate_idx,
+							    &rate);
+		if (ret < 0)
+			goto err_out;
+		arsta->txrate.legacy = rate;
+	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) {
+		if (mcs > 7) {
+			ath12k_warn(ab, "Invalid HT mcs index %d\n", mcs);
+			goto err_out;
+		}
+
+		if (arsta->txrate.nss != 0)
+			arsta->txrate.mcs = mcs + 8 * (arsta->txrate.nss - 1);
+		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+		if (sgi)
+			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) {
+		if (mcs > 9) {
+			ath12k_warn(ab, "Invalid VHT mcs index %d\n", mcs);
+			goto err_out;
+		}
+
+		arsta->txrate.mcs = mcs;
+		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+		if (sgi)
+			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+		if (mcs > 11) {
+			ath12k_warn(ab, "Invalid HE mcs index %d\n", mcs);
+			goto err_out;
+		}
+
+		arsta->txrate.mcs = mcs;
+		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+		arsta->txrate.he_gi = ath12k_mac_he_gi_to_nl80211_he_gi(sgi);
+	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11BE) {
+		if (mcs > 13) {
+			ath12k_warn(ab, "Invalid EHT mcs index %d\n", mcs);
+			goto err_out;
+		}
+
+		arsta->txrate.mcs = mcs;
+		arsta->txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
+		arsta->txrate.he_gi = ath12k_mac_he_gi_to_nl80211_he_gi(sgi);
+	}
+
+	arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
+	if (ofdma && pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+		arsta->txrate.bw = RATE_INFO_BW_HE_RU;
+		arsta->txrate.he_ru_alloc =
+			ath12k_mac_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
+	}
+
+	if (ath12k_debugfs_is_extd_tx_stats_enabled(ar))
+		ath12k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
+
+err_out:
+	spin_unlock_bh(&ab->base_lock);
 	}
 
+static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
+				       struct sk_buff *msdu,
+				       struct hal_wbm_release_ring *tx_status,
+				       enum hal_wbm_rel_src_module buf_rel_source,
+				       int ring, ktime_t timestamp)
+{
+	struct ieee80211_tx_status status = { 0 };
+	struct ieee80211_rate_status status_rate = { 0 };
+	struct ath12k_base *ab = ar->ab;
+	struct ieee80211_tx_info *info;
+	struct ath12k_skb_cb *skb_cb;
+	struct ath12k_peer *peer;
+	struct ath12k_link_sta *arsta;
+	struct rate_info rate;
+	struct hal_tx_status ts = { 0 };
+	enum hal_wbm_htt_tx_comp_status wbm_status;
+	struct ieee80211_vif *vif;
+	struct ath12k_vif *ahvif;
+	u8 flags = 0;
+
 	skb_cb = ATH12K_SKB_CB(msdu);
+	ab->soc_stats.tx_completed[ring]++;
 
-	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
-	if (skb_cb->paddr_ext_desc)
-		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
-				 sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+	flags = skb_cb->flags;
 
-	rcu_read_lock();
+	if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) {
+                ieee80211_free_txskb(ar->ah->hw, msdu);
+                return;
+        }
 
-	if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
-		dev_kfree_skb_any(msdu);
-		goto exit;
+        if (unlikely(!skb_cb->vif)) {
+                ieee80211_free_txskb(ar->ah->hw, msdu);
+                return;
 	}
 
-	if (!skb_cb->vif) {
-		dev_kfree_skb_any(msdu);
-		goto exit;
+	ath12k_dp_tx_status_parse(ab, (struct hal_wbm_completion_ring_tx *)tx_status, &ts);
+
+	ar->wmm_stats.tx_type = ath12k_tid_to_ac(ts.tid > ATH12K_DSCP_PRIORITY ? 0:ts.tid);
+	if (ar->wmm_stats.tx_type) {
+		if (ts.status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED)
+			ar->wmm_stats.total_wmm_tx_drop[ar->wmm_stats.tx_type]++;
+	}
+
+	wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
+			       tx_status->info0);
+
+	vif = skb_cb->vif;
+	ahvif = (void *)vif->drv_priv;
+	if (ahvif->links_map & BIT(skb_cb->link_id)) {
+		if (ahvif->link[skb_cb->link_id])
+			ahvif->link[skb_cb->link_id]->link_stats.tx_completed++;
+	} else {
+		ath12k_warn(ar->ab,
+			    "invalid linkid 0x%X in tx complete msdu linkmap %lu\n",
+			    skb_cb->link_id,
+			    ahvif->links_map);
 	}
 
 	info = IEEE80211_SKB_CB(msdu);
@@ -478,142 +1181,520 @@
 	/* skip tx rate update from ieee80211_status*/
 	info->status.rates[0].idx = -1;
 
-	if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
+	if (ts.status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
 	    !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
 		info->flags |= IEEE80211_TX_STAT_ACK;
-		info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
-					  ts->ack_rssi;
+		info->status.ack_signal = ar->rssi_offsets.rssi_offset +
+					  ts.ack_rssi;
 		info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
 	}
 
-	if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
+	if (ts.status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
 	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
 		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
 
-	/* NOTE: Tx rate status reporting. Tx completion status does not have
-	 * necessary information (for example nss) to build the tx rate.
-	 * Might end up reporting it out-of-band from HTT stats.
-	 */
+	if (ts.status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) {
+		switch (ts.status) {
+		case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU:
+		case HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD:
+		case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES:
+		case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX:
+			if (unlikely(ath12k_debugfs_is_sawf_stats_enabled(ar))) {
+				spin_lock_bh(&ab->base_lock);
+				peer = ath12k_peer_find_by_id(ab, ts.peer_id);
+				if (unlikely(!peer || !peer->sta))
+					ath12k_dbg(ab, ATH12K_DBG_DATA,
+						   "dp_tx: failed to find the peer with peer_id %d\n",
+						   ts.peer_id);
+				else
+					ath12k_sawf_stats_update(ar, msdu, &ts, peer, timestamp);
+				spin_unlock_bh(&ab->base_lock);
+			}
+			ieee80211_free_txskb(ar->ah->hw, msdu);
+			return;
+		default:
+			//TODO: Remove this print and add as a stats
+			ath12k_dbg(ab, ATH12K_DBG_DP_TX, "tx frame is not acked status %d\n", ts.status);
+		}
+	}
 
-	ieee80211_tx_status(ar->hw, msdu);
+	if (unlikely(ath12k_debugfs_is_extd_tx_stats_enabled(ar)) ||
+		     ab->hw_params->single_pdev_only) {
+		if (ts.flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
+			if (ar->last_ppdu_id == 0) {
+				ar->last_ppdu_id = ts.ppdu_id;
+			} else if (ar->last_ppdu_id == ts.ppdu_id ||
+				   ar->cached_ppdu_id == ar->last_ppdu_id) {
+				ar->cached_ppdu_id = ar->last_ppdu_id;
+				ar->cached_stats.is_ampdu = true;
+				ath12k_dp_tx_update_txcompl(ar, &ts);
+				memset(&ar->cached_stats, 0,
+				       sizeof(struct ath12k_per_peer_tx_stats));
+			} else {
+				ar->cached_stats.is_ampdu = false;
+				ath12k_dp_tx_update_txcompl(ar, &ts);
+				memset(&ar->cached_stats, 0,
+				       sizeof(struct ath12k_per_peer_tx_stats));
+			}
+			ar->last_ppdu_id = ts.ppdu_id;
+		}
 
-exit:
-	rcu_read_unlock();
+		ath12k_dp_tx_cache_peer_stats(ar, msdu, &ts);
 }
 
-static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
-				      struct hal_wbm_completion_ring_tx *desc,
-				      struct hal_tx_status *ts)
-{
-	ts->buf_rel_source =
-		le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE);
-	if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
-	    ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
+	spin_lock_bh(&ab->base_lock);
+	peer = ath12k_peer_find_by_id(ab, ts.peer_id);
+	if (unlikely(!peer || !peer->sta)) {
+		ath12k_dbg(ab, ATH12K_DBG_DATA,
+			   "dp_tx: failed to find the peer with peer_id %d\n",
+			   ts.peer_id);
+		 spin_unlock_bh(&ab->base_lock);
+		 ieee80211_free_txskb(ar->ah->hw, msdu);
 		return;
+	}
 
-	if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
+	arsta = ath12k_peer_get_link_sta(ab, peer);
+	if (!arsta) {
+		ath12k_warn(ab, "link sta not found on peer %pM id %d\n",
+			    peer->addr, peer->peer_id);
+
+		spin_unlock_bh(&ab->base_lock);
+		ieee80211_free_txskb(ar->ah->hw, msdu);
 		return;
+	}
+
+	status.sta = peer->sta;
+	status.skb = msdu;
+	status.info = info;
+	rate = arsta->last_txrate;
+
+	status_rate.rate_idx = rate;
+	status_rate.try_count = 1;
 
-	ts->status = le32_get_bits(desc->info0,
-				   HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);
+	status.rates = &status_rate;
+	status.n_rates = 1;
 
-	ts->ppdu_id = le32_get_bits(desc->info1,
-				    HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER);
-	if (le32_to_cpu(desc->rate_stats.info0) & HAL_TX_RATE_STATS_INFO0_VALID)
-		ts->rate_stats = le32_to_cpu(desc->rate_stats.info0);
+
+	if (unlikely(ath12k_debugfs_is_extd_tx_stats_enabled(ar))) {
+		if(arsta->wbm_tx_stats && wbm_status < HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX)
+			arsta->wbm_tx_stats->wbm_tx_comp_stats[wbm_status]++;
+	}
+
+	if (unlikely(ath12k_debugfs_is_sawf_stats_enabled(ar)))
+		ath12k_sawf_stats_update(ar, msdu, &ts, peer, timestamp);
+
+	spin_unlock_bh(&ab->base_lock);
+
+	if (flags & ATH12K_SKB_HW_80211_ENCAP)
+		ieee80211_tx_status_8023(ar->ah->hw, vif, msdu);
 	else
-		ts->rate_stats = 0;
+		ieee80211_tx_status_ext(ar->ah->hw, &status);
 }
 
-void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
+static inline bool ath12k_dp_tx_completion_valid(struct hal_wbm_release_ring *desc)
+{
+	struct htt_tx_wbm_completion *status_desc;
+
+	if (FIELD_GET(HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE, desc->info0) ==
+	    HAL_WBM_REL_SRC_MODULE_FW) {
+		status_desc = (struct htt_tx_wbm_completion *)(((u8 *)desc) + HTT_TX_WBM_COMP_STATUS_OFFSET);
+
+		/* Don't consider HTT_TX_COMP_STATUS_MEC_NOTIFY */
+		if (FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS, status_desc->info0) ==
+		    HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY)
+			return false;
+	}
+	return true;
+}
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+static void ath12k_ppeds_tx_update_stats(struct ath12k *ar, struct sk_buff *msdu,
+					 struct hal_wbm_release_ring *tx_status)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct ieee80211_tx_info *info;
+	struct ath12k_peer *peer;
+	struct ath12k_link_sta *arsta;
+	struct hal_tx_status ts = { 0 };
+
+	info = IEEE80211_SKB_CB(msdu);
+	memset(&info->status, 0, sizeof(info->status));
+	info->status.rates[0].idx = -1;
+
+	ath12k_dp_tx_status_parse(ab, (struct hal_wbm_completion_ring_tx *)tx_status, &ts);
+	if (ts.status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
+	    !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+		info->flags |= IEEE80211_TX_STAT_ACK;
+		info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
+					  ts.ack_rssi;
+		info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+	}
+
+	if (ts.status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
+	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
+		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
+	if (ts.status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) {
+		switch (ts.status) {
+		case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU:
+		case HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD:
+		case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES:
+		case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX:
+			return;
+		default:
+			//TODO: Remove this print and add as a stats
+			ath12k_dbg(ab, ATH12K_DBG_DP_TX, "tx frame is not acked status %d\n", ts.status);
+		}
+	}
+
+	spin_lock_bh(&ab->base_lock);
+
+	peer = ath12k_peer_find_by_id(ab, ts.peer_id);
+	if (unlikely(!peer || !peer->sta)) {
+		ath12k_dbg(ab, ATH12K_DBG_DATA,
+			   "dp_tx: failed to find the peer with peer_id %d\n",
+			   ts.peer_id);
+		spin_unlock_bh(&ab->base_lock);
+		return;
+	}
+
+	arsta = ath12k_peer_get_link_sta(ab, peer);
+	if (!arsta) {
+		ath12k_warn(ab, "link sta not found on peer %pM id %d\n",
+			    peer->addr, peer->peer_id);
+		spin_unlock_bh(&ab->base_lock);
+		return;
+	}
+
+	ieee80211_ppeds_tx_update_stats(ar->ah->hw, peer->sta, info, arsta->txrate,
+					peer->link_id, msdu->len);
+
+	spin_unlock_bh(&ab->base_lock);
+}
+
+static inline
+void ath12k_dp_ppeds_tx_comp_get_desc(struct ath12k_base *ab,
+				      struct hal_wbm_completion_ring_tx *tx_status,
+				      struct ath12k_ppeds_tx_desc_info **tx_desc)
+{
+        u64 desc_va = 0;
+	u32 desc_id;
+
+	if (likely(HAL_WBM_COMPL_TX_INFO0_CC_DONE & tx_status->info0)) {
+		/* HW done cookie conversion */
+		desc_va = ((u64)tx_status->buf_va_hi << 32 |
+			   tx_status->buf_va_lo);
+		*tx_desc = (struct ath12k_ppeds_tx_desc_info *)((unsigned long)desc_va);
+	} else {
+		/* SW does cookie conversion to VA */
+		desc_id = u32_get_bits(tx_status->buf_va_hi,
+				       BUFFER_ADDR_INFO1_SW_COOKIE);
+
+		*tx_desc = ath12k_dp_get_ppeds_tx_desc(ab, desc_id);
+	}
+}
+
+int ath12k_ppeds_tx_completion_handler(struct ath12k_base *ab, int budget)
 {
-	struct ath12k *ar;
 	struct ath12k_dp *dp = &ab->dp;
-	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
+	struct dp_ppeds_tx_comp_ring *tx_ring = &dp->ppeds_comp_ring.ppe_wbm2sw_ring;
+	int hal_ring_id = tx_ring->ppe_wbm2sw_ring.ring_id;
 	struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
-	struct ath12k_tx_desc_info *tx_desc = NULL;
+	struct ath12k_ppeds_tx_desc_info *tx_desc = NULL;
 	struct sk_buff *msdu;
-	struct hal_tx_status ts = { 0 };
-	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
-	struct hal_wbm_release_ring *desc;
+	u32 *desc;
 	u8 mac_id;
-	u64 desc_va;
+	int valid_entries, count = 0, i = 0;
+	struct hal_wbm_completion_ring_tx *tx_status;
+	struct htt_tx_wbm_completion *status_desc;
+	enum hal_wbm_rel_src_module buf_rel_source;
+	struct sk_buff_head free_list_head;
+	int work_done = 0, htt_status;
+	size_t stat_size;
+
+	if (likely(ab->stats_disable))
+		/* only need buf_addr_info and info0 */
+		stat_size = 3 * sizeof(u32);
+	else
+		stat_size = sizeof(struct hal_wbm_release_ring);
 
 	spin_lock_bh(&status_ring->lock);
 
-	ath12k_hal_srng_access_begin(ab, status_ring);
+	ath12k_hal_srng_access_dst_ring_begin_nolock(ab, status_ring);
+
+	valid_entries = ath12k_hal_srng_dst_num_free(ab, status_ring, false);
+	if (!valid_entries) {
+		ath12k_hal_srng_access_dst_ring_end_nolock(status_ring);
+		spin_unlock_bh(&status_ring->lock);
+		return work_done;
+	}
+
+	if (valid_entries >= budget)
+		valid_entries = budget;
+
+	ath12k_hal_srng_ppeds_dst_inv_entry(ab, status_ring, valid_entries);
+	skb_queue_head_init(&free_list_head);
 
-	while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) != tx_ring->tx_status_tail) {
-		desc = ath12k_hal_srng_dst_get_next_entry(ab, status_ring);
-		if (!desc)
+	while (likely(valid_entries--)) {
+		desc = (struct hal_wbm_completion_ring_tx *)ath12k_hal_srng_dst_get_next_cache_entry(ab, status_ring);
+		if (!desc || !ath12k_dp_tx_completion_valid(desc))
+			continue;
+
+		memcpy(&tx_ring->tx_status[count], desc, stat_size);
+		count++;
+
+		if (count == valid_entries)
 			break;
+	}
+
+	ath12k_hal_srng_access_dst_ring_end_nolock(status_ring);
+
+	spin_lock_bh(&dp->ppeds_tx_desc_lock[ATH12K_PPEDS_DEFAULT_POOL_ID]);
+	spin_unlock_bh(&status_ring->lock);
+
+	while (count--) {
+		tx_status = &tx_ring->tx_status[i++];
 
-		memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
-		       desc, sizeof(*desc));
-		tx_ring->tx_status_head =
-			ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head);
+		ath12k_dp_ppeds_tx_comp_get_desc(ab, tx_status, &tx_desc);
+		if (unlikely(!tx_desc)) {
+			ath12k_warn(ab, "unable to retrieve ppe ds tx_desc!");
+			continue;
 	}
 
-	if (ath12k_hal_srng_dst_peek(ab, status_ring) &&
-	    (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
-		/* TODO: Process pending tx_status messages when kfifo_is_full() */
-		ath12k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
+		mac_id = tx_desc->mac_id;
+
+		if (unlikely(!ab->stats_disable))
+			ath12k_ppeds_tx_update_stats(ab->pdevs[mac_id].ar, tx_desc->skb,
+						     tx_status);
+
+		/* Release descriptor as soon as extracting necessary info
+		 * to reduce contention
+		 */
+		msdu = ath12k_dp_ppeds_tx_release_desc_nolock(dp, tx_desc,
+							      ATH12K_PPEDS_DEFAULT_POOL_ID);
+		buf_rel_source = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
+					   tx_status->info0);
+		if (unlikely(buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) {
+			status_desc = ((void *)tx_status) + HTT_TX_WBM_COMP_STATUS_OFFSET;
+			htt_status = u32_get_bits(status_desc->info0,
+						  HTT_TX_WBM_COMP_INFO0_STATUS);
+			if (htt_status != HAL_WBM_REL_HTT_TX_COMP_STATUS_OK &&
+			    !ab->stats_disable) {
+				ab->ppeds_stats.fw2wbm_pkt_drops++;
+			}
+			dev_kfree_skb_any(msdu);
+			ath12k_warn(ab, "ath12k: Frame received from unexpected source %d status %d!\n",
+				 buf_rel_source, htt_status);
+			continue;
 	}
 
-	ath12k_hal_srng_access_end(ab, status_ring);
+		/* if skb is being reused, avoid freeing it */
+		if (!msdu)
+			continue;
 
-	spin_unlock_bh(&status_ring->lock);
+		if (skb_has_frag_list(msdu)) {
+			kfree_skb_list(skb_shinfo(msdu)->frag_list);
+			skb_shinfo(msdu)->frag_list = NULL;
+		}
 
-	while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
-		struct hal_wbm_completion_ring_tx *tx_status;
+#ifdef CONFIG_MAC80211_SFE_SUPPORT
+		if (likely(msdu->is_from_recycler)) {
+			__skb_queue_head(&free_list_head, msdu);
+		} else {
+			dev_kfree_skb(msdu);
+		}
+#else
+		dev_kfree_skb(msdu);
+#endif
+
+		work_done++;
+	}
+	spin_unlock_bh(&dp->ppeds_tx_desc_lock[ATH12K_PPEDS_DEFAULT_POOL_ID]);
+	dev_kfree_skb_list_fast(&free_list_head);
+
+	return work_done;
+}
+#endif
+
+int ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id,
+				    int budget)
+{
+	struct ath12k *ar;
+	struct ath12k_dp *dp = &ab->dp;
+	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
+	struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
+	struct ath12k_tx_desc_info *tx_desc = NULL;
+	struct list_head desc_free_list, *cur;
+	struct sk_buff *msdu, *skb_ext_desc;
+	struct ath12k_skb_cb *skb_cb;
+	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
+	u8 mac_id;
+	u64 desc_va;
+	struct ath12k_vif *ahvif;
 		u32 desc_id;
+	u8 flags = 0;
+	int valid_entries;
+	int orig_budget = budget;
+	struct hal_wbm_completion_ring_tx *tx_status;
+	ktime_t timestamp;
+	enum hal_wbm_rel_src_module buf_rel_source;
+	enum hal_wbm_tqm_rel_reason rel_status;
+	struct sk_buff_head free_list_head;
+	int recycler_fast_xmit;
+
+
+	INIT_LIST_HEAD(&desc_free_list);
+
+	ath12k_hal_srng_access_dst_ring_begin_nolock(ab, status_ring);
+
+	valid_entries = ath12k_hal_srng_dst_num_free(ab, status_ring, false);
+	if (!valid_entries) {
+		ath12k_hal_srng_access_dst_ring_end_nolock(status_ring);
+		return 0;
+	}
+
+	if (valid_entries > budget)
+		valid_entries = budget;
 
-		tx_ring->tx_status_tail =
-			ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
-		tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
-		ath12k_dp_tx_status_parse(ab, tx_status, &ts);
+	ath12k_hal_srng_dst_invalidate_entry(ab, status_ring, valid_entries);
+	skb_queue_head_init(&free_list_head);
 
-		if (le32_get_bits(tx_status->info0, HAL_WBM_COMPL_TX_INFO0_CC_DONE)) {
+	while (budget && (tx_status = (struct hal_wbm_completion_ring_tx *)ath12k_hal_srng_dst_get_next_cache_entry(ab, status_ring))) {
+		budget--;
+		if (!ath12k_dp_tx_completion_valid((struct hal_wbm_release_ring *)tx_status))
+			continue;
+
+		if (likely(HAL_WBM_COMPL_TX_INFO0_CC_DONE & tx_status->info0)) {
 			/* HW done cookie conversion */
-			desc_va = ((u64)le32_to_cpu(tx_status->buf_va_hi) << 32 |
-				   le32_to_cpu(tx_status->buf_va_lo));
+			desc_va = ((u64)tx_status->buf_va_hi << 32 |
+					tx_status->buf_va_lo);
 			tx_desc = (struct ath12k_tx_desc_info *)((unsigned long)desc_va);
 		} else {
 			/* SW does cookie conversion to VA */
-			desc_id = le32_get_bits(tx_status->buf_va_hi,
+			desc_id = u32_get_bits(tx_status->buf_va_hi,
 						BUFFER_ADDR_INFO1_SW_COOKIE);
 
 			tx_desc = ath12k_dp_get_tx_desc(ab, desc_id);
 		}
-		if (!tx_desc) {
+		if (unlikely(!tx_desc)) {
 			ath12k_warn(ab, "unable to retrieve tx_desc!");
 			continue;
 		}
 
 		msdu = tx_desc->skb;
 		mac_id = tx_desc->mac_id;
+		recycler_fast_xmit = tx_desc->recycler_fast_xmit;
+		skb_ext_desc = tx_desc->skb_ext_desc;
+		timestamp = tx_desc->timestamp;
 
 		/* Release descriptor as soon as extracting necessary info
 		 * to reduce contention
 		 */
-		ath12k_dp_tx_release_txbuf(dp, tx_desc, tx_desc->pool_id);
-		if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
+		list_add_tail(&tx_desc->list, &desc_free_list);
+
+		if (unlikely(!msdu)) {
+			ab->soc_stats.null_tx_complete[tx_ring->tcl_data_ring_id]++;
+
+			ath12k_err_dump(ab, "Null msdu\n", "Tx compl Desc: ", tx_status,
+					sizeof(*tx_status), NULL);
+
+			if (ath12k_debug_critical)
+				BUG_ON(1);
+
+			continue;
+		}
+
+		prefetch(msdu);
+		prefetch( (u8 *) msdu + 64);
+
+		/* Find the HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE value */
+		buf_rel_source = tx_status->info0 & HAL_WBM_RELEASE_INFO0_REL_SRC_MASK;
+		ab->soc_stats.tx_wbm_rel_source[buf_rel_source]++;
+
+		rel_status = FIELD_GET(HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON,
+				       tx_status->info0);
+		ab->soc_stats.tqm_rel_reason[rel_status]++;
+
+		if (unlikely(buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) {
 			ath12k_dp_tx_process_htt_tx_complete(ab,
 							     (void *)tx_status,
 							     mac_id, msdu,
-							     tx_ring);
+							     tx_ring,
+							     skb_ext_desc,
+							     timestamp);
 			continue;
 		}
-
 		ar = ab->pdevs[mac_id].ar;
 
-		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
-			wake_up(&ar->dp.tx_empty_waitq);
+		ath12k_dp_tx_decrement(ar);
+
+		if (unlikely(WARN_ON_ONCE(buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM))) {
+			/* Must not happen */
+			continue;
+		}
+
+		skb_cb =  (struct ath12k_skb_cb *)&IEEE80211_SKB_CB(msdu)->driver_data;
+		flags = skb_cb->flags;
+		dma_unmap_single_attrs(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE,
+				     DMA_ATTR_SKIP_CPU_SYNC);
+		if (unlikely(skb_ext_desc)) {
+			dma_unmap_single_attrs(ab->dev, skb_cb->paddr_ext_desc,
+					       skb_ext_desc->len,
+					       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+			dev_kfree_skb_any(skb_ext_desc);
+		}
+		/* Free skb here if stats is disabled */
+		if (ab->stats_disable && !(flags & ATH12K_SKB_TX_STATUS)) {
+			ar->ab->soc_stats.tx_completed[tx_ring->tcl_data_ring_id]++;
+
+			if (skb_cb->vif) {
+				ahvif = (void *)skb_cb->vif->drv_priv;
+				if (ahvif->links_map & BIT(skb_cb->link_id)) {
+					if (ahvif->link[skb_cb->link_id])
+						ahvif->link[skb_cb->link_id]->link_stats.tx_completed++;
+				} else {
+					ath12k_warn(ab,
+						    "invalid linkid 0x%X in tx completion handler for linkmap %lu\n",
+						    skb_cb->link_id,
+						    ahvif->links_map);
+				}
+			}
+
+			if (unlikely(msdu->destructor)) {
+				msdu->wifi_acked_valid = 1;
+				msdu->wifi_acked = rel_status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED;
+			}
+			if (unlikely(skb_has_frag_list(msdu))) {
+				kfree_skb_list(skb_shinfo(msdu)->frag_list);
+				skb_shinfo(msdu)->frag_list = NULL;
+			}
+			if (likely(recycler_fast_xmit)) {
+				__skb_queue_head(&free_list_head, msdu);
+			} else {
+				dev_kfree_skb(msdu);
+			}
+		} else {
 
-		ath12k_dp_tx_complete_msdu(ar, msdu, &ts);
+			ath12k_dp_tx_complete_msdu(ar, msdu, (struct hal_wbm_release_ring  *)tx_status,
+						   buf_rel_source,
+						   tx_ring->tcl_data_ring_id, timestamp);
 	}
 }
 
+	spin_lock_bh(&dp->tx_desc_lock[ring_id]);
+	list_for_each(cur, &desc_free_list) {
+		tx_desc = list_entry(cur, struct ath12k_tx_desc_info, list);
+		tx_desc->skb = NULL;
+		tx_desc->skb_ext_desc = NULL;
+		tx_desc->in_use = false;
+	}
+	list_splice_tail(&desc_free_list, &dp->tx_desc_free_list[ring_id]);
+	spin_unlock_bh(&dp->tx_desc_lock[ring_id]);
+
+	ath12k_hal_srng_access_dst_ring_end_nolock(status_ring);
+	return (orig_budget - budget);
+}
+
 static int
 ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
 			      int mac_id, u32 ring_id,
@@ -623,6 +1704,10 @@
 {
 	int ret = 0;
 
+	if ((ring_type == HAL_TX_MONITOR_BUF || ring_type == HAL_TX_MONITOR_DST) &&
+	    !ab->hw_params->supports_tx_monitor)
+		return ret;
+
 	switch (ring_type) {
 	case HAL_RXDMA_BUF:
 		/* for some targets, host fills rx buffer to fw and fw fills to
@@ -650,7 +1735,7 @@
 		*htt_ring_type = HTT_HW_TO_SW_RING;
 		break;
 	case HAL_RXDMA_MONITOR_BUF:
-		*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
+		*htt_ring_id = HTT_RX_MON_HOST2MON_BUF_RING;
 		*htt_ring_type = HTT_SW_TO_HW_RING;
 		break;
 	case HAL_RXDMA_MONITOR_STATUS:
@@ -658,13 +1743,9 @@
 		*htt_ring_type = HTT_SW_TO_HW_RING;
 		break;
 	case HAL_RXDMA_MONITOR_DST:
-		*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
+		*htt_ring_id = HTT_RX_MON_MON2HOST_DEST_RING;
 		*htt_ring_type = HTT_HW_TO_SW_RING;
 		break;
-	case HAL_RXDMA_MONITOR_DESC:
-		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
-		*htt_ring_type = HTT_SW_TO_HW_RING;
-		break;
 	case HAL_TX_MONITOR_BUF:
 		*htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
 		*htt_ring_type = HTT_SW_TO_HW_RING;
@@ -815,7 +1896,16 @@
 	skb_put(skb, len);
 	cmd = (struct htt_ver_req_cmd *)skb->data;
 	cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
-					     HTT_VER_REQ_INFO_MSG_ID);
+					     HTT_OPTION_TAG);
+
+	if (!ath12k_ftm_mode) {
+		cmd->tcl_metadata_version = u32_encode_bits(HTT_TAG_TCL_METADATA_VERSION,
+							    HTT_OPTION_TAG);
+		cmd->tcl_metadata_version |= u32_encode_bits(HTT_TCL_METADATA_VER_SZ,
+							     HTT_OPTION_LEN);
+		cmd->tcl_metadata_version |= u32_encode_bits(HTT_OPTION_TCL_METADATA_VER_V2,
+							     HTT_OPTION_VALUE);
+	}
 
 	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
 	if (ret) {
@@ -860,7 +1950,7 @@
 		cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
 					    HTT_PPDU_STATS_CFG_MSG_TYPE);
 
-		pdev_mask = 1 << (i + 1);
+		pdev_mask = DP_SW2HW_MACID(ar->pdev_idx) + i;
 		cmd->msg |= le32_encode_bits(pdev_mask, HTT_PPDU_STATS_CFG_PDEV_ID);
 		cmd->msg |= le32_encode_bits(mask, HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);
 
@@ -922,13 +2012,27 @@
 				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
 	cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
 				       HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID);
+	cmd->info0 |= u32_encode_bits(tlv_filter->drop_threshold_valid,
+				      HTT_RX_RING_SELECTION_CFG_CMD_DROP_THRES_VAL);
+	if (!tlv_filter->rxmon_disable)
+		cmd->info0 |= u32_encode_bits(true,
+					      HTT_RX_RING_SELECTION_CFG_CMD_RXMON_GCONF_EN);
+
 	cmd->info1 = le32_encode_bits(rx_buf_size,
 				      HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
+	cmd->info1 |= u32_encode_bits(tlv_filter->conf_len_mgmt,
+				      HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
+	cmd->info1 |= u32_encode_bits(tlv_filter->conf_len_ctrl,
+				      HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
+	cmd->info1 |= u32_encode_bits(tlv_filter->conf_len_data,
+				      HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
 	cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
 	cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
 	cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
 	cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
 	cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);
+	cmd->info2 = tlv_filter->info0;
+	cmd->info3 = tlv_filter->info1;
 
 	if (tlv_filter->offset_valid) {
 		cmd->rx_packet_offset =
@@ -960,6 +2064,25 @@
 					 HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
 	}
 
+	if (tlv_filter->rx_mpdu_start_wmask > 0 &&
+			tlv_filter->rx_msdu_end_wmask > 0) {
+		cmd->info2 |=
+			u32_encode_bits(true,
+					HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACTION_ENABLE_SET);
+		cmd->rx_mpdu_start_word_mask =
+			u32_encode_bits(tlv_filter->rx_mpdu_start_wmask,
+					HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK);
+		/* mpdu_end is not used by any hardware so far;
+		 * assign it in the future via hal ops if any
+		 * chip starts using it
+		 */
+		cmd->rx_mpdu_end_word_mask =
+			u32_encode_bits(tlv_filter->rx_mpdu_end_wmask,
+					HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);
+		cmd->rx_msdu_end_word_mask =
+			u32_encode_bits(tlv_filter->rx_msdu_end_wmask,
+					HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK);
+	}
 	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
 	if (ret)
 		goto err_free;
@@ -973,6 +2096,49 @@
 }
 
 int
+ath12k_dp_tx_htt_h2t_vdev_stats_ol_req(struct ath12k *ar, u64 reset_bitmask)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct htt_h2t_msg_type_vdev_txrx_stats_req *cmd;
+	struct ath12k_dp *dp = &ab->dp;
+	struct sk_buff *skb;
+	int len = sizeof(*cmd), ret;
+
+	skb = ath12k_htc_alloc_skb(ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, len);
+	cmd = (struct htt_h2t_msg_type_vdev_txrx_stats_req *)skb->data;
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr = FIELD_PREP(HTT_H2T_VDEV_TXRX_HDR_MSG_TYPE,
+			      HTT_H2T_MSG_TYPE_VDEV_TXRX_STATS_CFG);
+	cmd->hdr |= FIELD_PREP(HTT_H2T_VDEV_TXRX_HDR_PDEV_ID,
+			       ar->pdev->pdev_id);
+	cmd->hdr |= FIELD_PREP(HTT_H2T_VDEV_TXRX_HDR_ENABLE, true);
+
+	/* Periodic interval is calculated as 1 units = 8 ms.
+	 * Ex: 125 -> 1000 ms
+	 */
+	cmd->hdr |= FIELD_PREP(HTT_H2T_VDEV_TXRX_HDR_INTERVAL,
+			       (ATH12K_STATS_TIMER_DUR_1SEC >> 3));
+	cmd->hdr |= FIELD_PREP(HTT_H2T_VDEV_TXRX_HDR_RESET_STATS, true);
+	cmd->vdev_id_lo_bitmask = (reset_bitmask & HTT_H2T_VDEV_TXRX_LO_BITMASK);
+	cmd->vdev_id_hi_bitmask = ((reset_bitmask &
+				    HTT_H2T_VDEV_TXRX_HI_BITMASK) >> 32);
+
+	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
+	if (ret) {
+		ath12k_warn(ab, "failed to send htt type vdev stats offload request: %d",
+			    ret);
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+int
 ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
 				   struct htt_ext_stats_cfg_params *cfg_params,
 				   u64 cookie)
@@ -1020,13 +2186,15 @@
 	struct ath12k_base *ab = ar->ab;
 	int ret;
 
+	if (ab->hw_params->supports_tx_monitor) {
 	ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
 	if (ret) {
 		ath12k_err(ab, "failed to setup tx monitor filter %d\n", ret);
 		return ret;
 	}
+	}
 
-	ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
+	ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
 	if (ret) {
 		ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
 		return ret;
@@ -1038,15 +2206,13 @@
 int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
 {
 	struct ath12k_base *ab = ar->ab;
-	struct ath12k_dp *dp = &ab->dp;
 	struct htt_rx_ring_tlv_filter tlv_filter = {0};
-	int ret, ring_id;
+	int ret = 0, ring_id = 0, i;
 
-	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
 	tlv_filter.offset_valid = false;
 
 	if (!reset) {
-		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
+		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING;
 		tlv_filter.pkt_filter_flags0 =
 					HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
 					HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
@@ -1061,11 +2227,38 @@
 					HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
 					HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
 					HTT_RX_MON_MO_DATA_FILTER_FLASG3;
+
+		tlv_filter.drop_threshold_valid = true;
+		tlv_filter.info0 =
+			u32_encode_bits(HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE,
+				   HTT_RX_TLV_FILTER_INFO0_RX_DROP_THRESHOLD);
+		tlv_filter.info0 |=
+			u32_encode_bits(true,
+				   HTT_RX_TLV_FILTER_INFO0_EN_MSDU_MPDU_LOG_MGMT_TYPE);
+		tlv_filter.info0 |=
+			u32_encode_bits(true,
+				   HTT_RX_TLV_FILTER_INFO0_EN_MSDU_MPDU_LOG_CTRL_TYPE);
+		tlv_filter.info0 |=
+			u32_encode_bits(true,
+				   HTT_RX_TLV_FILTER_INFO0_EN_MSDU_MPDU_LOG_DATA_TYPE);
+
+		tlv_filter.conf_len_ctrl = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+		tlv_filter.conf_len_mgmt = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+		tlv_filter.conf_len_data = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+
+		tlv_filter.info1 =
+			FIELD_PREP(HTT_RX_TLV_FILTER_INFO1_EN_RX_TLV_PKT_OFFSET,
+				   true);
+		tlv_filter.info1 |=
+			FIELD_PREP(HTT_RX_TLV_FILTER_INFO1_RX_PKT_TLV_OFFSET,
+				   HTT_RX_RING_PKT_TLV_OFFSET);
 	}
 
-	if (ab->hw_params->rxdma1_enable) {
-		ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, 0,
-						       HAL_RXDMA_MONITOR_BUF,
+	for (i = 0; i < ar->ab->hw_params->num_rxmda_per_pdev; i++) {
+		ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+
+		ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id + i,
+						       HAL_RXDMA_MONITOR_DST,
 						       DP_RXDMA_REFILL_RING_SIZE,
 						       &tlv_filter);
 		if (ret) {
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/dp_tx.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dp_tx.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/dp_tx.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/dp_tx.h	2024-04-19 16:04:28.953735667 +0200
@@ -15,11 +15,217 @@
 	int ack_rssi;
 };
 
-int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab);
-int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
-		 struct sk_buff *skb);
-void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id);
+/* htt_tx_msdu_desc_ext
+ *
+ * valid_pwr
+ *		if set, tx pwr spec is valid
+ *
+ * valid_mcs_mask
+ *		if set, tx MCS mask is valid
+ *
+ * valid_nss_mask
+ *		if set, tx Nss mask is valid
+ *
+ * valid_preamble_type
+ *		if set, tx preamble spec is valid
+ *
+ * valid_retries
+ *		if set, tx retries spec is valid
+ *
+ * valid_bw_info
+ *		if set, tx dyn_bw and bw_mask are valid
+ *
+ * valid_guard_interval
+ *		if set, tx guard intv spec is valid
+ *
+ * valid_chainmask
+ *		if set, tx chainmask is valid
+ *
+ * valid_encrypt_type
+ *		if set, encrypt type is valid
+ *
+ * valid_key_flags
+ *		if set, key flags is valid
+ *
+ * valid_expire_tsf
+ *		if set, tx expire TSF spec is valid
+ *
+ * valid_chanfreq
+ *		if set, chanfreq is valid
+ *
+ * is_dsrc
+ *		if set, MSDU is a DSRC frame
+ *
+ * guard_interval
+ *		0.4us, 0.8us, 1.6us, 3.2us
+ *
+ * encrypt_type
+ *		0 = NO_ENCRYPT,
+ *		1 = ENCRYPT,
+ *		2 ~ 3 - Reserved
+ *
+ * retry_limit
+ *		Specify the maximum number of transmissions, including the
+ *		initial transmission, to attempt before giving up if no ack
+ *		is received.
+ *		If the tx rate is specified, then all retries shall use the
+ *		same rate as the initial transmission.
+ *		If no tx rate is specified, the target can choose whether to
+ *		retain the original rate during the retransmissions, or to
+ *		fall back to a more robust rate.
+ *
+ * use_dcm_11ax
+ *		If set, Use Dual subcarrier modulation.
+ *		Valid only for 11ax preamble types HE_SU
+ *		and HE_EXT_SU
+ *
+ * ltf_subtype_11ax
+ *		Takes enum values of htt_11ax_ltf_subtype_t
+ *		Valid only for 11ax preamble types HE_SU
+ *		and HE_EXT_SU
+ *
+ * dyn_bw
+ *		0 = static bw, 1 = dynamic bw
+ *
+ * bw_mask
+ *		Valid only if dyn_bw == 0 (static bw).
+ *
+ * host_tx_desc_pool
+ *		If set, Firmware allocates tx_descriptors
+ *		in WAL_BUFFERID_TX_HOST_DATA_EXP,instead
+ *		of WAL_BUFFERID_TX_TCL_DATA_EXP.
+ *		Use cases:
+ *		Any time firmware uses TQM-BYPASS for Data
+ *		TID, firmware expect host to set this bit.
+ *
+ * power
+ *		unit of the power field is 0.5 dbm
+ *		signed value ranging from -64dbm to 63.5 dbm
+ *
+ * mcs_mask
+ *		mcs bit mask of 0 ~ 11
+ *		Setting more than one MCS isn't currently
+ *		supported by the target (but is supported
+ *		in the interface in case in the future
+ *		the target supports specifications of
+ *		a limited set of MCS values.)
+ *
+ * nss_mask
+ *		Nss bit mask 0 ~ 7
+ *		Setting more than one Nss isn't currently
+ *		supported by the target (but is supported
+ *		in the interface in case in the future
+ *		the target supports specifications of
+ *		a limited set of Nss values.)
+ *
+ * pream_type
+ *		Preamble types
+ *
+ * update_peer_cache
+ *		When set these custom values will be
+ *		used for all packets, until the next
+ *		update via this ext header.
+ *		This is to make sure not all packets
+ *		need to include this header.
+ *
+ * chain_mask
+ *		specify which chains to transmit from
+ *
+ * key_flags
+ *		Key Index and related flags - used in mesh mode
+ *
+ * chanfreq
+ *		Channel frequency: This identifies the desired channel
+ *		frequency (in MHz) for tx frames. This is used by FW to help
+ *		determine when it is safe to transmit or drop frames for
+ *		off-channel operation.
+ *		The default value of zero indicates to FW that the corresponding
+ *		VDEV's home channel (if there is one) is the desired channel
+ *		frequency.
+ *
+ * expire_tsf_lo
+ *		tx expiry time (TSF) LSBs
+ *
+ * expire_tsf_hi
+ *		tx expiry time (TSF) MSBs
+ *
+ * learning_frame
+ *		When this flag is set, this frame will be dropped by FW
+ *		rather than being enqueued to the Transmit Queue Manager (TQM) HW.
+ *
+ * send_as_standalone
+ *		This will indicate if the msdu needs to be sent as a singleton PPDU,
+ *		i.e. with no A-MSDU or A-MPDU aggregation.
+ *		The scope is extended to other use-cases.
+ *
+ * is_host_opaque_valid
+ *		set this bit to 1 if the host_opaque_cookie is populated
+ *		with valid information.
+ *
+ * host_opaque_cookie
+ *		Host opaque cookie for special frames
+ */
+
+struct htt_tx_msdu_desc_ext {
+	u32
+		valid_pwr            : 1,
+		valid_mcs_mask       : 1,
+		valid_nss_mask       : 1,
+		valid_preamble_type  : 1,
+		valid_retries        : 1,
+		valid_bw_info        : 1,
+		valid_guard_interval : 1,
+		valid_chainmask      : 1,
+		valid_encrypt_type   : 1,
+		valid_key_flags      : 1,
+		valid_expire_tsf     : 1,
+		valid_chanfreq       : 1,
+		is_dsrc              : 1,
+		guard_interval       : 2,
+		encrypt_type         : 2,
+		retry_limit          : 4,
+		use_dcm_11ax         : 1,
+		ltf_subtype_11ax     : 2,
+		dyn_bw               : 1,
+		bw_mask              : 6,
+		host_tx_desc_pool    : 1;
+	u32
+		power                : 8,
+		mcs_mask             : 12,
+		nss_mask             : 8,
+		pream_type           : 3,
+		update_peer_cache    : 1;
+	u32
+		chain_mask         : 8,
+		key_flags          : 8,
+		chanfreq           : 16;
+
+	u32 expire_tsf_lo;
+	u32 expire_tsf_hi;
 
+	u32
+		learning_frame       :  1,
+		send_as_standalone   :  1,
+		is_host_opaque_valid :  1,
+		rsvd0                : 29;
+	u32
+		host_opaque_cookie  : 16,
+		rsvd1               : 16;
+} __packed;
+
+void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status *ts);
+int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab);
+int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
+		 struct ath12k_sta *ahsta, struct sk_buff *skb,
+		 bool gsn_valid, int mcbc_gsn);
+int ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id,
+				    int tx_comp_budget);
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+int ath12k_ppeds_tx_completion_handler(struct ath12k_base *ab, int ring_id);
+struct ath12k_ppeds_tx_desc_info *
+ath12k_dp_ppeds_tx_assign_desc_nolock(struct ath12k_dp *dp,
+				      u8 ring_id);
+#endif
 int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask);
 int
 ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
@@ -38,4 +244,6 @@
 				     struct htt_tx_ring_tlv_filter *htt_tlv_filter);
 int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset);
 int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset);
+int ath12k_dp_tx_htt_h2t_vdev_stats_ol_req(struct ath12k *ar, u64 reset_bitmask);
+int ath12k_dp_tx_direct(struct ath12k_link_vif *arvif, struct sk_buff *skb);
 #endif
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/hal.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hal.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/hal.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hal.c	2024-03-18 14:40:14.851741333 +0100
@@ -4,11 +4,17 @@
  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
 #include "hal_tx.h"
 #include "hal_rx.h"
 #include "debug.h"
 #include "hal_desc.h"
 #include "hif.h"
+#include "dp_rx.h"
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+#define RING_HALT_TIMEOUT		10
+#endif
 
 static const struct hal_srng_config hw_srng_config_template[] = {
 	/* TODO: max_rings can populated by querying HW capabilities */
@@ -57,6 +63,14 @@
 		.ring_dir = HAL_SRNG_DIR_DST,
 		.max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
 	},
+	[HAL_REO2PPE] = {
+		.start_ring_id = HAL_SRNG_RING_ID_REO2PPE,
+		.max_rings = 1,
+		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+		.mac_type = ATH12K_HAL_SRNG_UMAC,
+		.ring_dir = HAL_SRNG_DIR_DST,
+		.max_size = HAL_REO2PPE_RING_BASE_MSB_RING_SIZE,
+	},
 	[HAL_TCL_DATA] = {
 		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
 		.max_rings = 6,
@@ -147,7 +161,7 @@
 		.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
 	},
 	[HAL_RXDMA_MONITOR_BUF] = {
-		.start_ring_id = HAL_SRNG_SW2RXMON_BUF0,
+		.start_ring_id = HAL_SRNG_SW2RXMON_BUF0, /* HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF, */
 		.max_rings = 1,
 		.entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
 		.mac_type = ATH12K_HAL_SRNG_PMAC,
@@ -168,20 +182,20 @@
 		.start_ring_id = HAL_SRNG_RING_ID_PPE2TCL1,
 		.max_rings = 1,
 		.entry_size = sizeof(struct hal_tcl_entrance_from_ppe_ring) >> 2,
-		.mac_type = ATH12K_HAL_SRNG_PMAC,
+		.mac_type = ATH12K_HAL_SRNG_UMAC,
 		.ring_dir = HAL_SRNG_DIR_SRC,
-		.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
+		.max_size = HAL_PPE2TCL_RING_BASE_MSB_RING_SIZE,
 	},
 	[HAL_PPE_RELEASE] = {
 		.start_ring_id = HAL_SRNG_RING_ID_WBM_PPE_RELEASE,
 		.max_rings = 1,
 		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
-		.mac_type = ATH12K_HAL_SRNG_PMAC,
+		.mac_type = ATH12K_HAL_SRNG_UMAC,
 		.ring_dir = HAL_SRNG_DIR_SRC,
 		.max_size = HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE,
 	},
 	[HAL_TX_MONITOR_BUF] = {
-		.start_ring_id = HAL_SRNG_SW2TXMON_BUF0,
+		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
 		.max_rings = 1,
 		.entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
 		.mac_type = ATH12K_HAL_SRNG_PMAC,
@@ -394,9 +408,16 @@
 			    RX_MSDU_END_INFO5_TID);
 }
 
+static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_intra_bss_bit(struct hal_rx_desc *desc)
+{
+	/* TODO - msdu_end info9 */
+	return 0;
+}
+
 static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
 {
-	return __le16_to_cpu(desc->u.qcn9274.mpdu_start.sw_peer_id);
+	return le16_get_bits(desc->u.qcn9274.mpdu_start.sw_peer_id,
+			     RX_MPDU_START_SW_PEER_ID_PEER);
 }
 
 static void ath12k_hw_qcn9274_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc,
@@ -447,7 +468,7 @@
 	return desc->u.qcn9274.mpdu_start.addr2;
 }
 
-static bool ath12k_hw_qcn9274_rx_desc_is_mcbc(struct hal_rx_desc *desc)
+static bool ath12k_hw_qcn9274_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
 {
 	return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) &
 	       RX_MPDU_START_INFO6_MCAST_BCAST;
@@ -511,9 +532,91 @@
 	crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[1]);
 }
 
-static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
+static u8 ath12k_hal_qcn9274_rx_desc_get_ip_valid(struct hal_rx_desc *desc)
+{
+	bool ipv4, ipv6;
+	ipv4 = FIELD_GET(RX_MSDU_END_INFO11_IPV4,
+	                 __le32_to_cpu(desc->u.qcn9274.msdu_end.info11));
+	ipv6 = FIELD_GET(RX_MSDU_END_INFO11_IPV6,
+	                 __le32_to_cpu(desc->u.qcn9274.msdu_end.info11));
+	return (ipv4 || ipv6);
+}
+
+#define MPDU_START_WMASK 0x7FC
+#define MSDU_END_WMASK 0x13441
+
+static inline u16 ath12k_hal_rx_mpdu_start_wmask_get(void)
+{
+	return MPDU_START_WMASK;
+}
+
+static inline u32 ath12k_hal_rx_msdu_end_wmask_get(void)
+{
+	return MSDU_END_WMASK;
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
+{
+	return le64_get_bits(desc->u.qcn9274.msdu_end.msdu_end_tag,
+			     RX_MSDU_END_64_TLV_SRC_LINK_ID);
+}
+
+#define PMM_REG_OFFSET	4
+
+static void ath12k_hal_get_tsf_reg(u8 mac_id, enum hal_scratch_reg_enum *tsf_enum_low,
+				  enum hal_scratch_reg_enum *tsf_enum_hi)
+{
+	if (!mac_id) {
+		*tsf_enum_low = PMM_MAC0_TSF2_OFFSET_LO_US;
+		*tsf_enum_hi = PMM_MAC0_TSF2_OFFSET_HI_US;
+	} else if (mac_id == 1) {
+		*tsf_enum_low = PMM_MAC1_TSF2_OFFSET_LO_US;
+		*tsf_enum_hi = PMM_MAC1_TSF2_OFFSET_HI_US;
+	}
+}
+
+static void ath12k_hal_qcn9274_get_tsf2_scratch_reg(struct ath12k_base *ab,
+						    u8 mac_id, u64 *value)
+{
+	enum hal_scratch_reg_enum enum_lo, enum_hi;
+	u32 offset_lo, offset_hi;
+
+	ath12k_hal_get_tsf_reg(mac_id, &enum_lo, &enum_hi);
+
+	if (ab->hif.ops->pmm_read32) {
+		offset_lo = ath12k_hif_pmm_read32(ab, ATH12K_PPT_ADDR_OFFSET(enum_lo));
+		offset_hi = ath12k_hif_pmm_read32(ab, ATH12K_PPT_ADDR_OFFSET(enum_hi));
+	} else if (ab->hw_rev == ATH12K_HW_QCN6432_HW10) {
+		offset_lo = ath12k_hif_cmem_read32(ab, PMM_REG_BASE_QCN9224 + ATH12K_PPT_ADDR_OFFSET(enum_lo));
+		offset_hi = ath12k_hif_cmem_read32(ab, PMM_REG_BASE_QCN9224 + ATH12K_PPT_ADDR_OFFSET(enum_hi));
+	} else {
+		offset_lo = ath12k_hif_read32(ab, PMM_REG_BASE_QCN9224 + ATH12K_PPT_ADDR_OFFSET(enum_lo));
+		offset_hi = ath12k_hif_read32(ab, PMM_REG_BASE_QCN9224 + ATH12K_PPT_ADDR_OFFSET(enum_hi));
+	}
+
+	*value = ((u64)(offset_hi) << 32 | offset_lo);
+}
+
+static void ath12k_hal_qcn9274_get_tqm_scratch_reg(struct ath12k_base *ab, u64 *value)
 {
-	return __le16_to_cpu(desc->u.qcn9274.mpdu_start.frame_ctrl);
+	u32 offset_lo, offset_hi;
+
+	if (ab->hif.ops->pmm_read32) {
+		offset_lo = ath12k_hif_pmm_read32(ab, ATH12K_PPT_ADDR_OFFSET(PMM_TQM_CLOCK_OFFSET_LO_US));
+		offset_hi = ath12k_hif_pmm_read32(ab, ATH12K_PPT_ADDR_OFFSET(PMM_TQM_CLOCK_OFFSET_HI_US));
+	} else if (ab->hw_rev == ATH12K_HW_QCN6432_HW10) {
+		offset_lo = ath12k_hif_cmem_read32(ab,
+						   PMM_REG_BASE_QCN9224 + ATH12K_PPT_ADDR_OFFSET(PMM_TQM_CLOCK_OFFSET_LO_US));
+		offset_hi = ath12k_hif_cmem_read32(ab,
+						   PMM_REG_BASE_QCN9224 + ATH12K_PPT_ADDR_OFFSET(PMM_TQM_CLOCK_OFFSET_HI_US));
+	} else {
+		offset_lo = ath12k_hif_read32(ab,
+					      PMM_REG_BASE_QCN9224 + ATH12K_PPT_ADDR_OFFSET(PMM_TQM_CLOCK_OFFSET_LO_US));
+		offset_hi = ath12k_hif_read32(ab,
+					      PMM_REG_BASE_QCN9224 + ATH12K_PPT_ADDR_OFFSET(PMM_TQM_CLOCK_OFFSET_HI_US));
+	}
+
+	*value = ((u64)(offset_hi) << 32 | offset_lo);
 }
 
 static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
@@ -551,10 +654,16 @@
 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP;
 
+	s = &hal->srng_config[HAL_REO2PPE];
+	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO2PPE_RING_BASE_LSB(ab);
+	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO2PPE_HP;
+	s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+	s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP;
+
 	s = &hal->srng_config[HAL_TCL_DATA];
-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB;
+	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
-	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB;
+	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
 	s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
 
 	s = &hal->srng_config[HAL_TCL_CMD];
@@ -566,29 +675,29 @@
 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
 
 	s = &hal->srng_config[HAL_CE_SRC];
-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB;
-	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP;
-	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
-		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
-	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
-		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
+	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
+	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP;
+	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
+	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
 
 	s = &hal->srng_config[HAL_CE_DST];
-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB;
-	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP;
-	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
-		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
-	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
-		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
+	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP;
+	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
 
 	s = &hal->srng_config[HAL_CE_DST_STATUS];
-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
 		HAL_CE_DST_STATUS_RING_BASE_LSB;
-	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP;
-	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
-		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
-	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
-		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP;
+	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
 
 	s = &hal->srng_config[HAL_WBM_IDLE_LINK];
 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
@@ -615,14 +724,9 @@
 	 * RXDMA_RX_MONITOR_BUF, TX_MONITOR_BUF, TX_MONITOR_DST, SW2RXDMA
 	 */
 	s = &hal->srng_config[HAL_PPE2TCL];
-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_BASE_LSB;
+	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_BASE_LSB(ab);
 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_HP;
 
-	s = &hal->srng_config[HAL_PPE_RELEASE];
-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG +
-				HAL_WBM_PPE_RELEASE_RING_BASE_LSB(ab);
-	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_PPE_RELEASE_RING_HP;
-
 	return 0;
 }
 
@@ -680,6 +784,59 @@
 	return errmap;
 }
 
+static void ath12k_hw_qcn9274_get_rx_first_last_msdu(struct hal_rx_desc *desc,
+						     struct ath12k_dp_rx_info *rx_info)
+{
+	rx_info->is_first_msdu = !!le16_get_bits(desc->u.qcn9274.msdu_end.info5,
+						 RX_MSDU_END_INFO5_FIRST_MSDU);
+	rx_info->is_last_msdu = !!le16_get_bits(desc->u.qcn9274.msdu_end.info5,
+						RX_MSDU_END_INFO5_LAST_MSDU);
+}
+
+static void ath12k_hw_qcn9274_get_rx_msdulen_l3pad(struct hal_rx_desc *desc,
+						   struct ath12k_dp_rx_info *rx_info)
+{
+
+	rx_info->msdu_done = !!le32_get_bits(desc->u.qcn9274.msdu_end.info14,
+					     RX_MSDU_END_INFO14_MSDU_DONE);
+	rx_info->msdu_len = le32_get_bits(desc->u.qcn9274.msdu_end.info10,
+					  RX_MSDU_END_INFO10_MSDU_LENGTH);
+	rx_info->l3_pad_bytes = le16_get_bits(desc->u.qcn9274.msdu_end.info5,
+					      RX_MSDU_END_INFO5_L3_HDR_PADDING);
+}
+
+static void ath12k_hw_qcn9274_get_rx_desc_info(struct hal_rx_desc *desc,
+					       struct ath12k_dp_rx_info *rx_info)
+{
+	__le32 info12 = desc->u.qcn9274.msdu_end.info12;
+	__le32 info13 = desc->u.qcn9274.msdu_end.info13;
+	__le32 info11 = desc->u.qcn9274.msdu_end.info11;
+
+	rx_info->pkt_type = le32_get_bits(info12, RX_MSDU_END_INFO12_PKT_TYPE);
+	rx_info->sgi = le32_get_bits(info12, RX_MSDU_END_INFO12_SGI);
+	rx_info->rate_mcs = le32_get_bits(info12, RX_MSDU_END_INFO12_RATE_MCS);
+	rx_info->bw = le32_get_bits(info12, RX_MSDU_END_INFO12_RECV_BW);
+	rx_info->nss = hweight8(le32_get_bits(info12, RX_MSDU_END_INFO12_MIMO_SS_BITMAP));
+	rx_info->phy_meta_data = __le32_to_cpu(desc->u.qcn9274.msdu_end.phy_meta_data);
+
+	rx_info->ip_csum_fail = !!le32_get_bits(info13, RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
+	rx_info->l4_csum_fail = !!le32_get_bits(info13, RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
+	rx_info->ip_is_valid = (le32_get_bits(__le32_to_cpu(info11), RX_MSDU_END_INFO11_IPV4) ||
+			       le32_get_bits(__le32_to_cpu(info11),  RX_MSDU_END_INFO11_IPV6));
+
+	rx_info->decap_type = le32_get_bits(info11, RX_MSDU_END_INFO11_DECAP_FORMAT);
+
+	rx_info->is_mcbc = ((!!le16_get_bits(desc->u.qcn9274.msdu_end.info5,
+			    RX_MSDU_END_INFO5_FIRST_MSDU)) &&
+			    (__le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) &
+			    RX_MPDU_START_INFO6_MCAST_BCAST));
+
+	rx_info->peer_id = le16_get_bits(desc->u.qcn9274.mpdu_start.sw_peer_id,
+					 RX_MPDU_START_SW_PEER_ID_PEER);
+	rx_info->tid = le16_get_bits(desc->u.qcn9274.msdu_end.info5,
+				     RX_MSDU_END_INFO5_TID);
+}
+
 const struct hal_ops hal_qcn9274_ops = {
 	.rx_desc_get_first_msdu = ath12k_hw_qcn9274_rx_desc_get_first_msdu,
 	.rx_desc_get_last_msdu = ath12k_hw_qcn9274_rx_desc_get_last_msdu,
@@ -706,12 +863,12 @@
 	.rx_desc_get_msdu_payload = ath12k_hw_qcn9274_rx_desc_get_msdu_payload,
 	.rx_desc_get_mpdu_start_offset = ath12k_hw_qcn9274_rx_desc_get_mpdu_start_offset,
 	.rx_desc_get_msdu_end_offset = ath12k_hw_qcn9274_rx_desc_get_msdu_end_offset,
+	.rx_desc_get_msdu_intra_bss_bit = ath12k_hw_qcn9274_rx_desc_get_msdu_intra_bss_bit,
 	.rx_desc_mac_addr2_valid = ath12k_hw_qcn9274_rx_desc_mac_addr2_valid,
 	.rx_desc_mpdu_start_addr2 = ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2,
-	.rx_desc_is_mcbc = ath12k_hw_qcn9274_rx_desc_is_mcbc,
+	.rx_desc_is_da_mcbc = ath12k_hw_qcn9274_rx_desc_is_da_mcbc,
 	.rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr,
 	.rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr,
-	.rx_desc_get_mpdu_frame_ctl = ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl,
 	.create_srng_config = ath12k_hal_srng_create_config_qcn9274,
 	.tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map,
 	.dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done,
@@ -719,6 +876,65 @@
 	.dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail,
 	.dp_rx_h_is_decrypted = ath12k_hw_qcn9274_dp_rx_h_is_decrypted,
 	.dp_rx_h_mpdu_err = ath12k_hw_qcn9274_dp_rx_h_mpdu_err,
+	.rx_desc_get_ip_valid = ath12k_hal_qcn9274_rx_desc_get_ip_valid,
+	.rxdma_ring_wmask_rx_mpdu_start = ath12k_hal_rx_mpdu_start_wmask_get,
+	.rxdma_ring_wmask_rx_msdu_end = ath12k_hal_rx_msdu_end_wmask_get,
+	.rx_desc_get_msdu_src_link_id = ath12k_hw_qcn9274_rx_desc_get_msdu_src_link,
+	.rx_get_desc_info = ath12k_hw_qcn9274_get_rx_desc_info,
+	.rx_get_desc_msdulen_l3pad = ath12k_hw_qcn9274_get_rx_msdulen_l3pad,
+	.rx_desc_get_first_last_msdu = ath12k_hw_qcn9274_get_rx_first_last_msdu,
+	.hal_get_tsf2_scratch_reg = ath12k_hal_qcn9274_get_tsf2_scratch_reg,
+	.hal_get_tqm_scratch_reg = ath12k_hal_qcn9274_get_tqm_scratch_reg,
+};
+
+const struct hal_ops hal_qcn6432_ops = {
+	.rx_desc_get_first_msdu = ath12k_hw_qcn9274_rx_desc_get_first_msdu,
+	.rx_desc_get_last_msdu = ath12k_hw_qcn9274_rx_desc_get_last_msdu,
+	.rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes,
+	.rx_desc_encrypt_valid = ath12k_hw_qcn9274_rx_desc_encrypt_valid,
+	.rx_desc_get_encrypt_type = ath12k_hw_qcn9274_rx_desc_get_encrypt_type,
+	.rx_desc_get_decap_type = ath12k_hw_qcn9274_rx_desc_get_decap_type,
+	.rx_desc_get_mesh_ctl = ath12k_hw_qcn9274_rx_desc_get_mesh_ctl,
+	.rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_qcn9274_rx_desc_get_mpdu_seq_ctl_vld,
+	.rx_desc_get_mpdu_fc_valid = ath12k_hw_qcn9274_rx_desc_get_mpdu_fc_valid,
+	.rx_desc_get_mpdu_start_seq_no = ath12k_hw_qcn9274_rx_desc_get_mpdu_start_seq_no,
+	.rx_desc_get_msdu_len = ath12k_hw_qcn9274_rx_desc_get_msdu_len,
+	.rx_desc_get_msdu_sgi = ath12k_hw_qcn9274_rx_desc_get_msdu_sgi,
+	.rx_desc_get_msdu_rate_mcs = ath12k_hw_qcn9274_rx_desc_get_msdu_rate_mcs,
+	.rx_desc_get_msdu_rx_bw = ath12k_hw_qcn9274_rx_desc_get_msdu_rx_bw,
+	.rx_desc_get_msdu_freq = ath12k_hw_qcn9274_rx_desc_get_msdu_freq,
+	.rx_desc_get_msdu_pkt_type = ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type,
+	.rx_desc_get_msdu_nss = ath12k_hw_qcn9274_rx_desc_get_msdu_nss,
+	.rx_desc_get_mpdu_tid = ath12k_hw_qcn9274_rx_desc_get_mpdu_tid,
+	.rx_desc_get_mpdu_peer_id = ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id,
+	.rx_desc_copy_end_tlv = ath12k_hw_qcn9274_rx_desc_copy_end_tlv,
+	.rx_desc_get_mpdu_ppdu_id = ath12k_hw_qcn9274_rx_desc_get_mpdu_ppdu_id,
+	.rx_desc_set_msdu_len = ath12k_hw_qcn9274_rx_desc_set_msdu_len,
+	.rx_desc_get_msdu_payload = ath12k_hw_qcn9274_rx_desc_get_msdu_payload,
+	.rx_desc_get_mpdu_start_offset = ath12k_hw_qcn9274_rx_desc_get_mpdu_start_offset,
+	.rx_desc_get_msdu_end_offset = ath12k_hw_qcn9274_rx_desc_get_msdu_end_offset,
+	.rx_desc_get_msdu_intra_bss_bit = ath12k_hw_qcn9274_rx_desc_get_msdu_intra_bss_bit,
+	.rx_desc_mac_addr2_valid = ath12k_hw_qcn9274_rx_desc_mac_addr2_valid,
+	.rx_desc_mpdu_start_addr2 = ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2,
+	.rx_desc_is_da_mcbc = ath12k_hw_qcn9274_rx_desc_is_da_mcbc,
+	.rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr,
+	.rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr,
+	.create_srng_config = ath12k_hal_srng_create_config_qcn9274,
+	.tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map,
+	.dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done,
+	.dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail,
+	.dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail,
+	.dp_rx_h_is_decrypted = ath12k_hw_qcn9274_dp_rx_h_is_decrypted,
+	.dp_rx_h_mpdu_err = ath12k_hw_qcn9274_dp_rx_h_mpdu_err,
+	.rx_desc_get_ip_valid = ath12k_hal_qcn9274_rx_desc_get_ip_valid,
+	.rxdma_ring_wmask_rx_mpdu_start = ath12k_hal_rx_mpdu_start_wmask_get,
+	.rxdma_ring_wmask_rx_msdu_end = ath12k_hal_rx_msdu_end_wmask_get,
+	.rx_desc_get_msdu_src_link_id = ath12k_hw_qcn9274_rx_desc_get_msdu_src_link,
+	.rx_get_desc_info = ath12k_hw_qcn9274_get_rx_desc_info,
+	.rx_get_desc_msdulen_l3pad = ath12k_hw_qcn9274_get_rx_msdulen_l3pad,
+	.rx_desc_get_first_last_msdu = ath12k_hw_qcn9274_get_rx_first_last_msdu,
+	.hal_get_tsf2_scratch_reg = ath12k_hal_qcn9274_get_tsf2_scratch_reg,
+	.hal_get_tqm_scratch_reg = ath12k_hal_qcn9274_get_tqm_scratch_reg,
 };
 
 static bool ath12k_hw_wcn7850_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
@@ -887,10 +1103,10 @@
 	return desc->u.wcn7850.mpdu_start.addr2;
 }
 
-static bool ath12k_hw_wcn7850_rx_desc_is_mcbc(struct hal_rx_desc *desc)
+static bool ath12k_hw_wcn7850_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
 {
-	return __le32_to_cpu(desc->u.wcn7850.mpdu_start.info6) &
-	       RX_MPDU_START_INFO6_MCAST_BCAST;
+	return __le16_to_cpu(desc->u.wcn7850.msdu_end.info5) &
+	       RX_MSDU_END_INFO5_DA_IS_MCBC;
 }
 
 static void ath12k_hw_wcn7850_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
@@ -951,11 +1167,6 @@
 	crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[1]);
 }
 
-static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
-{
-	return __le16_to_cpu(desc->u.wcn7850.mpdu_start.frame_ctrl);
-}
-
 static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab)
 {
 	struct ath12k_hal *hal = &ab->hal;
@@ -992,9 +1203,9 @@
 
 	s = &hal->srng_config[HAL_TCL_DATA];
 	s->max_rings = 5;
-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB;
+	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
-	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB;
+	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
 	s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
 
 	s = &hal->srng_config[HAL_TCL_CMD];
@@ -1007,31 +1218,31 @@
 
 	s = &hal->srng_config[HAL_CE_SRC];
 	s->max_rings = 12;
-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB;
-	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP;
-	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
-		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
-	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
-		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
+	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
+	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP;
+	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
+	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
 
 	s = &hal->srng_config[HAL_CE_DST];
 	s->max_rings = 12;
-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB;
-	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP;
-	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
-		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
-	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
-		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
+	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP;
+	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
 
 	s = &hal->srng_config[HAL_CE_DST_STATUS];
 	s->max_rings = 12;
-	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
 		HAL_CE_DST_STATUS_RING_BASE_LSB;
-	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP;
-	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
-		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
-	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
-		HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP;
+	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
 
 	s = &hal->srng_config[HAL_WBM_IDLE_LINK];
 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
@@ -1163,10 +1374,9 @@
 	.rx_desc_get_msdu_end_offset = ath12k_hw_wcn7850_rx_desc_get_msdu_end_offset,
 	.rx_desc_mac_addr2_valid = ath12k_hw_wcn7850_rx_desc_mac_addr2_valid,
 	.rx_desc_mpdu_start_addr2 = ath12k_hw_wcn7850_rx_desc_mpdu_start_addr2,
-	.rx_desc_is_mcbc = ath12k_hw_wcn7850_rx_desc_is_mcbc,
+	.rx_desc_is_da_mcbc = ath12k_hw_wcn7850_rx_desc_is_da_mcbc,
 	.rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr,
 	.rx_desc_get_crypto_header = ath12k_hw_wcn7850_rx_desc_get_crypto_hdr,
-	.rx_desc_get_mpdu_frame_ctl = ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl,
 	.create_srng_config = ath12k_hal_srng_create_config_wcn7850,
 	.tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map,
 	.dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done,
@@ -1251,7 +1461,8 @@
 }
 
 static void ath12k_hal_srng_dst_hw_init(struct ath12k_base *ab,
-					struct hal_srng *srng)
+					struct hal_srng *srng,
+					u32 idx)
 {
 	struct ath12k_hal *hal = &ab->hal;
 	u32 val;
@@ -1260,6 +1471,12 @@
 
 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
 
+	val = ath12k_hif_read32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET) &
+		~HAL_REO1_RING_MISC_SRNG_ENABLE;
+
+	ath12k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET, val);
+	val = 0;
+
 	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
 		ath12k_hif_write32(ab, reg_base +
 				   ath12k_hal_reo1_ring_msi1_base_lsb_offset(ab),
@@ -1309,9 +1526,10 @@
 
 	/* Initialize head and tail pointers to indicate ring is empty */
 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
-	ath12k_hif_write32(ab, reg_base, 0);
-	ath12k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0);
-	*srng->u.dst_ring.hp_addr = 0;
+	ath12k_hif_write32(ab, reg_base, idx * srng->entry_size);
+	ath12k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, idx * srng->entry_size);
+	*srng->u.dst_ring.hp_addr = idx * srng->entry_size;
+	srng->u.dst_ring.tp = idx * srng->entry_size;
 
 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
 	val = 0;
@@ -1327,7 +1545,8 @@
 }
 
 static void ath12k_hal_srng_src_hw_init(struct ath12k_base *ab,
-					struct hal_srng *srng)
+					struct hal_srng *srng,
+					u32 idx)
 {
 	struct ath12k_hal *hal = &ab->hal;
 	u32 val;
@@ -1336,6 +1555,12 @@
 
 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
 
+	val = ath12k_hif_read32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab)) &
+		~HAL_TCL1_RING_MISC_SRNG_ENABLE;
+
+	ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val);
+	val = 0;
+
 	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
 		ath12k_hif_write32(ab, reg_base +
 				   HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
@@ -1397,9 +1622,10 @@
 
 	/* Initialize head and tail pointers to indicate ring is empty */
 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
-	ath12k_hif_write32(ab, reg_base, 0);
-	ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
-	*srng->u.src_ring.tp_addr = 0;
+	ath12k_hif_write32(ab, reg_base, idx * srng->entry_size);
+	ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, idx * srng->entry_size);
+	*srng->u.src_ring.tp_addr = idx * srng->entry_size;
+	srng->u.src_ring.hp = idx * srng->entry_size;
 
 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
 	val = 0;
@@ -1422,12 +1648,13 @@
 }
 
 static void ath12k_hal_srng_hw_init(struct ath12k_base *ab,
-				    struct hal_srng *srng)
+				    struct hal_srng *srng,
+				    u32 idx)
 {
 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
-		ath12k_hal_srng_src_hw_init(ab, srng);
+		ath12k_hal_srng_src_hw_init(ab, srng, idx);
 	else
-		ath12k_hal_srng_dst_hw_init(ab, srng);
+		ath12k_hal_srng_dst_hw_init(ab, srng, idx);
 }
 
 static int ath12k_hal_srng_get_ring_id(struct ath12k_base *ab,
@@ -1572,14 +1799,14 @@
 }
 
 void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
-				   dma_addr_t paddr)
+				   dma_addr_t paddr, u8 rbm_id)
 {
 	desc->buf_addr_info.info0 = le32_encode_bits((paddr & HAL_ADDR_LSB_REG_MASK),
 						     BUFFER_ADDR_INFO0_ADDR);
 	desc->buf_addr_info.info1 =
 			le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
 					 BUFFER_ADDR_INFO1_ADDR) |
-			le32_encode_bits(1, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
+			u32_encode_bits(rbm_id, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
 			le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE);
 }
 
@@ -1593,6 +1820,21 @@
 	return NULL;
 }
 
+void ath12k_hal_srng_prefetch_desc(struct ath12k_base *ab,
+				   struct hal_srng *srng)
+{
+	u32 *desc;
+
+	/* prefetch only if desc is available */
+	desc = ath12k_hal_srng_dst_peek(ab, srng);
+	if (likely(desc)) {
+		dma_sync_single_for_cpu(ab->dev, virt_to_phys(desc),
+				        (srng->entry_size * sizeof(u32)),
+					DMA_FROM_DEVICE);
+		prefetch(desc);
+	}
+}
+
 void *ath12k_hal_srng_dst_get_next_entry(struct ath12k_base *ab,
 					 struct hal_srng *srng)
 {
@@ -1605,12 +1847,108 @@
 
 	desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
 
-	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
-			      srng->ring_size;
+	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
+
+	/* wrap around to start of ring*/
+	if (srng->u.dst_ring.tp == srng->ring_size)
+		srng->u.dst_ring.tp = 0;
+
+	/* Try to prefetch the next descriptor in the ring */
+	if (srng->flags & HAL_SRNG_FLAGS_CACHED)
+		ath12k_hal_srng_prefetch_desc(ab, srng);
 
 	return desc;
 }
 
+u32 *ath12k_hal_srng_dst_get_next_cache_entry(struct ath12k_base *ab,
+                                              struct hal_srng *srng)
+{
+	u32 *desc,*desc_next;
+	lockdep_assert_held(&srng->lock);
+
+	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
+	        return NULL;
+
+	desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
+
+	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
+
+	/* wrap around to start of ring*/
+	if (srng->u.dst_ring.tp == srng->ring_size)
+		srng->u.dst_ring.tp = 0;
+
+	/* Try to prefetch the next descriptor in the ring */
+	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
+		/* prefetch only if desc is available */
+		desc_next = srng->ring_base_vaddr + srng->u.dst_ring.tp;
+		prefetch(desc_next);
+	}
+	return desc;
+}
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+void ath12k_hal_srng_ppeds_dst_inv_entry(struct ath12k_base *ab,
+					 struct hal_srng *srng, int entries)
+{
+	u32 *desc, *last_desc;
+	u32 tp, hp;
+	u32 remaining_entries;
+
+	if (!(srng->flags & HAL_SRNG_FLAGS_CACHED) || !entries)
+		return;
+
+	tp = srng->u.dst_ring.tp;
+	hp = srng->u.dst_ring.cached_hp;
+
+	desc = srng->ring_base_vaddr + tp;
+	if (hp > tp) {
+		last_desc = ((void *)desc + entries * srng->entry_size * sizeof(u32));
+		dmac_inv_range_no_dsb((void *)desc,
+				      (void *)last_desc);
+	} else {
+		remaining_entries = srng->ring_size - tp;
+		last_desc = ((void *)desc + remaining_entries * sizeof(u32));
+		dmac_inv_range_no_dsb((void *)desc, (void *)last_desc);
+
+		last_desc = ((void *)srng->ring_base_vaddr + hp * sizeof(u32));
+		dmac_inv_range_no_dsb((void *)srng->ring_base_vaddr, (void *)last_desc);
+	}
+
+	dsb(st);
+}
+#endif
+
+void ath12k_hal_srng_dst_invalidate_entry(struct ath12k_base *ab,
+					  struct hal_srng *srng, int entries)
+{
+	u32 *desc;
+	u32 tp, hp;
+
+
+	if (!(srng->flags & HAL_SRNG_FLAGS_CACHED) || !entries)
+	        return;
+
+	tp = srng->u.dst_ring.tp;
+	hp = srng->u.dst_ring.cached_hp;
+
+	desc = srng->ring_base_vaddr + tp;
+	if (hp > tp) {
+		dma_sync_single_for_cpu(ab->dev, virt_to_phys(desc),
+					entries * srng->entry_size * sizeof(u32),
+					DMA_FROM_DEVICE);
+	} else {
+		entries = srng->ring_size - tp;
+		dma_sync_single_for_cpu(ab->dev, virt_to_phys(desc),
+					entries * sizeof(u32),
+					DMA_FROM_DEVICE);
+
+		entries = hp;
+		dma_sync_single_for_cpu(ab->dev, virt_to_phys(srng->ring_base_vaddr),
+					entries * sizeof(u32),
+					DMA_FROM_DEVICE);
+	}
+}
+
 int ath12k_hal_srng_dst_num_free(struct ath12k_base *ab, struct hal_srng *srng,
 				 bool sync_hw_ptr)
 {
@@ -1730,11 +2068,15 @@
 {
 	lockdep_assert_held(&srng->lock);
 
-	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
 		srng->u.src_ring.cached_tp =
 			*(volatile u32 *)srng->u.src_ring.tp_addr;
-	else
+	} else {
 		srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
+		/* Try to prefetch the next descriptor in the ring */
+		if (srng->flags & HAL_SRNG_FLAGS_CACHED)
+			ath12k_hal_srng_prefetch_desc(ab, srng);
+	}
 }
 
 /* Update cached ring head/tail pointers to HW. ath12k_hal_srng_access_begin()
@@ -1883,9 +2225,65 @@
 			   val);
 }
 
-int ath12k_hal_srng_setup(struct ath12k_base *ab, enum hal_ring_type type,
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+static void hal_tx_ppe2tcl_ring_halt_set(struct ath12k_base *ab)
+{
+	u32 cmn_reg_addr;
+	u32 regval;
+
+	cmn_reg_addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_CMN_CTRL_REG;
+	regval = ath12k_hif_read32(ab, cmn_reg_addr);
+
+	regval |= (1 << HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_PPE2TCL1_RNG_HALT_SHFT);
+
+	/* Enable ring halt for the ppe2tcl ring */
+	ath12k_hif_write32(ab, cmn_reg_addr, regval);
+}
+
+static void hal_tx_ppe2tcl_ring_halt_reset(struct ath12k_base *ab)
+{
+	u32 cmn_reg_addr;
+	u32 regval;
+
+	cmn_reg_addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_CMN_CTRL_REG;
+	regval = ath12k_hif_read32(ab, cmn_reg_addr);
+
+	regval &= ~(1 << HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_PPE2TCL1_RNG_HALT_SHFT);
+
+	/* Disable ring halt for the ppe2tcl ring */
+	ath12k_hif_write32(ab, cmn_reg_addr, regval);
+}
+
+static bool hal_tx_ppe2tcl_ring_halt_done(struct ath12k_base *ab)
+{
+	u32 cmn_reg_addr;
+	u32 regval;
+
+	cmn_reg_addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_CMN_CTRL_REG;
+
+	regval = ath12k_hif_read32(ab, cmn_reg_addr);
+
+	regval &= (1 << HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_PPE2TCL1_RNG_HALT_STAT_SHFT);
+
+	return !!regval;
+}
+
+static bool hal_tx_ppe2tcl_ring_halt_get(struct ath12k_base *ab)
+{
+	u32 cmn_reg_addr;
+	u32 regval;
+
+	cmn_reg_addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_CMN_CTRL_REG;
+	regval = ath12k_hif_read32(ab, cmn_reg_addr);
+
+	return (regval &
+		1 << HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_PPE2TCL1_RNG_HALT_SHFT);
+}
+#endif
+
+int ath12k_hal_srng_setup_idx(struct ath12k_base *ab, enum hal_ring_type type,
 			  int ring_num, int mac_id,
-			  struct hal_srng_params *params)
+			  struct hal_srng_params *params, u32 res_idx)
 {
 	struct ath12k_hal *hal = &ab->hal;
 	struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
@@ -1894,6 +2292,9 @@
 	u32 idx;
 	int i;
 	u32 reg_base;
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	u32 retry_count = 0;
+#endif
 
 	ring_id = ath12k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
 	if (ring_id < 0)
@@ -1928,6 +2329,12 @@
 	memset(srng->ring_base_vaddr, 0,
 	       (srng->entry_size * srng->num_entries) << 2);
 
+	if (srng->flags & HAL_SRNG_FLAGS_CACHED) {
+		dmac_inv_range_no_dsb(srng->ring_base_vaddr,
+				      srng->ring_base_vaddr +
+				      ((srng->entry_size * srng->num_entries)));
+	}
+
 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
 
 	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
@@ -1937,21 +2344,42 @@
 		srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
 		srng->u.src_ring.low_threshold = params->low_threshold *
 						 srng->entry_size;
+
+		if (srng->u.src_ring.tp_addr)
+			*srng->u.src_ring.tp_addr = 0;
+
 		if (srng_config->mac_type == ATH12K_HAL_SRNG_UMAC) {
-			if (!ab->hw_params->supports_shadow_regs)
+			if (!ab->hw_params->supports_shadow_regs) {
 				srng->u.src_ring.hp_addr =
 					(u32 *)((unsigned long)ab->mem + reg_base);
-			else
+				if (type  == HAL_TCL_DATA) {
+					if (ab->hif.bus == ATH12K_BUS_PCI ||
+					    ab->hif.bus == ATH12K_BUS_HYBRID){
+						srng->u.src_ring.hp_addr_direct =
+							(u32 *)((unsigned long)ab->mem +
+							HAL_DP_REG_WINDOW_OFFSET +
+							(reg_base & WINDOW_RANGE_MASK));
+					} else {
+						srng->u.src_ring.hp_addr_direct =
+							srng->u.src_ring.hp_addr;
+					}
+				}
+			} else {
 				ath12k_dbg(ab, ATH12K_DBG_HAL,
 					   "hal type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
 					   type, ring_num,
 					   reg_base,
 					   (unsigned long)srng->u.src_ring.hp_addr -
 					   (unsigned long)ab->mem);
+			}
 		} else {
 			idx = ring_id - HAL_SRNG_RING_ID_DMAC_CMN_ID_START;
 			srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
 						   idx);
+
+			if (srng->u.src_ring.hp_addr)
+				*srng->u.src_ring.hp_addr = 0;
+
 			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
 		}
 	} else {
@@ -1967,18 +2395,36 @@
 		srng->u.dst_ring.tp = 0;
 		srng->u.dst_ring.cached_hp = 0;
 		srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
+
+		if (srng->u.dst_ring.hp_addr)
+			*srng->u.dst_ring.hp_addr = 0;
+
 		if (srng_config->mac_type == ATH12K_HAL_SRNG_UMAC) {
-			if (!ab->hw_params->supports_shadow_regs)
+			if (!ab->hw_params->supports_shadow_regs) {
 				srng->u.dst_ring.tp_addr =
 					(u32 *)((unsigned long)ab->mem + reg_base +
 					(HAL_REO1_RING_TP - HAL_REO1_RING_HP));
-			else
+				if (type  == HAL_WBM2SW_RELEASE) {
+					if (ab->hif.bus == ATH12K_BUS_PCI ||
+					    ab->hif.bus == ATH12K_BUS_HYBRID) {
+						srng->u.dst_ring.tp_addr_direct =
+							(u32 *)((unsigned long)ab->mem +
+							(reg_base & WINDOW_RANGE_MASK) +
+							HAL_DP_REG_WINDOW_OFFSET +
+							(HAL_REO1_RING_TP - HAL_REO1_RING_HP));
+					} else {
+						srng->u.dst_ring.tp_addr_direct =
+							srng->u.dst_ring.tp_addr;
+					}
+				}
+			} else {
 				ath12k_dbg(ab, ATH12K_DBG_HAL,
 					   "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
 					   type, ring_num,
 					   reg_base + HAL_REO1_RING_TP - HAL_REO1_RING_HP,
 					   (unsigned long)srng->u.dst_ring.tp_addr -
 					   (unsigned long)ab->mem);
+			}
 		} else {
 			/* For PMAC & DMAC rings, tail pointer updates will be done
 			 * through FW by writing to a shared memory location
@@ -1986,6 +2432,10 @@
 			idx = ring_id - HAL_SRNG_RING_ID_DMAC_CMN_ID_START;
 			srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
 						   idx);
+
+			if (srng->u.dst_ring.tp_addr)
+				*srng->u.dst_ring.tp_addr = 0;
+
 			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
 		}
 	}
@@ -1993,8 +2443,41 @@
 	if (srng_config->mac_type != ATH12K_HAL_SRNG_UMAC)
 		return ring_id;
 
-	ath12k_hal_srng_hw_init(ab, srng);
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	if (res_idx) {
+		/* During UMAC reset Tx ring halt is set
+		 * by Wi-Fi FW during pre-reset stage.
+		 * Hence skip halting the rings again
+		 */
+		if (ath12k_dp_umac_reset_in_progress(ab)) {
+			if (!(hal_tx_ppe2tcl_ring_halt_get(ab))) {
+			        ath12k_warn(ab, "TX ring halt not set\n");
+			        WARN_ON(1);
+			}
+			ath12k_hal_srng_hw_init(ab, srng, res_idx);
+		} else {
+			hal_tx_ppe2tcl_ring_halt_set(ab);
+			do {
+				ath12k_warn(ab, "Waiting for ring reset, retried count: %d\n",
+					    retry_count);
+				mdelay(RING_HALT_TIMEOUT);
+				retry_count++;
+			} while (!(hal_tx_ppe2tcl_ring_halt_done(ab)) &&
+				 (retry_count < RNG_HALT_STAT_RETRY_COUNT));
+
+			if (retry_count >= RNG_HALT_STAT_RETRY_COUNT)
+				ath12k_err(ab, "Ring halt is failed, retried count: %d\n",
+					   retry_count);
 
+			ath12k_hal_srng_hw_init(ab, srng, res_idx);
+			hal_tx_ppe2tcl_ring_halt_reset(ab);
+		}
+	}
+	else
+		ath12k_hal_srng_hw_init(ab, srng, 0);
+#else
+		ath12k_hal_srng_hw_init(ab, srng, 0);
+#endif
 	if (type == HAL_CE_DST) {
 		srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
 		ath12k_hal_ce_dst_setup(ab, srng, ring_num);
@@ -2088,6 +2571,15 @@
 	}
 }
 
+void ath12k_hal_reo_config_reo2ppe_dest_info(struct ath12k_base *ab)
+{
+	u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+	u32 val = HAL_REO1_REO2PPE_DST_VAL;
+
+	ath12k_hif_write32(ab, reo_base + HAL_REO1_REO2PPE_DST_INFO,
+                          val);
+}
+
 void ath12k_hal_srng_get_shadow_config(struct ath12k_base *ab,
 				       u32 **cfg, u32 *len)
 {
@@ -2183,13 +2675,16 @@
 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
 			continue;
 
-		ath12k_err(ab, "CE_id %d pipe_num %d %ums before\n",
+		ath12k_err(ab,"CE_id %d pipe_num %d %ums before ce_manual_poll_count %d "
+				"ce_last_manual_tasklet_schedule_ts %ums before\n",
 			   i, ce_pipe->pipe_num,
-			   jiffies_to_msecs(jiffies - ce_pipe->timestamp));
+			   jiffies_to_msecs(jiffies - ce_pipe->timestamp),
+			   ce_pipe->ce_manual_poll_count,
+			   jiffies_to_msecs(jiffies - ce_pipe->last_ce_manual_poll_ts));
 	}
 
 	ath12k_err(ab, "\nLast interrupt received for each group:\n");
-	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX(ab); i++) {
 		irq_grp = &ab->ext_irq_grp[i];
 		ath12k_err(ab, "group_id %d %ums before\n",
 			   irq_grp->grp_id,
@@ -2220,3 +2715,67 @@
 				   jiffies_to_msecs(jiffies - srng->timestamp));
 	}
 }
+
+ssize_t ath12k_debugfs_hal_dump_srng_stats(struct ath12k_base *ab, char *buf, int size)
+{
+	struct hal_srng *srng;
+	struct ath12k_ext_irq_grp *irq_grp;
+	struct ath12k_ce_pipe *ce_pipe;
+	unsigned int len = 0;
+	int i;
+
+	len += scnprintf(buf + len, size - len, "Last interrupt received for each CE:\n");
+	for (i = 0; i < ab->hw_params->ce_count; i++) {
+		ce_pipe = &ab->ce.ce_pipe[i];
+
+		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+			continue;
+
+		spin_lock_bh(&ab->ce.ce_lock);
+		len += scnprintf(buf + len, size - len,
+				"CE_id %d pipe_num %d %ums before ce_manual_poll_count %d ce_last_manual_tasklet_schedule_ts %ums before\n",
+			   i, ce_pipe->pipe_num,
+			   jiffies_to_msecs(jiffies - ce_pipe->timestamp),
+			   ce_pipe->ce_manual_poll_count,
+			   jiffies_to_msecs(jiffies - ce_pipe->last_ce_manual_poll_ts));
+		spin_unlock_bh(&ab->ce.ce_lock);
+	}
+
+	len += scnprintf(buf + len, size - len, "\nLast interrupt received for each group:\n");
+	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX(ab); i++) {
+		irq_grp = &ab->ext_irq_grp[i];
+		len += scnprintf(buf + len, size - len, "group_id %d %ums before\n",
+			   irq_grp->grp_id,
+			   jiffies_to_msecs(jiffies - irq_grp->timestamp));
+	}
+
+	for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
+		srng = &ab->hal.srng_list[i];
+
+		spin_lock_bh(&srng->lock);
+		if (!srng->initialized) {
+			spin_unlock_bh(&srng->lock);
+			continue;
+		}
+
+		if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+			len += scnprintf(buf + len, size - len,
+				   "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
+				   srng->ring_id, srng->u.src_ring.hp,
+				   srng->u.src_ring.reap_hp,
+				   *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
+				   srng->u.src_ring.last_tp,
+				   jiffies_to_msecs(jiffies - srng->timestamp));
+		else if (srng->ring_dir == HAL_SRNG_DIR_DST)
+			len += scnprintf(buf + len, size - len,
+				   "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
+				   srng->ring_id, srng->u.dst_ring.tp,
+				   *srng->u.dst_ring.hp_addr,
+				   srng->u.dst_ring.cached_hp,
+				   srng->u.dst_ring.last_hp,
+				   jiffies_to_msecs(jiffies - srng->timestamp));
+		spin_unlock_bh(&srng->lock);
+	}
+
+	return len;
+}
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/hal.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hal.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/hal.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hal.h	2024-01-19 17:01:19.861846921 +0100
@@ -9,8 +9,11 @@
 
 #include "hal_desc.h"
 #include "rx_desc.h"
+#include "core.h"
 
 struct ath12k_base;
+struct ath12k_dp_rx_info;
+#define HAL_CE_REMAP_REG_BASE	(ab->ce_remap_base_addr)
 
 #define HAL_LINK_DESC_SIZE			(32 << 2)
 #define HAL_LINK_DESC_ALIGN			128
@@ -40,25 +43,42 @@
 
 #define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x)))
 
+#define HAL_REO_QDESC_MAX_PEERID		8191
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+#define HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_PPE2TCL1_RNG_HALT_SHFT	10
+#define HWIO_TCL_R0_CONS_RING_CMN_CTRL_REG_PPE2TCL1_RNG_HALT_STAT_SHFT  18
+#define RNG_HALT_STAT_RETRY_COUNT 10
+#endif
+
 /* WCSS Relative address */
 #define HAL_SEQ_WCSS_UMAC_OFFSET		0x00a00000
 #define HAL_SEQ_WCSS_UMAC_REO_REG		0x00a38000
 #define HAL_SEQ_WCSS_UMAC_TCL_REG		0x00a44000
-#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG		0x01b80000
-#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG		0x01b81000
-#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG		0x01b82000
-#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG		0x01b83000
+
+#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) \
+	((ab)->hw_params->regs->hal_umac_ce0_src_reg_base)
+#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) \
+	((ab)->hw_params->regs->hal_umac_ce0_dest_reg_base)
+#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) \
+	((ab)->hw_params->regs->hal_umac_ce1_src_reg_base)
+#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) \
+	((ab)->hw_params->regs->hal_umac_ce1_dest_reg_base)
+
 #define HAL_SEQ_WCSS_UMAC_WBM_REG		0x00a34000
 
 #define HAL_CE_WFSS_CE_REG_BASE			0x01b80000
+#define HAL_DP_REG_WINDOW_OFFSET		0x180000
 
 #define HAL_TCL_SW_CONFIG_BANK_ADDR		0x00a4408c
 
 /* SW2TCL(x) R0 ring configuration address */
 #define HAL_TCL1_RING_CMN_CTRL_REG		0x00000020
 #define HAL_TCL1_RING_DSCP_TID_MAP		0x00000240
-#define HAL_TCL1_RING_BASE_LSB			0x00000900
-#define HAL_TCL1_RING_BASE_MSB			0x00000904
+#define HAL_TCL1_RING_BASE_LSB(ab) \
+	((ab)->hw_params->regs->hal_tcl1_ring_base_lsb)
+#define HAL_TCL1_RING_BASE_MSB(ab) \
+	((ab)->hw_params->regs->hal_tcl1_ring_base_msb)
 #define HAL_TCL1_RING_ID(ab)			((ab)->hw_params->regs->hal_tcl1_ring_id)
 #define HAL_TCL1_RING_MISC(ab) \
 	((ab)->hw_params->regs->hal_tcl1_ring_misc)
@@ -76,30 +96,31 @@
 	((ab)->hw_params->regs->hal_tcl1_ring_msi1_base_msb)
 #define HAL_TCL1_RING_MSI1_DATA(ab) \
 	((ab)->hw_params->regs->hal_tcl1_ring_msi1_data)
-#define HAL_TCL2_RING_BASE_LSB			0x00000978
+#define HAL_TCL2_RING_BASE_LSB(ab) \
+	((ab)->hw_params->regs->hal_tcl2_ring_base_lsb)
 #define HAL_TCL_RING_BASE_LSB(ab) \
 	((ab)->hw_params->regs->hal_tcl_ring_base_lsb)
 
 #define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab)				\
-	(HAL_TCL1_RING_MSI1_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB)
+	(HAL_TCL1_RING_MSI1_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
 #define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab)				\
-	(HAL_TCL1_RING_MSI1_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB)
+	(HAL_TCL1_RING_MSI1_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
 #define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab)				\
-	(HAL_TCL1_RING_MSI1_DATA(ab) - HAL_TCL1_RING_BASE_LSB)
+	(HAL_TCL1_RING_MSI1_DATA(ab) - HAL_TCL1_RING_BASE_LSB(ab))
 #define HAL_TCL1_RING_BASE_MSB_OFFSET				\
-	(HAL_TCL1_RING_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
+	(HAL_TCL1_RING_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
 #define HAL_TCL1_RING_ID_OFFSET(ab)				\
-	(HAL_TCL1_RING_ID(ab) - HAL_TCL1_RING_BASE_LSB)
+	(HAL_TCL1_RING_ID(ab) - HAL_TCL1_RING_BASE_LSB(ab))
 #define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab)			\
-	(HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) - HAL_TCL1_RING_BASE_LSB)
+	(HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) - HAL_TCL1_RING_BASE_LSB(ab))
 #define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) \
-		(HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) - HAL_TCL1_RING_BASE_LSB)
+		(HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) - HAL_TCL1_RING_BASE_LSB(ab))
 #define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) \
-		(HAL_TCL1_RING_TP_ADDR_LSB(ab) - HAL_TCL1_RING_BASE_LSB)
+		(HAL_TCL1_RING_TP_ADDR_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
 #define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) \
-		(HAL_TCL1_RING_TP_ADDR_MSB(ab) - HAL_TCL1_RING_BASE_LSB)
+		(HAL_TCL1_RING_TP_ADDR_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
 #define HAL_TCL1_RING_MISC_OFFSET(ab) \
-		(HAL_TCL1_RING_MISC(ab) - HAL_TCL1_RING_BASE_LSB)
+		(HAL_TCL1_RING_MISC(ab) - HAL_TCL1_RING_BASE_LSB(ab))
 
 /* SW2TCL(x) R2 ring pointers (head/tail) address */
 #define HAL_TCL1_RING_HP			0x00002000
@@ -110,20 +131,22 @@
 #define HAL_TCL1_RING_TP_OFFSET \
 		(HAL_TCL1_RING_TP - HAL_TCL1_RING_HP)
 
+#define HAL_TCL_RBM_MAPPING0_ADDR_OFFSET	0x00000088
+#define HAL_TCL_RBM_MAPPING_SHFT 4
+#define HAL_TCL_RBM_MAPPING_BMSK 0xF
+#define HAL_TCL_RBM_MAPPING_PPE2TCL_OFFSET  7
+#define HAL_TCL_RBM_MAPPING_TCL_CMD_CREDIT_OFFSET  6
+
 /* TCL STATUS ring address */
 #define HAL_TCL_STATUS_RING_BASE_LSB(ab) \
 	((ab)->hw_params->regs->hal_tcl_status_ring_base_lsb)
 #define HAL_TCL_STATUS_RING_HP			0x00002048
 
 /* PPE2TCL1 Ring address */
-#define HAL_TCL_PPE2TCL1_RING_BASE_LSB		0x00000c48
+#define HAL_TCL_PPE2TCL1_RING_BASE_LSB(ab) \
+	((ab)->hw_params->regs->hal_tcl_ppe2tcl_ring_base_lsb)
 #define HAL_TCL_PPE2TCL1_RING_HP		0x00002038
 
-/* WBM PPE Release Ring address */
-#define HAL_WBM_PPE_RELEASE_RING_BASE_LSB(ab) \
-	((ab)->hw_params->regs->hal_ppe_rel_ring_base)
-#define HAL_WBM_PPE_RELEASE_RING_HP		0x00003020
-
 /* REO2SW(x) R0 ring configuration address */
 #define HAL_REO1_GEN_ENABLE			0x00000000
 #define HAL_REO1_MISC_CTRL_ADDR(ab) \
@@ -132,6 +155,8 @@
 #define HAL_REO1_DEST_RING_CTRL_IX_1		0x00000008
 #define HAL_REO1_DEST_RING_CTRL_IX_2		0x0000000c
 #define HAL_REO1_DEST_RING_CTRL_IX_3		0x00000010
+#define HAL_REO1_QDESC_ADDR_READ(ab)	((ab)->hw_params->regs->hal_reo1_qdesc_addr_read)
+#define HAL_REO1_QDESC_MAX_PEERID(ab)	((ab)->hw_params->regs->hal_reo1_qdesc_max_peerid)
 #define HAL_REO1_SW_COOKIE_CFG0(ab)	((ab)->hw_params->regs->hal_reo1_sw_cookie_cfg0)
 #define HAL_REO1_SW_COOKIE_CFG1(ab)	((ab)->hw_params->regs->hal_reo1_sw_cookie_cfg1)
 #define HAL_REO1_QDESC_LUT_BASE0(ab)	((ab)->hw_params->regs->hal_reo1_qdesc_lut_base0)
@@ -155,6 +180,28 @@
 #define HAL_REO1_AGING_THRESH_IX_2(ab)	((ab)->hw_params->regs->hal_reo1_aging_thres_ix2)
 #define HAL_REO1_AGING_THRESH_IX_3(ab)	((ab)->hw_params->regs->hal_reo1_aging_thres_ix3)
 
+#define HAL_REO1_REO2PPE_DST_VAL		0x2000
+#define HAL_REO1_REO2PPE_DST_INFO		0x00000cf0
+
+#define HAL_REO1_RING_MSI1_BASE_LSB_OFFSET \
+		(HAL_REO1_RING_MSI1_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_MSI1_BASE_MSB_OFFSET \
+		(HAL_REO1_RING_MSI1_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_MSI1_DATA_OFFSET \
+		(HAL_REO1_RING_MSI1_DATA(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_BASE_MSB_OFFSET \
+		(HAL_REO1_RING_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_ID_OFFSET(ab) (HAL_REO1_RING_ID(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET \
+		(HAL_REO1_RING_PRODUCER_INT_SETUP(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_HP_ADDR_LSB_OFFSET \
+		(HAL_REO1_RING_HP_ADDR_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_HP_ADDR_MSB_OFFSET \
+		(HAL_REO1_RING_HP_ADDR_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_MISC_OFFSET \
+		(HAL_REO1_RING_MISC(ab) - HAL_REO1_RING_BASE_LSB(ab))
+
+
 /* REO2SW(x) R2 ring pointers (head/tail) address */
 #define HAL_REO1_RING_HP			0x00003048
 #define HAL_REO1_RING_TP			0x0000304c
@@ -201,6 +248,12 @@
 	((ab)->hw_params->regs->hal_reo_status_ring_base)
 #define HAL_REO_STATUS_HP			0x000030a8
 
+
+/* REO2PPE address */
+#define HAL_REO2PPE_RING_BASE_LSB(ab) \
+		((ab)->hw_params->regs->hal_reo2ppe_ring_base)
+#define HAL_REO2PPE_HP				0x00003090
+
 /* WBM Idle R0 address */
 #define HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab) \
 	((ab)->hw_params->regs->hal_wbm_idle_ring_base_lsb)
@@ -268,6 +321,7 @@
 #define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN		BIT(3)
 #define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN		BIT(4)
 #define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN		BIT(5)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW6_EN		BIT(7)
 #define HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN		BIT(8)
 
 /* TCL ring field mask and offset */
@@ -319,6 +373,8 @@
 #define HAL_REO1_SW_COOKIE_CFG_ALIGN			BIT(18)
 #define HAL_REO1_SW_COOKIE_CFG_ENABLE			BIT(19)
 #define HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE		BIT(20)
+#define HAL_REO_QDESC_ADDR_READ_LUT_ENABLE		BIT(7)
+#define HAL_REO_QDESC_ADDR_READ_CLEAR_QDESC_ARRAY	BIT(6)
 
 /* CE ring bit field mask and shift */
 #define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN			GENMASK(15, 0)
@@ -347,6 +403,8 @@
 #define HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE		0x0000ffff
 #define HAL_REO_CMD_RING_BASE_MSB_RING_SIZE		0x0000ffff
 #define HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE		0x0000ffff
+#define HAL_REO2PPE_RING_BASE_MSB_RING_SIZE		0xffffffff
+#define HAL_PPE2TCL_RING_BASE_MSB_RING_SIZE		0x000fffff
 #define HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE		0x000fffff
 #define HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE		0x000fffff
 #define HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE		0x0000ffff
@@ -358,13 +416,32 @@
 #define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE	0x000fffff
 #define HAL_RXDMA_RING_MAX_SIZE				0x0000ffff
 #define HAL_RXDMA_RING_MAX_SIZE_BE			0x000fffff
-#define HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE	0x000fffff
+#define HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE	0x0000ffff
 
 #define HAL_WBM2SW_REL_ERR_RING_NUM 3
+#define HAL_WBM2SW_PPEDS_TX_CMPLN_MAP_ID 11
+#define HAL_WBM2SW_PPEDS_TX_CMPLN_RING_NUM 6
 /* Add any other errors here and return them in
  * ath12k_hal_rx_desc_get_err().
  */
 
+#define HAL_IPQ5332_CE_WFSS_REG_BASE	0x740000
+#define HAL_IPQ5332_CE_SIZE		0x200000
+
+#define HAL_IPQ5332_CMEM_REG_BASE	0xC100000
+#define HAL_IPQ5332_CMEM_SIZE		0x40000
+#define HAL_IPQ5332_CMEM_BASE		0x100000
+
+#define HAL_IPQ5332_PMM_REG_BASE	0xCB500FC
+#define HAL_IPQ5332_PMM_SIZE		0x100
+
+#define HAL_QCN6432_CE_WFSS_REG_BASE	0x1B80000
+#define HAL_QCN6432_CE_SIZE		0x200000
+
+#define HAL_QCN6432_CMEM_REG_BASE	0x00100000
+#define HAL_QCN6432_CMEM_SIZE		0x40000
+#define HAL_QCN6432_CMEM_BASE		0x100000
+
 enum hal_srng_ring_id {
 	HAL_SRNG_RING_ID_REO2SW0 = 0,
 	HAL_SRNG_RING_ID_REO2SW1,
@@ -485,8 +562,8 @@
 	HAL_SRNG_RING_ID_WMAC1_RXMON2SW0 = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
 	HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
 	HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
-	HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
 	HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0,
+	HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
 
 	HAL_SRNG_RING_ID_PMAC1_ID_END,
 };
@@ -512,6 +589,7 @@
 	HAL_REO_REINJECT,
 	HAL_REO_CMD,
 	HAL_REO_STATUS,
+	HAL_REO2PPE,
 	HAL_TCL_DATA,
 	HAL_TCL_CMD,
 	HAL_TCL_STATUS,
@@ -523,18 +601,41 @@
 	HAL_WBM2SW_RELEASE,
 	HAL_RXDMA_BUF,
 	HAL_RXDMA_DST,
-	HAL_RXDMA_MONITOR_BUF,
-	HAL_RXDMA_MONITOR_STATUS,
-	HAL_RXDMA_MONITOR_DST,
 	HAL_RXDMA_MONITOR_DESC,
+	HAL_RXDMA_MONITOR_STATUS,
 	HAL_RXDMA_DIR_BUF,
 	HAL_PPE2TCL,
 	HAL_PPE_RELEASE,
+	HAL_RXDMA_MONITOR_BUF,
 	HAL_TX_MONITOR_BUF,
+	HAL_RXDMA_MONITOR_DST,
 	HAL_TX_MONITOR_DST,
 	HAL_MAX_RING_TYPES,
 };
 
+#define PMM_REG_BASE_QCN9224	0xB500FC
+
+enum hal_scratch_reg_enum {
+	PMM_QTIMER_GLOBAL_OFFSET_LO_US,
+	PMM_QTIMER_GLOBAL_OFFSET_HI_US,
+	PMM_MAC0_TSF1_OFFSET_LO_US,
+	PMM_MAC0_TSF1_OFFSET_HI_US,
+	PMM_MAC0_TSF2_OFFSET_LO_US,
+	PMM_MAC0_TSF2_OFFSET_HI_US,
+	PMM_MAC1_TSF1_OFFSET_LO_US,
+	PMM_MAC1_TSF1_OFFSET_HI_US,
+	PMM_MAC1_TSF2_OFFSET_LO_US,
+	PMM_MAC1_TSF2_OFFSET_HI_US,
+	PMM_MLO_OFFSET_LO_US,
+	PMM_MLO_OFFSET_HI_US,
+	PMM_TQM_CLOCK_OFFSET_LO_US,
+	PMM_TQM_CLOCK_OFFSET_HI_US,
+	PMM_Q6_CRASH_REASON,
+	PMM_SCRATCH_TWT_OFFSET,
+	PMM_PMM_REG_MAX
+};
+
+#define WINDOW_RANGE_MASK GENMASK(18, 0)
 #define HAL_RX_MAX_BA_WINDOW	256
 
 #define HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC	(100 * 1000)
@@ -613,6 +714,7 @@
 #define HAL_SRNG_FLAGS_MSI_INTR			0x00020000
 #define HAL_SRNG_FLAGS_HIGH_THRESH_INTR_EN	0x00080000
 #define HAL_SRNG_FLAGS_LMAC_RING		0x80000000
+#define HAL_SRNG_FLAGS_CACHED                   0x20000000
 
 #define HAL_SRNG_TLV_HDR_TAG		GENMASK(9, 1)
 #define HAL_SRNG_TLV_HDR_LEN		GENMASK(25, 10)
@@ -699,6 +801,7 @@
 			 * accessed through SW structure
 			 */
 			u32 *tp_addr;
+			u32 *tp_addr_direct;
 
 			/* Current SW loop cnt */
 			u32 loop_cnt;
@@ -728,6 +831,7 @@
 			 * through SW structure
 			 */
 			u32 *hp_addr;
+			u32 *hp_addr_direct;
 
 			/* Low threshold - in number of ring entries */
 			u32 low_threshold;
@@ -739,13 +843,16 @@
 };
 
 /* Interrupt mitigation - Batch threshold in terms of number of frames */
-#define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256
-#define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128
+#define HAL_SRNG_INT_BATCH_THRESHOLD_PPE_WBM2SW_REL 256
+#define HAL_SRNG_INT_BATCH_THRESHOLD_TX 64
+#define HAL_SRNG_INT_BATCH_THRESHOLD_RX 64
+#define HAL_SRNG_INT_BATCH_THRESHOLD_PPE2TCL 0
 #define HAL_SRNG_INT_BATCH_THRESHOLD_OTHER 1
 
 /* Interrupt mitigation - timer threshold in us */
-#define HAL_SRNG_INT_TIMER_THRESHOLD_TX 1000
-#define HAL_SRNG_INT_TIMER_THRESHOLD_RX 500
+#define HAL_SRNG_INT_TIMER_THRESHOLD_TX 200
+#define HAL_SRNG_INT_TIMER_THRESHOLD_RX 200
+#define HAL_SRNG_INT_TIMER_THRESHOLD_PPE2TCL 30
 #define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 256
 
 enum hal_srng_mac_type {
@@ -812,6 +919,7 @@
 #define HAL_REO_CMD_FLG_FLUSH_ALL		BIT(6)
 #define HAL_REO_CMD_FLG_UNBLK_RESOURCE		BIT(7)
 #define HAL_REO_CMD_FLG_UNBLK_CACHE		BIT(8)
+#define HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC	BIT(9)
 
 /* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
 #define HAL_REO_CMD_UPD0_RX_QUEUE_NUM		BIT(8)
@@ -1061,12 +1169,12 @@
 	u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
 	u32 (*rx_desc_get_mpdu_start_offset)(void);
 	u32 (*rx_desc_get_msdu_end_offset)(void);
+	u8 (*rx_desc_get_msdu_intra_bss_bit)(struct hal_rx_desc *desc);
 	bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
 	u8* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);
-	bool (*rx_desc_is_mcbc)(struct hal_rx_desc *desc);
+	bool (*rx_desc_is_da_mcbc)(struct hal_rx_desc *desc);
 	void (*rx_desc_get_dot11_hdr)(struct hal_rx_desc *desc,
 				      struct ieee80211_hdr *hdr);
-	u16 (*rx_desc_get_mpdu_frame_ctl)(struct hal_rx_desc *desc);
 	void (*rx_desc_get_crypto_header)(struct hal_rx_desc *desc,
 					  u8 *crypto_hdr,
 					  enum hal_encrypt_type enctype);
@@ -1076,11 +1184,25 @@
 	bool (*dp_rx_h_ip_cksum_fail)(struct hal_rx_desc *desc);
 	bool (*dp_rx_h_is_decrypted)(struct hal_rx_desc *desc);
 	u32 (*dp_rx_h_mpdu_err)(struct hal_rx_desc *desc);
+	u8 (*rx_desc_get_ip_valid)(struct hal_rx_desc *desc);
 	const struct ath12k_hal_tcl_to_wbm_rbm_map *tcl_to_wbm_rbm_map;
+	u16 (*rxdma_ring_wmask_rx_mpdu_start)(void);
+	u32 (*rxdma_ring_wmask_rx_msdu_end)(void);
+	u8 (*rx_desc_get_msdu_src_link_id)(struct hal_rx_desc *desc);
+	void (*rx_get_desc_info)(struct hal_rx_desc *desc,
+			         struct ath12k_dp_rx_info *info);
+	void (*rx_get_desc_msdulen_l3pad)(struct hal_rx_desc *desc,
+					  struct ath12k_dp_rx_info *info);
+	void (*rx_desc_get_first_last_msdu)(struct hal_rx_desc *desc,
+					    struct ath12k_dp_rx_info *info);
+	void (*hal_get_tsf2_scratch_reg)(struct ath12k_base *ab, u8 mac_id,
+					 u64 *value);
+	void (*hal_get_tqm_scratch_reg)(struct ath12k_base *ab, u64 *value);
 };
 
 extern const struct hal_ops hal_qcn9274_ops;
 extern const struct hal_ops hal_wcn7850_ops;
+extern const struct hal_ops hal_qcn6432_ops;
 
 u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
 void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
@@ -1098,8 +1220,8 @@
 				       struct hal_srng *srng);
 dma_addr_t ath12k_hal_srng_get_hp_addr(struct ath12k_base *ab,
 				       struct hal_srng *srng);
-void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
-				   dma_addr_t paddr);
+void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc,
+				   u32 cookie, dma_addr_t paddr, u8 rbm_id);
 u32 ath12k_hal_ce_get_desc_size(enum hal_ce_desc type);
 void ath12k_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc, dma_addr_t paddr,
 				u32 len, u32 id, u8 byte_swap_data);
@@ -1125,9 +1247,9 @@
 void ath12k_hal_srng_access_begin(struct ath12k_base *ab,
 				  struct hal_srng *srng);
 void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng);
-int ath12k_hal_srng_setup(struct ath12k_base *ab, enum hal_ring_type type,
+int ath12k_hal_srng_setup_idx(struct ath12k_base *ab, enum hal_ring_type type,
 			  int ring_num, int mac_id,
-			  struct hal_srng_params *params);
+			  struct hal_srng_params *params, u32 idx);
 int ath12k_hal_srng_init(struct ath12k_base *ath12k);
 void ath12k_hal_srng_deinit(struct ath12k_base *ath12k);
 void ath12k_hal_dump_srng_stats(struct ath12k_base *ab);
@@ -1139,4 +1261,95 @@
 void ath12k_hal_srng_shadow_config(struct ath12k_base *ab);
 void ath12k_hal_srng_shadow_update_hp_tp(struct ath12k_base *ab,
 					 struct hal_srng *srng);
+u32 *ath12k_hal_srng_dst_get_next_cache_entry(struct ath12k_base *ab,
+					      struct hal_srng *srng);
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+void ath12k_hal_srng_ppeds_dst_inv_entry(struct ath12k_base *ab,
+					  struct hal_srng *srng, int entries);
+#endif
+void ath12k_hal_srng_dst_invalidate_entry(struct ath12k_base *ab,
+					  struct hal_srng *srng, int entries);
+void ath12k_hal_reo_ring_ctrl_hash_ix3_setup(struct ath12k_base *ab,
+					     u32 ring_hash_map);
+void ath12k_hal_reo_ring_ctrl_hash_ix2_setup(struct ath12k_base *ab,
+					     u32 ring_hash_map);
+void ath12k_hal_reo_shared_qaddr_cache_clear(struct ath12k_base *ab);
+void ath12k_hal_srng_prefetch_desc(struct ath12k_base *ab,
+				   struct hal_srng *srng);
+void ath12k_hal_reo_config_reo2ppe_dest_info(struct ath12k_base *ab);
+
+static inline
+u32 *ath12k_hal_srng_src_get_next_entry_nolock(struct ath12k_base *ab,
+					       struct hal_srng *srng)
+{
+	u32 *desc;
+	u32 next_hp;
+
+	/* TODO: Using % is expensive, but we have to do this since size of some
+	 * SRNG rings is not power of 2 (due to descriptor sizes). Need to see
+	 * if separate function is defined for rings having power of 2 ring size
+	 * (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid the
+	 * overhead of % by using mask (with &).
+	 */
+	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
+
+	if (next_hp == srng->u.src_ring.cached_tp)
+		return NULL;
+
+	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
+	srng->u.src_ring.hp = next_hp;
+
+	/* TODO: Reap functionality is not used by all rings. If particular
+	 * ring does not use reap functionality, we need not update reap_hp
+	 * with next_hp pointer. Need to make sure a separate function is used
+	 * before doing any optimization by removing below code updating
+	 * reap_hp.
+	 */
+	srng->u.src_ring.reap_hp = next_hp;
+
+	return desc;
+}
+
+static inline
+void ath12k_hal_srng_access_src_ring_begin_nolock(struct hal_srng *srng)
+{
+	srng->u.src_ring.cached_tp = *(volatile u32 *)srng->u.src_ring.tp_addr;
+}
+
+static inline
+void ath12k_hal_srng_access_dst_ring_begin_nolock(struct ath12k_base *ab,
+						  struct hal_srng *srng)
+{
+	srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
+	/* Try to prefetch the next descriptor in the ring */
+	if (srng->flags & HAL_SRNG_FLAGS_CACHED)
+		ath12k_hal_srng_prefetch_desc(ab, srng);
+}
+
+static inline
+void ath12k_hal_srng_access_umac_src_ring_end_nolock(struct hal_srng *srng)
+{
+	srng->u.src_ring.last_tp =
+			*(volatile u32 *)srng->u.src_ring.tp_addr;
+	writel_relaxed(srng->u.src_ring.hp, srng->u.src_ring.hp_addr_direct);
+	srng->timestamp = jiffies;
+}
+
+static inline
+void ath12k_hal_srng_access_dst_ring_end_nolock(struct hal_srng *srng)
+{
+	srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
+	writel_relaxed(srng->u.dst_ring.tp, srng->u.dst_ring.tp_addr_direct);
+	srng->timestamp = jiffies;
+}
+
+static inline
+void ath12k_hal_srng_access_lmac_src_ring_end_nolock(struct hal_srng *srng)
+{
+	srng->u.src_ring.last_tp = *(volatile u32 *)srng->u.src_ring.tp_addr;
+	*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
+}
+
+
+ssize_t ath12k_debugfs_hal_dump_srng_stats(struct ath12k_base *ab, char *buf, int size);
 #endif
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/hal_desc.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hal_desc.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/hal_desc.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hal_desc.h	2024-01-19 17:01:19.865847030 +0100
@@ -522,7 +522,7 @@
 	HAL_PHYRXHT_SIG_USR_SU					= 468 /* 0x1d4 */,
 	HAL_PHYRXHT_SIG_USR_MU_MIMO				= 469 /* 0x1d5 */,
 	HAL_PHYRX_GENERIC_U_SIG					= 470 /* 0x1d6 */,
-	HAL_PHYRX_GENERICHT_SIG					= 471 /* 0x1d7 */,
+	HAL_PHYRX_GENERIC_EHT_SIG				= 471 /* 0x1d7 */,
 	HAL_OVERWRITE_RESP_START				= 472 /* 0x1d8 */,
 	HAL_OVERWRITE_RESP_PREAMBLE_INFO			= 473 /* 0x1d9 */,
 	HAL_OVERWRITE_RESP_FRAME_INFO				= 474 /* 0x1da */,
@@ -580,11 +580,20 @@
 #define HAL_TLV_64_HDR_TAG		GENMASK(9, 1)
 #define HAL_TLV_64_HDR_LEN		GENMASK(21, 10)
 
+#define HAL_TLV_64_ALIGN 8
+
 struct hal_tlv_64_hdr {
 	u64 tl;
 	u8 value[];
 } __packed;
 
+struct hal_tlv_parsed_hdr {
+	u16 tlv_tag;
+	u16 tlv_len;
+	u16 tlv_userid;
+	u8 *tlv_data;
+};
+
 #define RX_MPDU_DESC_INFO0_MSDU_COUNT		GENMASK(7, 0)
 #define RX_MPDU_DESC_INFO0_FRAG_FLAG		BIT(8)
 #define RX_MPDU_DESC_INFO0_MPDU_RETRY		BIT(9)
@@ -597,8 +606,7 @@
 #define RX_MPDU_DESC_INFO0_MPDU_QOS_CTRL_VALID	BIT(27)
 #define RX_MPDU_DESC_INFO0_TID			GENMASK(31, 28)
 
-/* TODO revisit after meta data is concluded */
-#define RX_MPDU_DESC_META_DATA_PEER_ID		GENMASK(15, 0)
+#define RX_MPDU_DESC_META_DATA_PEER_ID		GENMASK(13, 0)
 
 struct rx_mpdu_desc {
 	__le32 info0; /* %RX_MPDU_DESC_INFO */
@@ -915,53 +923,6 @@
  *		this ring has looped around the ring.
  */
 
-#define HAL_REO_TO_PPE_RING_INFO0_DATA_LENGTH	GENMASK(15, 0)
-#define HAL_REO_TO_PPE_RING_INFO0_DATA_OFFSET	GENMASK(23, 16)
-#define HAL_REO_TO_PPE_RING_INFO0_POOL_ID	GENMASK(28, 24)
-#define HAL_REO_TO_PPE_RING_INFO0_PREHEADER	BIT(29)
-#define HAL_REO_TO_PPE_RING_INFO0_TSO_EN	BIT(30)
-#define HAL_REO_TO_PPE_RING_INFO0_MORE	BIT(31)
-
-struct hal_reo_to_ppe_ring {
-	__le32 buffer_addr;
-	__le32 info0; /* %HAL_REO_TO_PPE_RING_INFO0_ */
-} __packed;
-
-/* hal_reo_to_ppe_ring
- *
- *		Producer: REO
- *		Consumer: PPE
- *
- * buf_addr_info
- *		Details of the physical address of a buffer or MSDU
- *		link descriptor.
- *
- * data_length
- *		Length of valid data in bytes
- *
- * data_offset
- *		Offset to the data from buffer pointer. Can be used to
- *		strip header in the data for tunnel termination etc.
- *
- * pool_id
- *		REO has global configuration register for this field.
- *		It may have several free buffer pools, each
- *		RX-Descriptor ring can fetch free buffer from specific
- *		buffer pool; pool id will indicate which pool the buffer
- *		will be released to; POOL_ID Zero returned to SW
- *
- * preheader
- *		Disabled: 0 (Default)
- *		Enabled: 1
- *
- * tso_en
- *		Disabled: 0 (Default)
- *		Enabled: 1
- *
- * more
- *		More Segments followed
- */
-
 enum hal_reo_entr_rxdma_push_reason {
 	HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_ERR_DETECTED,
 	HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_ROUTING_INSTRUCTION,
@@ -984,6 +945,10 @@
 	HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR,
 	HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR,
 	HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_FRAG_ERR,
+	HAL_REO_ENTR_RING_RXDMA_ECODE_MULTICAST_ECHO_ERR,
+	HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_ADDR_MISMATCH_ERR,
+	HAL_REO_ENTR_RING_RXDMA_ECODE_UNAUTH_WDS_ERR,
+	HAL_REO_ENTR_RING_RXDMA_ECODE_GROUPCAST_AMSDU_OR_WDS_ERR,
 	HAL_REO_ENTR_RING_RXDMA_ECODE_MAX,
 };
 
@@ -1197,6 +1162,7 @@
 #define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE	BIT(12)
 #define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE	BIT(13)
 #define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL		BIT(14)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_QUEUE_1K_DESC	BIT(15)
 
 struct hal_reo_flush_cache {
 	struct hal_reo_cmd_hdr cmd;
@@ -1239,6 +1205,7 @@
 
 #define HAL_TCL_DATA_CMD_INFO5_RING_ID			GENMASK(27, 20)
 #define HAL_TCL_DATA_CMD_INFO5_LOOPING_COUNT		GENMASK(31, 28)
+#define HAL_ENCRYPT_TYPE_MAX 12
 
 enum hal_encrypt_type {
 	HAL_ENCRYPT_TYPE_WEP_40,
@@ -1260,11 +1227,13 @@
 	HAL_TCL_ENCAP_TYPE_NATIVE_WIFI,
 	HAL_TCL_ENCAP_TYPE_ETHERNET,
 	HAL_TCL_ENCAP_TYPE_802_3 = 3,
+	HAL_TCL_ENCAP_TYPE_MAX
 };
 
 enum hal_tcl_desc_type {
 	HAL_TCL_DESC_TYPE_BUFFER,
 	HAL_TCL_DESC_TYPE_EXT_DESC,
+	HAL_TCL_DESC_TYPE_MAX,
 };
 
 enum hal_wbm_htt_tx_comp_status {
@@ -1274,9 +1243,16 @@
 	HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ,
 	HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT,
 	HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY,
+	HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH,
 	HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX,
 };
 
+#define TX_IP_CHECKSUM (HAL_TCL_DATA_CMD_INFO2_IP4_CKSUM_EN  | \
+			HAL_TCL_DATA_CMD_INFO2_UDP4_CKSUM_EN | \
+			HAL_TCL_DATA_CMD_INFO2_UDP6_CKSUM_EN | \
+			HAL_TCL_DATA_CMD_INFO2_TCP4_CKSUM_EN | \
+			HAL_TCL_DATA_CMD_INFO2_TCP6_CKSUM_EN)
+
 struct hal_tcl_data_cmd {
 	struct ath12k_buffer_addr buf_addr_info;
 	__le32 info0;
@@ -1782,6 +1758,7 @@
 	HAL_WBM_REL_SRC_MODULE_REO,
 	HAL_WBM_REL_SRC_MODULE_FW,
 	HAL_WBM_REL_SRC_MODULE_SW,
+	HAL_WBM_REL_SRC_MODULE_MAX,
 };
 
 enum hal_wbm_rel_desc_type {
@@ -1863,7 +1840,7 @@
 #define HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON	GENMASK(16, 13)
 #define HAL_WBM_COMPL_TX_INFO0_RBM_OVERRIDE_VLD		BIT(17)
 #define HAL_WBM_COMPL_TX_INFO0_SW_COOKIE_LO		GENMASK(29, 18)
-#define HAL_WBM_COMPL_TX_INFO0_CC_DONE			BIT(30)
+#define HAL_WBM_COMPL_TX_INFO0_CC_DONE			0x40000000
 #define HAL_WBM_COMPL_TX_INFO0_WBM_INTERNAL_ERROR	BIT(31)
 
 #define HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER	GENMASK(23, 0)
@@ -1960,6 +1937,7 @@
 	__le32 info1;
 } __packed;
 
+#define HAL_WBM_RELEASE_INFO0_REL_SRC_MASK		0x7
 #define HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE		GENMASK(2, 0)
 #define HAL_WBM_RELEASE_INFO0_BM_ACTION			GENMASK(5, 3)
 #define HAL_WBM_RELEASE_INFO0_DESC_TYPE			GENMASK(8, 6)
@@ -2048,6 +2026,19 @@
  *	fw with fw_reason2.
  * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3: Remove command initiated by
  *	fw with fw_reason3.
+ * @HAL_WBM_TQM_REL_REASON_CMD_DISABLE_QUEUE: Remove command initiated by
+ *	fw with disable queue.
+ * @HAL_WBM_TQM_REL_REASON_CMD_TILL_NONMATCHING: Remove command initiated by
+ *	fw to remove all mpdu until 1st non-match.
+ * @HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD: Dropped due to drop threshold
+ *	criteria
+ * @HAL_WBM_TQM_REL_REASON_DROP_LINK_DESC_UNAVAIL: Dropped due to link desc
+ *	not available
+ * @HAL_WBM_TQM_REL_REASON_DROP_OR_INVALID_MSDU: Dropped due drop bit set or
+ *	null flow
+ * @HAL_WBM_TQM_REL_REASON_MULTICAST_DROP: Dropped due mcast drop set for VDEV
+ * @HAL_WBM_TQM_REL_REASON_VDEV_MISMATCH_DROP: Dropped due to being set with
+ *	'TCL_drop_reason'
  */
 enum hal_wbm_tqm_rel_reason {
 	HAL_WBM_TQM_REL_REASON_FRAME_ACKED,
@@ -2058,6 +2049,16 @@
 	HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1,
 	HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2,
 	HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3,
+	HAL_WBM_TQM_REL_REASON_CMD_DISABLE_QUEUE,
+	HAL_WBM_TQM_REL_REASON_CMD_TILL_NONMATCHING,
+	HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD,
+	HAL_WBM_TQM_REL_REASON_DROP_LINK_DESC_UNAVAIL,
+	HAL_WBM_TQM_REL_REASON_DROP_OR_INVALID_MSDU,
+	HAL_WBM_TQM_REL_REASON_MULTICAST_DROP,
+	HAL_WBM_TQM_REL_REASON_VDEV_MISMATCH_DROP,
+
+	/* Keep Last */
+	HAL_WBM_TQM_REL_REASON_MAX,
 };
 
 struct hal_wbm_buffer_ring {
@@ -2314,6 +2315,34 @@
 	struct hal_rx_mpdu_link_ptr mpdu_link[15];
 } __packed;
 
+struct hal_rx_reo_queue_1k {
+	struct hal_desc_header desc_hdr;
+	u32 rx_bitmap_319_288;
+	u32 rx_bitmap_351_320;
+	u32 rx_bitmap_383_352;
+	u32 rx_bitmap_415_384;
+	u32 rx_bitmap_447_416;
+	u32 rx_bitmap_479_448;
+	u32 rx_bitmap_511_480;
+	u32 rx_bitmap_543_512;
+	u32 rx_bitmap_575_544;
+	u32 rx_bitmap_607_576;
+	u32 rx_bitmap_639_608;
+	u32 rx_bitmap_671_640;
+	u32 rx_bitmap_703_672;
+	u32 rx_bitmap_735_704;
+	u32 rx_bitmap_767_736;
+	u32 rx_bitmap_799_768;
+	u32 rx_bitmap_831_800;
+	u32 rx_bitmap_863_832;
+	u32 rx_bitmap_895_864;
+	u32 rx_bitmap_927_896;
+	u32 rx_bitmap_959_928;
+	u32 rx_bitmap_991_960;
+	u32 rx_bitmap_1023_992;
+	u32 rsvd[8];
+} __packed;
+
 /* hal_rx_reo_queue_ext
  *	Consumer: REO
  *	Producer: REO
@@ -2500,13 +2529,13 @@
 #define HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE		BIT(30)
 #define HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG		BIT(31)
 
-#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE		GENMASK(7, 0)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE			GENMASK(9, 8)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD				BIT(10)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN				GENMASK(22, 11)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR			BIT(23)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR			BIT(24)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID			BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE		GENMASK(9, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE			GENMASK(11, 10)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD				BIT(12)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN				GENMASK(24, 13)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR			BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR			BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID			BIT(27)
 
 struct hal_reo_update_rx_queue {
 	struct hal_reo_cmd_hdr cmd;
@@ -2883,24 +2912,22 @@
  *		entries into this Ring has looped around the ring.
  */
 
-#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_DATA_LENGTH	GENMASK(13, 0)
-#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_L4_CSUM_STATUS	BIT(14)
-#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_L3_CSUM_STATUS	BIT(15)
-#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_PID		GENMASK(27, 24)
-#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_QDISC		BIT(28)
-#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_MULTICAST	BIT(29)
-#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_MORE		BIT(30)
-#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_VALID_TOGGLE	BIT(31)
-
 struct hal_tcl_entrance_from_ppe_ring {
 	__le32 buffer_addr;
 	__le32 info0;
+	__le32 opaque_lo;
+	__le32 opaque_hi;
+	__le32 info1;
+	__le32 info2;
+	__le32 info3;
+	__le32 info4;
 } __packed;
 
 struct hal_mon_buf_ring {
 	__le32 paddr_lo;
 	__le32 paddr_hi;
-	__le64 cookie;
+	__le32 cookie;
+	__le32 magic;
 };
 
 /* hal_mon_buf_ring
@@ -2918,9 +2945,8 @@
 
 #define HAL_MON_DEST_COOKIE_BUF_ID      GENMASK(17, 0)
 
-#define HAL_MON_DEST_INFO0_END_OFFSET		GENMASK(15, 0)
-#define HAL_MON_DEST_INFO0_FLUSH_DETECTED	BIT(16)
-#define HAL_MON_DEST_INFO0_END_OF_PPDU		BIT(17)
+#define HAL_MON_DEST_INFO0_END_OFFSET		GENMASK(11, 0)
+#define HAL_MON_DEST_INFO0_END_REASON		GENMASK(17, 16)
 #define HAL_MON_DEST_INFO0_INITIATOR		BIT(18)
 #define HAL_MON_DEST_INFO0_EMPTY_DESC		BIT(19)
 #define HAL_MON_DEST_INFO0_RING_ID		GENMASK(27, 20)
@@ -2928,7 +2954,7 @@
 
 struct hal_mon_dest_desc {
 	__le32 cookie;
-	__le32 reserved;
+	__le32 magic;
 	__le32 ppdu_id;
 	__le32 info0;
 };
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/hal_rx.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hal_rx.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/hal_rx.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hal_rx.c	2024-01-19 17:01:19.865847030 +0100
@@ -89,6 +89,9 @@
 	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
 		desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL);
 
+	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC)
+		desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FLUSH_QUEUE_1K_DESC);
+
 	return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
 }
 
@@ -320,7 +323,7 @@
 {
 	enum hal_reo_dest_ring_push_reason push_reason;
 	enum hal_reo_dest_ring_error_code err_code;
-	u32 cookie, val;
+	u32 cookie;
 
 	push_reason = le32_get_bits(desc->info0,
 				    HAL_REO_DEST_RING_INFO0_PUSH_REASON);
@@ -335,12 +338,6 @@
 		return -EINVAL;
 	}
 
-	val = le32_get_bits(desc->info0, HAL_REO_DEST_RING_INFO0_BUFFER_TYPE);
-	if (val != HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) {
-		ath12k_warn(ab, "expected buffer type link_desc");
-		return -EINVAL;
-	}
-
 	ath12k_hal_rx_reo_ent_paddr_get(ab, &desc->buf_addr_info, paddr, &cookie);
 	*desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
 
@@ -689,6 +686,7 @@
 u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
 {
 	u32 num_ext_desc;
+	u32 num_1k_desc = 0;
 
 	if (ba_window_size <= 1) {
 		if (tid != HAL_DESC_REO_NON_QOS_TID)
@@ -699,12 +697,16 @@
 		num_ext_desc = 1;
 	} else if (ba_window_size <= 210) {
 		num_ext_desc = 2;
-	} else {
+	} else if (ba_window_size <= 256) {
 		num_ext_desc = 3;
+	} else {
+		num_ext_desc = 10;
+		num_1k_desc = 1;
 	}
 
 	return sizeof(struct hal_rx_reo_queue) +
-		(num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
+		(num_ext_desc * sizeof(struct hal_rx_reo_queue_ext)) +
+		(num_1k_desc * sizeof(struct hal_rx_reo_queue_1k));
 }
 
 void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
@@ -813,6 +815,38 @@
 	}
 }
 
+void ath12k_hal_reo_ring_ctrl_hash_ix0_setup(struct ath12k_base *ab)
+{
+	u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+	u32 curr, val;
+
+	curr = ath12k_hif_read32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0);
+	val = curr & ~(REO_DEST_CTRL_IX_0_RING6_MAP_MASK <<
+		       REO_DEST_CTRL_IX_0_RING6_MAP_SHFT);
+	val |= (REO2PPE_DST_RING_MAP << REO_DEST_CTRL_IX_0_RING6_MAP_SHFT);
+
+	ath12k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
+			   val);
+}
+
+void ath12k_hal_reo_ring_ctrl_hash_ix2_setup(struct ath12k_base *ab,
+					     u32 ring_hash_map)
+{
+	u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+
+	ath12k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+			   ring_hash_map);
+}
+
+void ath12k_hal_reo_ring_ctrl_hash_ix3_setup(struct ath12k_base *ab,
+					     u32 ring_hash_map)
+{
+	u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+
+	ath12k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+			   ring_hash_map);
+}
+
 void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map)
 {
 	u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
@@ -843,8 +877,75 @@
 	ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
 			   HAL_DEFAULT_VO_REO_TIMEOUT_USEC);
 
-	ath12k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
-			   ring_hash_map);
-	ath12k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
-			   ring_hash_map);
+	ath12k_hal_reo_ring_ctrl_hash_ix0_setup(ab);
+	ath12k_hal_reo_ring_ctrl_hash_ix2_setup(ab, ring_hash_map);
+	ath12k_hal_reo_ring_ctrl_hash_ix3_setup(ab, ring_hash_map);
+}
+
+/*
+ * Setting CLEAR_DESC_ARRAY field of WCSS_UMAC_REO_R0_QDESC_ADDR_READ
+ * and resetting back, to erase stale entries in reo storage.
+ */
+void ath12k_hal_reo_shared_qaddr_cache_clear(struct ath12k_base *ab)
+{
+	u32 val;
+
+	val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_REO_REG +
+				HAL_REO1_QDESC_ADDR_READ(ab));
+
+	val |= u32_encode_bits(1, HAL_REO_QDESC_ADDR_READ_CLEAR_QDESC_ARRAY);
+	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG +
+			   HAL_REO1_QDESC_ADDR_READ(ab), val);
+
+	val &= ~HAL_REO_QDESC_ADDR_READ_CLEAR_QDESC_ARRAY;
+	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG +
+			   HAL_REO1_QDESC_ADDR_READ(ab), val);
+}
+
+void ath12k_dp_reset_rx_reo_tid_q(void *vaddr, u32 ba_window_size,
+				  u8 tid)
+{
+	struct hal_rx_reo_queue *qdesc = (struct hal_rx_reo_queue *)vaddr;
+	struct hal_rx_reo_queue_ext *ext_desc;
+	u32 size, info0, info1, rx_queue_num;
+
+	size = ath12k_hal_reo_qdesc_size(ba_window_size, tid);
+
+	rx_queue_num = qdesc->rx_queue_num;
+	info0 = qdesc->info0;
+	info1 = qdesc->info1;
+
+	memset(qdesc, 0, size);
+
+	ath12k_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
+				    HAL_DESC_REO_QUEUE_DESC,
+				    REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);
+
+	qdesc->rx_queue_num = rx_queue_num;
+	qdesc->info0 = info0;
+	qdesc->info1 = info1;
+
+	qdesc->info1 |= u32_encode_bits(0, HAL_RX_REO_QUEUE_INFO1_SVLD);
+	qdesc->info1 |= u32_encode_bits(0,
+				       HAL_RX_REO_QUEUE_INFO1_SSN);
+
+	if (tid == HAL_DESC_REO_NON_QOS_TID)
+		return;
+
+	ext_desc = qdesc->ext_desc;
+	memset(ext_desc, 0, 3 * sizeof(*ext_desc));
+
+	ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+				    HAL_DESC_REO_QUEUE_EXT_DESC,
+				    REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
+	ext_desc++;
+
+	ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+				    HAL_DESC_REO_QUEUE_EXT_DESC,
+				    REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
+	ext_desc++;
+
+	ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+				    HAL_DESC_REO_QUEUE_EXT_DESC,
+				    REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
 }
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/hal_rx.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hal_rx.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/hal_rx.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hal_rx.h	2024-03-18 14:40:14.851741333 +0100
@@ -19,7 +19,7 @@
 	bool hw_cc_done;
 };
 
-#define HAL_INVALID_PEERID 0xffff
+#define HAL_INVALID_PEERID 0x3fff
 #define VHT_SIG_SU_NSS_MASK 0x7
 
 #define HAL_RX_MAX_MCS 12
@@ -61,6 +61,8 @@
 	HAL_RX_BW_40MHZ,
 	HAL_RX_BW_80MHZ,
 	HAL_RX_BW_160MHZ,
+	HAL_RX_BW_240MHZ,
+	HAL_RX_BW_320MHZ,
 	HAL_RX_BW_MAX,
 };
 
@@ -70,6 +72,7 @@
 	HAL_RX_PREAMBLE_11N,
 	HAL_RX_PREAMBLE_11AC,
 	HAL_RX_PREAMBLE_11AX,
+	HAL_RX_PREAMBLE_11BE = 6,
 	HAL_RX_PREAMBLE_MAX,
 };
 
@@ -107,9 +110,12 @@
 	HAL_RX_MON_STATUS_PPDU_NOT_DONE,
 	HAL_RX_MON_STATUS_PPDU_DONE,
 	HAL_RX_MON_STATUS_BUF_DONE,
+	HAL_RX_MON_STATUS_BUF_ADDR,
+	HAL_RX_MON_STATUS_MPDU_END,
+	HAL_RX_MON_STATUS_MSDU_END,
 };
 
-#define HAL_RX_MAX_MPDU		256
+#define HAL_RX_MAX_MPDU		1024
 #define HAL_RX_NUM_WORDS_PER_PPDU_BITMAP	(HAL_RX_MAX_MPDU >> 5)
 
 struct hal_rx_user_status {
@@ -142,9 +148,48 @@
 	u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP];
 	u32 mpdu_ok_byte_count;
 	u32 mpdu_err_byte_count;
+	u32 ampdu_id;
+	bool ampdu_present;
+};
+
+struct msdu_info {
+	u32 errmap;
+	u32 decap_format;
 };
 
 #define HAL_MAX_UL_MU_USERS	37
+#define HAL_RX_MAX_MSDU		256
+
+struct mon_mpdu_data {
+	struct sk_buff *head_msdu;
+	struct sk_buff *tail_msdu;
+	struct msdu_info msdu_info[HAL_RX_MAX_MSDU];
+	u16 msdu_count;
+};
+
+#define HAL_RX_MON_MAX_AGGR_SIZE	128
+
+struct hal_rx_tlv_aggr_info {
+	u8 in_progress;
+	u8 cur_len;
+	u16 tlv_tag;
+	u8 buf[HAL_RX_MON_MAX_AGGR_SIZE];
+};
+
+struct hal_rx_u_sig_info {
+	bool ul_dl;
+	u8 bw;
+	u8 ppdu_type_comp_mode;
+	u8 eht_sig_mcs;
+	u8 num_eht_sig_sym;
+};
+
+struct hal_rx_mon_cmn_mpdu_info {
+	struct mon_mpdu_data mon_mpdu[HAL_RX_MAX_MPDU];
+	u16 mpdu_count;
+};
+
+#define EHT_MAX_USER_INFO	4
 
 struct hal_rx_mon_ppdu_info {
 	u32 ppdu_id;
@@ -155,6 +200,7 @@
 	u32 preamble_type;
 	u32 mpdu_len;
 	u16 chan_num;
+	u16 freq;
 	u16 tcp_msdu_count;
 	u16 tcp_ack_msdu_count;
 	u16 udp_msdu_count;
@@ -229,23 +275,43 @@
 	bool first_msdu_in_mpdu;
 	bool is_ampdu;
 	u8 medium_prot_type;
+	bool ppdu_continuation;
+	struct hal_rx_tlv_aggr_info tlv_aggr;
+	u16 usig_flags;
+	u16 eht_flags;
+	u8 num_eht_user_info_valid;
+	u16 punctured_pattern;
+	u32 usig_common;
+	u32 usig_value;
+	u32 usig_mask;
+	u32 eht_known;
+	u32 eht_data[9];
+	u32 eht_user_info[EHT_MAX_USER_INFO];
+	struct hal_rx_u_sig_info u_sig_info;
+	struct hal_rx_mon_cmn_mpdu_info cmn_mpdu_info;
 };
 
 #define HAL_RX_PPDU_START_INFO0_PPDU_ID		GENMASK(15, 0)
+#define HAL_RX_PPDU_START_INFO1_CHAN_NUM	GENMASK(15, 0)
+#define HAL_RX_PPDU_START_INFO1_CHAN_FREQ	GENMASK(31, 16)
 
 struct hal_rx_ppdu_start {
 	__le32 info0;
-	__le32 chan_num;
-	__le32 ppdu_start_ts;
+	__le32 info1;
+	__le32 ppdu_start_ts_31_0;
+	__le32 ppdu_start_ts_63_32;
+	__le32 reserverd[2];
 } __packed;
 
-#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR	GENMASK(25, 16)
-
-#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK	GENMASK(8, 0)
-#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID		BIT(9)
-#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID		BIT(10)
-#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID		BIT(11)
-#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE		GENMASK(23, 20)
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID		GENMASK(13, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_CHIP_ID		GENMASK(15, 14)
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR	GENMASK(26, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK	GENMASK(10, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID		BIT(11)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID		BIT(12)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID		BIT(13)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE		GENMASK(24, 21)
 
 #define HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX		GENMASK(15, 0)
 #define HAL_RX_PPDU_END_USER_STATS_INFO2_FRAME_CTRL		GENMASK(31, 16)
@@ -261,8 +327,8 @@
 #define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP		GENMASK(15, 0)
 #define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP	GENMASK(31, 16)
 
-#define HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_OK_BYTE_COUNT	GENMASK(24, 0)
-#define HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_ERR_BYTE_COUNT	GENMASK(24, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO7_MPDU_OK_BYTE_COUNT	GENMASK(24, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_ERR_BYTE_COUNT	GENMASK(24, 0)
 
 struct hal_rx_ppdu_end_user_stats {
 	__le32 rsvd0[2];
@@ -293,6 +359,7 @@
 	__le32 info4;
 	__le32 info5;
 	__le32 info6;
+	__le32 rsvd;
 } __packed;
 
 #define HAL_RX_HT_SIG_INFO_INFO0_MCS		GENMASK(6, 0)
@@ -389,11 +456,9 @@
 #define HAL_RX_HE_SIG_A_MU_DL_INFO0_DOPPLER_INDICATION	BIT(25)
 
 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXOP_DURATION	GENMASK(6, 0)
-#define HAL_RX_HE_SIG_A_MU_DL_INFO1_CODING		BIT(7)
 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_NUM_LTF_SYMB	GENMASK(10, 8)
 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_LDPC_EXTRA		BIT(11)
 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC		BIT(12)
-#define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXBF		BIT(10)
 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_FACTOR	GENMASK(14, 13)
 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_PE_DISAM	BIT(15)
 
@@ -419,7 +484,7 @@
 
 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID	GENMASK(10, 0)
 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS	GENMASK(13, 11)
-#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF	BIT(19)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF	BIT(14)
 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS	GENMASK(18, 15)
 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM	BIT(19)
 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING	BIT(20)
@@ -435,30 +500,35 @@
 	HAL_RECEPTION_TYPE_FRAMELESS
 };
 
-#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB	GENMASK(15, 8)
-#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_RSVD1_RECEPTION   GENMASK(3, 0)
+#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RECEPTION   GENMASK(3, 0)
+#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW	GENMASK(7, 5)
+#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB   GENMASK(15, 8)
 
 struct hal_rx_phyrx_rssi_legacy_info {
-	__le32 rsvd[35];
 	__le32 info0;
+	__le32 rsvd0[39];
+	__le32 info1;
+	__le32 rsvd2;
 } __packed;
 
 #define HAL_RX_MPDU_START_INFO0_PPDU_ID	GENMASK(31, 16)
-#define HAL_RX_MPDU_START_INFO1_PEERID	GENMASK(31, 16)
+#define HAL_RX_MPDU_START_INFO1_PEERID		GENMASK(29, 16)
+#define HAL_RX_MPDU_START_INFO1_CHIPID		GENMASK(31, 30)
 #define HAL_RX_MPDU_START_INFO2_MPDU_LEN GENMASK(13, 0)
 struct hal_rx_mpdu_start {
+	__le32 rsvd0[9];
 	__le32 info0;
 	__le32 info1;
-	__le32 rsvd1[11];
+	__le32 rsvd1[2];
 	__le32 info2;
-	__le32 rsvd2[9];
+	__le32 rsvd2[16];
 } __packed;
 
 #define HAL_RX_PPDU_END_DURATION	GENMASK(23, 0)
 struct hal_rx_ppdu_end_duration {
 	__le32 rsvd0[9];
 	__le32 info0;
-	__le32 rsvd1[4];
+	__le32 rsvd1[18];
 } __packed;
 
 struct hal_rx_rxpcu_classification_overview {
@@ -629,35 +699,435 @@
 #define HAL_RX_MPDU_ERR_MPDU_LEN		BIT(6)
 #define HAL_RX_MPDU_ERR_UNENCRYPTED_FRAME	BIT(7)
 
-static inline
-enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
-{
-	enum nl80211_he_ru_alloc ret;
-
-	switch (ru_tones) {
-	case RU_52:
-		ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
-		break;
-	case RU_106:
-		ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
-		break;
-	case RU_242:
-		ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
-		break;
-	case RU_484:
-		ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
-		break;
-	case RU_996:
-		ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
-		break;
-	case RU_26:
-		fallthrough;
-	default:
-		ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
-		break;
-	}
-	return ret;
-}
+#define HAL_RX_PHY_CMN_USER_INFO0_GI   GENMASK(17, 16)
+struct phyrx_common_user_info {
+	__le32 rsvd[2];
+	__le32 info0;
+	__le32 rsvd1;
+};
+
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_SPATIAL_REUSE	GENMASK(3, 0)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_GI_LTF		GENMASK(5, 4)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_NUM_LTF_SYM	GENMASK(8, 6)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_NSS		GENMASK(10, 7)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_BEAMFORMED		BIT(11)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_DISREGARD		GENMASK(13, 12)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_CRC		GENMASK(17, 14)
+
+struct hal_eht_sig_ndp_cmn_eb {
+	__le32 info0;
+} __packed;
+
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_SPATIAL_REUSE		GENMASK(3, 0)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_GI_LTF			GENMASK(5, 4)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_NUM_LTF_SYM		GENMASK(8, 6)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_LDPC_EXTA_SYM		BIT(9)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_PRE_FEC_PAD_FACTOR	GENMASK(11, 10)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISAMBIGUITY		BIT(12)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISREGARD			GENMASK(16, 13)
+
+struct hal_eht_sig_cc_usig_overflow {
+	__le32 info0;
+} __packed;
+
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_STA_ID	GENMASK(10, 0)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS	GENMASK(14, 11)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_VALIDATE	BIT(15)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS	GENMASK(19, 16)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_BEAMFORMED	BIT(20)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CODING	BIT(21)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CRC	GENMASK(25, 22)
+
+struct hal_eht_sig_non_mu_mimo_user_info {
+	__le32 info0;
+} __packed;
+
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_STA_ID		GENMASK(10, 0)
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS		GENMASK(14, 11)
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CODING		BIT(15)
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_SPATIAL_CODING	GENMASK(22, 16)
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CRC		GENMASK(26, 23)
+
+struct hal_eht_sig_mu_mimo_user_info {
+	__le32 info0;
+} __packed;
+
+union hal_eht_sig_user_field {
+	struct hal_eht_sig_mu_mimo_user_info mu_mimo_usr;
+	struct hal_eht_sig_non_mu_mimo_user_info non_mu_mimo_usr;
+};
+
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_SPATIAL_REUSE		GENMASK(3, 0)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_GI_LTF			GENMASK(5, 4)
+#define	HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_LTF_SYM		GENMASK(8, 6)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_LDPC_EXTA_SYM		BIT(9)
+#define	HAL_RX_EHT_SIG_NON_OFDMA_INFO0_PRE_FEC_PAD_FACTOR	GENMASK(11, 10)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_DISAMBIGUITY		BIT(12)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_DISREGARD		GENMASK(16, 13)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_USERS		GENMASK(19, 17)
+
+struct hal_eht_sig_non_ofdma_cmn_eb {
+	__le32 info0;
+	union hal_eht_sig_user_field user_field;
+} __packed;
+
+#define HAL_RX_EHT_SIG_OFDMA_EB1_SPATIAL_REUSE		GENMASK_ULL(3, 0)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_GI_LTF			GENMASK_ULL(5, 4)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_NUM_LFT_SYM		GENMASK_ULL(8, 6)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_LDPC_EXTRA_SYM		BIT(9)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_PRE_FEC_PAD_FACTOR	GENMASK_ULL(11, 10)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_PRE_DISAMBIGUITY	BIT(12)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_DISREGARD		GENMASK_ULL(16, 13)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_1		GENMASK_ULL(25, 17)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_2		GENMASK_ULL(34, 26)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_CRC			GENMASK_ULL(30, 27)
+
+struct hal_eht_sig_ofdma_cmn_eb1 {
+	__le64 info0;
+} __packed;
+
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_1		GENMASK_ULL(8, 0)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_2		GENMASK_ULL(17, 9)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_3		GENMASK_ULL(26, 18)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_4		GENMASK_ULL(35, 27)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_5		GENMASK_ULL(44, 36)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_6		GENMASK_ULL(53, 45)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_MCS			GENMASK_ULL(57, 54)
+
+struct hal_eht_sig_ofdma_cmn_eb2 {
+	__le64 info0;
+} __packed;
+
+enum hal_eht_bw {
+	HAL_EHT_BW_20,
+	HAL_EHT_BW_40,
+	HAL_EHT_BW_80,
+	HAL_EHT_BW_160,
+	HAL_EHT_BW_320_1,
+	HAL_EHT_BW_320_2,
+};
+
+#define HAL_RX_USIG_CMN_INFO0_PHY_VERSION	GENMASK(2, 0)
+#define HAL_RX_USIG_CMN_INFO0_BW		GENMASK(5, 3)
+#define HAL_RX_USIG_CMN_INFO0_UL_DL		BIT(6)
+#define HAL_RX_USIG_CMN_INFO0_BSS_COLOR		GENMASK(12, 7)
+#define HAL_RX_USIG_CMN_INFO0_TXOP		GENMASK(19, 13)
+#define HAL_RX_USIG_CMN_INFO0_DISREGARD		GENMASK(25, 20)
+#define HAL_RX_USIG_CMN_INFO0_VALIDATE		BIT(26)
+
+struct hal_mon_usig_cmn {
+	__le32 info0;
+} __packed;
+
+#define HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE	GENMASK(1, 0)
+#define HAL_RX_USIG_TB_INFO0_VALIDATE			BIT(2)
+#define HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_1		GENMASK(6, 3)
+#define HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_2		GENMASK(10, 7)
+#define HAL_RX_USIG_TB_INFO0_DISREGARD_1		GENMASK(15, 11)
+#define HAL_RX_USIG_TB_INFO0_CRC			GENMASK(19, 16)
+#define HAL_RX_USIG_TB_INFO0_TAIL			GENMASK(25, 20)
+#define HAL_RX_USIG_TB_INFO0_RX_INTEG_CHECK_PASS	BIT(31)
+
+struct hal_mon_usig_tb {
+	__le32 info0;
+} __packed;
+
+#define HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE	GENMASK(1, 0)
+#define HAL_RX_USIG_MU_INFO0_VALIDATE_1			BIT(2)
+#define HAL_RX_USIG_MU_INFO0_PUNC_CH_INFO		GENMASK(7, 3)
+#define HAL_RX_USIG_MU_INFO0_VALIDATE_2			BIT(8)
+#define HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS		GENMASK(10, 9)
+#define HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM		GENMASK(15, 11)
+#define HAL_RX_USIG_MU_INFO0_CRC			GENMASK(20, 16)
+#define HAL_RX_USIG_MU_INFO0_TAIL			GENMASK(26, 21)
+#define HAL_RX_USIG_MU_INFO0_RX_INTEG_CHECK_PASS	BIT(31)
+
+struct hal_mon_usig_mu {
+	__le32 info0;
+} __packed;
+
+union hal_mon_usig_non_cmn {
+	struct hal_mon_usig_tb tb;
+	struct hal_mon_usig_mu mu;
+};
+
+struct hal_mon_usig_hdr {
+	struct hal_mon_usig_cmn usig_1;
+	union hal_mon_usig_non_cmn usig_2;
+} __packed;
+
+#define HAL_RX_USR_INFO0_PHY_PPDU_ID		GENMASK(15, 0)
+#define HAL_RX_USR_INFO0_USR_RSSI		GENMASK(23, 16)
+#define HAL_RX_USR_INFO0_PKT_TYPE		GENMASK(27, 24)
+#define HAL_RX_USR_INFO0_STBC			BIT(28)
+#define HAL_RX_USR_INFO0_RECEPTION_TYPE		GENMASK(31, 29)
+
+#define HAL_RX_USR_INFO1_MCS			GENMASK(3, 0)
+#define HAL_RX_USR_INFO1_SGI			GENMASK(5, 4)
+#define HAL_RX_USR_INFO1_HE_RANGING_NDP		BIT(6)
+#define HAL_RX_USR_INFO1_MIMO_SS_BITMAP		GENMASK(15, 8)
+#define HAL_RX_USR_INFO1_RX_BW			GENMASK(18, 16)
+#define HAL_RX_USR_INFO1_DL_OFMDA_USR_IDX	GENMASK(31, 24)
+
+#define HAL_RX_USR_INFO2_DL_OFDMA_CONTENT_CHAN	BIT(0)
+#define HAL_RX_USR_INFO2_NSS			GENMASK(10, 8)
+#define HAL_RX_USR_INFO2_STREAM_OFFSET		GENMASK(13, 11)
+#define HAL_RX_USR_INFO2_STA_DCM		BIT(14)
+#define HAL_RX_USR_INFO2_LDPC			BIT(15)
+#define HAL_RX_USR_INFO2_RU_TYPE_80_0		GENMASK(19, 16)
+#define HAL_RX_USR_INFO2_RU_TYPE_80_1		GENMASK(23, 20)
+#define HAL_RX_USR_INFO2_RU_TYPE_80_2		GENMASK(27, 24)
+#define HAL_RX_USR_INFO2_RU_TYPE_80_3		GENMASK(31, 28)
+
+#define HAL_RX_USR_INFO3_RU_START_IDX_80_0	GENMASK(5, 0)
+#define HAL_RX_USR_INFO3_RU_START_IDX_80_1	GENMASK(13, 8)
+#define HAL_RX_USR_INFO3_RU_START_IDX_80_2	GENMASK(21, 16)
+#define HAL_RX_USR_INFO3_RU_START_IDX_80_3	GENMASK(29, 24)
+
+struct hal_receive_user_info {
+	__le32 info0;
+	__le32 info1;
+	__le32 info2;
+	__le32 info3;
+	__le32 user_fd_rssi_seg0;
+	__le32 user_fd_rssi_seg1;
+	__le32 user_fd_rssi_seg2;
+	__le32 user_fd_rssi_seg3;
+} __packed;
+
+enum hal_mon_reception_type {
+	HAL_RECEPTION_TYPE_SU,
+	HAL_RECEPTION_TYPE_DL_MU_MIMO,
+	HAL_RECEPTION_TYPE_DL_MU_OFMA,
+	HAL_RECEPTION_TYPE_DL_MU_OFDMA_MIMO,
+	HAL_RECEPTION_TYPE_UL_MU_MIMO,
+	HAL_RECEPTION_TYPE_UL_MU_OFDMA,
+	HAL_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
+};
+/* Different allowed RU in 11BE */
+#define HAL_EHT_RU_26		0ULL
+#define HAL_EHT_RU_52		1ULL
+#define HAL_EHT_RU_78		2ULL
+#define HAL_EHT_RU_106		3ULL
+#define HAL_EHT_RU_132		4ULL
+#define HAL_EHT_RU_242		5ULL
+#define HAL_EHT_RU_484		6ULL
+#define HAL_EHT_RU_726		7ULL
+#define HAL_EHT_RU_996		8ULL
+#define HAL_EHT_RU_996x2	9ULL
+#define HAL_EHT_RU_996x3	10ULL
+#define HAL_EHT_RU_996x4	11ULL
+#define HAL_EHT_RU_NONE		15ULL
+#define HAL_EHT_RU_INVALID	31ULL
+/*
+ * MRUs spanning above 80Mhz
+ * HAL_EHT_RU_996_484 = HAL_EHT_RU_484 + HAL_EHT_RU_996 + 4 (reserved)
+ */
+#define HAL_EHT_RU_996_484	18ULL
+#define HAL_EHT_RU_996x2_484	28ULL
+#define HAL_EHT_RU_996x3_484	40ULL
+#define HAL_EHT_RU_996_484_242	23ULL
+
+#define NUM_RU_BITS_PER80	16
+#define NUM_RU_BITS_PER20	4
+
+/* Different per_80Mhz band in 320Mhz bandwidth */
+#define HAL_80_0	0
+#define HAL_80_1	1
+#define HAL_80_2	2
+#define HAL_80_3	3
+
+#define HAL_RU_SHIFT(num_80mhz_band, ru_index_per_80)	\
+		((NUM_RU_BITS_PER80 * (num_80mhz_band)) +	\
+		 (NUM_RU_BITS_PER20 * (ru_index_per_80)))
+
+/* MRU-996+484 */
+#define HAL_EHT_RU_996_484_0	((HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_0, 1)) |	\
+				 (HAL_EHT_RU_996 << HAL_RU_SHIFT(HAL_80_1, 0)))
+#define HAL_EHT_RU_996_484_1	((HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_0, 0)) |	\
+				 (HAL_EHT_RU_996 << HAL_RU_SHIFT(HAL_80_1, 0)))
+#define HAL_EHT_RU_996_484_2	((HAL_EHT_RU_996 << HAL_RU_SHIFT(HAL_80_0, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_1, 1)))
+#define HAL_EHT_RU_996_484_3	((HAL_EHT_RU_996 << HAL_RU_SHIFT(HAL_80_0, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_1, 0)))
+#define HAL_EHT_RU_996_484_4	((HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_2, 1)) |	\
+				 (HAL_EHT_RU_996 << HAL_RU_SHIFT(HAL_80_3, 0)))
+#define HAL_EHT_RU_996_484_5	((HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_2, 0)) |	\
+				 (HAL_EHT_RU_996 << HAL_RU_SHIFT(HAL_80_3, 0)))
+#define HAL_EHT_RU_996_484_6	((HAL_EHT_RU_996 << HAL_RU_SHIFT(HAL_80_2, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_3, 1)))
+#define HAL_EHT_RU_996_484_7	((HAL_EHT_RU_996 << HAL_RU_SHIFT(HAL_80_2, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_3, 0)))
+
+/* MRU-996x2+484 */
+#define HAL_EHT_RU_996x2_484_0	((HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_0, 1)) |	\
+				 (HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_1, 0)) |	\
+				 (HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_2, 0)))
+#define HAL_EHT_RU_996x2_484_1	((HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_0, 0)) |	\
+				 (HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_1, 0)) |	\
+				 (HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_2, 0)))
+#define HAL_EHT_RU_996x2_484_2	((HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_0, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_1, 1)) |	\
+				 (HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_2, 0)))
+#define HAL_EHT_RU_996x2_484_3	((HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_0, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_1, 0)) |	\
+				 (HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_2, 0)))
+#define HAL_EHT_RU_996x2_484_4	((HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_0, 0)) |	\
+				 (HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_1, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_2, 1)))
+#define HAL_EHT_RU_996x2_484_5	((HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_0, 0)) |	\
+				 (HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_1, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_2, 0)))
+#define HAL_EHT_RU_996x2_484_6	((HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_1, 1)) |	\
+				 (HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_2, 0)) |	\
+				 (HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_3, 0)))
+#define HAL_EHT_RU_996x2_484_7	((HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_1, 0)) |	\
+				 (HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_2, 0)) |	\
+				 (HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_3, 0)))
+#define HAL_EHT_RU_996x2_484_8	((HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_1, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_2, 1)) |	\
+				 (HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_3, 0)))
+#define HAL_EHT_RU_996x2_484_9	((HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_1, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_2, 0)) |	\
+				 (HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_3, 0)))
+#define HAL_EHT_RU_996x2_484_10	((HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_1, 0)) |	\
+				 (HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_2, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_3, 1)))
+#define HAL_EHT_RU_996x2_484_11	((HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_1, 0)) |	\
+				 (HAL_EHT_RU_996x2 << HAL_RU_SHIFT(HAL_80_2, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_3, 0)))
+
+/* MRU-996x3+484 */
+#define HAL_EHT_RU_996x3_484_0	((HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_0, 1)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_1, 0)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_2, 0)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_3, 0)))
+#define HAL_EHT_RU_996x3_484_1	((HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_0, 0)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_1, 0)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_2, 0)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_3, 0)))
+#define HAL_EHT_RU_996x3_484_2	((HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_0, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_1, 1)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_2, 0)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_3, 0)))
+#define HAL_EHT_RU_996x3_484_3	((HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_0, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_1, 0)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_2, 0)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_3, 0)))
+#define HAL_EHT_RU_996x3_484_4	((HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_0, 0)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_1, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_2, 1)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_3, 0)))
+#define HAL_EHT_RU_996x3_484_5	((HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_0, 0)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_1, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_2, 0)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_3, 0)))
+#define HAL_EHT_RU_996x3_484_6	((HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_0, 0)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_1, 0)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_2, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_3, 1)))
+#define HAL_EHT_RU_996x3_484_7	((HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_0, 0)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_1, 0)) |	\
+				 (HAL_EHT_RU_996x3 << HAL_RU_SHIFT(HAL_80_2, 0)) |	\
+				 (HAL_EHT_RU_484 << HAL_RU_SHIFT(HAL_80_3, 0)))
+
+#define HAL_SET_RU_PER80(ru_320mhz, ru_per80, ru_idx_per80mhz, num_80mhz) \
+			((ru_320mhz) |= ((uint64_t)(ru_per80) << \
+			(((num_80mhz) * NUM_RU_BITS_PER80) + \
+			((ru_idx_per80mhz) * NUM_RU_BITS_PER20))))
+
+#define RU_26			1
+#define RU_52			2
+#define RU_106			4
+#define RU_242			9
+#define RU_484			18
+#define RU_996			37
+#define RU_2X996		74
+#define RU_3X996		111
+#define RU_4X996		148
+#define RU_52_26		RU_52 + RU_26
+#define RU_106_26		RU_106 + RU_26
+#define RU_484_242		RU_484 + RU_242
+#define RU_996_484		RU_996 + RU_484
+#define RU_996_484_242		RU_996 + RU_484_242
+#define RU_2X996_484		RU_2X996 + RU_484
+#define RU_3X996_484		RU_3X996 + RU_484
+
+enum ath12k_eht_ru_size {
+	ATH12K_EHT_RU_26,
+	ATH12K_EHT_RU_52,
+	ATH12K_EHT_RU_106,
+	ATH12K_EHT_RU_242,
+	ATH12K_EHT_RU_484,
+	ATH12K_EHT_RU_996,
+	ATH12K_EHT_RU_996x2,
+	ATH12K_EHT_RU_996x4,
+	ATH12K_EHT_RU_52_26,
+	ATH12K_EHT_RU_106_26,
+	ATH12K_EHT_RU_484_242,
+	ATH12K_EHT_RU_996_484,
+	ATH12K_EHT_RU_996_484_242,
+	ATH12K_EHT_RU_996x2_484,
+	ATH12K_EHT_RU_996x3,
+	ATH12K_EHT_RU_996x3_484,
+	ATH12K_EHT_RU_INVALID,
+};
+
+#define REO2PPE_DST_RING_MAP 11
+#define REO_DEST_CTRL_IX_0_RING6_MAP_MASK 0xF
+#define REO_DEST_CTRL_IX_0_RING6_MAP_SHFT 24
+
+#define REO2PPE_DST_RING_MAP 11
+#define REO_DEST_CTRL_IX_0_RING6_MAP_MASK 0xF
+#define REO_DEST_CTRL_IX_0_RING6_MAP_SHFT 24
+
+/* U-SIG Common Mask */
+#define USIG_PHY_VERSION_KNOWN	0x00000001
+#define USIG_BW_KNOWN		0x00000002
+#define USIG_UL_DL_KNOWN	0x00000004
+#define USIG_BSS_COLOR_KNOWN	0x00000008
+#define USIG_TXOP_KNOWN		0x00000010
+
+#define USIG_PHY_VERSION_SHIFT	12
+#define USIG_BW_SHIFT		15
+#define USIG_UL_DL_SHIFT	18
+#define USIG_BSS_COLOR_SHIFT	19
+#define USIG_TXOP_SHIFT		25
+
+/* U-SIG MU/TB Value */
+#define USIG_DISREGARD_SHIFT			0
+#define USIG_PPDU_TYPE_N_COMP_MODE_SHIFT	6
+#define USIG_VALIDATE_SHIFT			8
+
+#define USIG_MU_VALIDATE1_SHIFT			5
+#define USIG_MU_PUNCTURE_CH_INFO_SHIFT		9
+#define USIG_MU_VALIDATE2_SHIFT			12
+#define USIG_MU_EHT_SIG_MCS_SHIFT		15
+#define USIG_MU_NUM_EHT_SIG_SYM_SHIFT		17
+
+#define USIG_TB_SPATIAL_REUSE_1_SHIFT		9
+#define USIG_TB_SPATIAL_REUSE_2_SHIFT		13
+#define USIG_TB_DISREGARD1_SHIFT		17
+
+#define USIG_CRC_SHIFT				22
+#define USIG_TAIL_SHIFT				26
+
+/* U-SIG MU/TB Mask */
+#define USIG_DISREGARD_KNOWN			0x00000001
+#define USIG_PPDU_TYPE_N_COMP_MODE_KNOWN	0x00000004
+#define USIG_VALIDATE_KNOWN			0x00000008
+
+#define USIG_MU_VALIDATE1_KNOWN			0x00000002
+#define USIG_MU_PUNCTURE_CH_INFO_KNOWN		0x00000010
+#define USIG_MU_VALIDATE2_KNOWN			0x00000020
+#define USIG_MU_EHT_SIG_MCS_KNOWN		0x00000040
+#define USIG_MU_NUM_EHT_SIG_SYM_KNOWN		0x00000080
+#define USIG_TB_SPATIAL_REUSE_1_KNOWN		0x00000010
+#define USIG_TB_SPATIAL_REUSE_2_KNOWN		0x00000020
+#define USIG_TB_DISREGARD1_KNOWN		0x00000040
+
+#define USIG_CRC_KNOWN				0x00000100
+#define USIG_TAIL_KNOWN				0x00000200
 
 void ath12k_hal_reo_status_queue_stats(struct ath12k_base *ab,
 				       struct hal_tlv_64_hdr *tlv,
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/hal_tx.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hal_tx.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/hal_tx.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hal_tx.c	2024-01-19 17:01:19.865847030 +0100
@@ -29,43 +29,6 @@
 	return dscp >> 3;
 }
 
-void ath12k_hal_tx_cmd_desc_setup(struct ath12k_base *ab,
-				  struct hal_tcl_data_cmd *tcl_cmd,
-				  struct hal_tx_info *ti)
-{
-	tcl_cmd->buf_addr_info.info0 =
-		le32_encode_bits(ti->paddr, BUFFER_ADDR_INFO0_ADDR);
-	tcl_cmd->buf_addr_info.info1 =
-		le32_encode_bits(((uint64_t)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT),
-				 BUFFER_ADDR_INFO1_ADDR);
-	tcl_cmd->buf_addr_info.info1 |=
-		le32_encode_bits((ti->rbm_id), BUFFER_ADDR_INFO1_RET_BUF_MGR) |
-		le32_encode_bits(ti->desc_id, BUFFER_ADDR_INFO1_SW_COOKIE);
-
-	tcl_cmd->info0 =
-		le32_encode_bits(ti->type, HAL_TCL_DATA_CMD_INFO0_DESC_TYPE) |
-		le32_encode_bits(ti->bank_id, HAL_TCL_DATA_CMD_INFO0_BANK_ID);
-
-	tcl_cmd->info1 =
-		le32_encode_bits(ti->meta_data_flags,
-				 HAL_TCL_DATA_CMD_INFO1_CMD_NUM);
-
-	tcl_cmd->info2 = cpu_to_le32(ti->flags0) |
-		le32_encode_bits(ti->data_len, HAL_TCL_DATA_CMD_INFO2_DATA_LEN) |
-		le32_encode_bits(ti->pkt_offset, HAL_TCL_DATA_CMD_INFO2_PKT_OFFSET);
-
-	tcl_cmd->info3 = cpu_to_le32(ti->flags1) |
-		le32_encode_bits(ti->tid, HAL_TCL_DATA_CMD_INFO3_TID) |
-		le32_encode_bits(ti->lmac_id, HAL_TCL_DATA_CMD_INFO3_PMAC_ID) |
-		le32_encode_bits(ti->vdev_id, HAL_TCL_DATA_CMD_INFO3_VDEV_ID);
-
-	tcl_cmd->info4 = le32_encode_bits(ti->bss_ast_idx,
-					  HAL_TCL_DATA_CMD_INFO4_SEARCH_INDEX) |
-			 le32_encode_bits(ti->bss_ast_hash,
-					  HAL_TCL_DATA_CMD_INFO4_CACHE_SET_NUM);
-	tcl_cmd->info5 = 0;
-}
-
 void ath12k_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id)
 {
 	u32 ctrl_reg_val;
@@ -143,3 +106,36 @@
 	ath12k_hif_write32(ab, HAL_TCL_SW_CONFIG_BANK_ADDR + 4 * bank_id,
 			   bank_config);
 }
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+void ath12k_hal_tx_set_ppe_vp_entry(struct ath12k_base *ab, u32 ppe_vp_config,
+				    u32 ppe_vp_idx)
+{
+	ath12k_hif_write32(ab, HAL_TX_PPE_VP_CONFIG_TABLE_ADDR +
+			   HAL_TX_PPE_VP_CONFIG_TABLE_OFFSET * ppe_vp_idx,
+			   ppe_vp_config);
+}
+#endif
+
+void ath12k_hal_tx_config_rbm_mapping(struct ath12k_base *ab, u8 ring_num,
+				      u8 rbm_id, int ring_type)
+{
+	u32 curr_map, new_map;
+
+	if (ring_type == HAL_PPE2TCL)
+		ring_num = ring_num + HAL_TCL_RBM_MAPPING_PPE2TCL_OFFSET;
+	else if (ring_type == HAL_TCL_CMD)
+		ring_num = ring_num + HAL_TCL_RBM_MAPPING_TCL_CMD_CREDIT_OFFSET;
+
+	curr_map = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+				     HAL_TCL_RBM_MAPPING0_ADDR_OFFSET);
+
+	/* Protect the other values and clear the specific fields to be updated */
+	curr_map &= (~(HAL_TCL_RBM_MAPPING_BMSK <<
+		      (HAL_TCL_RBM_MAPPING_SHFT * ring_num)));
+	new_map = curr_map | ((HAL_TCL_RBM_MAPPING_BMSK & rbm_id) <<
+			      (HAL_TCL_RBM_MAPPING_SHFT * ring_num));
+
+	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+			   HAL_TCL_RBM_MAPPING0_ADDR_OFFSET, new_map);
+}
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/hal_tx.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hal_tx.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/hal_tx.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hal_tx.h	2024-01-19 17:01:19.865847030 +0100
@@ -64,6 +64,8 @@
 	u8 tid;
 	u16 peer_id;
 	u32 rate_stats;
+	u32 buffer_timestamp;
+	u32 tsf;
 };
 
 #define HAL_TX_PHY_DESC_INFO0_BF_TYPE		GENMASK(17, 16)
@@ -182,13 +184,28 @@
 /* STA mode will have MCAST_PKT_CTRL instead of DSCP_TID_MAP bitfield */
 #define HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID	GENMASK(22, 17)
 
-void ath12k_hal_tx_cmd_desc_setup(struct ath12k_base *ab,
-				  struct hal_tcl_data_cmd *tcl_cmd,
-				  struct hal_tx_info *ti);
+#define HAL_TX_PPE_VP_CONFIG_TABLE_ADDR		0x00a44194
+#define HAL_TX_PPE_VP_CONFIG_TABLE_OFFSET	4
+
+#define HAL_TX_PPE_VP_CFG_VP_NUM		GENMASK(7, 0)
+#define HAL_TX_PPE_VP_CFG_PMAC_ID		GENMASK(9, 8)
+#define HAL_TX_PPE_VP_CFG_BANK_ID		GENMASK(15, 10)
+#define HAL_TX_PPE_VP_CFG_VDEV_ID		GENMASK(23, 16)
+#define HAL_TX_PPE_VP_CFG_SRCH_IDX_REG_NUM	GENMASK(26, 24)
+#define HAL_TX_PPE_VP_CFG_USE_PPE_INT_PRI	BIT(27)
+#define HAL_TX_PPE_VP_CFG_TO_FW			BIT(28)
+#define HAL_TX_PPE_VP_CFG_DROP_PREC_EN		BIT(29)
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+void ath12k_hal_tx_set_ppe_vp_entry(struct ath12k_base *ab, u32 ppe_vp_config,
+				    u32 ppe_vp_idx);
+#endif
 void ath12k_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id);
 int ath12k_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng,
 			    enum hal_reo_cmd_type type,
 			    struct ath12k_hal_reo_cmd *cmd);
 void ath12k_hal_tx_configure_bank_register(struct ath12k_base *ab, u32 bank_config,
 					   u8 bank_id);
+void ath12k_hal_tx_config_rbm_mapping(struct ath12k_base *ab, u8 ring_num,
+				      u8 rbm_id, int ring_type);
 #endif
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/hif.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hif.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/hif.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hif.h	2024-01-19 17:01:19.865847030 +0100
@@ -8,10 +8,14 @@
 #define ATH12K_HIF_H
 
 #include "core.h"
+#include "pci.h"
 
 struct ath12k_hif_ops {
 	u32 (*read32)(struct ath12k_base *sc, u32 address);
 	void (*write32)(struct ath12k_base *sc, u32 address, u32 data);
+	u32 (*cmem_read32)(struct ath12k_base *sc, u32 address);
+	void (*cmem_write32)(struct ath12k_base *sc, u32 address, u32 data);
+	u32 (*pmm_read32)(struct ath12k_base *sc, u32 address);
 	void (*irq_enable)(struct ath12k_base *sc);
 	void (*irq_disable)(struct ath12k_base *sc);
 	int (*start)(struct ath12k_base *sc);
@@ -30,8 +34,64 @@
 	void (*ce_irq_enable)(struct ath12k_base *ab);
 	void (*ce_irq_disable)(struct ath12k_base *ab);
 	void (*get_ce_msi_idx)(struct ath12k_base *ab, u32 ce_id, u32 *msi_idx);
+	int (*ssr_notifier_reg)(struct ath12k_base *ab);
+	int (*ssr_notifier_unreg)(struct ath12k_base *ab);
+	void (*config_static_window)(struct ath12k_base *ab);
+	int (*get_msi_irq)(struct ath12k_base *ab, unsigned int vector);
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	int (*ppeds_register_interrupts)(struct ath12k_base *ab, int type, int vector,
+					 int ring_num);
+	void (*ppeds_free_interrupts)(struct ath12k_base *ab);
+	void (*ppeds_irq_enable)(struct ath12k_base *ab, enum ppeds_irq_type type);
+	void (*ppeds_irq_disable)(struct ath12k_base *ab, enum ppeds_irq_type type);
+#endif
+	int (*dp_umac_reset_irq_config)(struct ath12k_base *ab);
+	void (*dp_umac_reset_enable_irq)(struct ath12k_base *ab);
+	void (*dp_umac_reset_free_irq)(struct ath12k_base *ab);
 };
 
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+static inline int ath12k_hif_ppeds_register_interrupts(struct ath12k_base *ab, int type, int vector,
+						       int ring_num)
+{
+	if (!test_bit(ATH12K_FLAG_PPE_DS_ENABLED, &ab->dev_flags))
+		return 0;
+
+	if (ab->hif.ops->ppeds_register_interrupts)
+		return ab->hif.ops->ppeds_register_interrupts(ab, type, vector,
+							      ring_num);
+	return 0;
+}
+
+static inline void ath12k_hif_ppeds_free_interrupts(struct ath12k_base *ab)
+{
+	if (!test_bit(ATH12K_FLAG_PPE_DS_ENABLED, &ab->dev_flags))
+		return;
+
+	if (ab->hif.ops->ppeds_free_interrupts)
+		ab->hif.ops->ppeds_free_interrupts(ab);
+}
+
+static inline void ath12k_hif_ppeds_irq_enable(struct ath12k_base *ab, enum ppeds_irq_type type)
+{
+	if (!test_bit(ATH12K_FLAG_PPE_DS_ENABLED, &ab->dev_flags))
+		return;
+
+	if (ab->hif.ops->ppeds_irq_enable)
+		ab->hif.ops->ppeds_irq_enable(ab, type);
+}
+
+static inline void ath12k_hif_ppeds_irq_disable(struct ath12k_base *ab, enum ppeds_irq_type type)
+{
+	if (!test_bit(ATH12K_FLAG_PPE_DS_ENABLED, &ab->dev_flags))
+		return;
+
+	if (ab->hif.ops->ppeds_irq_disable)
+		ab->hif.ops->ppeds_irq_disable(ab, type);
+}
+#endif
+
 static inline int ath12k_hif_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
 						 u8 *ul_pipe, u8 *dl_pipe)
 {
@@ -72,6 +132,22 @@
 		*msi_data_idx = ce_id;
 }
 
+static inline int ath12k_hif_ssr_notifier_reg(struct ath12k_base *ab)
+{
+	if (!ab->hif.ops->ssr_notifier_reg)
+		return 0;
+
+	return ab->hif.ops->ssr_notifier_reg(ab);
+}
+
+static inline int ath12k_hif_ssr_notifier_unreg(struct ath12k_base *ab)
+{
+	if (!ab->hif.ops->ssr_notifier_unreg)
+		return 0;
+
+	return ab->hif.ops->ssr_notifier_unreg(ab);
+}
+
 static inline void ath12k_hif_ce_irq_enable(struct ath12k_base *ab)
 {
 	if (ab->hif.ops->ce_irq_enable)
@@ -131,6 +207,23 @@
 	ab->hif.ops->write32(ab, address, data);
 }
 
+static inline u32 ath12k_hif_cmem_read32(struct ath12k_base *ab, u32 address)
+{
+	return ab->hif.ops->cmem_read32(ab, address);
+}
+
+static inline u32 ath12k_hif_pmm_read32(struct ath12k_base *ab, u32 offset)
+{
+	return ab->hif.ops->pmm_read32(ab, offset);
+}
+
+static inline void ath12k_hif_cmem_write32(struct ath12k_base *ab, u32 address,
+				      u32 data)
+{
+	ab->hif.ops->cmem_write32(ab, address, data);
+}
+
+
 static inline int ath12k_hif_power_up(struct ath12k_base *ab)
 {
 	return ab->hif.ops->power_up(ab);
@@ -141,4 +234,43 @@
 	ab->hif.ops->power_down(ab);
 }
 
+static inline void ath12k_hif_config_static_window(struct ath12k_base *ab)
+{
+	if (!ab->hif.ops->config_static_window)
+		return;
+
+	ab->hif.ops->config_static_window(ab);
+}
+
+static inline int ath12k_hif_get_msi_irq(struct ath12k_base *ab, unsigned int vector)
+{
+	if (!ab->hif.ops->get_msi_irq)
+		return -EOPNOTSUPP;
+
+	return ab->hif.ops->get_msi_irq(ab, vector);
+}
+
+static inline int ath12k_hif_dp_umac_reset_irq_config(struct ath12k_base *ab)
+{
+	if (ab->hif.ops->dp_umac_reset_irq_config)
+		return ab->hif.ops->dp_umac_reset_irq_config(ab);
+
+	return 0;
+}
+
+static inline void ath12k_hif_dp_umac_reset_enable_irq(struct ath12k_base *ab)
+{
+	if (ab->hif.ops->dp_umac_reset_enable_irq)
+		ab->hif.ops->dp_umac_reset_enable_irq(ab);
+
+	return;
+}
+
+static inline void ath12k_hif_dp_umac_reset_free_irq(struct ath12k_base *ab)
+{
+	if (ab->hif.ops->dp_umac_reset_free_irq)
+		ab->hif.ops->dp_umac_reset_free_irq(ab);
+
+	return;
+}
 #endif /* ATH12K_HIF_H */
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/htc.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/htc.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/htc.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/htc.c	2024-01-19 17:01:19.865847030 +0100
@@ -81,6 +81,8 @@
 	struct ath12k_base *ab = htc->ab;
 	int credits = 0;
 	int ret;
+	bool credit_flow_enabled = (ab->hw_params->credit_flow &&
+				    ep->tx_credit_flow_enabled);
 
 	if (eid >= ATH12K_HTC_EP_COUNT) {
 		ath12k_warn(ab, "Invalid endpoint id: %d\n", eid);
@@ -89,7 +91,7 @@
 
 	skb_push(skb, sizeof(struct ath12k_htc_hdr));
 
-	if (ep->tx_credit_flow_enabled) {
+	if (credit_flow_enabled) {
 		credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
 		spin_lock_bh(&htc->tx_lock);
 		if (ep->tx_credits < credits) {
@@ -109,6 +111,7 @@
 
 	ath12k_htc_prepare_tx_skb(ep, skb);
 
+	skb_cb->eid = eid;
 	skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
 	ret = dma_mapping_error(dev, skb_cb->paddr);
 	if (ret) {
@@ -125,7 +128,7 @@
 err_unmap:
 	dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
 err_credits:
-	if (ep->tx_credit_flow_enabled) {
+	if (credit_flow_enabled) {
 		spin_lock_bh(&htc->tx_lock);
 		ep->tx_credits += credits;
 		ath12k_dbg(ab, ATH12K_DBG_HTC,
@@ -202,6 +205,7 @@
 			break;
 		}
 
+		if (ab->hw_params->credit_flow) {
 		switch (record->hdr.id) {
 		case ATH12K_HTC_RECORD_CREDITS:
 			len = sizeof(struct ath12k_htc_credit_report);
@@ -220,6 +224,7 @@
 				    record->hdr.id, record->hdr.len);
 			break;
 		}
+		}
 
 		if (status)
 			break;
@@ -244,6 +249,29 @@
 	complete(&ab->htc_suspend);
 }
 
+void ath12k_htc_tx_completion_handler(struct ath12k_base *ab,
+				      struct sk_buff *skb)
+{
+	struct ath12k_htc *htc = &ab->htc;
+	struct ath12k_htc_ep *ep;
+	void (*ep_tx_complete)(struct ath12k_base *, struct sk_buff *);
+	u8 eid;
+
+	eid = ATH12K_SKB_CB(skb)->eid;
+	if (eid >= ATH12K_HTC_EP_COUNT)
+		return;
+
+	ep = &htc->endpoint[eid];
+	spin_lock_bh(&htc->tx_lock);
+	ep_tx_complete = ep->ep_ops.ep_tx_complete;
+	spin_unlock_bh(&htc->tx_lock);
+	if (!ep_tx_complete) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+	ep_tx_complete(htc->ab, skb);
+}
+
 void ath12k_htc_rx_completion_handler(struct ath12k_base *ab,
 				      struct sk_buff *skb)
 {
@@ -606,12 +634,19 @@
 		disable_credit_flow_ctrl = true;
 	}
 
+	if (!ab->hw_params->credit_flow) {
+		flags |= ATH12K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+		disable_credit_flow_ctrl = true;
+	}
+
 	req_msg->flags_len = le32_encode_bits(flags, HTC_SVC_MSG_CONNECTIONFLAGS);
 	req_msg->msg_svc_id |= le32_encode_bits(conn_req->service_id,
 						HTC_SVC_MSG_SERVICE_ID);
 
 	reinit_completion(&htc->ctl_resp);
 
+	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+		clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
 	status = ath12k_htc_send(htc, ATH12K_HTC_EP_0, skb);
 	if (status) {
 		kfree_skb(skb);
@@ -730,7 +765,10 @@
 	msg->msg_id = le32_encode_bits(ATH12K_HTC_MSG_SETUP_COMPLETE_EX_ID,
 				       HTC_MSG_MESSAGEID);
 
+	if (ab->hw_params->credit_flow)
 	ath12k_dbg(ab, ATH12K_DBG_HTC, "HTC is using TX credit flow control\n");
+	else
+		msg->flags |= ATH12K_GLOBAL_DISABLE_CREDIT_FLOW;
 
 	status = ath12k_htc_send(htc, ATH12K_HTC_EP_0, skb);
 	if (status) {
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/htc.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/htc.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/htc.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/htc.h	2024-01-19 17:01:19.865847030 +0100
@@ -81,6 +81,8 @@
 	ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF,
 	ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS,
 	ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY,
+	ATH12K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE	    = 0x4,
+	ATH12K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL	    = 0x8,
 };
 
 #define ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK	GENMASK(1, 0)
@@ -117,6 +119,8 @@
 	__le32 svc_meta_pad;
 } __packed;
 
+#define ATH12K_GLOBAL_DISABLE_CREDIT_FLOW BIT(1)
+
 struct ath12k_htc_setup_complete_extended {
 	__le32 msg_id;
 	__le32 flags;
@@ -312,5 +316,7 @@
 struct sk_buff *ath12k_htc_alloc_skb(struct ath12k_base *ar, int size);
 void ath12k_htc_rx_completion_handler(struct ath12k_base *ar,
 				      struct sk_buff *skb);
+void ath12k_htc_tx_completion_handler(struct ath12k_base *ab,
+				      struct sk_buff *skb);
 
 #endif
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/hw.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hw.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/hw.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hw.c	2024-04-11 15:53:51.048030965 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/types.h>
@@ -45,6 +45,37 @@
 	return false;
 }
 
+static u8 ath12k_hw_ipq5332_mac_from_pdev_id(int pdev_idx)
+{
+	return pdev_idx;
+}
+
+static int ath12k_hw_mac_id_to_pdev_id_ipq5332(const struct ath12k_hw_params *hw,
+					       int mac_id)
+{
+	return mac_id;
+}
+
+static int ath12k_hw_mac_id_to_srng_id_ipq5332(const struct ath12k_hw_params *hw,
+					       int mac_id)
+{
+	return 0;
+}
+
+static u8 ath12k_hw_get_ring_selector_ipq5332(struct sk_buff *skb)
+{
+	return smp_processor_id();
+}
+
+static bool ath12k_dp_srng_is_comp_ring_ipq5332(int ring_num)
+{
+	if (ring_num < 3 || ring_num == 4)
+		return true;
+
+	return false;
+}
+
+
 static int ath12k_hw_mac_id_to_pdev_id_wcn7850(const struct ath12k_hw_params *hw,
 					       int mac_id)
 {
@@ -88,10 +119,24 @@
 	.dp_srng_is_tx_comp_ring = ath12k_dp_srng_is_comp_ring_wcn7850,
 };
 
+static const struct ath12k_hw_ops ipq5332_ops = {
+	.get_hw_mac_from_pdev_id = ath12k_hw_ipq5332_mac_from_pdev_id,
+	.mac_id_to_pdev_id = ath12k_hw_mac_id_to_pdev_id_ipq5332,
+	.mac_id_to_srng_id = ath12k_hw_mac_id_to_srng_id_ipq5332,
+	.rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_qcn9274,
+	.get_ring_selector = ath12k_hw_get_ring_selector_ipq5332,
+	.dp_srng_is_tx_comp_ring = ath12k_dp_srng_is_comp_ring_ipq5332,
+};
+
+
 #define ATH12K_TX_RING_MASK_0 0x1
 #define ATH12K_TX_RING_MASK_1 0x2
 #define ATH12K_TX_RING_MASK_2 0x4
 #define ATH12K_TX_RING_MASK_3 0x8
+/* ATH12K_TX_RING_MASK_4 corresponds to the interrupt mask
+ * for the 4th TX Data ring which uses WBM2SW4RELEASE and
+ * hence uses the mask with 4th bit set
+ */
 #define ATH12K_TX_RING_MASK_4 0x10
 
 #define ATH12K_RX_RING_MASK_0 0x1
@@ -106,6 +151,8 @@
 #define ATH12K_REO_STATUS_RING_MASK_0 0x1
 
 #define ATH12K_HOST2RXDMA_RING_MASK_0 0x1
+#define ATH12K_HOST2RXDMA_RING_MASK_1 0x2
+#define ATH12K_HOST2RXDMA_RING_MASK_2 0x4
 
 #define ATH12K_RX_MON_RING_MASK_0 0x1
 #define ATH12K_RX_MON_RING_MASK_1 0x2
@@ -114,6 +161,12 @@
 #define ATH12K_TX_MON_RING_MASK_0 0x1
 #define ATH12K_TX_MON_RING_MASK_1 0x2
 
+#define ATH12K_PPE2TCL_RING_MASK_0 0x1
+#define ATH12K_REO2PPE_RING_MASK_0 0x1
+#define ATH12K_PPE_WBM2SW_RELEASE_RING_MASK_0 0x1
+/* NOTE(review): duplicate ATH12K_PPE_WBM2SW_RELEASE_RING_MASK_0 define removed */
+#define ATH12K_UMAC_RESET_INTR_MASK_0	0x1
+
 /* Target firmware's Copy Engine configuration. */
 static const struct ce_pipe_config ath12k_target_ce_config_wlan_qcn9274[] = {
 	/* CE0: host->target HTC control and raw streams */
@@ -531,18 +584,304 @@
 	},
 };
 
-static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
+static const struct ce_pipe_config ath12k_target_ce_config_wlan_ipq5332[] = {
+	/* host->target HTC control and raw streams */
+	{
+		.pipenum = __cpu_to_le32(0),
+		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+	/* target->host HTT */
+	{
+		.pipenum = __cpu_to_le32(1),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+	/* target->host WMI  + HTC control */
+	{
+		.pipenum = __cpu_to_le32(2),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+	/* host->target WMI */
+	{
+		.pipenum = __cpu_to_le32(3),
+		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+	/* host->target HTT */
+	{
+		.pipenum = __cpu_to_le32(4),
+		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
+		.nentries = __cpu_to_le32(256),
+		.nbytes_max = __cpu_to_le32(256),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.reserved = __cpu_to_le32(0),
+	},
+	/* Target -> host PKTLOG */
+	{
+		.pipenum = __cpu_to_le32(5),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+	/* Reserved for target autonomous HIF_memcpy */
+	{
+		.pipenum = __cpu_to_le32(6),
+		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(16384),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+	/* CE7 Reserved for CV Prefetch */
+	{
+		.pipenum = __cpu_to_le32(7),
+		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+	/* CE8 Reserved for target generic HIF memcpy */
+	{
+		.pipenum = __cpu_to_le32(8),
+		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(16384),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+	/* CE9 WMI logging/CFR/Spectral/Radar/ */
+	{
+		.pipenum = __cpu_to_le32(9),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+	/* Unused TBD */
+	{
+		.pipenum = __cpu_to_le32(10),
+		.pipedir = __cpu_to_le32(PIPEDIR_NONE),
+		.nentries = __cpu_to_le32(0),
+		.nbytes_max = __cpu_to_le32(0),
+		.flags = __cpu_to_le32(0),
+		.reserved = __cpu_to_le32(0),
+	},
+	/* Unused TBD */
+	{
+		.pipenum = __cpu_to_le32(11),
+		.pipedir = __cpu_to_le32(PIPEDIR_NONE),
+		.nentries = __cpu_to_le32(0),
+		.nbytes_max = __cpu_to_le32(0),
+		.flags = __cpu_to_le32(0),
+		.reserved = __cpu_to_le32(0),
+	},
+};
+
+static const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_ipq5332[] = {
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+		__cpu_to_le32(PIPEDIR_OUT),
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+		__cpu_to_le32(PIPEDIR_IN),
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+		__cpu_to_le32(PIPEDIR_OUT),
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+		__cpu_to_le32(PIPEDIR_IN),
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+		__cpu_to_le32(PIPEDIR_OUT),
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+		__cpu_to_le32(PIPEDIR_IN),
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+		__cpu_to_le32(PIPEDIR_OUT),
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+		__cpu_to_le32(PIPEDIR_IN),
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+		__cpu_to_le32(PIPEDIR_OUT),
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+		__cpu_to_le32(PIPEDIR_IN),
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+		__cpu_to_le32(PIPEDIR_OUT),
+		__cpu_to_le32(0),
+	},
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+		__cpu_to_le32(PIPEDIR_IN),
+		__cpu_to_le32(1),
+	},
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+		__cpu_to_le32(PIPEDIR_OUT),
+		__cpu_to_le32(0),
+	},
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+		__cpu_to_le32(PIPEDIR_IN),
+		__cpu_to_le32(1),
+	},
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+		__cpu_to_le32(PIPEDIR_OUT),
+		__cpu_to_le32(4),
+	},
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+		__cpu_to_le32(PIPEDIR_IN),
+		__cpu_to_le32(1),
+	},
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_PKT_LOG),
+		__cpu_to_le32(PIPEDIR_IN),
+		__cpu_to_le32(5),
+	},
+	{
+		__cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG),
+		__cpu_to_le32(PIPEDIR_IN),
+		__cpu_to_le32(9),
+	},
+	/* (Additions here) */
+
+	{ /* must be last */
+		__cpu_to_le32(0),
+		__cpu_to_le32(0),
+		__cpu_to_le32(0),
+	},
+};
+
+static struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
 	.tx  = {
 		ATH12K_TX_RING_MASK_0,
 		ATH12K_TX_RING_MASK_1,
 		ATH12K_TX_RING_MASK_2,
-		ATH12K_TX_RING_MASK_3,
+		0, 0, 0, 0, 0, 0, 0, 0,
+		ATH12K_TX_RING_MASK_4,
+		0, 0, 0
 	},
 	.rx_mon_dest = {
-		0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
 		ATH12K_RX_MON_RING_MASK_0,
 		ATH12K_RX_MON_RING_MASK_1,
 		ATH12K_RX_MON_RING_MASK_2,
+		0, 0, 0, 0
+	},
+	.rx = {
+		0, 0, 0, 0,
+		ATH12K_RX_RING_MASK_0,
+		ATH12K_RX_RING_MASK_1,
+		ATH12K_RX_RING_MASK_2,
+		ATH12K_RX_RING_MASK_3,
+		0, 0, 0, 0,
+		0, 0, 0
+	},
+	.rx_err = {
+		0, 0, 0,
+		ATH12K_RX_ERR_RING_MASK_0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0
+	},
+	.rx_wbm_rel = {
+		0, 0, 0,
+		ATH12K_RX_WBM_REL_RING_MASK_0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0
+	},
+	.reo_status = {
+		0, 0, 0,
+		ATH12K_REO_STATUS_RING_MASK_0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0
+	},
+	.host2rxdma = {
+		0, 0, 0,
+		ATH12K_HOST2RXDMA_RING_MASK_0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0
+	},
+	.tx_mon_dest = {
+		ATH12K_TX_MON_RING_MASK_0,
+		ATH12K_TX_MON_RING_MASK_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0
+	},
+	.ppe2tcl = {
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		ATH12K_PPE2TCL_RING_MASK_0, 0, 0
+	},
+	.reo2ppe = {
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, ATH12K_REO2PPE_RING_MASK_0, 0
+	},
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	.wbm2sw6_ppeds_tx_cmpln = {
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, ATH12K_PPE_WBM2SW_RELEASE_RING_MASK_0
+	},
+#endif
+	.umac_dp_reset = {
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		ATH12K_UMAC_RESET_INTR_MASK_0
+	},
+};
+
+static struct ath12k_hw_ring_mask ath12k_hw_ring_mask_ipq5332 = {
+	.tx  = {
+		ATH12K_TX_RING_MASK_0,
+		ATH12K_TX_RING_MASK_1,
+		ATH12K_TX_RING_MASK_2,
+		ATH12K_TX_RING_MASK_4,
+		0, 0, 0, 0, 0, 0, 0, 0,
+	},
+	.rx_mon_dest = {
+		0, 0, 0, 0, 0, 0, 0, 0,
+		ATH12K_RX_MON_RING_MASK_0,
+		0, 0, 0,
 	},
 	.rx = {
 		0, 0, 0, 0,
@@ -550,30 +889,36 @@
 		ATH12K_RX_RING_MASK_1,
 		ATH12K_RX_RING_MASK_2,
 		ATH12K_RX_RING_MASK_3,
+		0, 0, 0, 0,
 	},
 	.rx_err = {
 		0, 0, 0,
 		ATH12K_RX_ERR_RING_MASK_0,
+		0, 0, 0, 0, 0, 0, 0, 0,
 	},
 	.rx_wbm_rel = {
 		0, 0, 0,
 		ATH12K_RX_WBM_REL_RING_MASK_0,
+		0, 0, 0, 0, 0, 0, 0, 0,
 	},
 	.reo_status = {
 		0, 0, 0,
 		ATH12K_REO_STATUS_RING_MASK_0,
+		0, 0, 0, 0, 0, 0, 0, 0,
 	},
 	.host2rxdma = {
 		0, 0, 0,
 		ATH12K_HOST2RXDMA_RING_MASK_0,
+		0, 0, 0, 0, 0, 0, 0, 0,
 	},
 	.tx_mon_dest = {
 		ATH12K_TX_MON_RING_MASK_0,
 		ATH12K_TX_MON_RING_MASK_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0
 	},
 };
 
-static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
+static struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
 	.tx  = {
 		ATH12K_TX_RING_MASK_0,
 		ATH12K_TX_RING_MASK_2,
@@ -603,6 +948,70 @@
 	},
 };
 
+static struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn6432 = {
+	.tx  = {
+		ATH12K_TX_RING_MASK_0,
+		ATH12K_TX_RING_MASK_1,
+		ATH12K_TX_RING_MASK_2 | ATH12K_TX_RING_MASK_4,
+		0, 0, 0, 0, 0, 0, 0, 0,
+	},
+	.rx_mon_dest = {
+		0, 0,
+		ATH12K_RX_MON_RING_MASK_0,
+		ATH12K_RX_MON_RING_MASK_1,
+		ATH12K_RX_MON_RING_MASK_2,
+		0, 0, 0, 0, 0, 0, 0,
+	},
+	.rx = {
+		0, 0, 0,
+		ATH12K_RX_RING_MASK_0,
+		ATH12K_RX_RING_MASK_1,
+		ATH12K_RX_RING_MASK_2 | ATH12K_RX_RING_MASK_3,
+		0, 0, 0, 0, 0, 0,
+	},
+	.rx_err = {
+		0, 0,
+		ATH12K_RX_ERR_RING_MASK_0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0,
+	},
+	.rx_wbm_rel = {
+		0, 0,
+		ATH12K_RX_WBM_REL_RING_MASK_0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0,
+	},
+	.reo_status = {
+		0, 0,
+		ATH12K_REO_STATUS_RING_MASK_0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0,
+	},
+	.host2rxdma = {
+		0, 0,
+		ATH12K_HOST2RXDMA_RING_MASK_0,
+		ATH12K_HOST2RXDMA_RING_MASK_1,
+		ATH12K_HOST2RXDMA_RING_MASK_2,
+		0, 0, 0, 0, 0, 0, 0,
+	},
+	.tx_mon_dest = {
+		ATH12K_TX_MON_RING_MASK_0,
+		ATH12K_TX_MON_RING_MASK_1,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	},
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	.ppe2tcl = {
+		0, 0, 0, 0,
+		0, 0, ATH12K_PPE2TCL_RING_MASK_0, 0, 0, 0, 0, 0,
+	},
+	.reo2ppe = {
+		0, 0, 0, 0,
+		0, 0, 0, ATH12K_REO2PPE_RING_MASK_0, 0, 0, 0, 0,
+	},
+	.wbm2sw6_ppeds_tx_cmpln = {
+		0, 0, 0, 0,
+		0, 0, 0, 0, ATH12K_PPE_WBM2SW_RELEASE_RING_MASK_0, 0, 0, 0,
+	},
+#endif
+};
+
 static const struct ath12k_hw_regs qcn9274_v1_regs = {
 	/* SW2TCL(x) R0 ring configuration address */
 	.hal_tcl1_ring_id = 0x00000908,
@@ -615,34 +1024,14 @@
 	.hal_tcl1_ring_msi1_base_msb = 0x0000094c,
 	.hal_tcl1_ring_msi1_data = 0x00000950,
 	.hal_tcl_ring_base_lsb = 0x00000b58,
+	.hal_tcl1_ring_base_lsb = 0x00000900,
+	.hal_tcl1_ring_base_msb = 0x00000904,
+	.hal_tcl2_ring_base_lsb = 0x00000978,
+
 
 	/* TCL STATUS ring address */
 	.hal_tcl_status_ring_base_lsb = 0x00000d38,
 
-	.hal_wbm_idle_ring_base_lsb = 0x00000d0c,
-	.hal_wbm_idle_ring_misc_addr = 0x00000d1c,
-	.hal_wbm_r0_idle_list_cntl_addr = 0x00000210,
-	.hal_wbm_r0_idle_list_size_addr = 0x00000214,
-	.hal_wbm_scattered_ring_base_lsb = 0x00000220,
-	.hal_wbm_scattered_ring_base_msb = 0x00000224,
-	.hal_wbm_scattered_desc_head_info_ix0 = 0x00000230,
-	.hal_wbm_scattered_desc_head_info_ix1 = 0x00000234,
-	.hal_wbm_scattered_desc_tail_info_ix0 = 0x00000240,
-	.hal_wbm_scattered_desc_tail_info_ix1 = 0x00000244,
-	.hal_wbm_scattered_desc_ptr_hp_addr = 0x0000024c,
-
-	.hal_wbm_sw_release_ring_base_lsb = 0x0000034c,
-	.hal_wbm_sw1_release_ring_base_lsb = 0x000003c4,
-	.hal_wbm0_release_ring_base_lsb = 0x00000dd8,
-	.hal_wbm1_release_ring_base_lsb = 0x00000e50,
-
-	/* PCIe base address */
-	.pcie_qserdes_sysclk_en_sel = 0x01e0c0a8,
-	.pcie_pcs_osc_dtct_config_base = 0x01e0d45c,
-
-	/* PPE release ring address */
-	.hal_ppe_rel_ring_base = 0x0000043c,
-
 	/* REO DEST ring address */
 	.hal_reo2_ring_base = 0x0000055c,
 	.hal_reo1_misc_ctrl_addr = 0x00000b7c,
@@ -650,6 +1039,8 @@
 	.hal_reo1_sw_cookie_cfg1 = 0x00000054,
 	.hal_reo1_qdesc_lut_base0 = 0x00000058,
 	.hal_reo1_qdesc_lut_base1 = 0x0000005c,
+	.hal_reo1_qdesc_addr_read = 0x00000060,
+
 	.hal_reo1_ring_base_lsb = 0x000004e4,
 	.hal_reo1_ring_base_msb = 0x000004e8,
 	.hal_reo1_ring_id = 0x000004ec,
@@ -677,6 +1068,43 @@
 
 	/* REO status ring address */
 	.hal_reo_status_ring_base = 0x00000a84,
+
+	/* WBM idle link ring address */
+	.hal_wbm_idle_ring_base_lsb = 0x00000d0c,
+	.hal_wbm_idle_ring_misc_addr = 0x00000d1c,
+	.hal_wbm_r0_idle_list_cntl_addr = 0x00000210,
+	.hal_wbm_r0_idle_list_size_addr = 0x00000214,
+	.hal_wbm_scattered_ring_base_lsb = 0x00000220,
+	.hal_wbm_scattered_ring_base_msb = 0x00000224,
+	.hal_wbm_scattered_desc_head_info_ix0 = 0x00000230,
+	.hal_wbm_scattered_desc_head_info_ix1 = 0x00000234,
+	.hal_wbm_scattered_desc_tail_info_ix0 = 0x00000240,
+	.hal_wbm_scattered_desc_tail_info_ix1 = 0x00000244,
+	.hal_wbm_scattered_desc_ptr_hp_addr = 0x0000024c,
+
+	/* SW2WBM release ring address */
+	.hal_wbm_sw_release_ring_base_lsb = 0x0000034c,
+	.hal_wbm_sw1_release_ring_base_lsb = 0x000003c4,
+
+	/* WBM2SW release ring address */
+	.hal_wbm0_release_ring_base_lsb = 0x00000dd8,
+	.hal_wbm1_release_ring_base_lsb = 0x00000e50,
+
+	/* reo2ppe ring address */
+	.hal_reo2ppe_ring_base = 0x00000938,
+
+	/* ppe2tcl ring base address */
+	.hal_tcl_ppe2tcl_ring_base_lsb = 0x00000c48,
+
+	/* PCIe base address */
+	.pcie_qserdes_sysclk_en_sel = 0x01e0c0a8,
+	.pcie_pcs_osc_dtct_config_base = 0x01e0d45c,
+
+	/* CE base address */
+	.hal_umac_ce0_src_reg_base = 0x01b80000,
+	.hal_umac_ce0_dest_reg_base = 0x01b81000,
+	.hal_umac_ce1_src_reg_base = 0x01b82000,
+	.hal_umac_ce1_dest_reg_base = 0x01b83000,
 };
 
 static const struct ath12k_hw_regs qcn9274_v2_regs = {
@@ -691,10 +1119,52 @@
 	.hal_tcl1_ring_msi1_base_msb = 0x0000094c,
 	.hal_tcl1_ring_msi1_data = 0x00000950,
 	.hal_tcl_ring_base_lsb = 0x00000b58,
+	.hal_tcl1_ring_base_lsb = 0x00000900,
+	.hal_tcl1_ring_base_msb = 0x00000904,
+	.hal_tcl2_ring_base_lsb = 0x00000978,
 
 	/* TCL STATUS ring address */
 	.hal_tcl_status_ring_base_lsb = 0x00000d38,
 
+	/* REO DEST ring address */
+	.hal_reo2_ring_base = 0x00000578,
+	.hal_reo1_misc_ctrl_addr = 0x00000b9c,
+	.hal_reo1_sw_cookie_cfg0 = 0x0000006c,
+	.hal_reo1_sw_cookie_cfg1 = 0x00000070,
+	.hal_reo1_qdesc_lut_base0 = 0x00000074,
+	.hal_reo1_qdesc_lut_base1 = 0x00000078,
+	.hal_reo1_qdesc_addr_read = 0x0000007c,
+	.hal_reo1_qdesc_max_peerid = 0x00000088,
+	.hal_reo1_ring_base_lsb = 0x00000500,
+	.hal_reo1_ring_base_msb = 0x00000504,
+	.hal_reo1_ring_id = 0x00000508,
+	.hal_reo1_ring_misc = 0x00000510,
+	.hal_reo1_ring_hp_addr_lsb = 0x00000514,
+	.hal_reo1_ring_hp_addr_msb = 0x00000518,
+	.hal_reo1_ring_producer_int_setup = 0x00000524,
+	.hal_reo1_ring_msi1_base_lsb = 0x00000548,
+	.hal_reo1_ring_msi1_base_msb = 0x0000054C,
+	.hal_reo1_ring_msi1_data = 0x00000550,
+	.hal_reo1_aging_thres_ix0 = 0x00000B28,
+	.hal_reo1_aging_thres_ix1 = 0x00000B2C,
+	.hal_reo1_aging_thres_ix2 = 0x00000B30,
+	.hal_reo1_aging_thres_ix3 = 0x00000B34,
+
+	/* REO Exception ring address */
+	.hal_reo2_sw0_ring_base = 0x000008c0,
+
+	/* REO Reinject ring address */
+	.hal_sw2reo_ring_base = 0x00000320,
+	.hal_sw2reo1_ring_base = 0x00000398,
+
+	/* REO cmd ring address */
+	.hal_reo_cmd_ring_base = 0x000002A8,
+
+	/* REO status ring address */
+	.hal_reo_status_ring_base = 0x00000aa0,
+
+	/* NOTE(review): duplicate .hal_reo1_qdesc_addr_read entry removed (set above) */
+
 	/* WBM idle link ring address */
 	.hal_wbm_idle_ring_base_lsb = 0x00000d3c,
 	.hal_wbm_idle_ring_misc_addr = 0x00000d4c,
@@ -716,12 +1186,41 @@
 	.hal_wbm0_release_ring_base_lsb = 0x00000e08,
 	.hal_wbm1_release_ring_base_lsb = 0x00000e80,
 
+	/* reo2ppe ring base address */
+	.hal_reo2ppe_ring_base = 0x00000938,
+
+	/* ppe2tcl ring base address */
+	.hal_tcl_ppe2tcl_ring_base_lsb = 0x00000c48,
+
 	/* PCIe base address */
 	.pcie_qserdes_sysclk_en_sel = 0x01e0c0a8,
 	.pcie_pcs_osc_dtct_config_base = 0x01e0d45c,
 
-	/* PPE release ring address */
-	.hal_ppe_rel_ring_base = 0x0000046c,
+	/* CE base address */
+	.hal_umac_ce0_src_reg_base = 0x01b80000,
+	.hal_umac_ce0_dest_reg_base = 0x01b81000,
+	.hal_umac_ce1_src_reg_base = 0x01b82000,
+	.hal_umac_ce1_dest_reg_base = 0x01b83000,
+};
+
+const struct ath12k_hw_regs ipq5332_regs = {
+	/* SW2TCL(x) R0 ring configuration address */
+	.hal_tcl1_ring_id = 0x00000918,
+	.hal_tcl1_ring_misc = 0x00000920,
+	.hal_tcl1_ring_tp_addr_lsb = 0x0000092c,
+	.hal_tcl1_ring_tp_addr_msb = 0x00000930,
+	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000940,
+	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000944,
+	.hal_tcl1_ring_msi1_base_lsb = 0x00000958,
+	.hal_tcl1_ring_msi1_base_msb = 0x0000095c,
+	.hal_tcl1_ring_base_lsb = 0x00000910,
+	.hal_tcl1_ring_base_msb = 0x00000914,
+	.hal_tcl1_ring_msi1_data = 0x00000960,
+	.hal_tcl2_ring_base_lsb = 0x00000988,
+	.hal_tcl_ring_base_lsb = 0x00000b68,
+
+	/* TCL STATUS ring address */
+	.hal_tcl_status_ring_base_lsb = 0x00000d48,
 
 	/* REO DEST ring address */
 	.hal_reo2_ring_base = 0x00000578,
@@ -730,6 +1229,8 @@
 	.hal_reo1_sw_cookie_cfg1 = 0x00000070,
 	.hal_reo1_qdesc_lut_base0 = 0x00000074,
 	.hal_reo1_qdesc_lut_base1 = 0x00000078,
+	.hal_reo1_qdesc_addr_read = 0x0000007c,
+	.hal_reo1_qdesc_max_peerid = 0x00000088,
 	.hal_reo1_ring_base_lsb = 0x00000500,
 	.hal_reo1_ring_base_msb = 0x00000504,
 	.hal_reo1_ring_id = 0x00000508,
@@ -757,6 +1258,33 @@
 
 	/* REO status ring address */
 	.hal_reo_status_ring_base = 0x00000aa0,
+
+	/* WBM idle link ring address */
+	.hal_wbm_idle_ring_base_lsb = 0x00000d3c,
+	.hal_wbm_idle_ring_misc_addr = 0x00000d4c,
+	.hal_wbm_r0_idle_list_cntl_addr = 0x00000240,
+	.hal_wbm_r0_idle_list_size_addr = 0x00000244,
+	.hal_wbm_scattered_ring_base_lsb = 0x00000250,
+	.hal_wbm_scattered_ring_base_msb = 0x00000254,
+	.hal_wbm_scattered_desc_head_info_ix0 = 0x00000260,
+	.hal_wbm_scattered_desc_head_info_ix1	= 0x00000264,
+	.hal_wbm_scattered_desc_tail_info_ix0 = 0x00000270,
+	.hal_wbm_scattered_desc_tail_info_ix1 = 0x00000274,
+	.hal_wbm_scattered_desc_ptr_hp_addr = 0x0000027c,
+
+	/* SW2WBM release ring address */
+	.hal_wbm_sw_release_ring_base_lsb = 0x0000037c,
+
+	/* WBM2SW release ring address */
+	.hal_wbm0_release_ring_base_lsb = 0x00000e08,
+	.hal_wbm1_release_ring_base_lsb = 0x00000e80,
+
+	/* CE base address */
+	.hal_umac_ce0_src_reg_base = 0x00740000,
+	.hal_umac_ce0_dest_reg_base = 0x00741000,
+	.hal_umac_ce1_src_reg_base = 0x00742000,
+	.hal_umac_ce1_dest_reg_base = 0x00743000,
+
 };
 
 static const struct ath12k_hw_regs wcn7850_regs = {
@@ -771,6 +1299,9 @@
 	.hal_tcl1_ring_msi1_base_msb = 0x0000094c,
 	.hal_tcl1_ring_msi1_data = 0x00000950,
 	.hal_tcl_ring_base_lsb = 0x00000b58,
+	.hal_tcl1_ring_base_lsb = 0x00000900,
+	.hal_tcl1_ring_base_msb = 0x00000904,
+	.hal_tcl2_ring_base_lsb = 0x00000978,
 
 	/* TCL STATUS ring address */
 	.hal_tcl_status_ring_base_lsb = 0x00000d38,
@@ -796,9 +1327,6 @@
 	.pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
 	.pcie_pcs_osc_dtct_config_base = 0x01e0f45c,
 
-	/* PPE release ring address */
-	.hal_ppe_rel_ring_base = 0x0000043c,
-
 	/* REO DEST ring address */
 	.hal_reo2_ring_base = 0x0000055c,
 	.hal_reo1_misc_ctrl_addr = 0x00000b7c,
@@ -833,6 +1361,105 @@
 
 	/* REO status ring address */
 	.hal_reo_status_ring_base = 0x00000a84,
+
+	/* CE base address */
+	.hal_umac_ce0_src_reg_base = 0x01b80000,
+	.hal_umac_ce0_dest_reg_base = 0x01b81000,
+	.hal_umac_ce1_src_reg_base = 0x01b82000,
+	.hal_umac_ce1_dest_reg_base = 0x01b83000,
+};
+
+const struct ath12k_hw_regs qcn6432_regs = {
+	/* SW2TCL(x) R0 ring configuration address */
+	.hal_tcl1_ring_id = 0x00000918,
+	.hal_tcl1_ring_misc = 0x00000920,
+	.hal_tcl1_ring_tp_addr_lsb = 0x0000092c,
+	.hal_tcl1_ring_tp_addr_msb = 0x00000930,
+	.hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000940,
+	.hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000944,
+	.hal_tcl1_ring_msi1_base_lsb = 0x00000958,
+	.hal_tcl1_ring_msi1_base_msb = 0x0000095c,
+	.hal_tcl1_ring_base_lsb = 0x00000910,
+	.hal_tcl1_ring_base_msb = 0x00000914,
+	.hal_tcl1_ring_msi1_data = 0x00000960,
+	.hal_tcl2_ring_base_lsb = 0x00000988,
+	.hal_tcl_ring_base_lsb = 0x00000b68,
+
+	/* TCL STATUS ring address */
+	.hal_tcl_status_ring_base_lsb = 0x00000d48,
+
+	/* REO DEST ring address */
+	.hal_reo2_ring_base = 0x00000578,
+	.hal_reo1_misc_ctrl_addr = 0x00000b9c,
+	.hal_reo1_sw_cookie_cfg0 = 0x0000006c,
+	.hal_reo1_sw_cookie_cfg1 = 0x00000070,
+	.hal_reo1_qdesc_lut_base0 = 0x00000074,
+	.hal_reo1_qdesc_lut_base1 = 0x00000078,
+	.hal_reo1_qdesc_addr_read = 0x0000007c,
+	.hal_reo1_qdesc_max_peerid = 0x00000088,
+	.hal_reo1_ring_base_lsb = 0x00000500,
+	.hal_reo1_ring_base_msb = 0x00000504,
+	.hal_reo1_ring_id = 0x00000508,
+	.hal_reo1_ring_misc = 0x00000510,
+	.hal_reo1_ring_hp_addr_lsb = 0x00000514,
+	.hal_reo1_ring_hp_addr_msb = 0x00000518,
+	.hal_reo1_ring_producer_int_setup = 0x00000524,
+	.hal_reo1_ring_msi1_base_lsb = 0x00000548,
+	.hal_reo1_ring_msi1_base_msb = 0x0000054C,
+	.hal_reo1_ring_msi1_data = 0x00000550,
+	.hal_reo1_aging_thres_ix0 = 0x00000B28,
+	.hal_reo1_aging_thres_ix1 = 0x00000B2C,
+	.hal_reo1_aging_thres_ix2 = 0x00000B30,
+	.hal_reo1_aging_thres_ix3 = 0x00000B34,
+
+	/* REO Exception ring address */
+	.hal_reo2_sw0_ring_base = 0x000008c0,
+
+	/* REO Reinject ring address */
+	.hal_sw2reo_ring_base = 0x00000320,
+	.hal_sw2reo1_ring_base = 0x00000398,
+
+	/* REO cmd ring address */
+	.hal_reo_cmd_ring_base = 0x000002A8,
+
+	/* REO status ring address */
+	.hal_reo_status_ring_base = 0x00000aa0,
+
+	/* WBM idle link ring address */
+	.hal_wbm_idle_ring_base_lsb = 0x00000d3c,
+	.hal_wbm_idle_ring_misc_addr = 0x00000d4c,
+	.hal_wbm_r0_idle_list_cntl_addr = 0x00000240,
+	.hal_wbm_r0_idle_list_size_addr = 0x00000244,
+	.hal_wbm_scattered_ring_base_lsb = 0x00000250,
+	.hal_wbm_scattered_ring_base_msb = 0x00000254,
+	.hal_wbm_scattered_desc_head_info_ix0 = 0x00000260,
+	.hal_wbm_scattered_desc_head_info_ix1 = 0x00000264,
+	.hal_wbm_scattered_desc_tail_info_ix0 = 0x00000270,
+	.hal_wbm_scattered_desc_tail_info_ix1 = 0x00000274,
+	.hal_wbm_scattered_desc_ptr_hp_addr = 0x0000027c,
+
+	/* SW2WBM release ring address */
+	.hal_wbm_sw_release_ring_base_lsb = 0x0000037c,
+
+	/* WBM2SW release ring address */
+	.hal_wbm0_release_ring_base_lsb = 0x00000e08,
+	.hal_wbm1_release_ring_base_lsb = 0x00000e80,
+
+	/* reo2ppe ring base address */
+	.hal_reo2ppe_ring_base = 0x00000938,
+
+	/* ppe2tcl ring base address */
+	.hal_tcl_ppe2tcl_ring_base_lsb = 0x00000c58,
+
+	/* PCIe base address */
+	.pcie_qserdes_sysclk_en_sel = 0x01e0c0a8,
+	.pcie_pcs_osc_dtct_config_base = 0x01e0d45c,
+
+	/* CE base address */
+	.hal_umac_ce0_src_reg_base = 0x01B80000,
+	.hal_umac_ce0_dest_reg_base = 0x01B81000,
+	.hal_umac_ce1_src_reg_base = 0x01B82000,
+	.hal_umac_ce1_dest_reg_base = 0x01B83000,
 };
 
 static const struct ath12k_hw_hal_params ath12k_hw_hal_params_qcn9274 = {
@@ -841,7 +1468,8 @@
 			    HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN |
 			    HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
 			    HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
-			    HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
+			    HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN |
+			    HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW6_EN,
 };
 
 static const struct ath12k_hw_hal_params ath12k_hw_hal_params_wcn7850 = {
@@ -852,12 +1480,42 @@
 			    HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
 };
 
-static const struct ath12k_hw_params ath12k_hw_params[] = {
+const struct ce_ie_addr ath12k_ce_ie_addr_ipq5332 = {
+	.ie1_reg_addr = CE_HOST_IE_ADDRESS,
+	.ie2_reg_addr = CE_HOST_IE_2_ADDRESS,
+	.ie3_reg_addr = CE_HOST_IE_3_ADDRESS,
+};
+
+const struct ce_remap ath12k_ce_remap_ipq5332 = {
+	.base = HAL_IPQ5332_CE_WFSS_REG_BASE,
+	.size = HAL_IPQ5332_CE_SIZE,
+};
+
+const struct cmem_remap ath12k_cmem_ipq5332 = {
+	.base = HAL_IPQ5332_CMEM_REG_BASE,
+	.size = HAL_IPQ5332_CMEM_SIZE,
+};
+
+const struct pmm_remap ath12k_pmm_ipq5332 = {
+	.base = HAL_IPQ5332_PMM_REG_BASE,
+	.size = HAL_IPQ5332_PMM_SIZE,
+};
+
+static const struct ath12k_hw_hal_params ath12k_hw_hal_params_ipq5332 = {
+	.rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
+	.wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN |
+			    HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN |
+			    HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
+			    HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
+			    HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
+};
+
+static struct ath12k_hw_params ath12k_hw_params[] = {
 	{
 		.name = "qcn9274 hw1.0",
 		.hw_rev = ATH12K_HW_QCN9274_HW10,
 		.fw = {
-			.dir = "QCN9274/hw1.0",
+			.dir = "QCN92XX/hw1.0",
 			.board_size = 256 * 1024,
 			.cal_offset = 128 * 1024,
 		},
@@ -869,6 +1527,8 @@
 		.hw_ops = &qcn9274_ops,
 		.ring_mask = &ath12k_hw_ring_mask_qcn9274,
 		.regs = &qcn9274_v1_regs,
+		.ext_irq_grp_num_max = 12,
+		.route_wbm_release = 3,
 
 		.host_ce_config = ath12k_host_ce_config_qcn9274,
 		.ce_count = 16,
@@ -879,21 +1539,23 @@
 
 		.hal_params = &ath12k_hw_hal_params_qcn9274,
 
-		.rxdma1_enable = false,
+		.rxdma1_enable = true,
 		.num_rxmda_per_pdev = 1,
 		.num_rxdma_dst_ring = 0,
 		.rx_mac_buf_ring = false,
 		.vdev_start_delay = false,
 
 		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
-					BIT(NL80211_IFTYPE_AP),
-		.supports_monitor = false,
+					BIT(NL80211_IFTYPE_AP) |
+					BIT(NL80211_IFTYPE_MESH_POINT),
+		.supports_monitor = true,
 
 		.idle_ps = false,
+		.cold_boot_calib = false,
 		.download_calib = true,
 		.supports_suspend = false,
 		.tcl_ring_retry = true,
-		.reoq_lut_support = false,
+		.reoq_lut_support = true,
 		.supports_shadow_regs = false,
 
 		.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
@@ -906,6 +1568,31 @@
 
 		.hal_ops = &hal_qcn9274_ops,
 
+		.qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01),
+
+		.fix_l1ss = false,
+
+		.supports_ap_ps = true,
+		.alloc_cacheable_memory = true,
+		.spectral = {
+			.fft_sz = 0,
+			.fft_pad_sz = 0,
+			.summary_pad_sz = 0,
+			.fft_hdr_len = 0,
+			.max_fft_bins = 0,
+			.fragment_160mhz = false,
+		},
+		.max_mlo_peer = ATH12K_MAX_MLO_PEER,
+		.num_local_link = 0,
+		.m3_fw_support = true,
+		.compact_rx_tlv = true,
+		.send_platform_model = false,
+		.en_fwlog = true,
+		.support_umac_reset = false,
+		.en_qdsslog = true,
+		.supports_tx_monitor = false,
+		.fw_mem_mode = ATH12K_QMI_TARGET_MEM_MODE,
+		.is_plink_preferable = true,
 	},
 	{
 		.name = "wcn7850 hw2.0",
@@ -925,6 +1612,8 @@
 		.hw_ops = &wcn7850_ops,
 		.ring_mask = &ath12k_hw_ring_mask_wcn7850,
 		.regs = &wcn7850_regs,
+		.ext_irq_grp_num_max = 12,
+		.route_wbm_release = 3,
 
 		.host_ce_config = ath12k_host_ce_config_wcn7850,
 		.ce_count = 9,
@@ -960,6 +1649,22 @@
 		.wmi_init = ath12k_wmi_init_wcn7850,
 
 		.hal_ops = &hal_wcn7850_ops,
+
+		.qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01) |
+					   BIT(CNSS_PCIE_PERST_NO_PULL_V01),
+
+		.fix_l1ss = false,
+		.supports_ap_ps = true,
+		.credit_flow = false,
+		.m3_fw_support = true,
+		.compact_rx_tlv = false,
+		.send_platform_model = false,
+		.en_fwlog = true,
+		.support_umac_reset = false,
+		.en_qdsslog = true,
+		.supports_tx_monitor = false,
+		.fw_mem_mode = ATH12K_QMI_TARGET_MEM_MODE,
+		.is_plink_preferable = true,
 	},
 	{
 		.name = "qcn9274 hw2.0",
@@ -969,7 +1674,7 @@
 			.board_size = 256 * 1024,
 			.cal_offset = 128 * 1024,
 		},
-		.max_radios = 1,
+		.max_radios = 2,
 		.single_pdev_only = false,
 		.qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274,
 		.internal_sleep_clock = false,
@@ -977,6 +1682,8 @@
 		.hw_ops = &qcn9274_ops,
 		.ring_mask = &ath12k_hw_ring_mask_qcn9274,
 		.regs = &qcn9274_v2_regs,
+		.ext_irq_grp_num_max = 12,
+		.route_wbm_release = 3,
 
 		.host_ce_config = ath12k_host_ce_config_qcn9274,
 		.ce_count = 16,
@@ -987,21 +1694,23 @@
 
 		.hal_params = &ath12k_hw_hal_params_qcn9274,
 
-		.rxdma1_enable = false,
+		.rxdma1_enable = true,
 		.num_rxmda_per_pdev = 1,
 		.num_rxdma_dst_ring = 0,
 		.rx_mac_buf_ring = false,
 		.vdev_start_delay = false,
 
 		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
-					BIT(NL80211_IFTYPE_AP),
-		.supports_monitor = false,
+					BIT(NL80211_IFTYPE_AP) |
+					BIT(NL80211_IFTYPE_MESH_POINT),
+		.supports_monitor = true,
 
 		.idle_ps = false,
+		.cold_boot_calib = true,
 		.download_calib = true,
 		.supports_suspend = false,
 		.tcl_ring_retry = true,
-		.reoq_lut_support = false,
+		.reoq_lut_support = true,
 		.supports_shadow_regs = false,
 
 		.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
@@ -1013,9 +1722,261 @@
 		.wmi_init = ath12k_wmi_init_qcn9274,
 
 		.hal_ops = &hal_qcn9274_ops,
+
+		.qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01),
+
+		.fix_l1ss = false,
+		.supports_ap_ps = true,
+		.credit_flow = false,
+		.alloc_cacheable_memory = true,
+		.spectral = {
+			.fft_sz = 7,
+			.fft_pad_sz = 0,
+			.summary_pad_sz = 16,
+			.fft_hdr_len = 24,
+			.max_fft_bins = 512,
+			.fragment_160mhz = true,
+		},
+		.max_mlo_peer = ATH12K_MAX_MLO_PEER,
+		.num_local_link = 0,
+		.m3_fw_support = true,
+		.compact_rx_tlv = true,
+		.send_platform_model = false,
+		.en_fwlog = true,
+		.support_umac_reset = true,
+		.en_qdsslog = false,
+		.supports_tx_monitor = false,
+		.fw_mem_mode = ATH12K_QMI_TARGET_MEM_MODE,
+		.is_plink_preferable = true,
+	},
+	{
+		.name = "ipq5332 hw1.0",
+		.hw_rev = ATH12K_HW_IPQ5332_HW10,
+		.bdf_addr = 0x4B500000,
+		.fw = {
+			.dir = "IPQ5332/hw1.0",
+			.board_size = 256 * 1024,
+			.cal_offset = 128 * 1024,
+		},
+		.max_radios = 1,
+		.single_pdev_only = false,
+		.qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ5332,
+		.internal_sleep_clock = false,
+
+		.hw_ops = &ipq5332_ops,
+		.regs = &ipq5332_regs,
+		.ext_irq_grp_num_max = 12,
+		.ring_mask = &ath12k_hw_ring_mask_ipq5332,
+		.route_wbm_release = 3,
+
+		.host_ce_config = ath12k_host_ce_config_ipq5332,
+		.ce_count = 12,
+		.target_ce_config = ath12k_target_ce_config_wlan_ipq5332,
+		.target_ce_count = 12,
+		.svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_ipq5332,
+		.svc_to_ce_map_len = 19,
+
+		.hal_params = &ath12k_hw_hal_params_ipq5332,
+
+		.rxdma1_enable = true,
+		.num_rxmda_per_pdev = 1,
+		.num_rxdma_dst_ring = 0,
+		.rx_mac_buf_ring = false,
+		.vdev_start_delay = false,
+		.fixed_fw_mem = false,
+		.smp2p_wow_exit = false,
+
+		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
+					BIT(NL80211_IFTYPE_AP) |
+					BIT(NL80211_IFTYPE_MESH_POINT),
+		.supports_monitor = true,
+
+		.idle_ps = false,
+		.cold_boot_calib = true,
+		.download_calib = true,
+		.supports_suspend = false,
+		.tcl_ring_retry = true,
+		.reoq_lut_support = true,
+		.supports_shadow_regs = false,
+
+		.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
+		.num_tcl_banks = 48,
+		.max_tx_ring = 4,
+
+
+		.wmi_init = ath12k_wmi_init_ipq5332,
+
+		.hal_ops = &hal_qcn9274_ops,
+
+		.supports_ap_ps = true,
+		.credit_flow = false,
+		.alloc_cacheable_memory = true,
+		.spectral = {
+			.fft_sz = 7,
+			.fft_pad_sz = 0,
+			.summary_pad_sz = 16,
+			.fft_hdr_len = 24,
+			.max_fft_bins = 512,
+			.fragment_160mhz = false,
+		},
+		.max_mlo_peer = ATH12K_MAX_MLO_PEER,
+		.num_local_link = 0,
+		.m3_fw_support = false,
+		.compact_rx_tlv = true,
+		.ce_ie_addr = &ath12k_ce_ie_addr_ipq5332,
+		.ce_remap = &ath12k_ce_remap_ipq5332,
+		.cmem_remap = &ath12k_cmem_ipq5332,
+		.pmm_remap = &ath12k_pmm_ipq5332,
+		.send_platform_model = true,
+		.en_fwlog = true,
+		.support_umac_reset = false,
+		.support_ce_manual_poll = true,
+		.en_qdsslog = true,
+		.supports_tx_monitor = false,
+		.fw_mem_mode = ATH12K_QMI_TARGET_MEM_MODE,
+		.is_plink_preferable = false,
+	},
+	{
+		.name = "qcn6432 hw1.0",
+		.hw_rev = ATH12K_HW_QCN6432_HW10,
+		.fw = {
+			.dir = "QCN6432/hw1.0",
+			.board_size = 256 * 1024,
+			.cal_offset = 128 * 1024,
+		},
+		.max_radios = 1,
+		.single_pdev_only = false,
+		.qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN6432,
+		.internal_sleep_clock = false,
+
+		.hw_ops = &ipq5332_ops,
+		.regs = &qcn6432_regs,
+		.ext_irq_grp_num_max = 6,
+		.ring_mask = &ath12k_hw_ring_mask_qcn6432,
+		.route_wbm_release = 2,
+
+		.host_ce_config = ath12k_host_ce_config_ipq5332,
+		.ce_count = 12,
+		.target_ce_config = ath12k_target_ce_config_wlan_ipq5332,
+		.target_ce_count = 12,
+		.svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_ipq5332,
+		.svc_to_ce_map_len = 19,
+
+		.hal_params = &ath12k_hw_hal_params_ipq5332,
+
+		.rxdma1_enable = true,
+		.num_rxmda_per_pdev = 1,
+		.num_rxdma_dst_ring = 0,
+		.rx_mac_buf_ring = false,
+		.vdev_start_delay = false,
+		.fixed_fw_mem = false,
+		.smp2p_wow_exit = false,
+
+		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
+					BIT(NL80211_IFTYPE_AP) |
+					BIT(NL80211_IFTYPE_MESH_POINT),
+		.supports_monitor = true,
+
+		.idle_ps = false,
+		.cold_boot_calib = true,
+		.download_calib = true,
+		.supports_suspend = false,
+		.tcl_ring_retry = true,
+		.reoq_lut_support = true,
+		.supports_shadow_regs = false,
+
+		.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
+		.num_tcl_banks = 48,
+		.max_tx_ring = 4,
+		.static_window_map = true,
+
+		.wmi_init = ath12k_wmi_init_ipq5332,
+
+		.hal_ops = &hal_qcn6432_ops,
+
+		.supports_ap_ps = true,
+		.credit_flow = false,
+		.alloc_cacheable_memory = true,
+		.spectral = {
+			.fft_sz = 7,
+			.fft_pad_sz = 0,
+			.summary_pad_sz = 16,
+			.fft_hdr_len = 24,
+			.max_fft_bins = 512,
+			.fragment_160mhz = false,
+		},
+		.max_mlo_peer = ATH12K_MAX_MLO_PEER,
+		.num_local_link = 0,
+		.m3_fw_support = false,
+		.compact_rx_tlv = true,
+		.send_platform_model = true,
+		.en_fwlog = false,
+		.support_umac_reset = false,
+		.en_qdsslog = true,
+		.fw_mem_mode = ATH12K_QMI_TARGET_MEM_MODE,
+		.is_plink_preferable = true,
 	},
 };
 
+void ath12k_dp_reset_interrupt_mask(struct ath12k_base *ab)
+{
+	struct ath12k_hw_ring_mask *ring_mask = ab->hw_params->ring_mask;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_mlo_dp_umac_reset *umac_reset = &ag->mlo_umac_reset;
+	int i;
+
+	if (ag->mlo_umac_reset.is_intr_bkup)
+		return;
+
+	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX(ab); i++) {
+		umac_reset->intr_bkup.tx[i] = ring_mask->tx[i];
+
+		umac_reset->intr_bkup.rx_mon_dest[i] = ring_mask->rx_mon_dest[i];
+		umac_reset->intr_bkup.rx[i] = ring_mask->rx[i];
+		umac_reset->intr_bkup.rx_err[i] = ring_mask->rx_err[i];
+		umac_reset->intr_bkup.rx_wbm_rel[i] = ring_mask->rx_wbm_rel[i];
+		umac_reset->intr_bkup.reo_status[i] = ring_mask->reo_status[i];
+		umac_reset->intr_bkup.host2rxdma[i] = ring_mask->host2rxdma[i];
+		umac_reset->intr_bkup.tx_mon_dest[i] = ring_mask->tx_mon_dest[i];
+
+		ring_mask->tx[i] = 0;
+		ring_mask->rx_mon_dest[i] = 0;
+		ring_mask->rx[i] = 0;
+		ring_mask->rx_err[i] = 0;
+		ring_mask->rx_wbm_rel[i] = 0;
+		ring_mask->reo_status[i] = 0;
+		ring_mask->host2rxdma[i] = 0;
+		ring_mask->tx_mon_dest[i] = 0;
+	}
+
+	ag->mlo_umac_reset.is_intr_bkup = true;
+}
+
+void ath12k_dp_restore_interrupt_mask(struct ath12k_base *ab)
+{
+	struct ath12k_hw_ring_mask *ring_mask = ab->hw_params->ring_mask;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_mlo_dp_umac_reset *umac_reset = &ag->mlo_umac_reset;
+	int i;
+
+	if (!ag->mlo_umac_reset.is_intr_bkup)
+		return;
+
+	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX(ab); i++) {
+		ring_mask->tx[i] = umac_reset->intr_bkup.tx[i];
+		ring_mask->rx_mon_dest[i] = umac_reset->intr_bkup.rx_mon_dest[i];
+		ring_mask->rx[i] = umac_reset->intr_bkup.rx[i];
+		ring_mask->rx_err[i] = umac_reset->intr_bkup.rx_err[i];
+		ring_mask->rx_wbm_rel[i] = umac_reset->intr_bkup.rx_wbm_rel[i];
+		ring_mask->reo_status[i] = umac_reset->intr_bkup.reo_status[i];
+		ring_mask->host2rxdma[i] = umac_reset->intr_bkup.host2rxdma[i];
+		ring_mask->tx_mon_dest[i] = umac_reset->intr_bkup.tx_mon_dest[i];
+	}
+
+	ag->mlo_umac_reset.is_intr_bkup = false;
+
+}
+
 int ath12k_hw_init(struct ath12k_base *ab)
 {
 	const struct ath12k_hw_params *hw_params = NULL;
@@ -1039,3 +2000,4 @@
 
 	return 0;
 }
+
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/hw.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hw.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/hw.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/hw.h	2024-03-18 14:40:14.851741333 +0100
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH12K_HW_H
@@ -14,10 +14,17 @@
 
 /* Target configuration defines */
 
+#ifdef CONFIG_ATH12K_MEM_PROFILE_512M
+/* Num VDEVS per radio */
+#define TARGET_NUM_VDEVS	(8 + 1)
+#define ATH12K_QMI_TARGET_MEM_MODE	ATH12K_QMI_TARGET_MEM_MODE_512M
+#else
 /* Num VDEVS per radio */
 #define TARGET_NUM_VDEVS	(16 + 1)
+#define ATH12K_QMI_TARGET_MEM_MODE	ATH12K_QMI_TARGET_MEM_MODE_DEFAULT
+#endif
 
-#define TARGET_NUM_PEERS_PDEV	(512 + TARGET_NUM_VDEVS)
+#define TARGET_NUM_PEERS_PDEV	(TARGET_NUM_STATIONS + TARGET_NUM_VDEVS)
 
 /* Num of peers for Single Radio mode */
 #define TARGET_NUM_PEERS_SINGLE		(TARGET_NUM_PEERS_PDEV)
@@ -29,7 +36,7 @@
 #define TARGET_NUM_PEERS_DBS_SBS	(3 * TARGET_NUM_PEERS_PDEV)
 
 /* Max num of stations (per radio) */
-#define TARGET_NUM_STATIONS	512
+#define TARGET_NUM_STATIONS	128
 
 #define TARGET_NUM_PEERS(x)	TARGET_NUM_PEERS_##x
 #define TARGET_NUM_PEER_KEYS	2
@@ -66,8 +73,14 @@
 #define TARGET_NUM_WDS_ENTRIES		32
 #define TARGET_DMA_BURST_SIZE		1
 #define TARGET_RX_BATCHMODE		1
+#define TARGET_EMA_MAX_PROFILE_PERIOD	8
+#define TARGET_RX_PEER_METADATA_VER_V1A 2
+#define TARGET_RX_PEER_METADATA_VER_V1B 3
+
+#define ATH12K_HW_DEFAULT_QUEUE		0
 
 #define ATH12K_HW_MAX_QUEUES		4
+#define ATH12K_HW_MAX_QUEUES_PPEDS	1
 #define ATH12K_QUEUE_LEN		4096
 
 #define ATH12K_HW_RATECODE_CCK_SHORT_PREAM_MASK  0x4
@@ -76,11 +89,20 @@
 
 #define ATH12K_BOARD_MAGIC		"QCA-ATH12K-BOARD"
 #define ATH12K_BOARD_API2_FILE		"board-2.bin"
+#define ATH12K_BOARD_OVERRIDE_FILE	"board-id-override.txt"
 #define ATH12K_DEFAULT_BOARD_FILE	"board.bin"
 #define ATH12K_DEFAULT_CAL_FILE		"caldata.bin"
+#define ATH12K_QMI_DEF_CAL_FILE_PREFIX  "caldata_"
+#define ATH12K_QMI_DEF_CAL_FILE_SUFFIX  ".bin"
 #define ATH12K_AMSS_FILE		"amss.bin"
+#define ATH12K_AMSS_DUALMAC_FILE	"amss_dualmac.bin"
 #define ATH12K_M3_FILE			"m3.bin"
 #define ATH12K_REGDB_FILE_NAME		"regdb.bin"
+#define ATH12K_REGDB_BINARY		"regdb"
+#define ATH12K_RXGAINLUT_FILE_PREFIX	"rxgainlut.b"
+#define ATH12K_RXGAINLUT_FILE		"rxgainlut.bin"
+#define ATH12K_DEFAULT_ID		255
+#define ATH12K_FW_CFG_FILE		"firmware.dat"
 
 enum ath12k_hw_rate_cck {
 	ATH12K_HW_RATE_CCK_LP_11M = 0,
@@ -105,24 +127,34 @@
 
 enum ath12k_bus {
 	ATH12K_BUS_PCI,
+	ATH12K_BUS_AHB,
+	ATH12K_BUS_HYBRID,
 };
 
-#define ATH12K_EXT_IRQ_GRP_NUM_MAX 11
-
+/* Regular 12 Host DP interrupts + 3 PPEDS interrupts + 1 DP UMAC RESET interrupt*/
+#define ATH12K_EXT_IRQ_DP_NUM_VECTORS 16
+#define ATH12K_EXT_IRQ_GRP_NUM_MAX(ab)\
+	((ab)->hw_params->ext_irq_grp_num_max)
 struct hal_rx_desc;
 struct hal_tcl_data_cmd;
 struct htt_rx_ring_tlv_filter;
 enum hal_encrypt_type;
 
 struct ath12k_hw_ring_mask {
-	u8 tx[ATH12K_EXT_IRQ_GRP_NUM_MAX];
-	u8 rx_mon_dest[ATH12K_EXT_IRQ_GRP_NUM_MAX];
-	u8 rx[ATH12K_EXT_IRQ_GRP_NUM_MAX];
-	u8 rx_err[ATH12K_EXT_IRQ_GRP_NUM_MAX];
-	u8 rx_wbm_rel[ATH12K_EXT_IRQ_GRP_NUM_MAX];
-	u8 reo_status[ATH12K_EXT_IRQ_GRP_NUM_MAX];
-	u8 host2rxdma[ATH12K_EXT_IRQ_GRP_NUM_MAX];
-	u8 tx_mon_dest[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+	u8 tx[ATH12K_EXT_IRQ_DP_NUM_VECTORS];
+	u8 rx_mon_dest[ATH12K_EXT_IRQ_DP_NUM_VECTORS];
+	u8 rx[ATH12K_EXT_IRQ_DP_NUM_VECTORS];
+	u8 rx_err[ATH12K_EXT_IRQ_DP_NUM_VECTORS];
+	u8 rx_wbm_rel[ATH12K_EXT_IRQ_DP_NUM_VECTORS];
+	u8 reo_status[ATH12K_EXT_IRQ_DP_NUM_VECTORS];
+	u8 host2rxdma[ATH12K_EXT_IRQ_DP_NUM_VECTORS];
+	u8 tx_mon_dest[ATH12K_EXT_IRQ_DP_NUM_VECTORS];
+	u8 ppe2tcl[ATH12K_EXT_IRQ_DP_NUM_VECTORS];
+	u8 reo2ppe[ATH12K_EXT_IRQ_DP_NUM_VECTORS];
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	u8 wbm2sw6_ppeds_tx_cmpln[ATH12K_EXT_IRQ_DP_NUM_VECTORS];
+#endif
+	u8 umac_dp_reset[ATH12K_EXT_IRQ_DP_NUM_VECTORS];
 };
 
 struct ath12k_hw_hal_params {
@@ -133,6 +165,7 @@
 struct ath12k_hw_params {
 	const char *name;
 	u16 hw_rev;
+	u32 bdf_addr;
 
 	struct {
 		const char *dir;
@@ -146,8 +179,10 @@
 	bool internal_sleep_clock:1;
 
 	const struct ath12k_hw_ops *hw_ops;
-	const struct ath12k_hw_ring_mask *ring_mask;
+	struct ath12k_hw_ring_mask *ring_mask;
 	const struct ath12k_hw_regs *regs;
+	u8 ext_irq_grp_num_max;
+	u8 route_wbm_release;
 
 	const struct ce_attr *host_ce_config;
 	u32 ce_count;
@@ -164,10 +199,21 @@
 	bool rx_mac_buf_ring:1;
 	bool vdev_start_delay:1;
 
+	struct {
+		u8 fft_sz;
+		u8 fft_pad_sz;
+		u8 summary_pad_sz;
+		u8 fft_hdr_len;
+		u16 max_fft_bins;
+		bool fragment_160mhz;
+	} spectral;
+
 	u16 interface_modes;
 	bool supports_monitor:1;
+	bool supports_tx_monitor;
 
 	bool idle_ps:1;
+	bool cold_boot_calib:1;
 	bool download_calib:1;
 	bool supports_suspend:1;
 	bool tcl_ring_retry:1;
@@ -177,6 +223,7 @@
 	u32 hal_desc_sz;
 	u32 num_tcl_banks;
 	u32 max_tx_ring;
+	bool static_window_map;
 
 	const struct mhi_controller_config *mhi_config;
 
@@ -184,6 +231,30 @@
 			 struct ath12k_wmi_resource_config_arg *config);
 
 	const struct hal_ops *hal_ops;
+
+	u64 qmi_cnss_feature_bitmap;
+
+	bool fix_l1ss;
+	bool supports_ap_ps;
+	bool credit_flow;
+	bool alloc_cacheable_memory;
+	u16 max_mlo_peer;
+	u8 num_local_link;
+	const struct ce_ie_addr *ce_ie_addr;
+	bool smp2p_wow_exit;
+	bool fixed_fw_mem;
+	bool m3_fw_support;
+	bool send_platform_model;
+	bool en_fwlog;
+	bool en_qdsslog;
+	const struct ce_remap *ce_remap;
+	const struct cmem_remap *cmem_remap;
+	bool compact_rx_tlv;
+	bool support_ce_manual_poll;
+	const struct pmm_remap *pmm_remap;
+	bool support_umac_reset;
+	int fw_mem_mode;
+	bool is_plink_preferable;
 };
 
 struct ath12k_hw_ops {
@@ -234,12 +305,40 @@
 	ATH12K_BD_IE_BOARD_DATA = 1,
 };
 
+enum ath12k_bd_ie_regdb_type {
+	ATH12K_BD_IE_REGDB_NAME = 0,
+	ATH12K_BD_IE_REGDB_DATA = 1,
+};
+
+enum ath12k_bd_ie_rxgainlut_type {
+	 ATH12K_BD_IE_RXGAINLUT_NAME = 0,
+	 ATH12K_BD_IE_RXGAINLUT_DATA = 1,
+};
+
 enum ath12k_bd_ie_type {
 	/* contains sub IEs of enum ath12k_bd_ie_board_type */
 	ATH12K_BD_IE_BOARD = 0,
-	ATH12K_BD_IE_BOARD_EXT = 1,
+	ATH12K_BD_IE_REGDB = 1,
+	ATH12K_BD_IE_BOARD_EXT = 2,
+	ATH12K_BD_IE_RXGAINLUT = 3,
 };
 
+static inline const char *ath12k_bd_ie_type_str(enum ath12k_bd_ie_type type)
+{
+	switch (type) {
+	case ATH12K_BD_IE_BOARD:
+		return "board data";
+	case ATH12K_BD_IE_REGDB:
+		return "regdb data";
+	case ATH12K_BD_IE_BOARD_EXT:
+		return "board data ext";
+	case ATH12K_BD_IE_RXGAINLUT:
+		return "rxgainlut data";
+	}
+
+	return "unknown";
+}
+
 struct ath12k_hw_regs {
 	u32 hal_tcl1_ring_id;
 	u32 hal_tcl1_ring_misc;
@@ -251,9 +350,16 @@
 	u32 hal_tcl1_ring_msi1_base_msb;
 	u32 hal_tcl1_ring_msi1_data;
 	u32 hal_tcl_ring_base_lsb;
+	u32 hal_tcl1_ring_base_lsb;
+	u32 hal_tcl1_ring_base_msb;
+	u32 hal_tcl2_ring_base_lsb;
 
 	u32 hal_tcl_status_ring_base_lsb;
 
+	u32 hal_reo1_qdesc_addr_read;
+	u32 hal_reo1_qdesc_max_peerid;
+	u32 hal_ppe_rel_ring_base;
+
 	u32 hal_wbm_idle_ring_base_lsb;
 	u32 hal_wbm_idle_ring_misc_addr;
 	u32 hal_wbm_r0_idle_list_cntl_addr;
@@ -274,9 +380,14 @@
 	u32 pcie_qserdes_sysclk_en_sel;
 	u32 pcie_pcs_osc_dtct_config_base;
 
-	u32 hal_ppe_rel_ring_base;
+	u32 hal_umac_ce0_src_reg_base;
+	u32 hal_umac_ce0_dest_reg_base;
+	u32 hal_umac_ce1_src_reg_base;
+	u32 hal_umac_ce1_dest_reg_base;
 
 	u32 hal_reo2_ring_base;
+	u32 hal_reo2ppe_ring_base;
+	u32 hal_tcl_ppe2tcl_ring_base_lsb;
 	u32 hal_reo1_misc_ctrl_addr;
 	u32 hal_reo1_sw_cookie_cfg0;
 	u32 hal_reo1_sw_cookie_cfg1;
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/mac.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/mac.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/mac.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/mac.c	2024-04-19 16:04:28.957735776 +0200
@@ -5,7 +5,15 @@
  */
 
 #include <net/mac80211.h>
+#include <net/cfg80211.h>
 #include <linux/etherdevice.h>
+#include <linux/bitfield.h>
+#include <linux/inetdevice.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <net/if_inet6.h>
+#include <net/ipv6.h>
+
 #include "mac.h"
 #include "core.h"
 #include "debug.h"
@@ -13,7 +21,19 @@
 #include "hw.h"
 #include "dp_tx.h"
 #include "dp_rx.h"
+#include "testmode.h"
 #include "peer.h"
+#include "debugfs_sta.h"
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+#include "bondif.h"
+#endif
+
+#define IEEE80211_EHT_PPE_THRES_NSS_POS                        0
+#define IEEE80211_EHT_PPE_THRES_NSS_MASK               0xf
+#define IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK  0x1f0
+#define IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_POS   4
+#define IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE         3
+#define IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE       9
 
 #define CHAN2G(_channel, _freq, _flags) { \
 	.band                   = NL80211_BAND_2GHZ, \
@@ -42,6 +62,16 @@
 	.max_power              = 30, \
 }
 
+static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw,
+					     struct ath12k_link_vif *arvif);
+
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+int ath12k_mac_op_change_vif_links(struct ieee80211_hw *hw,
+					  struct ieee80211_vif *vif,
+					  u16 old_links, u16 new_links,
+					  struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]);
+extern int g_bonded_interface_model;
+#endif
 static const struct ieee80211_channel ath12k_2ghz_channels[] = {
 	CHAN2G(1, 2412, 0),
 	CHAN2G(2, 2417, 0),
@@ -87,10 +117,12 @@
 	CHAN5G(165, 5825, 0),
 	CHAN5G(169, 5845, 0),
 	CHAN5G(173, 5865, 0),
+	CHAN5G(177, 5885, 0),
 };
 
 static const struct ieee80211_channel ath12k_6ghz_channels[] = {
 	CHAN6G(1, 5955, 0),
+	CHAN6G(2, 5935, 0),
 	CHAN6G(5, 5975, 0),
 	CHAN6G(9, 5995, 0),
 	CHAN6G(13, 6015, 0),
@@ -182,32 +214,35 @@
 	[NL80211_BAND_2GHZ] = {
 			[NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
 			[NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
-			[NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20_2G,
-			[NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20_2G,
-			[NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40_2G,
-			[NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80_2G,
+			[NL80211_CHAN_WIDTH_20_NOHT] = MODE_11BE_EHT20_2G,
+			[NL80211_CHAN_WIDTH_20] = MODE_11BE_EHT20_2G,
+			[NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40_2G,
+			[NL80211_CHAN_WIDTH_80] = MODE_UNKNOWN,
 			[NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN,
 			[NL80211_CHAN_WIDTH_160] = MODE_UNKNOWN,
+			[NL80211_CHAN_WIDTH_320] = MODE_UNKNOWN,
 	},
 	[NL80211_BAND_5GHZ] = {
 			[NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
 			[NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
-			[NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20,
-			[NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20,
-			[NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40,
-			[NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80,
-			[NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
-			[NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
+			[NL80211_CHAN_WIDTH_20_NOHT] = MODE_11BE_EHT20,
+			[NL80211_CHAN_WIDTH_20] = MODE_11BE_EHT20,
+			[NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40,
+			[NL80211_CHAN_WIDTH_80] = MODE_11BE_EHT80,
+			[NL80211_CHAN_WIDTH_160] = MODE_11BE_EHT160,
+			[NL80211_CHAN_WIDTH_80P80] = MODE_11BE_EHT80_80,
+			[NL80211_CHAN_WIDTH_320] = MODE_11BE_EHT320,
 	},
 	[NL80211_BAND_6GHZ] = {
 			[NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
 			[NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
-			[NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20,
-			[NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20,
-			[NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40,
-			[NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80,
-			[NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
-			[NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
+			[NL80211_CHAN_WIDTH_20_NOHT] = MODE_11BE_EHT20,
+			[NL80211_CHAN_WIDTH_20] = MODE_11BE_EHT20,
+			[NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40,
+			[NL80211_CHAN_WIDTH_80] = MODE_11BE_EHT80,
+			[NL80211_CHAN_WIDTH_160] = MODE_11BE_EHT160,
+			[NL80211_CHAN_WIDTH_80P80] = MODE_11BE_EHT80_80,
+			[NL80211_CHAN_WIDTH_320] = MODE_11BE_EHT320,
 	},
 
 };
@@ -238,8 +273,162 @@
 	[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
 };
 
-static int ath12k_start_vdev_delay(struct ieee80211_hw *hw,
+static int ath12k_start_vdev_delay(struct ath12k *ar,
 				   struct ieee80211_vif *vif);
+static void ath12k_update_bcn_template_work(struct work_struct *work);
+static void ath12k_update_obss_color_notify_work(struct work_struct *work);
+static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw,
+					     struct ath12k_link_vif *arvif);
+static int ath12k_fw_stats_request(struct ath12k *ar,
+				   struct stats_request_params *req_param);
+
+enum nl80211_he_ru_alloc ath12k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy)
+{
+	enum nl80211_he_ru_alloc ret;
+
+	switch (ru_phy) {
+	case RU_26:
+		ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+		break;
+	case RU_52:
+		ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+		break;
+	case RU_106:
+		ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+		break;
+	case RU_242:
+		ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+		break;
+	case RU_484:
+		ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+		break;
+	case RU_996:
+		ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+		break;
+	default:
+		ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+		break;
+	}
+
+	return ret;
+}
+
+enum nl80211_he_ru_alloc ath12k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
+{
+	enum nl80211_he_ru_alloc ret;
+
+	switch (ru_tones) {
+	case 26:
+		ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+		break;
+	case 52:
+		ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+		break;
+	case 106:
+		ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+		break;
+	case 242:
+		ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+		break;
+	case 484:
+		ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+		break;
+	case 996:
+		ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+		break;
+	case (996 * 2):
+		ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+		break;
+	default:
+		ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+		break;
+	}
+
+	return ret;
+}
+
+enum nl80211_eht_ru_alloc ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(u16 ru_tones)
+{
+	enum nl80211_eht_ru_alloc ret;
+
+	switch (ru_tones) {
+	case 26:
+		ret = NL80211_RATE_INFO_EHT_RU_ALLOC_26;
+		break;
+	case 52:
+		ret = NL80211_RATE_INFO_EHT_RU_ALLOC_52;
+		break;
+	case (52 + 26):
+		ret = NL80211_RATE_INFO_EHT_RU_ALLOC_52P26;
+		break;
+	case 106:
+		ret = NL80211_RATE_INFO_EHT_RU_ALLOC_106;
+		break;
+	case (106 + 26):
+		ret = NL80211_RATE_INFO_EHT_RU_ALLOC_106P26;
+		break;
+	case 242:
+		ret = NL80211_RATE_INFO_EHT_RU_ALLOC_242;
+		break;
+	case 484:
+		ret = NL80211_RATE_INFO_EHT_RU_ALLOC_484;
+		break;
+	case (484 + 242):
+		ret = NL80211_RATE_INFO_EHT_RU_ALLOC_484P242;
+		break;
+	case 996:
+		ret = NL80211_RATE_INFO_EHT_RU_ALLOC_996;
+		break;
+	case (996 + 484):
+		ret = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484;
+		break;
+	case (996 + 484 + 242):
+		ret = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242;
+		break;
+	case (996 * 2):
+		ret = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996;
+		break;
+	case (996 * 2 + 484):
+		ret = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484;
+		break;
+	case (996 * 3):
+		ret = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996;
+		break;
+	case (996 * 3 + 484):
+		ret = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484;
+		break;
+	case (996 * 4):
+		ret = NL80211_RATE_INFO_EHT_RU_ALLOC_4x996;
+		break;
+	default:
+		ret = NL80211_RATE_INFO_EHT_RU_ALLOC_26;
+		break;
+	}
+
+	return ret;
+}
+
+enum nl80211_he_gi ath12k_mac_he_gi_to_nl80211_he_gi(u8 sgi)
+{
+	enum nl80211_he_gi ret;
+
+	switch (sgi) {
+	case RX_MSDU_START_SGI_0_8_US:
+		ret = NL80211_RATE_INFO_HE_GI_0_8;
+		break;
+	case RX_MSDU_START_SGI_1_6_US:
+		ret = NL80211_RATE_INFO_HE_GI_1_6;
+		break;
+	case RX_MSDU_START_SGI_3_2_US:
+		ret = NL80211_RATE_INFO_HE_GI_3_2;
+		break;
+	default:
+		ret = NL80211_RATE_INFO_HE_GI_0_8;
+		break;
+	}
+
+	return ret;
+}
 
 static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode)
 {
@@ -292,6 +481,24 @@
 		return "11ax-he40-2g";
 	case MODE_11AX_HE80_2G:
 		return "11ax-he80-2g";
+	case MODE_11BE_EHT20:
+		return "11be-eht20";
+	case MODE_11BE_EHT40:
+		return "11be-eht40";
+	case MODE_11BE_EHT80:
+		return "11be-eht80";
+	case MODE_11BE_EHT80_80:
+		return "11be-eht80+80";
+	case MODE_11BE_EHT160:
+		return "11be-eht160";
+	case MODE_11BE_EHT160_160:
+		return "11be-eht160+160";
+	case MODE_11BE_EHT320:
+		return "11be-eht320";
+	case MODE_11BE_EHT20_2G:
+		return "11be-eht20-2g";
+	case MODE_11BE_EHT40_2G:
+		return "11be-eht40-2g";
 	case MODE_UNKNOWN:
 		/* skip */
 		break;
@@ -322,6 +529,8 @@
 	case ATH12K_BW_160:
 		ret = RATE_INFO_BW_160;
 		break;
+	case ATH12K_BW_320:
+		ret = RATE_INFO_BW_320;
 	}
 
 	return ret;
@@ -338,6 +547,8 @@
 		return ATH12K_BW_80;
 	case RATE_INFO_BW_160:
 		return ATH12K_BW_160;
+	case RATE_INFO_BW_320:
+		return ATH12K_BW_320;
 	default:
 		return ATH12K_BW_20;
 	}
@@ -368,6 +579,19 @@
 	return -EINVAL;
 }
 
+static int get_num_chains(u32 mask)
+{
+	int num_chains = 0;
+
+	while (mask) {
+		if (mask & BIT(0))
+			num_chains++;
+		mask >>= 1;
+	}
+
+	return num_chains;
+}
+
 u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
 			     u32 bitrate)
 {
@@ -381,7 +605,7 @@
 }
 
 static u32
-ath12k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+ath12k_mac_max_ht_nss(const u8 *ht_mcs_mask)
 {
 	int nss;
 
@@ -393,7 +617,7 @@
 }
 
 static u32
-ath12k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+ath12k_mac_max_vht_nss(const u16 *vht_mcs_mask)
 {
 	int nss;
 
@@ -404,6 +628,42 @@
 	return 1;
 }
 
+static u32
+ath12k_mac_max_he_nss(const u16 he_mcs_mask[NL80211_HE_NSS_MAX])
+{
+	int nss;
+
+	for (nss = NL80211_HE_NSS_MAX - 1; nss >= 0; nss--)
+		if (he_mcs_mask[nss])
+			return nss + 1;
+
+	return 1;
+}
+
+static u32
+ath12k_mac_max_eht_nss(const u16 eht_mcs_mask[NL80211_EHT_NSS_MAX])
+{
+	int nss;
+
+	for (nss = NL80211_EHT_NSS_MAX - 1; nss >= 0; nss--)
+		if (eht_mcs_mask[nss])
+			return nss + 1;
+
+	return 1;
+}
+
+static u32
+ath12k_mac_max_eht_mcs_nss(const u8 *eht_mcs, int eht_mcs_set_size)
+{
+	int i;
+	u8 nss = 0;
+
+	for (i = 0; i < eht_mcs_set_size; i++)
+		nss = max(nss, u8_get_bits(eht_mcs[i], IEEE80211_EHT_MCS_NSS_RX));
+
+	return nss;
+}
+
 static u8 ath12k_parse_mpdudensity(u8 mpdudensity)
 {
 /*  From IEEE Std 802.11-2020 defined values for "Minimum MPDU Start Spacing":
@@ -440,12 +700,26 @@
 }
 
 static int ath12k_mac_vif_chan(struct ieee80211_vif *vif,
-			       struct cfg80211_chan_def *def)
+			       struct cfg80211_chan_def *def,
+			       u8 link_id)
 {
+	struct ieee80211_bss_conf *link_conf;
 	struct ieee80211_chanctx_conf *conf;
 
+	/* non-MLO connection, link id 0 pointer to def link */
+	if (!vif->link_conf[link_id])
+		return -ENOENT;
+
 	rcu_read_lock();
-	conf = rcu_dereference(vif->bss_conf.chanctx_conf);
+	
+	link_conf = rcu_dereference(vif->link_conf[link_id]);
+
+	if (!link_conf) {
+		rcu_read_unlock();
+		return -ENOENT;
+	}
+
+	conf = rcu_dereference(link_conf->chanctx_conf);
 	if (!conf) {
 		rcu_read_unlock();
 		return -ENOENT;
@@ -457,6 +731,72 @@
 	return 0;
 }
 
+static int ath12k_mac_get_max_he_mcs_map(u16 mcs_map, int nss)
+{
+	switch ((mcs_map >> (2 * nss)) & 0x3) {
+	case IEEE80211_HE_MCS_SUPPORT_0_7: return BIT(8) - 1;
+	case IEEE80211_HE_MCS_SUPPORT_0_9: return BIT(10) - 1;
+	case IEEE80211_HE_MCS_SUPPORT_0_11: return BIT(12) - 1;
+	}
+	return 0;
+}
+
+static u16 ath12k_peer_assoc_h_he_limit(u16 tx_mcs_set,
+					const u16 he_mcs_limit[NL80211_HE_NSS_MAX])
+{
+	int idx_limit;
+	int nss;
+	u16 mcs_map;
+	u16 mcs;
+
+	for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) {
+		mcs_map = ath12k_mac_get_max_he_mcs_map(tx_mcs_set, nss) &
+			he_mcs_limit[nss];
+
+		if (mcs_map)
+			idx_limit = fls(mcs_map) - 1;
+		else
+			idx_limit = -1;
+
+		switch (idx_limit) {
+		case 0 ... 7:
+			mcs = IEEE80211_HE_MCS_SUPPORT_0_7;
+			break;
+		case 8:
+		case 9:
+			mcs = IEEE80211_HE_MCS_SUPPORT_0_9;
+			break;
+		case 10:
+		case 11:
+			mcs = IEEE80211_HE_MCS_SUPPORT_0_11;
+			break;
+		default:
+			WARN_ON(1);
+			fallthrough;
+		case -1:
+			mcs = IEEE80211_HE_MCS_NOT_SUPPORTED;
+			break;
+		}
+
+		tx_mcs_set &= ~(0x3 << (nss * 2));
+		tx_mcs_set |= mcs << (nss * 2);
+	}
+
+	return tx_mcs_set;
+}
+
+static bool
+ath12k_peer_assoc_h_he_masked(const u16 he_mcs_mask[NL80211_HE_NSS_MAX])
+{
+	int nss;
+
+	for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++)
+		if (he_mcs_mask[nss])
+			return false;
+
+	return true;
+}
+
 static bool ath12k_mac_bitrate_is_cck(int bitrate)
 {
 	switch (bitrate) {
@@ -498,46 +838,94 @@
 	       (ath12k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
 }
 
+struct
+ieee80211_bss_conf *ath12k_get_link_bss_conf(struct ath12k_link_vif *arvif)
+{
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
+	struct ieee80211_bss_conf *link_conf = NULL;
+
+	WARN_ON(!rcu_read_lock_held());
+
+	if (arvif->link_id > IEEE80211_MLD_MAX_NUM_LINKS)
+		return NULL;
+
+	link_conf = rcu_dereference(vif->link_conf[arvif->link_id]);
+
+	return link_conf;
+}
+
+static struct
+ieee80211_link_sta *ath12k_get_link_sta(struct ath12k_link_sta *arsta)
+{
+	struct ieee80211_sta *sta;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_link_sta *link_sta;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	WARN_ON(!rcu_read_lock_held());
+
+	if (arsta->link_id > IEEE80211_MLD_MAX_NUM_LINKS)
+		return NULL;
+
+	link_sta = rcu_dereference(sta->link[arsta->link_id]);
+
+	return link_sta;
+}
+
 static void ath12k_get_arvif_iter(void *data, u8 *mac,
 				  struct ieee80211_vif *vif)
 {
 	struct ath12k_vif_iter *arvif_iter = data;
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	struct ath12k_vif *ahvif = (void *)vif->drv_priv;
+	struct ath12k_link_vif *arvif;
+	u8 link_id;
+
+	/* TODO Revisit lock usage for ahvif->link. Currently this is
+	 * used with vif protected by rcu lock and since the ahvif->links_map
+	 * or link wont be modified it is safe to access below without its mutex
+	 */
+	for_each_set_bit(link_id, &ahvif->links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif = ahvif->link[link_id];
 
-	if (arvif->vdev_id == arvif_iter->vdev_id)
+		if (WARN_ON(arvif == NULL))
+			continue;
+
+		if ((arvif->vdev_id == arvif_iter->vdev_id) &&
+		    (arvif->ar == arvif_iter->ar)) {
 		arvif_iter->arvif = arvif;
+			break;
+		}
+	}
 }
 
-struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id)
+struct ath12k_link_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id)
 {
 	struct ath12k_vif_iter arvif_iter = {};
 	u32 flags;
 
 	arvif_iter.vdev_id = vdev_id;
+	arvif_iter.ar = ar;
 
 	flags = IEEE80211_IFACE_ITER_RESUME_ALL;
-	ieee80211_iterate_active_interfaces_atomic(ar->hw,
+	ieee80211_iterate_active_interfaces_atomic(ar->ah->hw,
 						   flags,
 						   ath12k_get_arvif_iter,
 						   &arvif_iter);
-	if (!arvif_iter.arvif) {
-		ath12k_warn(ar->ab, "No VIF found for vdev %d\n", vdev_id);
-		return NULL;
-	}
-
 	return arvif_iter.arvif;
 }
 
-struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
+struct ath12k_link_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
 						   u32 vdev_id)
 {
 	int i;
 	struct ath12k_pdev *pdev;
-	struct ath12k_vif *arvif;
+	struct ath12k_link_vif *arvif;
 
 	for (i = 0; i < ab->num_radios; i++) {
 		pdev = rcu_dereference(ab->pdevs_active[i]);
-		if (pdev && pdev->ar) {
+		if (pdev && pdev->ar &&
+		    (pdev->ar->allocated_vdev_map & (1LL << vdev_id))) {
 			arvif = ath12k_mac_get_arvif(pdev->ar, vdev_id);
 			if (arvif)
 				return arvif;
@@ -547,6 +935,80 @@
 	return NULL;
 }
 
+static struct ath12k *ath12k_mac_get_ar_by_agile_chandef(struct ieee80211_hw *hw,
+							 enum nl80211_band band)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	int i;
+
+	if (band != NL80211_BAND_5GHZ)
+		return NULL;
+
+	ar = ah->radio;
+	for (i = 0; i < ah->num_radio; i++) {
+		if (!ar->agile_chandef.chan)
+			continue;
+		if (ar->agile_chandef.chan->center_freq > ar->chan_info.low_freq &&
+		    ar->agile_chandef.chan->center_freq < ar->chan_info.high_freq)
+			return ar;
+		ar++;
+	}
+	return NULL;
+}
+
+static struct ath12k *ath12k_mac_get_ar_by_chan(struct ieee80211_hw *hw,
+						struct ieee80211_channel *channel)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	int i;
+
+	ar = ah->radio;
+	for (i = 0; i < ah->num_radio; i++) {
+		if (channel->center_freq >= ar->chan_info.low_freq &&
+		    channel->center_freq <= ar->chan_info.high_freq)
+			return ar;
+		ar++;
+	}
+	return NULL;
+}
+
+static struct ath12k *ath12k_get_ar_by_ctx(struct ieee80211_hw *hw,
+					   struct ieee80211_chanctx_conf *ctx)
+{
+	if (!ctx)
+		return NULL;
+
+	return ath12k_mac_get_ar_by_chan(hw, ctx->def.chan);
+}
+
+struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+				    u8 link_id)
+{
+	struct ath12k_vif *ahvif;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar = NULL;
+
+	lockdep_assert_held(&ah->conf_mutex);
+
+	if (!vif)
+		return NULL;
+
+	ahvif = ath12k_vif_to_ahvif(vif);
+
+	if (!(ahvif->links_map & BIT(link_id)))
+		return NULL;
+
+	arvif = ahvif->link[link_id];
+
+	if (arvif && arvif->is_created)
+		ar = arvif->ar;
+
+	return ar;
+}
+
 struct ath12k *ath12k_mac_get_ar_by_vdev_id(struct ath12k_base *ab, u32 vdev_id)
 {
 	int i;
@@ -577,6 +1039,9 @@
 		return NULL;
 
 	for (i = 0; i < ab->num_radios; i++) {
+		if (ab->fw_mode == ATH12K_FIRMWARE_MODE_FTM)
+			pdev = &ab->pdevs[i];
+		else
 		pdev = rcu_dereference(ab->pdevs_active[i]);
 
 		if (pdev && pdev->pdev_id == pdev_id)
@@ -586,6 +1051,19 @@
 	return NULL;
 }
 
+bool ath12k_mac_is_ml_arvif(struct ath12k_link_vif *arvif)
+{
+	struct ath12k *ar = arvif->ar;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (ahvif->vif->valid_links & BIT(arvif->link_id))
+		return true;
+
+	return false;
+}
+
 static void ath12k_pdev_caps_update(struct ath12k *ar)
 {
 	struct ath12k_base *ab = ar->ab;
@@ -601,13 +1079,14 @@
 
 	ar->txpower_limit_2g = ar->max_tx_power;
 	ar->txpower_limit_5g = ar->max_tx_power;
+	ar->txpower_limit_6g = ar->max_tx_power;
 	ar->txpower_scale = WMI_HOST_TP_SCALE_MAX;
 }
 
 static int ath12k_mac_txpower_recalc(struct ath12k *ar)
 {
 	struct ath12k_pdev *pdev = ar->pdev;
-	struct ath12k_vif *arvif;
+	struct ath12k_link_vif *arvif;
 	int ret, txpower = -1;
 	u32 param;
 
@@ -653,6 +1132,16 @@
 		ar->txpower_limit_5g = txpower;
 	}
 
+	if ((ar->ah->hw->wiphy->bands[NL80211_BAND_6GHZ]) &&
+		ar->txpower_limit_6g != txpower) {
+		param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
+		ret = ath12k_wmi_pdev_set_param(ar, param,
+						txpower, ar->pdev->pdev_id);
+		if (ret)
+			goto fail;
+		ar->txpower_limit_6g = txpower;
+	}
+
 	return 0;
 
 fail:
@@ -661,7 +1150,7 @@
 	return ret;
 }
 
-static int ath12k_recalc_rtscts_prot(struct ath12k_vif *arvif)
+static int ath12k_recalc_rtscts_prot(struct ath12k_link_vif *arvif)
 {
 	struct ath12k *ar = arvif->ar;
 	u32 vdev_param, rts_cts;
@@ -700,7 +1189,7 @@
 	return ret;
 }
 
-static int ath12k_mac_set_kickout(struct ath12k_vif *arvif)
+static int ath12k_mac_set_kickout(struct ath12k_link_vif *arvif)
 {
 	struct ath12k *ar = arvif->ar;
 	u32 param;
@@ -745,23 +1234,130 @@
 	return 0;
 }
 
+static void ath12k_mac_dec_num_stations(struct ath12k_link_vif *arvif,
+					struct ath12k_link_sta *arsta)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ath12k *ar = arvif->ar;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (!ar->num_stations)
+		return;
+
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+		return;
+
+	ar->num_stations--;
+}
+
+int ath12k_mac_partner_peer_cleanup(struct ath12k_base *ab)
+{
+
+	struct ath12k_base *partner_ab;
+	struct ath12k *ar;
+	struct ath12k_hw_group *ag;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_vif *ahvif;
+	struct ieee80211_sta *sta;
+	struct ieee80211_vif *vif;
+	struct ath12k_sta *ahsta;
+	struct ath12k_link_sta *arsta;
+	struct ath12k_peer *peer, *tmp;
+	int idx, ret = 0;
+	u8 link_id;
+
+	ag = ab->ag;
+
+	for (idx = 0; idx < ag->num_chip; idx++) {
+		partner_ab = ag->ab[idx];
+
+		if (ab == partner_ab)
+			continue;
+
+		list_for_each_entry_safe(peer, tmp, &partner_ab->peers, list) {
+			if (!peer->sta || !peer->mlo || !peer->vif)
+				continue;
+
+			link_id = peer->link_id;
+			/* get arsta */
+			sta = peer->sta;
+			ahsta = ath12k_sta_to_ahsta(sta);
+			arsta = ahsta->link[link_id];
+
+			/* get arvif */
+			vif = peer->vif;
+			ahvif = (struct ath12k_vif *)vif->drv_priv;
+			arvif = ahvif->link[link_id];
+
+			ar = arvif->ar;
+			if (!ar)
+				continue;
+
+			mutex_lock(&ar->conf_mutex);
+			ret = ath12k_peer_delete_send(ar, arvif->vdev_id, arsta->addr);
+			if (ret) {
+				mutex_unlock(&ar->conf_mutex);
+				ath12k_warn(ar->ab,
+					    "failed to delete peer vdev_id %d addr %pM ret %d\n",
+					    arvif->vdev_id, arsta->addr, ret);
+				continue;
+			}
+			ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, arsta->addr);
+			if (ret) {
+				mutex_unlock(&ar->conf_mutex);
+				continue;
+			}
+
+			ar->num_peers--;
+			arvif->num_stations--;
+			ath12k_mac_dec_num_stations(arvif, arsta);
+			mutex_unlock(&ar->conf_mutex);
+			cancel_work_sync(&arsta->update_wk);
+		}
+	}
+	return ret;
+}
+
 void ath12k_mac_peer_cleanup_all(struct ath12k *ar)
 {
 	struct ath12k_peer *peer, *tmp;
 	struct ath12k_base *ab = ar->ab;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_link_vif *arvif, *tmp_vif;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
+	mutex_lock(&ab->tbl_mtx_lock);
 	spin_lock_bh(&ab->base_lock);
 	list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
 		ath12k_dp_rx_peer_tid_cleanup(ar, peer);
+		ath12k_peer_rhash_delete(ab, peer);
 		list_del(&peer->list);
+#ifdef CONFIG_ATH12K_SAWF
+	if (peer->sawf_ctx_peer.telemetry_peer_ctx)
+		ath12k_telemetry_peer_ctx_free(peer->sawf_ctx_peer.telemetry_peer_ctx);
+#endif
 		kfree(peer);
 	}
 	spin_unlock_bh(&ab->base_lock);
+	mutex_unlock(&ab->tbl_mtx_lock);
+
+	if (!list_empty(&ab->neighbor_peers))
+		ath12k_debugfs_nrp_cleanup_all(ar);
 
 	ar->num_peers = 0;
 	ar->num_stations = 0;
+
+	if (ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE1) {
+		list_for_each_entry_safe_reverse(arvif, tmp_vif, &ar->arvifs, list)
+			arvif->num_stations = 0;
+	}
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "ath12k mac peer cleanup done\n");
 }
 
 static int ath12k_mac_vdev_setup_sync(struct ath12k *ar)
@@ -771,9 +1367,14 @@
 	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
 		return -ESHUTDOWN;
 
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "ath12k vdev setup timeout %d\n",
+		   ATH12K_VDEV_SETUP_TIMEOUT_HZ);
+
 	if (!wait_for_completion_timeout(&ar->vdev_setup_done,
-					 ATH12K_VDEV_SETUP_TIMEOUT_HZ))
+					 ATH12K_VDEV_SETUP_TIMEOUT_HZ)){
+		WARN_ON(1);
 		return -ETIMEDOUT;
+	}
 
 	return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
 }
@@ -781,8 +1382,11 @@
 static int ath12k_monitor_vdev_up(struct ath12k *ar, int vdev_id)
 {
 	int ret;
+	struct vdev_up_params params = { 0 };
 
-	ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+	params.vdev_id = vdev_id;
+	params.bssid = ar->mac_addr;
+	ret = ath12k_wmi_vdev_up(ar, &params);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
 			    vdev_id, ret);
@@ -800,26 +1404,30 @@
 	struct ieee80211_channel *channel;
 	struct wmi_vdev_start_req_arg arg = {};
 	int ret;
+	struct vdev_up_params params = { 0 };
 
 	lockdep_assert_held(&ar->conf_mutex);
 
 	channel = chandef->chan;
 	arg.vdev_id = vdev_id;
-	arg.freq = channel->center_freq;
-	arg.band_center_freq1 = chandef->center_freq1;
-	arg.band_center_freq2 = chandef->center_freq2;
-	arg.mode = ath12k_phymodes[chandef->chan->band][chandef->width];
-	arg.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR);
-
-	arg.min_power = 0;
-	arg.max_power = channel->max_power;
-	arg.max_reg_power = channel->max_reg_power;
-	arg.max_antenna_gain = channel->max_antenna_gain;
+	arg.channel.freq = channel->center_freq;
+	arg.channel.band_center_freq1 = chandef->center_freq1;
+	arg.channel.band_center_freq2 = chandef->center_freq2;
+	arg.channel.mode = ath12k_phymodes[chandef->chan->band][chandef->width];
+	arg.channel.chan_radar =
+			!!(channel->flags & IEEE80211_CHAN_RADAR);
+
+	arg.channel.min_power = 0;
+	arg.channel.max_power = channel->max_power;
+	arg.channel.max_reg_power = channel->max_reg_power;
+	arg.channel.max_antenna_gain = channel->max_antenna_gain;
 
 	arg.pref_tx_streams = ar->num_tx_chains;
 	arg.pref_rx_streams = ar->num_rx_chains;
 
-	arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+	arg.channel.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+
+	arg.ru_punct_bitmap = 0xFFFFFFFF;
 
 	reinit_completion(&ar->vdev_setup_done);
 	reinit_completion(&ar->vdev_delete_done);
@@ -838,7 +1446,9 @@
 		return ret;
 	}
 
-	ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+	params.vdev_id = vdev_id;
+	params.bssid = ar->mac_addr;
+	ret = ath12k_wmi_vdev_up(ar, &params);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
 			    vdev_id, ret);
@@ -885,17 +1495,16 @@
 	return ret;
 }
 
-static int ath12k_mac_monitor_vdev_create(struct ath12k *ar)
+int ath12k_mac_monitor_vdev_create(struct ath12k *ar)
 {
 	struct ath12k_pdev *pdev = ar->pdev;
 	struct ath12k_wmi_vdev_create_arg arg = {};
 	int bit, ret;
-	u8 tmp_addr[6];
 	u16 nss;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	if (ar->monitor_vdev_created)
+	if (test_bit(MONITOR_VDEV_CREATED, &ar->monitor_flags))
 		return 0;
 
 	if (ar->ab->free_vdev_map == 0) {
@@ -903,7 +1512,10 @@
 		return -ENOMEM;
 	}
 
+	spin_lock_bh(&ar->ab->base_lock);
 	bit = __ffs64(ar->ab->free_vdev_map);
+	ar->ab->free_vdev_map &= ~(1LL << bit);
+	spin_unlock_bh(&ar->ab->base_lock);
 
 	ar->monitor_vdev_id = bit;
 
@@ -923,10 +1535,13 @@
 		arg.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
 	}
 
-	ret = ath12k_wmi_vdev_create(ar, tmp_addr, &arg);
+	ret = ath12k_wmi_vdev_create(ar, ar->mac_addr, &arg);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n",
 			    ar->monitor_vdev_id, ret);
+		spin_lock_bh(&ar->ab->base_lock);
+		ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id);
+		spin_unlock_bh(&ar->ab->base_lock);
 		ar->monitor_vdev_id = -1;
 		return ret;
 	}
@@ -945,23 +1560,23 @@
 		return ret;
 
 	ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id;
-	ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
 	ar->num_created_vdevs++;
-	ar->monitor_vdev_created = true;
+	set_bit(MONITOR_VDEV_CREATED, &ar->monitor_flags);
+
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d created\n",
 		   ar->monitor_vdev_id);
 
 	return 0;
 }
 
-static int ath12k_mac_monitor_vdev_delete(struct ath12k *ar)
+int ath12k_mac_monitor_vdev_delete(struct ath12k *ar)
 {
 	int ret;
 	unsigned long time_left;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	if (!ar->monitor_vdev_created)
+	if (!test_bit(MONITOR_VDEV_CREATED, &ar->monitor_flags))
 		return 0;
 
 	reinit_completion(&ar->vdev_delete_done);
@@ -979,42 +1594,55 @@
 		ath12k_warn(ar->ab, "Timeout in receiving vdev delete response\n");
 	} else {
 		ar->allocated_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+		spin_lock_bh(&ar->ab->base_lock);
 		ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id);
 		ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d deleted\n",
 			   ar->monitor_vdev_id);
+		spin_unlock_bh(&ar->ab->base_lock);
 		ar->num_created_vdevs--;
 		ar->monitor_vdev_id = -1;
-		ar->monitor_vdev_created = false;
+		clear_bit(MONITOR_VDEV_CREATED, &ar->monitor_flags);
 	}
 
 	return ret;
 }
 
-static void
+void
 ath12k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
 				struct ieee80211_chanctx_conf *conf,
 				void *data)
 {
-	struct cfg80211_chan_def **def = data;
+	struct ath12k_mac_any_chandef_arg *arg =
+				      (struct ath12k_mac_any_chandef_arg *)data;
+	struct ath12k *ctx_ar, *ar = arg->ar;
+
+	ctx_ar = ath12k_get_ar_by_ctx(ar->ah->hw, conf);
 
-	*def = &conf->def;
+	if (ctx_ar == ar)
+		arg->def = &conf->def;
 }
 
-static int ath12k_mac_monitor_start(struct ath12k *ar)
+int ath12k_mac_monitor_start(struct ath12k *ar)
 {
-	struct cfg80211_chan_def *chandef = NULL;
+	struct cfg80211_chan_def *chandef;
+	struct ath12k_mac_any_chandef_arg arg = { .ar = ar, .def = NULL};
 	int ret;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	if (ar->monitor_started)
+	if (test_bit(MONITOR_VDEV_STARTED, &ar->monitor_flags))
 		return 0;
 
-	ieee80211_iter_chan_contexts_atomic(ar->hw,
+	ieee80211_iter_chan_contexts_atomic(ar->ah->hw,
 					    ath12k_mac_get_any_chandef_iter,
-					    &chandef);
+					    &arg);
+	chandef = arg.def;
 	if (!chandef)
-		return 0;
+		return -EINVAL;
+
+	/* TODO 5G low high split changes */
+	if (!ar->mac.sbands[chandef->chan->band].channels)
+		return -EINVAL;
 
 	ret = ath12k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef);
 	if (ret) {
@@ -1023,21 +1651,28 @@
 		return ret;
 	}
 
-	ar->monitor_started = true;
-	ar->num_started_vdevs++;
 	ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, false);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "fail to set monitor filter: %d\n", ret);
+		return ret;
+	}
+
+	set_bit(MONITOR_VDEV_STARTED, &ar->monitor_flags);
+	ar->num_started_vdevs++;
+
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor started ret %d\n", ret);
 
 	return ret;
 }
 
-static int ath12k_mac_monitor_stop(struct ath12k *ar)
+int ath12k_mac_monitor_stop(struct ath12k *ar)
 {
 	int ret;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	if (!ar->monitor_started)
+	if (!test_bit(MONITOR_VDEV_STARTED, &ar->monitor_flags))
 		return 0;
 
 	ret = ath12k_mac_monitor_vdev_stop(ar);
@@ -1046,7 +1681,7 @@
 		return ret;
 	}
 
-	ar->monitor_started = false;
+	clear_bit(MONITOR_VDEV_STARTED, &ar->monitor_flags);
 	ar->num_started_vdevs--;
 	ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, true);
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor stopped ret %d\n", ret);
@@ -1055,92 +1690,421 @@
 
 static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
 {
-	struct ath12k *ar = hw->priv;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
 	struct ieee80211_conf *conf = &hw->conf;
-	int ret = 0;
+	int ret, i;
 
-	mutex_lock(&ar->conf_mutex);
+	mutex_lock(&ah->conf_mutex);
 
+	ar = ah->radio;
+
+	for (i = 0; i < ah->num_radio; i++) {
+		mutex_lock(&ar->conf_mutex);
 	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
-		ar->monitor_conf_enabled = conf->flags & IEEE80211_CONF_MONITOR;
-		if (ar->monitor_conf_enabled) {
-			if (ar->monitor_vdev_created)
-				goto exit;
+			if (conf->flags & IEEE80211_CONF_MONITOR) {
+				set_bit(MONITOR_CONF_ENABLED, &ar->monitor_flags);
+				if (test_bit(MONITOR_VDEV_CREATED,
+					     &ar->monitor_flags))
+					goto out;
+
 			ret = ath12k_mac_monitor_vdev_create(ar);
 			if (ret)
-				goto exit;
+					goto out;
 			ret = ath12k_mac_monitor_start(ar);
-			if (ret)
-				goto err_mon_del;
+				if (ret) {
+					ath12k_mac_monitor_vdev_delete(ar);
+					goto out;
+				}
 		} else {
-			if (!ar->monitor_vdev_created)
-				goto exit;
+				clear_bit(MONITOR_CONF_ENABLED, &ar->monitor_flags);
+				if (!test_bit(MONITOR_VDEV_CREATED,
+					      &ar->monitor_flags))
+					goto out;
+
 			ret = ath12k_mac_monitor_stop(ar);
 			if (ret)
-				goto exit;
+					goto out;
+
 			ath12k_mac_monitor_vdev_delete(ar);
 		}
 	}
-
-exit:
+out:
 	mutex_unlock(&ar->conf_mutex);
-	return ret;
+		ar++;
+ 	}
+
+	mutex_unlock(&ah->conf_mutex);
 
-err_mon_del:
-	ath12k_mac_monitor_vdev_delete(ar);
-	mutex_unlock(&ar->conf_mutex);
 	return ret;
 }
 
-static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
+static void ath12k_mac_setup_bcn_tmpl_vif_params(struct ath12k_link_vif *arvif,
+						 struct sk_buff *bcn)
 {
-	struct ath12k *ar = arvif->ar;
-	struct ath12k_base *ab = ar->ab;
-	struct ieee80211_hw *hw = ar->hw;
-	struct ieee80211_vif *vif = arvif->vif;
-	struct ieee80211_mutable_offsets offs = {};
-	struct sk_buff *bcn;
 	struct ieee80211_mgmt *mgmt;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_vht_cap *vht_cap;
 	u8 *ies;
-	int ret;
+	const u8 *vht_cap_ie;
 
-	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
-		return 0;
+	ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
+	mgmt = (struct ieee80211_mgmt *)bcn->data;
+	ies += sizeof(mgmt->u.beacon);
 
-	bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
-	if (!bcn) {
-		ath12k_warn(ab, "failed to get beacon template from mac80211\n");
-		return -EPERM;
+	/* avoid ie parsing if already done for this ahvif */
+	if (!ahvif->rsnie_present &&
+	    cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies)))
+		ahvif->rsnie_present = true;
+
+	vht_cap_ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, ies, (skb_tail_pointer(bcn) - ies));
+	if (vht_cap_ie && vht_cap_ie[1] >= sizeof(*vht_cap)) {
+		vht_cap = (void *)(vht_cap_ie + 2);
+		arvif->vht_cap = vht_cap->vht_cap_info;
 	}
 
+	/* avoid ie parsing if already done for this ahvif */
+	if (!ahvif->wpaie_present &&
+	    cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+				    WLAN_OUI_TYPE_MICROSOFT_WPA,
+				    ies, (skb_tail_pointer(bcn) - ies)))
+		ahvif->wpaie_present = true;
+}
+
+static bool ath12k_mac_setup_bcn_tmpl_nontx_vif_params(struct ath12k_link_vif *tx_arvif,
+                                                      struct ath12k_link_vif *arvif,
+						       struct sk_buff *bcn)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_mgmt *mgmt;
+	const u8 *ies, *profile, *next_profile;
+	int ies_len;
+
+	if (arvif == tx_arvif)
+		return true;
+
+	ahvif->rsnie_present = ahvif->rsnie_present;
+
 	ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
 	ies += sizeof(mgmt->u.beacon);
+	ies_len = skb_tail_pointer(bcn) - ies;
 
-	if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies)))
-		arvif->rsnie_present = true;
+	ies = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ies, ies_len);
 
-	if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
-				    WLAN_OUI_TYPE_MICROSOFT_WPA,
-				    ies, (skb_tail_pointer(bcn) - ies)))
-		arvif->wpaie_present = true;
+	while (ies) {
+		u8 mbssid_len;
 
-	ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
+		ies_len -= (2 + ies[1]);
+		mbssid_len = ies[1] - 1;
+		profile = &ies[3];
+
+		while (mbssid_len) {
+			u8 profile_len, nie_len;
+			const u8 *nie;
+
+			profile_len = profile[1];
+			next_profile = profile + (2 + profile_len);
+			mbssid_len -= (2 + profile_len);
+
+			profile += 2;
+			profile_len -= (2 + profile[1]);
+			profile += (2 + profile[1]); /* nontx capabilities */
+			profile_len -= (2 + profile[1]);
+			profile += (2 + profile[1]); /* SSID */
+			if (profile[2] == ahvif->vif->bss_conf.bssid_index) {
+				profile_len -= 5;
+				profile = profile + 5;
+
+				if (cfg80211_find_ie(WLAN_EID_RSN, profile,
+						     profile_len))
+					ahvif->rsnie_present = true;
+				else if (ahvif->rsnie_present) {
+					nie = cfg80211_find_ext_ie(WLAN_EID_EXT_NON_INHERITANCE,
+								   profile,
+								   profile_len);
+					if (nie) {
+						int i;
 
-	kfree_skb(bcn);
+						nie_len = nie[1];
+						nie += 2;
+						for (i = 0; i < nie_len; i++) {
+							if (nie[i] ==
+								WLAN_EID_RSN) {
+								ahvif->rsnie_present = false;
+								break;
+							}
+						}
+					}
+				}
+				return true;
+			}
+			profile = next_profile;
+		}
+		ies = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, profile,
+				       ies_len);
+	}
+
+	return false;
+}
+
+static int __ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif,
+				       struct sk_buff *bcn,
+				       struct ieee80211_mutable_offsets offs,
+				       int ema_idx, int ema_cnt)
+{
+	struct ath12k *ar = arvif->ar;
+	u32 ema_param = 0;
+
+	if (ema_cnt) {
+		ema_param = (ema_cnt << WMI_BEACON_EMA_PARAM_PERIODICITY_SHIFT);
+		ema_param |= (ema_idx << WMI_BEACON_EMA_PARAM_TMPL_IDX_SHIFT);
+		ema_param |= ((!ema_idx ? 1 : 0) <<
+			      WMI_BEACON_EMA_PARAM_FIRST_TMPL_SHIFT);
+		ema_param |= ((ema_idx + 1 == ema_cnt ? 1 : 0) <<
+			      WMI_BEACON_EMA_PARAM_LAST_TMPL_SHIFT);
+	}
+
+	return ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, ema_param);
+}
+
+static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif)
+{
+	struct ath12k_link_vif *tx_arvif;
+	struct ath12k_vif *tx_ahvif, *ahvif = arvif->ahvif;
+	struct ieee80211_ema_beacons *beacons;
+	struct ieee80211_bss_conf *link;
+	u8 i = 0;
+	int ret = 0;
+	bool found_vdev = false;
+
+	rcu_read_lock();
+	link = ath12k_get_link_bss_conf(arvif);
+	if (!link->mbssid_tx_vif) {
+		rcu_read_unlock();
+		return -1;
+	}
+	tx_ahvif = (void *)link->mbssid_tx_vif->drv_priv;
+	if (!tx_ahvif) {
+		rcu_read_unlock();
+		return -ENOLINK;
+	}
+	/* both TX an NON_TX arvif's should be under same ar and hw
+	 * hence current acquired lock's should suffice for opertaing
+	 * with tx arvif also
+	 */
+	lockdep_assert_held(&tx_ahvif->ah->conf_mutex);
+	tx_arvif = tx_ahvif->link[link->mbssid_tx_vif_linkid];
+	if (!tx_arvif) {
+		rcu_read_unlock();
+		return -ENOLINK;
+	}
 
+	if (arvif != tx_arvif && !tx_arvif->is_started) {
+		rcu_read_unlock();
+		ath12k_warn(arvif->ar->ab,
+			    "Transmit vif is not started before this non Tx beacon setup for vdev %d\n",
+			    arvif->vdev_id);
+		return -EINVAL;
+	}
+
+	lockdep_assert_held(&tx_arvif->ar->conf_mutex);
+	beacons = ieee80211_beacon_get_template_ema_list(tx_arvif->ar->ah->hw,
+							 tx_ahvif->vif,
+							 tx_arvif->link_id);
+	rcu_read_unlock();
+	if (!beacons || !beacons->cnt) {
+		ath12k_warn(arvif->ar->ab,
+			    "failed to get ema beacon templates from mac80211\n");
+		return -EPERM;
+	}
+
+	if (tx_arvif == arvif) {
+		ath12k_mac_setup_bcn_tmpl_vif_params(tx_arvif,
+						     beacons->bcn[0].skb);
+		found_vdev = true;
+	} else {
+		ahvif->wpaie_present = ahvif->wpaie_present;
+	}
+
+	for (i = 0; i < beacons->cnt; i++) {
+		if (found_vdev == false)
+			found_vdev =
+				ath12k_mac_setup_bcn_tmpl_nontx_vif_params(tx_arvif,
+									   arvif,
+									   beacons->bcn[i].skb);
+
+		ret = __ath12k_mac_setup_bcn_tmpl(tx_arvif, beacons->bcn[i].skb,
+						  beacons->bcn[i].offs,
+						  i, beacons->cnt);
+		if (ret) {
+			ath12k_warn(arvif->ar->ab,
+				    "failed to set ema beacon template id %i error %d\n",
+				    i, ret);
+			break;
+		}
+	}
+
+	ieee80211_beacon_free_ema_list(beacons);
+	return ret;
+}
+
+static int ath12k_mac_setup_bcn_tmpl_non_ema(struct ath12k_link_vif *arvif)
+{
+	struct ath12k_link_vif *tx_arvif = arvif;
+	struct ath12k_vif *tx_ahvif = arvif->ahvif;
+	struct ieee80211_mutable_offsets offs = {};
+	struct sk_buff *bcn;
+	struct ieee80211_bss_conf *link;
+	int ret;
+
+	rcu_read_lock();
+	link = ath12k_get_link_bss_conf(arvif);
+	if (!link) {
+		rcu_read_unlock();
+		return -ENOLINK;
+	}
+
+	if (link->mbssid_tx_vif) {
+		tx_ahvif = (void *)link->mbssid_tx_vif->drv_priv;
+		if (!tx_ahvif) {
+			rcu_read_unlock();
+			return -ENOLINK;
+		}
+		lockdep_assert_held(&tx_ahvif->ah->conf_mutex);
+		tx_arvif = tx_ahvif->link[link->mbssid_tx_vif_linkid];
+	}
+
+	if (!tx_arvif) {
+		rcu_read_unlock();
+		return -ENOLINK;
+	}
+
+	if (arvif != tx_arvif && !tx_arvif->is_started) {
+		rcu_read_unlock();
+		ath12k_warn(arvif->ar->ab,
+			    "Transmit vif is not started before this non Tx beacon setup for vdev %d\n",
+			    arvif->vdev_id);
+		return -EINVAL;
+	}
+
+	lockdep_assert_held(&tx_arvif->ar->conf_mutex);
+	bcn = ieee80211_beacon_get_template(tx_arvif->ar->ah->hw, tx_ahvif->vif,
+					    &offs, tx_arvif->link_id);
+	rcu_read_unlock();
+	if (!bcn) {
+		ath12k_warn(arvif->ar->ab,
+			    "failed to get beacon template from mac80211\n");
+		return -EPERM;
+	}
+
+	if (tx_arvif == arvif)
+		ath12k_mac_setup_bcn_tmpl_vif_params(tx_arvif, bcn);
+	else
+		(void) ath12k_mac_setup_bcn_tmpl_nontx_vif_params(tx_arvif,
+								  arvif,
+								  bcn);
+
+	ret = __ath12k_mac_setup_bcn_tmpl(tx_arvif, bcn, offs, 0, 0);
 	if (ret)
-		ath12k_warn(ab, "failed to submit beacon template command: %d\n",
+		ath12k_warn(tx_arvif->ar->ab,
+			    "failed to submit beacon template command: %d\n",
 			    ret);
 
+	kfree_skb(bcn);
 	return ret;
 }
 
-static void ath12k_control_beaconing(struct ath12k_vif *arvif,
+static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
+{
+	struct ath12k_vif *tx_ahvif, *ahvif = arvif->ahvif;
+	struct ieee80211_bss_conf *link;
+	bool is_ema;
+
+	if (ahvif->vdev_type != WMI_VDEV_TYPE_AP)
+		return 0;
+
+	rcu_read_lock();
+	link = ath12k_get_link_bss_conf(arvif);
+	if (!link) {
+		rcu_read_unlock();
+		return -ENOLINK;
+	}
+	if (link->mbssid_tx_vif) {
+		tx_ahvif = (void *)link->mbssid_tx_vif->drv_priv;
+		if (ahvif != tx_ahvif && arvif->is_up) {
+			rcu_read_unlock();
+			return 0;
+		}
+	}
+	is_ema = link->ema_ap;
+	rcu_read_unlock();
+
+	if (is_ema)
+		return ath12k_mac_setup_bcn_tmpl_ema(arvif);
+	else
+		return ath12k_mac_setup_bcn_tmpl_non_ema(arvif);
+}
+
+
+void ath12k_mac_bcn_tx_event(struct ath12k_link_vif *arvif)
+{
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
+	struct ath12k *ar = arvif->ar;
+	struct ieee80211_bss_conf* link_conf;
+
+	link_conf = ath12k_get_link_bss_conf(arvif);
+
+	if (!link_conf) {
+		ath12k_warn(ar->ab, "unable to access bss link conf in bcn tx event\n");
+		return;
+	}
+
+	if (!link_conf->color_change_active && !arvif->bcca_zero_sent)
+		return;
+
+	if (link_conf->color_change_active &&
+	    ieee80211_beacon_cntdwn_is_complete_mlo(vif, arvif->link_id)) {
+		arvif->bcca_zero_sent = true;
+		ieee80211_color_change_finish_mlo(vif, arvif->link_id);
+		return;
+	}
+
+	arvif->bcca_zero_sent = false;
+
+	if (link_conf->color_change_active && !link_conf->ema_ap)
+		ieee80211_beacon_update_cntdwn_mlo(vif, arvif->link_id);
+	ieee80211_queue_work(ar->ah->hw, &arvif->update_bcn_template_work);
+}
+
+static void ath12k_control_beaconing(struct ath12k_link_vif *arvif,
 				     struct ieee80211_bss_conf *info)
 {
 	struct ath12k *ar = arvif->ar;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ath12k_vif *tx_ahvif = NULL;
+	struct ath12k_link_vif *tx_arvif = NULL;
+	struct ieee80211_bss_conf *link;
 	int ret;
+	struct vdev_up_params params = { 0 };
 
+	rcu_read_lock();
+	link = ath12k_get_link_bss_conf(arvif);
+	if (!link) {
+		rcu_read_unlock();
+		return;
+	}
+
+	if (link->mbssid_tx_vif) {
+		tx_ahvif = (void *)link->mbssid_tx_vif->drv_priv;
+		if (tx_ahvif) {
+			lockdep_assert_held(&tx_ahvif->ah->conf_mutex);
+			tx_arvif = tx_ahvif->link[link->mbssid_tx_vif_linkid];
+			if (tx_arvif)
+				lockdep_assert_held(&tx_arvif->ar->conf_mutex);
+		}
+	}
+
+	rcu_read_unlock();
 	lockdep_assert_held(&arvif->ar->conf_mutex);
 
 	if (!info->enable_beacon) {
@@ -1150,6 +2114,7 @@
 				    arvif->vdev_id, ret);
 
 		arvif->is_up = false;
+
 		return;
 	}
 
@@ -1161,12 +2126,19 @@
 		return;
 	}
 
-	arvif->aid = 0;
+	ahvif->aid = 0;
 
-	ether_addr_copy(arvif->bssid, info->bssid);
+	ether_addr_copy(arvif->bssid, info->addr);
 
-	ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
-				 arvif->bssid);
+	params.vdev_id = arvif->vdev_id;
+	params.aid = ahvif->aid;
+	params.bssid = arvif->bssid;
+	if (tx_arvif) {
+		params.tx_bssid = tx_arvif->bssid;
+		params.profile_idx = info->bssid_index;
+		params.profile_count = BIT(info->bssid_indicator);
+	}
+	ret = ath12k_wmi_vdev_up(arvif->ar, &params);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to bring up vdev %d: %i\n",
 			    arvif->vdev_id, ret);
@@ -1179,12 +2151,16 @@
 }
 
 static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
-				      struct ieee80211_vif *vif,
-				      struct ieee80211_sta *sta,
+				      struct ath12k_link_vif *arvif,
+				      struct ath12k_link_sta *arsta,
 				      struct ath12k_wmi_peer_assoc_arg *arg)
 {
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
 	u32 aid;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
 
 	lockdep_assert_held(&ar->conf_mutex);
 
@@ -1193,44 +2169,58 @@
 	else
 		aid = sta->aid;
 
-	ether_addr_copy(arg->peer_mac, sta->addr);
+	ether_addr_copy(arg->peer_mac, arsta->addr);
 	arg->vdev_id = arvif->vdev_id;
 	arg->peer_associd = aid;
 	arg->auth_flag = true;
 	/* TODO: STA WAR in ath10k for listen interval required? */
-	arg->peer_listen_intval = ar->hw->conf.listen_interval;
+	arg->peer_listen_intval = ar->ah->hw->conf.listen_interval;
 	arg->peer_nss = 1;
 	arg->peer_caps = vif->bss_conf.assoc_capability;
 }
 
 static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
-				       struct ieee80211_vif *vif,
-				       struct ieee80211_sta *sta,
+				       struct ath12k_link_vif *arvif,
+				       struct ath12k_link_sta *arsta,
 				       struct ath12k_wmi_peer_assoc_arg *arg)
 {
-	struct ieee80211_bss_conf *info = &vif->bss_conf;
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_bss_conf *info;
 	struct cfg80211_chan_def def;
 	struct cfg80211_bss *bss;
-	struct ath12k_vif *arvif = (struct ath12k_vif *)vif->drv_priv;
 	const u8 *rsnie = NULL;
 	const u8 *wpaie = NULL;
+	struct ieee80211_hw *hw;
+	u8 link_id = arvif->link_id;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+	info = ath12k_get_link_bss_conf(arvif);
+
+	if (!info) {
+		ath12k_warn(ar->ab, "unable to access bss link conf for peer assoc send\n");
+		return;
+	}
+
+	if (WARN_ON(ath12k_mac_vif_chan(vif, &def, link_id)))
 		return;
 
-	bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+	hw = ar->ah->hw;
+	bss = cfg80211_get_bss(hw->wiphy, def.chan, info->bssid, NULL, 0,
 			       IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
 
-	if (arvif->rsnie_present || arvif->wpaie_present) {
+	if (ahvif->rsnie_present || ahvif->wpaie_present) {
 		arg->need_ptk_4_way = true;
-		if (arvif->wpaie_present)
+		if (ahvif->wpaie_present)
 			arg->need_gtk_2_way = true;
 	} else if (bss) {
 		const struct cfg80211_bss_ies *ies;
 
-		rcu_read_lock();
 		rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
 
 		ies = rcu_dereference(bss->ies);
@@ -1239,12 +2229,11 @@
 						WLAN_OUI_TYPE_MICROSOFT_WPA,
 						ies->data,
 						ies->len);
-		rcu_read_unlock();
-		cfg80211_put_bss(ar->hw->wiphy, bss);
+		cfg80211_put_bss(hw->wiphy, bss);
 	}
 
 	/* FIXME: base on RSN IE/WPA IE is a correct idea? */
-	if (rsnie || wpaie) {
+	if (ar->supports_6ghz || rsnie || wpaie) {
 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
 			   "%s: rsn ie found\n", __func__);
 		arg->need_ptk_4_way = true;
@@ -1264,12 +2253,100 @@
 	/* TODO: safe_mode_enabled (bypass 4-way handshake) flag req? */
 }
 
+static enum ieee80211_sta_rx_bandwidth
+ath12k_get_radio_max_bw_caps(struct ath12k *ar,
+			     enum nl80211_band band,
+			     enum ieee80211_sta_rx_bandwidth sta_bw,
+			     enum nl80211_iftype iftype)
+{
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_sband_iftype_data *iftype_data;
+	const struct ieee80211_sta_eht_cap *eht_cap;
+	const struct ieee80211_sta_he_cap *he_cap;
+	int i, idx = 0;
+
+	sband = &ar->mac.sbands[band];
+	iftype_data = ar->mac.iftype[band];
+
+	if (!sband || !iftype_data) {
+		WARN_ONCE(1, "Invalid band specified :%d\n", band);
+		return sta_bw;
+	}
+
+	for (i = 0; i < NUM_NL80211_IFTYPES && i != iftype; i++) {
+		switch(i) {
+		case NL80211_IFTYPE_STATION:
+		case NL80211_IFTYPE_AP:
+		case NL80211_IFTYPE_MESH_POINT:
+			idx++;
+			break;
+		default:
+			break;
+		}
+	}
+
+	eht_cap = &iftype_data[idx].eht_cap;
+	he_cap = &iftype_data[idx].he_cap;
+
+	if (!eht_cap || !he_cap)
+		return sta_bw;
+
+	/* EHT Caps */
+	if (band != NL80211_BAND_2GHZ && eht_cap->has_eht &&
+	    (eht_cap->eht_cap_elem.phy_cap_info[0] &
+	     IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ))
+		return IEEE80211_STA_RX_BW_320;
+
+	/* HE Caps */
+	switch (band) {
+	case NL80211_BAND_5GHZ:
+	case NL80211_BAND_6GHZ:
+		if (he_cap->has_he) {
+			if (he_cap->he_cap_elem.phy_cap_info[0] &
+			    (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+			    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)) {
+				return IEEE80211_STA_RX_BW_160;
+			} else if (he_cap->he_cap_elem.phy_cap_info[0] &
+				   IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G) {
+				return IEEE80211_STA_RX_BW_80;
+			}
+		}
+		break;
+	case NL80211_BAND_2GHZ:
+		if (he_cap->has_he &&
+		    (he_cap->he_cap_elem.phy_cap_info[0] &
+		     IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G))
+			return IEEE80211_STA_RX_BW_40;
+		break;
+	default:
+		break;
+	}
+
+	if (sband->vht_cap.vht_supported) {
+		switch (sband->vht_cap.cap &
+			IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
+		case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+		case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
+			return IEEE80211_STA_RX_BW_160;
+		default:
+			return sta_bw;
+		}
+	}
+
+	/* Keep Last */
+	if (sband->ht_cap.ht_supported &&
+	    (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+		return IEEE80211_STA_RX_BW_40;
+
+	return sta_bw;
+}
+
 static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
-				      struct ieee80211_vif *vif,
-				      struct ieee80211_sta *sta,
+				      struct ath12k_link_vif *arvif,
+				      struct ath12k_link_sta *arsta,
 				      struct ath12k_wmi_peer_assoc_arg *arg)
 {
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
 	struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
 	struct cfg80211_chan_def def;
 	const struct ieee80211_supported_band *sband;
@@ -1278,15 +2355,28 @@
 	u32 ratemask;
 	u8 rate;
 	int i;
+	u8 link_id = arvif->link_id;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	struct ieee80211_link_sta *link_sta;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+	if (WARN_ON(ath12k_mac_vif_chan(vif, &def, link_id)))
+		return;
+
+	link_sta = ath12k_get_link_sta(arsta);
+
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc rate set\n");
 		return;
+	}
 
 	band = def.chan->band;
-	sband = ar->hw->wiphy->bands[band];
-	ratemask = sta->deflink.supp_rates[band];
+	sband = ar->ah->hw->wiphy->bands[band];
+	ratemask = link_sta->supp_rates[band];
 	ratemask &= arvif->bitrate_mask.control[band].legacy;
 	rates = sband->bitrates;
 
@@ -1303,7 +2393,7 @@
 }
 
 static bool
-ath12k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+ath12k_peer_assoc_h_ht_masked(const u8 *ht_mcs_mask)
 {
 	int nss;
 
@@ -1315,7 +2405,7 @@
 }
 
 static bool
-ath12k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+ath12k_peer_assoc_h_vht_masked(const u16 *vht_mcs_mask)
 {
 	int nss;
 
@@ -1327,22 +2417,37 @@
 }
 
 static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
-				   struct ieee80211_vif *vif,
-				   struct ieee80211_sta *sta,
+				   struct ath12k_link_vif *arvif,
+				   struct ath12k_link_sta *arsta,
 				   struct ath12k_wmi_peer_assoc_arg *arg)
 {
-	const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	const struct ieee80211_sta_ht_cap *ht_cap;
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
 	struct cfg80211_chan_def def;
 	enum nl80211_band band;
 	const u8 *ht_mcs_mask;
 	int i, n;
 	u8 max_nss;
 	u32 stbc;
+	u8 link_id = arvif->link_id;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	struct ieee80211_link_sta *link_sta;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	link_sta = ath12k_get_link_sta(arsta);
+
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc ht set\n");
+		return;
+	}
+
+	ht_cap = &link_sta->ht_cap;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+	if (WARN_ON(ath12k_mac_vif_chan(vif, &def, link_id)))
 		return;
 
 	if (!ht_cap->ht_supported)
@@ -1368,11 +2473,19 @@
 	if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
 		arg->ldpc_flag = true;
 
-	if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
+	if (link_sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
 		arg->bw_40 = true;
 		arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
 	}
 
+	/* As firmware handles these two flags (IEEE80211_HT_CAP_SGI_20
+	 * and IEEE80211_HT_CAP_SGI_40) for enabling SGI, we reset
+	 * both flags if guard interval is Default GI
+	 */
+	if (arvif->bitrate_mask.control[band].gi == NL80211_TXRATE_DEFAULT_GI)
+		arg->peer_ht_caps &= ~(IEEE80211_HT_CAP_SGI_20 |
+				IEEE80211_HT_CAP_SGI_40);
+
 	if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
 		if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 |
 		    IEEE80211_HT_CAP_SGI_40))
@@ -1418,7 +2531,7 @@
 			arg->peer_ht_rates.rates[i] = i;
 	} else {
 		arg->peer_ht_rates.num_rates = n;
-		arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+		arg->peer_nss = min(link_sta->rx_nss, max_nss);
 	}
 
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
@@ -1487,23 +2600,66 @@
 	return tx_mcs_set;
 }
 
+static u8 ath12k_get_nss_160mhz(struct ath12k *ar,
+				u8 max_nss)
+{
+	u8 nss_ratio_info = ar->pdev->cap.nss_ratio_info;
+	u8 max_sup_nss = 0;
+
+	switch (nss_ratio_info) {
+	case WMI_NSS_RATIO_1BY2_NSS:
+		max_sup_nss = max_nss >> 1;
+		break;
+	case WMI_NSS_RATIO_3BY4_NSS:
+		ath12k_warn(ar->ab, "WMI_NSS_RATIO_3BY4_NSS not supported\n");
+		break;
+	case WMI_NSS_RATIO_1_NSS:
+		max_sup_nss = max_nss;
+		break;
+	case WMI_NSS_RATIO_2_NSS:
+		ath12k_warn(ar->ab, "WMI_NSS_RATIO_2_NSS not supported\n");
+		break;
+	default:
+		ath12k_warn(ar->ab, "invalid nss ratio received from fw\n");
+		break;
+	}
+
+	return max_sup_nss;
+}
+
 static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
-				    struct ieee80211_vif *vif,
-				    struct ieee80211_sta *sta,
+				    struct ath12k_link_vif *arvif,
+				    struct ath12k_link_sta *arsta,
 				    struct ath12k_wmi_peer_assoc_arg *arg)
 {
-	const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	const struct ieee80211_sta_vht_cap *vht_cap;
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
 	struct cfg80211_chan_def def;
 	enum nl80211_band band;
-	const u16 *vht_mcs_mask;
+	u16 *vht_mcs_mask;
 	u16 tx_mcs_map;
 	u8 ampdu_factor;
 	u8 max_nss, vht_mcs;
-	int i;
+	int i, vht_nss, nss_idx;
+	bool user_rate_valid = true;
+	u32 rx_nss, tx_nss, nss_160;
+	u8 link_id = arvif->link_id;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	struct ieee80211_link_sta *link_sta;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
 
-	if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+	link_sta = ath12k_get_link_sta(arsta);
+
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc vht set\n");
 		return;
+	}
+	vht_cap = &link_sta->vht_cap;
+	if (WARN_ON(ath12k_mac_vif_chan(vif, &def, link_id))) {
+		return;
+	}
 
 	if (!vht_cap->vht_supported)
 		return;
@@ -1537,12 +2693,31 @@
 				 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
 					ampdu_factor)) - 1);
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
 		arg->bw_80 = true;
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
 		arg->bw_160 = true;
 
+	vht_nss =  ath12k_mac_max_vht_nss(vht_mcs_mask);
+
+	if (vht_nss > link_sta->rx_nss) {
+		user_rate_valid = false;
+		for (nss_idx = link_sta->rx_nss - 1; nss_idx >= 0; nss_idx--) {
+			if (vht_mcs_mask[nss_idx]) {
+				user_rate_valid = true;
+				break;
+			}
+		}
+	}
+
+	if (!user_rate_valid) {
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+			   "Setting vht range MCS value to peer supported nss:%d for peer %pM\n",
+			   link_sta->rx_nss, arsta->addr);
+		vht_mcs_mask[link_sta->rx_nss - 1] = vht_mcs_mask[vht_nss - 1];
+	}
+
 	/* Calculate peer NSS capability from VHT capabilities if STA
 	 * supports VHT.
 	 */
@@ -1554,7 +2729,7 @@
 		    vht_mcs_mask[i])
 			max_nss = i + 1;
 	}
-	arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+	arg->peer_nss = min(link_sta->rx_nss, max_nss);
 	arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
 	arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
 	arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
@@ -1576,27 +2751,91 @@
 	/* TODO:  Check */
 	arg->tx_max_mcs_nss = 0xFF;
 
-	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
-		   sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+	if (arg->peer_phymode == MODE_11AC_VHT160 ||
+	    arg->peer_phymode == MODE_11AC_VHT80_80) {
+		tx_nss = ath12k_get_nss_160mhz(ar, max_nss);
+		rx_nss = min(arg->peer_nss, tx_nss);
+		arg->peer_bw_rxnss_override = ATH12K_BW_NSS_MAP_ENABLE;
+
+		if (!rx_nss) {
+			ath12k_warn(ar->ab, "invalid max_nss\n");
+			return;
+		}
+
+		if (arg->peer_phymode == MODE_11AC_VHT160)
+			nss_160 = FIELD_PREP(ATH12K_PEER_RX_NSS_160MHZ, rx_nss - 1);
+		else
+			nss_160 = FIELD_PREP(ATH12K_PEER_RX_NSS_80_80MHZ, rx_nss - 1);
 
-	/* TODO: rxnss_override */
+		arg->peer_bw_rxnss_override |= nss_160;
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "mac vht peer %pM max_mpdu %d flags 0x%x nss_override 0x%x\n",
+		   arsta->addr, arg->peer_max_mpdu, arg->peer_flags,
+		   arg->peer_bw_rxnss_override);
 }
 
 static void ath12k_peer_assoc_h_he(struct ath12k *ar,
-				   struct ieee80211_vif *vif,
-				   struct ieee80211_sta *sta,
+				   struct ath12k_link_vif *arvif,
+				   struct ath12k_link_sta *arsta,
 				   struct ath12k_wmi_peer_assoc_arg *arg)
 {
-	const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
+	struct cfg80211_chan_def def;
+	const struct ieee80211_sta_he_cap *he_cap;
 	int i;
 	u8 ampdu_factor, rx_mcs_80, rx_mcs_160, max_nss;
 	u16 mcs_160_map, mcs_80_map;
 	bool support_160;
-	u16 v;
+	enum nl80211_band band;
+	u16 *he_mcs_mask;
+	u8 he_mcs;
+	u16 he_tx_mcs = 0, v = 0;
+	int he_nss, nss_idx;
+	bool user_rate_valid = true;
+	u32 rx_nss, tx_nss, nss_160;
+	u8 link_id = arvif->link_id;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	struct ieee80211_bss_conf* link_conf;
+	struct ieee80211_link_sta *link_sta;
+	u32 peer_he_ops;
+	enum ieee80211_sta_rx_bandwidth radio_max_bw_caps;
+
+	link_conf = ath12k_get_link_bss_conf(arvif);
+
+	if (!link_conf) {
+		ath12k_warn(ar->ab, "unable to access bss link conf in peer assoc\n");
+		return;
+	}
+
+	link_sta = ath12k_get_link_sta(arsta);
+
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc he set\n");
+		return;
+	}
+
+	peer_he_ops = link_conf->he_oper.params;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+	he_cap = &link_sta->he_cap;
+
+	if (WARN_ON(ath12k_mac_vif_chan(vif, &def, link_id)))
+		return;
 
 	if (!he_cap->has_he)
 		return;
 
+	band = def.chan->band;
+	he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
+	radio_max_bw_caps = ath12k_get_radio_max_bw_caps(ar, band, link_sta->bandwidth,
+						 vif->type);
+
+	if (ath12k_peer_assoc_h_he_masked(he_mcs_mask))
+		return;
+
 	arg->he_flag = true;
 
 	support_160 = !!(he_cap->he_cap_elem.phy_cap_info[0] &
@@ -1631,13 +2870,15 @@
 	else
 		max_nss = rx_mcs_80;
 
-	arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+	arg->peer_nss = min(link_sta->rx_nss, max_nss);
 
 	memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
 	       sizeof(arg->peer_he_cap_macinfo));
 	memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
 	       sizeof(arg->peer_he_cap_phyinfo));
-	arg->peer_he_ops = vif->bss_conf.he_oper.params;
+
+	arg->peer_he_ops = peer_he_ops;
+
 
 	/* the top most byte is used to indicate BSS color info */
 	arg->peer_he_ops &= 0xffffff;
@@ -1659,10 +2900,10 @@
 			IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK;
 
 	if (ampdu_factor) {
-		if (sta->deflink.vht_cap.vht_supported)
+		if (link_sta->vht_cap.vht_supported)
 			arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
 						    ampdu_factor)) - 1;
-		else if (sta->deflink.ht_cap.ht_supported)
+		else if (link_sta->ht_cap.ht_supported)
 			arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
 						    ampdu_factor)) - 1;
 	}
@@ -1703,25 +2944,49 @@
 	if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
 		arg->twt_requester = true;
 
-	switch (sta->deflink.bandwidth) {
+	he_nss =  ath12k_mac_max_he_nss(he_mcs_mask);
+
+	if (he_nss > link_sta->rx_nss) {
+		user_rate_valid = false;
+		for (nss_idx = link_sta->rx_nss - 1; nss_idx >= 0; nss_idx--) {
+			if (he_mcs_mask[nss_idx]) {
+				user_rate_valid = true;
+				break;
+			}
+		}
+	}
+
+	if (!user_rate_valid) {
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+			   "Setting he range MCS value to peer supported nss:%d for peer %pM\n",
+			   link_sta->rx_nss, arsta->addr);
+		he_mcs_mask[link_sta->rx_nss - 1] = he_mcs_mask[he_nss - 1];
+	}
+
+	switch (min(link_sta->sta_max_bandwidth, radio_max_bw_caps)) {
 	case IEEE80211_STA_RX_BW_160:
 		if (he_cap->he_cap_elem.phy_cap_info[0] &
 		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
 			v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
+			v = ath12k_peer_assoc_h_he_limit(v, he_mcs_mask);
 			arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
 
 			v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
 			arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
 
 			arg->peer_he_mcs_count++;
+			he_tx_mcs = v;
 		}
 		v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
 		arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
 
 		v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
+		v = ath12k_peer_assoc_h_he_limit(v, he_mcs_mask);
 		arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
 
 		arg->peer_he_mcs_count++;
+		if (!he_tx_mcs)
+			he_tx_mcs = v;
 		fallthrough;
 
 	default:
@@ -1729,24 +2994,463 @@
 		arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
 
 		v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
+		v = ath12k_peer_assoc_h_he_limit(v, he_mcs_mask);
 		arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
 
 		arg->peer_he_mcs_count++;
+		if (!he_tx_mcs)
+			he_tx_mcs = v;
+		break;
+	}
+
+	/* Calculate peer NSS capability from HE capabilities if STA
+	 * supports HE.
+	 */
+	for (i = 0, max_nss = 0, he_mcs = 0; i < NL80211_HE_NSS_MAX; i++) {
+		he_mcs = he_tx_mcs >> (2 * i) & 3;
+
+		/* In case of fixed rates, MCS Range in he_tx_mcs might have
+		 * unsupported range, with he_mcs_mask set, so check either of them
+		 * to find nss.
+		 */
+		if (he_mcs != IEEE80211_HE_MCS_NOT_SUPPORTED ||
+		    he_mcs_mask[i])
+			max_nss = i + 1;
+	}
+	arg->peer_nss = min(link_sta->rx_nss, max_nss);
+
+	if (arg->peer_phymode == MODE_11AX_HE160 ||
+	    arg->peer_phymode == MODE_11AX_HE80_80) {
+		tx_nss = ath12k_get_nss_160mhz(ar, ar->num_tx_chains);
+		rx_nss = min(arg->peer_nss, tx_nss);
+
+		arg->peer_nss = min(link_sta->rx_nss, ar->num_rx_chains);
+		arg->peer_bw_rxnss_override = ATH12K_BW_NSS_MAP_ENABLE;
+
+		if (!rx_nss) {
+			ath12k_warn(ar->ab, "invalid max_nss\n");
+			return;
+		}
+
+		if (arg->peer_phymode == MODE_11AX_HE160)
+			nss_160 = FIELD_PREP(ATH12K_PEER_RX_NSS_160MHZ, rx_nss - 1);
+		else
+			nss_160 = FIELD_PREP(ATH12K_PEER_RX_NSS_80_80MHZ, rx_nss - 1);
+
+		arg->peer_bw_rxnss_override |= nss_160;
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "mac he peer %pM nss %d mcs cnt %d nss_override 0x%x\n",
+		   arsta->addr, arg->peer_nss,
+		   arg->peer_he_mcs_count,
+		   arg->peer_bw_rxnss_override);
+}
+
+static void ath12k_peer_assoc_h_he_6ghz(struct ath12k *ar,
+				        struct ath12k_link_vif *arvif,
+				        struct ath12k_link_sta *arsta,
+					struct ath12k_wmi_peer_assoc_arg *arg)
+{
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
+	const struct ieee80211_sta_he_cap *he_cap;
+	struct cfg80211_chan_def def;
+	enum nl80211_band band;
+	u8  ampdu_factor;
+	u8 link_id = arvif->link_id;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	struct ieee80211_link_sta *link_sta;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	link_sta = ath12k_get_link_sta(arsta);
+
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc he-6ghz set\n");
+		return;
+	}
+	he_cap = &link_sta->he_cap;
+
+	if (WARN_ON(ath12k_mac_vif_chan(vif, &def, link_id)))
+		return;
+
+	band = def.chan->band;
+
+	if (!arg->he_flag || band != NL80211_BAND_6GHZ || !link_sta->he_6ghz_capa.capa)
+		return;
+
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
+		arg->bw_40 = true;
+
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
+		arg->bw_80 = true;
+
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
+		arg->bw_160 = true;
+
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320)
+		arg->bw_320 = true;
+
+	arg->peer_he_caps_6ghz = le16_to_cpu(link_sta->he_6ghz_capa.capa);
+	arg->peer_mpdu_density =
+		ath12k_parse_mpdudensity(FIELD_GET(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START,
+						   arg->peer_he_caps_6ghz));
+
+	/* From IEEE Std 802.11ax-2021 - Section 10.12.2: An HE STA shall be capable of
+	 * receiving A-MPDU where the A-MPDU pre-EOF padding length is up to the value
+	 * indicated by the Maximum A-MPDU Length Exponent Extension field in the HE
+	 * Capabilities element and the Maximum A-MPDU Length Exponent field in HE 6 GHz
+	 * Band Capabilities element in the 6 GHz band.
+	 *
+	 * Here, we are extracting the Max A-MPDU Exponent Extension from HE caps and
+	 * factor is the Maximum A-MPDU Length Exponent from HE 6 GHZ Band capability.
+	 */
+	ampdu_factor = FIELD_GET(IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK,
+				 he_cap->he_cap_elem.mac_cap_info[3]) +
+			FIELD_GET(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP,
+				  arg->peer_he_caps_6ghz);
+
+	arg->peer_max_mpdu = (1u << (IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR +
+				     ampdu_factor)) - 1;
+}
+
+static void
+ath12k_mac_set_eht_mcs_nss_bitmap_20mhz_only(
+		const struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcsnss,
+		u32 *rx_mcs, u32 *tx_mcs,
+		const u16 eht_mcs_limit[NL80211_EHT_NSS_MAX])
+{
+	int nss;
+	u8 mcs_7 = 0, mcs_9 = 0, mcs_11 = 0, mcs_13 = 0;
+	u8 peer_mcs_7 = 0, peer_mcs_9 = 0, peer_mcs_11 = 0, peer_mcs_13 = 0;
+
+	for (nss = 0; nss < NL80211_EHT_NSS_MAX; nss++) {
+		if (eht_mcs_limit[nss] & 0x00FF)
+			mcs_7++;
+		if (eht_mcs_limit[nss] & 0x0300)
+			mcs_9++;
+		if (eht_mcs_limit[nss] & 0x0C00)
+			mcs_11++;
+		if (eht_mcs_limit[nss] & 0x3000)
+			mcs_13++;
+	}
+
+	peer_mcs_7 = u8_get_bits(mcsnss->rx_tx_mcs7_max_nss, IEEE80211_EHT_MCS_NSS_RX);
+	peer_mcs_9 = u8_get_bits(mcsnss->rx_tx_mcs9_max_nss, IEEE80211_EHT_MCS_NSS_RX);
+	peer_mcs_11 = u8_get_bits(mcsnss->rx_tx_mcs11_max_nss, IEEE80211_EHT_MCS_NSS_RX);
+	peer_mcs_13 = u8_get_bits(mcsnss->rx_tx_mcs13_max_nss, IEEE80211_EHT_MCS_NSS_RX);
+
+	*rx_mcs = FIELD_PREP(WMI_MCS_NSS_MAP_0_7, min(peer_mcs_7, mcs_7)) |
+		  FIELD_PREP(WMI_MCS_NSS_MAP_8_9, min(peer_mcs_9, mcs_9)) |
+		  FIELD_PREP(WMI_MCS_NSS_MAP_10_11, min(peer_mcs_11, mcs_11)) |
+		  FIELD_PREP(WMI_MCS_NSS_MAP_12_13, min (peer_mcs_13, mcs_13));
+
+	peer_mcs_7 = u8_get_bits(mcsnss->rx_tx_mcs7_max_nss, IEEE80211_EHT_MCS_NSS_TX);
+	peer_mcs_9 = u8_get_bits(mcsnss->rx_tx_mcs9_max_nss, IEEE80211_EHT_MCS_NSS_TX);
+	peer_mcs_11 = u8_get_bits(mcsnss->rx_tx_mcs11_max_nss, IEEE80211_EHT_MCS_NSS_TX);
+	peer_mcs_13 = u8_get_bits(mcsnss->rx_tx_mcs13_max_nss, IEEE80211_EHT_MCS_NSS_TX);
+
+	*tx_mcs = FIELD_PREP(WMI_MCS_NSS_MAP_0_7, min(peer_mcs_7, mcs_7)) |
+		  FIELD_PREP(WMI_MCS_NSS_MAP_8_9, min(peer_mcs_9, mcs_9)) |
+		  FIELD_PREP(WMI_MCS_NSS_MAP_10_11, min(peer_mcs_11, mcs_11)) |
+		  FIELD_PREP(WMI_MCS_NSS_MAP_12_13, min (peer_mcs_13, mcs_13));
+}
+
+static void
+ath12k_mac_set_eht_mcs_nss_bitmap(const struct ieee80211_eht_mcs_nss_supp_bw *mcsnss,
+				  u32 *rx_mcs, u32 *tx_mcs,
+				  const u16 eht_mcs_limit[NL80211_EHT_NSS_MAX])
+{
+	int nss;
+	u8 mcs_7 = 0, mcs_9 = 0, mcs_11 = 0, mcs_13 = 0;
+	u8 peer_mcs_7 = 0, peer_mcs_9 = 0, peer_mcs_11 = 0, peer_mcs_13 = 0;
+
+	for (nss = 0; nss < NL80211_EHT_NSS_MAX; nss++) {
+		if (eht_mcs_limit[nss] & 0x00FF)
+			mcs_7++;
+		if (eht_mcs_limit[nss] & 0x0300)
+			mcs_9++;
+		if (eht_mcs_limit[nss] & 0x0C00)
+			mcs_11++;
+		if (eht_mcs_limit[nss] & 0x3000)
+			mcs_13++;
+	}
+
+	peer_mcs_7 = u8_get_bits(mcsnss->rx_tx_mcs9_max_nss, IEEE80211_EHT_MCS_NSS_RX);
+	peer_mcs_9 = u8_get_bits(mcsnss->rx_tx_mcs9_max_nss, IEEE80211_EHT_MCS_NSS_RX);
+	peer_mcs_11 = u8_get_bits(mcsnss->rx_tx_mcs11_max_nss, IEEE80211_EHT_MCS_NSS_RX);
+	peer_mcs_13 = u8_get_bits(mcsnss->rx_tx_mcs13_max_nss, IEEE80211_EHT_MCS_NSS_RX);
+
+	*rx_mcs = FIELD_PREP(WMI_MCS_NSS_MAP_0_7, min(peer_mcs_7, mcs_7)) |
+		  FIELD_PREP(WMI_MCS_NSS_MAP_8_9, min(peer_mcs_9, mcs_9)) |
+		  FIELD_PREP(WMI_MCS_NSS_MAP_10_11, min(peer_mcs_11, mcs_11)) |
+		  FIELD_PREP(WMI_MCS_NSS_MAP_12_13, min (peer_mcs_13, mcs_13));
+
+	peer_mcs_7 = u8_get_bits(mcsnss->rx_tx_mcs9_max_nss, IEEE80211_EHT_MCS_NSS_TX);
+	peer_mcs_9 = u8_get_bits(mcsnss->rx_tx_mcs9_max_nss, IEEE80211_EHT_MCS_NSS_TX);
+	peer_mcs_11 = u8_get_bits(mcsnss->rx_tx_mcs11_max_nss, IEEE80211_EHT_MCS_NSS_TX);
+	peer_mcs_13 = u8_get_bits(mcsnss->rx_tx_mcs13_max_nss, IEEE80211_EHT_MCS_NSS_TX);
+
+	*tx_mcs = FIELD_PREP(WMI_MCS_NSS_MAP_0_7, min(peer_mcs_7, mcs_7)) |
+		  FIELD_PREP(WMI_MCS_NSS_MAP_8_9, min(peer_mcs_9, mcs_9)) |
+		  FIELD_PREP(WMI_MCS_NSS_MAP_10_11, min(peer_mcs_11, mcs_11)) |
+		  FIELD_PREP(WMI_MCS_NSS_MAP_12_13, min (peer_mcs_13, mcs_13));
+}
+
+static void ath12k_mac_set_eht_ppe_threshold(const u8 *ppe_thres,
+					     struct ath12k_ppe_threshold *ppet)
+{
+	u32 bit_pos = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
+	u8 nss;
+
+	ppet->numss_m1 = ppe_thres[0] & IEEE80211_EHT_PPE_THRES_NSS_MASK;
+	ppet->ru_bit_mask = FIELD_GET(IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK,
+			__cpu_to_le16(ppe_thres[0]));
+
+	for (nss = 0; nss <= ppet->numss_m1; nss++) {
+		u8 ru;
+
+		for (ru = 0;
+		     ru < hweight8(IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+		     ru++) {
+			u32 val = 0;
+			u8 i;
+
+			if ((ppet->ru_bit_mask & BIT(ru)) == 0)
+				continue;
+
+			for (i = 0; i < IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE;
+			     i++) {
+				val >>= 1;
+				val |= ((ppe_thres[bit_pos / 8] >>
+					(bit_pos % 8)) & 0x1) << 5;
+				bit_pos++;
+			}
+			ppet->ppet16_ppet8_ru3_ru0[nss] |=
+				(val <<
+				 (ru * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE));
+		}
+	}
+}
+
+static bool
+ath12k_peer_assoc_h_eht_masked(const u16 eht_mcs_mask[NL80211_EHT_NSS_MAX])
+{
+	int nss;
+
+	for (nss = 0; nss < NL80211_EHT_NSS_MAX; nss++)
+		if (eht_mcs_mask[nss])
+			return false;
+
+	return true;
+}
+
+static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
+				    struct ath12k_link_vif *arvif,
+				    struct ath12k_link_sta *arsta,
+				    struct ath12k_wmi_peer_assoc_arg *arg)
+{
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
+	struct cfg80211_chan_def def;
+	const struct ieee80211_sta_eht_cap *eht_cap;
+	const struct ieee80211_eht_mcs_nss_supp *mcs_nss;
+	const struct ieee80211_eht_mcs_nss_supp_bw *mcs_nss_supp_bw;
+	u8 mcs_idx = WMI_EHTCAP_TXRX_MCS_NSS_IDX_80;
+	enum nl80211_band band;
+	u16 *eht_mcs_mask;
+	u8 max_nss;
+	int eht_nss, nss_idx;
+	bool user_rate_valid = true;
+	u8 link_id = arvif->link_id;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	struct ieee80211_bss_conf* link_conf;
+	struct ieee80211_link_sta *link_sta;
+	const struct ieee80211_sta_eht_cap *own_eht_cap;
+	const struct ieee80211_eht_mcs_nss_supp *own_eht_mcs_nss_supp;
+	enum ieee80211_sta_rx_bandwidth radio_max_bw_caps;
+
+	link_conf = ath12k_get_link_bss_conf(arvif);
+
+	if (!link_conf) {
+		ath12k_warn(ar->ab, "unable to access bss link conf in peer assoc\n");
+		return;
+	}
+
+	link_sta = ath12k_get_link_sta(arsta);
+
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc eht set\n");
+		return;
+	}
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	eht_cap = &link_sta->eht_cap;
+	mcs_nss = &eht_cap->eht_mcs_nss_supp;
+	mcs_nss_supp_bw = &mcs_nss->bw._80;
+
+	if (WARN_ON(ath12k_mac_vif_chan(vif, &def, link_id)))
+		return;
+
+	band = def.chan->band;
+	eht_mcs_mask = arvif->bitrate_mask.control[band].eht_mcs;
+
+	radio_max_bw_caps = ath12k_get_radio_max_bw_caps(ar, band, link_sta->bandwidth,
+						 vif->type);
+
+	own_eht_cap = &ar->mac.sbands[band].iftype_data->eht_cap;
+	own_eht_mcs_nss_supp = &own_eht_cap->eht_mcs_nss_supp;
+
+	if (ath12k_peer_assoc_h_eht_masked((const u16*) eht_mcs_mask))
+		return;
+
+	if (!link_sta->he_cap.has_he || !eht_cap->has_eht)
+		return;
+
+	arg->eht_flag = true;
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320) {
+		arg->bw_40 = true;
+		arg->bw_80 = true;
+		arg->bw_160 = true;
+		arg->bw_320 = true;
+	}
+	if (eht_cap->eht_cap_elem.phy_cap_info[5] &
+	    IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT &&
+	    eht_cap->eht_ppe_thres[0] != 0)
+		ath12k_mac_set_eht_ppe_threshold(eht_cap->eht_ppe_thres,
+						 &arg->peer_eht_ppet);
+	memcpy(arg->peer_eht_cap_mac, eht_cap->eht_cap_elem.mac_cap_info,
+	       sizeof(eht_cap->eht_cap_elem.mac_cap_info));
+	memcpy(arg->peer_eht_cap_phy, eht_cap->eht_cap_elem.phy_cap_info,
+	       sizeof(eht_cap->eht_cap_elem.phy_cap_info));
+	arg->peer_eht_ops = 0;
+
+	eht_nss = ath12k_mac_max_eht_mcs_nss((void *)own_eht_mcs_nss_supp,
+						  sizeof(*own_eht_mcs_nss_supp));
+
+	if (eht_nss > link_sta->rx_nss) {
+		user_rate_valid = false;
+		for (nss_idx = (link_sta->rx_nss - 1); nss_idx >= 0; nss_idx--) {
+			if (eht_mcs_mask[nss_idx]) {
+				user_rate_valid = true;
+				break;
+			}
+		}
+	}
+
+	if (!user_rate_valid) {
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+				"Setting eht range MCS value to peer supported nss:%d for peer %pM\n",
+				link_sta->rx_nss, arsta->addr);
+		eht_mcs_mask[link_sta->rx_nss - 1] = eht_mcs_mask[eht_nss - 1];
+	}
+
+	switch (min(link_sta->sta_max_bandwidth, radio_max_bw_caps)) {
+	case IEEE80211_STA_RX_BW_320:
+		mcs_nss_supp_bw = &mcs_nss->bw._320;
+		mcs_idx = WMI_EHTCAP_TXRX_MCS_NSS_IDX_320;
+		arg->peer_eht_mcs_count++;
+		ath12k_mac_set_eht_mcs_nss_bitmap(&mcs_nss->bw._320,
+						  &arg->peer_eht_rx_mcs_set[mcs_idx],
+						  &arg->peer_eht_tx_mcs_set[mcs_idx],
+						  eht_mcs_mask);
+		fallthrough;
+
+	case IEEE80211_STA_RX_BW_160:
+		mcs_nss_supp_bw = &mcs_nss->bw._160;
+		mcs_idx = WMI_EHTCAP_TXRX_MCS_NSS_IDX_160;
+		arg->peer_eht_mcs_count++;
+		ath12k_mac_set_eht_mcs_nss_bitmap(&mcs_nss->bw._160,
+						  &arg->peer_eht_rx_mcs_set[mcs_idx],
+						  &arg->peer_eht_tx_mcs_set[mcs_idx],
+						  eht_mcs_mask);
+		fallthrough;
+
+	default:
+		if (arg->peer_phymode == MODE_11BE_EHT20) {
+			mcs_idx = WMI_EHTCAP_TXRX_MCS_NSS_IDX_80;
+			ath12k_mac_set_eht_mcs_nss_bitmap_20mhz_only(&mcs_nss->only_20mhz,
+					&arg->peer_eht_rx_mcs_set[mcs_idx],
+					&arg->peer_eht_tx_mcs_set[mcs_idx],
+					eht_mcs_mask);
+		} else {
+			mcs_idx = WMI_EHTCAP_TXRX_MCS_NSS_IDX_80;
+			ath12k_mac_set_eht_mcs_nss_bitmap(&mcs_nss->bw._80,
+					&arg->peer_eht_rx_mcs_set[mcs_idx],
+					&arg->peer_eht_tx_mcs_set[mcs_idx],
+					eht_mcs_mask);
+		}
+
+		arg->peer_eht_mcs_count++;
 		break;
 	}
+
+	if (!(link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
+	      IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
+		if (mcs_nss->only_20mhz.rx_tx_mcs13_max_nss)
+			max_nss = mcs_nss->only_20mhz.rx_tx_mcs13_max_nss;
+		else if (mcs_nss->only_20mhz.rx_tx_mcs11_max_nss)
+			max_nss = mcs_nss->only_20mhz.rx_tx_mcs11_max_nss;
+		else if (mcs_nss->only_20mhz.rx_tx_mcs9_max_nss)
+			max_nss = mcs_nss->only_20mhz.rx_tx_mcs9_max_nss;
+		else
+			max_nss = mcs_nss->only_20mhz.rx_tx_mcs7_max_nss;
+	} else {
+		if (mcs_nss_supp_bw->rx_tx_mcs13_max_nss)
+			max_nss = u8_get_bits(mcs_nss_supp_bw->rx_tx_mcs13_max_nss,
+					      IEEE80211_EHT_MCS_NSS_RX);
+		if (mcs_nss_supp_bw->rx_tx_mcs11_max_nss)
+			max_nss = max(max_nss, u8_get_bits(mcs_nss_supp_bw->rx_tx_mcs11_max_nss,
+					      IEEE80211_EHT_MCS_NSS_RX));
+		if (mcs_nss_supp_bw->rx_tx_mcs9_max_nss)
+			max_nss = max(max_nss, u8_get_bits(mcs_nss_supp_bw->rx_tx_mcs9_max_nss,
+					      IEEE80211_EHT_MCS_NSS_RX));
+	}
+
+	max_nss = min(max_nss, (uint8_t)eht_nss);
+
+	arg->peer_nss = min(link_sta->rx_nss, max_nss);
+	arg->ru_punct_bitmap = ~def.ru_punct_bitmap;
+	if (ieee80211_vif_is_mesh(vif) && link_sta->ru_punct_bitmap)
+		arg->ru_punct_bitmap = ~link_sta->ru_punct_bitmap;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "mac eht peer %pM nss %d mcs cnt %d ru_punct_bitmap 0x%x\n",
+		   arsta->addr, arg->peer_nss, arg->peer_eht_mcs_count, arg->ru_punct_bitmap);
 }
 
-static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta,
+static void ath12k_peer_assoc_h_smps(struct ath12k_link_sta *arsta,
 				     struct ath12k_wmi_peer_assoc_arg *arg)
 {
-	const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+	const struct ieee80211_sta_ht_cap *ht_cap;
 	int smps;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	struct ieee80211_link_sta *link_sta;
+	struct ath12k *ar = arsta->arvif->ar;
 
-	if (!ht_cap->ht_supported)
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+	link_sta = ath12k_get_link_sta(arsta);
+
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc smps set\n");
 		return;
+	}
+
+	ht_cap = &link_sta->ht_cap;
 
+	if (!ht_cap->ht_supported && !link_sta->he_6ghz_capa.capa)
+		return;
+
+	if (ht_cap->ht_supported) {
 	smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
 	smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+	} else {
+		smps = FIELD_GET(IEEE80211_HE_6GHZ_CAP_SM_PS,
+				 le16_to_cpu(link_sta->he_6ghz_capa.capa));
+	}
 
 	switch (smps) {
 	case WLAN_HT_CAP_SM_PS_STATIC:
@@ -1764,13 +3468,17 @@
 }
 
 static void ath12k_peer_assoc_h_qos(struct ath12k *ar,
-				    struct ieee80211_vif *vif,
-				    struct ieee80211_sta *sta,
+				    struct ath12k_link_vif *arvif,
+				    struct ath12k_link_sta *arsta,
 				    struct ath12k_wmi_peer_assoc_arg *arg)
 {
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
 
-	switch (arvif->vdev_type) {
+	switch (ahvif->vdev_type) {
 	case WMI_VDEV_TYPE_AP:
 		if (sta->wme) {
 			/* TODO: Check WME vs QoS */
@@ -1796,17 +3504,78 @@
 	}
 
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM qos %d\n",
-		   sta->addr, arg->qos_flag);
+		   arsta->addr, arg->qos_flag);
+}
+
+static void ath12k_peer_assoc_h_mlo(struct ath12k_link_sta *arsta,
+				    struct ath12k_wmi_peer_assoc_arg *arg)
+{
+	struct ath12k_link_vif *arvif;
+	struct ath12k_link_sta *arsta_p;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	unsigned long links;
+	struct peer_assoc_mlo_params *ml = &arg->ml;
+	u8 i = 0, link_id;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	if (!sta->mlo || ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID)
+		return;
+
+	ml->enabled = true;
+	ml->assoc_link = arsta->is_assoc_link;
+	if (arsta->link_id == ahsta->primary_link_id)
+		ml->primary_umac = true;
+	else
+		ml->primary_umac = false;
+	ml->peer_id_valid = true;
+	ml->logical_link_idx_valid = true;
+
+	ether_addr_copy(ml->mld_addr, sta->addr);
+	ml->logical_link_idx = arsta->link_idx;
+	ml->ml_peer_id = ahsta->ml_peer_id;
+	ml->ieee_link_id = arsta->link_id;
+	ml->num_partner_links = 0;
+	/* eMLSR: EML capabilities not yet wired up */
+	/* ml->eml_caps = sta->eml_cap; */
+	ml->eml_caps = 0;
+
+	links = sta->valid_links;
+	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		if (i >= ATH12K_WMI_MLO_MAX_LINKS)
+			break;
+		arsta_p = ahsta->link[link_id];
+		arvif = arsta_p->arvif;
+
+		if (arsta_p == arsta)
+			continue;
+		ml->partner_info[i].vdev_id = arvif->vdev_id;
+		ml->partner_info[i].hw_link_id = arvif->ar->pdev->hw_link_id;
+		ml->partner_info[i].assoc_link = arsta_p->is_assoc_link;
+		if (arsta_p->link_id == ahsta->primary_link_id)
+			ml->partner_info[i].primary_umac = true;
+		else
+			ml->partner_info[i].primary_umac = false;
+		ml->partner_info[i].logical_link_idx_valid = true;
+		ml->partner_info[i].logical_link_idx = arsta_p->link_idx;
+		ml->num_partner_links++;
+		i++;
+	}
 }
 
 static int ath12k_peer_assoc_qos_ap(struct ath12k *ar,
-				    struct ath12k_vif *arvif,
-				    struct ieee80211_sta *sta)
+				    struct ath12k_link_vif *arvif,
+				    struct ath12k_link_sta *arsta)
 {
 	struct ath12k_wmi_ap_ps_arg arg;
 	u32 max_sp;
 	u32 uapsd;
 	int ret;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
 
 	lockdep_assert_held(&ar->conf_mutex);
 
@@ -1835,26 +3604,26 @@
 
 	arg.param = WMI_AP_PS_PEER_PARAM_UAPSD;
 	arg.value = uapsd;
-	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
 	if (ret)
 		goto err;
 
 	arg.param = WMI_AP_PS_PEER_PARAM_MAX_SP;
 	arg.value = max_sp;
-	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
 	if (ret)
 		goto err;
 
 	/* TODO: revisit during testing */
 	arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE;
 	arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
-	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
 	if (ret)
 		goto err;
 
 	arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD;
 	arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
-	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
 	if (ret)
 		goto err;
 
@@ -1866,17 +3635,29 @@
 	return ret;
 }
 
-static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
+static bool ath12k_mac_sta_has_ofdm_only(struct ath12k_link_sta *arsta)
 {
-	return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	struct ieee80211_link_sta *link_sta;
+	struct ath12k *ar = arsta->arvif->ar;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	link_sta = ath12k_get_link_sta(arsta);
+
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in ofdm rate check\n");
+		return false;
+	}
+	return link_sta->supp_rates[NL80211_BAND_2GHZ] >>
 	       ATH12K_MAC_FIRST_OFDM_RATE_IDX;
 }
 
-static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar,
-						    struct ieee80211_sta *sta)
+static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ieee80211_link_sta *link_sta)
 {
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
-		switch (sta->deflink.vht_cap.cap &
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+		switch (link_sta->vht_cap.cap &
 			IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
 		case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
 			return MODE_11AC_VHT160;
@@ -1888,85 +3669,137 @@
 		}
 	}
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
 		return MODE_11AC_VHT80;
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
 		return MODE_11AC_VHT40;
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
 		return MODE_11AC_VHT20;
 
 	return MODE_UNKNOWN;
 }
 
-static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ath12k *ar,
-						   struct ieee80211_sta *sta)
+static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ieee80211_link_sta *link_sta)
 {
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
-		if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+		if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
 		     IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
 			return MODE_11AX_HE160;
-		else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+		else if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
 		     IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
 			return MODE_11AX_HE80_80;
 		/* not sure if this is a valid case? */
 		return MODE_11AX_HE160;
 	}
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
 		return MODE_11AX_HE80;
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
 		return MODE_11AX_HE40;
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
 		return MODE_11AX_HE20;
 
 	return MODE_UNKNOWN;
 }
 
+static enum wmi_phy_mode ath12k_mac_get_phymode_eht(struct ieee80211_link_sta *link_sta)
+{
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320)
+		if (link_sta->eht_cap.eht_cap_elem.phy_cap_info[0] &
+		    IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
+			return MODE_11BE_EHT320;
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+		if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
+		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+			return MODE_11BE_EHT160;
+		else if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
+			 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+			return MODE_11BE_EHT80_80;
+		/* not sure if this is a valid case? */
+		return MODE_11BE_EHT160;
+	}
+
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
+		return MODE_11BE_EHT80;
+
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
+		return MODE_11BE_EHT40;
+
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
+		return MODE_11BE_EHT20;
+
+	return MODE_UNKNOWN;
+}
+
 static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
-					struct ieee80211_vif *vif,
-					struct ieee80211_sta *sta,
+					struct ath12k_link_vif *arvif,
+					struct ath12k_link_sta *arsta,
 					struct ath12k_wmi_peer_assoc_arg *arg)
 {
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
 	struct cfg80211_chan_def def;
 	enum nl80211_band band;
 	const u8 *ht_mcs_mask;
 	const u16 *vht_mcs_mask;
+	const u16 *he_mcs_mask;
+	const u16 *eht_mcs_mask;
 	enum wmi_phy_mode phymode = MODE_UNKNOWN;
+	u8 link_id = arvif->link_id;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	struct ieee80211_link_sta *link_sta;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
 
-	if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+	if (WARN_ON(ath12k_mac_vif_chan(vif, &def, link_id)))
 		return;
 
+	link_sta = ath12k_get_link_sta(arsta);
+
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc phymode set\n");
+		return;
+	}
+
 	band = def.chan->band;
 	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+	he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
+	eht_mcs_mask = arvif->bitrate_mask.control[band].eht_mcs;
 
 	switch (band) {
 	case NL80211_BAND_2GHZ:
-		if (sta->deflink.he_cap.has_he) {
-			if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+		if (link_sta->eht_cap.has_eht &&
+		    !ath12k_peer_assoc_h_eht_masked(eht_mcs_mask)) {
+			if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
+				phymode = MODE_11BE_EHT40_2G;
+			else
+				phymode = MODE_11BE_EHT20_2G;
+		} else if (link_sta->he_cap.has_he &&
+			   !ath12k_peer_assoc_h_he_masked(he_mcs_mask)) {
+			if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
 				phymode = MODE_11AX_HE80_2G;
-			else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+			else if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
 				phymode = MODE_11AX_HE40_2G;
 			else
 				phymode = MODE_11AX_HE20_2G;
-		} else if (sta->deflink.vht_cap.vht_supported &&
+		} else if (link_sta->vht_cap.vht_supported &&
 		    !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
-			if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+			if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
 				phymode = MODE_11AC_VHT40;
 			else
 				phymode = MODE_11AC_VHT20;
-		} else if (sta->deflink.ht_cap.ht_supported &&
+		} else if (link_sta->ht_cap.ht_supported &&
 			   !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
-			if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+			if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
 				phymode = MODE_11NG_HT40;
 			else
 				phymode = MODE_11NG_HT20;
-		} else if (ath12k_mac_sta_has_ofdm_only(sta)) {
+		} else if (ath12k_mac_sta_has_ofdm_only(arsta)) {
 			phymode = MODE_11G;
 		} else {
 			phymode = MODE_11B;
@@ -1974,15 +3807,19 @@
 		break;
 	case NL80211_BAND_5GHZ:
 	case NL80211_BAND_6GHZ:
-		/* Check HE first */
-		if (sta->deflink.he_cap.has_he) {
-			phymode = ath12k_mac_get_phymode_he(ar, sta);
-		} else if (sta->deflink.vht_cap.vht_supported &&
+		/* Check EHT first */
+		if (link_sta->eht_cap.has_eht &&
+		    !ath12k_peer_assoc_h_eht_masked(eht_mcs_mask)) {
+			phymode = ath12k_mac_get_phymode_eht(link_sta);
+		} else if (link_sta->he_cap.has_he &&
+		    !ath12k_peer_assoc_h_he_masked(he_mcs_mask)) {
+			phymode = ath12k_mac_get_phymode_he(link_sta);
+		} else if (link_sta->vht_cap.vht_supported &&
 		    !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
-			phymode = ath12k_mac_get_phymode_vht(ar, sta);
-		} else if (sta->deflink.ht_cap.ht_supported &&
+			phymode = ath12k_mac_get_phymode_vht(link_sta);
+		} else if (link_sta->ht_cap.ht_supported &&
 			   !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
-			if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
+			if (link_sta->bandwidth >= IEEE80211_STA_RX_BW_40)
 				phymode = MODE_11NA_HT40;
 			else
 				phymode = MODE_11NA_HT20;
@@ -1995,15 +3832,15 @@
 	}
 
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM phymode %s\n",
-		   sta->addr, ath12k_mac_phymode_str(phymode));
+		   arsta->addr, ath12k_wmi_phymode_str(phymode));
 
 	arg->peer_phymode = phymode;
 	WARN_ON(phymode == MODE_UNKNOWN);
 }
 
 static void ath12k_peer_assoc_prepare(struct ath12k *ar,
-				      struct ieee80211_vif *vif,
-				      struct ieee80211_sta *sta,
+				      struct ath12k_link_vif *arvif,
+				      struct ath12k_link_sta *arsta,
 				      struct ath12k_wmi_peer_assoc_arg *arg,
 				      bool reassoc)
 {
@@ -2014,30 +3851,41 @@
 	reinit_completion(&ar->peer_assoc_done);
 
 	arg->peer_new_assoc = !reassoc;
-	ath12k_peer_assoc_h_basic(ar, vif, sta, arg);
-	ath12k_peer_assoc_h_crypto(ar, vif, sta, arg);
-	ath12k_peer_assoc_h_rates(ar, vif, sta, arg);
-	ath12k_peer_assoc_h_ht(ar, vif, sta, arg);
-	ath12k_peer_assoc_h_vht(ar, vif, sta, arg);
-	ath12k_peer_assoc_h_he(ar, vif, sta, arg);
-	ath12k_peer_assoc_h_qos(ar, vif, sta, arg);
-	ath12k_peer_assoc_h_phymode(ar, vif, sta, arg);
-	ath12k_peer_assoc_h_smps(sta, arg);
+
+	ath12k_peer_assoc_h_basic(ar, arvif, arsta, arg);
+	ath12k_peer_assoc_h_crypto(ar, arvif, arsta, arg);
+	ath12k_peer_assoc_h_rates(ar, arvif, arsta, arg);
+	ath12k_peer_assoc_h_phymode(ar, arvif, arsta, arg);
+	ath12k_peer_assoc_h_ht(ar, arvif, arsta, arg);
+	ath12k_peer_assoc_h_vht(ar, arvif, arsta, arg);
+	ath12k_peer_assoc_h_he(ar, arvif, arsta, arg);
+	ath12k_peer_assoc_h_he_6ghz(ar, arvif, arsta, arg);
+	ath12k_peer_assoc_h_eht(ar, arvif, arsta, arg);
+	ath12k_peer_assoc_h_qos(ar, arvif, arsta, arg);
+	ath12k_peer_assoc_h_smps(arsta, arg);
+	ath12k_peer_assoc_h_mlo(arsta, arg);
+
+	arsta->peer_nss = arg->peer_nss;
 
 	/* TODO: amsdu_disable req? */
 }
 
-static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif,
+static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_link_vif *arvif,
 				  const u8 *addr,
-				  const struct ieee80211_sta_ht_cap *ht_cap)
+				  const struct ieee80211_sta_ht_cap *ht_cap,
+				  u16 he_6ghz_capa)
 {
 	int smps;
 
-	if (!ht_cap->ht_supported)
+	if (!ht_cap->ht_supported && !he_6ghz_capa)
 		return 0;
 
+	if (ht_cap->ht_supported) {
 	smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
 	smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+	} else {
+		smps = FIELD_GET(IEEE80211_HE_6GHZ_CAP_SM_PS, he_6ghz_capa);
+	}
 
 	if (smps >= ARRAY_SIZE(ath12k_smps_map))
 		return -EINVAL;
@@ -2047,26 +3895,246 @@
 					 ath12k_smps_map[smps]);
 }
 
-static void ath12k_bss_assoc(struct ieee80211_hw *hw,
-			     struct ieee80211_vif *vif,
+static bool ath12k_mac_set_he_txbf_conf(struct ath12k_link_vif *arvif)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ath12k *ar = arvif->ar;
+	u32 param = WMI_VDEV_PARAM_SET_HEMU_MODE;
+	u32 value = 0;
+	int ret;
+	struct ieee80211_bss_conf *link_conf;
+
+	rcu_read_lock();
+
+	link_conf = ath12k_get_link_bss_conf(arvif);
+
+	if (!link_conf) {
+		rcu_read_unlock();
+		ath12k_warn(ar->ab, "unable to access bss link conf in txbf conf\n");
+		return false;
+	}
+
+	if (!link_conf->he_support) {
+		rcu_read_unlock();
+		return true;
+	}
+
+	if (link_conf->he_su_beamformer) {
+		value |= FIELD_PREP(HE_MODE_SU_TX_BFER, HE_SU_BFER_ENABLE);
+		if (link_conf->he_mu_beamformer &&
+		    ahvif->vdev_type == WMI_VDEV_TYPE_AP)
+			value |= FIELD_PREP(HE_MODE_MU_TX_BFER, HE_MU_BFER_ENABLE);
+	}
+
+	if (ahvif->vif->type != NL80211_IFTYPE_MESH_POINT) {
+		value |= FIELD_PREP(HE_MODE_DL_OFDMA, HE_DL_MUOFDMA_ENABLE) |
+			 FIELD_PREP(HE_MODE_UL_OFDMA, HE_UL_MUOFDMA_ENABLE);
+
+		if (link_conf->he_full_ul_mumimo)
+			value |= FIELD_PREP(HE_MODE_UL_MUMIMO, HE_UL_MUMIMO_ENABLE);
+
+		if (link_conf->he_su_beamformee)
+			value |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE);
+	}
+
+	rcu_read_unlock();
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, value);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set vdev %d HE MU mode: %d\n",
+			    arvif->vdev_id, ret);
+		return false;
+	}
+
+	param = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE;
+	value =
+		FIELD_PREP(HE_VHT_SOUNDING_MODE, HE_VHT_SOUNDING_MODE_ENABLE) |
+		FIELD_PREP(HE_TRIG_NONTRIG_SOUNDING_MODE,
+			   HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE);
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+					    param, value);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set vdev %d sounding mode: %d\n",
+			    arvif->vdev_id, ret);
+		return false;
+	}
+	return true;
+}
+
+static bool ath12k_mac_vif_recalc_sta_he_txbf(struct ath12k *ar,
+					      struct ath12k_link_vif *arvif,
+					      struct ieee80211_sta_he_cap *he_cap)
+{
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
+	struct ieee80211_he_cap_elem he_cap_elem = {0};
+	struct ieee80211_sta_he_cap *cap_band;
+	struct cfg80211_chan_def def;
+	u32 param = WMI_VDEV_PARAM_SET_HEMU_MODE;
+	u32 hemode = 0;
+	int ret;
+	u8 link_id = arvif->link_id;
+	struct ieee80211_bss_conf *link_conf;
+
+	rcu_read_lock();
+
+	link_conf = ath12k_get_link_bss_conf(arvif);
+
+	if (!link_conf) {
+		rcu_read_unlock();
+		ath12k_warn(ar->ab, "unable to access bss link conf in recalc txbf conf\n");
+		return false;
+	}
+
+	if (!link_conf->he_support) {
+		rcu_read_unlock();
+		return true;
+	}
+
+	rcu_read_unlock();
+
+	if (vif->type != NL80211_IFTYPE_STATION)
+		return false;
+
+	if (WARN_ON(ath12k_mac_vif_chan(vif, &def, link_id)))
+		return false;
+
+	if (def.chan->band == NL80211_BAND_2GHZ)
+		cap_band = &ar->mac.iftype[NL80211_BAND_2GHZ][vif->type].he_cap;
+	else
+		cap_band = &ar->mac.iftype[NL80211_BAND_5GHZ][vif->type].he_cap;
+
+	memcpy(&he_cap_elem, &cap_band->he_cap_elem, sizeof(he_cap_elem));
+
+	if (he_cap_elem.phy_cap_info[HE_PHYCAP_BYTE_4] &
+	    IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE) {
+		if (he_cap->he_cap_elem.phy_cap_info[HE_PHYCAP_BYTE_3] &
+		    IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER)
+			hemode |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE);
+		if (he_cap->he_cap_elem.phy_cap_info[HE_PHYCAP_BYTE_4] &
+		    IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER) {
+			hemode |= FIELD_PREP(HE_MODE_MU_TX_BFEE, HE_MU_BFEE_ENABLE);
+		}
+	}
+
+	if (vif->type != NL80211_IFTYPE_MESH_POINT) {
+		hemode |= FIELD_PREP(HE_MODE_DL_OFDMA, HE_DL_MUOFDMA_ENABLE) |
+			  FIELD_PREP(HE_MODE_UL_OFDMA, HE_UL_MUOFDMA_ENABLE);
+
+		if (he_cap_elem.phy_cap_info[HE_PHYCAP_BYTE_2] &
+		    IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO)
+			if (he_cap->he_cap_elem.phy_cap_info[HE_PHYCAP_BYTE_2] &
+			    IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO)
+				hemode |= FIELD_PREP(HE_MODE_UL_MUMIMO, HE_UL_MUMIMO_ENABLE);
+
+		if (FIELD_GET(HE_MODE_MU_TX_BFEE, hemode))
+			hemode |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE);
+
+		if (FIELD_GET(HE_MODE_MU_TX_BFER, hemode))
+			hemode |= FIELD_PREP(HE_MODE_SU_TX_BFER, HE_SU_BFER_ENABLE);
+	}
+
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, hemode);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to submit vdev param txbf 0x%x: %d\n",
+			    hemode, ret);
+		return false;
+	}
+
+	return true;
+}
+
+static bool ath12k_mac_set_eht_txbf_conf(struct ath12k_link_vif *arvif)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ath12k *ar = arvif->ar;
+	u32 param = WMI_VDEV_PARAM_SET_EHT_MU_MODE;
+	u32 value = 0;
+	int ret;
+	struct ieee80211_bss_conf *link_conf;
+
+	rcu_read_lock();
+
+	link_conf = ath12k_get_link_bss_conf(arvif);
+
+	if (!link_conf) {
+		rcu_read_unlock();
+		ath12k_warn(ar->ab, "unable to access bss link conf in eht txbf conf\n");
+		return false;
+	}
+
+	if (!link_conf->eht_support) {
+		rcu_read_unlock();
+		return true;
+	}
+
+	if (link_conf->eht_su_beamformer) {
+		value |= FIELD_PREP(EHT_MODE_SU_TX_BFER, EHT_SU_BFER_ENABLE);
+		if (link_conf->eht_mu_beamformer &&
+		    ahvif->vdev_type == WMI_VDEV_TYPE_AP)
+			value |= FIELD_PREP(EHT_MODE_MU_TX_BFER, EHT_MU_BFER_ENABLE) |
+				 FIELD_PREP(EHT_MODE_DL_OFDMA_MUMIMO, EHT_DL_MUOFDMA_ENABLE) |
+				 FIELD_PREP(EHT_MODE_UL_OFDMA_MUMIMO, EHT_UL_MUOFDMA_ENABLE);
+	}
+
+	if (ahvif->vif->type != NL80211_IFTYPE_MESH_POINT) {
+		value |= FIELD_PREP(EHT_MODE_DL_OFDMA, EHT_DL_MUOFDMA_ENABLE) |
+			 FIELD_PREP(EHT_MODE_UL_OFDMA, EHT_UL_MUOFDMA_ENABLE);
+
+		if (link_conf->eht_80mhz_full_bw_ul_mumimo)
+			value |= FIELD_PREP(EHT_MODE_MUMIMO, EHT_UL_MUMIMO_ENABLE);
+
+		if (link_conf->eht_su_beamformee)
+			value |= FIELD_PREP(EHT_MODE_SU_TX_BFEE, EHT_SU_BFEE_ENABLE);
+	}
+
+	rcu_read_unlock();
+
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, value);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set vdev %d EHT MU mode: %d\n",
+			    arvif->vdev_id, ret);
+		return false;
+	}
+
+	return true;
+}
+
+void ath12k_bss_assoc(struct ath12k *ar,
+		      struct ath12k_link_vif *arvif,
 			     struct ieee80211_bss_conf *bss_conf)
 {
-	struct ath12k *ar = hw->priv;
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_vif *vif = ahvif->vif;
 	struct ath12k_wmi_peer_assoc_arg peer_arg;
 	struct ieee80211_sta *ap_sta;
+	struct ath12k_link_sta *arsta;
+	struct ath12k_sta *ahsta;
 	struct ath12k_peer *peer;
+	struct ieee80211_sta_he_cap he_cap;
 	bool is_auth = false;
 	int ret;
+	struct vdev_up_params params = { 0 };
+	u8 link_id;
+	u16 he_6ghz_capa;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
-		   arvif->vdev_id, arvif->bssid, arvif->aid);
+	if (!arvif)
+		return;
+
+	link_id = bss_conf->link_id;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i link_id:%d assoc bssid %pM aid %d\n",
+		   arvif->vdev_id, link_id, arvif->bssid, ahvif->aid);
 
 	rcu_read_lock();
 
-	ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+	/* TODO: verify correctness while debugging later.
+	 * The ieee80211_sta should be looked up by the MLD address for MLO,
+	 * or by the bssid for non-MLO. Previously:
+	 * ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+	 */
+	ap_sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
+
 	if (!ap_sta) {
 		ath12k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n",
 			    bss_conf->bssid, arvif->vdev_id);
@@ -2074,10 +4142,17 @@
 		return;
 	}
 
-	ath12k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false);
+	ahsta = ath12k_sta_to_ahsta(ap_sta);
+	arsta = ahsta->link[link_id];
+
+	/* he_cap here is updated at assoc success for sta mode only */
+	he_cap = ap_sta->link[link_id]->he_cap;
+
+	ath12k_peer_assoc_prepare(ar, arvif, arsta, &peer_arg, false);
 
 	rcu_read_unlock();
 
+	peer_arg.is_assoc = true;
 	ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
@@ -2091,26 +4166,53 @@
 		return;
 	}
 
+	he_6ghz_capa = ap_sta->link[link_id]->he_6ghz_capa.capa;
+
 	ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid,
-				     &ap_sta->deflink.ht_cap);
+				     &ap_sta->link[link_id]->ht_cap,
+				     le16_to_cpu(he_6ghz_capa));
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
 			    arvif->vdev_id, ret);
 		return;
 	}
 
+	if (!ath12k_mac_vif_recalc_sta_he_txbf(ar, arvif, &he_cap)) {
+		ath12k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
+			    arvif->vdev_id, bss_conf->bssid);
+		return;
+	}
+
 	WARN_ON(arvif->is_up);
 
-	arvif->aid = vif->cfg.aid;
+	ahvif->aid = vif->cfg.aid;
 	ether_addr_copy(arvif->bssid, bss_conf->bssid);
 
-	ret = ath12k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+	params.vdev_id = arvif->vdev_id;
+	params.aid = ahvif->aid;
+	params.bssid = arvif->bssid;
+
+	if (bss_conf->nontransmitted) {
+		params.profile_idx = bss_conf->bssid_index;
+		params.profile_count = BIT(bss_conf->bssid_indicator) - 1;
+		params.tx_bssid = bss_conf->transmitter_bssid;
+	}
+
+	if (ar->ab->ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE1 &&
+	    !ar->ab->is_reset)
+		/* Skip sending vdev up for non-asserted links while
+		 * recovering station vif type
+		 */
+		goto skip_vdev_up;
+
+	ret = ath12k_wmi_vdev_up(ar, &params);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to set vdev %d up: %d\n",
 			    arvif->vdev_id, ret);
 		return;
 	}
 
+skip_vdev_up:
 	arvif->is_up = true;
 
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
@@ -2142,15 +4244,23 @@
 			    arvif->vdev_id, ret);
 }
 
-static void ath12k_bss_disassoc(struct ieee80211_hw *hw,
-				struct ieee80211_vif *vif)
-{
-	struct ath12k *ar = hw->priv;
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+void ath12k_bss_disassoc(struct ath12k *ar,
+			 struct ath12k_link_vif *arvif,
+			 bool do_vdev_down)
+{
+	struct ath12k_vif *tx_ahvif;
+	struct ath12k_link_vif *tx_arvif;
+	struct ieee80211_bss_conf *link;
 	int ret;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
+	if (do_vdev_down) {
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
+			   arvif->vdev_id, arvif->bssid);
+		return;
+	}
+
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
 		   arvif->vdev_id, arvif->bssid);
 
@@ -2161,7 +4271,25 @@
 
 	arvif->is_up = false;
 
+	rcu_read_lock();
+	link = ath12k_get_link_bss_conf(arvif);
+	if (!link)
+		goto unlock;
+
+	if (link->mbssid_tx_vif) {
+		tx_ahvif = (void *)link->mbssid_tx_vif->drv_priv;
+		if (!tx_ahvif)
+			goto unlock;
+		lockdep_assert_held(&tx_ahvif->ah->conf_mutex);
+		tx_arvif = tx_ahvif->link[link->mbssid_tx_vif_linkid];
+		if (!tx_arvif)
+			goto unlock;
+		lockdep_assert_held(&tx_arvif->ar->conf_mutex);
+	}
+
 	/* TODO: cancel connection_loss_work */
+unlock:
+	rcu_read_unlock();
 }
 
 static u32 ath12k_mac_get_rate_hw_value(int bitrate)
@@ -2190,10 +4318,10 @@
 }
 
 static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
-					 struct ieee80211_vif *vif,
-					 struct cfg80211_chan_def *def)
+					 struct ath12k_link_vif *arvif,
+					 struct cfg80211_chan_def *def,
+					 struct ieee80211_bss_conf *link_conf)
 {
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
 	const struct ieee80211_supported_band *sband;
 	u8 basic_rate_idx;
 	int hw_rate_code;
@@ -2203,8 +4331,10 @@
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	sband = ar->hw->wiphy->bands[def->chan->band];
-	basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+	sband = ar->ah->hw->wiphy->bands[def->chan->band];
+	basic_rate_idx = ffs(link_conf->basic_rates);
+	if (basic_rate_idx)
+		basic_rate_idx -= 1;
 	bitrate = sband->bitrates[basic_rate_idx].bitrate;
 
 	hw_rate_code = ath12k_mac_get_rate_hw_value(bitrate);
@@ -2226,11 +4356,13 @@
 		ath12k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret);
 }
 
-static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
+static int ath12k_mac_fils_discovery(struct ath12k_link_vif *arvif,
 				     struct ieee80211_bss_conf *info)
 {
+	struct ath12k_vif *ahvif = arvif->ahvif;
 	struct ath12k *ar = arvif->ar;
 	struct sk_buff *tmpl;
+	struct ath12k_hw *ah = ar->ah;
 	int ret;
 	u32 interval;
 	bool unsol_bcast_probe_resp_enabled = false;
@@ -2238,7 +4370,8 @@
 	if (info->fils_discovery.max_interval) {
 		interval = info->fils_discovery.max_interval;
 
-		tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif);
+		tmpl = ieee80211_get_fils_discovery_tmpl_mlo(ah->hw, ahvif->vif,
+							 info->link_id);
 		if (tmpl)
 			ret = ath12k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id,
 							     tmpl);
@@ -2246,8 +4379,9 @@
 		unsol_bcast_probe_resp_enabled = 1;
 		interval = info->unsol_bcast_probe_resp_interval;
 
-		tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw,
-								 arvif->vif);
+		tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl_mlo(ah->hw,
+								 ahvif->vif,
+								 info->link_id);
 		if (tmpl)
 			ret = ath12k_wmi_probe_resp_tmpl(ar, arvif->vdev_id,
 							 tmpl);
@@ -2272,14 +4406,192 @@
 	return ret;
 }
 
-static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
-					   struct ieee80211_vif *vif,
+static void ath12k_mac_non_srg_th_config(struct ath12k *ar,
+					 struct ieee80211_he_obss_pd *he_obss_pd,
+					 u32 *param_val)
+{
+	s8 non_srg_th = ATH12K_OBSS_PD_THRESHOLD_DISABLED;
+
+	if (he_obss_pd->sr_ctrl &
+	    IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED) {
+		non_srg_th = ATH12K_OBSS_PD_MAX_THRESHOLD;
+	} else {
+		if (he_obss_pd->sr_ctrl &
+		    IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
+			non_srg_th = (ATH12K_OBSS_PD_MAX_THRESHOLD +
+				      he_obss_pd->non_srg_max_offset);
+
+		*param_val |= ATH12K_OBSS_PD_NON_SRG_EN;
+	}
+
+	if (!test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT,
+		      ar->ab->wmi_ab.svc_map)) {
+		if (non_srg_th != ATH12K_OBSS_PD_THRESHOLD_DISABLED)
+			non_srg_th -= ATH12K_DEFAULT_NOISE_FLOOR;
+	}
+
+	*param_val |= (non_srg_th & GENMASK(7, 0));
+}
+
+static void ath12k_mac_srg_th_config(struct ath12k *ar,
+				     struct ieee80211_he_obss_pd *he_obss_pd,
+				     u32 *param_val)
+{
+	s8 srg_th = 0;
+
+	if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) {
+		srg_th = ATH12K_OBSS_PD_MAX_THRESHOLD + he_obss_pd->max_offset;
+		*param_val |= ATH12K_OBSS_PD_SRG_EN;
+	}
+
+	if (test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT,
+		     ar->ab->wmi_ab.svc_map)) {
+		*param_val |= ATH12K_OBSS_PD_THRESHOLD_IN_DBM;
+		*param_val |= FIELD_PREP(GENMASK(15, 8), srg_th);
+	} else {
+		/* SRG not supported and threshold in dB */
+		*param_val &= ~(ATH12K_OBSS_PD_SRG_EN |
+				ATH12K_OBSS_PD_THRESHOLD_IN_DBM);
+	}
+}
+
+static int ath12k_mac_config_obss_pd(struct ath12k *ar,
+				     struct ieee80211_he_obss_pd *he_obss_pd)
+{
+	u32 bitmap[2], param_id, param_val, pdev_id;
+	int ret;
+
+	pdev_id = ar->pdev->pdev_id;
+
+	/* Set and enable SRG/non-SRG OBSS PD Threshold */
+	param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD;
+	if (test_bit(MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
+		ret = ath12k_wmi_pdev_set_param(ar, param_id, 0, pdev_id);
+		if (ret)
+			ath12k_warn(ar->ab,
+				    "Failed to set obss_pd_threshold for pdev: %u\n",
+				    pdev_id);
+		return ret;
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "OBSS PD Params: sr_ctrl %x non_srg_thres %u srg_max %u\n",
+		   he_obss_pd->sr_ctrl, he_obss_pd->non_srg_max_offset,
+		   he_obss_pd->max_offset);
+
+	param_val = 0;
+
+	/* Preparing non-SRG OBSS PD Threshold Configurations */
+	ath12k_mac_non_srg_th_config(ar, he_obss_pd, &param_val);
+
+	/* Preparing SRG OBSS PD Threshold Configurations */
+	ath12k_mac_srg_th_config(ar, he_obss_pd, &param_val);
+
+	ret = ath12k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "Failed to set obss_pd_threshold for pdev: %u\n",
+			    pdev_id);
+		return ret;
+	}
+
+	/* Enable OBSS PD for all access category */
+	param_id  = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC;
+	param_val = 0xf;
+	ret = ath12k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "Failed to set obss_pd_per_ac for pdev: %u\n",
+			    pdev_id);
+		return ret;
+	}
+
+	/* Set SR Prohibit */
+	param_id  = WMI_PDEV_PARAM_ENABLE_SR_PROHIBIT;
+	param_val = !!(he_obss_pd->sr_ctrl &
+		       IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED);
+	ret = ath12k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
+	if (ret) {
+		ath12k_warn(ar->ab, "Failed to set sr_prohibit for pdev: %u\n",
+			    pdev_id);
+		return ret;
+	}
+
+	if (!test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT,
+		      ar->ab->wmi_ab.svc_map))
+		return 0;
+
+	/* Set SRG BSS Color Bitmap */
+	memcpy(bitmap, he_obss_pd->bss_color_bitmap, sizeof(bitmap));
+	ret = ath12k_wmi_pdev_set_srg_bss_color_bitmap(ar, bitmap);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "Failed to set bss_color_bitmap for pdev: %u\n",
+			    pdev_id);
+		return ret;
+	}
+
+	/* Set SRG Partial BSSID Bitmap */
+	memcpy(bitmap, he_obss_pd->partial_bssid_bitmap, sizeof(bitmap));
+	ret = ath12k_wmi_pdev_set_srg_patial_bssid_bitmap(ar, bitmap);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "Failed to set partial_bssid_bitmap for pdev: %u\n",
+			    pdev_id);
+		return ret;
+	}
+
+	memset(bitmap, 0xff, sizeof(bitmap));
+
+	/* Enable all BSS Colors for SRG */
+	ret = ath12k_wmi_pdev_srg_obss_color_enable_bitmap(ar, bitmap);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "Failed to set srg_color_en_bitmap pdev: %u\n",
+			    pdev_id);
+		return ret;
+	}
+
+	/* Enable all partial BSSID masks for SRG */
+	ret = ath12k_wmi_pdev_srg_obss_bssid_enable_bitmap(ar, bitmap);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "Failed to set srg_bssid_en_bitmap pdev: %u\n",
+			    pdev_id);
+		return ret;
+	}
+
+	/* Enable all BSS Colors for non-SRG */
+	ret = ath12k_wmi_pdev_non_srg_obss_color_enable_bitmap(ar, bitmap);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "Failed to set non_srg_color_en_bitmap pdev: %u\n",
+			    pdev_id);
+		return ret;
+	}
+
+	/* Enable all partial BSSID masks for non-SRG */
+	ret = ath12k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(ar, bitmap);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "Failed to set non_srg_bssid_en_bitmap pdev: %u\n",
+			    pdev_id);
+		return ret;
+	}
+
+	return 0;
+}
+
+void ath12k_mac_bss_info_changed(struct ath12k *ar,
+				 struct ath12k_link_vif *arvif,
 					   struct ieee80211_bss_conf *info,
 					   u64 changed)
 {
-	struct ath12k *ar = hw->priv;
-	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+	struct ath12k_vif *ahvif = arvif->ahvif, *tx_ahvif;
+	struct ath12k_link_vif *tx_arvif;
+	struct ieee80211_vif *vif = ahvif->vif;
 	struct cfg80211_chan_def def;
+	struct vdev_up_params params = { 0 };
 	u32 param_id, param_value;
 	enum nl80211_band band;
 	u32 vdev_param;
@@ -2290,8 +4602,13 @@
 	int ret;
 	u8 rateidx;
 	u32 rate;
+	bool color_collision_detect;
+	u8 link_id = arvif->link_id;
 
-	mutex_lock(&ar->conf_mutex);
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (unlikely(test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
+		return;
 
 	if (changed & BSS_CHANGED_BEACON_INT) {
 		arvif->beacon_interval = info->beacon_int;
@@ -2311,7 +4628,7 @@
 
 	if (changed & BSS_CHANGED_BEACON) {
 		param_id = WMI_PDEV_PARAM_BEACON_TX_MODE;
-		param_value = WMI_BEACON_STAGGERED_MODE;
+		param_value = WMI_BEACON_BURST_MODE;
 		ret = ath12k_wmi_pdev_set_param(ar, param_id,
 						param_value, ar->pdev->pdev_id);
 		if (ret)
@@ -2322,10 +4639,87 @@
 				   "Set staggered beacon mode for VDEV: %d\n",
 				   arvif->vdev_id);
 
+		if ((!arvif->do_not_send_tmpl || !arvif->bcca_zero_sent) &&
+		     arvif->is_started) {
+			/* need to install Transmitting vif's template first */
+
 		ret = ath12k_mac_setup_bcn_tmpl(arvif);
 		if (ret)
 			ath12k_warn(ar->ab, "failed to update bcn template: %d\n",
 				    ret);
+
+			if (!arvif->pending_csa_up)
+				goto skip_pending_cs_up;
+
+			memset(&params, 0, sizeof(params));
+			params.vdev_id = arvif->vdev_id;
+			params.aid = ahvif->aid;
+			params.bssid = arvif->bssid;
+
+			if (info->mbssid_tx_vif) {
+				tx_ahvif = (void *)info->mbssid_tx_vif->drv_priv;
+				tx_arvif = tx_ahvif->link[info->mbssid_tx_vif_linkid];
+				params.tx_bssid = tx_arvif->bssid;
+				params.profile_idx = ahvif->vif->bss_conf.bssid_index;
+				params.profile_count = BIT(info->bssid_indicator);
+			}
+
+			if (info->mbssid_tx_vif && arvif != tx_arvif &&
+			    tx_arvif->pending_csa_up) {
+				/* skip non tx vif's */
+				goto skip_pending_cs_up;
+			}
+			ret = ath12k_wmi_vdev_up(arvif->ar, &params);
+			if (ret)
+				ath12k_warn(ar->ab, "failed to bring vdev up %d: %d\n",
+					    arvif->vdev_id, ret);
+
+			arvif->pending_csa_up = false;
+
+			if (info->mbssid_tx_vif && arvif == tx_arvif) {
+				struct ath12k_link_vif *arvif_itr;
+				list_for_each_entry(arvif_itr, &ar->arvifs, list) {
+					if (!arvif_itr->pending_csa_up)
+						continue;
+
+					memset(&params, 0, sizeof(params));
+					params.vdev_id = arvif_itr->vdev_id;
+					params.aid = ahvif->aid;
+					params.bssid = arvif_itr->bssid;
+					params.tx_bssid = tx_arvif->bssid;
+					params.profile_idx =
+						ahvif->vif->bss_conf.bssid_index;
+					params.profile_count =
+						BIT(info->bssid_indicator);
+					ret = ath12k_wmi_vdev_up(arvif_itr->ar, &params);
+					if (ret)
+						ath12k_warn(ar->ab, "failed to bring vdev up %d: %d\n",
+							    arvif_itr->vdev_id, ret);
+					arvif_itr->pending_csa_up = false;
+				}
+			}
+		}
+skip_pending_cs_up:
+		if (arvif->bcca_zero_sent)
+			arvif->do_not_send_tmpl = true;
+		else
+			arvif->do_not_send_tmpl = false;
+
+		if (arvif->is_up && info->he_support) {
+			param_id = WMI_VDEV_PARAM_BA_MODE;
+
+			if (info->eht_support)
+				param_value = WMI_BA_MODE_BUFFER_SIZE_1024;
+			else
+				param_value = WMI_BA_MODE_BUFFER_SIZE_256;
+
+			ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+							    param_id, param_value);
+			if (ret)
+				ath12k_warn(ar->ab,
+					    "failed to set BA BUFFER SIZE %d for vdev: %d\n",
+					    param_value, arvif->vdev_id);
+		}
 	}
 
 	if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
@@ -2347,31 +4741,31 @@
 
 	if (changed & BSS_CHANGED_SSID &&
 	    vif->type == NL80211_IFTYPE_AP) {
-		arvif->u.ap.ssid_len = vif->cfg.ssid_len;
+		ahvif->u.ap.ssid_len = vif->cfg.ssid_len;
 		if (vif->cfg.ssid_len)
-			memcpy(arvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len);
-		arvif->u.ap.hidden_ssid = info->hidden_ssid;
+			memcpy(ahvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len);
+		ahvif->u.ap.hidden_ssid = info->hidden_ssid;
 	}
 
-	if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
+	if (changed & BSS_CHANGED_BSSID && info->bssid && !is_zero_ether_addr(info->bssid))
 		ether_addr_copy(arvif->bssid, info->bssid);
 
-	if (changed & BSS_CHANGED_BEACON_ENABLED) {
+	/* pending_up is not needed for recovered ab during Mode1 scenario */
+	if (changed & BSS_CHANGED_BEACON_ENABLED && !arvif->is_started && !ar->ab->recovery_start)
+		arvif->pending_up = true;
+
+	if (changed & BSS_CHANGED_BEACON_ENABLED && arvif->is_started) {
+		if (info->enable_beacon) {
+			ath12k_mac_set_he_txbf_conf(arvif);
+			ath12k_mac_set_eht_txbf_conf(arvif);
+		}
 		ath12k_control_beaconing(arvif, info);
 
-		if (arvif->is_up && vif->bss_conf.he_support &&
-		    vif->bss_conf.he_oper.params) {
-			/* TODO: Extend to support 1024 BA Bitmap size */
-			ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
-							    WMI_VDEV_PARAM_BA_MODE,
-							    WMI_BA_MODE_BUFFER_SIZE_256);
-			if (ret)
-				ath12k_warn(ar->ab,
-					    "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
-					    arvif->vdev_id);
 
+		if (arvif->is_up && info->he_support) {
+			if (info->he_oper.params) {
 			param_id = WMI_VDEV_PARAM_HEOPS_0_31;
-			param_value = vif->bss_conf.he_oper.params;
+				param_value = info->he_oper.params;
 			ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
 							    param_id, param_value);
 			ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
@@ -2381,6 +4775,8 @@
 			if (ret)
 				ath12k_warn(ar->ab, "Failed to set he oper params %x for VDEV %d: %i\n",
 					    param_value, arvif->vdev_id, ret);
+
+			}
 		}
 	}
 
@@ -2447,28 +4843,30 @@
 
 	if (changed & BSS_CHANGED_ASSOC) {
 		if (vif->cfg.assoc)
-			ath12k_bss_assoc(hw, vif, info);
+			ath12k_bss_assoc(ar, arvif, info);
 		else
-			ath12k_bss_disassoc(hw, vif);
+			ath12k_bss_disassoc(ar, arvif, false);
 	}
 
 	if (changed & BSS_CHANGED_TXPOWER) {
 		ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev_id %i txpower %d\n",
 			   arvif->vdev_id, info->txpower);
-
 		arvif->txpower = info->txpower;
 		ath12k_mac_txpower_recalc(ar);
 	}
 
 	if (changed & BSS_CHANGED_MCAST_RATE &&
-	    !ath12k_mac_vif_chan(arvif->vif, &def)) {
+	    !ath12k_mac_vif_chan(ahvif->vif, &def, link_id)) {
 		band = def.chan->band;
-		mcast_rate = vif->bss_conf.mcast_rate[band];
+		mcast_rate = info->mcast_rate[band];
 
-		if (mcast_rate > 0)
+		if (mcast_rate > 0) {
 			rateidx = mcast_rate - 1;
-		else
-			rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+		} else {
+			rateidx = ffs(info->basic_rates);
+			if (rateidx)
+				rateidx -= 1;
+		}
 
 		if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
 			rateidx += ATH12K_MAC_FIRST_OFDM_RATE_IDX;
@@ -2505,8 +4903,8 @@
 	}
 
 	if (changed & BSS_CHANGED_BASIC_RATES &&
-	    !ath12k_mac_vif_chan(arvif->vif, &def))
-		ath12k_recalculate_mgmt_rate(ar, vif, &def);
+	    !ath12k_mac_vif_chan(ahvif->vif, &def, link_id))
+		ath12k_recalculate_mgmt_rate(ar, arvif, &def, info);
 
 	if (changed & BSS_CHANGED_TWT) {
 		if (info->twt_requester || info->twt_responder)
@@ -2516,19 +4914,39 @@
 	}
 
 	if (changed & BSS_CHANGED_HE_OBSS_PD)
-		ath12k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
-					     &info->he_obss_pd);
+		ath12k_mac_config_obss_pd(ar, &info->he_obss_pd);
 
 	if (changed & BSS_CHANGED_HE_BSS_COLOR) {
+		color_collision_detect = (info->he_bss_color.enabled &&
+					  info->he_bss_color.collision_detection_enabled);
 		if (vif->type == NL80211_IFTYPE_AP) {
 			ret = ath12k_wmi_obss_color_cfg_cmd(ar,
 							    arvif->vdev_id,
 							    info->he_bss_color.color,
 							    ATH12K_BSS_COLOR_AP_PERIODS,
-							    info->he_bss_color.enabled);
+							    info->nontransmitted ?
+							    0 : color_collision_detect);
 			if (ret)
 				ath12k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
 					    arvif->vdev_id,  ret);
+
+			param_id = WMI_VDEV_PARAM_BSS_COLOR;
+			param_value = info->he_bss_color.color << IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET;
+
+			if (!info->he_bss_color.enabled)
+				param_value |= IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED;
+			ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+							    param_id,
+							    param_value);
+			if (ret)
+				ath12k_warn(ar->ab,
+					    "failed to set bss color param on vdev %i: %d\n",
+					    arvif->vdev_id,  ret);
+
+			ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+				   "bss color param 0x%x set on vdev %i\n",
+				   param_value, arvif->vdev_id);
+
 		} else if (vif->type == NL80211_IFTYPE_STATION) {
 			ret = ath12k_wmi_send_bss_color_change_enable_cmd(ar,
 									  arvif->vdev_id,
@@ -2550,12 +4968,330 @@
 	if (changed & BSS_CHANGED_FILS_DISCOVERY ||
 	    changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP)
 		ath12k_mac_fils_discovery(arvif, info);
+}
+
+static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
+					  struct ieee80211_vif *vif,
+					  u64 changed)
+{
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ath12k_link_vif *arvif;
+	struct ath12k *ar;
+	unsigned long link_id;
+	struct ieee80211_bss_conf *info;
+
+	if (changed & BSS_CHANGED_SSID &&
+	    vif->type == NL80211_IFTYPE_AP) {
+		ahvif->u.ap.ssid_len = vif->cfg.ssid_len;
+		if (vif->cfg.ssid_len)
+			memcpy(ahvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len);
+	}
+
+	link_id = ahvif->deflink.link_id;
+
+	if (changed & BSS_CHANGED_ASSOC) {
+		for_each_set_bit(link_id, &ahvif->links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+			arvif = ahvif->link[link_id];
+			info = vif->link_conf[link_id];
+			ar = arvif->ar;
+			if (vif->cfg.assoc) {
+				if (info)
+					ath12k_bss_assoc(ar, arvif, info);
+			} else {
+				ath12k_bss_disassoc(ar, arvif, false);
+			}
+		}
+	}
+}
+
+static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
+					   struct ieee80211_vif *vif,
+					   struct ieee80211_bss_conf *info,
+					   u64 changed)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ath12k_link_vif *arvif;
+	struct ath12k *ar;
+	u8 link_id = info->link_id;
+
+	mutex_lock(&ah->conf_mutex);
+	arvif = ahvif->link[link_id];
+
+	if (arvif == NULL || !arvif->is_created) {
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+			    "bss info parameter changes %llx cached to apply after vdev create on channel assign\n",
+			    changed);
+
+		ahvif->cache[link_id].bss_conf_changed |= changed;
+		mutex_unlock(&ah->conf_mutex);
+		return;
+	}
+
+	ar = arvif->ar;
+
+	if (!ar) {
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+			    "bss info parameter changes %llx cached to apply after vdev create on channel assign\n",
+			    changed);
+		ahvif->cache[link_id].bss_conf_changed |= changed;
+		mutex_unlock(&ah->conf_mutex);
+		return;
+	}
+
+	mutex_lock(&ar->conf_mutex);
+
+	ath12k_mac_bss_info_changed(ar, arvif, info, changed);
 
 	mutex_unlock(&ar->conf_mutex);
+	mutex_unlock(&ah->conf_mutex);
+}
+
+static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ath12k_link_vif *arvif)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	unsigned long time_left;
+	int ret = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	reinit_completion(&ar->vdev_delete_done);
+
+	ret = ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to delete WMI scan vdev %d: %d\n",
+			    arvif->vdev_id, ret);
+		goto clean_up;
+	}
+
+	time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+						ATH12K_VDEV_DELETE_TIMEOUT_HZ);
+	if (time_left == 0) {
+		ath12k_warn(ar->ab, "Timeout in receiving vdev delete response vdev_id : %d\n",
+			    arvif->vdev_id);
+		ret = -ETIMEDOUT;
+		goto clean_up;
+	}
+
+	spin_lock_bh(&ar->ab->base_lock);
+	ar->ab->free_vdev_map |= 1LL << arvif->vdev_id;
+	spin_unlock_bh(&ar->ab->base_lock);
+
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+	if (arvif->link_id != ATH12K_DEFAULT_SCAN_LINK && arvif->ndev_pvt) {
+		if (arvif->ndev_pvt->is_bond_enslaved)
+			ath12k_bond_link_release(arvif);
+		ath12k_disable_ppe_for_link_netdev(ar->ab, arvif,
+						   arvif->ndev_pvt->link_ndev);
+	}
+#endif
+	ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
+	ar->ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
+	ar->num_created_vdevs--;
+	arvif->ahvif->num_vdev_created--;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_SET(MAC, L1), "mac vdev delete id %d type %d subtype %d map %llx\n",
+		   arvif->vdev_id, ahvif->vdev_type, ahvif->vdev_subtype,
+		   ar->ab->free_vdev_map);
+clean_up:
+	arvif->is_created = false;
+	arvif->is_scan_vif = false;
+	arvif->ar = NULL;
+	spin_lock_bh(&ar->data_lock);
+	list_del(&arvif->list);
+	spin_unlock_bh(&ar->data_lock);
+
+	return ret;
+}
+
+static int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif)
+{
+	struct ath12k *ar = arvif->ar;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (!arvif->vdev_stop_notify_done) {
+		reinit_completion(&ar->vdev_setup_done);
+
+		ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+		arvif->vdev_stop_notify_done = true;
+
+		ret = ath12k_mac_vdev_setup_sync(ar);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+	}
+
+	memset(&ar->wmm_stats, 0, sizeof(struct ath12k_wmm_stats));
+	WARN_ON(ar->num_started_vdevs == 0);
+
+	ar->num_started_vdevs--;
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
+		   arvif->addr, arvif->vdev_id);
+
+	if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+		clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
+			   arvif->vdev_id);
+	}
+
+	return 0;
+}
+
+static int
+ath12k_mac_find_link_id_by_freq(struct ieee80211_vif *vif, struct ath12k *ar, u32 freq)
+{
+	struct ath12k_vif *ahvif = (void *)vif->drv_priv;
+	u16 link_id;
+
+	lockdep_assert_held(&ahvif->ah->conf_mutex);
+
+	if (!vif->valid_links)
+		return 0;
+
+	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+		if (!(ahvif->links_map & BIT(link_id)) ||
+		    !ahvif->link[link_id]->ar)
+			continue;
+		if (freq >= ahvif->link[link_id]->ar->chan_info.low_freq &&
+		    freq <= ahvif->link[link_id]->ar->chan_info.high_freq)
+			return link_id;
+	}
+
+	/* Use a default link for scan purpose in driver if the request
+	 * cannot be mapped to any of the active links (channel assigned)
+	 */
+	return ATH12K_DEFAULT_SCAN_LINK;
+}
+
+void ath12k_mac_unassign_link_vif(struct ath12k_link_vif *arvif)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ath12k_hw *ah = ahvif->ah;
+
+	lockdep_assert_held(&ah->conf_mutex);
+
+	ahvif->link[arvif->link_id] = NULL;
+	ahvif->links_map &= ~BIT(arvif->link_id);
+
+	if (arvif != &ahvif->deflink)
+		kfree(arvif);
+	else
+		memset(arvif->addr, 0, ETH_ALEN);
+}
+
+struct ath12k_link_vif *
+ath12k_mac_assign_link_vif( struct ath12k_hw *ah, struct ieee80211_vif *vif, u8 link_id)
+{
+	struct ath12k_vif *ahvif = (void *)vif->drv_priv;
+	struct ath12k_link_vif *arvif;
+	int i;
+
+	lockdep_assert_held(&ah->conf_mutex);
+
+	if(ahvif->link[link_id])
+		return ahvif->link[link_id];
+
+	/* Not a ML vif */
+	if (!vif->valid_links) {
+		link_id = 0;
+		arvif =  &ahvif->deflink;
+	} else {
+		/* first link vif is fetched from deflink except for scan arvifs */
+		if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) {
+			arvif = &ahvif->deflink;
+		} else {
+			arvif = (struct ath12k_link_vif *)
+					kzalloc(sizeof(struct ath12k_link_vif),
+						GFP_KERNEL);
+
+			if (arvif == NULL)
+				return NULL;
+
+			INIT_LIST_HEAD(&arvif->list);
+			INIT_WORK(&arvif->update_obss_color_notify_work,
+				  ath12k_update_obss_color_notify_work);
+			INIT_WORK(&arvif->update_bcn_template_work,
+				  ath12k_update_bcn_template_work);
+			arvif->num_stations = 0;
+			init_completion(&arvif->peer_ch_width_switch_send);
+			INIT_WORK(&arvif->peer_ch_width_switch_work,
+				  ath12k_wmi_peer_chan_width_switch_work);
+		}
+	}
+
+	ahvif->link[link_id] = arvif;
+	arvif->ahvif = ahvif;
+	arvif->link_id = link_id;
+	ahvif->links_map |= BIT(link_id);
+
+	for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+		arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+		arvif->bitrate_mask.control[i].gi = NL80211_TXRATE_FORCE_SGI;
+		memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+		       sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+		memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+		memset(arvif->bitrate_mask.control[i].he_mcs, 0xff,
+		       sizeof(arvif->bitrate_mask.control[i].he_mcs));
+		memset(arvif->bitrate_mask.control[i].eht_mcs, 0xff,
+		       sizeof(arvif->bitrate_mask.control[i].eht_mcs));
+	}
+	
+	return arvif;
+}
+
+static struct ath12k*
+ath12k_mac_select_scan_device(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
+			      u32 center_freq)
+{
+	struct ath12k_hw *ah = hw->priv;
+	enum nl80211_band band;
+	struct ath12k *ar;
+	int i;
+
+	/* Loop through first channel and determine the scan radio
+	 * NOTE: There could be 5G low/high channels in that case
+	 * split the hw request and perform multiple scans
+	 */
+
+	if (center_freq < ATH12K_MIN_5G_FREQ)
+		band = NL80211_BAND_2GHZ;
+	else if (center_freq < ATH12K_MIN_6G_FREQ)
+		band = NL80211_BAND_5GHZ;
+	else
+		band = NL80211_BAND_6GHZ;
+
+	ar = ah->radio;
+
+	for (i = 0; i < ah->num_radio; i++) {
+		if (band == NL80211_BAND_5GHZ) {
+			if (center_freq > ar->chan_info.low_freq &&
+			    center_freq < ar->chan_info.high_freq)
+				if (ar->mac.sbands[band].channels)
+					return ar;
+		} else if (ar->mac.sbands[band].channels) {
+			return ar;
+		}
+		ar++;
+	}
+
+	return NULL;
 }
 
 void __ath12k_mac_scan_finish(struct ath12k *ar)
 {
+	struct ath12k_hw *ah = ar->ah;
+
 	lockdep_assert_held(&ar->data_lock);
 
 	switch (ar->scan.state) {
@@ -2563,23 +5299,26 @@
 		break;
 	case ATH12K_SCAN_RUNNING:
 	case ATH12K_SCAN_ABORTING:
+		if (ar->scan.is_roc && ar->scan.roc_notify)
+			ieee80211_remain_on_channel_expired(ah->hw);
+		fallthrough;
+	case ATH12K_SCAN_STARTING:
 		if (!ar->scan.is_roc) {
 			struct cfg80211_scan_info info = {
-				.aborted = (ar->scan.state ==
-					    ATH12K_SCAN_ABORTING),
+				.aborted = ((ar->scan.state ==
+					    ATH12K_SCAN_ABORTING) ||
+					(ar->scan.state ==
+					 ATH12K_SCAN_STARTING))
 			};
 
-			ieee80211_scan_completed(ar->hw, &info);
-		} else if (ar->scan.roc_notify) {
-			ieee80211_remain_on_channel_expired(ar->hw);
+			ieee80211_scan_completed(ah->hw, &info);
 		}
-		fallthrough;
-	case ATH12K_SCAN_STARTING:
 		ar->scan.state = ATH12K_SCAN_IDLE;
 		ar->scan_channel = NULL;
 		ar->scan.roc_freq = 0;
 		cancel_delayed_work(&ar->scan.timeout);
 		complete(&ar->scan.completed);
+		ieee80211_queue_work(ah->hw, &ar->scan.vdev_del_wk);
 		break;
 	}
 }
@@ -2679,6 +5418,51 @@
 	mutex_unlock(&ar->conf_mutex);
 }
 
+static void ath12k_scan_vdev_del_work(struct work_struct *work)
+{
+	struct ath12k *ar = container_of(work, struct ath12k,
+					 scan.vdev_del_wk);
+	struct ath12k_hw *ah = ar->ah;
+	struct ath12k_link_vif *arvif;
+
+	if (unlikely(test_bit(ATH12K_FLAG_RECOVERY, &ar->ab->dev_flags)))
+		return;
+
+	mutex_lock(&ah->conf_mutex);
+	mutex_lock(&ar->conf_mutex);
+	/* scan vdev got deleted already. This can happen when on same vif, new
+	 * scan request was requested with different frequency which leads to
+	 * movement of scan arvif from one radio to another radio */
+	if (ar->scan.vdev_id == -1)
+		goto work_complete;
+
+	mutex_unlock(&ar->conf_mutex);
+
+	arvif = ath12k_mac_get_arvif(ar, ar->scan.vdev_id);
+	/* should not happen */
+	if (!arvif) {
+		ath12k_warn(ar->ab, "mac scan vdev del on unknow vdev_id %d\n",
+			    ar->scan.vdev_id);
+		mutex_unlock(&ah->conf_mutex);
+		return;
+	}
+
+	if (arvif->is_started) {
+		mutex_lock(&ar->conf_mutex);
+		ar->scan.vdev_id = -1;
+		goto work_complete;
+	}
+
+	ath12k_mac_remove_link_interface(ah->hw, arvif);
+	ath12k_mac_unassign_link_vif(arvif);
+
+	mutex_lock(&ar->conf_mutex);
+	ar->scan.vdev_id = -1;
+work_complete:
+	mutex_unlock(&ar->conf_mutex);
+	mutex_unlock(&ah->conf_mutex);
+}
+
 static int ath12k_start_scan(struct ath12k *ar,
 			     struct ath12k_wmi_scan_req_arg *arg)
 {
@@ -2686,6 +5470,9 @@
 
 	lockdep_assert_held(&ar->conf_mutex);
 
+	if (ath12k_spectral_get_mode(ar) == ATH12K_SPECTRAL_BACKGROUND)
+		ath12k_spectral_reset_buffer(ar);
+
 	ret = ath12k_wmi_send_scan_start_cmd(ar, arg);
 	if (ret)
 		return ret;
@@ -2717,12 +5504,92 @@
 				 struct ieee80211_vif *vif,
 				 struct ieee80211_scan_request *hw_req)
 {
-	struct ath12k *ar = hw->priv;
-	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k_link_vif *arvif;
+	struct ath12k *ar;
 	struct cfg80211_scan_request *req = &hw_req->req;
 	struct ath12k_wmi_scan_req_arg arg = {};
 	int ret;
-	int i;
+	int i, link_id;
+    bool create = true;
+
+	mutex_lock(&ah->conf_mutex);
+
+	/* Since the targeted scan device could depend on the frequency
+	 * requested in the hw_req, select the corresponding radio
+	 */
+	ar = ath12k_mac_select_scan_device(hw, vif, hw_req->req.channels[0]->center_freq);
+	if (!ar) {
+		ath12k_err(NULL, "unable to select device for scan\n");
+		mutex_unlock(&ah->conf_mutex);
+		return -EINVAL;
+	}
+
+	if (unlikely(test_bit(ATH12K_FLAG_RECOVERY, &ar->ab->dev_flags))) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ESHUTDOWN;
+	}
+
+	link_id = ath12k_mac_find_link_id_by_freq(vif, ar,
+						  hw_req->req.channels[0]->center_freq);
+
+	arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
+	/* If the vif is already assigned to a specific vdev of an ar,
+	 * check whether its already started, vdev which is started
+	 * are not allowed to switch to a new radio.
+	 * If the vdev is not started, but was earlier created on a
+	 * different ar, delete that vdev and create a new one. We don't
+	 * delete at the scan stop as an optimization to avoid redundant
+	 * delete-create vdev's for the same ar, in case the request is
+	 * always on the same band for the vif
+	 */
+	if (!arvif) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOMEM;
+	}
+
+	if (arvif->link_id == ATH12K_DEFAULT_SCAN_LINK &&
+	    (!is_broadcast_ether_addr(req->bssid) &&
+	     !is_zero_ether_addr(req->bssid))) {
+		memcpy(arvif->addr, req->bssid, ETH_ALEN);
+	}
+
+	if (arvif->is_created) {
+		if (ar != arvif->ar && arvif->is_started) {
+			mutex_unlock(&ah->conf_mutex);
+			return -EINVAL;
+		} else if (ar != arvif->ar) {
+			if (!arvif->ar) {
+				mutex_unlock(&ah->conf_mutex);
+				return -EINVAL;
+			}
+			mutex_lock(&arvif->ar->conf_mutex);
+			if (arvif->ar->scan.vdev_id != -1)
+				arvif->ar->scan.vdev_id = -1;
+			mutex_unlock(&arvif->ar->conf_mutex);
+
+			ath12k_mac_remove_link_interface(hw, arvif);
+			ath12k_mac_unassign_link_vif(arvif);
+		} else {
+			create = false;
+		}
+	}
+
+	if (create) {
+		arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
+		mutex_lock(&ar->conf_mutex);
+		ret = ath12k_mac_vdev_create(ar, arvif);
+		if (ret) {
+			mutex_unlock(&ar->conf_mutex);
+			mutex_unlock(&ah->conf_mutex);
+			ath12k_warn(ar->ab, "unable to create scan vdev %d\n", ret);
+			return -EINVAL;
+		}
+		mutex_unlock(&ar->conf_mutex);
+	}
+
+	mutex_unlock(&ah->conf_mutex);
 
 	mutex_lock(&ar->conf_mutex);
 
@@ -2767,6 +5634,14 @@
 
 	if (req->n_channels) {
 		arg.num_chan = req->n_channels;
+		arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
+					GFP_KERNEL);
+
+		if (!arg.chan_list) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+
 		for (i = 0; i < arg.num_chan; i++)
 			arg.chan_list[i] = req->channels[i]->center_freq;
 	}
@@ -2779,12 +5654,21 @@
 		spin_unlock_bh(&ar->data_lock);
 	}
 
+	/* As per cfg80211/mac80211 scan design, it allows only one
+	 * scan at a time. Hence last_scan link id is used for
+	 * tracking the link id on which the scan is being done
+	 * on this vif
+	 */
+	ahvif->last_scan_link = arvif->link_id;
+
 	/* Add a margin to account for event/command processing */
-	ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+	ieee80211_queue_delayed_work(ar->ah->hw, &ar->scan.timeout,
 				     msecs_to_jiffies(arg.max_scan_time +
 						      ATH12K_MAC_SCAN_TIMEOUT_MSECS));
 
 exit:
+	kfree(arg.chan_list);
+
 	if (req->ie_len)
 		kfree(arg.extraie.ptr);
 
@@ -2795,7 +5679,35 @@
 static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
 					 struct ieee80211_vif *vif)
 {
-	struct ath12k *ar = hw->priv;
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ath12k_link_vif *arvif;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	u8 link_id = ahvif->last_scan_link;
+
+	mutex_lock(&ah->conf_mutex);
+
+	if (!(ahvif->links_map & BIT(link_id))) {
+		mutex_unlock(&ah->conf_mutex);
+		ath12k_err(NULL, "unable to select device to cancel scan\n");
+		return;
+	}
+
+	arvif = ahvif->link[link_id];
+
+	if (!arvif->is_created) {
+		ath12k_err(NULL, "unable to select device to cancel scan\n");
+		mutex_unlock(&ah->conf_mutex);
+		return;
+	}
+
+	ar = arvif->ar;
+	if (!ar) {
+		mutex_unlock(&ah->conf_mutex);
+		ath12k_err(NULL, "unable to select device to cancel scan\n");
+		return;
+	}
+	mutex_unlock(&ah->conf_mutex);
 
 	mutex_lock(&ar->conf_mutex);
 	ath12k_scan_abort(ar);
@@ -2804,13 +5716,15 @@
 	cancel_delayed_work_sync(&ar->scan.timeout);
 }
 
-static int ath12k_install_key(struct ath12k_vif *arvif,
+static int ath12k_install_key(struct ath12k_link_vif *arvif,
 			      struct ieee80211_key_conf *key,
 			      enum set_key_cmd cmd,
 			      const u8 *macaddr, u32 flags)
 {
 	int ret;
+	struct ath12k_vif *ahvif = arvif->ahvif;
 	struct ath12k *ar = arvif->ar;
+	struct ath12k_hw_group *ag = ar->ab->ag;
 	struct wmi_vdev_install_key_arg arg = {
 		.vdev_id = arvif->vdev_id,
 		.key_idx = key->keyidx,
@@ -2824,7 +5738,7 @@
 
 	reinit_completion(&ar->install_key_done);
 
-	if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+	if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ag->dev_flags))
 		return 0;
 
 	if (cmd == DISABLE_KEY) {
@@ -2852,13 +5766,24 @@
 	case WLAN_CIPHER_SUITE_GCMP:
 	case WLAN_CIPHER_SUITE_GCMP_256:
 		arg.key_cipher = WMI_CIPHER_AES_GCM;
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+		break;
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		arg.key_cipher = WMI_CIPHER_AES_CMAC;
+		break;
+	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+		arg.key_cipher = WMI_CIPHER_AES_GMAC;
+		break;
+	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+		arg.key_cipher = WMI_CIPHER_NONE;
 		break;
 	default:
 		ath12k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
 		return -EOPNOTSUPP;
 	}
 
-	if (test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags))
+	if (test_bit(ATH12K_FLAG_RAW_MODE, &ag->dev_flags))
 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV |
 			      IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
 
@@ -2871,13 +5796,15 @@
 	if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ))
 		return -ETIMEDOUT;
 
-	if (ether_addr_equal(macaddr, arvif->vif->addr))
-		arvif->key_cipher = key->cipher;
+	if (ether_addr_equal(macaddr, arvif->addr)) {
+		ahvif->key_cipher = key->cipher;
+		ath12k_dp_tx_update_bank_profile(arvif);
+	}
 
 	return ar->install_key_status ? -EINVAL : 0;
 }
 
-static int ath12k_clear_peer_keys(struct ath12k_vif *arvif,
+static int ath12k_clear_peer_keys(struct ath12k_link_vif *arvif,
 				  const u8 *addr)
 {
 	struct ath12k *ar = arvif->ar;
@@ -2919,40 +5846,52 @@
 	return first_errno;
 }
 
-static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-				 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
+		       struct ath12k_link_vif *arvif,
+		       struct ath12k_link_sta *arsta,
 				 struct ieee80211_key_conf *key)
 {
-	struct ath12k *ar = hw->priv;
-	struct ath12k_base *ab = ar->ab;
-	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+	struct ath12k_base *ab;
 	struct ath12k_peer *peer;
-	struct ath12k_sta *arsta;
+	struct ath12k_sta *ahsta = NULL;
 	const u8 *peer_addr;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_vif *vif = ahvif->vif;
+	struct ieee80211_bss_conf *link_conf;
 	int ret = 0;
 	u32 flags = 0;
+	u8 link_id = arvif->link_id;
+	struct ieee80211_sta *sta = NULL;
 
-	/* BIP needs to be done in software */
-	if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
-	    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
-	    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
-	    key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
-		return 1;
+	if (arsta) {
+		ahsta = arsta->ahsta;
+		sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+	}
 
-	if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
-		return 1;
+	if (sta && sta->mlo &&
+	    (test_bit(ATH12K_FLAG_UMAC_RECOVERY_START, &ar->ab->dev_flags)))
+		return 0;
 
-	if (key->keyidx > WMI_MAX_KEY_INDEX)
-		return -ENOSPC;
+	lockdep_assert_held(&ar->ah->conf_mutex);
 
-	mutex_lock(&ar->conf_mutex);
+	rcu_read_lock();
+	link_conf = rcu_dereference(vif->link_conf[link_id]);
+
+	if (!link_conf) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+
+	ab = ar->ab;
 
 	if (sta)
-		peer_addr = sta->addr;
-	else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
-		peer_addr = vif->bss_conf.bssid;
+		peer_addr = arsta->addr;
+	else if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
+		peer_addr = link_conf->bssid;
 	else
-		peer_addr = vif->addr;
+		peer_addr = link_conf->addr;
+
+	rcu_read_unlock();
 
 	key->hw_key_idx = key->keyidx;
 
@@ -2968,12 +5907,12 @@
 			ath12k_warn(ab, "cannot install key for non-existent peer %pM\n",
 				    peer_addr);
 			ret = -EOPNOTSUPP;
-			goto exit;
+			goto out;
 		} else {
 			/* if the peer doesn't exist there is no key to disable
 			 * anymore
 			 */
-			goto exit;
+			goto out;
 		}
 	}
 
@@ -2984,14 +5923,15 @@
 
 	ret = ath12k_install_key(arvif, key, cmd, peer_addr, flags);
 	if (ret) {
-		ath12k_warn(ab, "ath12k_install_key failed (%d)\n", ret);
-		goto exit;
+		ath12k_warn(ab, "ath12k_install_key cmd %d failed (%d) for peer %pM on vdev %d (link id %d)\n",
+			    cmd, ret, peer_addr, arvif->vdev_id, link_id);
+		goto out;
 	}
 
 	ret = ath12k_dp_rx_peer_pn_replay_config(arvif, peer_addr, cmd, key);
 	if (ret) {
 		ath12k_warn(ab, "failed to offload PN replay detection %d\n", ret);
-		goto exit;
+		goto out;
 	}
 
 	spin_lock_bh(&ab->base_lock);
@@ -3016,8 +5956,6 @@
 		ath12k_warn(ab, "peer %pM disappeared!\n", peer_addr);
 
 	if (sta) {
-		arsta = (struct ath12k_sta *)sta->drv_priv;
-
 		switch (key->cipher) {
 		case WLAN_CIPHER_SUITE_TKIP:
 		case WLAN_CIPHER_SUITE_CCMP:
@@ -3025,23 +5963,145 @@
 		case WLAN_CIPHER_SUITE_GCMP:
 		case WLAN_CIPHER_SUITE_GCMP_256:
 			if (cmd == SET_KEY)
-				arsta->pn_type = HAL_PN_TYPE_WPA;
+				ahsta->pn_type = HAL_PN_TYPE_WPA;
 			else
-				arsta->pn_type = HAL_PN_TYPE_NONE;
+				ahsta->pn_type = HAL_PN_TYPE_NONE;
 			break;
 		default:
-			arsta->pn_type = HAL_PN_TYPE_NONE;
+			ahsta->pn_type = HAL_PN_TYPE_NONE;
 			break;
 		}
 	}
 
 	spin_unlock_bh(&ab->base_lock);
 
-exit:
-	mutex_unlock(&ar->conf_mutex);
+out:
 	return ret;
 }
 
+static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+				 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+				 struct ieee80211_key_conf *key)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ath12k_hw_group *ag = ah->ag;
+	struct ath12k_key_conf *key_conf = NULL;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_link_sta *arsta = NULL;
+	struct ath12k_sta *ahsta;
+	int ret = -ENOLINK;
+	u8 link_id;
+
+	mutex_lock(&ah->conf_mutex);
+
+	/* IGTK needs to be done in host software */
+	if (key->keyidx == 4 || key->keyidx == 5) {
+		ret = 1;
+		goto out;
+	}
+
+	if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ag->dev_flags)) {
+		ret = 1;
+		goto out;
+	}
+
+	if (key->keyidx > WMI_MAX_KEY_INDEX) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	if (sta) {
+		ahsta = ath12k_sta_to_ahsta(sta);
+		if (sta->mlo) {
+			unsigned long links = sta->valid_links;
+			for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+				arvif = ahvif->link[link_id];
+				arsta = ahsta->link[link_id];
+				if (WARN_ON(!arvif || !arsta))
+					continue;
+				mutex_lock(&arvif->ar->conf_mutex);
+				ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, arsta, key);
+				mutex_unlock(&arvif->ar->conf_mutex);
+				if (ret)
+					break;
+				arsta->keys[key->keyidx] = key;
+			}
+		} else {
+			arsta = &ahsta->deflink;
+			arvif = arsta->arvif;
+			if (WARN_ON(!arvif))
+				goto out;
+			mutex_lock(&arvif->ar->conf_mutex);
+			ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, arsta, key);
+			mutex_unlock(&arvif->ar->conf_mutex);
+			arsta->keys[key->keyidx] = key;
+		}
+	} else {
+		if (key->link_id >= 0 && key->link_id < IEEE80211_MLD_MAX_NUM_LINKS)
+			arvif = ahvif->link[key->link_id];
+		else
+			arvif = &ahvif->deflink;
+
+		if (!arvif || !arvif->is_created) {
+			key_conf = kzalloc(sizeof(*key_conf), GFP_ATOMIC);
+
+			if (!key_conf) {
+				ath12k_info(NULL, "failed to alloc cache key config\n");
+				goto out;
+			}
+
+			key_conf->cmd = cmd;
+			key_conf->sta = sta;
+			key_conf->key = key;
+
+			list_add_tail(&key_conf->list,
+				      &ahvif->cache[link_id].key_conf.list);
+
+			ath12k_dbg(NULL, ATH12K_DBG_MAC, "set key param cached since vif not assign to radio\n");
+			mutex_unlock(&ah->conf_mutex);
+
+			return 0;
+		}
+
+		if (!(arvif && arvif->ar)) {
+			ath12k_err(NULL, "Failed to set key.\n");
+			mutex_unlock(&ah->conf_mutex);
+			return -EINVAL;
+		}
+
+		if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &arvif->ar->ab->dev_flags)) {
+			ath12k_dbg(arvif->ar->ab, ATH12K_DBG_MODE1_RECOVERY,
+				   "cmd:%d called for crash flush set ab\n", cmd);
+			mutex_unlock(&ah->conf_mutex);
+			return 0;
+		}
+
+		mutex_lock(&arvif->ar->conf_mutex);
+		ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, arsta, key);
+		/* if sta is NULL, treat it as the self peer */
+		arvif->keys[key->keyidx] = key;
+		mutex_unlock(&arvif->ar->conf_mutex);
+	}
+out:
+	mutex_unlock(&ah->conf_mutex);
+	return ret;
+}
+
+static int
+ath12k_mac_bitrate_mask_num_ht_rates(struct ath12k *ar,
+				    enum nl80211_band band,
+				    const struct cfg80211_bitrate_mask *mask)
+{
+	int num_rates = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+		num_rates += hweight16(mask->control[band].ht_mcs[i]);
+
+	return num_rates;
+}
+
 static int
 ath12k_mac_bitrate_mask_num_vht_rates(struct ath12k *ar,
 				      enum nl80211_band band,
@@ -3057,8 +6117,50 @@
 }
 
 static int
-ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_vif *arvif,
-				   struct ieee80211_sta *sta,
+ath12k_mac_bitrate_mask_num_he_rates(struct ath12k *ar,
+				     enum nl80211_band band,
+				     const struct cfg80211_bitrate_mask *mask)
+{
+	int num_rates = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++)
+		num_rates += hweight16(mask->control[band].he_mcs[i]);
+
+	return num_rates;
+}
+
+static int
+ath12k_mac_bitrate_mask_num_he_ul_rates(struct ath12k *ar,
+				    enum nl80211_band band,
+				    const struct cfg80211_bitrate_mask *mask)
+{
+	int num_rates = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].he_ul_mcs); i++)
+		num_rates += hweight16(mask->control[band].he_ul_mcs[i]);
+
+	return num_rates;
+}
+
+static int
+ath12k_mac_bitrate_mask_num_eht_rates(struct ath12k *ar,
+				      enum nl80211_band band,
+				      const struct cfg80211_bitrate_mask *mask)
+{
+	int num_rates = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].eht_mcs); i++)
+		num_rates += hweight16(mask->control[band].eht_mcs[i]);
+
+	return num_rates;
+}
+
+static int
+ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_link_vif *arvif,
+				   struct ath12k_link_sta *arsta,
 				   const struct cfg80211_bitrate_mask *mask,
 				   enum nl80211_band band)
 {
@@ -3066,6 +6168,11 @@
 	u8 vht_rate, nss;
 	u32 rate_code;
 	int ret, i;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	struct ieee80211_link_sta *link_sta;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
 
 	lockdep_assert_held(&ar->conf_mutex);
 
@@ -3080,73 +6187,332 @@
 
 	if (!nss) {
 		ath12k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM",
-			    sta->addr);
+			    arsta->addr);
+		return -EINVAL;
+	}
+
+	/* Avoid updating invalid nss as fixed rate*/
+	rcu_read_lock();
+	link_sta = rcu_dereference(sta->link[arsta->link_id]);
+
+	if (!link_sta || nss > link_sta->rx_nss) {
+		rcu_read_unlock();
 		return -EINVAL;
 	}
 
+	rcu_read_unlock();
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
 		   "Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates",
-		   sta->addr);
+		   arsta->addr);
 
 	rate_code = ATH12K_HW_RATE_CODE(vht_rate, nss - 1,
 					WMI_RATE_PREAMBLE_VHT);
-	ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+	ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
 					arvif->vdev_id,
 					WMI_PEER_PARAM_FIXED_RATE,
 					rate_code);
 	if (ret)
 		ath12k_warn(ar->ab,
 			    "failed to update STA %pM Fixed Rate %d: %d\n",
+			     arsta->addr, rate_code, ret);
+
+	return ret;
+}
+
+static int
+ath12k_mac_set_peer_he_fixed_rate(struct ath12k_link_vif *arvif,
+				  struct ath12k_link_sta *arsta,
+				  const struct cfg80211_bitrate_mask *mask,
+				  enum nl80211_band band)
+{
+	struct ath12k *ar = arvif->ar;
+	u8 he_rate, nss;
+	u32 rate_code;
+	int ret, i;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	struct ieee80211_link_sta *link_sta;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	nss = 0;
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
+		if (hweight16(mask->control[band].he_mcs[i]) == 1) {
+			nss = i + 1;
+			he_rate = ffs(mask->control[band].he_mcs[i]) - 1;
+		}
+	}
+
+	if (!nss) {
+		ath12k_warn(ar->ab, "No single HE Fixed rate found to set for %pM",
+			    arsta->addr);
+		return -EINVAL;
+	}
+
+	/* Avoid updating invalid nss as fixed rate*/
+	rcu_read_lock();
+	link_sta = rcu_dereference(sta->link[arsta->link_id]);
+
+	if (!link_sta || nss > link_sta->rx_nss) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+
+	rcu_read_unlock();
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "Setting Fixed HE Rate for peer %pM. Device will not switch to any other selected rates",
+		   arsta->addr);
+
+	rate_code = ATH12K_HW_RATE_CODE(he_rate, nss - 1,
+					WMI_RATE_PREAMBLE_HE);
+
+	ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
+					arvif->vdev_id,
+					WMI_PEER_PARAM_FIXED_RATE,
+					rate_code);
+	if (ret)
+		ath12k_warn(ar->ab,
+			    "failed to update STA %pM Fixed Rate %d: %d\n",
+			    arsta->addr, rate_code, ret);
+
+	return ret;
+}
+
+static int
+ath12k_mac_set_peer_ht_fixed_rate(struct ath12k_link_vif *arvif,
+				 struct ath12k_link_sta *arsta,
+				 const struct cfg80211_bitrate_mask *mask,
+				 enum nl80211_band band)
+{
+	struct ath12k *ar = arvif->ar;
+	u8 ht_rate, nss;
+	u32 rate_code;
+	int ret, i;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	struct ieee80211_link_sta *link_sta;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	nss = 0;
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+		if (hweight16(mask->control[band].ht_mcs[i]) == 1) {
+			nss = i + 1;
+			ht_rate = ffs(mask->control[band].ht_mcs[i]) - 1;
+		}
+	}
+
+	if (!nss) {
+		ath12k_warn(ar->ab, "No single HT Fixed rate found to set for %pM",
+			    sta->addr);
+		return -EINVAL;
+	}
+
+	/* Avoid updating invalid nss as fixed rate*/
+	rcu_read_lock();
+	link_sta = rcu_dereference(sta->link[arsta->link_id]);
+
+	if (!link_sta || nss > link_sta->rx_nss) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+
+	rcu_read_unlock();
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+			"Setting Fixed HT Rate for peer %pM. Device will not switch to any other selected rates",
+		  sta->addr);
+
+	rate_code = ATH12K_HW_RATE_CODE(ht_rate, nss - 1,
+					WMI_RATE_PREAMBLE_HT);
+	ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+					arvif->vdev_id,
+					WMI_PEER_PARAM_FIXED_RATE,
+					rate_code);
+
+	if (ret)
+		ath12k_warn(ar->ab,
+			    "failed to update STA %pM HT Fixed Rate %d: %d\n",
 			     sta->addr, rate_code, ret);
 
 	return ret;
 }
 
+static int
+ath12k_mac_set_peer_eht_fixed_rate(struct ath12k_link_vif *arvif,
+				   struct ath12k_link_sta *arsta,
+				   const struct cfg80211_bitrate_mask *mask,
+				   enum nl80211_band band)
+{
+	struct ath12k *ar = arvif->ar;
+	u8 eht_rate, nss;
+	u32 rate_code;
+	int ret, i;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	struct ieee80211_link_sta *link_sta;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	nss = 0;
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].eht_mcs); i++) {
+		if (hweight16(mask->control[band].eht_mcs[i]) == 1) {
+			nss = i + 1;
+			eht_rate = ffs(mask->control[band].eht_mcs[i]) - 1;
+		}
+	}
+
+	if (!nss) {
+		ath12k_warn(ar->ab, "No single EHT Fixed rate found to set for %pM",
+			    arsta->addr);
+		return -EINVAL;
+	}
+
+	/* Avoid updating invalid nss as fixed rate*/
+	rcu_read_lock();
+	link_sta = rcu_dereference(sta->link[arsta->link_id]);
+
+	if (!link_sta || nss > link_sta->rx_nss) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+
+	rcu_read_unlock();
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "Setting Fixed EHT Rate for peer %pM. Device will not switch to any other selected rates",
+		   arsta->addr);
+
+	rate_code = ATH12K_HW_RATE_CODE(eht_rate, nss - 1,
+					WMI_RATE_PREAMBLE_EHT);
+
+	ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
+					arvif->vdev_id,
+					WMI_PEER_PARAM_FIXED_RATE,
+					rate_code);
+	if (ret)
+		ath12k_warn(ar->ab,
+			    "failed to update STA %pM Fixed Rate %d: %d\n",
+			    arsta->addr, rate_code, ret);
+
+	return ret;
+}
+
 static int ath12k_station_assoc(struct ath12k *ar,
-				struct ieee80211_vif *vif,
-				struct ieee80211_sta *sta,
+				struct ath12k_link_vif *arvif,
+				struct ath12k_link_sta *arsta,
 				bool reassoc)
 {
-	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_vif *vif = ahvif->vif;
+	struct ieee80211_sta *sta;
+	struct ieee80211_link_sta *link_sta;
 	struct ath12k_wmi_peer_assoc_arg peer_arg;
 	int ret;
 	struct cfg80211_chan_def def;
 	enum nl80211_band band;
 	struct cfg80211_bitrate_mask *mask;
-	u8 num_vht_rates;
+	u8 num_vht_rates, num_he_rates, num_eht_rates, num_ht_rates;
+	u8 link_id = arvif->link_id;
+	bool ht_supp, vht_supp, has_he, has_eht;
+	struct ieee80211_sta_ht_cap ht_cap;
+	enum ieee80211_sta_rx_bandwidth bandwidth;
+	u16 he_6ghz_capa;
+
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+	if (!arvif)
+		return -EINVAL;
+
+	if (WARN_ON(ath12k_mac_vif_chan(vif, &def, link_id)))
 		return -EPERM;
 
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	if (WARN_ON(rcu_access_pointer(sta->link[link_id]) == NULL))
+		return -EINVAL;
+
 	band = def.chan->band;
 	mask = &arvif->bitrate_mask;
 
-	ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
+	ahsta = ath12k_sta_to_ahsta(sta);
+	arsta = ahsta->link[link_id];
+
+	if (WARN_ON(!arsta))
+		return -EINVAL;
 
+	rcu_read_lock();
+
+	link_sta = ath12k_get_link_sta(arsta);
+
+	if (!link_sta) {
+		rcu_read_unlock();
+		ath12k_warn(ar->ab, "unable to access link sta in station assoc\n");
+		return -EINVAL;
+	}
+
+	he_6ghz_capa = le16_to_cpu(link_sta->he_6ghz_capa.capa);
+	bandwidth = link_sta->bandwidth;
+	ht_cap = link_sta->ht_cap;
+	ht_supp = link_sta->ht_cap.ht_supported;
+	vht_supp = link_sta->vht_cap.vht_supported;
+	has_he = link_sta->he_cap.has_he;
+	has_eht = link_sta->eht_cap.has_eht;
+	bandwidth = link_sta->bandwidth;
+
+	ath12k_peer_assoc_prepare(ar, arvif, arsta, &peer_arg, reassoc);
+
+	rcu_read_unlock();
+	peer_arg.is_assoc = true;
 	ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
-			    sta->addr, arvif->vdev_id, ret);
+			    arsta->addr, arvif->vdev_id, ret);
 		return ret;
 	}
 
 	if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
 		ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
-			    sta->addr, arvif->vdev_id);
+			    arsta->addr, arvif->vdev_id);
 		return -ETIMEDOUT;
 	}
 
 	num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask);
-
-	/* If single VHT rate is configured (by set_bitrate_mask()),
-	 * peer_assoc will disable VHT. This is now enabled by a peer specific
-	 * fixed param.
+	num_he_rates = ath12k_mac_bitrate_mask_num_he_rates(ar, band, mask);
+	num_eht_rates = ath12k_mac_bitrate_mask_num_eht_rates(ar, band, mask);
+	num_ht_rates = ath12k_mac_bitrate_mask_num_ht_rates(ar, band, mask);
+
+	/* If single VHT/HE/EHT rate is configured (by set_bitrate_mask()),
+	 * peer_assoc will disable VHT/HE/EHT. This is now enabled by a peer
+	 * specific fixed param.
 	 * Note that all other rates and NSS will be disabled for this peer.
 	 */
-	if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
-		ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+	if (vht_supp && num_vht_rates == 1) {
+		ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask,
+							 band);
+		if (ret)
+			return ret;
+	} else if (has_he && num_he_rates == 1) {
+		ret = ath12k_mac_set_peer_he_fixed_rate(arvif, arsta, mask,
+							band);
+		if (ret)
+			return ret;
+	} else if (has_eht && num_eht_rates == 1) {
+		ret = ath12k_mac_set_peer_eht_fixed_rate(arvif, arsta, mask,
+							 band);
+		if (ret)
+			return ret;
+	} else if (ht_supp && num_ht_rates == 1) {
+		ret = ath12k_mac_set_peer_ht_fixed_rate(arvif, arsta, mask,
 							 band);
 		if (ret)
 			return ret;
@@ -3158,14 +6524,24 @@
 	if (reassoc)
 		return 0;
 
-	ret = ath12k_setup_peer_smps(ar, arvif, sta->addr,
-				     &sta->deflink.ht_cap);
+	ret = ath12k_setup_peer_smps(ar, arvif, arsta->addr,
+				     &ht_cap,
+				     he_6ghz_capa);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
 			    arvif->vdev_id, ret);
 		return ret;
 	}
 
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+	ret = ath12k_wmi_set_peer_intra_bss_cmd(ar, arvif->vdev_id, arsta->addr, 1);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set vdev %i intra bss enable: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+#endif
+
 	if (!sta->wme) {
 		arvif->num_legacy_stations++;
 		ret = ath12k_recalc_rtscts_prot(arvif);
@@ -3174,26 +6550,62 @@
 	}
 
 	if (sta->wme && sta->uapsd_queues) {
-		ret = ath12k_peer_assoc_qos_ap(ar, arvif, sta);
+		ret = ath12k_peer_assoc_qos_ap(ar, arvif, arsta);
 		if (ret) {
 			ath12k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n",
-				    sta->addr, arvif->vdev_id, ret);
+				    arsta->addr, arvif->vdev_id, ret);
 			return ret;
 		}
 	}
 
+	spin_lock_bh(&ar->data_lock);
+
+	/* Set arsta bw and prev bw */
+	arsta->bw = bandwidth;
+	arsta->bw_prev = bandwidth;
+
+	arvif->num_stations++;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MODE1_RECOVERY,
+		   "mac station %pM connected to vdev %u. num_stations=%u\n",
+		   arsta->addr,  arvif->vdev_id, arvif->num_stations);
+
+
+	spin_unlock_bh(&ar->data_lock);
+
 	return 0;
 }
 
 static int ath12k_station_disassoc(struct ath12k *ar,
-				   struct ieee80211_vif *vif,
-				   struct ieee80211_sta *sta)
+				   struct ath12k_link_vif *arvif,
+				   struct ath12k_link_sta *arsta)
 {
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
 	int ret;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
+	if (!arvif)
+		return -EINVAL;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	spin_lock_bh(&arvif->ar->data_lock);
+
+	if (!arvif->num_stations) {
+		ath12k_warn(ar->ab,
+			    "mac station disassoc for vdev %u which does not have any station connected\n",
+			    arvif->vdev_id);
+	} else {
+		arvif->num_stations--;
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+			   "mac station %pM disconnected from vdev %u. num_stations=%u\n",
+			   arsta->addr, arvif->vdev_id, arvif->num_stations);
+	}
+
+	spin_unlock_bh(&arvif->ar->data_lock);
+
 	if (!sta->wme) {
 		arvif->num_legacy_stations--;
 		ret = ath12k_recalc_rtscts_prot(arvif);
@@ -3201,42 +6613,201 @@
 			return ret;
 	}
 
-	ret = ath12k_clear_peer_keys(arvif, sta->addr);
-	if (ret) {
+	return 0;
+}
+
+static int ath12k_station_authorize(struct ath12k *ar,
+				    struct ath12k_link_vif *arvif,
+				    struct ath12k_link_sta *arsta)
+{
+	struct ath12k_peer *peer;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	int ret = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	spin_lock_bh(&ar->ab->base_lock);
+
+	peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+	if (peer)
+		peer->is_authorized = true;
+
+	spin_unlock_bh(&ar->ab->base_lock);
+
+	if (ahvif->vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
+		ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
+						arvif->vdev_id,
+						WMI_PEER_AUTHORIZE,
+						1);
+		if (ret)
+			ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
+				    arsta->addr, arvif->vdev_id, ret);
+	}
+
+	return ret;
+}
+
+static int ath12k_station_unauthorize(struct ath12k *ar,
+				      struct ath12k_link_vif *arvif,
+				      struct ath12k_link_sta *arsta)
+{
+	struct ath12k_peer *peer;
+	int ret = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+	spin_lock_bh(&ar->ab->base_lock);
+
+	peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+	if (peer)
+		peer->is_authorized = false;
+
+	spin_unlock_bh(&ar->ab->base_lock);
+
+	/* The driver should clear the peer keys when mac80211's ref ptr
+	 * gets cleared in __sta_info_destroy_part2 (transition from
+	 * IEEE80211_STA_AUTHORIZED to IEEE80211_STA_ASSOC)
+	 */
+	ret = ath12k_clear_peer_keys(arvif, arsta->addr);
+	if (ret)
 		ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
 			    arvif->vdev_id, ret);
+
 		return ret;
 	}
+
+static int ath12k_mac_set_peer_ch_switch_data(struct ath12k_link_vif *arvif,
+					      struct ath12k_link_sta *arsta,
+					      enum wmi_phy_mode peer_phymode,
+					      bool is_upgrade)
+{
+	struct ath12k *ar = arvif->ar;
+	struct ath12k_peer_ch_width_switch_data *peer_data;
+	struct wmi_chan_width_peer_arg *peer_arg;
+	struct ieee80211_link_sta *link_sta;
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
+	struct cfg80211_chan_def def;
+	u16 ru_punct_bitmap;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (!ar->ab->chwidth_num_peer_caps)
+		return -EOPNOTSUPP;
+
+	if (WARN_ON(ath12k_mac_vif_chan(vif, &def, arvif->link_id)))
+		return -EINVAL;
+
+	peer_data = arvif->peer_ch_width_switch_data;
+
+	if (!peer_data) {
+		peer_data = kzalloc(struct_size(peer_data, peer_arg,
+						arvif->num_stations),
+				    GFP_KERNEL);
+		if (!peer_data)
+			return -ENOMEM;
+
+		peer_data->count = 0;
+		arvif->peer_ch_width_switch_data = peer_data;
+	}
+
+	/* Before adding to the list, if it's a BW upgrade, the phymode should
+	 * be updated. We update and check now because if the phymode update
+	 * fails, there is no point in setting the bandwidth */
+	if (is_upgrade) {
+		ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
+						arvif->vdev_id, WMI_PEER_PHYMODE,
+						peer_phymode);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n",
+				    arsta->addr, peer_phymode, ret);
+
+			if (peer_data->count == 0) {
+				kfree(peer_data);
+				arvif->peer_ch_width_switch_data = NULL;
+			}
+
+			return -EINVAL;
+		}
+	}
+
+	peer_arg = &peer_data->peer_arg[peer_data->count++];
+
+
+	ru_punct_bitmap = 0;
+
+	rcu_read_lock();
+	link_sta = ath12k_get_link_sta(arsta);
+
+	if (link_sta) {
+		if (link_sta->he_cap.has_he && link_sta->eht_cap.has_eht)
+			ru_punct_bitmap = def.ru_punct_bitmap;
+
+		if (ieee80211_vif_is_mesh(vif) && link_sta->ru_punct_bitmap)
+			ru_punct_bitmap = link_sta->ru_punct_bitmap;
+	}
+
+	rcu_read_unlock();
+
+	spin_lock_bh(&ar->data_lock);
+	ether_addr_copy(peer_arg->mac_addr.addr, arsta->addr);
+	peer_arg->chan_width = arsta->bw;
+	peer_arg->puncture_20mhz_bitmap = ~ru_punct_bitmap;
+	peer_arg->peer_phymode = peer_phymode;
+	peer_arg->is_upgrade = is_upgrade;
+	spin_unlock_bh(&ar->data_lock);
+
+	if (peer_data->count == 1) {
+		reinit_completion(&arvif->peer_ch_width_switch_send);
+		ieee80211_queue_work(ar->ah->hw, &arvif->peer_ch_width_switch_work);
+	}
+
+	if (peer_data->count == arvif->num_stations)
+		complete(&arvif->peer_ch_width_switch_send);
+
 	return 0;
 }
 
 static void ath12k_sta_rc_update_wk(struct work_struct *wk)
 {
 	struct ath12k *ar;
-	struct ath12k_vif *arvif;
-	struct ath12k_sta *arsta;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_vif *ahvif;
+	struct ath12k_link_sta *arsta;
 	struct ieee80211_sta *sta;
 	struct cfg80211_chan_def def;
 	enum nl80211_band band;
 	const u8 *ht_mcs_mask;
 	const u16 *vht_mcs_mask;
+	const u16 *he_mcs_mask;
+	const u16 *eht_mcs_mask;
 	u32 changed, bw, nss, smps, bw_prev;
-	int err, num_vht_rates;
+	int err, num_vht_rates, num_he_rates, num_eht_rates, num_ht_rates;
 	const struct cfg80211_bitrate_mask *mask;
 	struct ath12k_wmi_peer_assoc_arg peer_arg;
 	enum wmi_phy_mode peer_phymode;
-
-	arsta = container_of(wk, struct ath12k_sta, update_wk);
-	sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+	struct ath12k_sta *ahsta;
+	struct ieee80211_link_sta *link_sta = NULL;
+	bool ht_supp, vht_supp, has_he, has_eht;
+
+	arsta = container_of(wk, struct ath12k_link_sta, update_wk);
+	ahsta = arsta->ahsta;
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
 	arvif = arsta->arvif;
+	ahvif = arvif->ahvif;
 	ar = arvif->ar;
 
-	if (WARN_ON(ath12k_mac_vif_chan(arvif->vif, &def)))
+	if (WARN_ON(ath12k_mac_vif_chan(ahvif->vif, &def, arsta->link_id)))
 		return;
 
 	band = def.chan->band;
 	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+	he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
+	eht_mcs_mask = arvif->bitrate_mask.control[band].eht_mcs;
 
 	spin_lock_bh(&ar->data_lock);
 
@@ -3253,13 +6824,20 @@
 	mutex_lock(&ar->conf_mutex);
 
 	nss = max_t(u32, 1, nss);
-	nss = min(nss, max(ath12k_mac_max_ht_nss(ht_mcs_mask),
-			   ath12k_mac_max_vht_nss(vht_mcs_mask)));
+	nss = min(nss, max(max(max(ath12k_mac_max_ht_nss(ht_mcs_mask),
+				   ath12k_mac_max_vht_nss(vht_mcs_mask)),
+			       ath12k_mac_max_he_nss(he_mcs_mask)),
+			   ath12k_mac_max_eht_nss(eht_mcs_mask)));
 
 	if (changed & IEEE80211_RC_BW_CHANGED) {
-		ath12k_peer_assoc_h_phymode(ar, arvif->vif, sta, &peer_arg);
+		ath12k_peer_assoc_h_phymode(ar, arvif, arsta, &peer_arg);
 		peer_phymode = peer_arg.peer_phymode;
 
+		err = ath12k_mac_set_peer_ch_switch_data(arvif, arsta, peer_phymode,
+							 bw > bw_prev ? true : false);
+		if (!err || err == -EINVAL)
+			goto err_rc_bw_changed;
+
 		if (bw > bw_prev) {
 			/* Phymode shows maximum supported channel width, if we
 			 * upgrade bandwidth then due to sanity check of firmware,
@@ -3267,13 +6845,13 @@
 			 * WMI_PEER_CHWIDTH
 			 */
 			ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth upgrade for sta %pM new %d old %d\n",
-				   sta->addr, bw, bw_prev);
-			err = ath12k_wmi_set_peer_param(ar, sta->addr,
+					arsta->addr, bw, bw_prev);
+			err = ath12k_wmi_set_peer_param(ar, arsta->addr,
 							arvif->vdev_id, WMI_PEER_PHYMODE,
 							peer_phymode);
 			if (err) {
 				ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n",
-					    sta->addr, peer_phymode, err);
+					    arsta->addr, peer_phymode, err);
 				goto err_rc_bw_changed;
 			}
 			err = ath12k_wmi_set_peer_param(ar, sta->addr,
@@ -3281,57 +6859,75 @@
 							bw);
 			if (err)
 				ath12k_warn(ar->ab, "failed to update STA %pM to peer bandwidth %d: %d\n",
-					    sta->addr, bw, err);
+					    arsta->addr, bw, err);
 		} else {
 			/* When we downgrade bandwidth this will conflict with phymode
 			 * and cause to trigger firmware crash. In this case we send
 			 * WMI_PEER_CHWIDTH followed by WMI_PEER_PHYMODE
 			 */
 			ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth downgrade for sta %pM new %d old %d\n",
-				   sta->addr, bw, bw_prev);
-			err = ath12k_wmi_set_peer_param(ar, sta->addr,
+					arsta->addr, bw, bw_prev);
+			err = ath12k_wmi_set_peer_param(ar, arsta->addr,
 							arvif->vdev_id, WMI_PEER_CHWIDTH,
 							bw);
 			if (err) {
 				ath12k_warn(ar->ab, "failed to update STA %pM peer to bandwidth %d: %d\n",
-					    sta->addr, bw, err);
+					    arsta->addr, bw, err);
 				goto err_rc_bw_changed;
 			}
-			err = ath12k_wmi_set_peer_param(ar, sta->addr,
+			err = ath12k_wmi_set_peer_param(ar, arsta->addr,
 							arvif->vdev_id, WMI_PEER_PHYMODE,
 							peer_phymode);
 			if (err)
 				ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n",
-					    sta->addr, peer_phymode, err);
+					    arsta->addr, peer_phymode, err);
 		}
 	}
 
 	if (changed & IEEE80211_RC_NSS_CHANGED) {
-		ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM nss %d\n",
-			   sta->addr, nss);
+		ath12k_dbg(ar->ab, ATH12K_DBG_PEER, "mac update sta %pM nss %d\n",
+			   arsta->addr, nss);
 
-		err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+		err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
 						WMI_PEER_NSS, nss);
 		if (err)
 			ath12k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
-				    sta->addr, nss, err);
+				    arsta->addr, nss, err);
 	}
 
 	if (changed & IEEE80211_RC_SMPS_CHANGED) {
-		ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM smps %d\n",
-			   sta->addr, smps);
+		ath12k_dbg(ar->ab, ATH12K_DBG_PEER, "mac update sta %pM smps %d\n",
+			   arsta->addr, smps);
 
-		err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+		err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
 						WMI_PEER_MIMO_PS_STATE, smps);
 		if (err)
 			ath12k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
-				    sta->addr, smps, err);
+				    arsta->addr, smps, err);
 	}
 
 	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+		if (arsta->disable_fixed_rate) {
+			err = ath12k_wmi_set_peer_param(ar, arsta->addr,
+							arvif->vdev_id,
+							WMI_PEER_PARAM_FIXED_RATE,
+							WMI_FIXED_RATE_NONE);
+			if (err)
+				ath12k_warn(ar->ab,
+					    "failed to disable peer fixed rate for STA %pM ret %d\n",
+					    arsta->addr, err);
+
+			arsta->disable_fixed_rate = false;
+		}
 		mask = &arvif->bitrate_mask;
+		num_ht_rates = ath12k_mac_bitrate_mask_num_ht_rates(ar, band,
+								    mask);
 		num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band,
 								      mask);
+		num_he_rates = ath12k_mac_bitrate_mask_num_he_rates(ar, band,
+								    mask);
+		num_eht_rates = ath12k_mac_bitrate_mask_num_eht_rates(ar, band,
+								      mask);
 
 		/* Peer_assoc_prepare will reject vht rates in
 		 * bitrate_mask if its not available in range format and
@@ -3344,39 +6940,110 @@
 		 * TODO: Check RATEMASK_CMDID to support auto rates selection
 		 * across HT/VHT and for multiple VHT MCS support.
 		 */
-		if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
-			ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+		rcu_read_lock();
+		link_sta = rcu_dereference(sta->link[arsta->link_id]);
+
+		if (!link_sta) {
+			rcu_read_unlock();
+			goto err_rc_bw_changed;
+		}
+
+		ht_supp = link_sta->ht_cap.ht_supported;
+		vht_supp = link_sta->vht_cap.vht_supported;
+		has_he = link_sta->he_cap.has_he;
+		has_eht = link_sta->eht_cap.has_eht;
+		rcu_read_unlock();
+
+		if (vht_supp && num_vht_rates == 1) {
+
+			ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask,
+							   band);
+		} else if (has_he && num_he_rates == 1) {
+			ath12k_mac_set_peer_he_fixed_rate(arvif, arsta, mask,
+							  band);
+		} else if (has_eht && num_eht_rates == 1) {
+			ath12k_mac_set_peer_eht_fixed_rate(arvif, arsta, mask,
+							   band);
+		} else if (ht_supp && num_ht_rates == 1) {
+			ath12k_mac_set_peer_ht_fixed_rate(arvif, arsta, mask,
 							   band);
 		} else {
-			/* If the peer is non-VHT or no fixed VHT rate
-			 * is provided in the new bitrate mask we set the
-			 * other rates using peer_assoc command.
+			/* If the peer is non-VHT/HE/EHT or no fixed VHT/HE/EHT
+			 * rate is provided in the new bitrate mask we set the
+			 * other rates using peer_assoc command. Also clear
+			 * the peer fixed rate settings as it has higher priority
+			 * than peer assoc
 			 */
-			ath12k_peer_assoc_prepare(ar, arvif->vif, sta,
+
+			err = ath12k_wmi_set_peer_param(ar, arsta->addr,
+							arvif->vdev_id,
+							WMI_PEER_PARAM_FIXED_RATE,
+							WMI_FIXED_RATE_NONE);
+			if (err)
+				ath12k_warn(ar->ab,
+					    "failed to disable peer fixed rate for STA %pM ret %d\n",
+					    arsta->addr, err);
+			ath12k_peer_assoc_prepare(ar, arvif, arsta,
 						  &peer_arg, true);
 
+			peer_arg.is_assoc = false;
 			err = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
 			if (err)
 				ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
-					    sta->addr, arvif->vdev_id, err);
+					    arsta->addr, arvif->vdev_id, err);
 
 			if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ))
 				ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
-					    sta->addr, arvif->vdev_id);
+					    arsta->addr, arvif->vdev_id);
 		}
 	}
 err_rc_bw_changed:
 	mutex_unlock(&ar->conf_mutex);
 }
 
-static int ath12k_mac_inc_num_stations(struct ath12k_vif *arvif,
-				       struct ieee80211_sta *sta)
+static void ath12k_sta_set_4addr_wk(struct work_struct *wk)
 {
+	struct ath12k *ar;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_sta *ahsta;
+	struct ath12k_link_sta *arsta;
+	struct ieee80211_sta *sta;
+	int ret = 0;
+	u8 link_id;
+
+	ahsta = container_of(wk, struct ath12k_sta, set_4addr_wk);
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	for_each_set_bit(link_id, &ahsta->links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arsta = ahsta->link[link_id];
+		arvif = arsta->arvif;
+		ar = arvif->ar;
+
+		ath12k_dbg(ar->ab, ATH12K_DBG_PEER,
+			   "setting USE_4ADDR for peer %pM\n", arsta->addr);
+
+		ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
+						arvif->vdev_id,
+						WMI_PEER_USE_4ADDR, 1);
+		if (ret)
+			ath12k_warn(ar->ab, "failed to set peer %pM 4addr capability: %d\n",
+				    arsta->addr, ret);
+	}
+}
+
+static int ath12k_mac_inc_num_stations(struct ath12k_link_vif *arvif,
+				       struct ath12k_link_sta *arsta)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
 	struct ath12k *ar = arvif->ar;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
 		return 0;
 
 	if (ar->num_stations >= ar->max_num_stations)
@@ -3384,95 +7051,226 @@
 
 	ar->num_stations++;
 
+	ath12k_dbg(ar->ab, ATH12K_DBG_MODE1_RECOVERY,
+		   "num_stat incremented:%d\n", ar->num_stations);
 	return 0;
 }
 
-static void ath12k_mac_dec_num_stations(struct ath12k_vif *arvif,
-					struct ieee80211_sta *sta)
+static void ath12k_mac_station_post_remove(struct ath12k *ar,
+					   struct ath12k_link_vif *arvif,
+					   struct ath12k_link_sta *arsta)
 {
-	struct ath12k *ar = arvif->ar;
+	struct ath12k_peer *peer;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_vif *vif = ahvif->vif;
+	bool skip_peer_delete;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	ath12k_mac_dec_num_stations(arvif, arsta);
+
+	mutex_lock(&ar->ab->tbl_mtx_lock);
+	spin_lock_bh(&ar->ab->base_lock);
+	skip_peer_delete = ar->ab->hw_params->vdev_start_delay &&
+			   vif->type == NL80211_IFTYPE_STATION;
+
+	peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+	if (skip_peer_delete && peer) {
+		peer->sta = NULL;
+	} else if (peer && peer->sta == sta) {
+		ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
+			    arsta->addr, arvif->vdev_id);
+		ath12k_peer_rhash_delete(ar->ab, peer);
+		peer->sta = NULL;
+		list_del(&peer->list);
+#ifdef CONFIG_ATH12K_SAWF
+		if (peer->sawf_ctx_peer.telemetry_peer_ctx)
+			ath12k_telemetry_peer_ctx_free(peer->sawf_ctx_peer.telemetry_peer_ctx);
+#endif
+		kfree(peer);
+		ar->num_peers--;
+	}
+	spin_unlock_bh(&ar->ab->base_lock);
+	mutex_unlock(&ar->ab->tbl_mtx_lock);
+
+	kfree(arsta->tx_stats);
+	arsta->tx_stats = NULL;
+
+	kfree(arsta->rx_stats);
+	arsta->rx_stats = NULL;
+	ath12k_mac_ap_ps_recalc(ar);
+	ahsta->ahvif = NULL;
+}
+
+static int ath12k_mac_station_remove(struct ath12k *ar,
+				     struct ath12k_link_vif *arvif,
+				     struct ath12k_link_sta *arsta)
+{
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	int ret = 0;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_vif *vif = ahvif->vif;
+	bool skip_peer_delete;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
-		return;
+	ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr);
 
-	ar->num_stations--;
+	skip_peer_delete = ar->ab->hw_params->vdev_start_delay &&
+			   vif->type == NL80211_IFTYPE_STATION;
+
+	if (!skip_peer_delete) {
+		ret = ath12k_peer_delete(ar, arvif->vdev_id, arsta->addr);
+		if (ret)
+			ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
+				    arsta->addr, arvif->vdev_id);
+		else
+			ath12k_dbg(ar->ab, ATH12K_DBG_PEER, "Removed peer: %pM for VDEV: %d\n",
+				   arsta->addr, arvif->vdev_id);
+
+		ath12k_mac_station_post_remove(ar, arvif, arsta);
+	}
+
+	return ret;
 }
 
 static int ath12k_mac_station_add(struct ath12k *ar,
-				  struct ieee80211_vif *vif,
-				  struct ieee80211_sta *sta)
+				  struct ath12k_link_vif *arvif,
+				  struct ath12k_link_sta *arsta)
 {
 	struct ath12k_base *ab = ar->ab;
-	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
-	struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
-	struct ath12k_wmi_peer_create_arg peer_param;
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+	struct ath12k_wmi_peer_create_arg peer_param = {0};
+	struct ath12k_neighbor_peer *nrp, *tmp;
+	int nvdev_id;
 	int ret;
+	bool del_nrp = false;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	ret = ath12k_mac_inc_num_stations(arvif, sta);
+	if (!arvif)
+		return -EINVAL;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	ret = ath12k_mac_inc_num_stations(arvif, arsta);
 	if (ret) {
 		ath12k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
 			    ar->max_num_stations);
 		goto exit;
 	}
 
+	if (ath12k_debugfs_is_extd_rx_stats_enabled(ar) && !arsta->rx_stats) {
 	arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
 	if (!arsta->rx_stats) {
 		ret = -ENOMEM;
 		goto dec_num_station;
 	}
+	}
 
 	peer_param.vdev_id = arvif->vdev_id;
-	peer_param.peer_addr = sta->addr;
+	peer_param.peer_addr = arsta->addr;
 	peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+	peer_param.ml_enabled = sta->mlo;
+
+	/*
+	 * When the neighbor peer associates with this AP and successfully
+	 * becomes a station, check and clear the corresponding MAC from
+	 * NRP list and failing to do so would inadvertently cause the
+	 * STA association(peer creation for STA) to fail due to the NRP
+	 * having created a peer already for the same MAC address
+	 */
+	if (!list_empty(&ab->neighbor_peers)) {
+		spin_lock_bh(&ab->base_lock);
+		list_for_each_entry_safe(nrp, tmp, &ab->neighbor_peers, list) {
+			if (ether_addr_equal(nrp->addr, arsta->addr)) {
+				nvdev_id = nrp->vdev_id;
+				list_del(&nrp->list);
+				kfree(nrp);
+				del_nrp = true;
+				break;
+			}
+		}
+		spin_unlock_bh(&ab->base_lock);
+
+		if (del_nrp) {
+			ath12k_peer_delete(ar, nvdev_id, arsta->addr);
+			ath12k_debugfs_nrp_clean(ar, arsta->addr);
+		}
+	}
 
 	ret = ath12k_peer_create(ar, arvif, sta, &peer_param);
 	if (ret) {
 		ath12k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
-			    sta->addr, arvif->vdev_id);
+			    arsta->addr, arvif->vdev_id);
+		goto free_rx_stats;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_PEER, "Added peer: %pM for VDEV: %d num_stations : %d\n",
+		    arsta->addr, arvif->vdev_id, ar->num_stations);
+
+	if (ath12k_debugfs_is_extd_tx_stats_enabled(ar) && (!arsta->tx_stats)) {
+		arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats),
+					  GFP_KERNEL);
+		if (!arsta->tx_stats) {
+			ret = -ENOMEM;
 		goto free_peer;
 	}
 
-	ath12k_dbg(ab, ATH12K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
-		   sta->addr, arvif->vdev_id);
+		arsta->wbm_tx_stats = kzalloc(sizeof(*arsta->wbm_tx_stats), GFP_KERNEL);
+		if (!arsta->wbm_tx_stats) {
+			ret = -ENOMEM;
+			goto free_peer;
+		}
+	}
 
 	if (ieee80211_vif_is_mesh(vif)) {
-		ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+		ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
 						arvif->vdev_id,
 						WMI_PEER_USE_4ADDR, 1);
 		if (ret) {
 			ath12k_warn(ab, "failed to STA %pM 4addr capability: %d\n",
-				    sta->addr, ret);
-			goto free_peer;
+				    arsta->addr, ret);
+			goto free_tx_stats;
 		}
 	}
 
-	ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
-	if (ret) {
-		ath12k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
-			    sta->addr, arvif->vdev_id, ret);
-		goto free_peer;
-	}
-
 	if (ab->hw_params->vdev_start_delay &&
 	    !arvif->is_started &&
-	    arvif->vdev_type != WMI_VDEV_TYPE_AP) {
-		ret = ath12k_start_vdev_delay(ar->hw, vif);
+	    ahvif->vdev_type != WMI_VDEV_TYPE_AP) {
+		ret = ath12k_start_vdev_delay(ar, vif);
 		if (ret) {
 			ath12k_warn(ab, "failed to delay vdev start: %d\n", ret);
-			goto free_peer;
+			goto free_tx_stats;
 		}
 	}
 
+	INIT_WORK(&arsta->update_wk, ath12k_sta_rc_update_wk);
+
+	ahsta->ahvif = ahvif;
+	ewma_avg_rssi_init(&arsta->avg_rssi);
 	return 0;
 
+free_tx_stats:
+	kfree(arsta->tx_stats);
+	arsta->tx_stats = NULL;
+	kfree(arsta->wbm_tx_stats);
+	arsta->wbm_tx_stats = NULL;
 free_peer:
-	ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
+	ath12k_peer_delete(ar, arvif->vdev_id, arsta->addr);
+free_rx_stats:
+	kfree(arsta->rx_stats);
+	arsta->rx_stats = NULL;
 dec_num_station:
-	ath12k_mac_dec_num_stations(arvif, sta);
+	ath12k_mac_dec_num_stations(arvif, arsta);
 exit:
 	return ret;
 }
@@ -3495,6 +7293,9 @@
 	case IEEE80211_STA_RX_BW_160:
 		bw = WMI_PEER_CHWIDTH_160MHZ;
 		break;
+	case IEEE80211_STA_RX_BW_320:
+		bw = WMI_PEER_CHWIDTH_320MHZ;
+		break;
 	default:
 		ath12k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
 			    sta->deflink.bandwidth, sta->addr);
@@ -3505,118 +7306,258 @@
 	return bw;
 }
 
-static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
-				   struct ieee80211_vif *vif,
-				   struct ieee80211_sta *sta,
+static int ath12k_mac_assign_link_sta(struct ath12k_hw *ah,
+				       struct ath12k_sta *ahsta,
+				       struct ath12k_link_sta *arsta,
+				       struct ath12k_vif *ahvif,
+				       u8 link_id)
+{
+	struct ieee80211_link_sta *link_sta;
+	struct ieee80211_sta *sta;
+
+	lockdep_assert_held(&ah->conf_mutex);
+
+	if (!arsta || link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+		return -EINVAL;
+
+	if (WARN_ON(!ahvif->link[link_id]))
+		return -EINVAL;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	memset(arsta, 0, sizeof(*arsta));
+
+	rcu_read_lock();
+	link_sta = rcu_dereference(sta->link[link_id]);
+	if (!link_sta) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	ether_addr_copy(arsta->addr, link_sta->addr);
+	rcu_read_unlock();
+
+	/* logical index of the link sta in order of creation */
+	arsta->link_idx = ahsta->num_peer++;
+
+	ahsta->link[link_id] = arsta;
+	ahsta->links_map |= BIT(link_id);
+	arsta->arvif = ahvif->link[link_id];
+	arsta->ahsta = ahsta;
+	arsta->link_id = link_id;
+	arsta->state = IEEE80211_STA_NONE;
+
+	return 0;
+}
+
+static int ath12k_mac_unassign_link_sta(struct ath12k_hw *ah,
+				       struct ath12k_sta *ahsta,
+				       u8 link_id)
+{
+	lockdep_assert_held(&ah->conf_mutex);
+
+	if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+		return -EINVAL;
+
+	ahsta->link[link_id] = NULL;
+	ahsta->links_map &= ~BIT(link_id);
+
+	return 0;
+}
+
+static struct ath12k_link_sta *
+ath12k_mac_alloc_assign_link_sta(struct ath12k_hw *ah, struct ath12k_sta *ahsta,
+				 struct ath12k_vif *ahvif, u8 link_id)
+{
+	struct ath12k_link_sta *arsta;
+
+	lockdep_assert_held(&ah->conf_mutex);
+
+	if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+		return NULL;
+
+	if (ahsta->link[link_id]) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	arsta = kzalloc(sizeof(*arsta), GFP_KERNEL);
+	if (!arsta)
+		return NULL;
+
+	if (ath12k_mac_assign_link_sta(ah, ahsta, arsta, ahvif, link_id)) {
+		kfree(arsta);
+		return NULL;
+	}
+
+	return arsta;
+}
+
+static int ath12k_mac_free_unassign_link_sta(struct ath12k_hw *ah,
+					     struct ath12k_sta *ahsta,
+					     u8 link_id)
+{
+	struct ath12k_link_sta *arsta;
+
+	lockdep_assert_held(&ah->conf_mutex);
+
+	if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+		return -EINVAL;
+
+	arsta = ahsta->link[link_id];
+
+	WARN_ON(arsta == NULL);
+
+	ath12k_mac_unassign_link_sta(ah, ahsta, link_id);
+
+	if (arsta != &ahsta->deflink)
+		kfree(arsta);
+
+	return 0;
+}
+
+static void ath12k_mac_ml_station_remove(struct ath12k_vif *ahvif,
+				        struct ath12k_sta *ahsta)
+{
+	struct ieee80211_sta *sta;
+	struct ath12k_hw *ah = ahvif->ah;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_link_sta *arsta;
+	struct ath12k *ar;
+	unsigned long links;
+	u8 link_id;
+
+	lockdep_assert_held(&ah->conf_mutex);
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	ath12k_ml_link_peers_delete(ahvif, ahsta);
+
+	/* validate link station removal and clear arsta links */
+	links = sta->valid_links;
+	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif = ahvif->link[link_id];
+		arsta = ahsta->link[link_id];
+
+		if (!arvif || !arsta)
+			continue;
+
+		ar = arvif->ar;
+
+		mutex_lock(&ar->conf_mutex);
+
+		ath12k_mac_station_post_remove(ar, arvif, arsta);
+		mutex_unlock(&ar->conf_mutex);
+
+		ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id);
+	}
+	ath12k_ml_peer_delete(ah, sta);
+}
+
+static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
+					    struct ath12k_link_vif *arvif,
+					    struct ath12k_link_sta *arsta,
 				   enum ieee80211_sta_state old_state,
 				   enum ieee80211_sta_state new_state)
 {
-	struct ath12k *ar = hw->priv;
-	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
-	struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
-	struct ath12k_peer *peer;
+	struct ath12k *ar = arvif->ar;
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
 	int ret = 0;
 
-	/* cancel must be done outside the mutex to avoid deadlock */
+	if (!ar) {
+		ath12k_err(NULL, "unable to determine device to set sta state\n");
+		return -EINVAL;
+	}
+
+	if (unlikely(test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
+		return -ESHUTDOWN;
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	if (sta->mlo &&
+	    (test_bit(ATH12K_FLAG_UMAC_RECOVERY_START, &ar->ab->dev_flags)))
+		/* Shouldn't allow MLO STA assoc until UMAC_RECOVERY bit is cleared */
+		return 0;
+
+	/* cancel must be done outside the ar mutex to avoid deadlock */
 	if ((old_state == IEEE80211_STA_NONE &&
-	     new_state == IEEE80211_STA_NOTEXIST))
+	     new_state == IEEE80211_STA_NOTEXIST)) {
+		/* ML sta needs separate handling */
+		if (sta->mlo)
+			return 0;
 		cancel_work_sync(&arsta->update_wk);
+	}
 
 	mutex_lock(&ar->conf_mutex);
 
 	if (old_state == IEEE80211_STA_NOTEXIST &&
 	    new_state == IEEE80211_STA_NONE) {
-		memset(arsta, 0, sizeof(*arsta));
-		arsta->arvif = arvif;
-		INIT_WORK(&arsta->update_wk, ath12k_sta_rc_update_wk);
-
-		ret = ath12k_mac_station_add(ar, vif, sta);
+		ret = ath12k_mac_station_add(ar, arvif, arsta);
 		if (ret)
 			ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
-				    sta->addr, arvif->vdev_id);
+				    arsta->addr, arvif->vdev_id);
+		ahsta->low_ack_sent = false;
 	} else if ((old_state == IEEE80211_STA_NONE &&
 		    new_state == IEEE80211_STA_NOTEXIST)) {
-		ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
 
-		ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
+		ret = ath12k_mac_station_remove(ar, arvif, arsta);
 		if (ret)
-			ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
-				    sta->addr, arvif->vdev_id);
-		else
-			ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
-				   sta->addr, arvif->vdev_id);
+			ath12k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n",
+				    arsta->addr, arvif->vdev_id);
+		if (sta->valid_links)
+			ath12k_mac_free_unassign_link_sta(arvif->ahvif->ah,
+							  arsta->ahsta, arsta->link_id);
+	} else if (old_state == IEEE80211_STA_AUTH &&
+		   new_state == IEEE80211_STA_ASSOC) {
 
-		ath12k_mac_dec_num_stations(arvif, sta);
-		spin_lock_bh(&ar->ab->base_lock);
-		peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
-		if (peer && peer->sta == sta) {
-			ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
-				    vif->addr, arvif->vdev_id);
-			peer->sta = NULL;
-			list_del(&peer->list);
-			kfree(peer);
-			ar->num_peers--;
-		}
-		spin_unlock_bh(&ar->ab->base_lock);
+		ret = ath12k_dp_peer_setup(ar, arvif, arsta);
+		if (ret)
+			ath12k_warn(ar->ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
+				    arsta->addr, arvif->vdev_id, ret);
 
-		kfree(arsta->rx_stats);
-		arsta->rx_stats = NULL;
-	} else if (old_state == IEEE80211_STA_AUTH &&
-		   new_state == IEEE80211_STA_ASSOC &&
-		   (vif->type == NL80211_IFTYPE_AP ||
+		ath12k_dp_peer_default_route_setup(ar, arvif, arsta);
+
+		if (vif->type == NL80211_IFTYPE_AP ||
 		    vif->type == NL80211_IFTYPE_MESH_POINT ||
-		    vif->type == NL80211_IFTYPE_ADHOC)) {
-		ret = ath12k_station_assoc(ar, vif, sta, false);
+		    vif->type == NL80211_IFTYPE_ADHOC) {
+			ret = ath12k_station_assoc(ar, arvif, arsta, false);
 		if (ret)
 			ath12k_warn(ar->ab, "Failed to associate station: %pM\n",
-				    sta->addr);
-
-		spin_lock_bh(&ar->data_lock);
-
-		arsta->bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
-		arsta->bw_prev = sta->deflink.bandwidth;
-
-		spin_unlock_bh(&ar->data_lock);
+					    arsta->addr);
+		}
 	} else if (old_state == IEEE80211_STA_ASSOC &&
 		   new_state == IEEE80211_STA_AUTHORIZED) {
-		spin_lock_bh(&ar->ab->base_lock);
-
-		peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
-		if (peer)
-			peer->is_authorized = true;
-
-		spin_unlock_bh(&ar->ab->base_lock);
-
-		if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
-			ret = ath12k_wmi_set_peer_param(ar, sta->addr,
-							arvif->vdev_id,
-							WMI_PEER_AUTHORIZE,
-							1);
+		ret = ath12k_station_authorize(ar, arvif, arsta);
+	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
+		   new_state == IEEE80211_STA_ASSOC) {
+		ath12k_station_unauthorize(ar, arvif, arsta);
+	} else if (old_state == IEEE80211_STA_ASSOC &&
+		   new_state == IEEE80211_STA_AUTHORIZED) {
+		ret = ath12k_station_authorize(ar, arvif, arsta);
 			if (ret)
 				ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
 					    sta->addr, arvif->vdev_id, ret);
-		}
 	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
 		   new_state == IEEE80211_STA_ASSOC) {
-		spin_lock_bh(&ar->ab->base_lock);
-
-		peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
-		if (peer)
-			peer->is_authorized = false;
-
-		spin_unlock_bh(&ar->ab->base_lock);
+		ath12k_station_unauthorize(ar, arvif, arsta);
 	} else if (old_state == IEEE80211_STA_ASSOC &&
 		   new_state == IEEE80211_STA_AUTH &&
 		   (vif->type == NL80211_IFTYPE_AP ||
 		    vif->type == NL80211_IFTYPE_MESH_POINT ||
 		    vif->type == NL80211_IFTYPE_ADHOC)) {
-		ret = ath12k_station_disassoc(ar, vif, sta);
+		ret = ath12k_station_disassoc(ar, arvif, arsta);
 		if (ret)
 			ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n",
-				    sta->addr);
+				    arsta->addr);
 	}
 
+	arsta->state = new_state;
 	mutex_unlock(&ar->conf_mutex);
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_PEER, "mac sta %pM old state %d new state :%d\n",
+		   arsta->addr, old_state, new_state);
+
 	return ret;
 }
 
@@ -3624,64 +7565,458 @@
 				       struct ieee80211_vif *vif,
 				       struct ieee80211_sta *sta)
 {
-	struct ath12k *ar = hw->priv;
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	struct ath12k_vif *ahvif = (void *)vif->drv_priv;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_link_vif *arvif;
+	struct ath12k_link_sta *arsta;
 	int ret;
 	s16 txpwr;
+	/* TODO use link id from op after support is available */
+	u8 link_id = 0;
+
+	mutex_lock(&ah->conf_mutex);
+
+	/* TODO get arvif based on link id */
+	arvif = ahvif->link[link_id];
+	arsta = ahsta->link[link_id];
+
+	if (!arvif) {
+		ath12k_err(NULL, "unable to determine device to set sta txpwr\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ar = ath12k_get_ar_by_vif(hw, vif, link_id);
+	if (!ar) {
+		ath12k_err(NULL, "unable to determine device to set sta txpwr\n");
+		ret = -EINVAL;
+		goto out;
+	}
 
 	if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
 		txpwr = 0;
 	} else {
 		txpwr = sta->deflink.txpwr.power;
-		if (!txpwr)
-			return -EINVAL;
+		if (!txpwr) {
+			ret = -EINVAL;
+			goto out;
+		}
 	}
 
-	if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL)
-		return -EINVAL;
+	if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	mutex_lock(&ar->conf_mutex);
 
-	ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+	ret = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
 					WMI_PEER_USE_FIXED_PWR, txpwr);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to set tx power for station ret: %d\n",
 			    ret);
-		goto out;
+		goto err;
 	}
 
+err:
+	mutex_unlock(&ar->conf_mutex);
 out:
+	mutex_unlock(&ah->conf_mutex);
+	return ret;
+}
+
+static inline void ath12k_vdev_stop_notify(struct ath12k_link_vif *arvif)
+{
+	int ret = 0;
+	if (WARN_ON(!arvif || !arvif->ar))
+		return;
+
+	mutex_lock(&arvif->ar->conf_mutex);
+	reinit_completion(&arvif->ar->vdev_setup_done);
+	ret = ath12k_wmi_vdev_stop(arvif->ar, arvif->vdev_id);
+
+	if (ret) {
+		ath12k_warn(arvif->ar->ab,
+			    "failed to stop WMI vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		goto exit;
+	}
+
+	ret = ath12k_mac_vdev_setup_sync(arvif->ar);
+	if (ret)
+		ath12k_warn(arvif->ar->ab,
+			    "failed to synchronize setup for vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+exit:
+	if (!ret)
+		arvif->vdev_stop_notify_done = true;
+
+	mutex_unlock(&arvif->ar->conf_mutex);
+}
+
+int ath12k_mac_update_sta_state(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				struct ieee80211_sta *sta,
+				enum ieee80211_sta_state old_state,
+				enum ieee80211_sta_state new_state)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_link_sta *arsta;
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	unsigned long links;
+	int ret = 0;
+	u8 link_id = 0, link;
+
+	mutex_lock(&ah->conf_mutex);
+
+	if (vif->valid_links && sta->valid_links) {
+		if (!sta->mlo)
+			WARN_ON(hweight16(sta->valid_links) != 1);
+		link_id = ffs(sta->valid_links) - 1;
+	}
+
+	if ((old_state == IEEE80211_STA_NONE &&
+	     new_state == IEEE80211_STA_NOTEXIST)) {
+		cancel_work_sync(&ahsta->set_4addr_wk);
+	}
+
+	if ((old_state == IEEE80211_STA_NOTEXIST &&
+	     new_state == IEEE80211_STA_NONE)) {
+		INIT_WORK(&ahsta->set_4addr_wk, ath12k_sta_set_4addr_wk);
+		if (!sta->mlo) {
+			ret = ath12k_mac_assign_link_sta(ah, ahsta, &ahsta->deflink,
+							   ahvif, link_id);
+			if (ret)
+				goto exit;
+		}
+	}
+
+	if (!sta->mlo) {
+		arvif = ahvif->link[link_id];
+		arsta = ahsta->link[link_id];
+
+		if (WARN_ON(arvif == NULL || arsta == NULL)) {
+			ret = -EINVAL;
+			goto exit;
+		}
+		if (new_state == IEEE80211_STA_NOTEXIST &&
+		    old_state == IEEE80211_STA_NONE &&
+		    ahvif->vdev_type == WMI_VDEV_TYPE_STA)
+			ath12k_vdev_stop_notify(arvif);
+
+		ret = ath12k_mac_handle_link_sta_state(hw, arvif, arsta,
+						       old_state, new_state);
+		if (arvif->ar &&
+		    test_bit(ATH12K_FLAG_RECOVERY, &arvif->ar->ab->dev_flags))
+			ret = 0;
+
+		mutex_unlock(&ah->conf_mutex);
+		return ret;
+	}
+
+	if (!sta->valid_links && !vif->valid_links)
+		WARN_ON(1);
+
+	/* assign default link to the first link sta */
+	if (!ahsta->links_map && hweight16(sta->valid_links) == 1 &&
+	    new_state == IEEE80211_STA_NONE && old_state == IEEE80211_STA_NOTEXIST) {
+		/* add case to prevent MLO assoc from happening when UMAC recovery
+		 * happens
+		 */
+		for_each_set_bit(link, &ahvif->links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+			arvif = ahvif->link[link];
+
+			if (!arvif || !arvif->ar ||
+			    (test_bit(ATH12K_FLAG_UMAC_RECOVERY_START,
+				      &arvif->ar->ab->dev_flags))){
+				ret = -EINVAL;
+				goto exit;
+			}
+		}
+
+		ret = ath12k_ml_peer_create(ah, sta);
+		if (ret) {
+			ath12k_err(NULL, "unable to create ML peer for sta %pM", sta->addr);
+			mutex_unlock(&ah->conf_mutex);
+			return ret;
+		}
+
+		ath12k_mac_assign_link_sta(ah, ahsta, &ahsta->deflink,
+					   ahvif, link_id);
+
+		ahsta->deflink.is_assoc_link = true;
+		ahsta->assoc_link_id = link_id;
+		ahsta->primary_link_id = link_id;
+	}
+
+	if (new_state == IEEE80211_STA_NOTEXIST && old_state == IEEE80211_STA_NONE) {
+		if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) {
+			links = sta->valid_links;
+			for_each_set_bit(link_id, &links,
+					 IEEE80211_MLD_MAX_NUM_LINKS) {
+				arvif = ahvif->link[link_id];
+				ath12k_vdev_stop_notify(arvif);
+			}
+		}
+		ath12k_mac_ml_station_remove(ahvif, ahsta);
+		goto exit;
+	}
+
+	/* Reconfig links of arsta during recovery */
+	if (ahsta->link[ahsta->assoc_link_id]->state != IEEE80211_STA_NONE &&
+	    old_state == IEEE80211_STA_NOTEXIST &&
+	    new_state == IEEE80211_STA_NONE) {
+		ahsta->num_peer = 0;
+
+		for_each_set_bit(link_id, &ahsta->links_map,
+				 IEEE80211_MLD_MAX_NUM_LINKS) {
+			arsta = ahsta->link[link_id];
+			arvif = ahvif->link[link_id];
+
+			ath12k_mac_assign_link_sta(ah, ahsta, arsta,
+						   ahvif, link_id);
+		}
+		ahsta->deflink.is_assoc_link = true;
+
+		if (ahsta->use_4addr_set)
+			ieee80211_queue_work(ah->hw, &ahsta->set_4addr_wk);
+	}
+
+	links = sta->valid_links;
+	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif = ahvif->link[link_id];
+		arsta = ahsta->link[link_id];
+
+		/* some assumptions went wrong! */
+		if (WARN_ON(!arvif || !arsta))
+			continue;
+
+		ret = ath12k_mac_handle_link_sta_state(hw, arvif, arsta,
+						       old_state, new_state);
+		if (ret) {
+			if (ret != -ESHUTDOWN)
+				ath12k_err(NULL, "unable to move link sta %d of sta %pM from state %d to %d",
+					   link_id, arsta->addr, old_state, new_state);
+
+			/* If FW recovery is ongoing, no need to move down sta states
+			 * as FW will wake up with a clean slate. Hence we set the
+			 * return value to 0, so that upper layers are not aware
+			 * of the FW being in recovery state.
+			 */
+			if (old_state > new_state) {
+				if (!arvif->ar)
+					continue;
+				if (test_bit(ATH12K_FLAG_RECOVERY, &arvif->ar->ab->dev_flags))
+					ret = 0;
+			}
+
+			if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE &&
+					!test_bit(ATH12K_FLAG_RECOVERY, &arvif->ar->ab->dev_flags)) {
+
+				/* Unassign this link sta which couldnt be added to FW and
+				 * cleanup the other link stations added earlier
+				 */
+				ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id);
+				ath12k_mac_ml_station_remove(ahvif, ahsta);
+			}
+
+			mutex_unlock(&ah->conf_mutex);
+			return ret;
+		}
+	}
+
+	if (old_state == IEEE80211_STA_AUTH &&  new_state == IEEE80211_STA_ASSOC) {
+		/* TODO sync wait for ML peer map success, else clear ml peer info on
+		 * all partners? TBD on testing
+		 */
+	}
+
+exit:
+	mutex_unlock(&ah->conf_mutex);
+
+	return ret;
+}
+
+static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   struct ieee80211_sta *sta,
+				   enum ieee80211_sta_state old_state,
+				   enum ieee80211_sta_state new_state)
+{
+	return ath12k_mac_update_sta_state(hw, vif, sta, old_state,
+					   new_state);
+}
+
+static int ath12k_mac_op_change_sta_links(struct ieee80211_hw *hw,
+					    struct ieee80211_vif *vif,
+					    struct ieee80211_sta *sta,
+					    u16 old_links, u16 new_links)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_link_sta *arsta;
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	unsigned long valid_links;
+	u8 link_id;
+	int ret = 0;
+
+	if (!sta->valid_links)
+		return -EINVAL;
+
+	/* FW doesnt support removal of one of link stas. All sta would be removed during ML STA
+	 * delete in sta_state(), hence link sta removal is not handled here.
+	 */
+	if (new_links < old_links)
+		return 0;
+
+	mutex_lock(&ah->conf_mutex);
+
+	if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
+		ath12k_err(NULL, "unable to add link for ml sta %pM", sta->addr);
+		mutex_unlock(&ah->conf_mutex);
+		return -EINVAL;
+	}
+
+	/* this op is expected only after initial sta insertion with default link */
+	WARN_ON(ahsta->links_map == 0);
+
+	valid_links = sta->valid_links;
+	if ((test_bit(ahvif->primary_link_id, &valid_links))) {
+		arvif = ahvif->link[ahvif->primary_link_id];
+		if (arvif->ar->ab->hw_params->is_plink_preferable) {
+			ahsta->primary_link_id = ahvif->primary_link_id;
+		} else {
+			ahsta->primary_link_id = ahsta->assoc_link_id;
+			arvif = ahvif->link[ahsta->assoc_link_id];
+			if (!arvif->ar->ab->hw_params->is_plink_preferable) {
+				valid_links = sta->valid_links;
+				for_each_set_bit(link_id, &valid_links,
+						 IEEE80211_MLD_MAX_NUM_LINKS) {
+					if (link_id != ahsta->primary_link_id) {
+						ahsta->primary_link_id = link_id;
+						break;
+					}
+				}
+			}
+		}
+	}
+
+	valid_links = new_links;
+	for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		if (ahsta->links_map & BIT(link_id))
+			continue;
+
+		arvif = ahvif->link[link_id];
+		arsta = ath12k_mac_alloc_assign_link_sta(ah, ahsta, ahvif, link_id);
+
+		if (!arvif || !arsta) {
+			ath12k_err(NULL, "Failed to alloc/assign link sta");
+			continue;
+		}
+
+		ar = arvif->ar;
+		if (!ar) {
+			ath12k_err(NULL,
+				   "Failed to get ar to change sta links\n");
+			continue;
+		}
+
+		mutex_lock(&ar->conf_mutex);
+		ret = ath12k_mac_station_add(ar, arvif, arsta);
+		if (ret) {
+			mutex_unlock(&ar->conf_mutex);
+			ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+				    arsta->addr, arvif->vdev_id);
+			ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id);
+			break;
+		}
 	mutex_unlock(&ar->conf_mutex);
+	}
+
+	mutex_unlock(&ah->conf_mutex);
+
 	return ret;
 }
 
+static void ath12k_mac_op_sta_set_4addr(struct ieee80211_hw *hw,
+					struct ieee80211_vif *vif,
+					struct ieee80211_sta *sta, bool enabled)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+
+	if (enabled && !ahsta->use_4addr_set) {
+		ieee80211_queue_work(ah->hw, &ahsta->set_4addr_wk);
+		ahsta->use_4addr_set = true;
+	}
+}
+
 static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
 					struct ieee80211_vif *vif,
 					struct ieee80211_sta *sta,
-					u32 changed)
+					u32 changed /*, u8 link_id */)
 {
-	struct ath12k *ar = hw->priv;
-	struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	u8 link_id = 0;
+	struct ath12k *ar = NULL;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_vif *ahvif = (void *)vif->drv_priv;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_link_sta *arsta;
 	struct ath12k_peer *peer;
 	u32 bw, smps;
+	struct ieee80211_link_sta *link_sta;
+
+	arvif = ahvif->link[link_id];
+	arsta = ahsta->link[link_id];
+
+	if (!arsta) {
+		ath12k_err(NULL, "unable to determine arsta\n");
+		return;
+	}
+
+	if (!arvif) {
+		ath12k_err(NULL, "unable to determine device for sta update\n");
+		return;
+	}
+
+	if (arvif->is_created)
+		ar = arvif->ar;
+
+	if (!ar) {
+		ath12k_err(NULL, "unable to determine device for sta update\n");
+		return;
+	}
 
 	spin_lock_bh(&ar->ab->base_lock);
 
-	peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+	peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
 	if (!peer) {
 		spin_unlock_bh(&ar->ab->base_lock);
 		ath12k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n",
-			    sta->addr, arvif->vdev_id);
+			   arsta->addr, arvif->vdev_id);
 		return;
 	}
 
 	spin_unlock_bh(&ar->ab->base_lock);
 
+	rcu_read_lock();
+	link_sta = rcu_dereference(sta->link[link_id]);
+
+	if (!link_sta) {
+		rcu_read_unlock();
+		return;
+	}
+
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
 		   "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
-		   sta->addr, changed, sta->deflink.bandwidth, sta->deflink.rx_nss,
-		   sta->deflink.smps_mode);
+		   arsta->addr, changed, link_sta->bandwidth, link_sta->rx_nss,
+		   link_sta->smps_mode);
 
 	spin_lock_bh(&ar->data_lock);
 
@@ -3692,12 +8027,12 @@
 	}
 
 	if (changed & IEEE80211_RC_NSS_CHANGED)
-		arsta->nss = sta->deflink.rx_nss;
+		arsta->nss = link_sta->rx_nss;
 
 	if (changed & IEEE80211_RC_SMPS_CHANGED) {
 		smps = WMI_PEER_SMPS_PS_NONE;
 
-		switch (sta->deflink.smps_mode) {
+		switch (link_sta->smps_mode) {
 		case IEEE80211_SMPS_AUTOMATIC:
 		case IEEE80211_SMPS_OFF:
 			smps = WMI_PEER_SMPS_PS_NONE;
@@ -3710,7 +8045,7 @@
 			break;
 		default:
 			ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
-				    sta->deflink.smps_mode, sta->addr);
+				    link_sta->smps_mode, arsta->addr);
 			smps = WMI_PEER_SMPS_PS_NONE;
 			break;
 		}
@@ -3721,18 +8056,19 @@
 	arsta->changed |= changed;
 
 	spin_unlock_bh(&ar->data_lock);
+	rcu_read_unlock();
 
 	ieee80211_queue_work(hw, &arsta->update_wk);
 }
 
-static int ath12k_conf_tx_uapsd(struct ath12k *ar, struct ieee80211_vif *vif,
+static int ath12k_conf_tx_uapsd(struct ath12k *ar, struct ath12k_link_vif *arvif,
 				u16 ac, bool enable)
 {
-	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+	struct ath12k_vif *ahvif = arvif->ahvif;
 	u32 value;
 	int ret;
 
-	if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+	if (ahvif->vdev_type != WMI_VDEV_TYPE_STA)
 		return 0;
 
 	switch (ac) {
@@ -3754,20 +8090,21 @@
 		break;
 	}
 
+	/* TODO: should this be stored per-link instead? */
 	if (enable)
-		arvif->u.sta.uapsd |= value;
+		ahvif->u.sta.uapsd |= value;
 	else
-		arvif->u.sta.uapsd &= ~value;
+		ahvif->u.sta.uapsd &= ~value;
 
 	ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
 					  WMI_STA_PS_PARAM_UAPSD,
-					  arvif->u.sta.uapsd);
+					  ahvif->u.sta.uapsd);
 	if (ret) {
 		ath12k_warn(ar->ab, "could not set uapsd params %d\n", ret);
 		goto exit;
 	}
 
-	if (arvif->u.sta.uapsd)
+	if (ahvif->u.sta.uapsd)
 		value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
 	else
 		value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
@@ -3782,17 +8119,17 @@
 	return ret;
 }
 
-static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
-				 struct ieee80211_vif *vif,
-				 unsigned int link_id, u16 ac,
+int ath12k_mac_conf_tx(struct ath12k *ar,
+		       struct ath12k_link_vif *arvif, u16 ac,
 				 const struct ieee80211_tx_queue_params *params)
 {
-	struct ath12k *ar = hw->priv;
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
 	struct wmi_wmm_params_arg *p = NULL;
 	int ret;
 
-	mutex_lock(&ar->conf_mutex);
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (!arvif->is_created)
+		return -EINVAL;
 
 	switch (ac) {
 	case IEEE80211_AC_VO:
@@ -3826,13 +8163,56 @@
 		goto exit;
 	}
 
-	ret = ath12k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
+	ret = ath12k_conf_tx_uapsd(ar, arvif, ac, params->uapsd);
 
 	if (ret)
 		ath12k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret);
 
 exit:
+	return ret;
+}
+
+static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif,
+				 unsigned int link_id, u16 ac,
+				 const struct ieee80211_tx_queue_params *params)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	struct ath12k_vif *ahvif = (void *)vif->drv_priv;
+	struct ath12k_link_vif *arvif;
+	int ret;
+
+	if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+		return -EINVAL;
+
+	mutex_lock(&ah->conf_mutex);
+	arvif = ahvif->link[link_id];
+
+	/* If the tx config is received before a vdev is assigned to the vif,
+	 * cache the info so it can be applied once the vdev is created.
+	 */
+	if (arvif == NULL || !arvif->is_created) {
+		ath12k_dbg(NULL, ATH12K_DBG_MAC,
+			   "tx queue params cached since vif is not assigned to radio\n");
+		ahvif->cache[link_id].tx_conf.changed = true;
+		ahvif->cache[link_id].tx_conf.ac = ac;
+		ahvif->cache[link_id].tx_conf.tx_queue_params = *params;
+		mutex_unlock(&ah->conf_mutex);
+		return 0;
+	}
+	mutex_unlock(&ah->conf_mutex);
+
+	ar = arvif->ar;
+	if (!ar) {
+		ath12k_err(NULL, "Failed to config tx\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ar->conf_mutex);
+	ret = ath12k_mac_conf_tx(ar, arvif, ac, params);
 	mutex_unlock(&ar->conf_mutex);
+
 	return ret;
 }
 
@@ -3902,10 +8282,11 @@
 	return ht_cap;
 }
 
-static int ath12k_mac_set_txbf_conf(struct ath12k_vif *arvif)
+static int ath12k_mac_set_txbf_conf(struct ath12k_link_vif *arvif)
 {
 	u32 value = 0;
 	struct ath12k *ar = arvif->ar;
+	struct ath12k_vif *ahvif = arvif->ahvif;
 	int nsts;
 	int sound_dim;
 	u32 vht_cap = ar->pdev->cap.vht_cap;
@@ -3933,7 +8314,7 @@
 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
 
 		if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) &&
-		    arvif->vdev_type == WMI_VDEV_TYPE_AP)
+		    ahvif->vdev_type == WMI_VDEV_TYPE_AP)
 			value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
 	}
 
@@ -3941,7 +8322,7 @@
 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
 
 		if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
-		    arvif->vdev_type == WMI_VDEV_TYPE_STA)
+		    ahvif->vdev_type == WMI_VDEV_TYPE_STA)
 			value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
 	}
 
@@ -4002,12 +8383,11 @@
 	vht_cap.vht_supported = 1;
 	vht_cap.cap = ar->pdev->cap.vht_cap;
 
-	ath12k_set_vht_txbf_cap(ar, &vht_cap.cap);
+	if (ar->pdev->cap.nss_ratio_enabled)
+		vht_cap.vht_mcs.tx_highest |=
+			cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
 
-	/* TODO: Enable back VHT160 mode once association issues are fixed */
-	/* Disabling VHT160 and VHT80+80 modes */
-	vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
-	vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
+	ath12k_set_vht_txbf_cap(ar, &vht_cap.cap);
 
 	rxmcs_map = 0;
 	txmcs_map = 0;
@@ -4029,6 +8409,12 @@
 	vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map);
 	vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map);
 
+	/* Check if the HW supports 1:1 NSS ratio and reset
+	 * EXT NSS BW Support field to 0 to indicate 1:1 ratio
+	 */
+	if (ar->pdev->cap.nss_ratio_info == WMI_NSS_RATIO_1_NSS)
+		vht_cap.cap &= ~IEEE80211_VHT_CAP_EXT_NSS_BW_MASK;
+
 	return vht_cap;
 }
 
@@ -4051,6 +8437,8 @@
 			*ht_cap_info = ht_cap;
 		band->ht_cap = ath12k_create_ht_cap(ar, ht_cap,
 						    rate_cap_rx_chainmask);
+		// band->vht_cap = ath12k_create_vht_cap(ar, rate_cap_tx_chainmask,
+		// 				    rate_cap_rx_chainmask);
 	}
 
 	if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
@@ -4067,15 +8455,6 @@
 	}
 }
 
-static int ath12k_check_chain_mask(struct ath12k *ar, u32 ant, bool is_tx_ant)
-{
-	/* TODO: Check the request chainmask against the supported
-	 * chainmask table which is advertised in extented_service_ready event
-	 */
-
-	return 0;
-}
-
 static void ath12k_gen_ppe_thresh(struct ath12k_wmi_ppe_threshold_arg *fw_ppet,
 				  u8 *he_ppet)
 {
@@ -4105,6 +8484,48 @@
 	}
 }
 
+static void ath12k_gen_eht_ppe_thresh(struct ath12k_ppe_threshold *fw_ppet,
+				      struct ieee80211_sta_eht_cap *cap)
+{
+	int nss, ru;
+	u8 len = 0, bit = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
+
+	len = hweight8(fw_ppet->ru_bit_mask);
+	len *= (1 + fw_ppet->numss_m1);
+
+	len = (len * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE) +
+	      IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
+	len = DIV_ROUND_UP(len, 8);
+
+	cap->eht_ppe_thres[0] = FIELD_PREP(IEEE80211_EHT_PPE_THRES_NSS_MASK,
+				       fw_ppet->numss_m1);
+	cap->eht_ppe_thres[0] |= FIELD_PREP((u8)IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK,
+					fw_ppet->ru_bit_mask);
+	cap->eht_ppe_thres[1] |= (fw_ppet->ru_bit_mask >>
+			      (8 - IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_POS));
+
+	for (nss = 0; nss <= fw_ppet->numss_m1; nss++) {
+		for (ru = 0;
+		     ru < hweight8(IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+		     ru++) {
+			u8 val;
+			int i;
+
+			if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0)
+				continue;
+			val = (fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) &
+			       0x3f;
+			val = ((val >> 3) & 0x7) | ((val & 0x7) << 3);
+			for (i = (2 * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE) - 1;
+			     i >= 0; i--) {
+				cap->eht_ppe_thres[bit / 8] |=
+					((val >> i) & 0x1) << ((bit % 8));
+				bit++;
+			}
+		}
+	}
+}
+
 static void
 ath12k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem)
 {
@@ -4166,14 +8587,6 @@
 	    IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
 	    IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
 	he_cap_elem->phy_cap_info[8] &= ~m;
-
-	m = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
-	    IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
-	    IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
-	    IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
-	    IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
-	    IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
-	he_cap_elem->phy_cap_info[9] &= ~m;
 }
 
 static __le16 ath12k_mac_setup_he_6ghz_cap(struct ath12k_pdev_cap *pcap,
@@ -4206,30 +8619,62 @@
 	return cpu_to_le16(bcap->he_6ghz_capa);
 }
 
-static int ath12k_mac_copy_he_cap(struct ath12k *ar,
+static void ath12k_mac_set_hemcsmap(struct ath12k *ar,
 				  struct ath12k_pdev_cap *cap,
-				  struct ieee80211_sband_iftype_data *data,
-				  int band)
+				    struct ieee80211_sta_he_cap *he_cap)
 {
-	int i, idx = 0;
+	u16 txmcs_map = 0, rxmcs_map = 0;
+	u16 txmcs_map_160 = 0, rxmcs_map_160 = 0;
+	u32 i = 0;
+	u8 maxtxnss_160 = ath12k_get_nss_160mhz(ar, ar->num_tx_chains);
+	u8 maxrxnss_160 = ath12k_get_nss_160mhz(ar, ar->num_rx_chains);
 
-	for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
-		struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap;
-		struct ath12k_band_cap *band_cap = &cap->band[band];
-		struct ieee80211_he_cap_elem *he_cap_elem =
-				&he_cap->he_cap_elem;
+	for (i = 0; i < 8; i++) {
+		if (i < ar->num_tx_chains &&
+		    (ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift) & BIT(i))
+			txmcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
+		else
+			txmcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
 
-		switch (i) {
-		case NL80211_IFTYPE_STATION:
-		case NL80211_IFTYPE_AP:
-		case NL80211_IFTYPE_MESH_POINT:
-			break;
+		if (i < ar->num_rx_chains &&
+		    (ar->cfg_rx_chainmask >> cap->tx_chain_mask_shift) & BIT(i))
+			rxmcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
+		else
+			rxmcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
 
-		default:
-			continue;
+		if (i < maxtxnss_160 &&
+		    (ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift) & BIT(i))
+			txmcs_map_160 |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
+		else
+			txmcs_map_160 |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
+
+		if (i < maxrxnss_160 &&
+		    (ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift) & BIT(i))
+			rxmcs_map_160 |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
+		else
+			rxmcs_map_160 |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
 		}
 
-		data[idx].types_mask = BIT(i);
+	he_cap->he_mcs_nss_supp.rx_mcs_80 =
+		cpu_to_le16(rxmcs_map & 0xffff);
+	he_cap->he_mcs_nss_supp.tx_mcs_80 =
+		cpu_to_le16(txmcs_map & 0xffff);
+	he_cap->he_mcs_nss_supp.rx_mcs_160 =
+		cpu_to_le16(rxmcs_map_160 & 0xffff);
+	he_cap->he_mcs_nss_supp.tx_mcs_160 =
+		cpu_to_le16(txmcs_map_160 & 0xffff);
+	he_cap->he_mcs_nss_supp.rx_mcs_80p80 =
+		cpu_to_le16(rxmcs_map_160 & 0xffff);
+	he_cap->he_mcs_nss_supp.tx_mcs_80p80 =
+		cpu_to_le16(txmcs_map_160 & 0xffff);
+}
+
+static void ath12k_mac_copy_he_cap(struct ath12k *ar,
+				   struct ath12k_band_cap *band_cap,
+				   struct ieee80211_sta_he_cap *he_cap,
+				   struct ieee80211_he_cap_elem *he_cap_elem,
+				   int iftype)
+{
 		he_cap->has_he = true;
 		memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info,
 		       sizeof(he_cap_elem->mac_cap_info));
@@ -4238,27 +8683,25 @@
 
 		he_cap_elem->mac_cap_info[1] &=
 			IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK;
-
+	he_cap_elem->phy_cap_info[0] &=
+		IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+		IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+		IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+	he_cap_elem->phy_cap_info[0] &=
+		~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
 		he_cap_elem->phy_cap_info[5] &=
 			~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK;
-		he_cap_elem->phy_cap_info[5] &=
-			~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
 		he_cap_elem->phy_cap_info[5] |= ar->num_tx_chains - 1;
 
-		switch (i) {
+	switch (iftype) {
 		case NL80211_IFTYPE_AP:
+		he_cap_elem->mac_cap_info[2] &= ~IEEE80211_HE_MAC_CAP2_BCAST_TWT;
 			he_cap_elem->phy_cap_info[3] &=
 				~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK;
-			he_cap_elem->phy_cap_info[9] |=
-				IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
 			break;
 		case NL80211_IFTYPE_STATION:
-			he_cap_elem->mac_cap_info[0] &=
-				~IEEE80211_HE_MAC_CAP0_TWT_RES;
-			he_cap_elem->mac_cap_info[0] |=
-				IEEE80211_HE_MAC_CAP0_TWT_REQ;
-			he_cap_elem->phy_cap_info[9] |=
-				IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+		he_cap_elem->mac_cap_info[0] &= ~IEEE80211_HE_MAC_CAP0_TWT_RES;
+		he_cap_elem->mac_cap_info[0] |= IEEE80211_HE_MAC_CAP0_TWT_REQ;
 			break;
 		case NL80211_IFTYPE_MESH_POINT:
 			ath12k_mac_filter_he_cap_mesh(he_cap_elem);
@@ -4278,30 +8721,192 @@
 		he_cap->he_mcs_nss_supp.tx_mcs_80p80 =
 			cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
 
+	ath12k_mac_set_hemcsmap(ar, &ar->pdev->cap, he_cap);
 		memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
 		if (he_cap_elem->phy_cap_info[6] &
 		    IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
 			ath12k_gen_ppe_thresh(&band_cap->he_ppet,
 					      he_cap->ppe_thres);
+}
+
+static void ath12k_gen_eht_mcs_nss(struct ath12k_band_cap *band_cap,
+                                   struct ieee80211_eht_mcs_nss_supp *mcs_nss,
+                                   const struct ieee80211_he_cap_elem *he_cap,
+                                   const struct ieee80211_eht_cap_elem_fixed *eht_cap)
+{
+        if ((he_cap->phy_cap_info[0] &
+            (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+            IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+            IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+            IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)) == 0)
+		memcpy(&mcs_nss->only_20mhz, &band_cap->eht_mcs_20_only,
+		       sizeof(struct ieee80211_eht_mcs_nss_supp_20mhz_only));
+
+        if (he_cap->phy_cap_info[0] &
+            (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+            IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G))
+		memcpy(&mcs_nss->bw._80, &band_cap->eht_mcs_80,
+		       sizeof(struct ieee80211_eht_mcs_nss_supp_bw));
+
+	if (he_cap->phy_cap_info[0] &
+	    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+		memcpy(&mcs_nss->bw._160, &band_cap->eht_mcs_160,
+		       sizeof(struct ieee80211_eht_mcs_nss_supp_bw));
+
+	if (eht_cap->phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
+		memcpy(&mcs_nss->bw._320, &band_cap->eht_mcs_320,
+		       sizeof(struct ieee80211_eht_mcs_nss_supp_bw));
+}
+
+static void
+ath12k_mac_filter_eht_cap_mesh(struct ieee80211_eht_cap_elem_fixed *eht_cap_elem)
+{
+	u8 m;
+
+	m = IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS;
+	eht_cap_elem->mac_cap_info[0] &= ~m;
+
+	m = IEEE80211_EHT_MAC_CAP1_TWO_BQRS_SUPP |
+	    IEEE80211_EHT_MAC_CAP1_EHT_LINK_ADAPTATION_SUPP;
+	eht_cap_elem->mac_cap_info[1] &= ~m;
+
+	m = IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO;
+	eht_cap_elem->phy_cap_info[0] &= ~m;
+
+	m = IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+	    IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+	    IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+	    IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK;
+	eht_cap_elem->phy_cap_info[3] &= ~m;
+
+	m = IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+	    IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+	    IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+	    IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI;
+	eht_cap_elem->phy_cap_info[4] &= ~m;
+
+	m = IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+	    IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+	    IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+	    IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK;
+	eht_cap_elem->phy_cap_info[5] &= ~m;
+
+	m = IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK;
+	eht_cap_elem->phy_cap_info[6] &= ~m;
+
+	m = IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+	    IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+	    IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
+	    IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+	    IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
+	    IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ;
+	eht_cap_elem->phy_cap_info[7] &= ~m;
+
+	m = IEEE80211_EHT_PHY_CAP8_20MHZ_ONLY_CAPS |
+	    IEEE80211_EHT_PHY_CAP8_20MHZ_ONLY_TRIGGER_MUBF_FL_BW_FB_DLMUMIMO |
+	    IEEE80211_EHT_PHY_CAP8_20MHZ_ONLY_MRU_SUPP;
+	eht_cap_elem->phy_cap_info[8] &= ~m;
+}
+
+static void ath12k_mac_copy_eht_cap(struct ath12k *ar,
+				    struct ath12k_band_cap *band_cap,
+				    struct ieee80211_he_cap_elem *he_cap_elem,
+				    struct ieee80211_sta_eht_cap *eht_cap,
+				    struct ieee80211_eht_cap_elem_fixed *eht_cap_elem,
+				    int iftype)
+{
+	memset(eht_cap, 0, sizeof(struct ieee80211_sta_eht_cap));
+
+	eht_cap->has_eht = true;
+	memcpy(eht_cap_elem->mac_cap_info, band_cap->eht_cap_mac_info,
+	       sizeof(eht_cap_elem->mac_cap_info));
+	memcpy(eht_cap_elem->phy_cap_info, band_cap->eht_cap_phy_info,
+	       sizeof(eht_cap_elem->phy_cap_info));
+
+	switch (iftype) {
+	case NL80211_IFTYPE_AP:
+		eht_cap_elem->phy_cap_info[0] &=
+			~IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ;
+		eht_cap_elem->phy_cap_info[4] &=
+			~IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO;
+		break;
+	case NL80211_IFTYPE_STATION:
+		eht_cap_elem->phy_cap_info[7] &=
+			~(IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+			  IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+			  IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ);
+		eht_cap_elem->phy_cap_info[7] &=
+			~(IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+			  IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
+			  IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ);
+		break;
+	case NL80211_IFTYPE_MESH_POINT:
+		ath12k_mac_filter_eht_cap_mesh(eht_cap_elem);
+		break;
+	default:
+		break;
+	}
 
+	ath12k_gen_eht_mcs_nss(band_cap, &eht_cap->eht_mcs_nss_supp,
+				he_cap_elem, eht_cap_elem);
+
+	if (eht_cap_elem->phy_cap_info[5] &
+	    IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT)
+		ath12k_gen_eht_ppe_thresh(&band_cap->eht_ppet, eht_cap);
+}
+
+static int ath12k_mac_copy_he_eht_cap(struct ath12k *ar,
+				      struct ath12k_pdev_cap *cap,
+				      struct ieee80211_sband_iftype_data *data,
+				      int band)
+{
+	struct ath12k_band_cap *band_cap = &cap->band[band];
+	struct ieee80211_sta_he_cap *he_cap;
+	struct ieee80211_he_cap_elem *he_cap_elem;
+	struct ieee80211_sta_eht_cap *eht_cap;
+	struct ieee80211_eht_cap_elem_fixed *eht_cap_elem;
+	int i, idx = 0;
+
+	for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
+		he_cap = &data[idx].he_cap;
+		he_cap_elem = &he_cap->he_cap_elem;
+		eht_cap = &data[idx].eht_cap;
+		eht_cap_elem = &eht_cap->eht_cap_elem;
+
+		switch (i) {
+		case NL80211_IFTYPE_STATION:
+		case NL80211_IFTYPE_AP:
+		case NL80211_IFTYPE_MESH_POINT:
+			break;
+
+		default:
+			continue;
+		}
+
+		data[idx].types_mask = BIT(i);
+		
+		ath12k_mac_copy_he_cap(ar, band_cap, he_cap, he_cap_elem, i);
 		if (band == NL80211_BAND_6GHZ) {
 			data[idx].he_6ghz_capa.capa =
 				ath12k_mac_setup_he_6ghz_cap(cap, band_cap);
 		}
+		ath12k_mac_copy_eht_cap(ar, band_cap, he_cap_elem, eht_cap,
+					eht_cap_elem, i);
+
 		idx++;
 	}
 
 	return idx;
 }
 
-static void ath12k_mac_setup_he_cap(struct ath12k *ar,
+static void ath12k_mac_setup_he_eht_cap(struct ath12k *ar,
 				    struct ath12k_pdev_cap *cap)
 {
 	struct ieee80211_supported_band *band;
 	int count;
 
 	if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
-		count = ath12k_mac_copy_he_cap(ar, cap,
+		count = ath12k_mac_copy_he_eht_cap(ar, cap,
 					       ar->mac.iftype[NL80211_BAND_2GHZ],
 					       NL80211_BAND_2GHZ);
 		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
@@ -4309,8 +8914,9 @@
 		band->n_iftype_data = count;
 	}
 
-	if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
-		count = ath12k_mac_copy_he_cap(ar, cap,
+	if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+	    !ar->supports_6ghz) {
+		count = ath12k_mac_copy_he_eht_cap(ar, cap,
 					       ar->mac.iftype[NL80211_BAND_5GHZ],
 					       NL80211_BAND_5GHZ);
 		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
@@ -4320,13 +8926,14 @@
 
 	if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
 	    ar->supports_6ghz) {
-		count = ath12k_mac_copy_he_cap(ar, cap,
+		count = ath12k_mac_copy_he_eht_cap(ar, cap,
 					       ar->mac.iftype[NL80211_BAND_6GHZ],
 					       NL80211_BAND_6GHZ);
 		band = &ar->mac.sbands[NL80211_BAND_6GHZ];
 		band->iftype_data = ar->mac.iftype[NL80211_BAND_6GHZ];
 		band->n_iftype_data = count;
 	}
+
 }
 
 static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant)
@@ -4335,15 +8942,23 @@
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	if (ath12k_check_chain_mask(ar, tx_ant, true))
-		return -EINVAL;
-
-	if (ath12k_check_chain_mask(ar, rx_ant, false))
-		return -EINVAL;
+	/* Since we advertised the combined max capability of all radios during
+	 * wiphy registration, ensure we don't set the antenna config higher
+	 * than this radio's limits.
+	 */
+	tx_ant = min_t(u32, tx_ant, ar->pdev->cap.tx_chain_mask);
+	rx_ant = min_t(u32, rx_ant, ar->pdev->cap.rx_chain_mask);
 
 	ar->cfg_tx_chainmask = tx_ant;
 	ar->cfg_rx_chainmask = rx_ant;
 
+	ar->num_tx_chains = hweight32(tx_ant);
+	ar->num_rx_chains = hweight32(rx_ant);
+
+	/* Reload HT/VHT/HE capability */
+	ath12k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL);
+	ath12k_mac_setup_he_eht_cap(ar, &ar->pdev->cap);
+
 	if (ar->state != ATH12K_STATE_ON &&
 	    ar->state != ATH12K_STATE_RESTARTED)
 		return 0;
@@ -4351,79 +8966,104 @@
 	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK,
 					tx_ant, ar->pdev->pdev_id);
 	if (ret) {
-		ath12k_warn(ar->ab, "failed to set tx-chainmask: %d, req 0x%x\n",
+		ath12k_err(ar->ab, "failed to set tx-chainmask: %d, req 0x%x\n",
 			    ret, tx_ant);
 		return ret;
 	}
 
-	ar->num_tx_chains = hweight32(tx_ant);
-
 	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RX_CHAIN_MASK,
 					rx_ant, ar->pdev->pdev_id);
 	if (ret) {
-		ath12k_warn(ar->ab, "failed to set rx-chainmask: %d, req 0x%x\n",
+		ath12k_err(ar->ab, "failed to set rx-chainmask: %d, req 0x%x\n",
 			    ret, rx_ant);
 		return ret;
 	}
 
-	ar->num_rx_chains = hweight32(rx_ant);
+	return 0;
+}
 
-	/* Reload HT/VHT/HE capability */
-	ath12k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL);
-	ath12k_mac_setup_he_cap(ar, &ar->pdev->cap);
+static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb)
+{
+	int num_mgmt = 0;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
-	return 0;
+	if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+		num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
+
+	ieee80211_free_txskb(ar->ah->hw, skb);
+
+	if (num_mgmt < 0)
+		WARN_ON_ONCE(1);
+
+	if (atomic_read(&ar->flush_request) && !num_mgmt)
+		wake_up(&ar->tx_empty_waitq);
 }
 
-int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+static void ath12k_mac_tx_mgmt_free(struct ath12k *ar, int buf_id)
 {
-	struct sk_buff *msdu = skb;
+	struct sk_buff *msdu;
 	struct ieee80211_tx_info *info;
-	struct ath12k *ar = ctx;
-	struct ath12k_base *ab = ar->ab;
 
 	spin_lock_bh(&ar->txmgmt_idr_lock);
-	idr_remove(&ar->txmgmt_idr, buf_id);
+	msdu = idr_remove(&ar->txmgmt_idr, buf_id);
 	spin_unlock_bh(&ar->txmgmt_idr_lock);
-	dma_unmap_single(ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len,
+
+	if (!msdu)
+		return;
+
+	dma_unmap_single(ar->ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len,
 			 DMA_TO_DEVICE);
 
 	info = IEEE80211_SKB_CB(msdu);
 	memset(&info->status, 0, sizeof(info->status));
 
-	ieee80211_free_txskb(ar->hw, msdu);
+	ath12k_mgmt_over_wmi_tx_drop(ar, msdu);
 
-	return 0;
 }
 
-static int ath12k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)
+int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
 {
-	struct ieee80211_vif *vif = ctx;
-	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
-	struct sk_buff *msdu = skb;
-	struct ath12k *ar = skb_cb->ar;
-	struct ath12k_base *ab = ar->ab;
+	struct ath12k_mac_tx_mgmt_free_arg *arg = ctx;
+	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB((struct sk_buff *)skb);
+	struct ath12k *ar = NULL;
 
-	if (skb_cb->vif == vif) {
-		spin_lock_bh(&ar->txmgmt_idr_lock);
-		idr_remove(&ar->txmgmt_idr, buf_id);
-		spin_unlock_bh(&ar->txmgmt_idr_lock);
-		dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len,
-				 DMA_TO_DEVICE);
+	if (u8_get_bits(arg->type, ATH12K_MAC_TX_MGMT_FREE_TYPE_PDEV))
+		ar = arg->ar;
+
+	/* If the radio is invalid, don't proceed */
+	if (!ar)
+		goto out;
+
+	/* If the vif is valid, free only the entries matching that vif */
+	if (u8_get_bits(arg->type, ATH12K_MAC_TX_MGMT_FREE_TYPE_VIF)) {
+		if (skb_cb->vif != arg->vif)
+			goto out;
 	}
 
+	/* If the link_id is valid, free only the entries matching that link_id */
+	if (u8_get_bits(arg->type, ATH12K_MAC_TX_MGMT_FREE_TYPE_LINK)) {
+		if ((skb_cb->link_id != arg->link_id) &&
+		    (skb_cb->link_id != IEEE80211_MLD_MAX_NUM_LINKS))
+			goto out;
+	}
+
+	ath12k_mac_tx_mgmt_free(arg->ar, buf_id);
+
+out:
 	return 0;
 }
 
-static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif,
+static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arvif,
 				  struct sk_buff *skb)
 {
 	struct ath12k_base *ab = ar->ab;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct ieee80211_tx_info *info;
 	dma_addr_t paddr;
+	struct ath12k_skb_cb *skb_cb;
 	int buf_id;
 	int ret;
+	bool link_agnostic;
 
 	spin_lock_bh(&ar->txmgmt_idr_lock);
 	buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
@@ -4432,13 +9072,26 @@
 	if (buf_id < 0)
 		return -ENOSPC;
 
-	info = IEEE80211_SKB_CB(skb);
-	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
+	skb_cb = ATH12K_SKB_CB(skb);
+	if (!(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP)) {
 		if ((ieee80211_is_action(hdr->frame_control) ||
 		     ieee80211_is_deauth(hdr->frame_control) ||
 		     ieee80211_is_disassoc(hdr->frame_control)) &&
 		     ieee80211_has_protected(hdr->frame_control)) {
-			skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+			int mic_len;
+
+			switch(skb_cb->cipher) {
+				case WLAN_CIPHER_SUITE_CCMP:
+					mic_len = IEEE80211_CCMP_MIC_LEN;
+					break;
+				case WLAN_CIPHER_SUITE_GCMP:
+				case WLAN_CIPHER_SUITE_GCMP_256:
+					mic_len = IEEE80211_GCMP_MIC_LEN;
+					break;
+				default:
+					mic_len = IEEE80211_CCMP_MIC_LEN;
+			}
+			skb_put(skb, mic_len);
 		}
 	}
 
@@ -4449,9 +9102,16 @@
 		goto err_free_idr;
 	}
 
-	ATH12K_SKB_CB(skb)->paddr = paddr;
+	skb_cb->paddr = paddr;
 
-	ret = ath12k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
+	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+		ret = ath12k_wmi_offchan_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
+	} else {
+		link_agnostic = ATH12K_SKB_CB(skb)->flags & ATH12K_SKB_MGMT_LINK_AGNOSTIC;
+
+		ret = ath12k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb,
+					   link_agnostic);
+	}
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret);
 		goto err_unmap_buf;
@@ -4460,7 +9120,7 @@
 	return 0;
 
 err_unmap_buf:
-	dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
+	dma_unmap_single(ab->dev, skb_cb->paddr,
 			 skb->len, DMA_TO_DEVICE);
 err_free_idr:
 	spin_lock_bh(&ar->txmgmt_idr_lock);
@@ -4475,50 +9135,262 @@
 	struct sk_buff *skb;
 
 	while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL)
-		ieee80211_free_txskb(ar->hw, skb);
+		ath12k_mgmt_over_wmi_tx_drop(ar, skb);
+}
+
+static int ath12k_mac_mgmt_action_frame_fill_elem(struct ath12k_link_vif *arvif,
+						  struct sk_buff *skb)
+{
+	struct ath12k *ar = arvif->ar;
+	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_mgmt *mgmt;
+	struct ieee80211_bss_conf *link_conf;
+	struct stats_request_params req_param;
+	struct ath12k_fw_stats_pdev *pdev;
+	int ret, cur_tx_power, max_tx_power;
+	bool has_protected;
+	u8 category, *buf, iv_len;
+	u8 action_code, dialog_token;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	/* make sure category field is present */
+	if (skb->len < IEEE80211_MIN_ACTION_SIZE) {
+		skb_cb->flags &= ~ATH12K_SKB_MGMT_LINK_AGNOSTIC;
+		return -EINVAL;
+	}
+
+	has_protected = ieee80211_has_protected(hdr->frame_control);
+
+	/* SW_CRYPTO and hdr protected case (PMF), packet will be encrypted,
+	 * we can't put in data in this case
+	 */
+	if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->ag->dev_flags) &&
+	    has_protected) {
+		skb_cb->flags &= ~ATH12K_SKB_MGMT_LINK_AGNOSTIC;
+		return -EOPNOTSUPP;
+	}
+
+	mgmt = (struct ieee80211_mgmt *)hdr;
+	buf = (u8 *)&mgmt->u.action;
+
+	/* A protected (FCTL_PROTECTED) frame may have extra space reserved for
+	 * the cipher header; skip past that many bytes if present.
+	 */
+	if (has_protected) {
+		switch (skb_cb->cipher) {
+		/* IV space is reserved only for the CCMP cipher suite; we asked
+		 * for it by setting %IEEE80211_KEY_FLAG_GENERATE_IV_MGMT on the
+		 * key. See ath12k_install_key().
+		 */
+		case WLAN_CIPHER_SUITE_CCMP:
+			iv_len = IEEE80211_CCMP_HDR_LEN;
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+		case WLAN_CIPHER_SUITE_CCMP_256:
+		case WLAN_CIPHER_SUITE_GCMP:
+		case WLAN_CIPHER_SUITE_GCMP_256:
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+		case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+			iv_len = 0;
+			break;
+		default:
+			skb_cb->flags &= ~ATH12K_SKB_MGMT_LINK_AGNOSTIC;
+			return -EINVAL;
+		}
+
+		buf = buf + iv_len;
+	}
+
+	category = *buf++;
+
+	switch (category) {
+	case WLAN_CATEGORY_RADIO_MEASUREMENT:
+		/* Packet Format:
+		 *	Action Code | Dialog Token | Variable Len (based on Action Code)
+		 */
+		action_code = *buf++;
+		dialog_token = *buf++;
+
+		rcu_read_lock();
+
+		link_conf = ath12k_get_link_bss_conf(arvif);
+
+		if (!link_conf) {
+			rcu_read_unlock();
+			ath12k_warn(ar->ab, "unable to access bss link conf\n");
+			return -EINVAL;
+		}
+
+		cur_tx_power = link_conf->txpower;
+		max_tx_power = min(link_conf->chanctx_conf->def.chan->max_reg_power,
+				   (int)ar->max_tx_power / 2);
+
+		rcu_read_unlock();
+
+		/* fetch current tx power from FW pdev stats */
+		req_param.pdev_id = ar->pdev->pdev_id;
+		req_param.vdev_id = 0;
+		req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+		ret = ath12k_fw_stats_request(ar, &req_param);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to request fw pdev stats: %d\n", ret);
+			goto check_rm_action_frame;
+		}
+
+		spin_lock_bh(&ar->data_lock);
+		pdev = list_first_entry_or_null(&ar->fw_stats.pdevs,
+						struct ath12k_fw_stats_pdev,
+						list);
+		if (!pdev) {
+			spin_unlock_bh(&ar->data_lock);
+			goto check_rm_action_frame;
+		}
+
+		/* Tx power is set as 2 units per dBm in FW. */
+		cur_tx_power = pdev->chan_tx_power / 2;
+		spin_unlock_bh(&ar->data_lock);
+
+check_rm_action_frame:
+		switch (action_code) {
+		case WLAN_ACTION_RADIO_MSR_LINK_MSR_REQ:
+			/* Variable Len Format:
+			 *	Transmit Power | Max Tx Power
+			 * We fill both of these.
+			 */
+			*buf++ = cur_tx_power;
+			*buf = max_tx_power;
+
+			ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+				   "RRM: Link Measurement Req dialog_token=%u, cur_tx_power=%d, max_tx_power=%d\n",
+				   dialog_token, cur_tx_power, max_tx_power);
+			skb_cb->flags &= ~ATH12K_SKB_MGMT_LINK_AGNOSTIC;
+			break;
+		case WLAN_ACTION_RADIO_MSR_LINK_MSR_REP:
+			/* Variable Len Format:
+			 *	TPC Report | Variable Fields
+			 *
+			 * TPC Report Format:
+			 *	Element ID | Len | Tx Power | Link Margin
+			 *
+			 * We fill Tx power in the TPC Report (2nd index)
+			 */
+			buf[2] = cur_tx_power;
+
+			ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+				   "RRM: Link Measurement Resp dialog_token=%u, cur_tx_power=%d\n",
+				   dialog_token, cur_tx_power);
+			skb_cb->flags &= ~ATH12K_SKB_MGMT_LINK_AGNOSTIC;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		/* nothing to fill */
+		skb_cb->flags &= ~ATH12K_SKB_MGMT_LINK_AGNOSTIC;
+		return 0;
+	}
+
+	return 0;
+}
+
+static int ath12k_mac_mgmt_frame_fill_elem(struct ath12k_link_vif *arvif,
+					   struct sk_buff *skb)
+{
+	struct ath12k *ar = arvif->ar;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (!ieee80211_is_action(hdr->frame_control))
+		return 0;
+
+	return ath12k_mac_mgmt_action_frame_fill_elem(arvif, skb);
 }
 
 static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
 {
 	struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work);
 	struct ath12k_skb_cb *skb_cb;
-	struct ath12k_vif *arvif;
+	struct ath12k_vif *ahvif;
+	struct ath12k_link_vif *arvif;
 	struct sk_buff *skb;
 	int ret;
+	struct ath12k_hw *ah = ar->ah;
 
+	mutex_lock(&ah->conf_mutex);
 	while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
 		skb_cb = ATH12K_SKB_CB(skb);
 		if (!skb_cb->vif) {
 			ath12k_warn(ar->ab, "no vif found for mgmt frame\n");
-			ieee80211_free_txskb(ar->hw, skb);
+			ath12k_mgmt_over_wmi_tx_drop(ar, skb);
 			continue;
 		}
 
-		arvif = ath12k_vif_to_arvif(skb_cb->vif);
+		ahvif = ath12k_vif_to_ahvif(skb_cb->vif);
+		if (!(ahvif->links_map & BIT(skb_cb->link_id))) {
+			ath12k_warn(ar->ab,
+				    "invalid linkid 0x%X in mgmt over wmi tx with linkmap %lu\n",
+				    skb_cb->link_id,
+				    ahvif->links_map);
+			ath12k_mgmt_over_wmi_tx_drop(ar, skb);
+			continue;
+		}
+
+		arvif = ahvif->link[skb_cb->link_id];
+		if (!arvif) {
+			ath12k_warn(ar->ab, "invalid arvif for mgmt tx - link %d\n",
+				    skb_cb->link_id);
+			ath12k_mgmt_over_wmi_tx_drop(ar, skb);
+			continue;
+		}
+
+		mutex_lock(&ar->conf_mutex);
 		if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
 		    arvif->is_started) {
+			/* Fill in the fields the driver is responsible for,
+			 * e.g. max tx power in a Link Measurement Request/Report.
+			 */
+			ret = ath12k_mac_mgmt_frame_fill_elem(arvif, skb);
+			if (ret) {
+				/* If we couldn't fill in the data for any reason, still
+				 * transmit the packet rather than dropping it,
+				 * e.g. in the SW crypto + PMF case.
+				 */
+				ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+					   "Cant't fill in the required data for the mgmt packet. err=%d\n",
+					   ret);
+			}
+
 			ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb);
 			if (ret) {
 				ath12k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
 					    arvif->vdev_id, ret);
-				ieee80211_free_txskb(ar->hw, skb);
-			} else {
-				atomic_inc(&ar->num_pending_mgmt_tx);
+				ath12k_mgmt_over_wmi_tx_drop(ar, skb);
 			}
 		} else {
 			ath12k_warn(ar->ab,
-				    "dropping mgmt frame for vdev %d, is_started %d\n",
+				    "dropping mgmt frame for vdev %d, is_started %d link %d\n",
 				    arvif->vdev_id,
-				    arvif->is_started);
-			ieee80211_free_txskb(ar->hw, skb);
+				    arvif->is_started,
+				    skb_cb->link_id);
+			ath12k_mgmt_over_wmi_tx_drop(ar, skb);
 		}
+		mutex_unlock(&ar->conf_mutex);
 	}
+	mutex_unlock(&ah->conf_mutex);
 }
 
 static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
 			      bool is_prb_rsp)
 {
 	struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
 	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
 		return -ESHUTDOWN;
@@ -4530,66 +9402,484 @@
 	 */
 	if (is_prb_rsp &&
 	    atomic_read(&ar->num_pending_mgmt_tx) > ATH12K_PRB_RSP_DROP_THRESHOLD) {
-		ath12k_warn(ar->ab,
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
 			    "dropping probe response as pending queue is almost full\n");
-		return -ENOSPC;
+		return -EBUSY;
 	}
 
-	if (skb_queue_len(q) == ATH12K_TX_MGMT_NUM_PENDING_MAX) {
+	if (skb_queue_len_lockless(q) >= ATH12K_TX_MGMT_NUM_PENDING_MAX) {
 		ath12k_warn(ar->ab, "mgmt tx queue is full\n");
 		return -ENOSPC;
 	}
 
 	skb_queue_tail(q, skb);
-	ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+
+	/* For some of the off-channel frames in DPP, the host will not
+	 * receive a tx status; skip incrementing the pending frame count
+	 * for off-channel frames only, to avoid leaking the counter.
+	 */
+	if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+		atomic_inc(&ar->num_pending_mgmt_tx);
+
+	queue_work(ar->ab->workqueue_aux, &ar->wmi_mgmt_tx_work);
 
 	return 0;
 }
 
+static void ath12k_mlo_mcast_update_tx_link_address(struct ieee80211_vif *vif,
+						  u8 link, struct sk_buff *skb,
+						  u32 info_flags)
+{
+	struct ieee80211_bss_conf *bss_conf;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+	if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+		return;
+
+	bss_conf = rcu_dereference(vif->link_conf[link]);
+	if (bss_conf)
+		ether_addr_copy(hdr->addr2, bss_conf->addr);
+}
+
+/* This function must be called only for a mgmt frame destined to an ML STA;
+ * the corresponding sanity checks are therefore skipped here.
+ */
+static bool ath12k_mac_is_mgmt_link_agnostic(struct sk_buff *skb)
+{
+	struct ieee80211_mgmt *mgmt;
+	mgmt = (struct ieee80211_mgmt *)skb->data;
+
+	if (ieee80211_is_action(mgmt->frame_control))
+		return true;
+
+	/* TODO Extend as per requirement */
+	return false;
+}
+
+static u8 ath12k_mac_get_tx_link(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
+				 u8 link, struct sk_buff *skb, u32 info_flags)
+{
+	struct ath12k_sta *ahsta;
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ieee80211_link_sta *link_sta;
+	struct ieee80211_bss_conf *bss_conf;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+	/* Use the link id passed or the default vif link */
+	if (!sta) {
+		if (link != IEEE80211_LINK_UNSPECIFIED)
+			return link;
+
+		return ahvif->deflink.link_id;
+	}
+
+	ahsta = ath12k_sta_to_ahsta(sta);
+
+	/* Below translation ensures we pass proper A2 & A3 for non ML clients.
+	 * Also it assumes for now support only for MLO AP in this path
+	 */
+	if (!sta->mlo) {
+		 link = ahsta->deflink.link_id;
+
+		 if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+			 return link;
+
+		 bss_conf = rcu_dereference(vif->link_conf[link]);
+		 if (bss_conf) {
+			 ether_addr_copy(hdr->addr2, bss_conf->addr);
+			 if (!ieee80211_has_tods(hdr->frame_control) &&
+			     !ieee80211_has_fromds(hdr->frame_control))
+				 ether_addr_copy(hdr->addr3, bss_conf->addr);
+		}
+		return link;
+	}
+
+	/* enqueue eth data frames on primary link */
+	if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+		return ahsta->primary_link_id;
+	else if (ieee80211_is_data(hdr->frame_control))
+		return ahsta->primary_link_id;
+
+	/* 802.11 frame cases */
+	if (link == IEEE80211_LINK_UNSPECIFIED)
+		link = ahsta->deflink.link_id;
+
+	if (ieee80211_is_mgmt(hdr->frame_control)) {
+		/* For MLD STA post FW recovery, sending disassoc
+		 * in other link apart from primary link results in
+		 * disassoc packet getting dropped always, hence as
+		 * a workaround until FW enables it post recovery
+		 * this is needed to send disassoc always in primary
+		 * link
+		 */
+		if (ieee80211_is_disassoc(hdr->frame_control))
+			link = ahsta->deflink.link_id;
+
+		/* Perform address conversion for ML STA Tx */
+		bss_conf = rcu_dereference(vif->link_conf[link]);
+		link_sta = rcu_dereference(sta->link[link]);
+		if (bss_conf && link_sta) {
+			ether_addr_copy(hdr->addr1, link_sta->addr);
+			ether_addr_copy(hdr->addr2, bss_conf->addr);
+			if (vif->type == NL80211_IFTYPE_STATION && bss_conf->bssid)
+				ether_addr_copy(hdr->addr3, bss_conf->bssid);
+			else if (vif->type == NL80211_IFTYPE_AP)
+				ether_addr_copy(hdr->addr3, bss_conf->addr);
+		} else if (bss_conf) {
+			/* In certain cases where a ML sta associated and added subset of links
+			 * on which the ML AP is active, but now sends some frame (ex. Probe request)
+			 * on a different link which is active in our MLD but was not added during
+			 * previous association, we can still honor the Tx to that ML STA via the
+			 * requested link.
+			 * The control would reach here in such case only when that link address
+			 * is same as the MLD address or in worst case clients used MLD address at TA wrongly
+			 * which would have helped identify the ML sta object and pass it here.
+			 * If the link address of that STA is different from MLD address, then
+			 * the sta object would be NULL and control wont reach here but return at the
+			 * start of the function itself with !sta check. Also this would not need any
+			 * translation at hdr->addr1 from MLD to link address since the RA is the
+			 * MLD address (same as that link address ideally) already.
+			 */
+			ether_addr_copy(hdr->addr2, bss_conf->addr);
+			if (vif->type == NL80211_IFTYPE_STATION && bss_conf->bssid)
+				ether_addr_copy(hdr->addr3, bss_conf->bssid);
+			else if (vif->type == NL80211_IFTYPE_AP)
+				ether_addr_copy(hdr->addr3, bss_conf->addr);
+
+
+			ath12k_dbg(NULL, ATH12K_DBG_MAC,
+				   "unable to determine link sta addr for translation - Frame control %x link %d sta links %x ahsta links %lu\n",
+				   hdr->frame_control, link, sta->valid_links,
+				   ahsta->links_map);
+		} else {
+			ath12k_err(NULL, "unable to determine Tx link for frame %x link %d\n vif links %x sta links %x ahvif links %lu ahsta links %lu",
+				   hdr->frame_control, link, vif->valid_links,
+				   sta->valid_links, ahvif->links_map, ahsta->links_map);
+			link = IEEE80211_MLD_MAX_NUM_LINKS;
+		}
+
+		/* Check if this mgmt frame can be queued at MLD level, in that
+		 * case the FW can decide on which link it needs to be finally
+		 * transmitted based on the power state of that link.
+		 * The link param returned by this function still needs
+		 * to be valid to get queued to one of the valid link FW
+		 */
+		if (ath12k_mac_is_mgmt_link_agnostic(skb)) {
+			ATH12K_SKB_CB(skb)->flags |= ATH12K_SKB_MGMT_LINK_AGNOSTIC;
+			/* For action frames this will be reset if not needed
+			 * later based on action category.
+			 */
+		}
+	}
+
+	return link;
+}
+
+bool ath12k_mac_tx_check_max_limit(struct ath12k *ar, struct sk_buff *skb)
+{
+	struct ath12k_base *ab = ar->ab;
+
+	/* Allow EAPOL */
+	if (skb->protocol == cpu_to_be16(ETH_P_PAE))
+		return false;
+
+	if (atomic_read(&ab->ag->num_dp_tx_pending) > ATH12K_DP_GROUP_TX_LIMIT) {
+		ab->soc_stats.tx_err.group_threshold_limit++;
+		return true;
+	}
+
+	if (atomic_read(&ar->dp.num_tx_pending) > ATH12K_DP_PDEV_TX_LIMIT) {
+		ar->ab->soc_stats.tx_err.pdev_threshold_limit++;
+		return true;
+	}
+
+	return false;
+}
+
 static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
 			     struct ieee80211_tx_control *control,
 			     struct sk_buff *skb)
 {
-	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
-	struct ath12k *ar = hw->priv;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_vif *vif = info->control.vif;
-	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct ieee80211_key_conf *key = info->control.hw_key;
-	u32 info_flags = info->flags;
+	struct ath12k_skb_cb *skb_cb;
+	struct ieee80211_tx_info *info;
+	struct ieee80211_vif *vif;
+	struct ath12k_vif *ahvif;
+	struct ath12k_link_vif *arvif, *tmp_arvif;
+	struct ath12k *ar, *tmp_ar;
+	struct ath12k_hw *ah;
+	struct ieee80211_hdr *hdr;
+	struct ethhdr *eth = (struct ethhdr *)skb->data;
+	struct ieee80211_key_conf *key;
+	struct ath12k_mgmt_frame_stats *mgmt_stats;
+	struct ath12k_sta *ahsta = NULL;
+	struct sk_buff *msdu_copied;
+	struct ath12k_link_sta *arsta = NULL;
+	struct ieee80211_sta *sta = NULL;
+	struct ath12k_peer *peer;
+	u32 info_flags;
 	bool is_prb_rsp;
+	bool is_mcast = false;
+	u16 frm_type = 0;
+	u8 link_id;
+	u16 mcbc_gsn;
 	int ret;
+ 	u8 link;
+
+	info = IEEE80211_SKB_CB(skb);
+	vif = info->control.vif;
+	ahvif = ath12k_vif_to_ahvif(vif);
+	link = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK);
+	info_flags = info->flags;
 
+	if (control)
+		sta = control->sta;
+
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+		ieee80211_free_txskb(ah->hw, skb);
+		return;
+	}
+
+	skb_cb = ATH12K_SKB_CB(skb);
+	key = info->control.hw_key;
 	memset(skb_cb, 0, sizeof(*skb_cb));
 	skb_cb->vif = vif;
 
+	/* handle only for MLO case, use deflink for non MLO case */
+	if (vif->valid_links) {
+		link = ath12k_mac_get_tx_link(sta, vif, link, skb, info_flags);
+
+		if (link >= IEEE80211_MLD_MAX_NUM_LINKS) {
+			ieee80211_free_txskb(hw, skb);
+			return;
+		}
+	} else {
+		link = 0;
+	}
+
+	arvif = ahvif->link[link];
+	skb_cb->link_id = link;
+
+	if (!arvif || !arvif->ar) {
+		ieee80211_free_txskb(hw, skb);
+		return;
+	}
+
+	ar = arvif->ar;
+
+	ah = ar->ah;
+
+	if (unlikely(test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))) {
+		ieee80211_free_txskb(ah->hw, skb);
+		return;
+	}
+
+#ifdef CONFIG_MAC80211_SFE_SUPPORT
+	if (skb->fast_xmit) {
+		ret = ath12k_mac_tx_check_max_limit(ar, skb);
+		if (ret) {
+			ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+				   "failed fast tx due to limit check pdev idx %d\n",
+				   ar->pdev_idx);
+			ieee80211_free_txskb(hw, skb);
+			return;
+		}
+
+		skb_cb->flags = ATH12K_SKB_HW_80211_ENCAP;
+		ret = ath12k_dp_tx_direct(arvif, skb);
+		if (unlikely(ret)) {
+			ath12k_dbg(arvif->ar->ab, ATH12K_DBG_MAC,
+				   "failed to transmit frame %d\n", ret);
+			ieee80211_free_txskb(hw, skb);
+		}
+		return;
+	}
+#endif
+
+#ifdef CONFIG_IP_FFN
+	if (skb->ffn_ff_done) {
+		ret = ath12k_mac_tx_check_max_limit(ar, skb);
+		if (ret) {
+			ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+				   "failed fast tx due to limit check pdev idx %d\n",
+				   ar->pdev_idx);
+			ieee80211_free_txskb(hw, skb);
+			return;
+		}
+
+		skb_cb->flags = ATH12K_SKB_HW_80211_ENCAP;
+		ret = ath12k_dp_tx_direct(arvif, skb);
+		if (unlikely(ret)) {
+			ath12k_dbg(arvif->ar->ab, ATH12K_DBG_MAC,
+				   "failed to transmit frame %d\n", ret);
+			ieee80211_free_txskb(hw, skb);
+		}
+		return;
+	}
+#endif
+
 	if (key) {
 		skb_cb->cipher = key->cipher;
 		skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
 	}
 
+	hdr = (struct ieee80211_hdr *)skb->data;
+
 	if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
 		skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
+		is_mcast = is_multicast_ether_addr(eth->h_dest);
 	} else if (ieee80211_is_mgmt(hdr->frame_control)) {
+		frm_type = FIELD_GET(IEEE80211_FCTL_STYPE, hdr->frame_control);
 		is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
+		mgmt_stats = &ahvif->mgmt_stats;
 		ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
 		if (ret) {
-			ath12k_warn(ar->ab, "failed to queue management frame %d\n",
-				    ret);
-			ieee80211_free_txskb(ar->hw, skb);
+			if (ret != -EBUSY)
+				ath12k_warn(ar->ab, "failed to queue mgmt stype 0x%x frame %d\n", frm_type, ret);
+
+			ieee80211_free_txskb(ah->hw, skb);
+			spin_lock_bh(&ar->data_lock);
+			mgmt_stats->tx_fail_cnt[frm_type]++;
+			spin_unlock_bh(&ar->data_lock);
+		} else {
+			spin_lock_bh(&ar->data_lock);
+			mgmt_stats->tx_succ_cnt[frm_type]++;
+			spin_unlock_bh(&ar->data_lock);
 		}
 		return;
-	}
+	} else
+		is_mcast = is_multicast_ether_addr(hdr->addr1);
 
-	ret = ath12k_dp_tx(ar, arvif, skb);
+	if (sta)
+		ahsta = ath12k_sta_to_ahsta(control->sta);
+
+	/* Must call mac80211 tx status handler, else when stats is disabled we
+	 * free the skb from driver. Own tx packets on monitor will also be
+	 * disabled.
+	 */
+	if ((info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
+			    IEEE80211_TX_INTFL_NL80211_FRAME_TX)) ||
+	    info->status_data_idr || vif->type == NL80211_IFTYPE_MESH_POINT ||
+	    test_bit(MONITOR_VDEV_CREATED, &ar->monitor_flags))
+		skb_cb->flags |= ATH12K_SKB_TX_STATUS;
+
+	if (!vif->valid_links || !is_mcast ||
+	    test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->ag->dev_flags)) {
+		ret = ath12k_mac_tx_check_max_limit(ar, skb);
 	if (ret) {
+			ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+				   "failed due to limit check pdev idx %d\n",
+				   ar->pdev_idx);
+			ieee80211_free_txskb(hw, skb);
+			return;
+		}
+
+		ret = ath12k_dp_tx(ar, arvif, ahsta, skb, false, 0);
+		if (unlikely(ret)) {
+			if (ret == -ENOMEM)
+				ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+					   "failed to transmit frame %d\n", ret);
+			else
 		ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
-		ieee80211_free_txskb(ar->hw, skb);
+
+			ieee80211_free_txskb(ah->hw, skb);
 	}
+		if (ath12k_debugfs_is_extd_tx_stats_enabled(ar) && ahsta) {
+			arsta = &ahsta->deflink;
+			atomic_inc(&arsta->drv_tx_pkts.pkts_in);
+			if (!ret)
+				atomic_inc(&arsta->drv_tx_pkts.pkts_out);
 }
+	} else {
+		spin_lock(&ahvif->mcbc_gsn_lock);
+		mcbc_gsn = ahvif->mcbc_gsn++;
+		if (ahvif->mcbc_gsn > 0xFFF)
+			ahvif->mcbc_gsn = 0;
+		spin_unlock(&ahvif->mcbc_gsn_lock);
+
+		for_each_set_bit(link_id, &ahvif->links_map,
+				 IEEE80211_MLD_MAX_NUM_LINKS) {
+			tmp_arvif = ahvif->link[link_id];
+			if (!(tmp_arvif && tmp_arvif->is_up))
+				continue;
+
+			tmp_ar = tmp_arvif->ar;
+
+			if (unlikely(test_bit(ATH12K_FLAG_CRASH_FLUSH, &tmp_ar->ab->dev_flags)))
+				continue;
 
-void ath12k_mac_drain_tx(struct ath12k *ar)
+			ret = ath12k_mac_tx_check_max_limit(tmp_ar, skb);
+			if (ret) {
+				ath12k_dbg(tmp_ar->ab, ATH12K_DBG_MAC,
+					   "failed mcast tx due to limit check pdev idx %d\n",
+					   tmp_ar->pdev_idx);
+				continue;
+			}
+
+			msdu_copied = skb_copy(skb, GFP_ATOMIC);
+			if (!msdu_copied) {
+				ath12k_err(ar->ab,
+					   "skb copy failure link_id 0x%X vdevid 0x%X\n",
+					   link_id, tmp_arvif->vdev_id);
+				continue;
+			}
+
+			ath12k_mlo_mcast_update_tx_link_address(vif, link_id,
+								msdu_copied,
+								info_flags);
+			/* The upper layer would not have handled the keys
+			 * for this copy, so handle them here. */
+			skb_cb = ATH12K_SKB_CB(msdu_copied);
+			skb_cb->link_id = link_id;
+
+			/* For open mode, skip peer find logic */
+			if (ahvif->key_cipher == INVALID_CIPHER)
+				goto skip_peer_find;
+
+			spin_lock_bh(&tmp_ar->ab->base_lock);
+			peer = ath12k_peer_find_by_addr(tmp_ar->ab, tmp_arvif->addr);
+			if (!peer) {
+				spin_unlock_bh(&tmp_ar->ab->base_lock);
+				ath12k_warn(tmp_ar->ab,
+					    "failed to find peer for vdev_id 0x%X addr %pM link_map %lu\n",
+					    tmp_arvif->vdev_id, tmp_arvif->addr,
+					    ahvif->links_map);
+				tmp_ar->ab->soc_stats.tx_err.peers_not_present++;
+				ieee80211_free_txskb(ah->hw, msdu_copied);
+				continue;
+			}
+
+			key = peer->keys[peer->mcast_keyidx];
+			if (key) {
+				skb_cb->cipher = key->cipher;
+				skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
+
+				hdr = (struct ieee80211_hdr *)msdu_copied->data;
+				if (!ieee80211_has_protected(hdr->frame_control))
+					hdr->frame_control |=
+						cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+			}
+			spin_unlock_bh(&tmp_ar->ab->base_lock);
+skip_peer_find:
+			ret = ath12k_dp_tx(tmp_ar, tmp_arvif, ahsta,
+					   msdu_copied, true, mcbc_gsn);
+
+			if (unlikely(ret)) {
+				if (ret == -ENOMEM)
+					ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "failed to transmit frame %d\n",
+						   ret);
+				else
+					ath12k_warn(ar->ab, "failed to transmit frame %d\n",
+						    ret);
+
+				ieee80211_free_txskb(ah->hw, msdu_copied);
+			}
+		}
+		ieee80211_free_txskb(ah->hw, skb);
+	}
+
+}
+
+void ath12k_mac_radio_drain_tx(struct ath12k *ar)
 {
 	/* make sure rcu-protected mac80211 tx path itself is drained */
 	synchronize_net();
@@ -4600,40 +9890,227 @@
 
 static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
 {
-	return -ENOTSUPP;
-	/* TODO: Need to support new monitor mode */
+	struct htt_rx_ring_tlv_filter tlv_filter = {0};
+	struct ath12k_base *ab = ar->ab;
+	int i, ret = 0;
+	u32 ring_id;
+
+	if (enable) {
+		tlv_filter = ath12k_mac_mon_status_filter_default;
+		if (ath12k_debugfs_rx_filter(ar))
+			tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar);
+
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+			if (g_bonded_interface_model &&
+			    test_bit(ATH12K_FLAG_PPE_DS_ENABLED, &ar->ab->dev_flags))
+				tlv_filter.rx_filter |= (HTT_RX_FILTER_TLV_FLAGS_PPDU_START |
+							 HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS |
+							 HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT |
+							 HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO);
+#endif /* CONFIG_ATH12K_BONDED_DS_SUPPORT */
+
 }
 
-static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab)
+	tlv_filter.offset_valid = false;
+
+	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+		ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+		ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+						       ar->dp.mac_id + i,
+						       HAL_RXDMA_MONITOR_DST,
+						       DP_RXDMA_REFILL_RING_SIZE,
+						       &tlv_filter);
+	}
+
+	return ret;
+}
+
+static int __ath12k_mac_mlo_ready(struct ath12k *ar)
 {
-	int recovery_start_count;
+	int ret;
 
-	if (!ab->is_reset)
-		return;
+	ret = ath12k_wmi_mlo_ready(ar);
+	if (ret) {
+		ath12k_err(ar->ab, "MLO ready failed for pdev_idx %d: %d\n",
+			   ar->pdev_idx, ret);
 
-	recovery_start_count = atomic_inc_return(&ab->recovery_start_count);
+		return ret;
+	}
 
-	ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery start count %d\n", recovery_start_count);
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "MLO ready done for pdev_idx %d\n",
+		   ar->pdev_idx);
 
-	if (recovery_start_count == ab->num_radios) {
-		complete(&ab->recovery_start);
-		ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery started success\n");
+	return 0;
 	}
 
-	ath12k_dbg(ab, ATH12K_DBG_MAC, "waiting reconfigure...\n");
+int ath12k_mac_mlo_ready(struct ath12k_hw *ah)
+{
+	struct ath12k *ar;
+	int ret;
+	int i;
+
+	ar = ah->radio;
+	for (i = 0; i < ah->num_radio; i++) {
+		ret = __ath12k_mac_mlo_ready(ar);
+		if (ret)
+			goto out;
 
-	wait_for_completion_timeout(&ab->reconfigure_complete,
-				    ATH12K_RECONFIGURE_TIMEOUT_HZ);
+		ar++;
 }
 
-static int ath12k_mac_op_start(struct ieee80211_hw *hw)
+out:
+	return ret;
+
+}
+
+static int __ath12k_mac_mlo_setup(struct ath12k *ar)
+{
+	struct ath12k_base *partner_ab, *ab = ar->ab;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_pdev *pdev;
+	struct wmi_mlo_setup_params mlo_params = { 0 };
+	u8 num_link = 0, partner_link_id[ATH12K_GROUP_MAX_RADIO] = { 0 };
+	int chip_idx, pdev_idx, ret;
+	unsigned long time_left;
+
+	lockdep_assert_held(&ag->mutex_lock);
+
+	reinit_completion(&ar->mlo_setup_done);
+	for (chip_idx = 0; chip_idx < ag->num_chip; chip_idx++) {
+		partner_ab = ag->ab[chip_idx];
+
+		for (pdev_idx = 0; pdev_idx < partner_ab->num_radios; pdev_idx++) {
+			pdev = &partner_ab->pdevs[pdev_idx];
+
+			/* Avoid the self link */
+			if (ar == pdev->ar)
+				continue;
+
+			partner_link_id[num_link] = pdev->hw_link_id;
+			num_link++;
+
+			ath12k_dbg(ab, ATH12K_DBG_MAC,
+				   "chip_id %d pdev_idx %d link id %d num_link %d\n",
+				   chip_idx, pdev_idx, pdev->hw_link_id, num_link);
+		}
+	}
+
+	mlo_params.group_id = ag->id;
+	mlo_params.partner_link_id = partner_link_id;
+	mlo_params.num_partner_links = num_link;
+	ar->mlo_setup_status = 0;
+
+	ath12k_dbg(ab, ATH12K_DBG_MAC, "grp id %d num_link %d\n", ag->id, num_link);
+
+	ret = ath12k_wmi_mlo_setup(ar, &mlo_params);
+	if (ret) {
+		ath12k_err(ab, "failed to setup MLO for pdev_idx %d: %d\n",
+			   ar->pdev_idx, ret);
+		return ret;
+	}
+
+	time_left = wait_for_completion_timeout(&ar->mlo_setup_done,
+						WMI_MLO_CMD_TIMEOUT_HZ);
+	if (!time_left || ar->mlo_setup_status)
+		return ar->mlo_setup_status ? : -ETIMEDOUT;
+
+	ath12k_dbg(ab, ATH12K_DBG_MAC, "MLO setup done for pdev_idx %d\n",
+		   ar->pdev_idx);
+	return 0;
+}
+
+static int __ath12k_mac_mlo_teardown(struct ath12k *ar, bool umac_reset)
+{
+	struct ath12k_base *ab = ar->ab;
+	int ret;
+
+	/* If teardown is received while the ab is down, exit silently */
+	if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
+		return 0;
+
+	ret = ath12k_wmi_mlo_teardown(ar, umac_reset);
+	if (ret) {
+		ath12k_err(ab, "failed to teardown MLO for pdev_idx %d: %d\n",
+			   ar->pdev_idx, ret);
+		return ret;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_MAC, "MLO teardown for pdev_idx %d:%d\n",
+		   ar->pdev_idx, umac_reset);
+	return 0;
+}
+
+int ath12k_mac_mlo_setup(struct ath12k_hw *ah)
+{
+	struct ath12k *ar;
+	int ret, fbret;
+	int i;
+
+	ar = ah->radio;
+	for (i = 0; i < ah->num_radio; i++) {
+		ret = __ath12k_mac_mlo_setup(ar);
+		if (ret) {
+			ath12k_err(ar->ab, "failed to setup mlo %d\n", ret);
+			goto err_setup;
+		}
+
+		ar++;
+	}
+
+	return 0;
+
+err_setup:
+	ar = ah->radio;
+	for (i = i - 1; i >= 0; i--) {
+		fbret = __ath12k_mac_mlo_teardown(ar, false);
+		if (fbret)
+			ath12k_err(ar->ab, "MLO teardown failed %d in setup cleanup\n",
+				   fbret);
+
+		ar++;
+	}
+	return ret;
+}
+
+static void ath12k_mac_drain_tx(struct ath12k_hw *ah,
+				struct ath12k *asserted_ar)
+{
+	struct ath12k *ar;
+	int i;
+
+	if (asserted_ar) {
+		/* if asserted ar is NOT NULL, consider it
+		 * is during Mode1 recovery and drain only
+		 * asserted pdev
+		 */
+		ath12k_mac_radio_drain_tx(asserted_ar);
+		return;
+	}
+
+	ar = ah->radio;
+
+	for (i = 0; i < ah->num_radio; i++) {
+		ath12k_mac_radio_drain_tx(ar);
+		ar++;
+	}
+}
+
+int ath12k_mac_radio_start(struct ath12k *ar)
 {
-	struct ath12k *ar = hw->priv;
 	struct ath12k_base *ab = ar->ab;
 	struct ath12k_pdev *pdev = ar->pdev;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_hw *ah = ar->ah;
 	int ret;
 
-	ath12k_mac_drain_tx(ar);
+	/* reo init/drain tx only needed for recovered
+	 * ab during Mode1 scenario
+	 */
+	if (ab->recovery_start) {
+		ath12k_mgmt_rx_reo_init_timer(ag);
+		ath12k_mac_drain_tx(ah, ar);
+	}
+
 	mutex_lock(&ar->conf_mutex);
 
 	switch (ar->state) {
@@ -4642,11 +10119,11 @@
 		break;
 	case ATH12K_STATE_RESTARTING:
 		ar->state = ATH12K_STATE_RESTARTED;
-		ath12k_mac_wait_reconfigure(ab);
 		break;
 	case ATH12K_STATE_RESTARTED:
 	case ATH12K_STATE_WEDGED:
 	case ATH12K_STATE_ON:
+	case ATH12K_STATE_TM:
 		WARN_ON(1);
 		ret = -EINVAL;
 		goto err;
@@ -4656,14 +10133,14 @@
 					1, pdev->pdev_id);
 
 	if (ret) {
-		ath12k_err(ar->ab, "failed to enable PMF QOS: (%d\n", ret);
+		ath12k_err(ab, "failed to enable PMF QOS: (%d)\n", ret);
 		goto err;
 	}
 
 	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
 					pdev->pdev_id);
 	if (ret) {
-		ath12k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret);
+		ath12k_err(ab, "failed to enable dynamic bw: %d\n", ret);
 		goto err;
 	}
 
@@ -4683,6 +10160,7 @@
 	}
 
 	ret = ath12k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+						  ab->stats_disable ? 0 :
 						  HTT_PPDU_STATS_TAG_DEFAULT);
 	if (ret) {
 		ath12k_err(ab, "failed to req ppdu stats: %d\n", ret);
@@ -4693,20 +10171,70 @@
 					1, pdev->pdev_id);
 
 	if (ret) {
-		ath12k_err(ar->ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret);
+		ath12k_err(ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret);
+		goto err;
+	}
+
+	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_SET_CONG_CTRL_MAX_MSDUS,
+					ATH12K_NUM_POOL_TX_DESC, pdev->pdev_id);
+
+	if (ret) {
+		ath12k_err(ab, "failed to set congestion control MAX MSDUS: %d\n", ret);
+		goto err;
+	}
+	/* Enable(1)/Disable(0) sub channel marking */
+	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+		ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_SUB_CHANNEL_MARKING,
+						1, pdev->pdev_id);
+		if (ret) {
+			ath12k_err(ab, "failed to enable SUB CHANNEL MARKING: %d\n", ret);
+			goto err;
+		}
+	}
+
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+	/* program CCE rule for eapol packets to be routed to wbm release ring */
+	if (g_bonded_interface_model) {
+		ret = ath12k_dp_rx_pkt_type_filter(ar, ATH12K_PKT_TYPE_EAP,
+						   ATH12K_ROUTE_EAP_METADATA);
+		if (ret) {
+			ath12k_err(ar->ab, "failed to configure EAP pkt route: %d\n", ret);
 		goto err;
 	}
 
+		ret = ath12k_dp_rx_pkt_type_filter(ar, ATH12K_PKT_TYPE_ARP_IPV4,
+						   ATH12K_ROUTE_ARP_METADATA);
+		if (ret) {
+			ath12k_err(ar->ab, "failed to configure ARP pkt route: %d\n", ret);
+			goto err;
+		}
+	}
+#endif
+
 	__ath12k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
 
 	/* TODO: Do we need to enable ANI? */
 
-	ath12k_reg_update_chan_list(ar);
+	ret = ath12k_reg_update_chan_list(ar);
+
+	/* For an unsupported country only this ar is turned off, without
+	 * returning an error, as the channel list must still be updated
+	 * for the next ar.
+	 */
+	if (ret) {
+		if (ret == -ENOTSUPP)
+			ret = 0;
+		goto err;
+	}
 
 	ar->num_started_vdevs = 0;
 	ar->num_created_vdevs = 0;
 	ar->num_peers = 0;
 	ar->allocated_vdev_map = 0;
+	ar->chan_tx_pwr = ATH12K_PDEV_TX_POWER_INVALID;
+
+	spin_lock_bh(&ar->data_lock);
+        ar->awgn_intf_handling_in_prog = false;
+        spin_unlock_bh(&ar->data_lock);
 
 	/* Configure monitor status ring with default rx_filter to get rx status
 	 * such as rssi, rx_duration.
@@ -4719,7 +10247,7 @@
 	}
 
 	if (ret == -ENOTSUPP)
-		ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		ath12k_dbg(ab, ATH12K_DBG_MAC,
 			   "monitor status config is not yet supported");
 
 	/* Configure the hash seed for hash based reo dest ring selection */
@@ -4740,6 +10268,9 @@
 	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
 			   &ab->pdevs[ar->pdev_idx]);
 
+	/* ANI is enabled by default */
+	ar->ani_enabled = true;
+
 	return 0;
 
 err:
@@ -4749,18 +10280,80 @@
 	return ret;
 }
 
-static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
+int ath12k_mac_mlo_teardown(struct ath12k_hw *ah)
 {
-	struct ath12k *ar = hw->priv;
-	struct htt_ppdu_stats_info *ppdu_stats, *tmp;
+	struct ath12k *ar;
 	int ret;
+	int i;
+
+	ar = ah->radio;
+	for (i = 0; i < ah->num_radio; i++) {
+		ret = __ath12k_mac_mlo_teardown(ar, false);
+		if (ret)
+			goto out;
+
+		ar++;
+	}
 
-	ath12k_mac_drain_tx(ar);
+out:
+	return ret;
+}
+
+int ath12k_mac_mlo_teardown_with_umac_reset(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_base *partner_ab;
+	struct ath12k *ar;
+	struct ath12k_pdev *pdev;
+	int i, j, ret = 0;
+	bool umac_reset;
+
+	for (i = 0; i < ag->num_chip; i++) {
+		partner_ab = ag->ab[i];
+
+		for (j = 0; j < partner_ab->num_radios; j++) {
+			pdev = &partner_ab->pdevs[j];
+			ar = pdev->ar;
+
+			if (!ar)
+				continue;
+
+			if (ab == partner_ab) {
+				/* No need to send teardown event for asserted
+				 * chip, as anyway there will be no completion
+				 * event from FW.
+				 */
+				ar->mlo_complete_event = true;
+				continue;
+			}
+
+			/* umac_reset must be set for exactly one chip */
+			umac_reset = false;
+			if (!ag->trigger_umac_reset) {
+				umac_reset = true;
+				ag->trigger_umac_reset = true;
+			}
+
+			ret = __ath12k_mac_mlo_teardown(ar, umac_reset);
+			if (ret)
+				goto out;
+		}
+	}
+
+out:
+	return ret;
+}
+
+static void ath12k_mac_radio_stop(struct ath12k *ar)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct htt_ppdu_stats_info *ppdu_stats, *tmp;
+	int ret;
 
 	mutex_lock(&ar->conf_mutex);
 	ret = ath12k_mac_config_mon_status_default(ar, false);
 	if (ret && (ret != -ENOTSUPP))
-		ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
+		ath12k_err(ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
 			   ret);
 
 	clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
@@ -4768,6 +10361,7 @@
 	mutex_unlock(&ar->conf_mutex);
 
 	cancel_delayed_work_sync(&ar->scan.timeout);
+	cancel_work_sync(&ar->scan.vdev_del_wk);
 	cancel_work_sync(&ar->regd_update_work);
 
 	spin_lock_bh(&ar->data_lock);
@@ -4777,15 +10371,131 @@
 	}
 	spin_unlock_bh(&ar->data_lock);
 
-	rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
+	if (!list_empty(&ab->neighbor_peers))
+		ath12k_debugfs_nrp_cleanup_all(ar);
+
+	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
 
+	if(!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->ag->dev_flags)))
 	synchronize_rcu();
 
 	atomic_set(&ar->num_pending_mgmt_tx, 0);
+	atomic_set(&ar->flush_request, 0);
+
+	spin_lock_bh(&ar->data_lock);
+        ar->awgn_intf_handling_in_prog = false;
+        spin_unlock_bh(&ar->data_lock);
+}
+
+void ath12k_mgmt_rx_reo_init_timer(struct ath12k_hw_group *ag)
+{
+	struct ath12k_mgmt_rx_reo_context *reo_context = &ag->rx_reo;
+
+	if (!(ag->mlo_mem.is_mlo_mem_avail && ag->mgmt_rx_reorder))
+		return;
+
+	if (reo_context->timer_init_done)
+		return;
+
+	mod_timer(&reo_context->reo_list.ageout_timer, jiffies +
+			msecs_to_jiffies(ATH12K_MGMT_RX_REO_AGEOUT_TIMER_PERIOD_MS));
+
+	mod_timer(&reo_context->reo_list.global_mgmt_rx_inactivity_timer, jiffies +
+			ATH12K_MGMT_RX_REO_GLOBAL_MGMT_RX_INACTIVITY_TIMEOUT);
+
+	reo_context->timer_init_done = true;
+}
+
+static void ath12k_mgmt_rx_reo_deinit_timer(struct ath12k_hw_group *ag)
+{
+	struct ath12k_mgmt_rx_reo_context *reo_context = &ag->rx_reo;
+
+	if (!(ag->mlo_mem.is_mlo_mem_avail && ag->mgmt_rx_reorder))
+		return;
+
+	if (!reo_context->timer_init_done)
+		return;
+
+	del_timer_sync(&reo_context->reo_list.global_mgmt_rx_inactivity_timer);
+	del_timer_sync(&reo_context->reo_list.ageout_timer);
+
+	reo_context->timer_init_done = false;
+}
+
+static int ath12k_mac_op_start(struct ieee80211_hw *hw)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	struct ath12k_base *ab;
+	struct ath12k_hw_group *ag = ah->ag;
+	int i;
+	int ret;
+
+	ath12k_mgmt_rx_reo_init_timer(ag);
+
+	ath12k_mac_drain_tx(ah, NULL);
+
+	mutex_lock(&ah->conf_mutex);
+	ar = ah->radio;
+	ab = ar->ab;
+
+	if (ath12k_ftm_mode) {
+		ath12k_err(ab, "fail to start mac operations in ftm mode\n");
+		mutex_unlock(&ah->conf_mutex);
+		return -EWOULDBLOCK;
+	}
+
+	/* TODO Maintain state for ah? */
+
+	for (i = 0; i < ah->num_radio; i++) {
+		ab = ar->ab;
+		ret = ath12k_mac_radio_start(ar);
+		if (ret) {
+			ath12k_err(ab, "fail to start mac operations in radio %d ret %d\n",
+				   i, ret);
+			goto err;
+		}
+		ar++;
+	}
+
+	mutex_unlock(&ah->conf_mutex);
+	return 0;
+
+err:
+	ar = ah->radio;
+	for (i = i - 1; i >= 0; i--) {
+		ath12k_mac_radio_stop(ar);
+		ar++;
+	}
+	mutex_unlock(&ah->conf_mutex);
+	return ret;
+}
+
+static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	struct ath12k_hw_group *ag = ah->ag;
+	int i;
+
+	ath12k_mgmt_rx_reo_deinit_timer(ag);
+
+	ath12k_mac_drain_tx(ah, NULL);
+
+	mutex_lock(&ah->conf_mutex);
+	ar = ah->radio;
+
+	/* TODO Maintain state for ah? */
+	for (i = ah->num_radio - 1, ar = ar + (ah->num_radio - 1); i >= 0; i--) {
+		ath12k_mac_radio_stop(ar);
+		ar--;
+	}
+
+	mutex_unlock(&ah->conf_mutex);
 }
 
 static u8
-ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
+ath12k_mac_get_vdev_stats_id(struct ath12k_link_vif *arvif)
 {
 	struct ath12k_base *ab = arvif->ar->ab;
 	u8 vdev_stats_id = 0;
@@ -4793,7 +10503,7 @@
 	do {
 		if (ab->free_vdev_stats_id_map & (1LL << vdev_stats_id)) {
 			vdev_stats_id++;
-			if (vdev_stats_id <= ATH12K_INVAL_VDEV_STATS_ID) {
+			if (vdev_stats_id >= ATH12K_INVAL_VDEV_STATS_ID) {
 				vdev_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
 				break;
 			}
@@ -4807,22 +10517,107 @@
 	return vdev_stats_id;
 }
 
-static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif,
+static int ath12k_mac_setup_vdev_args_mbssid(struct ath12k_link_vif *arvif,
+					       u32 *flags, u32 *tx_vdev_id)
+{
+	struct ath12k *ar = arvif->ar;
+	struct ath12k_link_vif *tx_arvif;
+	struct ath12k_vif *tx_ahvif;
+	struct ieee80211_vif *tx_vif;
+	struct ieee80211_bss_conf *link_conf;
+	/* resolve this link's MBSSID role and the transmitting-AP vdev id */
+	rcu_read_lock();
+
+	link_conf = ath12k_get_link_bss_conf(arvif);
+
+	if (!link_conf) {
+		rcu_read_unlock();
+		ath12k_warn(ar->ab, "unable to access bss link conf in mbssid params setup\n");
+		return -EINVAL;
+	}
+
+	*tx_vdev_id = 0;
+	tx_vif = link_conf->mbssid_tx_vif;
+	if (!tx_vif) {
+		/* A 6 GHz AP is MBSSID capable by default, so firmware
+		 * expects the transmitting-AP flag even for a single BSS.
+		 * WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP is only used for the
+		 * non-6 GHz cases.
+		 */
+		if (ar->supports_6ghz && arvif->ahvif->vif->type == NL80211_IFTYPE_AP)
+			*flags = WMI_HOST_VDEV_FLAGS_TRANSMIT_AP;
+		else
+			*flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP;
+		rcu_read_unlock();
+		return 0;
+	}
+
+	tx_ahvif = ath12k_vif_to_ahvif(tx_vif);
+	if (!tx_ahvif) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	lockdep_assert_held(&tx_ahvif->ah->conf_mutex);
+	tx_arvif = tx_ahvif->link[link_conf->mbssid_tx_vif_linkid];
+	if (!tx_arvif) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	lockdep_assert_held(&tx_arvif->ar->conf_mutex);
+
+	if (link_conf->nontransmitted) {
+		if (ar->ah->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy) {
+			rcu_read_unlock();
+			return -EINVAL;
+		}
+
+		*flags = WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP;
+		*tx_vdev_id = tx_arvif->vdev_id;
+	} else if (tx_arvif == arvif) {
+		*flags = WMI_HOST_VDEV_FLAGS_TRANSMIT_AP;
+	} else {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	arvif->tx_vdev_id = *tx_vdev_id;
+
+	if (link_conf->ema_ap)
+		*flags |= WMI_HOST_VDEV_FLAGS_EMA_MODE;
+
+	rcu_read_unlock();
+	return 0;
+}
+
+static int ath12k_mac_setup_vdev_create_args(struct ath12k_link_vif *arvif,
 					     struct ath12k_wmi_vdev_create_arg *arg)
 {
+	struct ath12k_vif *ahvif = arvif->ahvif;
 	struct ath12k *ar = arvif->ar;
 	struct ath12k_pdev *pdev = ar->pdev;
+	int ret;
 
 	arg->if_id = arvif->vdev_id;
-	arg->type = arvif->vdev_type;
-	arg->subtype = arvif->vdev_subtype;
+	arg->type = ahvif->vdev_type;
+	arg->subtype = ahvif->vdev_subtype;
 	arg->pdev_id = pdev->pdev_id;
+	arg->mbssid_flags = 0;
+	arg->mbssid_tx_vdev_id = 0;
+
+	if (!test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT,
+		      ar->ab->wmi_ab.svc_map)) {
+		ret = ath12k_mac_setup_vdev_args_mbssid(arvif,
+							  &arg->mbssid_flags,
+							  &arg->mbssid_tx_vdev_id);
+		if (ret)
+			return ret;
+	}
 
 	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
 		arg->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
 		arg->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
 	}
-	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
+	    !ar->supports_6ghz) {
 		arg->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
 		arg->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
 	}
@@ -4833,97 +10628,51 @@
 	}
 
 	arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif);
-}
-
-static u32
-ath12k_mac_prepare_he_mode(struct ath12k_pdev *pdev, u32 viftype)
-{
-	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
-	struct ath12k_band_cap *cap_band = NULL;
-	u32 *hecap_phy_ptr = NULL;
-	u32 hemode;
-
-	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP)
-		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
-	else
-		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
-
-	hecap_phy_ptr = &cap_band->he_cap_phy_info[0];
-
-	hemode = u32_encode_bits(HE_SU_BFEE_ENABLE, HE_MODE_SU_TX_BFEE) |
-		 u32_encode_bits(HECAP_PHY_SUBFMR_GET(hecap_phy_ptr),
-				 HE_MODE_SU_TX_BFER) |
-		 u32_encode_bits(HECAP_PHY_ULMUMIMO_GET(hecap_phy_ptr),
-				 HE_MODE_UL_MUMIMO);
 
-	/* TODO: WDS and other modes */
-	if (viftype == NL80211_IFTYPE_AP) {
-		hemode |= u32_encode_bits(HECAP_PHY_MUBFMR_GET(hecap_phy_ptr),
-					  HE_MODE_MU_TX_BFER) |
-			  u32_encode_bits(HE_DL_MUOFDMA_ENABLE, HE_MODE_DL_OFDMA) |
-			  u32_encode_bits(HE_UL_MUOFDMA_ENABLE, HE_MODE_UL_OFDMA);
-	} else {
-		hemode |= u32_encode_bits(HE_MU_BFEE_ENABLE, HE_MODE_MU_TX_BFEE);
+	if (ath12k_mac_is_ml_arvif(arvif)) {
+		if (hweight16(ahvif->vif->valid_links) > ATH12K_WMI_MLO_MAX_LINKS) {
+			WARN_ON(1);
+			return -EINVAL;
 	}
-
-	return hemode;
+		ether_addr_copy(arg->mld_addr, ahvif->vif->addr);
 }
 
-static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar,
-					  struct ath12k_vif *arvif)
-{
-	u32 param_id, param_value;
-	struct ath12k_base *ab = ar->ab;
-	int ret;
-
-	param_id = WMI_VDEV_PARAM_SET_HEMU_MODE;
-	param_value = ath12k_mac_prepare_he_mode(ar->pdev, arvif->vif->type);
-	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
-					    param_id, param_value);
-	if (ret) {
-		ath12k_warn(ab, "failed to set vdev %d HE MU mode: %d param_value %x\n",
-			    arvif->vdev_id, ret, param_value);
-		return ret;
-	}
-	param_id = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE;
-	param_value =
-		u32_encode_bits(HE_VHT_SOUNDING_MODE_ENABLE, HE_VHT_SOUNDING_MODE) |
-		u32_encode_bits(HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE,
-				HE_TRIG_NONTRIG_SOUNDING_MODE);
-	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
-					    param_id, param_value);
-	if (ret) {
-		ath12k_warn(ab, "failed to set vdev %d HE MU mode: %d\n",
-			    arvif->vdev_id, ret);
-		return ret;
-	}
-	return ret;
+	return 0;
 }
 
-static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
-					     struct ieee80211_vif *vif)
+static void ath12k_mac_update_vif_offload(struct ath12k *ar,
+					  struct ath12k_link_vif *arvif)
 {
-	struct ath12k *ar = hw->priv;
 	struct ath12k_base *ab = ar->ab;
-	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_vif *vif = ahvif->vif;
 	u32 param_id, param_value;
 	int ret;
 
 	param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
-	if (vif->type != NL80211_IFTYPE_STATION &&
-	    vif->type != NL80211_IFTYPE_AP)
+	if (ath12k_frame_mode != ATH12K_HW_TXRX_ETHERNET ||
+	    (vif->type != NL80211_IFTYPE_STATION &&
+	     vif->type != NL80211_IFTYPE_AP))
 		vif->offload_flags &= ~(IEEE80211_OFFLOAD_ENCAP_ENABLED |
 					IEEE80211_OFFLOAD_DECAP_ENABLED);
 
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	if (vif->type == NL80211_IFTYPE_AP && arvif->ndev_pvt
+			&& (hweight16(vif->valid_links) > 1))
+		vif->offload_flags |= (IEEE80211_OFFLOAD_ENCAP_ENABLED |
+					IEEE80211_OFFLOAD_DECAP_ENABLED);
+#endif
+
 	if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
-		arvif->tx_encap_type = ATH12K_HW_TXRX_ETHERNET;
-	else if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
-		arvif->tx_encap_type = ATH12K_HW_TXRX_RAW;
+		ahvif->tx_encap_type = ATH12K_HW_TXRX_ETHERNET;
+	else if (test_bit(ATH12K_FLAG_RAW_MODE, &ag->dev_flags))
+		ahvif->tx_encap_type = ATH12K_HW_TXRX_RAW;
 	else
-		arvif->tx_encap_type = ATH12K_HW_TXRX_NATIVE_WIFI;
+		ahvif->tx_encap_type = ATH12K_HW_TXRX_NATIVE_WIFI;
 
 	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
-					    param_id, arvif->tx_encap_type);
+					    param_id, ahvif->tx_encap_type);
 	if (ret) {
 		ath12k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
 			    arvif->vdev_id, ret);
@@ -4933,7 +10682,7 @@
 	param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE;
 	if (vif->offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED)
 		param_value = ATH12K_HW_TXRX_ETHERNET;
-	else if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
+	else if (test_bit(ATH12K_FLAG_RAW_MODE, &ag->dev_flags))
 		param_value = ATH12K_HW_TXRX_RAW;
 	else
 		param_value = ATH12K_HW_TXRX_NATIVE_WIFI;
@@ -4947,110 +10696,250 @@
 	}
 }
 
-static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
+static void ath12k_mac_hdl_update_vif_offload(struct ath12k_link_vif *arvif)
+{
+	if (!arvif->ar) {
+		ath12k_info(NULL,
+				"unable to determine device to apply vif encap/decap flags, setting will be applied on channel assignment\n");
+		return;
+	}
+	/* link is bound to a radio: push encap/decap config, refresh bank profile */
+	ath12k_mac_update_vif_offload(arvif->ar, arvif);
+	ath12k_dp_tx_update_bank_profile(arvif);
+}
+
+static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
 				       struct ieee80211_vif *vif)
 {
-	struct ath12k *ar = hw->priv;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ath12k_link_vif *arvif;
+	int link_id;
+	/* apply offload settings on every created link vdev (or the deflink) */
+	mutex_lock(&ah->conf_mutex);
+	if (vif->valid_links) {
+		unsigned long links = vif->valid_links;
+		for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+			arvif = ahvif->link[link_id];
+			if (!(arvif && arvif->ar))
+				continue;
+
+			ath12k_mac_hdl_update_vif_offload(arvif);
+		}
+	} else {
+		ath12k_mac_hdl_update_vif_offload(&ahvif->deflink);
+	}
+	mutex_unlock(&ah->conf_mutex);
+}
+
+static void ath12k_update_bcn_template_work(struct work_struct *work)
+{
+	struct ath12k_link_vif *arvif = container_of(work, struct ath12k_link_vif,
+					update_bcn_template_work);
+	struct ath12k *ar = arvif->ar;
+	int ret = -EINVAL;
+	/* link not yet bound to a radio: nothing to update */
+	if (!ar)
+		return;
+	/* NOTE(review): ret stays -EINVAL when the vdev is not created, so the warn below fires then too — confirm intended */
+	mutex_lock(&ar->conf_mutex);
+	if (arvif->is_created)
+		ret = ath12k_mac_setup_bcn_tmpl(arvif);
+	mutex_unlock(&ar->conf_mutex);
+	if (ret)
+		ath12k_warn(ar->ab, "failed to update bcn tmpl for vdev_id: %d ret: %d\n",
+				arvif->vdev_id, ret);
+}
+
+static void ath12k_update_obss_color_notify_work(struct work_struct *work)
+{
+	struct ath12k_link_vif *arvif = container_of(work, struct ath12k_link_vif,
+					update_obss_color_notify_work);
+	struct ath12k *ar;
+
+	ar = arvif->ar;
+	/* link not yet bound to a radio: nothing to report */
+	if (!ar)
+		return;
+
+	mutex_lock(&ar->conf_mutex);
+	if (arvif->is_created)
+		ieee80211_obss_color_collision_notify_mlo(arvif->ahvif->vif,
+							  arvif->obss_color_bitmap,
+							  GFP_KERNEL,
+							  arvif->link_id);
+	arvif->obss_color_bitmap = 0;
+	mutex_unlock(&ar->conf_mutex);
+}
+
+int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
+{
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
 	struct ath12k_base *ab = ar->ab;
-	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_hw *hw = ar->ah->hw;
 	struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
-	struct ath12k_wmi_peer_create_arg peer_param;
+	struct ath12k_wmi_peer_create_arg peer_param = {0};
+	struct ieee80211_bss_conf *link_conf;
 	u32 param_id, param_value;
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+	struct wireless_dev *wdev = ieee80211_vif_to_wdev(vif);
+	struct net_device *link_ndev = NULL;
+	struct ath12k_link_vif_pvt *link_ndev_pvt = NULL;
+#endif
 	u16 nss;
 	int i;
-	int ret;
-	int bit;
+	int ret, fbret, vdev_id;
+	u8 link_addr[ETH_ALEN];
+	int txpower;
+	u8 link_id;
 
-	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+	lockdep_assert_held(&ar->conf_mutex);
 
-	mutex_lock(&ar->conf_mutex);
+	/* If no link is active and scan vdev is requested
+	 * use a default link conf for scan address purpose
+	 */
+	if (arvif->link_id == ATH12K_DEFAULT_SCAN_LINK &&
+	    vif->valid_links)
+		link_id = ffs(vif->valid_links) - 1;
+	else
+		link_id = arvif->link_id;
 
-	if (vif->type == NL80211_IFTYPE_AP &&
-	    ar->num_peers > (ar->max_num_peers - 1)) {
-		ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
-		ret = -ENOBUFS;
-		goto err;
-	}
+	rcu_read_lock();
 
-	if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
-		ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
-			    TARGET_NUM_VDEVS);
-		ret = -EBUSY;
-		goto err;
-	}
+	link_conf = rcu_dereference(vif->link_conf[link_id]);
 
-	memset(arvif, 0, sizeof(*arvif));
+	if (!link_conf) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
 
-	arvif->ar = ar;
-	arvif->vif = vif;
+	if (arvif->link_id == ATH12K_DEFAULT_SCAN_LINK &&
+	    !is_zero_ether_addr(arvif->addr)) {
+		memcpy(link_addr, arvif->addr, ETH_ALEN);
+	} else {
+		memcpy(link_addr, link_conf->addr, ETH_ALEN);
+		memcpy(arvif->addr, link_conf->addr, ETH_ALEN);
+	}
 
-	INIT_LIST_HEAD(&arvif->list);
+	txpower = link_conf->txpower;
+	rcu_read_unlock();
 
-	/* Should we initialize any worker to handle connection loss indication
-	 * from firmware in sta mode?
+	/* Send vdev stats offload commands to firmware before first vdev
+	 * creation. ie., when num_created_vdevs = 0
 	 */
+	if (ar->fw_stats.en_vdev_stats_ol && !ar->num_created_vdevs) {
+		ret = ath12k_dp_tx_htt_h2t_vdev_stats_ol_req(ar, 0);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to request vdev stats offload: %d\n", ret);
+			goto err;
+		}
+	}
 
-	for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
-		arvif->bitrate_mask.control[i].legacy = 0xffffffff;
-		memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
-		       sizeof(arvif->bitrate_mask.control[i].ht_mcs));
-		memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
-		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+	spin_lock_bh(&ar->ab->base_lock);
+
+	if (!ab->free_vdev_map) {
+		spin_unlock_bh(&ar->ab->base_lock);
+		ath12k_warn(ar->ab, "failed to create vdev. No free vdev id left.\n");
+		ret = -EINVAL;
+		goto err;
 	}
 
-	bit = __ffs64(ab->free_vdev_map);
+	vdev_id = __ffs64(ab->free_vdev_map);
+	ab->free_vdev_map &= ~(1LL << vdev_id);
+	spin_unlock_bh(&ar->ab->base_lock);
 
-	arvif->vdev_id = bit;
-	arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+	arvif->vdev_id = vdev_id;
+	/* Assume it is non-MBSSID initially; we'll overwrite it later.
+	 */
+	arvif->tx_vdev_id = vdev_id;
+	ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+	arvif->ar = ar;
+	arvif->ab = ab;
+	arvif->dp = &ab->dp;
+	arvif->dev = ab->dev;
+	arvif->pdev_idx = ar->pdev_idx;
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+	if (arvif->link_id != ATH12K_DEFAULT_SCAN_LINK) {
+		link_ndev = vif->link_ndev[link_id];
+		if (link_ndev) {
+			link_ndev_pvt = netdev_priv(link_ndev);
+			link_ndev_pvt->ab = ab;
+			link_ndev_pvt->arvif = arvif;
+			arvif->ndev_pvt = link_ndev_pvt;
+		}
+	}
+#endif
 
 	switch (vif->type) {
 	case NL80211_IFTYPE_UNSPECIFIED:
 	case NL80211_IFTYPE_STATION:
-		arvif->vdev_type = WMI_VDEV_TYPE_STA;
+		ahvif->vdev_type = WMI_VDEV_TYPE_STA;
 		break;
 	case NL80211_IFTYPE_MESH_POINT:
-		arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
+		ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
 		fallthrough;
 	case NL80211_IFTYPE_AP:
-		arvif->vdev_type = WMI_VDEV_TYPE_AP;
+		ahvif->vdev_type = WMI_VDEV_TYPE_AP;
 		break;
 	case NL80211_IFTYPE_MONITOR:
-		arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
-		ar->monitor_vdev_id = bit;
+		ahvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+		ar->monitor_vdev_id = vdev_id;
 		break;
 	default:
 		WARN_ON(1);
 		break;
 	}
 
-	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac add interface id %d type %d subtype %d map %llx\n",
-		   arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
+	ath12k_dbg(ar->ab, ATH12K_DBG_SET(MAC, L1), "mac vdev create id %d type %d subtype %d map %llx\n",
+		   arvif->vdev_id, ahvif->vdev_type, ahvif->vdev_subtype,
 		   ab->free_vdev_map);
 
+	/* TODO Revisit this sharing of common queues across all ar's */
 	vif->cab_queue = arvif->vdev_id % (ATH12K_HW_MAX_QUEUES - 1);
 	for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
 		vif->hw_queue[i] = i % (ATH12K_HW_MAX_QUEUES - 1);
 
-	ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg);
+	ret = ath12k_mac_setup_vdev_create_args(arvif, &vdev_arg);
+	if (ret) {
+		ath12k_warn(ab, "failed to create vdev parameters %d: %d\n",
+			    arvif->vdev_id, ret);
+		goto err;
+	}
 
-	ret = ath12k_wmi_vdev_create(ar, vif->addr, &vdev_arg);
+	ret = ath12k_wmi_vdev_create(ar, vdev_arg.type == WMI_VDEV_TYPE_MONITOR ?
+				     ar->mac_addr : link_addr,
+				     &vdev_arg);
 	if (ret) {
 		ath12k_warn(ab, "failed to create WMI vdev %d: %d\n",
 			    arvif->vdev_id, ret);
+		/* return the reserved vdev id to the free pool on failure */
+		spin_lock_bh(&ar->ab->base_lock);
+		ab->free_vdev_map |= 1LL << arvif->vdev_id;
+		spin_unlock_bh(&ar->ab->base_lock);
+		goto err;
 	}
 
 	ar->num_created_vdevs++;
+	ahvif->num_vdev_created++;
+	arvif->is_created = true;
 	ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM created, vdev_id %d\n",
-		   vif->addr, arvif->vdev_id);
+		   link_addr, arvif->vdev_id);
+
 	ar->allocated_vdev_map |= 1LL << arvif->vdev_id;
-	ab->free_vdev_map &= ~(1LL << arvif->vdev_id);
 
 	spin_lock_bh(&ar->data_lock);
+
+	/* list added is not needed during mode1 recovery
+	 * as the arvif(s) updated are from the existing
+	 * list
+	 */
+	if (!ab->recovery_start)
 	list_add(&arvif->list, &ar->arvifs);
+
 	spin_unlock_bh(&ar->data_lock);
 
-	ath12k_mac_op_update_vif_offload(hw, vif);
+	ath12k_mac_update_vif_offload(ar, arvif);
 
 	nss = hweight32(ar->cfg_tx_chainmask) ? : 1;
 	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
@@ -5061,10 +10950,10 @@
 		goto err_vdev_del;
 	}
 
-	switch (arvif->vdev_type) {
+	switch (ahvif->vdev_type) {
 	case WMI_VDEV_TYPE_AP:
 		peer_param.vdev_id = arvif->vdev_id;
-		peer_param.peer_addr = vif->addr;
+		peer_param.peer_addr = link_addr;
 		peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
 		ret = ath12k_peer_create(ar, arvif, NULL, &peer_param);
 		if (ret) {
@@ -5079,6 +10968,22 @@
 				    arvif->vdev_id, ret);
 			goto err_peer_del;
 		}
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+		if (arvif->link_id != ATH12K_DEFAULT_SCAN_LINK) {
+			if (hweight16(vif->valid_links) <= 1 && link_ndev && !wdev->ml_netdev) {
+				ath12k_free_bonddev_for_sfe(wdev, vif, link_id);
+				arvif->ndev_pvt = NULL;
+				arvif->ppe_vp_num = -1;
+			} else if (link_ndev && !arvif->ndev_pvt->is_bond_enslaved) {
+				ath12k_enable_ppe_for_link_netdev(ab, arvif,
+								 arvif->ndev_pvt->link_ndev);
+				netif_tx_start_all_queues(arvif->ndev_pvt->bond_dev);
+				netif_tx_start_all_queues(arvif->ndev_pvt->link_ndev);
+				arvif->ndev_pvt->bond_dev->flags |= IFF_UP;
+				ath12k_bond_link_enslave(arvif, arvif->ndev_pvt->link_ndev);
+			}
+		}
+#endif
 		break;
 	case WMI_VDEV_TYPE_STA:
 		param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
@@ -5118,17 +11023,20 @@
 			goto err_peer_del;
 		}
 		break;
+	case WMI_VDEV_TYPE_MONITOR:
+		set_bit(MONITOR_VDEV_CREATED, &ar->monitor_flags);
+		break;
 	default:
 		break;
 	}
 
-	arvif->txpower = vif->bss_conf.txpower;
+	arvif->txpower = txpower;
 	ret = ath12k_mac_txpower_recalc(ar);
 	if (ret)
 		goto err_peer_del;
 
 	param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
-	param_value = ar->hw->wiphy->rts_threshold;
+	param_value = hw->wiphy->rts_threshold;
 	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
 					    param_id, param_value);
 	if (ret) {
@@ -5138,47 +11046,240 @@
 
 	ath12k_dp_vdev_tx_attach(ar, arvif);
 
-	if (vif->type != NL80211_IFTYPE_MONITOR && ar->monitor_conf_enabled)
+	ath12k_mac_ap_ps_recalc(ar);
+
+	if (vif->type != NL80211_IFTYPE_MONITOR &&
+	    test_bit(MONITOR_CONF_ENABLED, &ar->monitor_flags))
 		ath12k_mac_monitor_vdev_create(ar);
 
-	mutex_unlock(&ar->conf_mutex);
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+	ret = ath12k_wmi_send_vdev_set_intra_bss_cmd(ar, arvif->vdev_id,
+						     1);
+	if (ret) {
+		ath12k_warn(ab, "failed to set vdev %d intra bss enable :%d\n",
+			    arvif->vdev_id, ret);
+		goto err_peer_del;
+	}
+#endif
+
+	ath12k_debugfs_add_interface(arvif);
 
 	return ret;
 
 err_peer_del:
-	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
-		reinit_completion(&ar->peer_delete_done);
-
-		ret = ath12k_wmi_send_peer_delete_cmd(ar, vif->addr,
-						      arvif->vdev_id);
-		if (ret) {
-			ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
-				    arvif->vdev_id, vif->addr);
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
+		fbret = ath12k_peer_delete(ar, arvif->vdev_id, link_addr);
+		if (fbret) {
+			ath12k_warn(ar->ab, "failed to delete peer %pM vdev_id %d ret %d\n",
+				    link_addr, arvif->vdev_id, fbret);
 			goto err;
 		}
-
-		ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id,
-						       vif->addr);
-		if (ret)
-			goto err;
-
-		ar->num_peers--;
 	}
 
 err_vdev_del:
 	ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
 	ar->num_created_vdevs--;
+	arvif->is_created = false;
 	ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
+	spin_lock_bh(&ar->ab->base_lock);
 	ab->free_vdev_map |= 1LL << arvif->vdev_id;
+	spin_unlock_bh(&ar->ab->base_lock);
 	ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
 	spin_lock_bh(&ar->data_lock);
 	list_del(&arvif->list);
 	spin_unlock_bh(&ar->data_lock);
 
 err:
+	arvif->is_created = false;
+	arvif->ar = NULL;
+	return ret;
+}
+
+void ath12k_mac_vif_cache_flush(struct ath12k *ar,  struct ieee80211_vif *vif,
+				u8 link_id)
+{
+	struct ath12k_hw *ah = ar->ah;
+	struct ath12k_vif *ahvif = (void *)vif->drv_priv;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_key_conf *key_conf, *tmp;
+	struct ath12k_link_sta *arsta = NULL;
+	struct ath12k_sta *ahsta;
+	struct ath12k_vif_cache *cache;
+	int ret;
+
+	lockdep_assert_held(&ah->conf_mutex);
+
+	/* flush conf changes cached while this link had no radio assigned */
+	arvif = ahvif->link[link_id];
+
+	if (WARN_ON(!arvif))
+		return;
+
+	if (WARN_ON(!arvif->is_created))
+		return;
+
+	cache = &ahvif->cache[link_id];
+
+	if (cache->tx_conf.changed) {
+		ret = ath12k_mac_conf_tx(ar, arvif, cache->tx_conf.ac,
+					 &cache->tx_conf.tx_queue_params);
+		if (ret)
+			ath12k_warn(ar->ab,
+				    "unable to apply tx conf to vdev %d ret %d\n", arvif->vdev_id, ret);
+
+		memset(&cache->tx_conf, 0, sizeof(struct ath12k_tx_conf));
+	}
+
+	if (cache->bss_conf_changed) {
+		ath12k_mac_bss_info_changed(ar, arvif, &vif->bss_conf,
+					    cache->bss_conf_changed);
+
+		cache->bss_conf_changed = 0;
+	}
+
+	if (!list_empty(&cache->key_conf.list)) {
+		list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) {
+			/* reset per entry: NULL sta (group key) must not reuse stale arsta */
+			ahsta = key_conf->sta ?
+				ath12k_sta_to_ahsta(key_conf->sta) : NULL;
+			arsta = ahsta ? ahsta->link[link_id] : NULL;
+
+			ret = ath12k_mac_set_key(ar, key_conf->cmd,
+					arvif, arsta,
+					key_conf->key);
+			if (ret)
+				ath12k_warn(ar->ab, "unable to apply set key param to vdev %d ret %d\n",
+						arvif->vdev_id, ret);
+
+			list_del(&key_conf->list);
+			kfree(key_conf);
+		}
+	}
+}
+
+static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
+						    struct ath12k_link_vif *arvif,
+						    struct ieee80211_chanctx_conf *ctx)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	struct ath12k_base *ab;
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
+	struct ath12k_link_vif *scan_arvif;
+	int ret;
+
+	lockdep_assert_held(&ah->conf_mutex);
+
+	if (arvif->ar)
+		goto out;
+
+	ar = ath12k_get_ar_by_ctx(hw, ctx);
+
+	if (!ar) {
+		ath12k_err(NULL,
+			   "unable to determine device for the passed channel ctx\n");
+		goto out;
+	}
+
+	/* cleanup the scan vdev if we are done scan on that ar
+	 * and now we want to create for actual usage
+	 */
+	if (vif->valid_links) {
+		scan_arvif = arvif->ahvif->link[ATH12K_DEFAULT_SCAN_LINK];
+
+		if (scan_arvif && scan_arvif->ar == ar) {
+			mutex_lock(&ar->conf_mutex);
+			ar->scan.vdev_id = -1;
 	mutex_unlock(&ar->conf_mutex);
+			ath12k_mac_remove_link_interface(hw, scan_arvif);
+			ath12k_mac_unassign_link_vif(scan_arvif);
+		}
+	}
 
-	return ret;
+	ab = ar->ab;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (vif->type == NL80211_IFTYPE_AP &&
+	    ar->num_peers > (ar->max_num_peers - 1)) {
+		ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
+		ret = -ENOBUFS;
+		goto unlock;
+	}
+
+	if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
+		ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
+			    TARGET_NUM_VDEVS);
+		ret = -EBUSY;
+		goto unlock;
+	}
+	/* create the vdev on this radio and bind the link to it */
+	ret = ath12k_mac_vdev_create(ar, arvif);
+	if (ret) {
+		ath12k_warn(ab, "failed to create vdev %d ret %d\n",
+			    arvif->vdev_id, ret);
+		goto unlock;
+	}
+
+	/* Apply any parameters for the vdev which were received after
+	 * add_interface, corresponding to this vif
+	 */
+	ath12k_mac_vif_cache_flush(ar, vif, arvif->link_id);
+
+unlock:
+	mutex_unlock(&ar->conf_mutex);
+out:
+	return arvif->ar;
+}
+
+static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
+				       struct ieee80211_vif *vif)
+{
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ath12k_hw *ah = hw->priv;
+	int i;
+
+	/* The ar to which this vif will be assigned cannot be determined yet,
+	 * since it depends on the channel on which the vif is brought up.
+	 * For now, prepare the vif/arvif with the common state.
+	 * TODO Add checks to ensure at least one ar has a free vdev
+	 * Also if the hw uses only one link/ar the vdev could be created
+	 * here itself
+	 */
+
+	mutex_lock(&ah->conf_mutex);
+	memset(ahvif, 0, sizeof(*ahvif));
+	/* NOTE(review): stores the nl80211 iftype; translated to a WMI type in vdev_create */
+	ahvif->vdev_type = vif->type;
+	ahvif->vif = vif;
+	ahvif->ah = ah;
+
+	ahvif->deflink.ahvif = ahvif;
+	ahvif->deflink.link_id = 0;
+	ahvif->mcbc_gsn = 0;
+	INIT_LIST_HEAD(&ahvif->deflink.list);
+	INIT_WORK(&ahvif->deflink.update_obss_color_notify_work,
+		  ath12k_update_obss_color_notify_work);
+	INIT_WORK(&ahvif->deflink.update_bcn_template_work,
+		  ath12k_update_bcn_template_work);
+	ahvif->deflink.num_stations = 0;
+	init_completion(&ahvif->deflink.peer_ch_width_switch_send);
+	INIT_WORK(&ahvif->deflink.peer_ch_width_switch_work,
+		  ath12k_wmi_peer_chan_width_switch_work);
+
+	ahvif->key_cipher = INVALID_CIPHER;
+
+	/* Allocate Default Queue now and reassign during actual vdev create */
+	vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE;
+	for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+		vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE;
+
+	for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++)
+		INIT_LIST_HEAD(&ahvif->cache[i].key_conf.list);
+
+	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+	mutex_unlock(&ah->conf_mutex);
+	return 0;
+}
 
 static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif)
@@ -5206,80 +11307,154 @@
 	}
 }
 
-static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
-					   struct ieee80211_vif *vif)
+static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw,
+					     struct ath12k_link_vif *arvif)
 {
-	struct ath12k *ar = hw->priv;
-	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
-	struct ath12k_base *ab = ar->ab;
-	unsigned long time_left;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_vif *vif = ahvif->vif;
+	struct ath12k_key_conf *key_conf, *tmp;
+	struct ath12k_base *ab;
+	struct ath12k_mac_tx_mgmt_free_arg arg = { };
 	int ret;
 
+	cancel_work_sync(&arvif->update_obss_color_notify_work);
+	cancel_work_sync(&arvif->update_bcn_template_work);
+	cancel_work_sync(&arvif->peer_ch_width_switch_work);
+
+	lockdep_assert_held(&ah->conf_mutex);
+
+	ar = arvif->ar;
+	if (!ar) {
+		ath12k_dbg(NULL, ATH12K_DBG_MAC,
+			   "unable to determine device to remove interface\n");
+		return;
+	}
+
+	ab = ar->ab;
+
 	mutex_lock(&ar->conf_mutex);
 
 	ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n",
 		   arvif->vdev_id);
 
-	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
-		ret = ath12k_peer_delete(ar, arvif->vdev_id, vif->addr);
+	ret = ath12k_spectral_vif_stop(arvif);
 		if (ret)
-			ath12k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n",
+		ath12k_warn(ab, "failed to stop spectral for vdev %i: %d\n",
 				    arvif->vdev_id, ret);
-	}
-
-	reinit_completion(&ar->vdev_delete_done);
 
-	ret = ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
-	if (ret) {
-		ath12k_warn(ab, "failed to delete WMI vdev %d: %d\n",
-			    arvif->vdev_id, ret);
-		goto err_vdev_del;
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
+		ret = ath12k_peer_delete(ar, arvif->vdev_id, arvif->addr);
+		if (ret)
+			ath12k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d"
+				    " num_peer : %d\n", arvif->vdev_id, ret, ar->num_peers);
 	}
 
-	time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
-						ATH12K_VDEV_DELETE_TIMEOUT_HZ);
-	if (time_left == 0) {
-		ath12k_warn(ab, "Timeout in receiving vdev delete response\n");
+	ret = ath12k_mac_vdev_delete(ar, arvif);
+	if (ret)
 		goto err_vdev_del;
-	}
 
-	if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
 		ar->monitor_vdev_id = -1;
-		ar->monitor_vdev_created = false;
-	} else if (ar->monitor_vdev_created && !ar->monitor_started) {
+		clear_bit(MONITOR_VDEV_CREATED, &ar->monitor_flags);
+	} else if (test_bit(MONITOR_VDEV_CREATED, &ar->monitor_flags) &&
+		   !test_bit(MONITOR_VDEV_STARTED, &ar->monitor_flags)) {
 		ret = ath12k_mac_monitor_vdev_delete(ar);
 	}
 
-	ab->free_vdev_map |= 1LL << (arvif->vdev_id);
-	ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
-	ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
-	ar->num_created_vdevs--;
-
 	ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
-		   vif->addr, arvif->vdev_id);
+		   arvif->addr, arvif->vdev_id);
 
 err_vdev_del:
+	ath12k_peer_cleanup(ar, arvif->vdev_id);
+
 	spin_lock_bh(&ar->data_lock);
-	list_del(&arvif->list);
-	spin_unlock_bh(&ar->data_lock);
 
-	ath12k_peer_cleanup(ar, arvif->vdev_id);
+	arg.ar = ar;
+	arg.vif = vif;
+	arg.link_id = arvif->link_id;
+	arg.type = u8_encode_bits(true, ATH12K_MAC_TX_MGMT_FREE_TYPE_PDEV) |
+		   u8_encode_bits(true, ATH12K_MAC_TX_MGMT_FREE_TYPE_VIF) |
+		   u8_encode_bits(true, ATH12K_MAC_TX_MGMT_FREE_TYPE_LINK);
 
 	idr_for_each(&ar->txmgmt_idr,
-		     ath12k_mac_vif_txmgmt_idr_remove, vif);
+		     ath12k_mac_tx_mgmt_pending_free, &arg);
+
+	spin_unlock_bh(&ar->data_lock);
 
 	ath12k_mac_vif_unref(&ab->dp, vif);
 	ath12k_dp_tx_put_bank_profile(&ab->dp, arvif->bank_id);
+	ahvif->key_cipher = INVALID_CIPHER;
 
 	/* Recalc txpower for remaining vdev */
 	ath12k_mac_txpower_recalc(ar);
-	clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+	ath12k_mac_ap_ps_recalc(ar);
+
+	ath12k_debugfs_remove_interface(arvif);
+
+	if (arvif->link_id < IEEE80211_MLD_MAX_NUM_LINKS &&
+	    !list_empty(&ahvif->cache[arvif->link_id].key_conf.list)) {
+		list_for_each_entry_safe(key_conf, tmp,
+				    &ahvif->cache[arvif->link_id].key_conf.list,
+				    list) {
+			list_del(&key_conf->list);
+			kfree(key_conf);
+		}
+	}
 
 	/* TODO: recal traffic pause state based on the available vdevs */
 
 	mutex_unlock(&ar->conf_mutex);
 }
 
+static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
+					   struct ieee80211_vif *vif)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k_vif *ahvif = (void *)vif->drv_priv;
+	struct ath12k_link_vif *arvif;
+	struct ath12k *ar;
+	u8 link_id;
+	int ret;
+	/* tear down every created link vdev of this vif */
+	mutex_lock(&ah->conf_mutex);
+
+	for_each_set_bit(link_id, &ahvif->links_map, ATH12K_NUM_MAX_LINKS) {
+		arvif = ahvif->link[link_id];
+
+		if (!arvif)
+			continue;
+
+		ar = arvif->ar;
+
+		if (!ar)
+			continue;
+
+		if (WARN_ON(arvif->link_id != link_id))
+			continue;
+
+		if (arvif->is_scan_vif && arvif->is_started) {
+			mutex_lock(&ar->conf_mutex);
+			ret = ath12k_mac_vdev_stop(arvif);
+			mutex_unlock(&ar->conf_mutex);
+			if (ret) {
+				mutex_unlock(&ah->conf_mutex);
+				ath12k_warn(ar->ab, "failed to stop vdev %d: %d\n",
+					    arvif->vdev_id, ret);
+				return;
+			}
+			arvif->is_started = false;
+			arvif->is_scan_vif = false;
+		}
+		/* NOTE(review): a stop failure above returns early, leaving remaining links unremoved — confirm intended */
+		ath12k_mac_remove_link_interface(hw, arvif);
+		ath12k_mac_unassign_link_vif(arvif);
+	}
+
+	mutex_unlock(&ah->conf_mutex);
+}
+
 /* FIXME: Has to be verified. */
 #define SUPPORTED_FILTERS			\
 	(FIF_ALLMULTI |				\
@@ -5295,77 +11470,107 @@
 					   unsigned int *total_flags,
 					   u64 multicast)
 {
-	struct ath12k *ar = hw->priv;
-	bool reset_flag;
-	int ret;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	int i;
 
+	mutex_lock(&ah->conf_mutex);
+
+	ar = ah->radio;
+	for (i = 0; i < ah->num_radio; i++) {
 	mutex_lock(&ar->conf_mutex);
 
-	changed_flags &= SUPPORTED_FILTERS;
 	*total_flags &= SUPPORTED_FILTERS;
 	ar->filter_flags = *total_flags;
 
-	/* For monitor mode */
-	reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
-
-	ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
-	if (!ret) {
-		if (!reset_flag)
-			set_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
-		else
-			clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
-	} else {
-		ath12k_warn(ar->ab,
-			    "fail to set monitor filter: %d\n", ret);
-	}
-	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
-		   "changed_flags:0x%x, total_flags:0x%x, reset_flag:%d\n",
-		   changed_flags, *total_flags, reset_flag);
-
 	mutex_unlock(&ar->conf_mutex);
+		ar++;
+	}
+	mutex_unlock(&ah->conf_mutex);
 }
 
+/* TODO Also support link based antenna configs, below might not be accurate */
 static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
 {
-	struct ath12k *ar = hw->priv;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	int antennas_rx = 0, antennas_tx = 0, i;
+
+	mutex_lock(&ah->conf_mutex);
 
+	ar = ah->radio;
+	for (i = 0; i < ah->num_radio; i++) {
 	mutex_lock(&ar->conf_mutex);
 
-	*tx_ant = ar->cfg_tx_chainmask;
-	*rx_ant = ar->cfg_rx_chainmask;
+		antennas_tx = max_t(u32, antennas_tx, ar->cfg_tx_chainmask);
+		antennas_rx = max_t(u32, antennas_rx, ar->cfg_rx_chainmask);
 
 	mutex_unlock(&ar->conf_mutex);
+		ar++;
+	}
+
+	*tx_ant = antennas_tx;
+	*rx_ant = antennas_rx;
+
+	mutex_unlock(&ah->conf_mutex);
 
 	return 0;
 }
 
 static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
 {
-	struct ath12k *ar = hw->priv;
-	int ret;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	int ret = 0, i;
+
+	mutex_lock(&ah->conf_mutex);
 
+	ar = ah->radio;
+	for (i = 0; i < ah->num_radio; i++) {
 	mutex_lock(&ar->conf_mutex);
 	ret = __ath12k_set_antenna(ar, tx_ant, rx_ant);
 	mutex_unlock(&ar->conf_mutex);
 
+		ar++;
+	}
+
+	mutex_unlock(&ah->conf_mutex);
+
 	return ret;
 }
 
-static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
+static int ath12k_mac_ampdu_action(struct ieee80211_hw *hw,
 				      struct ieee80211_vif *vif,
-				      struct ieee80211_ampdu_params *params)
+				   struct ieee80211_ampdu_params *params,
+				   u8 link_id)
 {
-	struct ath12k *ar = hw->priv;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
 	int ret = -EINVAL;
 
+	lockdep_assert_held(&ah->conf_mutex);
+
+	ar = ath12k_get_ar_by_vif(hw, vif, link_id);
+	if (!ar) {
+		ath12k_err(NULL, "unable to determine device to set ampdu params\n");
+		return -EPERM;
+	}
+
+	if (unlikely(test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
+		return -ESHUTDOWN;
+
+	if (params->sta->mlo &&
+	   (test_bit(ATH12K_FLAG_UMAC_RECOVERY_START, &ar->ab->dev_flags)))
+		return 0;
+
 	mutex_lock(&ar->conf_mutex);
 
 	switch (params->action) {
 	case IEEE80211_AMPDU_RX_START:
-		ret = ath12k_dp_rx_ampdu_start(ar, params);
+		ret = ath12k_dp_rx_ampdu_start(ar, params, link_id);
 		break;
 	case IEEE80211_AMPDU_RX_STOP:
-		ret = ath12k_dp_rx_ampdu_stop(ar, params);
+		ret = ath12k_dp_rx_ampdu_stop(ar, params, link_id);
 		break;
 	case IEEE80211_AMPDU_TX_START:
 	case IEEE80211_AMPDU_TX_STOP_CONT:
@@ -5379,18 +11584,67 @@
 		break;
 	}
 
+	if (ret)
+		ath12k_warn(ar->ab, "unable to perform ampdu action %d ret %d\n",
+			    params->action, ret);
+
 	mutex_unlock(&ar->conf_mutex);
 
 	return ret;
 }
 
+static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
+				      struct ieee80211_vif *vif,
+				      struct ieee80211_ampdu_params *params)
+{
+	struct ieee80211_sta *sta = params->sta;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_hw *ah = hw->priv;
+	int ret = -EINVAL;
+	u8 link_id;
+
+	if (WARN_ON(!ahsta->links_map))
+		return -EINVAL;
+
+	mutex_lock(&ah->conf_mutex);
+
+	for_each_set_bit(link_id, &ahsta->links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+		ret = ath12k_mac_ampdu_action(hw, vif, params, link_id);
+		if (ret)
+			break;
+	}
+
+	mutex_unlock(&ah->conf_mutex);
+
+	return ret;
+}
+
+#ifndef CONFIG_ATH12K_BONDED_DS_SUPPORT
+int ath12k_mac_op_change_vif_links(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   u16 old_links, u16 new_links,
+				   struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
+{
+	ath12k_info(NULL,
+		    "link changed for MLD %pM old %d new %d\n", vif->addr, old_links, new_links);
+	return 0;
+}
+#endif
+
 static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
 				     struct ieee80211_chanctx_conf *ctx)
 {
-	struct ath12k *ar = hw->priv;
-	struct ath12k_base *ab = ar->ab;
+	struct ath12k *ar;
 
-	ath12k_dbg(ab, ATH12K_DBG_MAC,
+	ar = ath12k_get_ar_by_ctx(hw, ctx);
+
+	if (!ar) {
+		ath12k_err(NULL,
+			   "unable to determine device for the passed channel ctx\n");
+		return -EINVAL;
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
 		   "mac chanctx add freq %u width %d ptr %pK\n",
 		   ctx->def.chan->center_freq, ctx->def.width, ctx);
 
@@ -5403,6 +11657,7 @@
 	ar->rx_channel = ctx->def.chan;
 	spin_unlock_bh(&ar->data_lock);
 
+	ar->chan_tx_pwr = ATH12K_PDEV_TX_POWER_INVALID;
 	mutex_unlock(&ar->conf_mutex);
 
 	return 0;
@@ -5411,10 +11666,17 @@
 static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
 					 struct ieee80211_chanctx_conf *ctx)
 {
-	struct ath12k *ar = hw->priv;
-	struct ath12k_base *ab = ar->ab;
+	struct ath12k *ar;
 
-	ath12k_dbg(ab, ATH12K_DBG_MAC,
+	ar = ath12k_get_ar_by_ctx(hw, ctx);
+
+	if (!ar) {
+		ath12k_err(NULL,
+			   "unable to determine device for the passed channel ctx\n");
+		return;
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
 		   "mac chanctx remove freq %u width %d ptr %pK\n",
 		   ctx->def.chan->center_freq, ctx->def.width, ctx);
 
@@ -5427,18 +11689,186 @@
 	ar->rx_channel = NULL;
 	spin_unlock_bh(&ar->data_lock);
 
+	ar->chan_tx_pwr = ATH12K_PDEV_TX_POWER_INVALID;
 	mutex_unlock(&ar->conf_mutex);
 }
 
+static int ath12k_mac_set_6g_nonht_dup_conf(struct ath12k_link_vif *arvif,
+					    const struct cfg80211_chan_def *chandef)
+{
+	struct ath12k *ar = arvif->ar;
+	int param_id, ret = 0;
+	uint8_t value = 0;
+	bool is_psc = cfg80211_channel_is_psc(chandef->chan);
+	enum wmi_phy_mode mode = ath12k_phymodes[chandef->chan->band][chandef->width];
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_bss_conf *link_conf;
+	u8 link_addr[ETH_ALEN];
+	bool nontransmitted;
+
+	rcu_read_lock();
+
+	link_conf = ath12k_get_link_bss_conf(arvif);
+
+	if (!link_conf) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+
+	memcpy(link_addr, link_conf->addr, ETH_ALEN);
+	nontransmitted = link_conf->nontransmitted;
+	rcu_read_unlock();
+
+	if ((ahvif->vdev_type == WMI_VDEV_TYPE_AP) &&
+	    !nontransmitted &&
+	    (chandef->chan->band == NL80211_BAND_6GHZ)) {
+		param_id = WMI_VDEV_PARAM_6GHZ_PARAMS;
+		if (mode > MODE_11AX_HE20 && !is_psc) {
+			value |= WMI_VDEV_6GHZ_BITMAP_NON_HT_DUPLICATE_BEACON;
+			value |= WMI_VDEV_6GHZ_BITMAP_NON_HT_DUPLICATE_BCAST_PROBE_RSP;
+			value |= WMI_VDEV_6GHZ_BITMAP_NON_HT_DUPLICATE_FD_FRAME;
+		}
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		           "Set 6GHz non-ht dup params for vdev %pM ,vdev_id %d param %d value %d\n",
+			   link_addr, arvif->vdev_id, param_id, value);
+		ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, value);
+	}
+	return ret;
+}
+
+static void
+ath12k_mac_mlo_get_vdev_args(struct ath12k_link_vif *arvif,
+			     struct wmi_ml_arg *ml_arg)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ath12k_link_vif *arvif_p;
+	u8 link_id;
+	struct wmi_ml_partner_info *partner_info;
+	struct ieee80211_bss_conf *link_conf;
+
+	if (!ath12k_mac_is_ml_arvif(arvif))
+		return;
+
+	if (hweight16(ahvif->vif->valid_links) > ATH12K_WMI_MLO_MAX_LINKS)
+		return;
+
+	rcu_read_lock();
+
+	ml_arg->enabled = true;
+
+	/* We always add a new link via VDEV START, FW takes
+	 * care of internally adding this link to existing
+	 * link vdevs which are advertised as partners below
+	 */
+	ml_arg->link_add = true;
+
+	/* TODO check assoc and mcast vdev for AP mode */
+
+	partner_info = ml_arg->partner_info;
+
+	for_each_set_bit(link_id, &ahvif->links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif_p = ahvif->link[link_id];
+
+		if (WARN_ON(arvif_p == NULL))
+			continue;
+
+		if (arvif == arvif_p)
+			continue;
+
+		link_conf = rcu_dereference(ahvif->vif->link_conf[arvif_p->link_id]);
+
+		if (!link_conf)
+			continue;
+
+		partner_info->vdev_id = arvif_p->vdev_id;
+		partner_info->hw_link_id = arvif_p->ar->pdev->hw_link_id;
+		ether_addr_copy(partner_info->addr, link_conf->addr);
+		ml_arg->num_partner_links++;
+		partner_info++;
+	}
+	rcu_read_unlock();
+}
+
 static int
-ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
+ath12k_mac_vdev_config_after_start(struct ath12k_link_vif *arvif,
+				   const struct cfg80211_chan_def *chandef)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ath12k *ar = arvif->ar;
+	struct ath12k_base *ab = ar->ab;
+	int ret;
+	unsigned int dfs_cac_time;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (ar->supports_6ghz &&
+            chandef->chan->band == NL80211_BAND_6GHZ &&
+            (ahvif->vdev_type == WMI_VDEV_TYPE_STA || ahvif->vdev_type == WMI_VDEV_TYPE_AP) &&
+            test_bit(WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT, ar->ab->wmi_ab.svc_map)) {
+
+		if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
+			ath12k_mac_parse_tx_pwr_env(ar, arvif, &arvif->chanctx);
+
+                ath12k_mac_fill_reg_tpc_info(ar, arvif, &arvif->chanctx);
+                ath12k_wmi_send_vdev_set_tpc_power(ar, arvif->vdev_id,
+                                                   &arvif->reg_tpc_info);
+	}
+
+	/* Enable CAC Flag in the driver by checking all sub-channel's  DFS
+	 * state as NL80211_DFS_USABLE which indicates CAC needs to be
+	 * done before channel usage. This flags is used to drop rx packets.
+	 * during CAC.
+	 */
+	/* TODO Set the flag for other interface types as required */
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_AP && arvif->chanctx.radar_enabled &&
+	    cfg80211_chandef_dfs_usable(ar->ah->hw->wiphy, chandef)) {
+		set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+		dfs_cac_time = cfg80211_chandef_dfs_cac_time(ar->ah->hw->wiphy,
+							     chandef/*, false, false */);
+		ath12k_dbg(ab, ATH12K_DBG_MAC,
+			   "CAC (for %u ms) Started in center_freq %d center_freq1 %d for vdev %d\n",
+			   dfs_cac_time, chandef->chan->center_freq,
+			   chandef->center_freq1, arvif->vdev_id);
+	}
+
+	ret = ath12k_mac_set_txbf_conf(arvif);
+	if (ret)
+		ath12k_warn(ab, "failed to set txbf conf for vdev %d: %d\n",
+			    arvif->vdev_id, ret);
+
+	ret = ath12k_mac_set_6g_nonht_dup_conf(arvif, chandef);
+	if (ret)
+		ath12k_warn(ab, "failed to set 6G non-ht dup conf for vdev %d: %d\n",
+		            arvif->vdev_id, ret);
+	 /* In case of ADFS, we have to abort ongoing backgrorund CAC */
+	if ((ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) &&
+	    test_bit(ar->cfg_rx_chainmask, &ar->pdev->cap.adfs_chain_mask) &&
+	    ar->agile_chandef.chan) {
+		ath12k_dbg(ab, ATH12K_DBG_MAC,
+			   "Aborting ongoing Agile DFS on freq %d",
+			   ar->agile_chandef.chan->center_freq);
+		ret = ath12k_wmi_vdev_adfs_ocac_abort_cmd_send(ar,arvif->vdev_id);
+		if (!ret) {
+			memset(&ar->agile_chandef, 0, sizeof(struct cfg80211_chan_def));
+			ar->agile_chandef.chan = NULL;
+			ath12k_mac_background_dfs_event(ar, ATH12K_BGDFS_ABORT);
+		} else {
+			ath12k_warn(ab, "failed to abort agile CAC for vdev %d",
+				    arvif->vdev_id);
+		}
+	}
+
+	return ret;
+}
+
+static int ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
 			      const struct cfg80211_chan_def *chandef,
-			      bool restart)
+					 bool restart, bool radar_enabled)
 {
+	struct ath12k_vif *ahvif = arvif->ahvif;
 	struct ath12k *ar = arvif->ar;
 	struct ath12k_base *ab = ar->ab;
 	struct wmi_vdev_start_req_arg arg = {};
-	int he_support = arvif->vif->bss_conf.he_support;
 	int ret;
 
 	lockdep_assert_held(&ar->conf_mutex);
@@ -5449,50 +11879,66 @@
 	arg.dtim_period = arvif->dtim_period;
 	arg.bcn_intval = arvif->beacon_interval;
 
-	arg.freq = chandef->chan->center_freq;
-	arg.band_center_freq1 = chandef->center_freq1;
-	arg.band_center_freq2 = chandef->center_freq2;
-	arg.mode = ath12k_phymodes[chandef->chan->band][chandef->width];
-
-	arg.min_power = 0;
-	arg.max_power = chandef->chan->max_power * 2;
-	arg.max_reg_power = chandef->chan->max_reg_power * 2;
-	arg.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+	arg.channel.freq = chandef->chan->center_freq;
+	arg.channel.band_center_freq1 = chandef->center_freq1;
+	arg.channel.band_center_freq2 = chandef->center_freq2;
+	arg.channel.mode =
+		ath12k_phymodes[chandef->chan->band][chandef->width];
+
+	arg.channel.min_power = 0;
+	arg.channel.max_power = chandef->chan->max_power;
+	arg.channel.max_reg_power = chandef->chan->max_reg_power;
+	arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain;
+	arg.ru_punct_bitmap = ~chandef->ru_punct_bitmap;
+	if (test_bit(WMI_TLV_SERVICE_SW_PROG_DFS_SUPPORT,
+		     ar->ab->wmi_ab.svc_map)) {
+#if 0
+		arg.width_device = chandef->width_device;
+		arg.center_freq_device = chandef->center_freq_device;
+#endif
+	}
 
 	arg.pref_tx_streams = ar->num_tx_chains;
 	arg.pref_rx_streams = ar->num_rx_chains;
 
-	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
-		arg.ssid = arvif->u.ap.ssid;
-		arg.ssid_len = arvif->u.ap.ssid_len;
-		arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+	arg.mbssid_flags = 0;
+	arg.mbssid_tx_vdev_id = 0;
+	if (test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT,
+		     ar->ab->wmi_ab.svc_map)) {
+		ret = ath12k_mac_setup_vdev_args_mbssid(arvif,
+							  &arg.mbssid_flags,
+							  &arg.mbssid_tx_vdev_id);
+		if (ret)
+			return ret;
+	}
+
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
+		arg.ssid = ahvif->u.ap.ssid;
+		arg.ssid_len = ahvif->u.ap.ssid_len;
+		arg.hidden_ssid = ahvif->u.ap.hidden_ssid;
 
 		/* For now allow DFS for AP mode */
-		arg.chan_radar = !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+		arg.channel.chan_radar =
+			!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+		arg.channel.freq2_radar = radar_enabled;
+
 
-		arg.passive = arg.chan_radar;
+		arg.channel.passive = arg.channel.chan_radar;
 
 		spin_lock_bh(&ab->base_lock);
 		arg.regdomain = ar->ab->dfs_region;
 		spin_unlock_bh(&ab->base_lock);
-
-		/* TODO: Notify if secondary 80Mhz also needs radar detection */
-		if (he_support) {
-			ret = ath12k_set_he_mu_sounding_mode(ar, arvif);
-			if (ret) {
-				ath12k_warn(ar->ab, "failed to set he mode vdev %i\n",
-					    arg.vdev_id);
-				return ret;
-			}
-		}
 	}
 
-	arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+	arg.channel.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+
+	if (!restart)
+		ath12k_mac_mlo_get_vdev_args(arvif, &arg.ml);
 
 	ath12k_dbg(ab, ATH12K_DBG_MAC,
-		   "mac vdev %d start center_freq %d phymode %s\n",
-		   arg.vdev_id, arg.freq,
-		   ath12k_mac_phymode_str(arg.mode));
+		   "mac vdev %d start center_freq %d punct bitmap 0x%x phymode %s\n",
+		   arg.vdev_id, arg.channel.freq, arg.ru_punct_bitmap,
+		   ath12k_mac_phymode_str(arg.channel.mode));
 
 	ret = ath12k_wmi_vdev_start(ar, &arg, restart);
 	if (ret) {
@@ -5507,85 +11953,198 @@
 			    arg.vdev_id, restart ? "restart" : "start", ret);
 		return ret;
 	}
-
+	arvif->vdev_stop_notify_done = false;
 	ar->num_started_vdevs++;
 	ath12k_dbg(ab, ATH12K_DBG_MAC,  "vdev %pM started, vdev_id %d\n",
-		   arvif->vif->addr, arvif->vdev_id);
+		   arvif->addr, arvif->vdev_id);
 
-	/* Enable CAC Flag in the driver by checking the channel DFS cac time,
-	 * i.e dfs_cac_ms value which will be valid only for radar channels
-	 * and state as NL80211_DFS_USABLE which indicates CAC needs to be
-	 * done before channel usage. This flags is used to drop rx packets.
-	 * during CAC.
-	 */
-	/* TODO: Set the flag for other interface types as required */
-	if (arvif->vdev_type == WMI_VDEV_TYPE_AP &&
-	    chandef->chan->dfs_cac_ms &&
-	    chandef->chan->dfs_state == NL80211_DFS_USABLE) {
-		set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
-		ath12k_dbg(ab, ATH12K_DBG_MAC,
-			   "CAC Started in chan_freq %d for vdev %d\n",
-			   arg.freq, arg.vdev_id);
-	}
-
-	ret = ath12k_mac_set_txbf_conf(arvif);
+	ret = ath12k_mac_vdev_config_after_start(arvif, chandef);
 	if (ret)
-		ath12k_warn(ab, "failed to set txbf conf for vdev %d: %d\n",
-			    arvif->vdev_id, ret);
+		ath12k_warn(ab, "failed to configure vdev %d after %s: %d\n",
+			    arvif->vdev_id,
+			    restart ? "restart" : "start", ret);
 
 	return 0;
 }
 
-static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif)
+int ath12k_mac_vdev_start(struct ath12k_link_vif *arvif,
+			  const struct cfg80211_chan_def *chandef,
+			  bool radar_enabled)
+{
+	return ath12k_mac_vdev_start_restart(arvif, chandef, false,
+			radar_enabled);
+}
+
+static int ath12k_mac_vdev_restart(struct ath12k_link_vif *arvif,
+				   const struct cfg80211_chan_def *chandef,
+				   bool pseudo_restart, bool radar_enabled)
 {
-	struct ath12k *ar = arvif->ar;
+	struct ath12k_base *ab = arvif->ar->ab;
 	int ret;
 
-	lockdep_assert_held(&ar->conf_mutex);
-
-	reinit_completion(&ar->vdev_setup_done);
+	if(!pseudo_restart)
+		return ath12k_mac_vdev_start_restart(arvif, chandef, true,
+				radar_enabled);
 
-	ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id);
+	ret = ath12k_mac_vdev_stop(arvif);
 	if (ret) {
-		ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
+		ath12k_warn(ab, "failed to stop vdev %d: %d during restart\n",
 			    arvif->vdev_id, ret);
-		goto err;
+		return ret;
 	}
 
-	ret = ath12k_mac_vdev_setup_sync(ar);
+	ret = ath12k_mac_vdev_start(arvif, chandef, radar_enabled);
 	if (ret) {
-		ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
+		ath12k_warn(ab, "failed to start vdev %d: %d during restart\n",
 			    arvif->vdev_id, ret);
-		goto err;
+		return ret;
 	}
 
-	WARN_ON(ar->num_started_vdevs == 0);
+	return ret;
+}
 
-	ar->num_started_vdevs--;
-	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
-		   arvif->vif->addr, arvif->vdev_id);
+static void
+ath12k_mac_update_peer_ru_punct_bitmap_iter(void *data,
+					    struct ieee80211_sta *sta)
+{
+	struct ath12k_link_vif *arvif = data;
+	struct ath12k *ar = arvif->ar;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_link_sta *arsta;
+	struct ieee80211_link_sta *link_sta;
+	u8 link_id = arvif->link_id;
 
-	if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
-		clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
-		ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
-			   arvif->vdev_id);
+	if (ahsta->ahvif != arvif->ahvif)
+		return;
+
+	/* Check if there is a link sta in the vif link */
+	if (!(BIT(link_id) & ahsta->links_map))
+		return;
+
+	arsta = ahsta->link[link_id];
+	link_sta = ath12k_get_link_sta(arsta);
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer ru punct bitmap update\n");
+		return;
 	}
 
-	return 0;
-err:
-	return ret;
+	/* Puncturing in only applicable for EHT supported peers */
+	if (!link_sta->he_cap.has_he || !link_sta->eht_cap.has_eht)
+		return;
+
+	spin_lock_bh(&ar->data_lock);
+	/* RC_BW_CHANGED handler has infra already to send the bitmap.
+	 * Hence we can leverage from the same flag
+	 */
+	arsta->changed |= IEEE80211_RC_BW_CHANGED;
+	spin_unlock_bh(&ar->data_lock);
+
+	ieee80211_queue_work(ar->ah->hw, &arsta->update_wk);
 }
 
-static int ath12k_mac_vdev_start(struct ath12k_vif *arvif,
-				 const struct cfg80211_chan_def *chandef)
+void ath12k_mac_update_ru_punct_bitmap(struct ath12k_link_vif *arvif,
+				       struct ieee80211_chanctx_conf *old_ctx,
+				       struct ieee80211_chanctx_conf *new_ctx)
 {
-	return ath12k_mac_vdev_start_restart(arvif, chandef, false);
+	struct ath12k *ar = arvif->ar;
+	struct ath12k_hw *ah = ar->ah;
+
+	lockdep_assert_held(&ah->conf_mutex);
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (old_ctx->def.ru_punct_bitmap == new_ctx->def.ru_punct_bitmap)
+		return;
+
+	ieee80211_iterate_stations_atomic(ah->hw,
+					  ath12k_mac_update_peer_ru_punct_bitmap_iter,
+					  arvif);
 }
 
-static int ath12k_mac_vdev_restart(struct ath12k_vif *arvif,
-				   const struct cfg80211_chan_def *chandef)
+static int ath12k_vdev_restart_sequence(struct ath12k_link_vif *arvif,
+					struct ieee80211_chanctx_conf *new_ctx,
+					u64 vif_down_failed_map,
+					int vdev_index)
 {
-	return ath12k_mac_vdev_start_restart(arvif, chandef, true);
+	struct ath12k *ar = arvif->ar;
+	struct ath12k_link_vif *tx_arvif;
+	struct ath12k_vif *tx_ahvif;
+	struct ieee80211_bss_conf *link;
+	struct ieee80211_chanctx_conf old_chanctx;
+	struct vdev_up_params params = { 0 };
+	int ret = -EINVAL;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+
+	spin_lock_bh(&ar->data_lock);
+	old_chanctx = arvif->chanctx;
+	memcpy(&arvif->chanctx, new_ctx, sizeof(*new_ctx));
+	spin_unlock_bh(&ar->data_lock);
+
+	/* vdev is already restarted via mvr, need to setup
+	 * certain config alone after restart */
+	if (vdev_index == -1) {
+		ret = ath12k_mac_vdev_config_after_start(arvif, &new_ctx->def);
+		if (!ret)
+			goto beacon_tmpl_setup;
+	} else if (vif_down_failed_map & BIT_ULL(vdev_index)) {
+		ret = ath12k_mac_vdev_restart(arvif, &new_ctx->def, false,
+				new_ctx->radar_enabled);
+	} else {
+		ret = ath12k_mac_vdev_restart(arvif, &new_ctx->def, true,
+				new_ctx->radar_enabled);
+	}
+
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to restart vdev %d: %d\n",
+			    arvif->vdev_id, ret);
+		spin_lock_bh(&ar->data_lock);
+		arvif->chanctx = old_chanctx;
+		spin_unlock_bh(&ar->data_lock);
+		return ret;
+	}
+
+beacon_tmpl_setup:
+	ath12k_mac_update_ru_punct_bitmap(arvif, &old_chanctx, new_ctx);
+
+	if (arvif->pending_csa_up)
+		return 0;
+
+	if (!arvif->is_up)
+		return -EOPNOTSUPP;
+
+	ret = ath12k_mac_setup_bcn_tmpl(arvif);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to update bcn tmpl during csa: %d\n", arvif->vdev_id);
+		return ret;
+	}
+
+	params.vdev_id = arvif->vdev_id;
+	params.aid = ahvif->aid;
+	params.bssid = arvif->bssid;
+	rcu_read_lock();
+	link = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
+	if (link->mbssid_tx_vif) {
+		tx_ahvif = (void *)link->mbssid_tx_vif->drv_priv;
+		tx_arvif = tx_ahvif->link[link->mbssid_tx_vif_linkid];
+		params.tx_bssid = tx_arvif->bssid;
+		params.profile_idx = ahvif->vif->bss_conf.bssid_index;
+		params.profile_count = BIT(link->bssid_indicator);
+	}
+
+	if (ahvif->vif->type == NL80211_IFTYPE_STATION && link->nontransmitted) {
+		params.profile_idx = link->bssid_index;
+		params.profile_count = BIT(link->bssid_indicator) - 1;
+		params.tx_bssid = link->transmitter_bssid;
+	}
+
+	rcu_read_unlock();
+	ret = ath12k_wmi_vdev_up(arvif->ar, &params);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to bring vdev up %d: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	return ret;
 }
 
 struct ath12k_mac_change_chanctx_arg {
@@ -5593,30 +12152,74 @@
 	struct ieee80211_vif_chanctx_switch *vifs;
 	int n_vifs;
 	int next_vif;
+	bool csa_active;
+	struct ath12k *ar;
 };
 
 static void
 ath12k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
 				   struct ieee80211_vif *vif)
 {
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ath12k_link_vif *arvif;
 	struct ath12k_mac_change_chanctx_arg *arg = data;
+	u8 link_id;
+	struct ieee80211_bss_conf *link_conf;
+	struct ath12k_hw *ah = ahvif->ah;
 
-	if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
-		return;
+	lockdep_assert_held(&ah->conf_mutex);
+
+	for_each_set_bit(link_id, &ahvif->links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif = ahvif->link[link_id];
+ 
+		if (WARN_ON(arvif == NULL))
+			continue;
+
+		link_conf = ath12k_get_link_bss_conf(arvif);
+
+		if (WARN_ON(link_conf == NULL))
+			continue;
+
+		if ((rcu_access_pointer(link_conf->chanctx_conf) != arg->ctx) ||
+		    (arvif->ar != arg->ar))
+			continue;
+
+		if (link_conf->csa_active)
+			arg->csa_active = true;
 
 	arg->n_vifs++;
 }
+}
 
 static void
 ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
 				    struct ieee80211_vif *vif)
 {
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ath12k_link_vif *arvif;
 	struct ath12k_mac_change_chanctx_arg *arg = data;
 	struct ieee80211_chanctx_conf *ctx;
+	u8 link_id;
+	struct ieee80211_bss_conf *link_conf;
+	struct ath12k_hw *ah = ahvif->ah;
 
-	ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
-	if (ctx != arg->ctx)
-		return;
+	lockdep_assert_held(&ah->conf_mutex);
+
+	for_each_set_bit(link_id, &ahvif->links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif = ahvif->link[link_id];
+
+		if (WARN_ON(arvif == NULL))
+			continue;
+
+		link_conf = ath12k_get_link_bss_conf(arvif);
+
+		if (WARN_ON(link_conf == NULL))
+			continue;
+
+		ctx = rcu_access_pointer(link_conf->chanctx_conf);
+		if ((ctx != arg->ctx) ||
+		    (arvif->ar != arg->ar))
+			continue;
 
 	if (WARN_ON(arg->next_vif == arg->n_vifs))
 		return;
@@ -5624,8 +12227,179 @@
 	arg->vifs[arg->next_vif].vif = vif;
 	arg->vifs[arg->next_vif].old_ctx = ctx;
 	arg->vifs[arg->next_vif].new_ctx = ctx;
+		arg->vifs[arg->next_vif].link_conf = link_conf;
 	arg->next_vif++;
 }
+}
+
+static void ath12k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
+                                         struct ieee80211_chanctx_conf *conf,
+                                         void *data)
+{
+	struct ath12k_mac_num_chanctxs_arg *arg =
+				     (struct ath12k_mac_num_chanctxs_arg *)data;
+	struct ath12k *ctx_ar, *ar = arg->ar;
+
+	ctx_ar = ath12k_get_ar_by_ctx(ar->ah->hw, conf);
+
+	if (ctx_ar == ar)
+	        arg->num++;
+}
+
+static int ath12k_mac_num_chanctxs(struct ath12k *ar)
+{
+	struct ath12k_mac_num_chanctxs_arg arg = { .ar = ar, .num = 0};
+
+        ieee80211_iter_chan_contexts_atomic(ar->ah->hw,
+                                            ath12k_mac_num_chanctxs_iter,
+                                            &arg);
+
+        return arg.num;
+}
+
+static void ath12k_mac_update_rx_channel(struct ath12k *ar,
+                                         struct ieee80211_chanctx_conf *ctx,
+                                         struct ieee80211_vif_chanctx_switch *vifs,
+                                         int n_vifs)
+{
+	struct ath12k_mac_any_chandef_arg arg = { .ar = ar, .def = NULL};
+
+        /* Both locks are required because ar->rx_channel is modified. This
+         * allows readers to hold either lock.
+         */
+        lockdep_assert_held(&ar->conf_mutex);
+        lockdep_assert_held(&ar->data_lock);
+
+        WARN_ON(ctx && vifs);
+        WARN_ON(vifs && !n_vifs);
+
+        /* FIXME: Sort of an optimization and a workaround. Peers and vifs are
+         * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
+         * ppdu on Rx may reduce performance on low-end systems. It should be
+         * possible to make tables/hashmaps to speed the lookup up (be vary of
+         * cpu data cache lines though regarding sizes) but to keep the initial
+         * implementation simple and less intrusive fallback to the slow lookup
+         * only for multi-channel cases. Single-channel cases will remain to
+         * use the old channel derival and thus performance should not be
+         * affected much.
+         */
+        rcu_read_lock();
+        if (!ctx && ath12k_mac_num_chanctxs(ar) == 1) {
+                ieee80211_iter_chan_contexts_atomic(ar->ah->hw,
+                                                    ath12k_mac_get_any_chandef_iter,
+                                                    &arg);
+                if (vifs)
+                	ar->rx_channel = vifs[0].new_ctx->def.chan;
+                else if (arg.def)
+	                ar->rx_channel = arg.def->chan;
+	        else
+	        	ar->rx_channel = NULL;
+        } else if ((ctx && ath12k_mac_num_chanctxs(ar) == 0) ||
+                  (ctx && (ar->state == ATH12K_STATE_RESTARTED))) {
+               /* During driver restart due to firmware assert, since mac80211
+                * already has valid channel context for given radio, channel
+                * context iteration return num_chanctx > 0. So fix rx_channel
+                * when restart is in progress.
+                */
+                ar->rx_channel = ctx->def.chan;
+        } else {
+                ar->rx_channel = NULL;
+        }
+        rcu_read_unlock();
+	ar->chan_tx_pwr = ATH12K_PDEV_TX_POWER_INVALID;
+}
+
+static int
+ath12k_mac_multi_vdev_restart(struct ath12k *ar,
+			      const struct cfg80211_chan_def *chandef,
+			      u32 *vdev_id, int len,
+			      bool radar_enabled)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct wmi_pdev_multiple_vdev_restart_req_arg arg = {};
+	int ret, i;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	arg.vdev_ids.id_len = len;
+
+	for (i = 0; i < len; i++)
+		arg.vdev_ids.id[i] = vdev_id[i];
+
+	arg.channel.freq = chandef->chan->center_freq;
+	arg.channel.band_center_freq1 = chandef->center_freq1;
+	arg.channel.band_center_freq2 = chandef->center_freq2;
+	arg.channel.mode =
+		ath12k_phymodes[chandef->chan->band][chandef->width];
+
+	arg.channel.min_power = 0;
+	arg.channel.max_power = chandef->chan->max_power;
+	arg.channel.max_reg_power = chandef->chan->max_reg_power;
+	arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain;
+	arg.channel.chan_radar = !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+	arg.channel.passive = arg.channel.chan_radar;
+	arg.channel.freq2_radar = radar_enabled;
+	arg.channel.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+	arg.ru_punct_bitmap = ~chandef->ru_punct_bitmap;
+
+	if (test_bit(WMI_TLV_SERVICE_SW_PROG_DFS_SUPPORT,
+		     ar->ab->wmi_ab.svc_map)) {
+#if 0
+		arg.width_device = chandef->width_device;
+		arg.center_freq_device = chandef->center_freq_device;
+#endif
+	}
+
+	ret = ath12k_wmi_pdev_multiple_vdev_restart(ar, &arg);
+	if (ret)
+		ath12k_warn(ab, "mac failed to do mvr (%d)\n", ret);
+
+	return ret;
+}
+
+static void
+ath12k_mac_update_vif_chan_extras(struct ath12k *ar,
+				  struct ieee80211_vif_chanctx_switch *vifs,
+				  int n_vifs)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct cfg80211_chan_def *chandef;
+	lockdep_assert_held(&ar->conf_mutex);
+
+	chandef = &vifs[0].new_ctx->def;
+
+	spin_lock_bh(&ar->data_lock);
+        if (ar->awgn_intf_handling_in_prog && chandef) {
+                if (!ar->chan_bw_interference_bitmap ||
+                    (ar->chan_bw_interference_bitmap & WMI_DCS_SEG_PRI20)) {
+                        if (ar->awgn_chandef.chan->center_freq !=
+                            chandef->chan->center_freq) {
+                                ar->awgn_intf_handling_in_prog = false;
+                                ath12k_dbg(ab, ATH12K_DBG_MAC,
+                                           "AWGN : channel switch completed\n");
+                        } else {
+                                ath12k_warn(ab, "AWGN : channel switch is not done, freq : %d\n",
+                                            ar->awgn_chandef.chan->center_freq);
+                        }
+                } else {
+                        if ((ar->awgn_chandef.chan->center_freq ==
+                             chandef->chan->center_freq) &&
+                            (ar->awgn_chandef.width != chandef->width)) {
+                                ath12k_dbg(ab, ATH12K_DBG_MAC,
+                                           "AWGN : BW reduction is complete\n");
+                                ar->awgn_intf_handling_in_prog = false;
+                        } else {
+                                ath12k_warn(ab, "AWGN : awgn_freq : %d chan_freq %d"
+                                            " awgn_width %d chan_width %d\n",
+                                            ar->awgn_chandef.chan->center_freq,
+                                            chandef->chan->center_freq,
+                                            ar->awgn_chandef.width,
+                                            chandef->width);
+                        }
+                }
+        }
+        spin_unlock_bh(&ar->data_lock);
+}
 
 static void
 ath12k_mac_update_vif_chan(struct ath12k *ar,
@@ -5633,18 +12407,31 @@
 			   int n_vifs)
 {
 	struct ath12k_base *ab = ar->ab;
-	struct ath12k_vif *arvif;
+	struct ath12k_link_vif *arvif, *tx_arvif;
+	struct ath12k_vif *ahvif, *tx_ahvif = NULL;
 	int ret;
-	int i;
-	bool monitor_vif = false;
+	int i, trans_vdev_index;
+	u64 vif_down_failed_map = 0;
+	struct ieee80211_vif *tx_vif;
+	struct ieee80211_bss_conf *link;
+
+	/* Each vif is mapped to each bit of vif_down_failed_map. */
+	if (n_vifs > sizeof(vif_down_failed_map)*__CHAR_BIT__) {
+		ath12k_warn(ar->ab, "%d n_vifs are not supported currently\n",
+			    n_vifs);
+		return;
+	}
 
 	lockdep_assert_held(&ar->conf_mutex);
 
+	tx_arvif = NULL;
+
 	for (i = 0; i < n_vifs; i++) {
-		arvif = (void *)vifs[i].vif->drv_priv;
+		ahvif = (void *)vifs[i].vif->drv_priv;
+		arvif = ahvif->link[vifs[i].link_conf->link_id];
 
-		if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR)
-			monitor_vif = true;
+		if (WARN_ON(!arvif))
+			continue;
 
 		ath12k_dbg(ab, ATH12K_DBG_MAC,
 			   "mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
@@ -5654,88 +12441,289 @@
 			   vifs[i].old_ctx->def.width,
 			   vifs[i].new_ctx->def.width);
 
-		if (WARN_ON(!arvif->is_started))
+		if (!arvif->is_started) {
+			memcpy(&arvif->chanctx, vifs[i].new_ctx, sizeof(*vifs[i].new_ctx));
 			continue;
+		}
 
-		if (WARN_ON(!arvif->is_up))
+		if (!arvif->is_up)
 			continue;
 
+		if (vifs[i].link_conf->mbssid_tx_vif &&
+		    ahvif == (struct ath12k_vif *)vifs[i].link_conf->mbssid_tx_vif->drv_priv) {
+			tx_vif = vifs[i].link_conf->mbssid_tx_vif;
+			tx_ahvif = ath12k_vif_to_ahvif(tx_vif);
+			tx_arvif = tx_ahvif->link[vifs[i].link_conf->mbssid_tx_vif_linkid];
+			trans_vdev_index = i;
+		}
+
 		ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
 		if (ret) {
+			vif_down_failed_map |= BIT_ULL(i);
 			ath12k_warn(ab, "failed to down vdev %d: %d\n",
 				    arvif->vdev_id, ret);
 			continue;
 		}
 	}
 
-	/* All relevant vdevs are downed and associated channel resources
-	 * should be available for the channel switch now.
-	 */
+	ath12k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
 
-	/* TODO: Update ar->rx_channel */
+	if (tx_arvif) {
+		rcu_read_lock();
+		link = rcu_dereference(tx_ahvif->vif->link_conf[tx_arvif->link_id]);
+
+		if (link->csa_active && tx_arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP)
+			tx_arvif->pending_csa_up = true;
+
+		rcu_read_unlock();
+
+		ret = ath12k_vdev_restart_sequence(tx_arvif,
+						   vifs[trans_vdev_index].new_ctx,
+						   vif_down_failed_map,
+						   trans_vdev_index);
+
+		if (ret)
+			ath12k_warn(ab, "failed to restart vdev:%d: %d\n",
+				    tx_arvif->vdev_id, ret);
+	}
 
 	for (i = 0; i < n_vifs; i++) {
-		arvif = (void *)vifs[i].vif->drv_priv;
+		ahvif = (void *)vifs[i].vif->drv_priv;
+		arvif = ahvif->link[vifs[i].link_conf->link_id];
 
-		if (WARN_ON(!arvif->is_started))
+		if (WARN_ON(!arvif))
 			continue;
 
-		if (WARN_ON(!arvif->is_up))
+		if (vifs[i].link_conf->mbssid_tx_vif &&
+		    arvif == tx_arvif)
 			continue;
 
-		ret = ath12k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
-		if (ret) {
-			ath12k_warn(ab, "failed to restart vdev %d: %d\n",
+		rcu_read_lock();
+		link = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
+
+		if (link->csa_active && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP)
+			arvif->pending_csa_up = true;
+
+		rcu_read_unlock();
+
+		ret = ath12k_vdev_restart_sequence(arvif,
+						   vifs[i].new_ctx,
+						   vif_down_failed_map, i);
+
+		if (ret && ret != -EOPNOTSUPP) {
+			ath12k_warn(ab, "failed to bring up vdev %d: %d\n",
 				    arvif->vdev_id, ret);
+		}
+	}
+}
+
+static void
+ath12k_mac_update_vif_chan_mvr(struct ath12k *ar,
+			       struct ieee80211_vif_chanctx_switch *vifs,
+			       int n_vifs)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_link_vif *arvif, *tx_arvif = NULL;
+	struct ath12k_vif *ahvif, *tx_ahvif = NULL;
+	struct cfg80211_chan_def *chandef;
+	struct ieee80211_vif *tx_vif;
+	int ret, i, time_left, trans_vdev_index, vdev_idx, n_vdevs = 0;
+	u32 vdev_ids[TARGET_NUM_VDEVS];
+	struct ieee80211_bss_conf *link;
+	bool monitor_vif = false;
+	int k;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	chandef = &vifs[0].new_ctx->def;
+	tx_arvif = NULL;
+
+	ath12k_dbg(ab, ATH12K_DBG_MAC, "mac chanctx switch via mvr");
+
+	ath12k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
+
+	for (i = 0; i < n_vifs; i++) {
+		ahvif = (void *)vifs[i].vif->drv_priv;
+		arvif = ahvif->link[vifs[i].link_conf->link_id];
+
+		if (WARN_ON(!arvif))
+			continue;
+
+		ath12k_dbg(ab, ATH12K_DBG_MAC,
+			   "mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
+			   arvif->vdev_id,
+			   vifs[i].old_ctx->def.chan->center_freq,
+			   vifs[i].new_ctx->def.chan->center_freq,
+			   vifs[i].old_ctx->def.width,
+			   vifs[i].new_ctx->def.width);
+
+		if (!arvif->is_started) {
+			memcpy(&arvif->chanctx, vifs[i].new_ctx, sizeof(*vifs[i].new_ctx));
 			continue;
 		}
 
-		ret = ath12k_mac_setup_bcn_tmpl(arvif);
-		if (ret)
-			ath12k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
-				    ret);
+		if (vifs[i].link_conf->mbssid_tx_vif &&
+		    ahvif == (struct ath12k_vif *)vifs[i].link_conf->mbssid_tx_vif->drv_priv) {
+			tx_vif = vifs[i].link_conf->mbssid_tx_vif;
+			tx_ahvif = ath12k_vif_to_ahvif(tx_vif);
+			tx_arvif = tx_ahvif->link[vifs[i].link_conf->mbssid_tx_vif_linkid];
+			trans_vdev_index = i;
+		}
+
+		arvif->mvr_processing = true;
+		vdev_ids[n_vdevs++] = arvif->vdev_id;
+	}
 
-		ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
-					 arvif->bssid);
+	for (k = 0; k < n_vifs; k++) {
+		if (vifs[k].vif->type == NL80211_IFTYPE_MONITOR) {
+			monitor_vif = true;
+			break;
+		}
+	}
+	if (!monitor_vif &&
+	    test_bit(MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
+		vdev_ids[n_vdevs++] = ar->monitor_vdev_id;
+	}
+
+	if (!n_vdevs) {
+		ath12k_dbg(ab, ATH12K_DBG_MAC,
+			   "mac 0 vdevs available to switch chan ctx via mvr\n");
+		return;
+	}
+
+	reinit_completion(&ar->mvr_complete);
+
+	ret = ath12k_mac_multi_vdev_restart(ar, chandef, vdev_ids, n_vdevs,
+					    vifs[0].new_ctx->radar_enabled);
 		if (ret) {
-			ath12k_warn(ab, "failed to bring vdev up %d: %d\n",
-				    arvif->vdev_id, ret);
-			continue;
+		ath12k_warn(ab, "mac failed to send mvr command (%d)\n", ret);
+		return;
 		}
+
+	time_left = wait_for_completion_timeout(&ar->mvr_complete,
+						WMI_MVR_CMD_TIMEOUT_HZ);
+	if (!time_left) {
+		ath12k_err(ar->ab, "mac mvr cmd response timed out\n");
+		/* fallback to restarting one-by-one */
+		return ath12k_mac_update_vif_chan(ar, vifs, n_vifs);
+	}
+
+	if (tx_arvif) {
+		vdev_idx = -1;
+
+		if (tx_arvif->mvr_processing) {
+			/* failed to restart tx vif via mvr, fallback */
+			tx_arvif->mvr_processing = false;
+			vdev_idx = trans_vdev_index;
+			ath12k_err(ab,
+				   "mac failed to restart mbssid tx vdev %d via mvr cmd\n",
+				   tx_arvif->vdev_id);
 	}
 
-	/* Restart the internal monitor vdev on new channel */
-	if (!monitor_vif && ar->monitor_vdev_created) {
-		if (!ath12k_mac_monitor_stop(ar))
-			ath12k_mac_monitor_start(ar);
+		rcu_read_lock();
+		link = rcu_dereference(tx_ahvif->vif->link_conf[tx_arvif->link_id]);
+
+		if (link->csa_active && tx_arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP)
+			tx_arvif->pending_csa_up = true;
+
+		rcu_read_unlock();
+
+		ret = ath12k_vdev_restart_sequence(tx_arvif,
+						   vifs[trans_vdev_index].new_ctx,
+						   BIT_ULL(trans_vdev_index),
+						   vdev_idx);
+		if (ret)
+			ath12k_warn(ab,
+				    "mac failed to bring up mbssid tx vdev %d after mvr (%d)\n",
+				    tx_arvif->vdev_id, ret);
+	}
+
+	for (i = 0; i < n_vifs; i++) {
+		ahvif = (void *)vifs[i].vif->drv_priv;
+		arvif = ahvif->link[vifs[i].link_conf->link_id];
+
+		if (WARN_ON(!arvif))
+			continue;
+
+		vdev_idx = -1;
+
+		if (vifs[i].link_conf->mbssid_tx_vif && arvif == tx_arvif)
+			continue;
+
+		if (arvif->mvr_processing) {
+			/* failed to restart vdev via mvr, fallback */
+			arvif->mvr_processing = false;
+			vdev_idx = i;
+			ath12k_err(ab, "mac failed to restart vdev %d via mvr cmd\n",
+				   arvif->vdev_id);
 	}
+
+		rcu_read_lock();
+		link = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
+
+		if (link->csa_active && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP)
+			arvif->pending_csa_up = true;
+
+		rcu_read_unlock();
+
+		ret = ath12k_vdev_restart_sequence(arvif, vifs[i].new_ctx,
+						   BIT_ULL(i), vdev_idx);
+		if (ret && ret != -EOPNOTSUPP)
+			ath12k_warn(ab, "mac failed to bring up vdev %d after mvr (%d)\n",
+				    arvif->vdev_id, ret);
+	}
+}
+
+static void
+ath12k_mac_process_update_vif_chan(struct ath12k *ar,
+				   struct ieee80211_vif_chanctx_switch *vifs,
+				   int n_vifs)
+{
+	struct ath12k_base *ab = ar->ab;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	/* should not happen */
+	if (WARN_ON(n_vifs > TARGET_NUM_VDEVS))
+		return;
+
+	if (ath12k_wmi_is_mvr_supported(ab))
+		ath12k_mac_update_vif_chan_mvr(ar, vifs, n_vifs);
+	else
+		ath12k_mac_update_vif_chan(ar, vifs, n_vifs);
+
+	ath12k_mac_update_vif_chan_extras(ar, vifs, n_vifs);
 }
 
 static void
 ath12k_mac_update_active_vif_chan(struct ath12k *ar,
 				  struct ieee80211_chanctx_conf *ctx)
 {
-	struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx };
+	struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx,
+						     .csa_active = false,
+						     .ar = ar };
+	struct ath12k_hw *ah = ar->ah;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	ieee80211_iterate_active_interfaces_atomic(ar->hw,
+	ieee80211_iterate_active_interfaces_atomic(ah->hw,
 						   IEEE80211_IFACE_ITER_NORMAL,
 						   ath12k_mac_change_chanctx_cnt_iter,
 						   &arg);
-	if (arg.n_vifs == 0)
+
+	if (arg.n_vifs == 0 || arg.csa_active)
 		return;
 
 	arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), GFP_KERNEL);
 	if (!arg.vifs)
 		return;
 
-	ieee80211_iterate_active_interfaces_atomic(ar->hw,
+	ieee80211_iterate_active_interfaces_atomic(ah->hw,
 						   IEEE80211_IFACE_ITER_NORMAL,
 						   ath12k_mac_change_chanctx_fill_iter,
 						   &arg);
 
-	ath12k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
+	ath12k_mac_process_update_vif_chan(ar, arg.vifs, arg.n_vifs);
 
 	kfree(arg.vifs);
 }
@@ -5744,8 +12732,21 @@
 					 struct ieee80211_chanctx_conf *ctx,
 					 u32 changed)
 {
-	struct ath12k *ar = hw->priv;
-	struct ath12k_base *ab = ar->ab;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	struct ath12k_base *ab;
+
+	mutex_lock(&ah->conf_mutex);
+
+	ar = ath12k_get_ar_by_ctx(hw, ctx);
+
+	if (!ar) {
+		ath12k_err(NULL, "unable to determine device for the passed channel ctx\n");
+		mutex_unlock(&ah->conf_mutex);
+		return;
+	}
+
+	ab = ar->ab;
 
 	mutex_lock(&ar->conf_mutex);
 
@@ -5759,35 +12760,38 @@
 	if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
 		goto unlock;
 
-	if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH)
+	if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH ||
+			changed & IEEE80211_CHANCTX_CHANGE_RADAR)
 		ath12k_mac_update_active_vif_chan(ar, ctx);
 
 	/* TODO: Recalc radar detection */
 
 unlock:
 	mutex_unlock(&ar->conf_mutex);
+	mutex_unlock(&ah->conf_mutex);
 }
 
-static int ath12k_start_vdev_delay(struct ieee80211_hw *hw,
+static int ath12k_start_vdev_delay(struct ath12k *ar,
 				   struct ieee80211_vif *vif)
 {
-	struct ath12k *ar = hw->priv;
 	struct ath12k_base *ab = ar->ab;
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	struct ath12k_vif *ahvif = (void *)vif->drv_priv;
+	struct ath12k_link_vif *arvif = &ahvif->deflink;
 	int ret;
 
 	if (WARN_ON(arvif->is_started))
 		return -EBUSY;
 
-	ret = ath12k_mac_vdev_start(arvif, &arvif->chanctx.def);
+	ret = ath12k_mac_vdev_start(arvif, &arvif->chanctx.def,
+			arvif->chanctx.radar_enabled);
 	if (ret) {
 		ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
-			    arvif->vdev_id, vif->addr,
+			    arvif->vdev_id, arvif->addr,
 			    arvif->chanctx.def.chan->center_freq, ret);
 		return ret;
 	}
 
-	if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
 		ret = ath12k_monitor_vdev_up(ar, arvif->vdev_id);
 		if (ret) {
 			ath12k_warn(ab, "failed put monitor up: %d\n", ret);
@@ -5801,17 +12805,636 @@
 	return 0;
 }
 
+static u8 ath12k_mac_get_tpe_count(u8 txpwr_intrprt, u8 txpwr_cnt)
+{
+        switch (txpwr_intrprt) {
+        /* Refer "Table 9-276-Meaning of Maximum Transmit Power Count subfield
+         * if the Maximum Transmit Power Interpretation subfield is 0 or 2" of
+         * "IEEE Std 802.11ax 2021".
+         */
+        case IEEE80211_TPE_LOCAL_EIRP:
+        case IEEE80211_TPE_REG_CLIENT_EIRP:
+                txpwr_cnt = txpwr_cnt <= 3 ? txpwr_cnt : 3;
+                txpwr_cnt = txpwr_cnt + 1;
+                break;
+        /* Refer "Table 9-277-Meaning of Maximum Transmit Power Count subfield
+         * if Maximum Transmit Power Interpretation subfield is 1 or 3" of
+         * "IEEE Std 802.11ax 2021".
+         */
+        case IEEE80211_TPE_LOCAL_EIRP_PSD:
+        case IEEE80211_TPE_REG_CLIENT_EIRP_PSD:
+                txpwr_cnt = txpwr_cnt <= 4 ? txpwr_cnt : 4;
+                txpwr_cnt = txpwr_cnt ? (BIT(txpwr_cnt - 1)) : 1;
+                break;
+        }
+
+        return txpwr_cnt;
+}
+
+static u8 ath12k_mac_get_num_pwr_levels(struct cfg80211_chan_def *chan_def)
+{
+        u8 num_pwr_levels;
+
+        if (chan_def->chan->flags & IEEE80211_CHAN_PSD) {
+                switch (chan_def->width) {
+                case NL80211_CHAN_WIDTH_20:
+                        num_pwr_levels = 1;
+                        break;
+                case NL80211_CHAN_WIDTH_40:
+                        num_pwr_levels = 2;
+                        break;
+                case NL80211_CHAN_WIDTH_80:
+                        num_pwr_levels = 4;
+                        break;
+                case NL80211_CHAN_WIDTH_80P80:
+                case NL80211_CHAN_WIDTH_160:
+                        num_pwr_levels = 8;
+                        break;
+                case NL80211_CHAN_WIDTH_320:
+                        num_pwr_levels = 16;
+                        break;
+                default:
+                        return 1;
+                }
+        } else {
+                switch (chan_def->width) {
+                case NL80211_CHAN_WIDTH_20:
+                        num_pwr_levels = 1;
+                        break;
+                case NL80211_CHAN_WIDTH_40:
+                        num_pwr_levels = 2;
+                        break;
+                case NL80211_CHAN_WIDTH_80:
+                        num_pwr_levels = 3;
+                        break;
+                case NL80211_CHAN_WIDTH_80P80:
+                case NL80211_CHAN_WIDTH_160:
+                        num_pwr_levels = 4;
+                        break;
+                case NL80211_CHAN_WIDTH_320:
+                        num_pwr_levels = 5;
+                        break;
+                default:
+                        return 1;
+                }
+        }
+
+        return num_pwr_levels;
+}
+
+static u16 ath12k_mac_get_6g_start_frequency(struct cfg80211_chan_def *chan_def)
+{
+        u16 diff_seq;
+
+        /* It is to get the lowest channel number's center frequency of the chan.
+         * For example,
+         * bandwidth=40MHz, center frequency is 5965, lowest channel is 1
+         * with center frequency 5955, its diff is 5965 - 5955 = 10.
+         * bandwidth=80MHz, center frequency is 5985, lowest channel is 1
+         * with center frequency 5955, its diff is 5985 - 5955 = 30.
+         * bandwidth=160MHz, center frequency is 6025, lowest channel is 1
+         * with center frequency 5955, its diff is 6025 - 5955 = 70.
+         */
+        switch (chan_def->width) {
+        case NL80211_CHAN_WIDTH_320:
+                diff_seq = 150;
+                break;
+        case NL80211_CHAN_WIDTH_160:
+                diff_seq = 70;
+                break;
+        case NL80211_CHAN_WIDTH_80:
+        case NL80211_CHAN_WIDTH_80P80:
+                diff_seq = 30;
+                break;
+        case NL80211_CHAN_WIDTH_40:
+                diff_seq = 10;
+                break;
+        default:
+                diff_seq = 0;
+        }
+
+        return chan_def->center_freq1 - diff_seq;
+}
+
+static u16 ath12k_mac_get_seg_freq(struct cfg80211_chan_def *chan_def,
+                                  u16 start_seq, u8 seq)
+{
+       u16 seg_seq;
+
+       /* It is to get the center frequency of the specific bandwidth.
+        * start_seq means the lowest channel number's center frequency.
+        * seq 0/1/2/3 means 20MHz/40MHz/80MHz/160MHz&80P80.
+        * For example,
+        * lowest channel is 1, its center frequency 5955,
+        * center frequency is 5955 when bandwidth=20MHz, its diff is 5955 - 5955 = 0.
+        * lowest channel is 1, its center frequency 5955,
+        * center frequency is 5965 when bandwidth=40MHz, its diff is 5965 - 5955 = 10.
+        * lowest channel is 1, its center frequency 5955,
+        * center frequency is 5985 when bandwidth=80MHz, its diff is 5985 - 5955 = 30.
+        * lowest channel is 1, its center frequency 5955,
+        * center frequency is 6025 when bandwidth=160MHz, its diff is 6025 - 5955 = 70.
+        */
+       if (chan_def->width == NL80211_CHAN_WIDTH_80P80 && seq == 3)
+               return chan_def->center_freq2;
+
+       seg_seq = 10 * (BIT(seq) - 1);
+       return seg_seq + start_seq;
+}
+
+static void ath12k_mac_get_psd_channel(struct ath12k *ar,
+                                      u16 step_freq,
+                                      u16 *start_freq,
+                                      u16 *center_freq,
+                                      u8 i,
+                                      struct ieee80211_channel **temp_chan,
+                                      s8 *tx_power,
+				      u8 reg_6g_power_mode)
+{
+       /* It is to get the center frequency for each 20MHz.
+        * For example, if the chan is 160MHz and center frequency is 6025,
+        * then it include 8 channels, they are 1/5/9/13/17/21/25/29,
+        * channel number 1's center frequency is 5955, it is parameter start_freq.
+        * parameter i is the step of the 8 channels. i is 0~7 for the 8 channels.
+        * the channel 1/5/9/13/17/21/25/29 maps i=0/1/2/3/4/5/6/7,
+        * and maps its center frequency is 5955/5975/5995/6015/6035/6055/6075/6095,
+        * the gap is 20 for each channel, parameter step_freq means the gap.
+        * after get the center frequency of each channel, it is easy to find the
+        * struct ieee80211_channel of it and get the max_reg_power.
+        */
+       *center_freq = *start_freq + i * step_freq;
+       /* -1 to reg_6g_power_mode to make it 0 based indexing */
+       *temp_chan = ieee80211_get_channel_khz(ar->ah->hw->wiphy, MHZ_TO_KHZ(*center_freq));
+       *tx_power = (*temp_chan)->max_reg_power;
+}
+
+static void ath12k_mac_get_eirp_power(struct ath12k *ar,
+                                     u16 *start_freq,
+                                     u16 *center_freq,
+                                     u8 i,
+                                     struct ieee80211_channel **temp_chan,
+                                     struct cfg80211_chan_def *def,
+                                     s8 *tx_power,
+				     u8 reg_6g_power_mode)
+{
+       /* It is to get the center frequency for 40MHz/80MHz/
+        * 160MHz&80P80 bandwidth, and then plus 10 to the center frequency,
+        * it is the center frequency of a channel number.
+        * For example, when configured channel number is 1.
+        * center frequency is 5965 when bandwidth=40MHz, after plus 10, it is 5975,
+        * then it is channel number 5.
+        * center frequency is 5985 when bandwidth=80MHz, after plus 10, it is 5995,
+        * then it is channel number 9.
+        * center frequency is 6025 when bandwidth=160MHz, after plus 10, it is 6035,
+        * then it is channel number 17.
+        * after get the center frequency of each channel, it is easy to find the
+        * struct ieee80211_channel of it and get the max_reg_power.
+        */
+       *center_freq = ath12k_mac_get_seg_freq(def, *start_freq, i);
+       /* For 20 MHz, no +10 offset is required */
+       if (i != 0)
+               *center_freq += 10;
+
+       /* -1 to reg_6g_power_mode to make it 0 based indexing */
+       *temp_chan = ieee80211_get_channel_khz(ar->ah->hw->wiphy, MHZ_TO_KHZ(*center_freq));
+       *tx_power = (*temp_chan)->max_reg_power;
+}
+
+void ath12k_mac_fill_reg_tpc_info(struct ath12k *ar,
+                                  struct ath12k_link_vif *arvif,
+                                  struct ieee80211_chanctx_conf *ctx)
+{
+        struct ath12k_base *ab = ar->ab;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+        struct ieee80211_bss_conf *bss_conf;
+        struct ath12k_reg_tpc_power_info *reg_tpc_info = &arvif->reg_tpc_info;
+        struct ieee80211_channel *chan, *temp_chan;
+        u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction;
+        bool is_psd_power = false, is_tpe_present = false;
+        s8 max_tx_power[ATH12K_MAX_NUM_PWR_LEVEL],
+                psd_power, tx_power = 0, eirp_power = 0;
+        u16 oper_freq = 0, start_freq = 0, center_freq = 0;
+	u8 reg_6g_power_mode;
+
+	rcu_read_lock();
+
+	bss_conf = ath12k_get_link_bss_conf(arvif);
+
+	if (!bss_conf) {
+		rcu_read_unlock();
+		ath12k_warn(ar->ab, "unable to access bss link conf in tpc reg fill\n");
+		return;
+	}
+
+       /* For STA, 6g power mode will be present in the beacon, but for AP,
+        * AP cant parse its own beacon. Hence, we get the 6g power mode
+        * from the wdev corresponding to the struct ieee80211_vif
+        */
+       if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) {
+               reg_6g_power_mode = bss_conf->power_type;
+		if (reg_6g_power_mode == IEEE80211_REG_UNSET_AP)
+			reg_6g_power_mode = IEEE80211_REG_LPI_AP;
+	} else if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
+	       reg_6g_power_mode = IEEE80211_REG_LPI_AP;
+       } else
+               reg_6g_power_mode = IEEE80211_REG_LPI_AP;
+
+        chan = ctx->def.chan;
+        oper_freq = ctx->def.chan->center_freq;
+        start_freq = ath12k_mac_get_6g_start_frequency(&ctx->def);
+        pwr_reduction = bss_conf->pwr_reduction;
+
+	rcu_read_unlock();
+
+        if (ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+	    arvif->reg_tpc_info.num_pwr_levels) {
+                is_tpe_present = true;
+                num_pwr_levels = arvif->reg_tpc_info.num_pwr_levels;
+        } else {
+                num_pwr_levels = ath12k_mac_get_num_pwr_levels(&ctx->def);
+        }
+
+        for (pwr_lvl_idx = 0; pwr_lvl_idx < num_pwr_levels; pwr_lvl_idx++) {
+                /* STA received TPE IE*/
+                if (is_tpe_present) {
+                        /* local power is PSD power*/
+                        if (chan->flags & IEEE80211_CHAN_PSD) {
+                                /* Connecting AP is psd power */
+                                if (reg_tpc_info->is_psd_power) {
+                                        is_psd_power = true;
+                                        ath12k_mac_get_psd_channel(ar, 20,
+                                                                   &start_freq,
+                                                                   &center_freq,
+                                                                   pwr_lvl_idx,
+                                                                   &temp_chan,
+                                                                   &tx_power,
+								   reg_6g_power_mode);
+                                        psd_power = temp_chan->psd;
+                                        eirp_power = tx_power;
+                                        max_tx_power[pwr_lvl_idx] =
+                                                min_t(s8,
+                                                      psd_power,
+                                                      reg_tpc_info->tpe[pwr_lvl_idx]);
+                                /* Connecting AP is not psd power */
+                                } else {
+                                        ath12k_mac_get_eirp_power(ar,
+                                                                  &start_freq,
+                                                                  &center_freq,
+                                                                  pwr_lvl_idx,
+                                                                  &temp_chan,
+                                                                  &ctx->def,
+                                                                  &tx_power,
+								  reg_6g_power_mode);
+                                        psd_power = temp_chan->psd;
+                                        /* convert psd power to EIRP power based
+                                         * on channel width
+                                         */
+                                        tx_power =
+                                                min_t(s8, tx_power,
+                                                      psd_power + 13 + pwr_lvl_idx * 3);
+                                        max_tx_power[pwr_lvl_idx] =
+                                                min_t(s8,
+                                                      tx_power,
+                                                      reg_tpc_info->tpe[pwr_lvl_idx]);
+                                }
+                        /* local power is not PSD power */
+                        } else {
+                                /* Connecting AP is psd power */
+                                if (reg_tpc_info->is_psd_power) {
+                                        is_psd_power = true;
+                                        ath12k_mac_get_psd_channel(ar, 20,
+                                                                   &start_freq,
+                                                                   &center_freq,
+                                                                   pwr_lvl_idx,
+                                                                   &temp_chan,
+                                                                   &tx_power,
+								   reg_6g_power_mode);
+                                        eirp_power = tx_power;
+                                        max_tx_power[pwr_lvl_idx] =
+                                                reg_tpc_info->tpe[pwr_lvl_idx];
+                                /* Connecting AP is not psd power */
+                                } else {
+                                        ath12k_mac_get_eirp_power(ar,
+                                                                  &start_freq,
+                                                                  &center_freq,
+                                                                  pwr_lvl_idx,
+                                                                  &temp_chan,
+                                                                  &ctx->def,
+                                                                  &tx_power,
+								  reg_6g_power_mode);
+                                        max_tx_power[pwr_lvl_idx] =
+                                                min_t(s8,
+                                                      tx_power,
+                                                      reg_tpc_info->tpe[pwr_lvl_idx]);
+                                }
+                        }
+                /* STA not received TPE IE */
+                } else {
+                        /* local power is PSD power*/
+                        if (chan->flags & IEEE80211_CHAN_PSD) {
+                                is_psd_power = true;
+                                ath12k_mac_get_psd_channel(ar, 20,
+                                                           &start_freq,
+                                                           &center_freq,
+                                                           pwr_lvl_idx,
+                                                           &temp_chan,
+                                                           &tx_power,
+							   reg_6g_power_mode);
+                                psd_power = temp_chan->psd;
+                                eirp_power = tx_power;
+                                max_tx_power[pwr_lvl_idx] = psd_power;
+                        } else {
+                                ath12k_mac_get_eirp_power(ar,
+                                                          &start_freq,
+                                                          &center_freq,
+                                                          pwr_lvl_idx,
+                                                          &temp_chan,
+                                                          &ctx->def,
+                                                          &tx_power,
+							  reg_6g_power_mode);
+                                max_tx_power[pwr_lvl_idx] = tx_power;
+                        }
+                }
+
+                if (is_psd_power) {
+                        /* If AP local power constraint is present */
+                        if (pwr_reduction)
+                                eirp_power = eirp_power - pwr_reduction;
+
+                        /* If FW updated max tx power is non zero, then take the min of
+                         * firmware updated ap tx power
+                         * and max power derived from above mentioned parameters.
+                         */
+                        ath12k_dbg(ab, ATH12K_DBG_MAC,
+                                   "eirp power : %d firmware report power : %d\n",
+                                   eirp_power, ar->max_allowed_tx_power);
+                        if ((ar->max_allowed_tx_power) && (ab->hw_params->idle_ps))
+                                eirp_power = min_t(s8,
+                                                   eirp_power,
+                                                   ar->max_allowed_tx_power);
+                } else {
+                        /* If AP local power constraint is present */
+                        if (pwr_reduction)
+                                max_tx_power[pwr_lvl_idx] =
+                                        max_tx_power[pwr_lvl_idx] - pwr_reduction;
+                        /* If FW updated max tx power is non zero, then take the min of
+                         * firmware updated ap tx power
+                         * and max power derived from above mentioned parameters.
+                         */
+                        if ((ar->max_allowed_tx_power) && (ab->hw_params->idle_ps))
+                                max_tx_power[pwr_lvl_idx] =
+                                        min_t(s8,
+                                              max_tx_power[pwr_lvl_idx],
+                                              ar->max_allowed_tx_power);
+                }
+                reg_tpc_info->chan_power_info[pwr_lvl_idx].chan_cfreq = center_freq;
+                reg_tpc_info->chan_power_info[pwr_lvl_idx].tx_power =
+                        max_tx_power[pwr_lvl_idx];
+        }
+
+        reg_tpc_info->num_pwr_levels = num_pwr_levels;
+        reg_tpc_info->is_psd_power = is_psd_power;
+        reg_tpc_info->eirp_power = eirp_power;
+        reg_tpc_info->power_type_6g =
+                ath12k_ieee80211_ap_pwr_type_convert(reg_6g_power_mode);
+}
+
+void ath12k_mac_parse_tx_pwr_env(struct ath12k *ar,
+				 struct ath12k_link_vif *arvif,
+				 struct ieee80211_chanctx_conf *ctx)
+{
+        struct ath12k_base *ab = ar->ab;
+        struct ieee80211_bss_conf *bss_conf;
+        struct ieee80211_tx_pwr_env *single_tpe;
+        enum wmi_reg_6g_client_type client_type;
+        int i;
+        u8 pwr_count, pwr_interpret, pwr_category;
+        u8 psd_index = 0, non_psd_index = 0, local_tpe_count = 0, reg_tpe_count = 0;
+        bool use_local_tpe, non_psd_set = false, psd_set = false;
+
+	rcu_read_lock();
+
+	bss_conf = ath12k_get_link_bss_conf(arvif);
+
+	if (!bss_conf) {
+		rcu_read_unlock();
+		ath12k_warn(ar->ab, "unable to access bss link conf in tpc reg fill\n");
+		return;
+	}
+
+	client_type = WMI_REG_DEFAULT_CLIENT;
+
+        for (i = 0; i < bss_conf->tx_pwr_env_num; i++) {
+                single_tpe = &bss_conf->tx_pwr_env[i];
+                pwr_category = u8_get_bits(single_tpe->tx_power_info,
+                                           IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
+                pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+                                            IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+
+                if (pwr_category == client_type) {
+                        if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP ||
+                            pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD)
+                                local_tpe_count++;
+                        else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP ||
+                                 pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD)
+                                reg_tpe_count++;
+                }
+        }
+
+        if (!reg_tpe_count && !local_tpe_count) {
+		rcu_read_unlock();
+                ath12k_warn(ab,
+                            "no transmit power envelope match client power type %d\n",
+                            client_type);
+                return;
+        } else if (!reg_tpe_count) {
+                use_local_tpe = true;
+        } else {
+                use_local_tpe = false;
+        }
+        for (i = 0; i < bss_conf->tx_pwr_env_num; i++) {
+                single_tpe = &bss_conf->tx_pwr_env[i];
+                pwr_category = u8_get_bits(single_tpe->tx_power_info,
+                                           IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
+                pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+                                            IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+
+                if (pwr_category != client_type)
+                        continue;
+
+                /* get local transmit power envelope */
+                if (use_local_tpe) {
+                        if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP) {
+                                non_psd_index = i;
+                                non_psd_set = true;
+                        } else if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD) {
+                                psd_index = i;
+                                psd_set = true;
+                        }
+                /* get regulatory transmit power envelope */
+                } else {
+                        if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP) {
+                                non_psd_index = i;
+                                non_psd_set = true;
+                        } else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD) {
+                                psd_index = i;
+                                psd_set = true;
+                        }
+                }
+        }
+
+        if (non_psd_set && !psd_set) {
+                single_tpe = &bss_conf->tx_pwr_env[non_psd_index];
+                pwr_count = u8_get_bits(single_tpe->tx_power_info,
+                                        IEEE80211_TX_PWR_ENV_INFO_COUNT);
+                pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+                                            IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+                arvif->reg_tpc_info.is_psd_power = false;
+                arvif->reg_tpc_info.eirp_power = 0;
+
+                arvif->reg_tpc_info.num_pwr_levels =
+                        ath12k_mac_get_tpe_count(pwr_interpret, pwr_count);
+                for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
+                        ath12k_dbg(ab, ATH12K_DBG_MAC,
+                                   "non PSD power[%d] : %d\n",
+                                   i, single_tpe->tx_power[i]);
+                        arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2;
+                }
+        }
+        if (psd_set) {
+                single_tpe = &bss_conf->tx_pwr_env[psd_index];
+                pwr_count = u8_get_bits(single_tpe->tx_power_info,
+                                        IEEE80211_TX_PWR_ENV_INFO_COUNT);
+                pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+                                            IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+                arvif->reg_tpc_info.is_psd_power = true;
+
+                if (pwr_count == 0) {
+                        ath12k_dbg(ab, ATH12K_DBG_MAC,
+                                   "TPE PSD power : %d\n", single_tpe->tx_power[0]);
+                        arvif->reg_tpc_info.num_pwr_levels =
+                                ath12k_mac_get_num_pwr_levels(&ctx->def);
+                        for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++)
+                                arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[0] / 2;
+                } else {
+                        arvif->reg_tpc_info.num_pwr_levels =
+                                ath12k_mac_get_tpe_count(pwr_interpret, pwr_count);
+                        for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
+                                ath12k_dbg(ab, ATH12K_DBG_MAC,
+                                           "TPE PSD power[%d] : %d\n",
+                                           i, single_tpe->tx_power[i]);
+                                arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2;
+                        }
+                }
+        }
+	rcu_read_unlock();
+}
+
 static int
 ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
 				 struct ieee80211_vif *vif,
 				 struct ieee80211_bss_conf *link_conf,
 				 struct ieee80211_chanctx_conf *ctx)
 {
-	struct ath12k *ar = hw->priv;
-	struct ath12k_base *ab = ar->ab;
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar, *arvif_ar;
+	struct ath12k_base *ab;
+	struct ath12k_vif *ahvif = (void *)vif->drv_priv;
+	struct ath12k_link_vif *arvif;
+	struct ieee80211_sta *sta;
+	struct ath12k_sta *ahsta;
+	struct ath12k_link_sta *arsta;
 	int ret;
 	struct ath12k_wmi_peer_create_arg param;
+	enum ieee80211_ap_reg_power power_type;
+	enum ieee80211_sta_state state, prev_state;
+
+	u8 link_id = link_conf->link_id;
+
+	if (!ctx)
+		return -EINVAL;
+
+	mutex_lock(&ah->conf_mutex);
+
+	arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
+	if (!arvif) {
+		ath12k_err(NULL, "unable to allocate link vif\n");
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOMEM;
+	}
+
+	if (!arvif->is_created) {
+		ar = ath12k_mac_assign_vif_to_vdev(hw, arvif, ctx);
+		if (!ar) {
+			mutex_unlock(&ah->conf_mutex);
+			return -EINVAL;
+		}
+	} else if (vif->valid_links) {
+		ar = arvif->ar;
+		if (WARN_ON(!ar)) {
+			mutex_unlock(&ah->conf_mutex);
+			return -EINVAL;
+		}
+	} else {
+		ar = ath12k_get_ar_by_ctx(hw, ctx);
+		if (!ar) {
+			ath12k_err(NULL,
+				   "unable to determine device for the passed channel ctx\n");
+			mutex_unlock(&ah->conf_mutex);
+			return -EINVAL;
+		}
+
+		/* TODO If vif is already assigned, but now the chan is different and belongs
+		 * to a different ar, then delete the old vdev in different ar and create new
+		 * This is not expected for MLO
+		 */
+		/* If VIF is created for ROC scan, stop and delete the scan vif
+		 * before creating vdev for new connection
+		 */
+		if (ar != arvif->ar || (arvif->is_scan_vif)) {
+			if (!(arvif->is_scan_vif) && WARN_ON(arvif->is_started)) {
+				mutex_unlock(&ah->conf_mutex);
+				return -EBUSY;
+			}
+
+			arvif_ar = arvif->ar;
+			if (!arvif_ar) {
+				mutex_unlock(&ah->conf_mutex);
+				ath12k_warn(NULL, "arvif_ar not found\n");
+				return -EINVAL;
+			}
+
+			mutex_lock(&arvif_ar->conf_mutex);
+			if (arvif->is_scan_vif && arvif->is_started) {
+				ret = ath12k_mac_vdev_stop(arvif);
+				if (ret) {
+					mutex_unlock(&arvif_ar->conf_mutex);
+					mutex_unlock(&ah->conf_mutex);
+					ath12k_warn(arvif_ar->ab, "failed to stop vdev %d: %d\n",
+						    arvif->vdev_id, ret);
+					return -EINVAL;
+				}
+				arvif->is_started = false;
+				arvif->is_scan_vif = false;
+			}
+
+			ret = ath12k_mac_vdev_delete(arvif_ar, arvif);
+			mutex_unlock(&arvif_ar->conf_mutex);
+			if (ret)
+				ath12k_warn(arvif_ar->ab, "unable to delete vdev %d\n", ret);
+
+			mutex_lock(&ar->conf_mutex);
+			ret = ath12k_mac_vdev_create(ar, arvif);
+			if (ret) {
+				mutex_unlock(&ar->conf_mutex);
+				mutex_unlock(&ah->conf_mutex);
+				ath12k_warn(ar->ab, "unable to create vdev %d\n", ret);
+				return -EINVAL;
+			}
+			mutex_unlock(&ar->conf_mutex);
+		}
+	}
+
+	ab = ar->ab;
 
 	mutex_lock(&ar->conf_mutex);
 
@@ -5819,10 +13442,28 @@
 		   "mac chanctx assign ptr %pK vdev_id %i\n",
 		   ctx, arvif->vdev_id);
 
+	if (ar->supports_6ghz && ctx->def.chan->band == NL80211_BAND_6GHZ &&
+            (ahvif->vdev_type == WMI_VDEV_TYPE_STA ||
+             ahvif->vdev_type == WMI_VDEV_TYPE_AP)) {
+                power_type = link_conf->power_type;
+                ath12k_dbg(ab, ATH12K_DBG_MAC, "mac chanctx power type %d\n",
+                           power_type);
+		if (power_type == IEEE80211_REG_UNSET_AP)
+			power_type = IEEE80211_REG_LPI_AP;
+		arvif->chanctx = *ctx;
+		/* TODO: Transmit Power Envelope specification for 320 is not
+		 * available yet. Need to add TPE 320 support when spec is ready
+		 */
+		if (ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+		    ctx->def.width != NL80211_CHAN_WIDTH_320) {
+			ath12k_mac_parse_tx_pwr_env(ar, arvif, ctx);
+		}
+        }
+
 	/* for some targets bss peer must be created before vdev_start */
 	if (ab->hw_params->vdev_start_delay &&
-	    arvif->vdev_type != WMI_VDEV_TYPE_AP &&
-	    arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+	    ahvif->vdev_type != WMI_VDEV_TYPE_AP &&
+	    ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
 	    !ath12k_peer_exist_by_vdev_id(ab, arvif->vdev_id)) {
 		memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
 		ret = 0;
@@ -5835,8 +13476,8 @@
 	}
 
 	if (ab->hw_params->vdev_start_delay &&
-	    (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
-	    arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
+	    (ahvif->vdev_type == WMI_VDEV_TYPE_AP ||
+	     ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
 		param.vdev_id = arvif->vdev_id;
 		param.peer_type = WMI_PEER_TYPE_DEFAULT;
 		param.peer_addr = ar->mac_addr;
@@ -5849,7 +13490,33 @@
 		}
 	}
 
-	if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+	if (!ab->hw_params->vdev_start_delay &&
+	    ahvif->vdev_type == WMI_VDEV_TYPE_STA && ahvif->chanctx_peer_del_done) {
+		rcu_read_lock();
+		sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
+		if (!sta) {
+			ath12k_warn(ar->ab, "failed to find station entry for bss vdev\n");
+			rcu_read_unlock();
+			goto out;
+		}
+
+		ahsta = ath12k_sta_to_ahsta(sta);
+		arsta = &ahsta->deflink;
+		rcu_read_unlock();
+
+		mutex_unlock(&ar->conf_mutex);
+		mutex_unlock(&ah->conf_mutex);
+		prev_state = arsta->state;
+		for (state = IEEE80211_STA_NOTEXIST; state < prev_state;
+		     state++)
+			ath12k_mac_update_sta_state(ar->ah->hw, arvif->ahvif->vif, sta,
+						    state, (state + 1));
+		mutex_lock(&ah->conf_mutex);
+		mutex_lock(&ar->conf_mutex);
+		ahvif->chanctx_peer_del_done = false;
+	}
+
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
 		ret = ath12k_mac_monitor_start(ar);
 		if (ret)
 			goto out;
@@ -5857,15 +13524,18 @@
 		goto out;
 	}
 
-	ret = ath12k_mac_vdev_start(arvif, &ctx->def);
+	memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
+
+	ret = ath12k_mac_vdev_start(arvif, &ctx->def, ctx->radar_enabled);
 	if (ret) {
 		ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
-			    arvif->vdev_id, vif->addr,
+			    arvif->vdev_id, arvif->addr,
 			    ctx->def.chan->center_freq, ret);
 		goto out;
 	}
 
-	if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->monitor_vdev_created)
+	if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+	    test_bit(MONITOR_VDEV_CREATED, &ar->monitor_flags))
 		ath12k_mac_monitor_start(ar);
 
 	arvif->is_started = true;
@@ -5874,45 +13544,165 @@
 
 out:
 	mutex_unlock(&ar->conf_mutex);
+	mutex_unlock(&ah->conf_mutex);
 
 	return ret;
 }
 
+struct ath12k_link_vif *
+ath12k_mac_get_primary_arvif(struct ath12k *ar,
+			     struct ath12k_vif *ahvif)
+{
+	struct ath12k_link_vif *arvif;
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		if (arvif->ahvif != ahvif)
+			continue;
+
+		if (arvif->assoc_link)
+			return arvif;
+	}
+
+	return NULL;
+}
+
+static void
+ath12k_mac_get_ahvif_status(struct ath12k *ar,
+			    struct ath12k_vif *ahvif,
+			    int *out_started_count,
+			    int *out_up_count,
+			    int *out_pending_stop_count)
+{
+	struct ath12k_link_vif *arvif;
+	int started_count = 0;
+	int up_count = 0;
+	int pending_stop_count = 0;
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		if (arvif->ahvif != ahvif)
+			continue;
+
+		if (arvif->is_started)
+			started_count++;
+
+		if (arvif->is_up)
+			up_count++;
+
+		if (arvif->pending_stop)
+			pending_stop_count++;
+	}
+
+	*out_started_count = started_count;
+	*out_up_count = up_count;
+	*out_pending_stop_count = pending_stop_count;
+}
+
 static void
 ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
 				   struct ieee80211_vif *vif,
 				   struct ieee80211_bss_conf *link_conf,
 				   struct ieee80211_chanctx_conf *ctx)
 {
-	struct ath12k *ar = hw->priv;
-	struct ath12k_base *ab = ar->ab;
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	struct ath12k_vif *ahvif = (void *)vif->drv_priv;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k_base *ab;
+	struct ath12k_peer *peer;
+	struct ath12k *ar;
 	int ret;
+	bool is_pending_stop = false;
+	bool to_stop_primary = false;
+	u8 link_id = link_conf->link_id;
+
+	mutex_lock(&ah->conf_mutex);
+
+	arvif = ahvif->link[link_id];
+
+	if (!arvif) {
+		ath12k_err(NULL,
+			   "unable to determine the assigned link vif on link id %d\n",
+			   link_id);
+		mutex_unlock(&ah->conf_mutex);
+		return;
+	}
+
+	ar = arvif->ar;
+	if (!ar) {
+		ath12k_err(NULL,
+			   "unable to determine device to stop vdev during channel unassign\n");
+		mutex_unlock(&ah->conf_mutex);
+		WARN_ON(1);
+		return;
+	}
+	ab = ar->ab;
 
 	mutex_lock(&ar->conf_mutex);
 
-	ath12k_dbg(ab, ATH12K_DBG_MAC,
+	if (unlikely(test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
+		goto out;
+
+	ath12k_dbg(ab, ATH12K_DBG_SET(MAC, L2),
 		   "mac chanctx unassign ptr %pK vdev_id %i\n",
 		   ctx, arvif->vdev_id);
 
-	WARN_ON(!arvif->is_started);
+	if (!arvif->is_started)
+		goto out;
 
 	if (ab->hw_params->vdev_start_delay &&
-	    arvif->vdev_type == WMI_VDEV_TYPE_MONITOR &&
+	    ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR &&
 	    ath12k_peer_find_by_addr(ab, ar->mac_addr))
 		ath12k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
 
-	if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
 		ret = ath12k_mac_monitor_stop(ar);
-		if (ret) {
+		if (ret)
+			goto out;
+
+		arvif->is_started = false;
 			mutex_unlock(&ar->conf_mutex);
-			return;
+		ath12k_mac_remove_link_interface(hw, arvif);
+		goto unassign_exit;
 		}
 
-		arvif->is_started = false;
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "mac unassign vif links map %lu vdev id %d addr %pM assoc_link %d\n",
+		   ahvif->links_map, arvif->vdev_id, arvif->addr, arvif->assoc_link);
+
+	/* The primary link should be stop vdev and delete peer as last one. */
+	if (hweight16(ahvif->links_map) > 1 &&
+	    ab->hw_params->vdev_start_delay &&
+	    ahvif->vdev_type == WMI_VDEV_TYPE_STA) {
+		int started_count = 0;
+		int up_count = 0;
+		int pending_stop_count = 0;
+
+		ath12k_mac_get_ahvif_status(ar, ahvif, &started_count,
+					    &up_count,
+					    &pending_stop_count);
+
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+			   "mac unassign vif status started %d up %d pending stop %d\n",
+			   started_count, up_count, pending_stop_count);
+
+		if (arvif->assoc_link && started_count != 1)
+			is_pending_stop = true;
+
+		if (pending_stop_count && started_count - pending_stop_count == 1)
+			to_stop_primary = true;
+	}
+
+	if (is_pending_stop) {
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+			   "mac skip pending stop vdev id %d addr %pM assoc_link %d\n",
+			   arvif->vdev_id, arvif->addr, arvif->assoc_link);
+		arvif->pending_stop = true;
 		mutex_unlock(&ar->conf_mutex);
+		return;
 	}
 
+stop_primary:
+	ath12k_bss_disassoc(ar, arvif, true);
+
 	ret = ath12k_mac_vdev_stop(arvif);
 	if (ret)
 		ath12k_warn(ab, "failed to stop vdev %i: %d\n",
@@ -5920,15 +13710,62 @@
 
 	arvif->is_started = false;
 
+	peer = ath12k_peer_find_by_vdev_id(ab, arvif->vdev_id);
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_STA && peer) {
+		struct ieee80211_sta *sta;
+		unsigned int sta_link_id = 0xff;
+
+		if (vif->valid_links)
+			sta = ieee80211_find_sta_by_link_addrs(hw, peer->addr,
+							       NULL, &sta_link_id);
+		else
+			sta = ieee80211_find_sta_by_ifaddr(hw, peer->addr, NULL);
+		ath12k_dbg(ab, ATH12K_DBG_MAC,
+			   "peer delete check links 0x%x vdev id %i peer %pM link id %d sta %pK\n",
+			   vif->valid_links, arvif->vdev_id,
+			   peer->addr, sta_link_id, sta);
+
+		if ((ab->hw_params->vdev_start_delay && !sta && !peer->sta) ||
+		     !ab->hw_params->vdev_start_delay) {
+			ret = ath12k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
+			if (ret) {
+				ath12k_warn(ar->ab,
+					    "failed to delete peer %pM for vdev %d: %d\n",
+					    arvif->bssid, arvif->vdev_id, ret);
+			} else {
+				ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+					   "mac removed peer %pM  vdev %d\n",
+					   arvif->bssid, arvif->vdev_id);
+				ahvif->chanctx_peer_del_done = true;
+			}
+		}
+	}
+
 	if (ab->hw_params->vdev_start_delay &&
-	    arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+	    ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
 		ath12k_wmi_vdev_down(ar, arvif->vdev_id);
 
-	if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
-	    ar->num_started_vdevs == 1 && ar->monitor_vdev_created)
+	if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+	    ar->num_started_vdevs == 1 &&
+	    test_bit(MONITOR_VDEV_CREATED, &ar->monitor_flags))
 		ath12k_mac_monitor_stop(ar);
 
+	if (to_stop_primary) {
+		to_stop_primary = false;
+		arvif = ath12k_mac_get_primary_arvif(ar, ahvif);
+		if (WARN_ON(!arvif))
+			goto out;
+		link_id = arvif->link_id;
+		goto stop_primary;
+	}
+	mutex_unlock(&ar->conf_mutex);
+	ath12k_mac_remove_link_interface(hw, arvif);
+	ath12k_mac_unassign_link_vif(arvif);
+	goto unassign_exit;
+out:
 	mutex_unlock(&ar->conf_mutex);
+unassign_exit:
+	mutex_unlock(&ah->conf_mutex);
 }
 
 static int
@@ -5937,24 +13774,88 @@
 				 int n_vifs,
 				 enum ieee80211_chanctx_switch_mode mode)
 {
-	struct ath12k *ar = hw->priv;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *curr_ar, *new_ar, *ar;
+	struct ieee80211_chanctx_conf *prev_ctx, *curr_ctx;
+	int i, ret = 0, idx;
+
+	mutex_lock(&ah->conf_mutex);
+
+	/* TODO Switching a vif between two radios require deleting of vdev
+	 * in its current ar and creating a vdev and applying its cached params
+	 * to the new vdev in ar. So instead of returning error, handle it?
+	 */
+	for (i = 0; i < n_vifs; i++) {
+		if (vifs[i].old_ctx->def.chan->band !=
+		    vifs[i].new_ctx->def.chan->band) {
+			WARN_ON(1);
+			ret = -EINVAL;
+			break;
+		}
 
-	mutex_lock(&ar->conf_mutex);
+		curr_ar = ath12k_get_ar_by_ctx(hw, vifs[i].old_ctx);
+		new_ar = ath12k_get_ar_by_ctx(hw, vifs[i].new_ctx);
+		if (!curr_ar || !new_ar) {
+			ath12k_err(NULL,
+				   "unable to determine device for the passed channel ctx");
+			ath12k_err(NULL,
+				   "Old freq %d MHz (device %s) to new freq %d MHz (device %s)\n",
+				   vifs[i].old_ctx->def.chan->center_freq,
+				   curr_ar ? "valid" : "invalid",
+				   vifs[i].new_ctx->def.chan->center_freq,
+				   new_ar ? "valid" : "invalid");
+			ret = -EINVAL;
+			break;
+		}
 
+		/* Switching a vif between two radios is not allowed */
+		if (curr_ar != new_ar) {
+			ath12k_dbg(curr_ar->ab, ATH12K_DBG_MAC,
+				   "mac chanctx switch to another radio not supported.");
+			ret = -EOPNOTSUPP;
+			break;
+		}
+	}
+
+	if (ret)
+		goto unlock;
+
+	prev_ctx = vifs[0].old_ctx;
+	idx = 0;
+
+	for (i = 1; i < n_vifs; i++) {
+		curr_ctx = vifs[i].old_ctx;
+
+		if (curr_ctx != prev_ctx) {
+			ar = ath12k_get_ar_by_ctx(hw, prev_ctx);
+			mutex_lock(&ar->conf_mutex);
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
 		   "mac chanctx switch n_vifs %d mode %d\n",
-		   n_vifs, mode);
-	ath12k_mac_update_vif_chan(ar, vifs, n_vifs);
+				   i - idx, mode);
+			ath12k_mac_process_update_vif_chan(ar, vifs + idx, i - idx);
+			mutex_unlock(&ar->conf_mutex);
+			prev_ctx = curr_ctx;
+			idx = i;
+		}
+	}
 
+	ar = ath12k_get_ar_by_ctx(hw, prev_ctx);
+	mutex_lock(&ar->conf_mutex);
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "mac chanctx switch n_vifs %d mode %d\n",
+		   i - idx, mode);
+	ath12k_mac_process_update_vif_chan(ar, vifs + idx, i - idx);
 	mutex_unlock(&ar->conf_mutex);
 
-	return 0;
+unlock:
+	mutex_unlock(&ah->conf_mutex);
+	return ret;
 }
 
-static int
+int
 ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value)
 {
-	struct ath12k_vif *arvif;
+	struct ath12k_link_vif *arvif;
 	int ret = 0;
 
 	mutex_lock(&ar->conf_mutex);
@@ -5976,13 +13877,58 @@
 
 /* mac80211 stores device specific RTS/Fragmentation threshold value,
  * this is set interface specific to firmware from ath12k driver
+ * TODO Move to link specific config
  */
-static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value /*, */
+					   /* struct ieee80211_vif *vif, int link_id */)
 {
-	struct ath12k *ar = hw->priv;
+	u8 link_id = 0;
+	struct ieee80211_vif *vif = NULL;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k_vif *ahvif;
+	struct ath12k *ar;
+	struct ath12k_link_vif *arvif;
+	int ret = -1;
 	int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
 
-	return ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+	if (vif) {
+		mutex_lock(&ah->conf_mutex);
+		ahvif = ath12k_vif_to_ahvif(vif);
+		arvif = ahvif->link[link_id];
+
+		if (arvif == NULL || !arvif->is_created) {
+			ath12k_dbg(NULL, ATH12K_DBG_MAC,
+				    "bss info parameter changes %d cached to apply after vdev create on channel assign\n",
+				    param_id);
+			mutex_unlock(&ah->conf_mutex);
+			return ret;
+		}
+
+		ar = arvif->ar;
+		if (!ar) {
+			ath12k_err(NULL,
+				   "Failed to set rts threshold for link: %d\n",
+				   link_id);
+			mutex_unlock(&ah->conf_mutex);
+			return ret;
+		}
+
+		ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to set RTS config for all vdevs of pdev %d",
+				    ar->pdev->pdev_id);
+		}
+
+		mutex_unlock(&ah->conf_mutex);
+	} else {
+		ar = ah->radio;
+		ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to set RTS config for all vdevs of pdev %d",
+				    ar->pdev->pdev_id);
+		}
+	}
+	return ret;
 }
 
 static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
@@ -6000,34 +13946,75 @@
 	return -EOPNOTSUPP;
 }
 
-static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-				u32 queues, bool drop)
+static void ath12k_mac_flush(struct ath12k *ar)
 {
-	struct ath12k *ar = hw->priv;
+	struct ath12k_base *ab = ar->ab;
 	long time_left;
 
-	if (drop)
+	atomic_inc(&ar->flush_request);
+	time_left = wait_event_timeout(ar->tx_empty_waitq,
+				       ((atomic_read(&ar->dp.num_tx_pending) == 0) &&
+					(atomic_read(&ar->num_pending_mgmt_tx) == 0)),
+				       ATH12K_FLUSH_TIMEOUT);
+	atomic_dec(&ar->flush_request);
+
+	if (time_left == 0) {
+		ath12k_warn(ab, "failed to flush transmit queue pending mgmt %d data %d\n",
+			    atomic_read(&ar->num_pending_mgmt_tx),
+			    atomic_read(&ar->dp.num_tx_pending));
 		return;
+	}
 
-	time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
-				       (atomic_read(&ar->dp.num_tx_pending) == 0),
-				       ATH12K_FLUSH_TIMEOUT);
-	if (time_left == 0)
-		ath12k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left);
+	ath12k_dbg(ab, ATH12K_DBG_SET(MAC, L3),
+		   "mac tx flush pending mgmt %d data %d\n",
+		   atomic_read(&ar->num_pending_mgmt_tx),
+		   atomic_read(&ar->dp.num_tx_pending));
 }
 
-static int
-ath12k_mac_bitrate_mask_num_ht_rates(struct ath12k *ar,
-				     enum nl80211_band band,
-				     const struct cfg80211_bitrate_mask *mask)
+static void ath12k_mac_op_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+				    struct ieee80211_sta *sta)
 {
-	int num_rates = 0;
+	/* There is no station specific queue, No need to do anything here */
+}
+
+static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+				u32 queues, bool drop)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	struct ath12k_vif *ahvif;
 	int i;
+	u8 link_id;
 
-	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
-		num_rates += hweight16(mask->control[band].ht_mcs[i]);
+	if (drop)
+		return;
 
-	return num_rates;
+	if (!vif) {
+		/* FIXME: Need to identify which radio to flush.
+		 * Until flush all the device if vif NULL
+		 */
+		ar = ah->radio;
+		for (i = 0; i < ah->num_radio; i++) {
+			ath12k_mac_flush(ar);
+			ar++;
+		}
+	} else {
+		ahvif = (void *)vif->drv_priv;
+
+		for_each_set_bit(link_id, &ahvif->links_map, ATH12K_NUM_MAX_LINKS) {
+			if (!ahvif->link[link_id])
+				continue;
+
+			ar = ath12k_get_ar_by_vif(hw, vif, link_id);
+			if (!ar) {
+				ath12k_err(NULL,
+					   "unable to determine device for link_id %d tx flush\n",
+					   link_id);
+				continue;
+			}
+			ath12k_mac_flush(ar);
+		}
+	}
 }
 
 static bool
@@ -6045,9 +14032,29 @@
 	if (ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask))
 		return false;
 
+	if (ath12k_mac_bitrate_mask_num_he_rates(ar, band, mask))
+		return false;
+
+	if (ath12k_mac_bitrate_mask_num_eht_rates(ar, band, mask))
+		return false;
+
 	return num_rates == 1;
 }
 
+static __le16
+ath12k_mac_get_tx_mcs_map(const struct ieee80211_sta_he_cap *he_cap)
+{
+	if (he_cap->he_cap_elem.phy_cap_info[0] &
+	    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+		return he_cap->he_mcs_nss_supp.tx_mcs_80p80;
+
+	if (he_cap->he_cap_elem.phy_cap_info[0] &
+	    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+		return he_cap->he_mcs_nss_supp.tx_mcs_160;
+
+	return he_cap->he_mcs_nss_supp.tx_mcs_80;
+}
+
 static bool
 ath12k_mac_bitrate_mask_get_single_nss(struct ath12k *ar,
 				       enum nl80211_band band,
@@ -6056,8 +14063,13 @@
 {
 	struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
 	u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+	u16 he_mcs_map = 0;
+	u16 eht_mcs_map = 0;
 	u8 ht_nss_mask = 0;
 	u8 vht_nss_mask = 0;
+	u8 he_nss_mask = 0;
+	u8 eht_nss_mask = 0;
+	u8 mcs_nss_len;
 	int i;
 
 	/* No need to consider legacy here. Basic rates are always present
@@ -6084,7 +14096,71 @@
 			return false;
 	}
 
-	if (ht_nss_mask != vht_nss_mask)
+	he_mcs_map = le16_to_cpu(ath12k_mac_get_tx_mcs_map(&sband->iftype_data->he_cap));
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
+		if (mask->control[band].he_mcs[i] == 0)
+			continue;
+
+		if (mask->control[band].he_mcs[i] ==
+		    ath12k_mac_get_max_he_mcs_map(he_mcs_map, i))
+			he_nss_mask |= BIT(i);
+		else
+			return false;
+	}
+
+	mcs_nss_len = ieee80211_eht_mcs_nss_size(&sband->iftype_data->he_cap.he_cap_elem,
+						 &sband->iftype_data->eht_cap.eht_cap_elem,
+						 false);
+	if (mcs_nss_len == 4) {
+		/* 20 MHz only STA case */
+		const struct ieee80211_eht_mcs_nss_supp_20mhz_only *eht_mcs_nss =
+			&sband->iftype_data->eht_cap.eht_mcs_nss_supp.only_20mhz;
+		if (eht_mcs_nss->rx_tx_mcs13_max_nss)
+			eht_mcs_map = 0x1fff;
+		else if (eht_mcs_nss->rx_tx_mcs11_max_nss)
+			eht_mcs_map = 0x07ff;
+		else if (eht_mcs_nss->rx_tx_mcs9_max_nss)
+			eht_mcs_map = 0x01ff;
+		else
+			eht_mcs_map = 0x007f;
+	} else {
+		const struct ieee80211_eht_mcs_nss_supp_bw *eht_mcs_nss;
+
+		switch (mcs_nss_len) {
+		case 9:
+			eht_mcs_nss = &sband->iftype_data->eht_cap.eht_mcs_nss_supp.bw._320;
+			break;
+		case 6:
+			eht_mcs_nss = &sband->iftype_data->eht_cap.eht_mcs_nss_supp.bw._160;
+			break;
+		case 3:
+			eht_mcs_nss = &sband->iftype_data->eht_cap.eht_mcs_nss_supp.bw._80;
+			break;
+		default:
+			return false;
+		}
+
+		if (eht_mcs_nss->rx_tx_mcs13_max_nss)
+			eht_mcs_map = 0x1fff;
+		else if (eht_mcs_nss->rx_tx_mcs11_max_nss)
+			eht_mcs_map = 0x7ff;
+		else
+			eht_mcs_map = 0x1ff;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].eht_mcs); i++) {
+		if (mask->control[band].eht_mcs[i] == 0)
+			continue;
+
+		if (mask->control[band].eht_mcs[i] < eht_mcs_map)
+			eht_nss_mask |= BIT(i);
+		else
+			return false;
+	}
+
+	if (ht_nss_mask != vht_nss_mask || ht_nss_mask != he_nss_mask ||
+	    ht_nss_mask != eht_nss_mask)
 		return false;
 
 	if (ht_nss_mask == 0)
@@ -6131,18 +14207,157 @@
 	return 0;
 }
 
-static int ath12k_mac_set_fixed_rate_params(struct ath12k_vif *arvif,
-					    u32 rate, u8 nss, u8 sgi, u8 ldpc)
+static int
+ath12k_mac_set_fixed_rate_GI_LTF(struct ath12k_link_vif *arvif, u8 gi, u8 ltf)
+{
+	struct ieee80211_bss_conf *link_conf;
+	struct ath12k *ar = arvif->ar;
+	int param, ret;
+	bool eht_support;
+
+	/* 0.8 = 0, 1.6 = 2 and 3.2 = 3. */
+	if (gi && gi != 0xFF)
+		gi += 1;
+
+	rcu_read_lock();
+
+	link_conf = ath12k_get_link_bss_conf(arvif);
+
+	if (!link_conf) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+
+	eht_support = link_conf->eht_support;
+
+	rcu_read_unlock();
+
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+					    WMI_VDEV_PARAM_SGI, gi);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set HE GI:%d, error:%d\n",
+			    gi, ret);
+		return ret;
+	}
+	/* start from 1 */
+	if (ltf != 0xFF)
+		ltf += 1;
+
+	if (eht_support)
+		param = WMI_VDEV_PARAM_EHT_LTF;
+	else
+		param = WMI_VDEV_PARAM_HE_LTF;
+
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+					    param, ltf);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set HE LTF:%d, error:%d\n",
+			    ltf, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+ath12k_mac_set_auto_rate_GI_LTF(struct ath12k_link_vif *arvif, u16 gi, u8 ltf)
 {
 	struct ath12k *ar = arvif->ar;
-	u32 vdev_param;
 	int ret;
+	u32 ar_gi_ltf = 0;
+
+	if (gi != 0xFF) {
+		switch (gi) {
+		case NL80211_RATE_INFO_HE_GI_0_8:
+			gi = WMI_AUTORATE_800NS_GI;
+			break;
+		case NL80211_RATE_INFO_HE_GI_1_6:
+			gi = WMI_AUTORATE_1600NS_GI;
+			break;
+		case NL80211_RATE_INFO_HE_GI_3_2:
+			gi = WMI_AUTORATE_3200NS_GI;
+			break;
+		default:
+			ath12k_warn(ar->ab, "Invalid GI\n");
+			return -EINVAL;
+		}
+	}
+
+	if (ltf != 0xFF) {
+		switch (ltf) {
+		case NL80211_RATE_INFO_HE_1XLTF:
+			ltf = WMI_HE_AUTORATE_LTF_1X;
+			break;
+		case NL80211_RATE_INFO_HE_2XLTF:
+			ltf = WMI_HE_AUTORATE_LTF_2X;
+			break;
+		case NL80211_RATE_INFO_HE_4XLTF:
+			ltf = WMI_HE_AUTORATE_LTF_4X;
+			break;
+		default:
+			ath12k_warn(ar->ab, "Invalid LTF\n");
+			return -EINVAL;
+		}
+	}
+
+	ar_gi_ltf = gi | ltf;
+
+
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+					    WMI_VDEV_PARAM_AUTORATE_MISC_CFG,
+					    ar_gi_ltf);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to set HE autorate GI:%u, LTF:%u params, error:%d\n",
+			    gi, ltf, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath12k_mac_set_rate_params(struct ath12k_link_vif *arvif,
+				      u32 rate, u8 nss, u8 sgi, u8 ldpc,
+				      u8 he_gi, u8 he_ltf, bool he_fixed_rate,
+				      u8 eht_gi, u8 eht_ltf,
+				      bool eht_fixed_rate,
+				      int he_ul_rate, u8 he_ul_nss)
+{
+	struct ieee80211_bss_conf *link_conf;
+	struct ath12k *ar = arvif->ar;
+	u32 vdev_param, rate_code;
+	int ret;
+	bool he_support, eht_support;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
-		   arvif->vdev_id, rate, nss, sgi);
+	rcu_read_lock();
+
+	link_conf = ath12k_get_link_bss_conf(arvif);
+
+	if (!link_conf) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+
+	eht_support = link_conf->eht_support;
+	he_support = link_conf->he_support;
 
+	rcu_read_unlock();
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "mac set rate params vdev %i, rate:0x%02x, nss:0x%02x, sgi:0x%02x, ldpc:0x%02x\n",
+		   arvif->vdev_id, rate, nss, sgi, ldpc);
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "he_gi:0x%02x, he_ltf:0x%02x, he_fixed_rate:%d\n", he_gi,
+		   he_ltf, he_fixed_rate);
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "eht_gi:0x%02x, eht_ltf:0x%02x, eht_fixed_rate:%d\n", eht_gi,
+		   eht_ltf, eht_fixed_rate);
+
+	if (!he_support || !eht_support) {
 	vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
 	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
 					    vdev_param, rate);
@@ -6151,8 +14366,10 @@
 			    rate, ret);
 		return ret;
 	}
+	}
 
 	vdev_param = WMI_VDEV_PARAM_NSS;
+
 	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
 					    vdev_param, nss);
 	if (ret) {
@@ -6161,6 +14378,35 @@
 		return ret;
 	}
 
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+					    WMI_VDEV_PARAM_LDPC, ldpc);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set ldpc param %d: %d\n",
+			    ldpc, ret);
+		return ret;
+	}
+
+	if (eht_support) {
+		if (eht_fixed_rate) {
+			ret = ath12k_mac_set_fixed_rate_GI_LTF(arvif, eht_gi,
+							       eht_ltf);
+		} else {
+			ret = ath12k_mac_set_auto_rate_GI_LTF(arvif, eht_gi,
+							      eht_ltf);
+		}
+		if (ret)
+			return ret;
+	} else if (he_support) {
+		if (he_fixed_rate) {
+			ret = ath12k_mac_set_fixed_rate_GI_LTF(arvif, he_gi,
+							       he_ltf);
+		} else {
+			ret = ath12k_mac_set_auto_rate_GI_LTF(arvif, he_gi,
+							      he_ltf);
+		}
+		if (ret)
+			return ret;
+	} else {
 	vdev_param = WMI_VDEV_PARAM_SGI;
 	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
 					    vdev_param, sgi);
@@ -6169,14 +14415,21 @@
 			    sgi, ret);
 		return ret;
 	}
+	}
 
-	vdev_param = WMI_VDEV_PARAM_LDPC;
+	if ((he_ul_rate < 0) || !he_ul_nss)
+		return 0;
+
+	rate_code = ATH12K_HW_RATE_CODE(he_ul_rate, he_ul_nss - 1,
+					WMI_RATE_PREAMBLE_HE);
+
+	vdev_param = WMI_VDEV_PARAM_UL_FIXED_RATE;
 	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
-					    vdev_param, ldpc);
+					    vdev_param, rate_code);
+
 	if (ret) {
-		ath12k_warn(ar->ab, "failed to set ldpc param %d: %d\n",
-			    ldpc, ret);
-		return ret;
+		ath12k_warn(ar->ab, "failed to set HE UL Fixed Rate:%d, error:%d\n",
+			    he_ul_rate, ret);
 	}
 
 	return 0;
@@ -6207,67 +14460,371 @@
 	return true;
 }
 
+/* Check that the HE MCS mask for every spatial stream is one the
+ * firmware can program: either no rates at all (0) or a contiguous
+ * range starting at MCS 0 — 0-7, 0-9 or 0-11, i.e. BIT(n) - 1.
+ * Returns false for any sparse selection.  @ar is unused here but kept
+ * for signature symmetry with the other bitrate-mask helpers.
+ */
+static bool
+ath12k_mac_he_mcs_range_present(struct ath12k *ar,
+				enum nl80211_band band,
+				const struct cfg80211_bitrate_mask *mask)
+{
+	int i;
+	u16 he_mcs;
+
+	for (i = 0; i < NL80211_HE_NSS_MAX; i++) {
+		he_mcs = mask->control[band].he_mcs[i];
+
+		switch (he_mcs) {
+		case 0:
+		case BIT(8) - 1:
+		case BIT(10) - 1:
+		case BIT(12) - 1:
+			break;
+		default:
+			return false;
+		}
+	}
+
+	return true;
+}
+
+/* Return true when at least one HE uplink MCS bit is set in @mask for
+ * any spatial stream on @band; false when the UL mask is fully empty.
+ */
+static bool
+ath12k_mac_he_ul_mcs_present(struct ath12k *ar,
+				enum nl80211_band band,
+				const struct cfg80211_bitrate_mask *mask)
+{
+	int i;
+
+	for (i = 0; i < NL80211_HE_NSS_MAX; i++) {
+		if (mask->control[band].he_ul_mcs[i])
+			return true;
+	}
+
+	return false;
+}
+
+/* Check that the EHT MCS mask of every spatial stream is a range the
+ * firmware can program.  All streams may use the empty mask or a
+ * contiguous range from MCS 0 (0-7, 0-9, 0-11, 0-13).  The wider masks
+ * (BIT(15)-1, BIT(16)-1 and BIT(16)-BIT(14)-1) are accepted only on
+ * stream index 0 — presumably because the MCS-14/15 rates are
+ * single-stream only; TODO confirm against the EHT rate tables.
+ * Returns false for any other (sparse) selection.
+ */
+static bool
+ath12k_mac_eht_mcs_range_present(struct ath12k *ar,
+				 enum nl80211_band band,
+				 const struct cfg80211_bitrate_mask *mask)
+{
+	int i;
+	u16 eht_mcs;
+
+	for (i = 0; i < NL80211_EHT_NSS_MAX; i++) {
+		eht_mcs = mask->control[band].eht_mcs[i];
+
+		switch (eht_mcs) {
+		case 0:
+		case BIT(8) - 1:
+		case BIT(10) - 1:
+		case BIT(12) - 1:
+		case BIT(14) - 1:
+			break;
+		case BIT(15) - 1:
+		case BIT(16) - 1:
+		case BIT(16) - BIT(14) - 1:
+			/* extended ranges allowed on the first stream only */
+			if (i != 0)
+				return false;
+			break;
+		default:
+			return false;
+		}
+	}
+
+	return true;
+}
+
 static void ath12k_mac_set_bitrate_mask_iter(void *data,
 					     struct ieee80211_sta *sta)
 {
-	struct ath12k_vif *arvif = data;
-	struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
+	struct ath12k_link_vif *arvif = data;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_link_sta *arsta;
 	struct ath12k *ar = arvif->ar;
+	u8 link_id = arvif->link_id;
+
+	if (ahsta->ahvif != arvif->ahvif)
+		return;
+
+	/* Check if there is a link sta in the vif link */
+	if (!(BIT(link_id) & ahsta->links_map))
+		return;
+
+	arsta = ahsta->link[link_id];
 
 	spin_lock_bh(&ar->data_lock);
 	arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
 	spin_unlock_bh(&ar->data_lock);
 
-	ieee80211_queue_work(ar->hw, &arsta->update_wk);
+	ieee80211_queue_work(ar->ah->hw, &arsta->update_wk);
 }
 
 static void ath12k_mac_disable_peer_fixed_rate(void *data,
 					       struct ieee80211_sta *sta)
 {
-	struct ath12k_vif *arvif = data;
+	struct ath12k_link_sta *arsta;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_link_vif *arvif = data;
+	struct ath12k *ar;
+	u8 link_id = arvif->link_id;
+
+	if (ahsta->ahvif != arvif->ahvif)
+		return;
+
+	/* Check if there is a link sta in the vif link */
+	if (!(BIT(link_id) & ahsta->links_map))
+		return;
+
+	arsta = ahsta->link[link_id];
+
+	ar = arvif->ar;
+
+	spin_lock_bh(&ar->data_lock);
+	arsta->disable_fixed_rate = true;
+	spin_unlock_bh(&ar->data_lock);
+
+	ieee80211_queue_work(ar->ah->hw, &arsta->update_wk);
+}
+
+/* Verify that every peer currently known to @ar can honour the fixed
+ * rate implied by @mask on @band for @link_id.  A mode (VHT/HE/EHT or
+ * HE-UL) counts as "fixed" when the mask selects exactly one rate in
+ * it; a peer is compatible when its link_sta advertises that PHY mode
+ * and its rx_nss covers the highest NSS selected in the mask.
+ * Returns true when no fixed rate is requested or all peers qualify,
+ * false otherwise (including when a peer has no link_sta for @link_id).
+ */
+static bool
+ath12k_mac_validate_fixed_rate_settings(struct ath12k *ar, enum nl80211_band band,
+					const struct cfg80211_bitrate_mask *mask,
+					unsigned int link_id)
+{
+	bool eht_fixed_rate = false, he_fixed_rate = false, vht_fixed_rate = false;
+	bool he_ul_fixed_rate = false;
+	struct ath12k_peer *peer, *tmp;
+	const u16 *vht_mcs_mask, *he_mcs_mask, *eht_mcs_mask, *he_ul_mcs_mask;
+	u8 vht_nss, he_nss, eht_nss, he_ul_nss;
+	bool ret = true;
+	struct ieee80211_link_sta *link_sta;
+
+	vht_mcs_mask = mask->control[band].vht_mcs;
+	he_mcs_mask = mask->control[band].he_mcs;
+	eht_mcs_mask = mask->control[band].eht_mcs;
+	he_ul_mcs_mask = mask->control[band].he_ul_mcs;
+
+	/* exactly one rate in a mode's mask means a fixed rate was asked */
+	if (ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask) == 1)
+		vht_fixed_rate = true;
+
+	if (ath12k_mac_bitrate_mask_num_he_rates(ar, band, mask) == 1)
+		he_fixed_rate = true;
+
+	if (ath12k_mac_bitrate_mask_num_eht_rates(ar, band, mask) == 1)
+		eht_fixed_rate = true;
+
+	if (ath12k_mac_bitrate_mask_num_he_ul_rates(ar, band, mask) == 1)
+		he_ul_fixed_rate = true;
+
+	if (!vht_fixed_rate && !he_fixed_rate && !eht_fixed_rate && !he_ul_fixed_rate)
+		return true;
+
+	vht_nss = ath12k_mac_max_vht_nss(vht_mcs_mask);
+	he_nss =  ath12k_mac_max_he_nss(he_mcs_mask);
+	eht_nss = ath12k_mac_max_eht_nss(eht_mcs_mask);
+	he_ul_nss =  ath12k_mac_max_he_nss(he_ul_mcs_mask);
+
+	/* RCU protects the link_sta dereference, base_lock the peer list */
+	rcu_read_lock();
+	spin_lock_bh(&ar->ab->base_lock);
+	list_for_each_entry_safe(peer, tmp, &ar->ab->peers, list) {
+		if (peer->sta) {
+			link_sta = rcu_dereference(peer->sta->link[link_id]);
+
+			if (!link_sta) {
+				ret = false;
+				goto exit;
+			}
+
+			if (vht_fixed_rate && (!link_sta->vht_cap.vht_supported ||
+					       link_sta->rx_nss < vht_nss)) {
+				ret = false;
+				goto exit;
+			}
+			if (he_fixed_rate && (!link_sta->he_cap.has_he ||
+					      link_sta->rx_nss < he_nss)) {
+				ret = false;
+				goto exit;
+			}
+			if (eht_fixed_rate && (!link_sta->eht_cap.has_eht ||
+					       link_sta->rx_nss < eht_nss)) {
+				ret = false;
+				goto exit;
+			}
+			/* TODO: check when UL is valid */
+			if (he_ul_fixed_rate && (!link_sta->he_cap.has_he ||
+					link_sta->rx_nss < he_ul_nss)) {
+				ret = false;
+				goto exit;
+			}
+		}
+	}
+exit:
+	spin_unlock_bh(&ar->ab->base_lock);
+	rcu_read_unlock();
+	return ret;
+}
+
+static bool
+ath12k_mac_check_fixed_rate_settings_for_mumimo(struct ath12k_link_vif *arvif,
+                                               const u16 *vht_mcs_mask,
+                                               const u16 *he_mcs_mask)
+{
 	struct ath12k *ar = arvif->ar;
-	int ret;
+	struct ieee80211_he_cap_elem he_cap_elem = {0};
+	int nss_idx;
+	int he_nss;
+	int vht_nss;
 
-	ret = ath12k_wmi_set_peer_param(ar, sta->addr,
-					arvif->vdev_id,
-					WMI_PEER_PARAM_FIXED_RATE,
-					WMI_FIXED_RATE_NONE);
-	if (ret)
-		ath12k_warn(ar->ab,
-			    "failed to disable peer fixed rate for STA %pM ret %d\n",
-			    sta->addr, ret);
+	struct ieee80211_bss_conf *link_conf;
+
+	rcu_read_lock();
+
+	link_conf = rcu_dereference(arvif->ahvif->vif->link_conf[arvif->link_id]);
+
+	if (!link_conf) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+
+	vht_nss =  ath12k_mac_max_vht_nss(vht_mcs_mask);
+
+	if (vht_nss != 1) {
+		for (nss_idx = vht_nss-1; nss_idx >= 0; nss_idx--) {
+			if (vht_mcs_mask[nss_idx])
+				continue;
+
+			if (arvif->vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) {
+				rcu_read_unlock();
+				ath12k_warn(ar->ab, "vht fixed NSS rate is allowed only when MU MIMO is disabled\n");
+				return false;
+			}
+		}
+	}
+
+	if (!link_conf->he_support) {
+		rcu_read_unlock();
+		return true;
+	}
+
+	 he_nss =  ath12k_mac_max_he_nss(he_mcs_mask);
+
+
+	if (he_nss == 1) {
+	       rcu_read_unlock();
+	       return true;
+	}
+
+	 for (nss_idx = he_nss-1; nss_idx >= 0; nss_idx--) {
+		 if (he_mcs_mask[nss_idx])
+			 continue;
+
+		 if ((he_cap_elem.phy_cap_info[2] & IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO) ||
+		      (he_cap_elem.phy_cap_info[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)) {
+			rcu_read_unlock();
+			ath12k_warn(ar->ab, "he fixed NSS rate is allowed only when MU MIMO is disabled\n");
+			return false;
+
+		 }
+	 }
+	 rcu_read_unlock();
+	 return true;
 }
 
 static int
 ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
-			       struct ieee80211_vif *vif,
+			       struct ieee80211_vif *vif, /* unsigned int link_id, */
 			       const struct cfg80211_bitrate_mask *mask)
 {
-	struct ath12k_vif *arvif = (void *)vif->drv_priv;
+	unsigned int link_id = 0;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k_vif *ahvif = (void *)vif->drv_priv;
 	struct cfg80211_chan_def def;
-	struct ath12k *ar = arvif->ar;
+	struct ath12k *ar;
 	enum nl80211_band band;
 	const u8 *ht_mcs_mask;
 	const u16 *vht_mcs_mask;
+	const u16 *he_mcs_mask;
+	const u16 *eht_mcs_mask;
+	const u16 *he_ul_mcs_mask;
+	u8 he_ltf = 0;
+	u8 he_gi = 0;
+	u8 eht_ltf = 0;
+	u8 eht_gi = 0;
 	u32 rate;
-	u8 nss;
+	u8 nss, he_ul_nss = 0;
 	u8 sgi;
 	u8 ldpc;
 	int single_nss;
-	int ret;
+	int ret, i;
 	int num_rates;
+	int he_ul_rate = -1;
+	bool he_fixed_rate = false;
+	bool eht_fixed_rate = false;
+	struct ath12k_link_vif *arvif;
 
-	if (ath12k_mac_vif_chan(vif, &def))
+	if (ath12k_mac_vif_chan(vif, &def, link_id))
 		return -EPERM;
 
+	mutex_lock(&ah->conf_mutex);
+
+	arvif = ahvif->link[link_id];
+
+	if (!arvif) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ar = ath12k_get_ar_by_vif(hw, vif, link_id);
+	if (!ar) {
+		ath12k_err(NULL,
+			   "unable to determine device to set bitrates, configs can be applied after device bringup\n");
+		ret = -EPERM;
+		goto out;
+	}
+
 	band = def.chan->band;
 	ht_mcs_mask = mask->control[band].ht_mcs;
 	vht_mcs_mask = mask->control[band].vht_mcs;
+	he_mcs_mask = mask->control[band].he_mcs;
+	he_ul_mcs_mask = mask->control[band].he_ul_mcs;
+	eht_mcs_mask = mask->control[band].eht_mcs;
 	ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
 
+	if (vif->valid_links && ath12k_peer_assoc_h_eht_masked(eht_mcs_mask)) {
+		ath12k_warn(ar->ab, "cannot disable EHT rates on a ML bss\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
 	sgi = mask->control[band].gi;
-	if (sgi == NL80211_TXRATE_FORCE_LGI)
-		return -EINVAL;
+	if (sgi == NL80211_TXRATE_FORCE_LGI) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	he_gi = mask->control[band].he_gi;
+	he_ltf = mask->control[band].he_ltf;
+
+	eht_gi = 0xFF;
+	eht_ltf = 0xFF;
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].he_ul_mcs); i++) {
+		if (hweight16(mask->control[band].he_ul_mcs[i]) == 1) {
+			he_ul_nss = i + 1;
+			he_ul_rate = ffs((int)
+					mask->control[band].he_ul_mcs[i]) - 1;
+			break;
+		}
+	}
+	num_rates = ath12k_mac_bitrate_mask_num_he_ul_rates(ar, band,
+			mask);
+	if (ath12k_mac_he_ul_mcs_present(ar, band, mask) &&
+			num_rates != 1) {
+		ath12k_warn(ar->ab,
+				"Setting HE UL MCS Fixed Rate range is not supported\n");
+		ret = -EINVAL;
+		goto out;
+	}
 
 	/* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it
 	 * requires passing at least one of used basic rates along with them.
@@ -6283,20 +14840,37 @@
 		if (ret) {
 			ath12k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n",
 				    arvif->vdev_id, ret);
-			return ret;
+			goto out;
 		}
-		ieee80211_iterate_stations_atomic(ar->hw,
+		ieee80211_iterate_stations_atomic(hw,
 						  ath12k_mac_disable_peer_fixed_rate,
 						  arvif);
 	} else if (ath12k_mac_bitrate_mask_get_single_nss(ar, band, mask,
 							  &single_nss)) {
 		rate = WMI_FIXED_RATE_NONE;
 		nss = single_nss;
+		mutex_lock(&ar->conf_mutex);
+		arvif->bitrate_mask = *mask;
+		ieee80211_iterate_stations_atomic(hw,
+						  ath12k_mac_set_bitrate_mask_iter,
+						  arvif);
+		mutex_unlock(&ar->conf_mutex);
 	} else {
 		rate = WMI_FIXED_RATE_NONE;
+
+		if (!ath12k_mac_check_fixed_rate_settings_for_mumimo(arvif, vht_mcs_mask, he_mcs_mask)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (!ath12k_mac_validate_fixed_rate_settings(ar, band, mask, link_id))
+			ath12k_warn(ar->ab,
+				    "could not update fixed rate settings to all peers due to mcs/nss incompaitiblity\n");
 		nss = min_t(u32, ar->num_tx_chains,
-			    max(ath12k_mac_max_ht_nss(ht_mcs_mask),
-				ath12k_mac_max_vht_nss(vht_mcs_mask)));
+			    max(max(max(ath12k_mac_max_ht_nss(ht_mcs_mask),
+					ath12k_mac_max_vht_nss(vht_mcs_mask)),
+				    ath12k_mac_max_he_nss(he_mcs_mask)),
+				ath12k_mac_max_eht_nss(eht_mcs_mask)));
 
 		/* If multiple rates across different preambles are given
 		 * we can reconfigure this info with all peers using PEER_ASSOC
@@ -6328,17 +14902,43 @@
 			 */
 			ath12k_warn(ar->ab,
 				    "Setting more than one MCS Value in bitrate mask not supported\n");
-			return -EINVAL;
+			ret = -EINVAL;
+			goto out;
 		}
 
-		ieee80211_iterate_stations_atomic(ar->hw,
-						  ath12k_mac_disable_peer_fixed_rate,
-						  arvif);
+		num_rates = ath12k_mac_bitrate_mask_num_he_rates(ar, band,
+								 mask);
+		if (num_rates == 1)
+			he_fixed_rate = true;
+
+		if (!ath12k_mac_he_mcs_range_present(ar, band, mask) &&
+		    num_rates > 1) {
+			ath12k_warn(ar->ab,
+				    "Setting more than one HE MCS Value in bitrate mask not supported\n");
+			ret = -EINVAL;
+			goto out;
+		}
+
+		num_rates = ath12k_mac_bitrate_mask_num_eht_rates(ar, band,
+								  mask);
+		if (num_rates == 1)
+			eht_fixed_rate = true;
+
+		if (!ath12k_mac_eht_mcs_range_present(ar, band, mask) &&
+		    num_rates > 1) {
+			ath12k_warn(ar->ab,
+				    "Setting more than one EHT MCS Value in bitrate mask not supported\n");
+			ret =-EINVAL;
+			goto out;
+		}
 
 		mutex_lock(&ar->conf_mutex);
+		ieee80211_iterate_stations_atomic(hw,
+						  ath12k_mac_disable_peer_fixed_rate,
+						  arvif);
 
 		arvif->bitrate_mask = *mask;
-		ieee80211_iterate_stations_atomic(ar->hw,
+		ieee80211_iterate_stations_atomic(hw,
 						  ath12k_mac_set_bitrate_mask_iter,
 						  arvif);
 
@@ -6347,56 +14947,119 @@
 
 	mutex_lock(&ar->conf_mutex);
 
-	ret = ath12k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
+	ret = ath12k_mac_set_rate_params(arvif, rate, nss, sgi, ldpc, he_gi,
+					 he_ltf, he_fixed_rate, eht_gi, eht_ltf,
+					 eht_fixed_rate, he_ul_rate, he_ul_nss);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n",
 			    arvif->vdev_id, ret);
 	}
 
 	mutex_unlock(&ar->conf_mutex);
-
+out:
+	mutex_unlock(&ah->conf_mutex);
 	return ret;
 }
 
-static void
-ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
-				enum ieee80211_reconfig_type reconfig_type)
+static void ath12k_mac_sta_hw_restart_disconnect(struct ath12k *ar)
 {
-	struct ath12k *ar = hw->priv;
-	struct ath12k_base *ab = ar->ab;
-	int recovery_count;
+	struct ath12k_vif *ahvif;
+	struct ath12k_link_vif *arvif;
 
-	if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
-		return;
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		ahvif = arvif->ahvif;
+		if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
+			ieee80211_hw_restart_disconnect(ahvif->vif);
+	}
+}
+
+static void ath12k_mac_reconfig_completion(struct ath12k *ar,
+					   u8 *restart_count)
+{
+	u8 recovery_count;
 
 	mutex_lock(&ar->conf_mutex);
 
 	if (ar->state == ATH12K_STATE_RESTARTED) {
+		if (ar->ab->ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE0)
+			ath12k_mac_sta_hw_restart_disconnect(ar);
 		ath12k_warn(ar->ab, "pdev %d successfully recovered\n",
 			    ar->pdev->pdev_id);
 		ar->state = ATH12K_STATE_ON;
-		ieee80211_wake_queues(ar->hw);
+		*restart_count += 1;
+	}
 
-		if (ab->is_reset) {
-			recovery_count = atomic_inc_return(&ab->recovery_count);
-			ath12k_dbg(ab, ATH12K_DBG_BOOT, "recovery count %d\n",
-				   recovery_count);
+	if (ar->ab->is_reset) {
+		recovery_count = atomic_inc_return(&ar->ab->recovery_count);
+		ath12k_dbg(ar->ab, ATH12K_DBG_BOOT,
+			   "recovery count %d\n", recovery_count);
 			/* When there are multiple radios in an SOC,
 			 * the recovery has to be done for each radio
 			 */
-			if (recovery_count == ab->num_radios) {
-				atomic_dec(&ab->reset_count);
-				complete(&ab->reset_complete);
-				ab->is_reset = false;
-				atomic_set(&ab->fail_cont_count, 0);
-				ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
-			}
+		if (recovery_count == ar->ab->num_radios) {
+			atomic_dec(&ar->ab->reset_count);
+			complete(&ar->ab->reset_complete);
+			ar->ab->is_reset = false;
+			ar->ab->recovery_start = false;
+			atomic_set(&ar->ab->fail_cont_count, 0);
+			clear_bit(ATH12K_FLAG_RECOVERY, &ar->ab->dev_flags);
+			spin_lock_bh(&ar->ab->base_lock);
+			ar->ab->stats.last_recovery_time =
+				jiffies_to_msecs(jiffies -
+						ar->ab->recovery_start_time);
+			spin_unlock_bh(&ar->ab->base_lock);
+			ath12k_dbg(ar->ab, ATH12K_DBG_BOOT, "reset success\n");
 		}
 	}
 
 	mutex_unlock(&ar->conf_mutex);
 }
 
+/* Complete a mac80211 restart for this hw.  When @asserted_radio is
+ * set, only that radio's reconfig is completed; otherwise every radio
+ * under @hw is walked.  The queues are woken only once every radio of
+ * the hw has reached ATH12K_STATE_ON (restart_count == num_radio), so
+ * a partially recovered hw keeps its queues stopped.
+ */
+void
+ath12k_mac_reconfig_complete(struct ieee80211_hw *hw,
+			     enum ieee80211_reconfig_type reconfig_type,
+			     struct ath12k *asserted_radio)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	u8 restart_count = 0;
+	int i;
+
+	if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+		return;
+
+	mutex_lock(&ah->conf_mutex);
+
+	ar = ah->radio;
+
+	if (asserted_radio) {
+		ath12k_mac_reconfig_completion(asserted_radio, &restart_count);
+		goto out;
+	}
+
+	for (i = 0; i < ah->num_radio; i++) {
+		ath12k_mac_reconfig_completion(ar, &restart_count);
+		ar++;
+	}
+
+out:
+	if (restart_count == ah->num_radio)
+		ieee80211_wake_queues(ah->hw);
+
+	mutex_unlock(&ah->conf_mutex);
+	ah->ag->hw_queues_stopped = false;
+}
+
+/* mac80211 reconfig_complete op: complete the restart for every radio
+ * under this hw (no single asserted radio).
+ */
+static void ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
+					    enum ieee80211_reconfig_type reconfig_type)
+{
+	ath12k_mac_reconfig_complete(hw, reconfig_type, NULL);
+}
+
 static void
 ath12k_mac_update_bss_chan_survey(struct ath12k *ar,
 				  struct ieee80211_channel *channel)
@@ -6432,32 +15095,59 @@
 static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
 				    struct survey_info *survey)
 {
-	struct ath12k *ar = hw->priv;
+	struct ath12k *ar;
 	struct ieee80211_supported_band *sband;
 	struct survey_info *ar_survey;
 	int ret = 0;
+	enum nl80211_band band;
 
 	if (idx >= ATH12K_NUM_CHANS)
 		return -ENOENT;
 
-	ar_survey = &ar->survey[idx];
-
-	mutex_lock(&ar->conf_mutex);
-
+	band = NL80211_BAND_2GHZ;
 	sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
 	if (sband && idx >= sband->n_channels) {
 		idx -= sband->n_channels;
 		sband = NULL;
 	}
 
-	if (!sband)
+	if (!sband) {
 		sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
+		band = NL80211_BAND_5GHZ;
+	}
+
+	if (sband && idx >= sband->n_channels) {
+		idx -= sband->n_channels;
+		sband = NULL;
+	}
+
+	if (!sband) {
+		sband = hw->wiphy->bands[NL80211_BAND_6GHZ];
+		band = NL80211_BAND_6GHZ;
+	}
 
 	if (!sband || idx >= sband->n_channels) {
 		ret = -ENOENT;
 		goto exit;
 	}
 
+	ar = ath12k_mac_get_ar_by_chan(hw, &sband->channels[idx]);
+	if (!ar) {
+		if (sband->channels[idx].flags & IEEE80211_CHAN_DISABLED) {
+			ret = 0;
+			/* Flushing out the old survey results if any */
+			memset(survey, 0, sizeof(*survey));
+			goto exit;
+		} else {
+			ret = -ENOENT;
+			goto exit;
+		}
+	}
+
+	ar_survey = &ar->survey[idx];
+
+	mutex_lock(&ar->conf_mutex);
+
 	ath12k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
 
 	spin_lock_bh(&ar->data_lock);
@@ -6469,27 +15159,68 @@
 	if (ar->rx_channel == survey->channel)
 		survey->filled |= SURVEY_INFO_IN_USE;
 
-exit:
 	mutex_unlock(&ar->conf_mutex);
+
+exit:
 	return ret;
 }
 
+/* Re-evaluate pdev-level AP power save: it is allowed only when every
+ * vdev on this radio is an AP vdev, no stations are associated and the
+ * feature is administratively enabled (ar->ap_ps_enabled).  The new
+ * state is sent to firmware only when it differs from the current one.
+ */
+void ath12k_mac_ap_ps_recalc(struct ath12k *ar)
+{
+	struct ath12k_link_vif *arvif;
+	enum ath12k_ap_ps_state state = ATH12K_AP_PS_STATE_OFF;
+	int ret;
+	bool allow_ap_ps = true;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_AP) {
+			allow_ap_ps = false;
+			break;
+		}
+	}
+
+	if (!allow_ap_ps)
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+			   "ap ps is not allowed\n");
+
+	if (allow_ap_ps && !ar->num_stations && ar->ap_ps_enabled)
+		state = ATH12K_AP_PS_STATE_ON;
+
+	if (ar->ap_ps_state == state)
+		return;
+
+	ret = ath12k_wmi_pdev_ap_ps_cmd_send(ar, ar->pdev->pdev_id, state);
+	if (!ret)
+		ar->ap_ps_state = state;
+	else
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+			   "failed to send ap ps command pdev_id %u state %u\n",
+			    ar->pdev->pdev_id, state);
+}
+
 static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
 					 struct ieee80211_vif *vif,
 					 struct ieee80211_sta *sta,
 					 struct station_info *sinfo)
 {
-	struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
-
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_link_sta *arsta = &ahsta->deflink;
+	struct ath12k *ar = arsta->arvif->ar;
+
+	/* TODO accumulate link sta stats here? */
+
+	if (!ar) {
+		ath12k_err(NULL,
+			   "unable to determine sta statistics \n");
+		return;
+	}
 	sinfo->rx_duration = arsta->rx_duration;
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
 
 	sinfo->tx_duration = arsta->tx_duration;
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
 
-	if (!arsta->txrate.legacy && !arsta->txrate.nss)
-		return;
-
+	if (arsta->txrate.legacy || arsta->txrate.nss) {
 	if (arsta->txrate.legacy) {
 		sinfo->txrate.legacy = arsta->txrate.legacy;
 	} else {
@@ -6502,10 +15233,686 @@
 	}
 	sinfo->txrate.flags = arsta->txrate.flags;
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+	}
 
-	/* TODO: Use real NF instead of default one. */
-	sinfo->signal = arsta->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR;
+	sinfo->signal = arsta->rssi_comb + ar->rssi_offsets.rssi_offset;
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+
+	sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi) +
+			    ar->rssi_offsets.rssi_offset;
+
+	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+}
+
+/* Program BT coexistence for @arvif's vdev.  @coex selects enable,
+ * disable or BTCOEX_CONFIGURE_DEFAULT (leave the enable state as-is);
+ * @wlan_prio_mask and @wlan_weight set the WLAN packet priority
+ * weighting, applied whenever @coex is non-zero.  The PTA interface and
+ * BTC enable commands are skipped when the requested enable state
+ * already matches ATH12K_FLAG_BTCOEX.  Returns 0 or a WMI error.
+ */
+int ath12k_mac_btcoex_config(struct ath12k *ar, struct ath12k_link_vif *arvif,
+			     int coex, u32 wlan_prio_mask, u8 wlan_weight)
+{
+	struct coex_config_arg coex_config;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	/* No enable/disable transition needed: either the caller asked
+	 * for the default, or the flag already matches the request.
+	 */
+	if (coex == BTCOEX_CONFIGURE_DEFAULT ||
+	    (test_bit(ATH12K_FLAG_BTCOEX, &ar->dev_flags) ^ coex))
+		goto next;
+
+	coex_config.vdev_id = arvif->vdev_id;
+	if (coex == BTCOEX_ENABLE) {
+		/* push the PTA interface parameters before enabling */
+		coex_config.config_type = WMI_COEX_CONFIG_PTA_INTERFACE;
+		coex_config.pta_num = ar->coex.pta_num;
+		coex_config.coex_mode = ar->coex.coex_mode;
+		coex_config.bt_txrx_time = ar->coex.bt_active_time_slot;
+		coex_config.bt_priority_time = ar->coex.bt_priority_time_slot;
+		coex_config.pta_algorithm = ar->coex.coex_algo_type;
+		coex_config.pta_priority = ar->coex.pta_priority;
+		ret = ath12k_send_coex_config_cmd(ar, &coex_config);
+		if (ret) {
+			ath12k_warn(ar->ab,
+				    "failed to set coex config vdev_id %d ret %d\n",
+				    coex_config.vdev_id, ret);
+			goto out;
+		}
+	}
+
+	memset(&coex_config, 0, sizeof(struct coex_config_arg));
+	coex_config.vdev_id = arvif->vdev_id;
+	coex_config.config_type = WMI_COEX_CONFIG_BTC_ENABLE;
+	coex_config.coex_enable = coex;
+	ret = ath12k_send_coex_config_cmd(ar, &coex_config);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to set coex config vdev_id %d ret %d\n",
+			    coex_config.vdev_id, ret);
+		goto out;
+	}
+
+next:
+	if (!coex) {
+		ret = 0;
+		goto out;
+	}
+
+	memset(&coex_config, 0, sizeof(struct coex_config_arg));
+	coex_config.vdev_id = arvif->vdev_id;
+	coex_config.config_type = WMI_COEX_CONFIG_WLAN_PKT_PRIORITY;
+	coex_config.wlan_pkt_type = wlan_prio_mask;
+	coex_config.wlan_pkt_weight = wlan_weight;
+	ret = ath12k_send_coex_config_cmd(ar, &coex_config);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to set coex config vdev_id %d ret %d\n",
+			    coex_config.vdev_id, ret);
+	}
+
+out:
+	return ret;
+}
+
+/* Synchronously fetch the firmware stats described by @req_param:
+ * invalidate and free any cached pdev stats, send the WMI stats
+ * request, then wait up to one second for the completion signalled by
+ * the stats event handler.  Returns 0 on success, the WMI send error,
+ * or -ETIMEDOUT when no stats event arrived in time.
+ */
+static int ath12k_fw_stats_request(struct ath12k *ar,
+				   struct stats_request_params *req_param)
+{
+	struct ath12k_base *ab = ar->ab;
+	unsigned long time_left;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	/* drop stale results so the caller never reads old data */
+	spin_lock_bh(&ar->data_lock);
+	ar->fw_stats_done = false;
+	ath12k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+	spin_unlock_bh(&ar->data_lock);
+
+	reinit_completion(&ar->fw_stats_complete);
+
+	ret = ath12k_wmi_send_stats_request_cmd(ar, req_param->stats_id,
+					req_param->vdev_id, req_param->pdev_id);
+	if (ret) {
+		ath12k_warn(ab, "could not request fw stats (%d)\n",
+			    ret);
+		return ret;
+	}
+
+	time_left =
+		wait_for_completion_timeout(&ar->fw_stats_complete,
+					    1 * HZ);
+
+	if (!time_left)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+/* mac80211 get_txpower op.  Reports the channel Tx power in dBm,
+ * preferring the firmware-computed value (cached in ar->chan_tx_pwr,
+ * refreshed at most every ATH12K_PDEV_TX_POWER_REFRESH_TIME_MSECS) and
+ * falling back to link_conf->txpower when firmware cannot be queried.
+ * Always returns 0; *dbm is set to 0 during CAC or when nothing is
+ * known.
+ */
+static int ath12k_mac_op_get_txpower(struct ieee80211_hw *hw,
+				     struct ieee80211_vif *vif,
+				     /* unsigned int link_id,  */int *dbm)
+{
+	unsigned int link_id = 0;
+	struct ath12k_hw *ah = hw->priv;
+	struct stats_request_params req_param;
+	struct ath12k_fw_stats_pdev *pdev;
+	struct ath12k *ar;
+	struct ath12k_base *ab;
+	int ret;
+	struct ieee80211_bss_conf *link_conf;
+	struct ath12k_vif *ahvif;
+	struct ath12k_link_vif *arvif;
+
+	/* Final Tx power is minimum of Target Power, CTL power, Regulatory
+	 * Power, PSD EIRP Power. We just know the Regulatory power from the
+	 * regulatory rules obtained. FW knows all these power and sets the min
+	 * of these. Hence, we request the FW pdev stats in which FW reports
+	 * the minimum of all vdev's channel Tx power.
+	 */
+	mutex_lock(&ah->conf_mutex);
+
+	ar = ath12k_get_ar_by_vif(hw, vif, link_id);
+	if (!ar) {
+		goto err_fallback;
+	}
+
+	ab = ar->ab;
+	if (ar->state != ATH12K_STATE_ON) {
+		goto err_fallback;
+	}
+
+	/* No transmissions during CAC, so no meaningful Tx power */
+	mutex_lock(&ar->conf_mutex);
+	if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+		mutex_unlock(&ar->conf_mutex);
+		mutex_unlock(&ah->conf_mutex);
+		*dbm = 0;
+		return 0;
+	}
+
+	/* Limit the requests to Firmware for fetching the tx power */
+	if (ar->chan_tx_pwr != ATH12K_PDEV_TX_POWER_INVALID &&
+	    time_before(jiffies, msecs_to_jiffies(ATH12K_PDEV_TX_POWER_REFRESH_TIME_MSECS) +
+							ar->last_tx_power_update))
+		goto get_tx_power;
+
+	req_param.pdev_id = ar->pdev->pdev_id;
+	req_param.vdev_id = 0;
+	req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+	ret = ath12k_fw_stats_request(ar, &req_param);
+	if (ret) {
+		ath12k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+		mutex_unlock(&ar->conf_mutex);
+		goto err_fallback;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+	pdev = list_first_entry_or_null(&ar->fw_stats.pdevs,
+					struct ath12k_fw_stats_pdev,
+					list);
+	if (!pdev) {
+		spin_unlock_bh(&ar->data_lock);
+		mutex_unlock(&ar->conf_mutex);
+		goto err_fallback;
+	}
+
+	/* tx power is set as 2 units per dBm in FW. */
+	ar->chan_tx_pwr = pdev->chan_tx_power/2;
+	ar->last_tx_power_update = jiffies;
+
+	spin_unlock_bh(&ar->data_lock);
+
+get_tx_power:
+	*dbm = ar->chan_tx_pwr;
+
+	mutex_unlock(&ar->conf_mutex);
+	mutex_unlock(&ah->conf_mutex);
+
+	ath12k_dbg(ab, ATH12K_DBG_MAC, "txpower reported %d dBm\n", *dbm);
+	return 0;
+
+	/* Reached only with ah->conf_mutex held and ar->conf_mutex
+	 * released on every path above.
+	 */
+err_fallback:
+	ahvif = ath12k_vif_to_ahvif(vif);
+	arvif = ahvif->link[link_id];
+
+	if (!arvif) {
+		*dbm = 0;
+		mutex_unlock(&ah->conf_mutex);
+		return 0;
+	}
+
+	rcu_read_lock();
+
+	/* We didn't get txpower from FW. Hence, relying on link_conf->txpower */
+	link_conf = ath12k_get_link_bss_conf(arvif);
+
+	if (!link_conf) {
+		*dbm = 0;
+		rcu_read_unlock();
+		mutex_unlock(&ah->conf_mutex);
+		return 0;
+	}
+
+	*dbm = link_conf->txpower;
+	rcu_read_unlock();
+	mutex_unlock(&ah->conf_mutex);
+	ath12k_dbg(NULL, ATH12K_DBG_MAC, "txpower from firmware NaN, reported %d dBm\n",
+		   *dbm);
+	return 0;
+}
+
+/*
+ * Cancel remain on channel scan. This API will be called when remain on
+ * channel timer expires or when ROC scan required to be cancelled by
+ * mac layer even before ROC timer expires.
+ */
+static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
+						  struct ieee80211_vif *vif)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	/* the ROC scan ran on the link recorded at scan start */
+	u8 link_id = ahvif->last_scan_link;
+	struct ath12k_link_vif *arvif;
+
+	mutex_lock(&ah->conf_mutex);
+	if (!(ahvif->links_map & BIT(link_id))) {
+		mutex_unlock(&ah->conf_mutex);
+		ath12k_err(NULL, "unable to cancel scan. Link id %d not found\n",
+			   link_id);
+		return -EINVAL;
+	}
+
+	arvif = ahvif->link[link_id];
+
+	if (!arvif || !arvif->is_created) {
+		mutex_unlock(&ah->conf_mutex);
+		ath12k_err(NULL, "unable to cancel scan. arvif interface is not created\n");
+		return -EINVAL;
+	}
+
+	ar = arvif->ar;
+	if (!ar) {
+		mutex_unlock(&ah->conf_mutex);
+		ath12k_err(NULL, "unable to select device to cancel scan\n");
+		return -EINVAL;
+	}
+	mutex_unlock(&ah->conf_mutex);
+
+	mutex_lock(&ar->conf_mutex);
+
+	/* suppress the ROC-expired notification for an explicit cancel */
+	spin_lock_bh(&ar->data_lock);
+	ar->scan.roc_notify = false;
+	spin_unlock_bh(&ar->data_lock);
+
+	ath12k_scan_abort(ar);
+
+	mutex_unlock(&ar->conf_mutex);
+
+	/* the timeout worker is obsolete once the scan is aborted */
+	cancel_delayed_work_sync(&ar->scan.timeout);
+
+	return 0;
+}
+
+/*
+ * Initiate remain on channel scan. When AP required to go to scan
+ * on off-channel, remain on channel will be called. This will
+ * start the scan in the requested channel
+ */
+static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
+					   struct ieee80211_vif *vif,
+					   struct ieee80211_channel *chan,
+					   int duration,
+					   enum ieee80211_roc_type type)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k_link_vif *arvif;
+	struct ath12k *ar;
+	struct ath12k_wmi_scan_req_arg arg = {0};
+	struct cfg80211_chan_def chandef = {};
+	struct ath12k_vif *ahvif;
+	int ret = 0, link_id;
+	u32 scan_time_msec;
+	bool create = true;
+
+	mutex_lock(&ah->conf_mutex);
+
+	ar = ath12k_mac_select_scan_device(hw, vif, chan->center_freq);
+	if (!ar) {
+		mutex_unlock(&ah->conf_mutex);
+		ath12k_err(NULL, "unable to select device for scan\n");
+		return -EINVAL;
+	}
+	if (unlikely(test_bit(ATH12K_FLAG_RECOVERY, &ar->ab->dev_flags))) {
+		mutex_unlock(&ah->conf_mutex);
+		ath12k_err(ar->ab, "Recovery is in progress, cannot perform scan\n");
+		return -ESHUTDOWN;
+	}
+
+	link_id = ath12k_mac_find_link_id_by_freq(vif, ar,
+						  chan->center_freq);
+
+	arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
+
+	if (!arvif) {
+		mutex_unlock(&ah->conf_mutex);
+		ath12k_err(ar->ab, "arvif device is not found, link_id:%d, chan:%d, band:%d\n",
+			   link_id, chan->center_freq, chan->band);
+		return -ENOMEM;
+	}
+
+	if (arvif->is_created) {
+		if (ar != arvif->ar) {
+			if (arvif->is_started) {
+				mutex_unlock(&ah->conf_mutex);
+				return -EINVAL;
+			}
+			if (!arvif->ar) {
+				mutex_unlock(&ah->conf_mutex);
+				return -EINVAL;
+			}
+			mutex_lock(&arvif->ar->conf_mutex);
+			arvif->ar->scan.vdev_id = -1;
+			mutex_unlock(&arvif->ar->conf_mutex);
+
+			ath12k_mac_remove_link_interface(hw, arvif);
+			ath12k_mac_unassign_link_vif(arvif);
+		} else {
+			create = false;
+		}
+	}
+
+	if (create) {
+		mutex_lock(&ar->conf_mutex);
+		ret = ath12k_mac_vdev_create(ar, arvif);
+		mutex_unlock(&ar->conf_mutex);
+		if (ret) {
+			mutex_unlock(&ah->conf_mutex);
+			ath12k_warn(ar->ab, "unable to create scan vdev %d\n", ret);
+			return -EINVAL;
+		}
+	}
+
+	ahvif = arvif->ahvif;
+	if (!arvif->is_started && ahvif->vdev_type == WMI_VDEV_TYPE_STA) {
+		chandef.chan = chan;
+		chandef.center_freq1 = chan->center_freq;
+		ret = ath12k_mac_vdev_start(arvif, &chandef, 0);
+		if (ret) {
+			mutex_unlock(&ah->conf_mutex);
+			ath12k_err(ar->ab, "vdev start failed sta roc\n");
+			return -EINVAL;
+		}
+		arvif->is_started = true;
+		arvif->is_scan_vif = true;
+	}
+
+	mutex_unlock(&ah->conf_mutex);
+
+	mutex_lock(&ar->conf_mutex);
+	spin_lock_bh(&ar->data_lock);
+	switch (ar->scan.state) {
+	case ATH12K_SCAN_IDLE:
+		reinit_completion(&ar->scan.started);
+		reinit_completion(&ar->scan.completed);
+		reinit_completion(&ar->scan.on_channel);
+		ar->scan.state = ATH12K_SCAN_STARTING;
+		ar->scan.is_roc = true;
+		ar->scan.vdev_id = arvif->vdev_id;
+		ar->scan.roc_freq = chan->center_freq;
+		ar->scan.roc_notify = true;
+		ret = 0;
+		break;
+	case ATH12K_SCAN_STARTING:
+	case ATH12K_SCAN_RUNNING:
+	case ATH12K_SCAN_ABORTING:
+		ret = -EBUSY;
+		break;
+	}
+	spin_unlock_bh(&ar->data_lock);
+
+	if (ret) {
+		mutex_unlock(&ar->conf_mutex);
+		ath12k_err(ar->ab, "Unable to start scan. scan_state:%d\n", ar->scan.state);
+		return ret;
+	}
+
+	if (duration <= 0)
+		scan_time_msec = ah->hw->wiphy->max_remain_on_channel_duration * 2;
+	else
+		scan_time_msec = duration;
+
+	ath12k_wmi_start_scan_init(ar, &arg);
+
+	arg.num_chan = 1;
+	arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
+				GFP_KERNEL);
+	if (!arg.chan_list) {
+		mutex_unlock(&ar->conf_mutex);
+		ath12k_err(ar->ab, "chan list memory allocation failed\n");
+		return -ENOMEM;
+	}
+	arg.chan_list[0] = chan->center_freq;
+
+	arg.vdev_id = arvif->vdev_id;
+	arg.scan_id = ATH12K_SCAN_ID;
+	arg.dwell_time_active = scan_time_msec;
+	arg.dwell_time_passive = scan_time_msec;
+	arg.max_scan_time = scan_time_msec;
+	arg.dwell_time_active_6g = scan_time_msec;
+	arg.dwell_time_passive_6g = scan_time_msec;
+
+	/* Set required scan flags */
+	arg.scan_f_passive = 1;
+	arg.scan_f_filter_prb_req = 1;
+
+	/* These flags enable the FW to TX off-channel frames to unknown STAs */
+	arg.scan_f_offchan_mgmt_tx = 1;
+	arg.scan_f_offchan_data_tx = 1;
+	arg.burst_duration = duration;
+
+	ret = ath12k_start_scan(ar, &arg);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
+		spin_lock_bh(&ar->data_lock);
+		ar->scan.state = ATH12K_SCAN_IDLE;
+		spin_unlock_bh(&ar->data_lock);
+		goto exit;
+	}
+
+	ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
+	if (ret == 0) {
+		ath12k_warn(ar->ab, "failed to switch to channel for roc scan\n");
+		ret = ath12k_scan_stop(ar);
+		if (ret)
+			ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
+		ret = -ETIMEDOUT;
+		goto exit;
+	}
+
+	ieee80211_queue_delayed_work(ah->hw, &ar->scan.timeout,
+				     msecs_to_jiffies(duration +
+						      ATH12K_MAC_SCAN_TIMEOUT_MSECS));
+
+	ret = 0;
+
+exit:
+	kfree(arg.chan_list);
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* Note: only full or half bandwidth agile is supported */
+bool ath12k_is_supported_agile_bandwidth(enum nl80211_chan_width conf_bw,
+					 enum nl80211_chan_width agile_bw)
+{
+	bool is_supported = false;
+
+	switch (conf_bw) {
+	case NL80211_CHAN_WIDTH_20_NOHT:
+        case NL80211_CHAN_WIDTH_20:
+        case NL80211_CHAN_WIDTH_40:
+		if (agile_bw <= conf_bw)
+			is_supported = true;
+		break;
+        case NL80211_CHAN_WIDTH_80:
+		if (agile_bw == conf_bw ||
+		    agile_bw == NL80211_CHAN_WIDTH_40)
+			is_supported = true;
+		break;
+        case NL80211_CHAN_WIDTH_160:
+		if (agile_bw == conf_bw ||
+		    agile_bw == NL80211_CHAN_WIDTH_80)
+			is_supported = true;
+		break;
+        case NL80211_CHAN_WIDTH_320:
+		if (agile_bw == conf_bw ||
+		    agile_bw == NL80211_CHAN_WIDTH_160)
+			is_supported = true;
+		break;
+	default:
+		break;
+	}
+
+	return is_supported;
+}
+
+/* Note: the caller must hold rcu_read_lock() before getting the peer mac.
+ */
+static u8 *ath12k_get_peer_mac(struct ieee80211_vif *vif, u8 *addr, u8 *link_id)
+{
+	struct ieee80211_sta *ap_sta;
+	struct ath12k_sta *ahsta;
+	struct ath12k_link_sta *arsta;
+	u8 link, *peer_addr = NULL;
+
+	ap_sta = ieee80211_find_sta(vif, addr);
+	if (!ap_sta)
+		return NULL;
+
+	ahsta = ath12k_sta_to_ahsta(ap_sta);
+
+	for_each_set_bit(link, &ahsta->links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arsta = ahsta->link[link];
+		if (!arsta)
+			continue;
+
+		*link_id = link;
+		peer_addr = arsta->addr;
+		break;
+	}
+	return peer_addr;
+}
+
+int ath12k_mac_op_set_scs(struct wireless_dev *wdev,
+			  struct ath12k_latency_params *params)
+{
+	struct ieee80211_vif *vif;
+	struct ath12k *ar;
+	struct ath12k_vif *ahvif;
+	struct ath12k_link_vif *arvif;
+	u8 *peer_mac;
+	int ret = 0;
+	u8 link_id;
+
+	if (!wdev || !params)
+		return -EINVAL;
+
+	vif = wdev_to_ieee80211_vif(wdev);
+	if (!vif)
+		return -EINVAL;
+
+	ahvif = ath12k_vif_to_ahvif(vif);
+	if (!ahvif)
+		return -EINVAL;
+
+	rcu_read_lock();
+	peer_mac = ath12k_get_peer_mac(vif, params->peer_mac, &link_id);
+	if (!peer_mac) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	memcpy(params->peer_mac, peer_mac, ETH_ALEN);
+	rcu_read_unlock();
+
+	arvif = ahvif->link[link_id];
+	if (WARN_ON(!arvif))
+		return -EINVAL;
+
+	ar = arvif->ar;
+	if (!ar)
+		return -EINVAL;
+
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "link id    	        %d\n", link_id);
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "service interval    %d\n", params->service_interval);
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "burst size          %d\n", params->burst_size);
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "delay bound         %d\n", params->delay_bound);
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "min data rate       %d\n", params->min_data_rate);
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "user priority       %d\n", params->user_priority);
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "req type            %d\n", params->req_type);
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "access category     %d\n", params->ac);
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "direction           %d\n", params->direction);
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "peer mac            %pM\n", params->peer_mac);
+
+	ret = ath12k_wmi_set_latency(ar, params);
+	return ret;
+}
+
+static int ath12k_mac_op_set_radar_background(struct ieee80211_hw *hw,
+					      struct cfg80211_chan_def *def)
+{
+	struct ath12k    *ar;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_vif *ahvif;
+	bool arvif_found = false;
+	int ret;
+	struct cfg80211_chan_def conf_def;
+
+	if (def)
+		ar = ath12k_mac_get_ar_by_chan(hw, def->chan);
+	else
+		ar = ath12k_mac_get_ar_by_agile_chandef(hw, NL80211_BAND_5GHZ);
+
+	if (!ar)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->ab->dfs_region == ATH12K_DFS_REG_UNSET) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (!test_bit(ar->cfg_rx_chainmask, &ar->pdev->cap.adfs_chain_mask) &&
+	    !test_bit(WMI_TLV_SERVICE_SW_PROG_DFS_SUPPORT, ar->ab->wmi_ab.svc_map)) {
+		ret  = -EINVAL;
+		goto exit;
+	}
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		ahvif = arvif->ahvif;
+		if (arvif->is_started && ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
+			arvif_found = true;
+			break;
+		}
+	}
+
+	if (!arvif_found) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (!def) {
+		ret = ath12k_wmi_vdev_adfs_ocac_abort_cmd_send(ar,arvif->vdev_id);
+		if (!ret) {
+			memset(&ar->agile_chandef, 0, sizeof(struct cfg80211_chan_def));
+			ar->agile_chandef.chan = NULL;
+		}
+	} else {
+		if (!cfg80211_chandef_valid(def)) {
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		if (WARN_ON(ath12k_mac_vif_chan(ahvif->vif, &conf_def, arvif->link_id))) {
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		if (cfg80211_chandef_identical(&conf_def, def) /* &&
+			  cfg80211_chandef_device_present(def) */) {
+			if (!test_bit(WMI_TLV_SERVICE_SW_PROG_DFS_SUPPORT,
+				      ar->ab->wmi_ab.svc_map))
+				ret = -EINVAL;
+			else
+				ret = 0;
+			goto exit;
+		}
+
+		if (!(def->chan->flags & IEEE80211_CHAN_RADAR)) {
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		/* Note: only half width and full bandwidth are supported */
+		if(!(ath12k_is_supported_agile_bandwidth(conf_def.width,
+							  def->width))) {
+                        ret = -EINVAL;
+                        goto exit;
+                }
+
+		if (conf_def.center_freq1 == def->center_freq1) {
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		ret = ath12k_wmi_vdev_adfs_ch_cfg_cmd_send(ar, arvif->vdev_id, def);
+		if (!ret) {
+			memcpy(&ar->agile_chandef, def, sizeof(struct cfg80211_chan_def));
+		} else {
+			memset(&ar->agile_chandef, 0, sizeof(struct cfg80211_chan_def));
+			ar->agile_chandef.chan = NULL;
+		}
+	}
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
 }
 
 static const struct ieee80211_ops ath12k_ops = {
@@ -6518,12 +15925,15 @@
 	.remove_interface		= ath12k_mac_op_remove_interface,
 	.update_vif_offload		= ath12k_mac_op_update_vif_offload,
 	.config                         = ath12k_mac_op_config,
-	.bss_info_changed               = ath12k_mac_op_bss_info_changed,
+	.link_info_changed              = ath12k_mac_op_bss_info_changed,
+	.vif_cfg_changed		= ath12k_mac_op_vif_cfg_changed,
+	.change_vif_links		= ath12k_mac_op_change_vif_links,
 	.configure_filter		= ath12k_mac_op_configure_filter,
 	.hw_scan                        = ath12k_mac_op_hw_scan,
 	.cancel_hw_scan                 = ath12k_mac_op_cancel_hw_scan,
 	.set_key                        = ath12k_mac_op_set_key,
 	.sta_state                      = ath12k_mac_op_sta_state,
+	.sta_set_4addr                  = ath12k_mac_op_sta_set_4addr,
 	.sta_set_txpwr			= ath12k_mac_op_sta_set_txpwr,
 	.sta_rc_update			= ath12k_mac_op_sta_rc_update,
 	.conf_tx                        = ath12k_mac_op_conf_tx,
@@ -6541,7 +15951,21 @@
 	.set_bitrate_mask		= ath12k_mac_op_set_bitrate_mask,
 	.get_survey			= ath12k_mac_op_get_survey,
 	.flush				= ath12k_mac_op_flush,
+	.flush_sta			= ath12k_mac_op_flush_sta,
 	.sta_statistics			= ath12k_mac_op_sta_statistics,
+	.change_sta_links		= ath12k_mac_op_change_sta_links,
+	CFG80211_TESTMODE_CMD(ath12k_tm_cmd)
+#ifdef CONFIG_ATH12K_DEBUGFS
+	.sta_add_debugfs		= ath12k_debugfs_sta_op_add,
+	.link_sta_add_debugfs		= ath12k_debugfs_link_sta_op_add,
+#endif
+	.remain_on_channel              = ath12k_mac_op_remain_on_channel,
+	.cancel_remain_on_channel       = ath12k_mac_op_cancel_remain_on_channel,
+	.get_txpower			= ath12k_mac_op_get_txpower,
+	.set_radar_background		= ath12k_mac_op_set_radar_background,
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+	.set_multicast_to_unicast	= ath12k_mac_op_set_multicast_to_unicast,
+#endif
 };
 
 static void ath12k_mac_update_ch_list(struct ath12k *ar,
@@ -6560,6 +15984,65 @@
 	}
 }
 
+/* In an MLO split-mac scenario, the supported channel lists of the same
+ * band must share one wiphy->band[] entry. Merge the later incoming pdev's
+ * supported channel list (new_band) into wiphy->band[] (orig_band).
+ */
+static int ath12k_mac_update_band(struct ath12k *ar,
+				   struct ieee80211_supported_band *orig_band,
+				   struct ieee80211_supported_band *new_band)
+{
+	struct ath12k_base *ab = ar->ab;
+	int i;
+
+	if (!orig_band || !new_band)
+		return -EINVAL;
+
+	if (orig_band->band != new_band->band)
+		return -EINVAL;
+
+	if (WARN_ON(!ab->ag->mlo_capable))
+		return -EOPNOTSUPP;
+
+	for (i = 0; i < new_band->n_channels; i++) {
+		if (new_band->channels[i].flags & IEEE80211_CHAN_DISABLED)
+			continue;
+
+		/* An enabled channel in new_band must not already be enabled
+		 * in orig_band
+		 */
+		if (WARN_ON(!(orig_band->channels[i].flags &
+			      IEEE80211_CHAN_DISABLED)))
+			return -ENOTRECOVERABLE;
+
+		orig_band->channels[i].flags &= ~IEEE80211_CHAN_DISABLED;
+	}
+
+	return 0;
+}
+
+#define ATH12K_5_9_MIN_FREQ 5845
+#define ATH12K_5_9_MAX_FREQ 5885
+
+static void ath12k_mac_update_5_9_ch_list(struct ath12k *ar,
+                                      struct ieee80211_supported_band *band)
+{
+        int i;
+
+        if (test_bit(WMI_TLV_SERVICE_5_9GHZ_SUPPORT,
+                                ar->ab->wmi_ab.svc_map))
+                return;
+
+        if (ar->ab->dfs_region != ATH12K_DFS_REG_FCC)
+                return;
+
+        for (i = 0; i < band->n_channels; i++) {
+                if (band->channels[i].center_freq >= ATH12K_5_9_MIN_FREQ &&
+                    band->channels[i].center_freq <= ATH12K_5_9_MAX_FREQ)
+                        band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
+        }
+}
+
 static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
 {
 	struct ath12k_pdev *pdev = ar->pdev;
@@ -6579,17 +16062,21 @@
 static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
 					   u32 supported_bands)
 {
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_hw *ah = ar->ah;
 	struct ieee80211_supported_band *band;
-	struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
+	struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap, *temp_reg_cap;
 	void *channels;
-	u32 phy_id;
+	u32 phy_id, freq_low, freq_high;
+	int ret;
 
 	BUILD_BUG_ON((ARRAY_SIZE(ath12k_2ghz_channels) +
 		      ARRAY_SIZE(ath12k_5ghz_channels) +
 		      ARRAY_SIZE(ath12k_6ghz_channels)) !=
 		     ATH12K_NUM_CHANS);
 
-	reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
+	reg_cap = &ab->hal_reg_cap[ar->pdev_idx];
+	temp_reg_cap = reg_cap;
 
 	if (supported_bands & WMI_HOST_WLAN_2G_CAP) {
 		channels = kmemdup(ath12k_2ghz_channels,
@@ -6604,46 +16091,55 @@
 		band->channels = channels;
 		band->n_bitrates = ath12k_g_rates_size;
 		band->bitrates = ath12k_g_rates;
-		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
 
-		if (ar->ab->hw_params->single_pdev_only) {
+		if (ab->hw_params->single_pdev_only) {
 			phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
-			reg_cap = &ar->ab->hal_reg_cap[phy_id];
+			temp_reg_cap = &ab->hal_reg_cap[phy_id];
 		}
 		ath12k_mac_update_ch_list(ar, band,
-					  reg_cap->low_2ghz_chan,
-					  reg_cap->high_2ghz_chan);
-	}
+					  temp_reg_cap->low_2ghz_chan,
+					  temp_reg_cap->high_2ghz_chan);
 
-	if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
-		if (reg_cap->high_5ghz_chan >= ATH12K_MAX_6G_FREQ) {
-			channels = kmemdup(ath12k_6ghz_channels,
-					   sizeof(ath12k_6ghz_channels), GFP_KERNEL);
-			if (!channels) {
-				kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
-				return -ENOMEM;
-			}
+		freq_low = max(temp_reg_cap->low_2ghz_chan,
+			       ab->reg_freq_2g.start_freq);
+		freq_high = min(temp_reg_cap->high_2ghz_chan,
+			        ab->reg_freq_2g.end_freq);
+
+		ar->chan_info.low_freq = freq_low;
+		ar->chan_info.high_freq = freq_high;
+		ar->chan_info.num_channels = ath12k_reg_get_num_chans_in_band(ar, band,
+									   freq_low,
+									   freq_high);
+
+		if (!ah->hw->wiphy->bands[NL80211_BAND_2GHZ]) {
+			ah->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
+			ah->supported_band_mask |= BIT(NL80211_BAND_2GHZ);
+		} else {
+			/* Split mac in same band under same wiphy during MLO */
+			ret = ath12k_mac_update_band(ar,
+						     ah->hw->wiphy->bands[NL80211_BAND_2GHZ],
+						     band);
+			if (ret)
+				return ret;
 
-			ar->supports_6ghz = true;
-			band = &ar->mac.sbands[NL80211_BAND_6GHZ];
-			band->band = NL80211_BAND_6GHZ;
-			band->n_channels = ARRAY_SIZE(ath12k_6ghz_channels);
-			band->channels = channels;
-			band->n_bitrates = ath12k_a_rates_size;
-			band->bitrates = ath12k_a_rates;
-			ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
-			ath12k_mac_update_ch_list(ar, band,
-						  reg_cap->low_5ghz_chan,
-						  reg_cap->high_5ghz_chan);
+			ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac pdev %u identified as 2 GHz split mac during MLO\n",
+				   ar->pdev->pdev_id);
+		}
 		}
 
-		if (reg_cap->low_5ghz_chan < ATH12K_MIN_6G_FREQ) {
+	if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
+		/* If the 5G end and 6G start overlap, decide the band based on
+		 * the difference between the target limit and ATH12K_5G_MAX_CENTER.
+		 */
+                if ((reg_cap->low_5ghz_chan >= ATH12K_MIN_5G_FREQ) &&
+                    ((reg_cap->high_5ghz_chan < ATH12K_MAX_5G_FREQ) ||
+                    ((reg_cap->high_5ghz_chan - ATH12K_5G_MAX_CENTER) < (ATH12K_HALF_20MHZ_BW * 2)))) {
 			channels = kmemdup(ath12k_5ghz_channels,
 					   sizeof(ath12k_5ghz_channels),
 					   GFP_KERNEL);
 			if (!channels) {
 				kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
-				kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+				ar->mac.sbands[NL80211_BAND_2GHZ].channels = NULL;
 				return -ENOMEM;
 			}
 
@@ -6653,34 +16149,120 @@
 			band->channels = channels;
 			band->n_bitrates = ath12k_a_rates_size;
 			band->bitrates = ath12k_a_rates;
-			ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
 
 			if (ar->ab->hw_params->single_pdev_only) {
 				phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
-				reg_cap = &ar->ab->hal_reg_cap[phy_id];
+				temp_reg_cap = &ab->hal_reg_cap[phy_id];
 			}
+			ath12k_mac_update_ch_list(ar, band,
+                                                  temp_reg_cap->low_5ghz_chan,
+                                                  temp_reg_cap->high_5ghz_chan);
+
+			ath12k_mac_update_5_9_ch_list(ar, band);
+
+			freq_low = max(temp_reg_cap->low_5ghz_chan,
+				       ab->reg_freq_5g.start_freq);
+			freq_high = min(temp_reg_cap->high_5ghz_chan,
+					ab->reg_freq_5g.end_freq);
+
+			ar->chan_info.low_freq = freq_low;
+			ar->chan_info.high_freq = freq_high;
+			ar->chan_info.num_channels =
+					ath12k_reg_get_num_chans_in_band(ar, band,
+								      freq_low,
+								      freq_high);
+
+			if (!ah->hw->wiphy->bands[NL80211_BAND_5GHZ]) {
+				ah->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+				ah->supported_band_mask |= BIT(NL80211_BAND_5GHZ);
+			} else {
+				/* Split mac in same band under same wiphy during MLO */
+				ret = ath12k_mac_update_band(ar,
+						       	     ah->hw->wiphy->bands[NL80211_BAND_5GHZ],
+						             band);
+				if (ret)
+					return ret;
+
+				ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac pdev %u identified as 5 GHz split mac during MLO\n",
+					   ar->pdev->pdev_id);
+			}
+		} else if (reg_cap->low_5ghz_chan >= ATH12K_MIN_6G_FREQ &&
+                           reg_cap->high_5ghz_chan <= ATH12K_MAX_6G_FREQ) {
+                        band = &ar->mac.sbands[NL80211_BAND_6GHZ];
+                        band->band = NL80211_BAND_6GHZ;
+			channels = kmemdup(ath12k_6ghz_channels,
+					   sizeof(ath12k_6ghz_channels),
+					   GFP_KERNEL);
+			if (!channels) {
+				kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+				ar->mac.sbands[NL80211_BAND_2GHZ].channels = NULL;
+				return -ENOMEM;
+			}
+
+                        band = &ar->mac.sbands[NL80211_BAND_6GHZ];
+                        band->band = NL80211_BAND_6GHZ;
+                        band->n_channels = ARRAY_SIZE(ath12k_6ghz_channels);
+                        band->channels = channels;
+                        band->n_bitrates = ath12k_a_rates_size;
+                        band->bitrates = ath12k_a_rates;
+                        ar->supports_6ghz = true;
 
 			ath12k_mac_update_ch_list(ar, band,
-						  reg_cap->low_5ghz_chan,
-						  reg_cap->high_5ghz_chan);
+						  temp_reg_cap->low_5ghz_chan,
+                                                  temp_reg_cap->high_5ghz_chan);
+
+			freq_low = max(temp_reg_cap->low_5ghz_chan,
+				       ab->reg_freq_6g.start_freq);
+			freq_high = min(temp_reg_cap->high_5ghz_chan,
+					ab->reg_freq_6g.end_freq);
+
+			ar->chan_info.low_freq = freq_low;
+			ar->chan_info.high_freq = freq_high;
+			ar->chan_info.num_channels =
+					ath12k_reg_get_num_chans_in_band(ar, band,
+								      freq_low,
+								      freq_high);
+
+			if (!ah->hw->wiphy->bands[NL80211_BAND_6GHZ]) {
+				ah->hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
+				ah->supported_band_mask |= BIT(NL80211_BAND_6GHZ);
+			} else {
+				/* Split mac in same band under same wiphy during MLO */
+				ret = ath12k_mac_update_band(ar,
+							     ah->hw->wiphy->bands[NL80211_BAND_6GHZ],
+							     band);
+				if (ret)
+					return ret;
+
+				ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac pdev %u identified as 6 GHz split mac during MLO\n",
+					   ar->pdev->pdev_id);
+			}
 		}
 	}
 
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac pdev %u freq limits %u->%u MHz no. of channels %u\n",
+		   ar->pdev->pdev_id, ar->chan_info.low_freq,
+		   ar->chan_info.high_freq, ar->chan_info.num_channels);
 	return 0;
 }
 
-static int ath12k_mac_setup_iface_combinations(struct ath12k *ar)
+static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
 {
-	struct ath12k_base *ab = ar->ab;
+	struct ieee80211_hw *hw = ah->hw;
 	struct ieee80211_iface_combination *combinations;
 	struct ieee80211_iface_limit *limits;
+	struct ath12k_base *ab_dflt;
+	struct ath12k *ar;
 	int n_limits, max_interfaces;
 	bool ap, mesh;
 
-	ap = ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_AP);
+	ar = ah->radio;
+	ab_dflt = ar->ab;
+
+	ap = ab_dflt->hw_params->interface_modes & BIT(NL80211_IFTYPE_AP);
 
 	mesh = IS_ENABLED(CONFIG_MAC80211_MESH) &&
-		ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT);
+		ab_dflt->hw_params->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT);
 
 	combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
 	if (!combinations)
@@ -6720,32 +16302,93 @@
 	combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
 						BIT(NL80211_CHAN_WIDTH_20) |
 						BIT(NL80211_CHAN_WIDTH_40) |
-						BIT(NL80211_CHAN_WIDTH_80);
+						BIT(NL80211_CHAN_WIDTH_80) |
+						BIT(NL80211_CHAN_WIDTH_80P80) |
+						BIT(NL80211_CHAN_WIDTH_160);
 
-	ar->hw->wiphy->iface_combinations = combinations;
-	ar->hw->wiphy->n_iface_combinations = 1;
+	hw->wiphy->iface_combinations = combinations;
+	hw->wiphy->n_iface_combinations = 1;
 
 	return 0;
 }
 
+static void ath12k_mac_fetch_coex_info(struct ath12k *ar)
+{
+        struct ath12k_pdev_cap *cap = &ar->pdev->cap;
+        struct ath12k_base *ab = ar->ab;
+        struct device *dev = ab->dev;
+
+        ar->coex.coex_support = false;
+
+        if (!(cap->supported_bands & WMI_HOST_WLAN_2G_CAP))
+                return;
+
+        if (of_property_read_u32(dev->of_node, "qcom,pta-num",
+                                &ar->coex.pta_num)) {
+                /* ath12k_err(ab, "No qcom,pta_num entry in dev-tree.\n"); */
+        }
+
+        if (of_property_read_u32(dev->of_node, "qcom,coex-mode",
+                                &ar->coex.coex_mode)) {
+                /* ath12k_err(ab, "No qcom,coex_mode entry in dev-tree.\n"); */
+        }
+
+        if (of_property_read_u32(dev->of_node, "qcom,bt-active-time",
+                                &ar->coex.bt_active_time_slot)) {
+                /* ath12k_err(ab, "No qcom,bt-active-time entry in dev-tree.\n"); */
+        }
+
+        if (of_property_read_u32(dev->of_node, "qcom,bt-priority-time",
+                                &ar->coex.bt_priority_time_slot)) {
+                /* ath12k_err(ab, "No qcom,bt-priority-time entry in dev-tree.\n"); */
+        }
+
+        if (of_property_read_u32(dev->of_node, "qcom,coex-algo",
+                                &ar->coex.coex_algo_type)) {
+                /* ath12k_err(ab, "No qcom,coex-algo entry in dev-tree.\n"); */
+        }
+
+        if (of_property_read_u32(dev->of_node, "qcom,pta-priority",
+                                &ar->coex.pta_priority)) {
+                /* ath12k_err(ab, "No qcom,pta-priority entry in dev-tree.\n"); */
+        }
+
+        if (ar->coex.coex_algo_type == COEX_ALGO_OCS) {
+                ar->coex.duty_cycle = 100000;
+                ar->coex.wlan_duration = 80000;
+        }
+
+        ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "coex pta_num %u coex_mode %u"
+                   " bt_active_time_slot %u bt_priority_time_slot %u"
+                   " coex_algorithm %u pta_priority %u\n", ar->coex.pta_num,
+                   ar->coex.coex_mode, ar->coex.bt_active_time_slot,
+                   ar->coex.bt_priority_time_slot, ar->coex.coex_algo_type,
+                   ar->coex.pta_priority);
+        ar->coex.coex_support = true;
+}
+
 static const u8 ath12k_if_types_ext_capa[] = {
 	[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+	[2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
 	[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
 };
 
 static const u8 ath12k_if_types_ext_capa_sta[] = {
 	[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+	[2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
 	[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
 	[9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
 };
 
 static const u8 ath12k_if_types_ext_capa_ap[] = {
 	[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+	[2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
 	[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
 	[9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
+	[10] = WLAN_EXT_CAPA11_EMA_SUPPORT,
 };
 
-static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
+static struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
 	{
 		.extended_capabilities = ath12k_if_types_ext_capa,
 		.extended_capabilities_mask = ath12k_if_types_ext_capa,
@@ -6762,48 +16405,158 @@
 		.extended_capabilities_mask = ath12k_if_types_ext_capa_ap,
 		.extended_capabilities_len =
 				sizeof(ath12k_if_types_ext_capa_ap),
+		.eml_capabilities = 0,
+		.mld_capa_and_ops = 0,
 	},
 };
 
-static void __ath12k_mac_unregister(struct ath12k *ar)
+static void __ath12k_mac_pre_unregister(struct ath12k *ar)
 {
 	cancel_work_sync(&ar->regd_update_work);
+}
 
-	ieee80211_unregister_hw(ar->hw);
+static void __ath12k_mac_post_unregister(struct ath12k *ar)
+{
+	struct ath12k_mac_tx_mgmt_free_arg arg = { };
 
-	idr_for_each(&ar->txmgmt_idr, ath12k_mac_tx_mgmt_pending_free, ar);
+	arg.ar = ar;
+	arg.type = u8_encode_bits(true, ATH12K_MAC_TX_MGMT_FREE_TYPE_PDEV);
+	idr_for_each(&ar->txmgmt_idr, ath12k_mac_tx_mgmt_pending_free, &arg);
 	idr_destroy(&ar->txmgmt_idr);
 
 	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
 	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
 	kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
 
-	kfree(ar->hw->wiphy->iface_combinations[0].limits);
-	kfree(ar->hw->wiphy->iface_combinations);
-
-	SET_IEEE80211_DEV(ar->hw, NULL);
+	ar->mac.sbands[NL80211_BAND_2GHZ].channels = NULL;
+	ar->mac.sbands[NL80211_BAND_5GHZ].channels = NULL;
+	ar->mac.sbands[NL80211_BAND_6GHZ].channels = NULL;
 }
 
-void ath12k_mac_unregister(struct ath12k_base *ab)
+static void __ath12k_mac_unregister(struct ath12k *ar)
+ {
+	__ath12k_mac_pre_unregister(ar);
+	__ath12k_mac_post_unregister(ar);
+}
+static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
 {
 	struct ath12k *ar;
-	struct ath12k_pdev *pdev;
 	int i;
 
-	for (i = 0; i < ab->num_radios; i++) {
-		pdev = &ab->pdevs[i];
-		ar = pdev->ar;
-		if (!ar)
+	ar = ah->radio;
+
+	for (i = 0; i < ah->num_radio; i++) {
+		__ath12k_mac_pre_unregister(ar);
+		ar++;
+	}
+
+	ieee80211_unregister_hw(ah->hw);
+
+	ar = ah->radio;
+	for (i = 0; i < ah->num_radio; i++) {
+		__ath12k_mac_post_unregister(ar);
+		ar++;
+	}
+
+	kfree(ah->hw->wiphy->iface_combinations[0].limits);
+	kfree(ah->hw->wiphy->iface_combinations);
+
+	SET_IEEE80211_DEV(ah->hw, NULL);
+}
+
+void ath12k_mac_unregister(struct ath12k_hw_group *ag)
+{
+	struct ath12k_hw *ah;
+	int i;
+
+	for (i = ag->num_hw - 1; i >= 0; i--) {
+		ah = ag->ah[i];
+		if (!ah)
 			continue;
 
-		__ath12k_mac_unregister(ar);
+		ath12k_mac_hw_unregister(ah);
 	}
 }
 
 static int __ath12k_mac_register(struct ath12k *ar)
 {
-	struct ath12k_base *ab = ar->ab;
-	struct ath12k_pdev_cap *cap = &ar->pdev->cap;
+	struct ieee80211_hw *hw;
+	struct ath12k_pdev_cap *cap;
+	int ret;
+	u32 ht_cap = 0;
+
+	hw = ar->ah->hw;
+	init_waitqueue_head(&ar->tx_empty_waitq);
+	idr_init(&ar->txmgmt_idr);
+	spin_lock_init(&ar->txmgmt_idr_lock);
+
+	cap = &ar->pdev->cap;
+
+	ret = ath12k_mac_setup_channels_rates(ar,
+					      cap->supported_bands);
+	if (ret)
+		return ret;
+
+	ath12k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
+	ath12k_mac_setup_he_eht_cap(ar, cap);
+
+	ar->rssi_offsets.rssi_offset = ATH12K_DEFAULT_NOISE_FLOOR;
+
+	hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
+	hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
+
+	ath12k_pdev_caps_update(ar);
+
+	ar->max_num_stations = TARGET_NUM_STATIONS;
+	ar->max_num_peers = TARGET_NUM_PEERS_PDEV;
+
+	if (cap->nss_ratio_enabled)
+		ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+
+    ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+    ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+    ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+    ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+    ieee80211_hw_set(hw, USES_RSS);
+
+	/* TODO: Check whether the HT capability advertised by the firmware
+	 * differs per band on a dual-band capable radio. Handling it will be
+	 * tricky if the HT capability is different for each band.
+	 */
+	if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS || ar->supports_6ghz)
+		hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
+
+	if (ar->supports_6ghz) {
+		wiphy_ext_feature_set(hw->wiphy,
+				      NL80211_EXT_FEATURE_FILS_DISCOVERY);
+		wiphy_ext_feature_set(hw->wiphy,
+				      NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
+	}
+
+	if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+		if (test_bit(ar->cfg_rx_chainmask, &cap->adfs_chain_mask)) {
+			wiphy_ext_feature_set(hw->wiphy,
+					      NL80211_EXT_FEATURE_RADAR_BACKGROUND);
+		} else if (test_bit(WMI_TLV_SERVICE_SW_PROG_DFS_SUPPORT,
+				    ar->ab->wmi_ab.svc_map)) {
+			wiphy_ext_feature_set(hw->wiphy,
+					      NL80211_EXT_FEATURE_RADAR_BACKGROUND);
+		}
+	}
+
+	return 0;
+}
+static int ath12k_mac_hw_register(struct ath12k_hw *ah)
+{
+	struct ieee80211_hw *hw;
+	struct ath12k_pdev_cap *cap;
+	struct ath12k_base *ab, *ab_dflt;
+	struct ath12k *ar;
+	struct ath12k_hw_group *ag;
+	struct ath12k_pdev *pdev;
+	int i, j, ret;
+	u32 antennas_rx, antennas_tx;
+	bool unregister = false;
 	static const u32 cipher_suites[] = {
 		WLAN_CIPHER_SUITE_TKIP,
 		WLAN_CIPHER_SUITE_CCMP,
@@ -6815,239 +16568,312 @@
 		WLAN_CIPHER_SUITE_GCMP_256,
 		WLAN_CIPHER_SUITE_CCMP_256,
 	};
-	int ret;
-	u32 ht_cap = 0;
 
-	ath12k_pdev_caps_update(ar);
+	hw = ah->hw;
 
-	SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+	hw->wiphy->max_ap_assoc_sta = 0;
+	antennas_rx = 0;
+	antennas_tx = 0;
+
+	ar = ah->radio;
+	ab = ar->ab;
+	ag = ab->ag;
+	hw->wiphy->dev_port = ar->pdev_idx;
 
-	SET_IEEE80211_DEV(ar->hw, ab->dev);
+	/* Use the first chip in the group to advertise some features/info;
+	 * these are expected to be similar for all chips in the group.
+	 */
+	ab_dflt = ab;
 
-	ret = ath12k_mac_setup_channels_rates(ar,
-					      cap->supported_bands);
+	for (i = 0; i < ah->num_radio; i++) {
+		ab = ar->ab;
+		pdev = ar->pdev;
+		cap = &ar->pdev->cap;
+		if (ab->pdevs_macaddr_valid) {
+			ether_addr_copy(ar->mac_addr, pdev->mac_addr);
+		} else {
+			ether_addr_copy(ar->mac_addr, ab->mac_addr);
+			ar->mac_addr[4] += i;
+		}
+
+		ret = __ath12k_mac_register(ar);
 	if (ret)
-		goto err;
+			goto err_cleanup;
 
-	ath12k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
-	ath12k_mac_setup_he_cap(ar, cap);
+		/* Advertise the max antenna support of all radios, driver can handle
+		 * per pdev specific antenna setting based on pdev cap when antenna changes
+		 * are made
+		 */
+		antennas_rx = max_t(u32, antennas_rx, cap->rx_chain_mask);
+		antennas_tx = max_t(u32, antennas_tx, cap->tx_chain_mask);
 
-	ret = ath12k_mac_setup_iface_combinations(ar);
-	if (ret) {
-		ath12k_err(ar->ab, "failed to setup interface combinations: %d\n", ret);
-		goto err_free_channels;
-	}
+		hw->wiphy->max_ap_assoc_sta += ar->max_num_stations;
+		SET_IEEE80211_PERM_ADDR(hw, ar->mac_addr);
 
-	ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask;
-	ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask;
-
-	ar->hw->wiphy->interface_modes = ab->hw_params->interface_modes;
-
-	ieee80211_hw_set(ar->hw, SIGNAL_DBM);
-	ieee80211_hw_set(ar->hw, SUPPORTS_PS);
-	ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
-	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
-	ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
-	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
-	ieee80211_hw_set(ar->hw, AP_LINK_PS);
-	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
-	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
-	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
-	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
-	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
-	ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
-	ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
-
-	if (ht_cap & WMI_HT_CAP_ENABLED) {
-		ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
-		ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
-		ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER);
-		ieee80211_hw_set(ar->hw, SUPPORTS_AMSDU_IN_AMPDU);
-		ieee80211_hw_set(ar->hw, USES_RSS);
+		ar++;
 	}
 
-	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
-	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+	hw->wiphy->available_antennas_rx = antennas_rx;
+	hw->wiphy->available_antennas_tx = antennas_tx;
 
-	/* TODO: Check if HT capability advertised from firmware is different
-	 * for each band for a dual band capable radio. It will be tricky to
-	 * handle it when the ht capability different for each band.
+	/* TODO: Add link/ar specific iface combinations
+	 * For now, setting up combination once for the wiphy (ah)
 	 */
-	if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS)
-		ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
+	ret = ath12k_mac_setup_iface_combinations(ah);
+	if (ret) {
+		ath12k_err(NULL, "failed to setup interface combinations: %d\n",
+			   ret);
+		goto err_cleanup;
+	}
 
-	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
-	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+	SET_IEEE80211_DEV(hw, ab_dflt->dev);
 
-	ar->hw->max_listen_interval = ATH12K_MAX_HW_LISTEN_INTERVAL;
+	/* Iface modes are expected to be similar for partner chips */
+	hw->wiphy->interface_modes = ab_dflt->hw_params->interface_modes;
 
-	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
-	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
-	ar->hw->wiphy->max_remain_on_channel_duration = 5000;
+	hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
+	hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
 
-	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
-	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, SUPPORTS_PS);
+	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+	ieee80211_hw_set(hw, MFP_CAPABLE);
+	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+	ieee80211_hw_set(hw, AP_LINK_PS);
+	ieee80211_hw_set(hw, SPECTRUM_MGMT);
+	ieee80211_hw_set(hw, CONNECTION_MONITOR);
+	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+	ieee80211_hw_set(hw, SUPPORTS_PER_STA_GTK);
+	ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+	ieee80211_hw_set(hw, QUEUE_CONTROL);
+	ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
+	ieee80211_hw_set(hw, REPORTS_LOW_ACK);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+	ieee80211_hw_set(hw, USES_RSS);
+	ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+
+	if (ath12k_frame_mode == ATH12K_HW_TXRX_ETHERNET) {
+		ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+		ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
+	}
+
+	hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+	hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
+	hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+
+	hw->max_listen_interval = ATH12K_MAX_HW_LISTEN_INTERVAL;
+
+	hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+	hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+	hw->wiphy->max_remain_on_channel_duration = 5000;
+
+	hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+	hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
 				   NL80211_FEATURE_AP_SCAN;
 
-	ar->max_num_stations = TARGET_NUM_STATIONS;
-	ar->max_num_peers = TARGET_NUM_PEERS_PDEV;
+	hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
 
-	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+	if (ag->mlo_capable)
+		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
 
-	ar->hw->queues = ATH12K_HW_MAX_QUEUES;
-	ar->hw->wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
-	ar->hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
-	ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+	/* TODO Split queues per chip */
+	hw->queues = ATH12K_HW_MAX_QUEUES;
+	hw->wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
+	hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
+	hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT;
 
-	ar->hw->vif_data_size = sizeof(struct ath12k_vif);
-	ar->hw->sta_data_size = sizeof(struct ath12k_sta);
+	hw->vif_data_size = sizeof(struct ath12k_vif);
+	hw->sta_data_size = sizeof(struct ath12k_sta);
 
-	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
-	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
+	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
+	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
 
-	ar->hw->wiphy->cipher_suites = cipher_suites;
-	ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+	if (test_bit(WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD, ab->wmi_ab.svc_map))
+                wiphy_ext_feature_set(hw->wiphy,
+                                      NL80211_EXT_FEATURE_BSS_COLOR);
 
-	ar->hw->wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa;
-	ar->hw->wiphy->num_iftype_ext_capab =
-		ARRAY_SIZE(ath12k_iftypes_ext_capa);
+	hw->wiphy->cipher_suites = cipher_suites;
+	hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
 
-	if (ar->supports_6ghz) {
-		wiphy_ext_feature_set(ar->hw->wiphy,
-				      NL80211_EXT_FEATURE_FILS_DISCOVERY);
-		wiphy_ext_feature_set(ar->hw->wiphy,
-				      NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
+	/* Copy over MLO related capabilities received from
+	 * WMI_SERVICE_READY_EXT2_EVENT if mlo_capable is set
+	 */
+	if (ag->mlo_capable) {
+		ath12k_iftypes_ext_capa[2].eml_capabilities = cap->eml_cap;
+		ath12k_iftypes_ext_capa[2].mld_capa_and_ops = cap->mld_cap;
+		if (!test_bit(ATH12K_FLAG_RAW_MODE, &ag->dev_flags))
+			ieee80211_hw_set(hw, MLO_MCAST_MULTI_LINK_TX);
 	}
+	hw->wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa;
+	hw->wiphy->num_iftype_ext_capab = ARRAY_SIZE(ath12k_iftypes_ext_capa);
+
+	hw->wiphy->mbssid_max_interfaces = ah->num_radio * TARGET_NUM_VDEVS;
+	hw->wiphy->ema_max_profile_periodicity = TARGET_EMA_MAX_PROFILE_PERIOD;
 
-	ath12k_reg_init(ar);
+	ah->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
+	ah->hw->wiphy->reg_notifier = ath12k_reg_notifier;
 
-	if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
-		ar->hw->netdev_features = NETIF_F_HW_CSUM;
-		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
-		ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
+	ath12k_vendor_register(ah);
+
+	if (!test_bit(ATH12K_FLAG_RAW_MODE, &ag->dev_flags)) {
+		ieee80211_hw_set(hw, SW_CRYPTO_CONTROL);
+		ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
 	}
 
-	ret = ieee80211_register_hw(ar->hw);
+	hw->netdev_features |= NETIF_F_LLTX;
+
+	ret = ieee80211_register_hw(hw);
 	if (ret) {
-		ath12k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
-		goto err_free_if_combs;
+		ath12k_err(NULL, "ieee80211 registration failed: %d\n", ret);
+		goto err_full_cleanup;
 	}
 
-	if (!ab->hw_params->supports_monitor)
+	if (!ab_dflt->hw_params->supports_monitor)
 		/* There's a race between calling ieee80211_register_hw()
 		 * and here where the monitor mode is enabled for a little
 		 * while. But that time is so short and in practise it make
 		 * a difference in real life.
 		 */
-		ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
+		hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
 
-	/* Apply the regd received during initialization */
+	ar = ah->radio;
+	ab = ar->ab;
+
+ 	for (i = 0; i < ah->num_radio; i++) {
 	ret = ath12k_regd_update(ar, true);
 	if (ret) {
 		ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret);
 		goto err_unregister_hw;
 	}
 
+		/* fw stats init is required separately since fw pdev stats are
+		 * also used in the get_txpower mac op; hence it is handled
+		 * separately from debugfs. */
+		ath12k_fw_stats_init(ar);
+
+		ret = ath12k_debugfs_register(ar);
+		if (ret) {
+			ath12k_err(ar->ab, "debugfs registration failed: %d\n", ret);
+			goto err_unregister_hw;
+		}
+		ar++;
+	}
+
 	return 0;
 
 err_unregister_hw:
-	ieee80211_unregister_hw(ar->hw);
-
-err_free_if_combs:
-	kfree(ar->hw->wiphy->iface_combinations[0].limits);
-	kfree(ar->hw->wiphy->iface_combinations);
+	ar = ah->radio;
+	for (j = 0; j < i; j++) {
+		ath12k_fw_stats_free(&ar->fw_stats);
+		ath12k_debugfs_unregister(ar);
+		ar++;
+	}
+	unregister = true;
+err_full_cleanup:
+	kfree(hw->wiphy->iface_combinations[0].limits);
+	kfree(hw->wiphy->iface_combinations);
+	i = ah->num_radio;
+err_cleanup:
+	ar = ah->radio;
+	for (j = 0; j < i; j++) {
+		__ath12k_mac_unregister(ar);
+		ar++;
+	}
 
-err_free_channels:
-	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
-	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
-	kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+	if (unregister) {
+		ieee80211_unregister_hw(hw);
+		SET_IEEE80211_DEV(hw, NULL);
+	}
 
-err:
-	SET_IEEE80211_DEV(ar->hw, NULL);
 	return ret;
 }
 
-int ath12k_mac_register(struct ath12k_base *ab)
+int ath12k_mac_register(struct ath12k_hw_group *ag)
 {
-	struct ath12k *ar;
-	struct ath12k_pdev *pdev;
-	int i;
-	int ret;
-
-	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
-		return 0;
+	struct ath12k_hw *ah;
+	int ret, i;
 
-	for (i = 0; i < ab->num_radios; i++) {
-		pdev = &ab->pdevs[i];
-		ar = pdev->ar;
-		if (ab->pdevs_macaddr_valid) {
-			ether_addr_copy(ar->mac_addr, pdev->mac_addr);
-		} else {
-			ether_addr_copy(ar->mac_addr, ab->mac_addr);
-			ar->mac_addr[4] += i;
-		}
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ag->ah[i];
+		if (!ah)
+			continue;
 
-		ret = __ath12k_mac_register(ar);
+		ret = ath12k_mac_hw_register(ah);
 		if (ret)
-			goto err_cleanup;
-
-		idr_init(&ar->txmgmt_idr);
-		spin_lock_init(&ar->txmgmt_idr_lock);
+			goto err_mac_unregister;
 	}
 
-	/* Initialize channel counters frequency value in hertz */
-	ab->cc_freq_hz = 320000;
-	ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
-
 	return 0;
 
-err_cleanup:
+err_mac_unregister:
 	for (i = i - 1; i >= 0; i--) {
-		pdev = &ab->pdevs[i];
-		ar = pdev->ar;
-		__ath12k_mac_unregister(ar);
+		ah = ag->ah[i];
+		if (!ah)
+			continue;
+
+		ath12k_mac_hw_unregister(ah);
 	}
 
 	return ret;
 }
 
-int ath12k_mac_allocate(struct ath12k_base *ab)
+static inline struct ath12k *ath12k_mac_get_ar(struct ath12k_hw *ah,
+					       u8 link_idx)
+{
+	struct ath12k *ar = ah->radio;
+
+	if (link_idx >= ah->num_radio)
+		return NULL;
+
+	return ar + link_idx;
+}
+
+static int ath12k_mac_setup(struct ath12k_hw *ah, u8 link_id,
+			    struct ath12k_base *ab, int mac_id)
 {
-	struct ieee80211_hw *hw;
 	struct ath12k *ar;
 	struct ath12k_pdev *pdev;
-	int ret;
-	int i;
+	 struct ath12k_hw_group *ag = ab->ag;
+	u16 hw_link_id;
 
-	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
-		return 0;
+	pdev = &ab->pdevs[mac_id];
+	hw_link_id = pdev->hw_link_id;
 
-	for (i = 0; i < ab->num_radios; i++) {
-		pdev = &ab->pdevs[i];
-		hw = ieee80211_alloc_hw(sizeof(struct ath12k), &ath12k_ops);
-		if (!hw) {
-			ath12k_warn(ab, "failed to allocate mac80211 hw device\n");
-			ret = -ENOMEM;
-			goto err_free_mac;
+	if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
+		ath12k_err(ab, "HW link id %d is not supported\n", hw_link_id);
+		return -ENOENT;
 		}
 
-		ar = hw->priv;
-		ar->hw = hw;
+	ar = ath12k_mac_get_ar(ah, link_id);
+	if (!ar)
+		return -EINVAL;
+
 		ar->ab = ab;
+	ar->ah = ah;
 		ar->pdev = pdev;
-		ar->pdev_idx = i;
-		ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, i);
+	ar->pdev_idx = mac_id;
+	ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, mac_id);
+	ar->link_idx = link_id;
 
-		ar->wmi = &ab->wmi_ab.wmi[i];
-		/* FIXME: wmi[0] is already initialized during attach,
+	ar->wmi = &ab->wmi_ab.wmi[mac_id];
+	/* FIXME wmi[0] is already initialized during attach,
 		 * Should we do this again?
 		 */
-		ath12k_wmi_pdev_attach(ab, i);
+	ath12k_wmi_pdev_attach(ab, mac_id);
+
+	ath12k_mac_fetch_coex_info(ar);
 
 		ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
 		ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
-		ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
-		ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
+	ar->num_tx_chains = get_num_chains(pdev->cap.tx_chain_mask);
+	ar->num_rx_chains = get_num_chains(pdev->cap.rx_chain_mask);
 
 		pdev->ar = ar;
 		spin_lock_init(&ar->data_lock);
@@ -7062,36 +16888,248 @@
 		init_completion(&ar->bss_survey_done);
 		init_completion(&ar->scan.started);
 		init_completion(&ar->scan.completed);
+	init_completion(&ar->thermal.wmi_sync);
+	init_completion(&ar->mlo_setup_done);
+	init_completion(&ar->mvr_complete);
+	init_completion(&ar->scan.on_channel);
 
 		INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
+	INIT_WORK(&ar->scan.vdev_del_wk, ath12k_scan_vdev_del_work);
+	ar->scan.vdev_id = -1;
 		INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
 
 		INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
 		skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
-		clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
-	}
+	clear_bit(MONITOR_VDEV_STARTED, &ar->monitor_flags);
+	ar->monitor_vdev_id = -1;
+	clear_bit(MONITOR_VDEV_CREATED, &ar->monitor_flags);
+	rcu_assign_pointer(ag->hw_links[hw_link_id], ar);
 
 	return 0;
+}
+
+int ath12k_mac_allocate(struct ath12k_hw_group *ag)
+{
+	struct ieee80211_hw *hw;
+	struct ath12k_base *ab;
+	struct ath12k_hw *ah;
+	struct ieee80211_ops *ops;
+	size_t len;
+	int i, j, total_radio, num_radios, ret;
+	int mac_id, chip_id;
+	u8 link_id, num_hw;
+
+	total_radio = 0;
+	for (i = 0; i < ag->num_chip; i++)
+		total_radio += ag->ab[i]->num_radios;
+
+	/* All pdevs get combined and registered as a single wiphy if MLO
+	 * is enabled, whereas otherwise each pdev gets registered separately.
+	 */
+	if (ag->mlo_capable)
+		num_radios = total_radio;
+	else
+		num_radios = 1;
+
+	num_hw = total_radio / num_radios;
+
+	if (num_hw >= ATH12K_GROUP_MAX_RADIO) {
+		ath12k_err(NULL, "HW count %d is not supported\n", num_hw);
+		return -ENOSPC;
+	}
+
+	ag->num_hw = 0;
+	mac_id = 0;
+	chip_id = 0;
+	for (i = 0; i < num_hw; i++) {
+		ops = kmemdup(&ath12k_ops, sizeof(ath12k_ops), GFP_KERNEL);
+		if (!ops) {
+			ret = -ENOMEM;
+			goto err_mac_destroy;
+		}
+
+		len = sizeof(struct ath12k_hw) + (num_radios * sizeof(struct ath12k));
+		hw = ieee80211_alloc_hw(len, ops);
+		if (!hw) {
+			kfree(ops);
+			ret = -ENOMEM;
+			goto err_mac_destroy;
+		}
+
+#ifdef CONFIG_MAC80211_BONDED_SUPPORT
+		ieee80211_enable_bond_dev(hw);
+#endif
+		ah = hw->priv;
+		ah->hw = hw;
+		ah->ops = ops;
+		ah->num_radio = num_radios;
+		ag->ah[i] = ah;
+		ah->ag = ag;
+
+		ath12k_info(NULL,
+			    "ath12k hw device created with %d radios in group %d with %d chips\n",
+			     ah->num_radio, ag->id, ag->num_chip);
+
+		mutex_init(&ah->conf_mutex);
+		INIT_LIST_HEAD(&ah->ml_peers);
+		spin_lock_init(&ah->data_lock);
+
+		link_id = 0;
+		for (j = 0; j < num_radios; j++) {
+			ab = ag->ab[chip_id];
+			ret = ath12k_mac_setup(ah, link_id, ab, mac_id);
+			if (ret) {
+				ieee80211_free_hw(ah->hw);
+				kfree(ops);
+				ag->ah[i] = NULL;
+
+				ath12k_err(ab, "failed to setup mac %d for chip %d group %d\n",
+					   mac_id, chip_id, ag->id);
+				goto err_mac_destroy;
+			}
+
+			link_id++;
+			mac_id++;
+
+			/* If mac_id falls beyond the current chip MACs then
+			 * move to next chip
+			 */
+			if (mac_id >= ab->num_radios) {
+				chip_id++;
+				mac_id = 0;
 
-err_free_mac:
-	ath12k_mac_destroy(ab);
+				/* Initialize channel counters frequency value in hertz */
+				ab->cc_freq_hz = 320000;
+
+				spin_lock_bh(&ab->base_lock);
+				ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+				spin_unlock_bh(&ab->base_lock);
+				ret = ath12k_peer_rhash_tbl_init(ab);
+				if (ret)
+					goto err_mac_destroy;
 
+				ath12k_dp_pdev_pre_alloc(ab);
+			}
+		}
+
+		ag->num_hw++;
+	}
+
+	return 0;
+
+err_mac_destroy:
+	ath12k_mac_destroy(ag);
 	return ret;
 }
 
-void ath12k_mac_destroy(struct ath12k_base *ab)
+void ath12k_mac_destroy(struct ath12k_hw_group *ag)
 {
+	struct ath12k_base *ab;
+	struct ath12k_hw *ah;
 	struct ath12k *ar;
 	struct ath12k_pdev *pdev;
-	int i;
+	const struct ieee80211_ops *ops;
+	int i, j;
+	u16 hw_link_id;
+
+	for (i = 0; i < ag->num_chip; i++) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
+
+		for (j = 0; j < ab->num_radios; j++) {
+			pdev = &ab->pdevs[j];
+			hw_link_id = pdev->hw_link_id;
+
+			if (hw_link_id < ATH12K_GROUP_MAX_RADIO) {
+				rcu_assign_pointer(ag->hw_links[hw_link_id], NULL);
+				
+				if(!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->ag->dev_flags)))
+					synchronize_rcu();
+			}
 
-	for (i = 0; i < ab->num_radios; i++) {
-		pdev = &ab->pdevs[i];
 		ar = pdev->ar;
 		if (!ar)
 			continue;
 
-		ieee80211_free_hw(ar->hw);
 		pdev->ar = NULL;
 	}
+		ath12k_peer_rhash_tbl_destroy(ab);
+	}
+
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ag->ah[i];
+		if (!ah)
+			continue;
+
+		ops = ah->ops;
+		ieee80211_free_hw(ah->hw);
+		kfree(ops);
+		ag->ah[i] = NULL;
+	}
+}
+
+u16 ath12k_calculate_subchannel_count(enum nl80211_chan_width width) {
+	u16 width_num = 0;
+
+	switch (width) {
+	case NL80211_CHAN_WIDTH_20_NOHT:
+	case NL80211_CHAN_WIDTH_20:
+		width_num = 20;
+		break;
+	case NL80211_CHAN_WIDTH_40:
+		width_num = 40;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+	case NL80211_CHAN_WIDTH_80P80:
+		width_num = 80;
+		break;
+	case NL80211_CHAN_WIDTH_160:
+		width_num = 160;
+		break;
+	case NL80211_CHAN_WIDTH_320:
+		width_num = 320;
+		break;
+	default:
+		break;
+	}
+	return width_num/20;
+}
+
+void ath12k_mac_background_dfs_event(struct ath12k *ar,
+				     enum ath12k_background_dfs_events ev)
+{
+	struct ath12k_vif *ahvif;
+	struct ath12k_link_vif *arvif;
+	bool arvif_found = false;
+	int ret = 0;
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		ahvif = arvif->ahvif;
+		if (arvif->is_started &&
+		    ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
+			arvif_found = true;
+			break;
+		}
+	}
+
+	if (!arvif_found)
+		return;
+
+	if (ev == ATH12K_BGDFS_RADAR) {
+		cfg80211_background_radar_event(ar->ah->hw->wiphy, &ar->agile_chandef, GFP_ATOMIC);
+		lockdep_assert_held(&ar->conf_mutex);
+		ret = ath12k_wmi_vdev_adfs_ocac_abort_cmd_send(ar, arvif->vdev_id);
+	} else if (ev == ATH12K_BGDFS_ABORT) {
+		cfg80211_background_cac_abort(ar->ah->hw->wiphy);
+	}
+
+	if (!ret) {
+		memset(&ar->agile_chandef, 0, sizeof(struct cfg80211_chan_def));
+		ar->agile_chandef.chan = NULL;
+	} else {
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+			   "ADFS state can't be reset (ret=%d)\n",
+			   ret);
+	}
 }
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/mac.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/mac.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/mac.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/mac.h	2024-04-19 16:04:28.957735776 +0200
@@ -12,12 +12,56 @@
 
 struct ath12k;
 struct ath12k_base;
+struct ath12k_hw;
+struct ath12k_hw_group;
+struct ath12k_link_sta;
 
 struct ath12k_generic_iter {
 	struct ath12k *ar;
 	int ret;
 };
 
+struct ath12k_mac_any_chandef_arg {
+	struct ath12k *ar;
+	struct cfg80211_chan_def *def;
+};
+
+struct ath12k_mac_num_chanctxs_arg {
+	struct ath12k *ar;
+	int num;
+};
+
+#define ATH12K_MAC_TX_MGMT_FREE_TYPE_PDEV	BIT(0)
+#define ATH12K_MAC_TX_MGMT_FREE_TYPE_VIF	BIT(1)
+#define ATH12K_MAC_TX_MGMT_FREE_TYPE_LINK	BIT(2)
+
+struct ath12k_mac_tx_mgmt_free_arg {
+	u8 type;
+	struct ath12k *ar;
+	struct ieee80211_vif *vif;
+	u8 link_id;
+};
+
+struct ath12k_latency_params {
+	u32 service_interval;
+	u32 burst_size;
+	u32 delay_bound;
+	u32 min_data_rate;
+	u8 user_priority;
+	u8 req_type;
+	u8 ac;
+	u8 direction;
+	u8 peer_mac[6];
+};
+
+#define ATH12K_LATENCY_PARAM_SIZE sizeof(struct ath12k_latency_params)
+
+/* Default link after the IEEE802.11 defined Max link id limit
+ * for driver usage purpose
+ */
+#define ATH12K_DEFAULT_SCAN_LINK	IEEE80211_MLD_MAX_NUM_LINKS
+#define ATH12K_NUM_MAX_LINKS		(IEEE80211_MLD_MAX_NUM_LINKS + 1)
+
 /* number of failed packets (20 packets with 16 sw reties each) */
 #define ATH12K_KICKOUT_THRESHOLD		(20 * 16)
 
@@ -33,7 +77,16 @@
 #define IEEE80211_VHT_MCS_SUPPORT_0_11_MASK	GENMASK(23, 16)
 #define IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11	BIT(24)
 
-#define ATH12K_CHAN_WIDTH_NUM			8
+#define ATH12K_CHAN_WIDTH_NUM			14
+#define ATH12K_BW_NSS_MAP_ENABLE		BIT(31)
+#define ATH12K_PEER_RX_NSS_160MHZ		GENMASK(2, 0)
+#define ATH12K_PEER_RX_NSS_80_80MHZ		GENMASK(5, 3)
+
+#define ATH12K_OBSS_PD_MAX_THRESHOLD			-82
+#define ATH12K_OBSS_PD_THRESHOLD_DISABLED		-128
+#define ATH12K_OBSS_PD_THRESHOLD_IN_DBM			BIT(29)
+#define ATH12K_OBSS_PD_SRG_EN				BIT(30)
+#define ATH12K_OBSS_PD_NON_SRG_EN			BIT(31)
 
 #define ATH12K_TX_POWER_MAX_VAL	70
 #define ATH12K_TX_POWER_MIN_VAL	0
@@ -43,14 +96,28 @@
 	ATH12K_BW_40    = 1,
 	ATH12K_BW_80    = 2,
 	ATH12K_BW_160   = 3,
+	ATH12K_BW_320	= 4,
 };
 
+#define ATH12K_PDEV_TX_POWER_INVALID	(-1)
+#define ATH12K_PDEV_TX_POWER_REFRESH_TIME_MSECS 5000 /* msecs */
+
+enum ath12k_background_dfs_events {
+	ATH12K_BGDFS_SUCCESS,
+	ATH12K_BGDFS_ABORT,
+	ATH12K_BGDFS_RADAR,
+};
+
+#define ATH12K_WLAN_PRIO_MAX    0x63
+#define ATH12K_WLAN_PRIO_WEIGHT 0xff
+
 extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default;
 
-void ath12k_mac_destroy(struct ath12k_base *ab);
-void ath12k_mac_unregister(struct ath12k_base *ab);
-int ath12k_mac_register(struct ath12k_base *ab);
-int ath12k_mac_allocate(struct ath12k_base *ab);
+void ath12k_mac_ap_ps_recalc(struct ath12k *ar);
+void ath12k_mac_destroy(struct ath12k_hw_group *ag);
+void ath12k_mac_unregister(struct ath12k_hw_group *ag);
+int ath12k_mac_register(struct ath12k_hw_group *ag);
+int ath12k_mac_allocate(struct ath12k_hw_group *ag);
 int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
 					  u16 *rate);
 u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
@@ -61,16 +128,85 @@
 void __ath12k_mac_scan_finish(struct ath12k *ar);
 void ath12k_mac_scan_finish(struct ath12k *ar);
 
-struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id);
-struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
+struct ath12k_link_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id);
+struct ath12k_link_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
 						   u32 vdev_id);
+int ath12k_mac_btcoex_config(struct ath12k *ar, struct ath12k_link_vif *arvif,
+			   int coex, u32 wlan_prio_mask, u8 wlan_weight);
 struct ath12k *ath12k_mac_get_ar_by_vdev_id(struct ath12k_base *ab, u32 vdev_id);
 struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id);
 
-void ath12k_mac_drain_tx(struct ath12k *ar);
+void ath12k_mac_fill_reg_tpc_info(struct ath12k *ar,
+                                  struct ath12k_link_vif *arvif,
+                                  struct ieee80211_chanctx_conf *ctx);
+void ath12k_mac_radio_drain_tx(struct ath12k *ar);
 void ath12k_mac_peer_cleanup_all(struct ath12k *ar);
 int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
 enum rate_info_bw ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw);
+u32 ath12k_mac_he_gi_to_nl80211_he_gi(u8 sgi);
+enum nl80211_he_ru_alloc ath12k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy);
+enum nl80211_he_ru_alloc ath12k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones);
+enum nl80211_eht_ru_alloc ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(u16 ru_tones);
 enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw bw);
 enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher);
+void ath12k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
+                                     struct ieee80211_chanctx_conf *conf,
+                                     void *data);
+struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+				    u8 link_id);
+int ath12k_mac_mlo_setup(struct ath12k_hw *ah);
+int ath12k_mac_mlo_ready(struct ath12k_hw *ah);
+int ath12k_mac_mlo_teardown(struct ath12k_hw *ah);
+struct ieee80211_bss_conf *ath12k_get_link_bss_conf(struct ath12k_link_vif *arvif);
+bool ath12k_mac_is_ml_arvif(struct ath12k_link_vif *arvif);
+u16 ath12k_calculate_subchannel_count(enum nl80211_chan_width width);
+void ath12k_mac_background_dfs_event(struct ath12k *ar,
+				     enum ath12k_background_dfs_events ev);
+bool ath12k_mac_tx_check_max_limit(struct ath12k *ar, struct sk_buff *skb);
+int ath12k_mac_mlo_teardown_with_umac_reset(struct ath12k_base *ab);
+void ath12k_mac_bss_info_changed(struct ath12k *ar,
+				 struct ath12k_link_vif *arvif,
+				 struct ieee80211_bss_conf *info,
+				 u64 changed);
+int ath12k_mac_radio_start(struct ath12k *ar);
+int ath12k_mac_conf_tx(struct ath12k *ar,
+		       struct ath12k_link_vif *arvif, u16 ac,
+		       const struct ieee80211_tx_queue_params *params);
+int ath12k_mac_set_key(struct ath12k *ar,
+		       enum set_key_cmd cmd,
+		       struct ath12k_link_vif *arvif,
+		       struct ath12k_link_sta *arsta,
+		       struct ieee80211_key_conf *key);
+
+int ath12k_mac_update_sta_state(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				struct ieee80211_sta *sta,
+				enum ieee80211_sta_state old_state,
+				enum ieee80211_sta_state new_state);
+void ath12k_mgmt_rx_reo_init_timer(struct ath12k_hw_group *ag);
+int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif);
+void ath12k_mac_vif_cache_flush(struct ath12k *ar,  struct ieee80211_vif *vif,
+				u8 link_id);
+void ath12k_mac_parse_tx_pwr_env(struct ath12k *ar,
+				 struct ath12k_link_vif *arvif,
+				 struct ieee80211_chanctx_conf *ctx);
+int ath12k_mac_monitor_start(struct ath12k *ar);
+int ath12k_mac_mlo_vdev_start(struct ath12k_link_vif *arvif, bool flag);
+int ath12k_mac_vdev_start(struct ath12k_link_vif *arvif,
+			  const struct cfg80211_chan_def *chandef,
+			  bool radar_enabled);
+int ath12k_mac_monitor_vdev_create(struct ath12k *ar);
+int ath12k_mac_monitor_vdev_delete(struct ath12k *ar);
+int ath12k_mac_monitor_stop(struct ath12k *ar);
+int ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value);
+void ath12k_mac_reconfig_complete(struct ieee80211_hw *hw,
+				  enum ieee80211_reconfig_type reconfig_type,
+				  struct ath12k *asserted_radio);
+int ath12k_mac_partner_peer_cleanup(struct ath12k_base *ab);
+void ath12k_bss_assoc(struct ath12k *ar,
+		      struct ath12k_link_vif *arvif,
+		      struct ieee80211_bss_conf *bss_conf);
+void ath12k_bss_disassoc(struct ath12k *ar, struct ath12k_link_vif *arvif,
+			 bool do_vdev_down);
+int ath12k_mac_op_set_scs(struct wireless_dev *wdev, struct ath12k_latency_params *params);
 #endif
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/mhi.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/mhi.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/mhi.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/mhi.c	2024-01-19 17:01:19.869847139 +0100
@@ -6,13 +6,20 @@
 
 #include <linux/msi.h>
 #include <linux/pci.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/ioport.h>
 
 #include "core.h"
 #include "debug.h"
 #include "mhi.h"
 #include "pci.h"
+#include "pcic.h"
+#include "hif.h"
 
 #define MHI_TIMEOUT_DEFAULT_MS	90000
+#define OTP_INVALID_BOARD_ID	0xFFFF
+#define OTP_VALID_BOARD_ID_MASK	0x1000
 
 static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = {
 	{
@@ -252,7 +259,7 @@
 	int ret, num_vectors, i;
 	int *irq;
 
-	ret = ath12k_pci_get_user_msi_assignment(ab,
+	ret = ath12k_pcic_get_user_msi_assignment(ab,
 						 "MHI", &num_vectors,
 						 &user_base_data, &base_vector);
 	if (ret)
@@ -266,7 +273,7 @@
 		return -ENOMEM;
 
 	for (i = 0; i < num_vectors; i++)
-		irq[i] = ath12k_pci_get_msi_irq(ab->dev,
+		irq[i] = ath12k_hif_get_msi_irq(ab,
 						base_vector + i);
 
 	ab_pci->mhi_ctrl->irq = irq;
@@ -310,21 +317,76 @@
 	}
 }
 
+static char *ath12k_mhi_state_to_str(enum ath12k_mhi_state mhi_state)
+{
+	switch (mhi_state) {
+	case ATH12K_MHI_INIT:
+		return "INIT";
+	case ATH12K_MHI_DEINIT:
+		return "DEINIT";
+	case ATH12K_MHI_POWER_ON:
+		return "POWER_ON";
+	case ATH12K_MHI_POWER_OFF:
+		return "POWER_OFF";
+	case ATH12K_MHI_FORCE_POWER_OFF:
+		return "FORCE_POWER_OFF";
+	case ATH12K_MHI_SUSPEND:
+		return "SUSPEND";
+	case ATH12K_MHI_RESUME:
+		return "RESUME";
+	case ATH12K_MHI_TRIGGER_RDDM:
+		return "TRIGGER_RDDM";
+	case ATH12K_MHI_RDDM_DONE:
+		return "RDDM_DONE";
+	case ATH12K_MHI_SOC_RESET:
+		return "SOC_RESET";
+	default:
+		return "UNKNOWN";
+	}
+};
+
 static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
 				    enum mhi_callback cb)
 {
 	struct ath12k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);
+	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
 
-	ath12k_dbg(ab, ATH12K_DBG_BOOT, "mhi notify status reason %s\n",
-		   ath12k_mhi_op_callback_to_str(cb));
+	ath12k_dbg(ab, ATH12K_DBG_BOOT, "mhi notify status reason %s mhi_state:%s (0x%lx)\n",
+		   ath12k_mhi_op_callback_to_str(cb),
+		   ath12k_mhi_state_to_str(ab_pci->mhi_state), ab_pci->mhi_state);
 
 	switch (cb) {
 	case MHI_CB_SYS_ERROR:
 		ath12k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n");
 		break;
 	case MHI_CB_EE_RDDM:
-		if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags)))
+		/* check duplicate RDDM received from MHI */
+		if (mhi_get_exec_env(ab_pci->mhi_ctrl) == mhi_cntrl->ee) {
+			ath12k_dbg(ab, ATH12K_DBG_BOOT, "Ignore duplicate status cb %s(%d) received from MHI\n",
+				    ath12k_mhi_op_callback_to_str(cb),
+				    cb);
+			return;
+		}
+
+		/* In case of RDDM for an MHI SoC reset */
+		if(test_bit(ATH12K_MHI_SOC_RESET, &ab_pci->mhi_state)) {
+			ath12k_dbg(ab, ATH12K_DBG_BOOT, "Triggering RDDM from mhi soc reset\n");
+			clear_bit(ATH12K_MHI_SOC_RESET, &ab_pci->mhi_state);
+			complete(&ab->rddm_reset_done);
+			return;
+		}
+
+		/* In case of RDDM for a fatal error while ab is registered. */
+		if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) {
+			set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+			set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
 			queue_work(ab->workqueue_aux, &ab->reset_work);
+			ath12k_dbg(ab, ATH12K_DBG_BOOT, "Schedule SSR Recovery reset work queue\n");
+			ath12k_hal_dump_srng_stats(ab);
+		} else {
+			BUG_ON(1);
+		}
+		ath12k_hal_dump_srng_stats(ab);
 		break;
 	default:
 		break;
@@ -347,17 +409,77 @@
 	writel(val, addr);
 }
 
+static int ath12k_mhi_read_addr_from_dt(struct mhi_controller *mhi_ctrl)
+{
+	struct device_node *np;
+	struct resource res;
+	int ret;
+
+	np = of_find_node_by_type(NULL, "memory");
+	if (!np)
+		return -ENOENT;
+
+	ret = of_address_to_resource(np, 0, &res);
+	if (ret)
+		return ret;
+
+	mhi_ctrl->iova_start = res.start + 0x1000000;
+	mhi_ctrl->iova_stop = res.end;
+
+	return 0;
+}
+
 int ath12k_mhi_register(struct ath12k_pci *ab_pci)
 {
 	struct ath12k_base *ab = ab_pci->ab;
+	struct device *dev = ab->dev;
 	struct mhi_controller *mhi_ctrl;
+	unsigned int board_id, otp_board_id;
+	const char *filename = ATH12K_AMSS_FILE;
 	int ret;
 
 	mhi_ctrl = mhi_alloc_controller();
 	if (!mhi_ctrl)
 		return -ENOMEM;
 
-	ath12k_core_create_firmware_path(ab, ATH12K_AMSS_FILE,
+	otp_board_id = ath12k_pci_read32(ab, QCN9224_QFPROM_RAW_RFA_PDET_ROW13_LSB);
+
+	board_id = FIELD_GET(OTP_BOARD_ID_MASK, otp_board_id);
+	if (!board_id || (board_id == OTP_INVALID_BOARD_ID)) {
+		if (of_property_read_u32(dev->of_node, "qcom,board_id", &board_id)) {
+			ath12k_warn(ab, "failed to read board id\n");
+		}
+	}
+
+	if (!board_id || (board_id == OTP_INVALID_BOARD_ID)) {
+		u16 ov_board_id;
+
+		/* look for override */
+		if (ath12k_pci_has_board_id_override(ab, &ov_board_id)) {
+			board_id = ov_board_id;
+			ath12k_info(ab,
+				    "overriding board-id to 0x%x (%d)\n",
+				    board_id, board_id);
+		} else {
+			ath12k_err(ab,
+				   "device has invalid board-id (0x%04x), "
+				   "to use this card you need to setup "
+				   "%s/%s/%s file with "
+				   "this line:\n  pci:%s=<board_id>\n",
+				   board_id,
+				   ATH12K_FW_DIR, ab->hw_params->fw.dir,
+				   ATH12K_BOARD_OVERRIDE_FILE,
+				   pci_name(ab_pci->pdev));
+			return -EIO;
+		}
+	}
+	if (board_id & OTP_VALID_BOARD_ID_MASK) {
+		filename = ATH12K_AMSS_DUALMAC_FILE;
+		ath12k_dbg(ab, ATH12K_DBG_BOOT,
+				"dualmac fw selected for board id: %x\n", board_id);
+	}
+
+	ath12k_core_create_firmware_path(ab, filename,
 					 ab_pci->amss_path,
 					 sizeof(ab_pci->amss_path));
 
@@ -367,6 +489,8 @@
 	mhi_ctrl->regs = ab->mem;
 	mhi_ctrl->reg_len = ab->mem_len;
 
+	mhi_ctrl->rddm_size = ATH12K_PCI_FW_RDDM_SZ;
+
 	ret = ath12k_mhi_get_msi(ab_pci);
 	if (ret) {
 		ath12k_err(ab, "failed to get msi for mhi\n");
@@ -374,6 +498,15 @@
 		return ret;
 	}
 
+	if (ab->bus_params.fixed_mem_region) {
+		ret = ath12k_mhi_read_addr_from_dt(mhi_ctrl);
+		if (ret < 0)
+			return ret;
+	} else {
+		mhi_ctrl->iova_start = 0;
+		mhi_ctrl->iova_stop = 0xFFFFFFFF;
+	}
+
 	mhi_ctrl->iova_start = 0;
 	mhi_ctrl->iova_stop = 0xffffffff;
 	mhi_ctrl->sbl_size = SZ_512K;
@@ -405,32 +538,6 @@
 	ab_pci->mhi_ctrl = NULL;
 }
 
-static char *ath12k_mhi_state_to_str(enum ath12k_mhi_state mhi_state)
-{
-	switch (mhi_state) {
-	case ATH12K_MHI_INIT:
-		return "INIT";
-	case ATH12K_MHI_DEINIT:
-		return "DEINIT";
-	case ATH12K_MHI_POWER_ON:
-		return "POWER_ON";
-	case ATH12K_MHI_POWER_OFF:
-		return "POWER_OFF";
-	case ATH12K_MHI_FORCE_POWER_OFF:
-		return "FORCE_POWER_OFF";
-	case ATH12K_MHI_SUSPEND:
-		return "SUSPEND";
-	case ATH12K_MHI_RESUME:
-		return "RESUME";
-	case ATH12K_MHI_TRIGGER_RDDM:
-		return "TRIGGER_RDDM";
-	case ATH12K_MHI_RDDM_DONE:
-		return "RDDM_DONE";
-	default:
-		return "UNKNOWN";
-	}
-};
-
 static void ath12k_mhi_set_state_bit(struct ath12k_pci *ab_pci,
 				     enum ath12k_mhi_state mhi_state)
 {
@@ -464,6 +571,9 @@
 	case ATH12K_MHI_RDDM_DONE:
 		set_bit(ATH12K_MHI_RDDM_DONE, &ab_pci->mhi_state);
 		break;
+	case ATH12K_MHI_SOC_RESET:
+		set_bit(ATH12K_MHI_SOC_RESET, &ab_pci->mhi_state);
+                break;
 	default:
 		ath12k_err(ab, "unhandled mhi state (%d)\n", mhi_state);
 	}
@@ -506,6 +616,8 @@
 		break;
 	case ATH12K_MHI_RDDM_DONE:
 		return 0;
+	case ATH12K_MHI_SOC_RESET:
+		return 0;
 	default:
 		ath12k_err(ab, "unhandled mhi state: %s(%d)\n",
 			   ath12k_mhi_state_to_str(mhi_state), mhi_state);
@@ -518,7 +630,7 @@
 	return -EINVAL;
 }
 
-static int ath12k_mhi_set_state(struct ath12k_pci *ab_pci,
+int ath12k_mhi_set_state(struct ath12k_pci *ab_pci,
 				enum ath12k_mhi_state mhi_state)
 {
 	struct ath12k_base *ab = ab_pci->ab;
@@ -561,6 +673,9 @@
 		break;
 	case ATH12K_MHI_RDDM_DONE:
 		break;
+	case ATH12K_MHI_SOC_RESET:
+		mhi_soc_reset(ab_pci->mhi_ctrl);
+		break;
 	default:
 		ath12k_err(ab, "unhandled MHI state (%d)\n", mhi_state);
 		ret = -EINVAL;
@@ -614,3 +729,8 @@
 {
 	ath12k_mhi_set_state(ab_pci, ATH12K_MHI_RESUME);
 }
+
+void ath12k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool state)
+{
+	mhi_download_rddm_image(mhi_ctrl, state);
+}
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/mhi.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/mhi.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/mhi.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/mhi.h	2024-01-19 17:01:19.869847139 +0100
@@ -16,6 +16,7 @@
 #define MHISTATUS				0x48
 #define MHICTRL					0x38
 #define MHICTRL_RESET_MASK			0x2
+#define ATH12K_PCI_FW_RDDM_SZ			0x600000
 
 enum ath12k_mhi_state {
 	ATH12K_MHI_INIT,
@@ -28,6 +29,7 @@
 	ATH12K_MHI_TRIGGER_RDDM,
 	ATH12K_MHI_RDDM,
 	ATH12K_MHI_RDDM_DONE,
+	ATH12K_MHI_SOC_RESET,
 };
 
 extern const struct mhi_controller_config ath12k_mhi_config_qcn9274;
@@ -42,5 +44,7 @@
 
 void ath12k_mhi_suspend(struct ath12k_pci *ar_pci);
 void ath12k_mhi_resume(struct ath12k_pci *ar_pci);
-
+void ath12k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool state);
+int ath12k_mhi_set_state(struct ath12k_pci *ab_pci,
+                                enum ath12k_mhi_state mhi_state);
 #endif
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/pci.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/pci.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/pci.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/pci.c	2024-03-18 14:40:14.855741442 +0100
@@ -5,31 +5,23 @@
  */
 
 #include <linux/module.h>
-#include <linux/msi.h>
-#include <linux/pci.h>
+#include <linux/of.h>
 
 #include "pci.h"
-#include "core.h"
-#include "hif.h"
 #include "mhi.h"
 #include "debug.h"
+#include "ppe.h"
+#include "bondif.h"
+#include "pcic.h"
 
 #define ATH12K_PCI_BAR_NUM		0
 #define ATH12K_PCI_DMA_MASK		32
 
-#define ATH12K_PCI_IRQ_CE0_OFFSET		3
-
-#define WINDOW_ENABLE_BIT		0x40000000
-#define WINDOW_REG_ADDRESS		0x310c
-#define WINDOW_VALUE_MASK		GENMASK(24, 19)
-#define WINDOW_START			0x80000
-#define WINDOW_RANGE_MASK		GENMASK(18, 0)
 #define WINDOW_STATIC_MASK		GENMASK(31, 6)
 
 #define TCSR_SOC_HW_VERSION		0x1B00000
 #define TCSR_SOC_HW_VERSION_MAJOR_MASK	GENMASK(11, 8)
 #define TCSR_SOC_HW_VERSION_MINOR_MASK	GENMASK(7, 4)
-
 /* BAR0 + 4k is always accessible, and no
  * need to force wakeup.
  * 4K - 32 = 0xFE0
@@ -39,6 +31,16 @@
 #define QCN9274_DEVICE_ID		0x1109
 #define WCN7850_DEVICE_ID		0x1107
 
+#define PCIE_PCIE_LOCAL_REG_PCIE_LOCAL_RSV0	0x1E03164
+#define QRTR_INSTANCE_MASK			0x000000FF
+#define ATH12K_MAX_PCI_DOMAINS		0x5
+
+
+unsigned int ath12k_fw_mem_seg;
+EXPORT_SYMBOL(ath12k_fw_mem_seg);
+module_param_named(fw_mem_seg, ath12k_fw_mem_seg, uint, 0644);
+MODULE_PARM_DESC(fw_mem_seg, "Enable/Disable FW segmented memory");
+
 static const struct pci_device_id ath12k_pci_id_table[] = {
 	{ PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
 	{ PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
@@ -47,76 +49,8 @@
 
 MODULE_DEVICE_TABLE(pci, ath12k_pci_id_table);
 
-/* TODO: revisit IRQ mapping for new SRNG's */
-static const struct ath12k_msi_config ath12k_msi_config[] = {
-	{
-		.total_vectors = 16,
-		.total_users = 3,
-		.users = (struct ath12k_msi_user[]) {
-			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
-			{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
-			{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
-		},
-	},
-};
-
-static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
-	"bhi",
-	"mhi-er0",
-	"mhi-er1",
-	"ce0",
-	"ce1",
-	"ce2",
-	"ce3",
-	"ce4",
-	"ce5",
-	"ce6",
-	"ce7",
-	"ce8",
-	"ce9",
-	"ce10",
-	"ce11",
-	"ce12",
-	"ce13",
-	"ce14",
-	"ce15",
-	"host2wbm-desc-feed",
-	"host2reo-re-injection",
-	"host2reo-command",
-	"host2rxdma-monitor-ring3",
-	"host2rxdma-monitor-ring2",
-	"host2rxdma-monitor-ring1",
-	"reo2ost-exception",
-	"wbm2host-rx-release",
-	"reo2host-status",
-	"reo2host-destination-ring4",
-	"reo2host-destination-ring3",
-	"reo2host-destination-ring2",
-	"reo2host-destination-ring1",
-	"rxdma2host-monitor-destination-mac3",
-	"rxdma2host-monitor-destination-mac2",
-	"rxdma2host-monitor-destination-mac1",
-	"ppdu-end-interrupts-mac3",
-	"ppdu-end-interrupts-mac2",
-	"ppdu-end-interrupts-mac1",
-	"rxdma2host-monitor-status-ring-mac3",
-	"rxdma2host-monitor-status-ring-mac2",
-	"rxdma2host-monitor-status-ring-mac1",
-	"host2rxdma-host-buf-ring-mac3",
-	"host2rxdma-host-buf-ring-mac2",
-	"host2rxdma-host-buf-ring-mac1",
-	"rxdma2host-destination-ring-mac3",
-	"rxdma2host-destination-ring-mac2",
-	"rxdma2host-destination-ring-mac1",
-	"host2tcl-input-ring4",
-	"host2tcl-input-ring3",
-	"host2tcl-input-ring2",
-	"host2tcl-input-ring1",
-	"wbm2host-tx-completions-ring4",
-	"wbm2host-tx-completions-ring3",
-	"wbm2host-tx-completions-ring2",
-	"wbm2host-tx-completions-ring1",
-	"tcl2host-status-ring",
+static const struct ath12k_bus_params ath12k_pci_bus_params = {
+	.fixed_bdf_addr = false,
 };
 
 static int ath12k_pci_bus_wake_up(struct ath12k_base *ab)
@@ -164,8 +98,9 @@
 	}
 }
 
-static void ath12k_pci_select_static_window(struct ath12k_pci *ab_pci)
+static void ath12k_pci_select_static_window(struct ath12k_base *ab)
 {
+	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
 	u32 umac_window = u32_get_bits(HAL_SEQ_WCSS_UMAC_OFFSET, WINDOW_VALUE_MASK);
 	u32 ce_window = u32_get_bits(HAL_CE_WFSS_CE_REG_BASE, WINDOW_VALUE_MASK);
 	u32 window;
@@ -176,30 +111,7 @@
 	ab_pci->register_window = window;
 	spin_unlock_bh(&ab_pci->window_lock);
 
-	iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS);
-}
-
-static u32 ath12k_pci_get_window_start(struct ath12k_base *ab,
-				       u32 offset)
-{
-	u32 window_start;
-
-	/* If offset lies within DP register range, use 3rd window */
-	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK)
-		window_start = 3 * WINDOW_START;
-	/* If offset lies within CE register range, use 2nd window */
-	else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
-		window_start = 2 * WINDOW_START;
-	/* If offset lies within PCI_BAR_WINDOW0_BASE and within PCI_SOC_PCI_REG_BASE
-	 * use 0th window
-	 */
-	else if (((offset ^ PCI_BAR_WINDOW0_BASE) < WINDOW_RANGE_MASK) &&
-		 !((offset ^ PCI_SOC_PCI_REG_BASE) < PCI_SOC_RANGE_MASK))
-		window_start = 0;
-	else
-		window_start = WINDOW_START;
-
-	return window_start;
+	iowrite32(WINDOW_ENABLE_BIT | window, ab->mem + WINDOW_REG_ADDRESS);
 }
 
 static void ath12k_pci_soc_global_reset(struct ath12k_base *ab)
@@ -313,10 +225,15 @@
 
 static void ath12k_pci_sw_reset(struct ath12k_base *ab, bool power_on)
 {
+	mdelay(100);
+
 	if (power_on) {
 		ath12k_pci_enable_ltssm(ab);
 		ath12k_pci_clear_all_intrs(ab);
 		ath12k_pci_set_wlaon_pwr_ctrl(ab);
+
+		if (ab->hw_params->fix_l1ss)
+			ath12k_dbg(ab, ATH12K_DBG_PCI, "L1ss fix required\n");
 	}
 
 	ath12k_mhi_clear_vector(ab);
@@ -325,294 +242,160 @@
 	ath12k_mhi_set_mhictrl_reset(ab);
 }
 
-static void ath12k_pci_free_ext_irq(struct ath12k_base *ab)
-{
-	int i, j;
-
-	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
-		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
-
-		for (j = 0; j < irq_grp->num_irq; j++)
-			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
-
-		netif_napi_del(&irq_grp->napi);
-	}
-}
-
-static void ath12k_pci_free_irq(struct ath12k_base *ab)
-{
-	int i, irq_idx;
-
-	for (i = 0; i < ab->hw_params->ce_count; i++) {
-		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
-			continue;
-		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
-		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
-	}
-
-	ath12k_pci_free_ext_irq(ab);
-}
-
-static void ath12k_pci_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
-{
-	u32 irq_idx;
-
-	irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
-	enable_irq(ab->irq_num[irq_idx]);
-}
-
-static void ath12k_pci_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
-{
-	u32 irq_idx;
-
-	irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
-	disable_irq_nosync(ab->irq_num[irq_idx]);
-}
-
-static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab)
-{
-	int i;
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+int ath12k_pci_ppeds_register_interrupts(struct ath12k_base *ab, int type, int vector,
+					int ring_num)
+{
+	struct ath12k_pci *ar_pci = (struct ath12k_pci *)ab->drv_priv;
+	int irq;
+	u8 bus_id = pci_domain_nr(ar_pci->pdev->bus);
+	int ret;
 
-	for (i = 0; i < ab->hw_params->ce_count; i++) {
-		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
-			continue;
-		ath12k_pci_ce_irq_disable(ab, i);
-	}
+	if (ab->ppeds_node_idx == -1) {
+		ath12k_err(ab, "invalid ppeds_node_idx in ppeds_register_interrupts\n");
+		return -EINVAL;
 }
 
-static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab)
-{
-	int i;
-	int irq_idx;
-
-	for (i = 0; i < ab->hw_params->ce_count; i++) {
-		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
-			continue;
+	irq = ath12k_hif_get_msi_irq(ab, vector);
 
-		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
-		synchronize_irq(ab->irq_num[irq_idx]);
-	}
+	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+	if (type == HAL_PPE2TCL) {
+		snprintf(&ab->dp.ppeds_irq_name[PPEDS_IRQ_PPE2TCL], sizeof(ab->dp.ppeds_irq_name),
+			 "pci%d_ppe2tcl_%d", bus_id, ab->ppeds_node_idx);
+		ret = request_irq(irq,  ath12k_ds_ppe2tcl_irq_handler,
+				  IRQF_SHARED,
+			    ab->dp.ppeds_irq_name[PPEDS_IRQ_PPE2TCL], (void *)ath12k_dp_get_ppe_ds_ctxt(ab));
+		if (ret)
+			goto irq_fail;
+		ab->dp.ppeds_irq[PPEDS_IRQ_PPE2TCL] = irq;
+	} else if (type == HAL_REO2PPE) {
+		snprintf(&ab->dp.ppeds_irq_name[PPEDS_IRQ_REO2PPE], sizeof(ab->dp.ppeds_irq_name),
+			 "pci%d_reo2ppe_%d", bus_id, ab->ppeds_node_idx);
+		ret = request_irq(irq,  ath12k_ds_reo2ppe_irq_handler,
+				  IRQF_SHARED,
+				  ab->dp.ppeds_irq_name[PPEDS_IRQ_REO2PPE], (void *)ath12k_dp_get_ppe_ds_ctxt(ab));
+		if (ret)
+			goto irq_fail;
+		ab->dp.ppeds_irq[PPEDS_IRQ_REO2PPE] = irq;
+	} else if (type == HAL_WBM2SW_RELEASE && ring_num == HAL_WBM2SW_PPEDS_TX_CMPLN_RING_NUM) {
+		snprintf(&ab->dp.ppeds_irq_name[PPEDS_IRQ_PPE_WBM2SW_REL], sizeof(ab->dp.ppeds_irq_name),
+			 "pci%d_ppe_wbm_rel_%d", bus_id, ab->ppeds_node_idx);
+		ret = request_irq(irq,  ath12k_dp_ppeds_handle_tx_comp,
+				  IRQF_SHARED,
+				  ab->dp.ppeds_irq_name[PPEDS_IRQ_PPE_WBM2SW_REL],(void *)ab);
+		if (ret)
+			goto irq_fail;
+		ab->dp.ppeds_irq[PPEDS_IRQ_PPE_WBM2SW_REL] = irq;
+	} else {
+		return 0;
 }
+	disable_irq_nosync(irq);
 
-static void ath12k_pci_ce_tasklet(struct tasklet_struct *t)
-{
-	struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
-
-	ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+	return 0;
 
-	ath12k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
+irq_fail:
+	return ret;
 }
 
-static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg)
+void ath12k_pci_ppeds_irq_disable(struct ath12k_base *ab, enum ppeds_irq_type type)
 {
-	struct ath12k_ce_pipe *ce_pipe = arg;
-
-	/* last interrupt received for this CE */
-	ce_pipe->timestamp = jiffies;
-
-	ath12k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
-	tasklet_schedule(&ce_pipe->intr_tq);
-
-	return IRQ_HANDLED;
+	disable_irq_nosync(ab->dp.ppeds_irq[type]);
 }
 
-static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
+void ath12k_pci_ppeds_irq_enable(struct ath12k_base *ab, enum ppeds_irq_type type)
 {
-	int i;
-
-	for (i = 0; i < irq_grp->num_irq; i++)
-		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+	enable_irq(ab->dp.ppeds_irq[type]);
 }
 
-static void __ath12k_pci_ext_irq_disable(struct ath12k_base *sc)
+void ath12k_pci_ppeds_free_interrupts(struct ath12k_base *ab)
 {
-	int i;
-
-	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
-		struct ath12k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
+	disable_irq_nosync(ab->dp.ppeds_irq[PPEDS_IRQ_PPE2TCL]);
+	free_irq(ab->dp.ppeds_irq[PPEDS_IRQ_PPE2TCL], ath12k_dp_get_ppe_ds_ctxt(ab));
 
-		ath12k_pci_ext_grp_disable(irq_grp);
+	disable_irq_nosync(ab->dp.ppeds_irq[PPEDS_IRQ_REO2PPE]);
+	free_irq(ab->dp.ppeds_irq[PPEDS_IRQ_REO2PPE], ath12k_dp_get_ppe_ds_ctxt(ab));
 
-		napi_synchronize(&irq_grp->napi);
-		napi_disable(&irq_grp->napi);
+	disable_irq_nosync(ab->dp.ppeds_irq[PPEDS_IRQ_PPE_WBM2SW_REL]);
+	free_irq(ab->dp.ppeds_irq[PPEDS_IRQ_PPE_WBM2SW_REL], ab);
 	}
-}
-
-static void ath12k_pci_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
-{
-	int i;
+#endif
 
-	for (i = 0; i < irq_grp->num_irq; i++)
-		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
-}
-
-static void ath12k_pci_sync_ext_irqs(struct ath12k_base *ab)
+int ath12k_pci_get_msi_irq(struct ath12k_base *ab, unsigned int vector)
 {
-	int i, j, irq_idx;
-
-	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
-		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+	struct device *dev = ab->dev;
+	struct pci_dev *pci_dev = to_pci_dev(dev);
 
-		for (j = 0; j < irq_grp->num_irq; j++) {
-			irq_idx = irq_grp->irqs[j];
-			synchronize_irq(ab->irq_num[irq_idx]);
-		}
-	}
+	return pci_irq_vector(pci_dev, vector);
 }
 
-static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+static void ath12k_umac_reset_tasklet_handler(struct tasklet_struct *t)
 {
-	struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
-						struct ath12k_ext_irq_grp,
-						napi);
-	struct ath12k_base *ab = irq_grp->ab;
-	int work_done;
+	struct ath12k_dp_umac_reset *umac_reset = from_tasklet(umac_reset, t, intr_tq);
+	struct ath12k_base *ab = container_of(umac_reset, struct ath12k_base, dp_umac_reset);
 
-	work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
-	if (work_done < budget) {
-		napi_complete_done(napi, work_done);
-		ath12k_pci_ext_grp_enable(irq_grp);
+	ath12k_dp_umac_reset_handle(ab);
+	enable_irq(umac_reset->irq_num);
 	}
 
-	if (work_done > budget)
-		work_done = budget;
-
-	return work_done;
-}
-
-static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
+static irqreturn_t ath12k_dp_umac_reset_interrupt_handler(int irq, void *arg)
 {
-	struct ath12k_ext_irq_grp *irq_grp = arg;
-
-	ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq);
-
-	/* last interrupt received for this group */
-	irq_grp->timestamp = jiffies;
-
-	ath12k_pci_ext_grp_disable(irq_grp);
-
-	napi_schedule(&irq_grp->napi);
+	struct ath12k_base *ab = arg;
+	struct ath12k_dp_umac_reset *umac_reset = &ab->dp_umac_reset;
 
+	disable_irq_nosync(umac_reset->irq_num);
+	tasklet_schedule(&umac_reset->intr_tq);
 	return IRQ_HANDLED;
 }
 
-static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
+static void ath12k_dp_umac_reset_enable_irq(struct ath12k_base *ab)
 {
-	int i, j, ret, num_vectors = 0;
-	u32 user_base_data = 0, base_vector = 0, base_idx;
-
-	base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
-	ret = ath12k_pci_get_user_msi_assignment(ab, "DP",
-						 &num_vectors,
-						 &user_base_data,
-						 &base_vector);
-	if (ret < 0)
-		return ret;
-
-	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
-		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
-		u32 num_irq = 0;
-
-		irq_grp->ab = ab;
-		irq_grp->grp_id = i;
-		init_dummy_netdev(&irq_grp->napi_ndev);
-		netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
-			       ath12k_pci_ext_grp_napi_poll);
-
-		if (ab->hw_params->ring_mask->tx[i] ||
-		    ab->hw_params->ring_mask->rx[i] ||
-		    ab->hw_params->ring_mask->rx_err[i] ||
-		    ab->hw_params->ring_mask->rx_wbm_rel[i] ||
-		    ab->hw_params->ring_mask->reo_status[i] ||
-		    ab->hw_params->ring_mask->host2rxdma[i] ||
-		    ab->hw_params->ring_mask->rx_mon_dest[i]) {
-			num_irq = 1;
-		}
-
-		irq_grp->num_irq = num_irq;
-		irq_grp->irqs[0] = base_idx + i;
-
-		for (j = 0; j < irq_grp->num_irq; j++) {
-			int irq_idx = irq_grp->irqs[j];
-			int vector = (i % num_vectors) + base_vector;
-			int irq = ath12k_pci_get_msi_irq(ab->dev, vector);
+	struct ath12k_dp_umac_reset *umac_reset = &ab->dp_umac_reset;
 
-			ab->irq_num[irq_idx] = irq;
-
-			ath12k_dbg(ab, ATH12K_DBG_PCI,
-				   "irq:%d group:%d\n", irq, i);
-
-			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
-			ret = request_irq(irq, ath12k_pci_ext_interrupt_handler,
-					  IRQF_SHARED,
-					  "DP_EXT_IRQ", irq_grp);
-			if (ret) {
-				ath12k_err(ab, "failed request irq %d: %d\n",
-					   vector, ret);
-				return ret;
-			}
-
-			disable_irq_nosync(ab->irq_num[irq_idx]);
-		}
-	}
-
-	return 0;
+	enable_irq(umac_reset->irq_num);
 }
 
-static int ath12k_pci_config_irq(struct ath12k_base *ab)
+static int ath12k_dp_umac_pci_config_irq(struct ath12k_base *ab)
 {
-	struct ath12k_ce_pipe *ce_pipe;
-	u32 msi_data_start;
-	u32 msi_data_count, msi_data_idx;
-	u32 msi_irq_start;
+        u32 msi_data_start, msi_data_count, msi_irq_start;
 	unsigned int msi_data;
-	int irq, i, ret, irq_idx;
+        int irq, ret;
+	struct ath12k_dp_umac_reset *umac_reset = &ab->dp_umac_reset;
 
-	ret = ath12k_pci_get_user_msi_assignment(ab,
-						 "CE", &msi_data_count,
+	ret = ath12k_pcic_get_user_msi_assignment(ab, "DP", &msi_data_count,
 						 &msi_data_start, &msi_irq_start);
 	if (ret)
 		return ret;
 
-	/* Configure CE irqs */
+	msi_data = (umac_reset->intr_offset % msi_data_count) + msi_irq_start;
+	irq = ath12k_pci_get_msi_irq(ab, msi_data);
+	umac_reset->irq_num = irq;
+	tasklet_setup(&umac_reset->intr_tq, ath12k_umac_reset_tasklet_handler);
 
-	for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
-		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
-			continue;
-
-		msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
-		irq = ath12k_pci_get_msi_irq(ab->dev, msi_data);
-		ce_pipe = &ab->ce.ce_pipe[i];
-
-		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
-
-		tasklet_setup(&ce_pipe->intr_tq, ath12k_pci_ce_tasklet);
-
-		ret = request_irq(irq, ath12k_pci_ce_interrupt_handler,
-				  IRQF_SHARED, irq_name[irq_idx],
-				  ce_pipe);
+	ret = request_irq(irq, ath12k_dp_umac_reset_interrupt_handler,
+			  IRQF_NO_SUSPEND, "umac_dp_reset", ab);
 		if (ret) {
-			ath12k_err(ab, "failed to request irq %d: %d\n",
-				   irq_idx, ret);
+		ath12k_err(ab, "failed to request irq for umac dp reset %d\n", ret);
 			return ret;
 		}
 
-		ab->irq_num[irq_idx] = irq;
-		msi_data_idx++;
+	disable_irq_nosync(umac_reset->irq_num);
 
-		ath12k_pci_ce_irq_disable(ab, i);
+	return 0;
 	}
 
-	ret = ath12k_pci_ext_irq_config(ab);
-	if (ret)
-		return ret;
+static void ath12k_dp_umac_reset_free_irq(struct ath12k_base *ab)
+{
+	struct ath12k_dp_umac_reset *umac_reset = &ab->dp_umac_reset;
 
-	return 0;
+	disable_irq_nosync(umac_reset->irq_num);
+	free_irq(umac_reset->irq_num, ab);
 }
 
 static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
 {
 	struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+	struct pci_bus *bus = ab_pci->pdev->bus;
 
 	cfg->tgt_ce = ab->hw_params->target_ce_config;
 	cfg->tgt_ce_len = ab->hw_params->target_ce_count;
@@ -620,17 +403,10 @@
 	cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
 	cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
 	ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
-}
-
-static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
-{
-	int i;
 
-	for (i = 0; i < ab->hw_params->ce_count; i++) {
-		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
-			continue;
-		ath12k_pci_ce_irq_enable(ab, i);
-	}
+	ab_pci->instance = (((pci_domain_nr(bus) & 0xF) << 4) |
+			    (bus->number & 0xF)) & QRTR_INSTANCE_MASK;
+	ab->qmi.service_ins_id += ab_pci->instance;
 }
 
 static void ath12k_pci_msi_config(struct ath12k_pci *ab_pci, bool enable)
@@ -661,7 +437,7 @@
 static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci)
 {
 	struct ath12k_base *ab = ab_pci->ab;
-	const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
+	const struct ath12k_msi_config *msi_config = ab->msi.config;
 	struct msi_desc *msi_desc;
 	int num_vectors;
 	int ret;
@@ -689,11 +465,11 @@
 		goto free_msi_vector;
 	}
 
-	ab_pci->msi_ep_base_data = msi_desc->msg.data;
+	ab->msi.ep_base_data = msi_desc->msg.data;
 	if (msi_desc->pci.msi_attrib.is_64)
 		set_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);
 
-	ath12k_dbg(ab, ATH12K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);
+	ath12k_dbg(ab, ATH12K_DBG_PCI, "msi base data is %d\n", ab->msi.ep_base_data);
 
 	return 0;
 
@@ -800,110 +576,32 @@
 	set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags);
 }
 
-static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
-{
-	if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
-		pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
-					   ab_pci->link_ctl);
-}
-
-static void ath12k_pci_kill_tasklets(struct ath12k_base *ab)
-{
-	int i;
-
-	for (i = 0; i < ab->hw_params->ce_count; i++) {
-		struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
-
-		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
-			continue;
-
-		tasklet_kill(&ce_pipe->intr_tq);
-	}
-}
-
-static void ath12k_pci_ce_irq_disable_sync(struct ath12k_base *ab)
-{
-	ath12k_pci_ce_irqs_disable(ab);
-	ath12k_pci_sync_ce_irqs(ab);
-	ath12k_pci_kill_tasklets(ab);
-}
-
-int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
-				   u8 *ul_pipe, u8 *dl_pipe)
-{
-	const struct service_to_pipe *entry;
-	bool ul_set = false, dl_set = false;
-	int i;
-
-	for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
-		entry = &ab->hw_params->svc_to_ce_map[i];
-
-		if (__le32_to_cpu(entry->service_id) != service_id)
-			continue;
-
-		switch (__le32_to_cpu(entry->pipedir)) {
-		case PIPEDIR_NONE:
-			break;
-		case PIPEDIR_IN:
-			WARN_ON(dl_set);
-			*dl_pipe = __le32_to_cpu(entry->pipenum);
-			dl_set = true;
-			break;
-		case PIPEDIR_OUT:
-			WARN_ON(ul_set);
-			*ul_pipe = __le32_to_cpu(entry->pipenum);
-			ul_set = true;
-			break;
-		case PIPEDIR_INOUT:
-			WARN_ON(dl_set);
-			WARN_ON(ul_set);
-			*dl_pipe = __le32_to_cpu(entry->pipenum);
-			*ul_pipe = __le32_to_cpu(entry->pipenum);
-			dl_set = true;
-			ul_set = true;
-			break;
-		}
-	}
-
-	if (WARN_ON(!ul_set || !dl_set))
-		return -ENOENT;
-
-	return 0;
-}
-
-int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector)
-{
-	struct pci_dev *pci_dev = to_pci_dev(dev);
-
-	return pci_irq_vector(pci_dev, vector);
-}
-
-int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name,
-				       int *num_vectors, u32 *user_base_data,
-				       u32 *base_vector)
+static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab)
 {
 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
-	const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
-	int idx;
+	u32 reg;
 
-	for (idx = 0; idx < msi_config->total_users; idx++) {
-		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
-			*num_vectors = msi_config->users[idx].num_vectors;
-			*user_base_data = msi_config->users[idx].base_vector
-				+ ab_pci->msi_ep_base_data;
-			*base_vector = msi_config->users[idx].base_vector;
-
-			ath12k_dbg(ab, ATH12K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
-				   user_name, *num_vectors, *user_base_data,
-				   *base_vector);
+	/*
+	 * On platforms with two or more identical MHI devices, the QMI services
+	 * run with identical qrtr-node-ids. Because of these identical IDs,
+	 * qrtr-lookup cannot register more than one QMI service with the same node ID.
+	 *
+	 * This generates a unique instance ID from the PCIe domain number and bus
+	 * number and writes it to the given register, so that it is available to
+	 * the firmware when the QMI service is spawned.
+	 */
+	reg = PCIE_PCIE_LOCAL_REG_PCIE_LOCAL_RSV0 & WINDOW_RANGE_MASK;
+	ath12k_pci_write32(ab, reg, ab_pci->instance);
 
-			return 0;
-		}
+	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci reg 0x%x instance 0x%x read val 0x%x\n",
+		   reg, ab_pci->instance, ath12k_pci_read32(ab, reg));
 	}
 
-	ath12k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
-
-	return -EINVAL;
+static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
+{
+	if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
+		pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+					   ab_pci->link_ctl);
 }
 
 void ath12k_pci_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo,
@@ -942,30 +640,12 @@
 
 void ath12k_pci_hif_ce_irq_enable(struct ath12k_base *ab)
 {
-	ath12k_pci_ce_irqs_enable(ab);
+	ath12k_pcic_ce_irqs_enable(ab);
 }
 
 void ath12k_pci_hif_ce_irq_disable(struct ath12k_base *ab)
 {
-	ath12k_pci_ce_irq_disable_sync(ab);
-}
-
-void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
-{
-	int i;
-
-	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
-		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
-
-		napi_enable(&irq_grp->napi);
-		ath12k_pci_ext_grp_enable(irq_grp);
-	}
-}
-
-void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
-{
-	__ath12k_pci_ext_irq_disable(ab);
-	ath12k_pci_sync_ext_irqs(ab);
+	ath12k_pcic_ce_irq_disable_sync(ab);
 }
 
 int ath12k_pci_hif_suspend(struct ath12k_base *ab)
@@ -986,12 +666,6 @@
 	return 0;
 }
 
-void ath12k_pci_stop(struct ath12k_base *ab)
-{
-	ath12k_pci_ce_irq_disable_sync(ab);
-	ath12k_ce_cleanup_pipes(ab);
-}
-
 int ath12k_pci_start(struct ath12k_base *ab)
 {
 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
@@ -1000,17 +674,25 @@
 
 	ath12k_pci_aspm_restore(ab_pci);
 
-	ath12k_pci_ce_irqs_enable(ab);
-	ath12k_ce_rx_post_buf(ab);
-
+	ath12k_pcic_start(ab);
 	return 0;
 }
 
+#define PCI_BAR_WINDOW0_BASE	0x1E00000
+#define PCI_BAR_WINDOW0_END	0x1E7FFFC
+#define PCI_SOC_PCI_REG_BASE	0x1E04000
+#define PCI_SOC_PCI_REG_END	0x1E07FFC
+#define PCI_PARF_BASE		0x1E08000
+#define PCI_PARF_END		0x1E0BFFC
+#define PCI_MHIREGLEN_REG	0x1E0E100
+#define PCI_MHI_REGION_END	0x1E0EFFC
+
 u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset)
 {
 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
 	u32 val, window_start;
 	int ret = 0;
+	bool is_window0;
 
 	/* for offset beyond BAR + 4K - 32, may
 	 * need to wakeup MHI to access.
@@ -1023,22 +705,30 @@
 		val = ioread32(ab->mem + offset);
 	} else {
 		if (ab->static_window_map)
-			window_start = ath12k_pci_get_window_start(ab, offset);
+			window_start = ath12k_pcic_get_window_start(ab, offset);
 		else
 			window_start = WINDOW_START;
 
 		if (window_start == WINDOW_START) {
 			spin_lock_bh(&ab_pci->window_lock);
 			ath12k_pci_select_window(ab_pci, offset);
+
+			is_window0 = ((offset >= PCI_BAR_WINDOW0_BASE &&
+				       offset <= PCI_BAR_WINDOW0_END) &&
+				      (offset >= PCI_MHIREGLEN_REG &&
+				       offset <= PCI_MHI_REGION_END));
+
+			if (is_window0) {
+				offset = offset - PCI_MHIREGLEN_REG;
+
+				val = ioread32(ab->mem + (offset & WINDOW_RANGE_MASK));
+
+			} else {
 			val = ioread32(ab->mem + window_start +
 				       (offset & WINDOW_RANGE_MASK));
+			}
 			spin_unlock_bh(&ab_pci->window_lock);
 		} else {
-			if ((!window_start) &&
-			    (offset >= PCI_MHIREGLEN_REG &&
-			     offset <= PCI_MHI_REGION_END))
-				offset = offset - PCI_MHIREGLEN_REG;
-
 			val = ioread32(ab->mem + window_start +
 				       (offset & WINDOW_RANGE_MASK));
 		}
@@ -1056,6 +746,7 @@
 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
 	u32 window_start;
 	int ret = 0;
+	bool is_window0;
 
 	/* for offset beyond BAR + 4K - 32, may
 	 * need to wakeup MHI to access.
@@ -1068,22 +759,30 @@
 		iowrite32(value, ab->mem + offset);
 	} else {
 		if (ab->static_window_map)
-			window_start = ath12k_pci_get_window_start(ab, offset);
+			window_start = ath12k_pcic_get_window_start(ab, offset);
 		else
 			window_start = WINDOW_START;
 
 		if (window_start == WINDOW_START) {
 			spin_lock_bh(&ab_pci->window_lock);
 			ath12k_pci_select_window(ab_pci, offset);
+
+			is_window0 = ((offset >= PCI_BAR_WINDOW0_BASE &&
+				       offset <= PCI_BAR_WINDOW0_END) &&
+				      (offset >= PCI_MHIREGLEN_REG &&
+				       offset <= PCI_MHI_REGION_END));
+
+			if (is_window0) {
+				offset = offset - PCI_MHIREGLEN_REG;
+
+				iowrite32(value, ab->mem +
+					  (offset & WINDOW_RANGE_MASK));
+			} else {
 			iowrite32(value, ab->mem + window_start +
 				  (offset & WINDOW_RANGE_MASK));
+			}
 			spin_unlock_bh(&ab_pci->window_lock);
 		} else {
-			if ((!window_start) &&
-			    (offset >= PCI_MHIREGLEN_REG &&
-			     offset <= PCI_MHI_REGION_END))
-				offset = offset - PCI_MHIREGLEN_REG;
-
 			iowrite32(value, ab->mem + window_start +
 				  (offset & WINDOW_RANGE_MASK));
 		}
@@ -1111,6 +810,8 @@
 
 	ath12k_pci_msi_enable(ab_pci);
 
+	ath12k_pci_update_qrtr_node_id(ab);
+
 	ret = ath12k_mhi_start(ab_pci);
 	if (ret) {
 		ath12k_err(ab, "failed to start mhi: %d\n", ret);
@@ -1118,7 +819,7 @@
 	}
 
 	if (ab->static_window_map)
-		ath12k_pci_select_static_window(ab_pci);
+		ath12k_pci_select_static_window(ab);
 
 	return 0;
 }
@@ -1127,35 +828,81 @@
 {
 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
 
+
+	ath12k_mhi_set_state(ab_pci, ATH12K_MHI_SOC_RESET);
+	if (!wait_for_completion_timeout(&ab->rddm_reset_done, msecs_to_jiffies(200))) {
+		ath12k_warn(ab, "failed to set RDDM mode\n");
+		if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags)) {
+			ath12k_warn(ab, "failed to clear MHI SOC RESET as mhi already in rddm state due to recovery in progress, clearing it here\n");
+			clear_bit(ATH12K_MHI_SOC_RESET, &ab_pci->mhi_state);
+			reinit_completion(&ab->rddm_reset_done);
+		}
+	}
+	if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) {
+		ath12k_qmi_free_target_mem_chunk(ab);
+	}
 	/* restore aspm in case firmware bootup fails */
 	ath12k_pci_aspm_restore(ab_pci);
 
+	clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
 	ath12k_pci_force_wake(ab_pci->ab);
 	ath12k_pci_msi_disable(ab_pci);
 	ath12k_mhi_stop(ab_pci);
-	clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
 	ath12k_pci_sw_reset(ab_pci->ab, false);
 }
 
 static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
 	.start = ath12k_pci_start,
-	.stop = ath12k_pci_stop,
+	.stop = ath12k_pcic_stop,
 	.read32 = ath12k_pci_read32,
 	.write32 = ath12k_pci_write32,
 	.power_down = ath12k_pci_power_down,
 	.power_up = ath12k_pci_power_up,
 	.suspend = ath12k_pci_hif_suspend,
 	.resume = ath12k_pci_hif_resume,
-	.irq_enable = ath12k_pci_ext_irq_enable,
-	.irq_disable = ath12k_pci_ext_irq_disable,
+	.irq_enable = ath12k_pcic_ext_irq_enable,
+	.irq_disable = ath12k_pcic_ext_irq_disable,
 	.get_msi_address = ath12k_pci_get_msi_address,
-	.get_user_msi_vector = ath12k_pci_get_user_msi_assignment,
-	.map_service_to_pipe = ath12k_pci_map_service_to_pipe,
+	.get_user_msi_vector = ath12k_pcic_get_user_msi_assignment,
+	.map_service_to_pipe = ath12k_pcic_map_service_to_pipe,
 	.ce_irq_enable = ath12k_pci_hif_ce_irq_enable,
 	.ce_irq_disable = ath12k_pci_hif_ce_irq_disable,
 	.get_ce_msi_idx = ath12k_pci_get_ce_msi_idx,
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	.ppeds_register_interrupts = ath12k_pci_ppeds_register_interrupts,
+	.ppeds_free_interrupts = ath12k_pci_ppeds_free_interrupts,
+	.ppeds_irq_enable = ath12k_pci_ppeds_irq_enable,
+	.ppeds_irq_disable = ath12k_pci_ppeds_irq_disable,
+#endif
+	.dp_umac_reset_irq_config = ath12k_dp_umac_pci_config_irq,
+	.dp_umac_reset_enable_irq = ath12k_dp_umac_reset_enable_irq,
+	.dp_umac_reset_free_irq = ath12k_dp_umac_reset_free_irq,
+	.get_msi_irq =  ath12k_pci_get_msi_irq,
 };
 
+bool ath12k_pci_has_board_id_override(struct ath12k_base *ab,
+				      u16 *ov_board_id)
+{
+	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+	struct pci_dev *pdev = ab_pci->pdev;
+	const struct ath12k_bid_override *ov;
+
+	list_for_each_entry(ov, &ab->board_id_overrides, next) {
+		if (ov->domain != pci_domain_nr(pdev->bus))
+			continue;
+
+		if (ov->bus_nr != pdev->bus->number)
+			continue;
+
+		if (pdev->devfn != PCI_DEVFN(ov->slot, ov->func))
+			continue;
+
+		*ov_board_id = ov->board_id;
+		return true;
+	}
+	return false;
+}
+
 static
 void ath12k_pci_read_hw_version(struct ath12k_base *ab, u32 *major, u32 *minor)
 {
@@ -1178,9 +925,13 @@
 	struct ath12k_base *ab;
 	struct ath12k_pci *ab_pci;
 	u32 soc_hw_version_major, soc_hw_version_minor;
+	u32 addr;
 	int ret;
-
-	ab = ath12k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH12K_BUS_PCI);
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+	ath12k_bond_enable_ppe_ds();
+#endif
+	ab = ath12k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH12K_BUS_PCI,
+			       &ath12k_pci_bus_params);
 	if (!ab) {
 		dev_err(&pdev->dev, "failed to allocate ath12k base\n");
 		return -ENOMEM;
@@ -1193,9 +944,21 @@
 	ab_pci->ab = ab;
 	ab_pci->pdev = pdev;
 	ab->hif.ops = &ath12k_pci_hif_ops;
+	ab->fw_mode = ATH12K_FIRMWARE_MODE_NORMAL;
 	pci_set_drvdata(pdev, ab);
 	spin_lock_init(&ab_pci->window_lock);
 
+	/* Set fixed_mem_region to true for platforms that support reserved
+	 * memory from DT. If memory is reserved from DT for FW, the ath12k
+	 * driver need not allocate memory.
+	 */
+	if (!of_property_read_u32(ab->dev->of_node, "memory-region", &addr))
+		ab->bus_params.fixed_mem_region = true;
+
+	/* This is a HACK to bring up the qcn9224 with segmented memory */
+	if (ath12k_fw_mem_seg)
+		ab->bus_params.fixed_mem_region = false;
+
 	ret = ath12k_pci_claim(ab_pci, pdev);
 	if (ret) {
 		ath12k_err(ab, "failed to claim device: %d\n", ret);
@@ -1204,7 +967,7 @@
 
 	switch (pci_dev->device) {
 	case QCN9274_DEVICE_ID:
-		ab_pci->msi_config = &ath12k_msi_config[0];
+		ab->msi.config = &ath12k_msi_config[ATH12K_MSI_CONFIG_PCI];
 		ab->static_window_map = true;
 		ab_pci->pci_ops = &ath12k_pci_ops_qcn9274;
 		ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
@@ -1225,10 +988,22 @@
 		}
 		break;
 	case WCN7850_DEVICE_ID:
-		ab_pci->msi_config = &ath12k_msi_config[0];
+		ab->msi.config = &ath12k_msi_config[ATH12K_MSI_CONFIG_PCI];
 		ab->static_window_map = false;
-		ab->hw_rev = ATH12K_HW_WCN7850_HW20;
 		ab_pci->pci_ops = &ath12k_pci_ops_wcn7850;
+		ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
+					   &soc_hw_version_minor);
+		switch (soc_hw_version_major) {
+		case ATH12K_PCI_SOC_HW_VERSION_2:
+			ab->hw_rev = ATH12K_HW_WCN7850_HW20;
+			break;
+		default:
+			dev_err(&pdev->dev,
+				"Unknown hardware version found for WCN7850: 0x%x\n",
+				soc_hw_version_major);
+			ret = -EOPNOTSUPP;
+			goto err_pci_free_region;
+		}
 		break;
 
 	default:
@@ -1266,7 +1041,7 @@
 
 	ath12k_pci_init_qmi_ce_config(ab);
 
-	ret = ath12k_pci_config_irq(ab);
+	ret = ath12k_pcic_config_irq(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to config irq: %d\n", ret);
 		goto err_ce_free;
@@ -1277,10 +1052,11 @@
 		ath12k_err(ab, "failed to init core: %d\n", ret);
 		goto err_free_irq;
 	}
+
 	return 0;
 
 err_free_irq:
-	ath12k_pci_free_irq(ab);
+	ath12k_pcic_free_irq(ab);
 
 err_ce_free:
 	ath12k_ce_free_pipes(ab);
@@ -1310,11 +1086,13 @@
 
 	if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
 		ath12k_pci_power_down(ab);
+		ath12k_debugfs_soc_destroy(ab);
 		ath12k_qmi_deinit_service(ab);
+		ath12k_core_put_hw_group(ab);
 		goto qmi_fail;
 	}
 
-	set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);
+	set_bit(ATH12K_FLAG_UNREGISTERING, &ab->ag->dev_flags);
 
 	cancel_work_sync(&ab->reset_work);
 	ath12k_core_deinit(ab);
@@ -1322,7 +1100,7 @@
 qmi_fail:
 	ath12k_mhi_unregister(ab_pci);
 
-	ath12k_pci_free_irq(ab);
+	ath12k_pcic_free_irq(ab);
 	ath12k_pci_msi_free(ab_pci);
 	ath12k_pci_free_region(ab_pci);
 
@@ -1335,7 +1113,16 @@
 {
 	struct ath12k_base *ab = pci_get_drvdata(pdev);
 
-	ath12k_pci_power_down(ab);
+	if (!ath12k_en_shutdown)
+		return;
+
+	set_bit(ATH12K_FLAG_UNREGISTERING, &ab->ag->dev_flags);
+	if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags)) {
+		ath12k_warn(ab, "SSR recovery in progress, interrupting ssr recovery due to shutdown request\n");
+		return;
+	}
+	cancel_work_sync(&ab->reset_work);
+	ath12k_core_deinit(ab);
 }
 
 static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev)
@@ -1375,7 +1162,7 @@
 	.driver.pm = &ath12k_pci_pm_ops,
 };
 
-static int ath12k_pci_init(void)
+int ath12k_pci_init(void)
 {
 	int ret;
 
@@ -1383,19 +1170,12 @@
 	if (ret) {
 		pr_err("failed to register ath12k pci driver: %d\n",
 		       ret);
-		return ret;
 	}
 
-	return 0;
+	return ret;
 }
-module_init(ath12k_pci_init);
 
-static void ath12k_pci_exit(void)
+void ath12k_pci_exit(void)
 {
 	pci_unregister_driver(&ath12k_pci_driver);
 }
-
-module_exit(ath12k_pci_exit);
-
-MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN PCIe devices");
-MODULE_LICENSE("Dual BSD/GPL");
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/pci.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/pci.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/pci.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/pci.h	2024-01-19 17:01:19.869847139 +0100
@@ -53,6 +53,9 @@
 #define WLAON_QFPROM_PWR_CTRL_REG		0x01f8031c
 #define QFPROM_PWR_CTRL_VDD4BLOW_MASK		0x4
 
+#define QCN9224_QFPROM_RAW_RFA_PDET_ROW13_LSB	0x1E20338
+#define OTP_BOARD_ID_MASK			GENMASK(15, 0)
+
 #define PCI_BAR_WINDOW0_BASE	0x1E00000
 #define PCI_BAR_WINDOW0_END	0x1E7FFFC
 #define PCI_SOC_RANGE_MASK	0x3FFF
@@ -68,6 +71,12 @@
 #define ATH12K_PCI_SOC_HW_VERSION_1	1
 #define ATH12K_PCI_SOC_HW_VERSION_2	2
 
+enum ppeds_irq_type {
+	PPEDS_IRQ_PPE2TCL,
+	PPEDS_IRQ_REO2PPE,
+	PPEDS_IRQ_PPE_WBM2SW_REL,
+};
+
 struct ath12k_msi_user {
 	const char *name;
 	int num_vectors;
@@ -108,6 +117,7 @@
 	/* enum ath12k_pci_flags */
 	unsigned long flags;
 	u16 link_ctl;
+	u32 instance;
 	const struct ath12k_pci_ops *pci_ops;
 };
 
@@ -116,10 +126,12 @@
 	return (struct ath12k_pci *)ab->drv_priv;
 }
 
+bool ath12k_pci_has_board_id_override(struct ath12k_base *ab,
+				      u16 *ov_board_id);
 int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name,
 				       int *num_vectors, u32 *user_base_data,
 				       u32 *base_vector);
-int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector);
+int ath12k_pci_get_msi_irq(struct ath12k_base *ab, unsigned int vector);
 void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value);
 u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset);
 int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
@@ -138,4 +150,11 @@
 int ath12k_pci_start(struct ath12k_base *ab);
 int ath12k_pci_power_up(struct ath12k_base *ab);
 void ath12k_pci_power_down(struct ath12k_base *ab);
+void ath12k_pci_ppeds_free_interrupts(struct ath12k_base *ab);
+int ath12k_pci_ppeds_register_interrupts(struct ath12k_base *ab, int type,
+					int vector, int ring_num);
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+void ath12k_pci_ppeds_irq_enable(struct ath12k_base *ab, enum ppeds_irq_type type);
+void ath12k_pci_ppeds_irq_disable(struct ath12k_base *ab, enum ppeds_irq_type type);
+#endif
 #endif /* ATH12K_PCI_H */
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/peer.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/peer.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/peer.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/peer.c	2024-03-18 14:40:14.859741552 +0100
@@ -8,6 +8,23 @@
 #include "peer.h"
 #include "debug.h"
 
+static struct ath12k_ml_peer *ath12k_ml_peer_find(struct ath12k_hw *ah,
+						  const u8 *addr)
+{
+	struct ath12k_ml_peer *ml_peer;
+
+	lockdep_assert_held(&ah->data_lock);
+
+	list_for_each_entry(ml_peer, &ah->ml_peers, list) {
+		if (!ether_addr_equal(ml_peer->addr, addr))
+			continue;
+
+		return ml_peer;
+	}
+
+	return NULL;
+}
+
 struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
 				     const u8 *addr)
 {
@@ -49,33 +66,71 @@
 struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
 					     const u8 *addr)
 {
-	struct ath12k_peer *peer;
 
 	lockdep_assert_held(&ab->base_lock);
 
-	list_for_each_entry(peer, &ab->peers, list) {
-		if (!ether_addr_equal(peer->addr, addr))
-			continue;
+	if (!ab->rhead_peer_addr)
+		return NULL;
+
+	return rhashtable_lookup_fast(ab->rhead_peer_addr, addr,
+				      ab->rhash_peer_addr_param);
 
-		return peer;
 	}
 
+static struct ath12k_peer *ath12k_peer_find_by_ml_id(struct ath12k_base *ab,
+						     int ml_peer_id)
+{
+	struct ath12k_peer *peer;
+
+	lockdep_assert_held(&ab->base_lock);
+
+	list_for_each_entry(peer, &ab->peers, list)
+		if (ml_peer_id == peer->ml_peer_id)
+			return peer;
+
 	return NULL;
 }
 
-struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
+struct ath12k_peer *ath12k_peer_find_list_by_id(struct ath12k_base *ab,
 					   int peer_id)
 {
 	struct ath12k_peer *peer;
 
 	lockdep_assert_held(&ab->base_lock);
 
-	list_for_each_entry(peer, &ab->peers, list)
-		if (peer_id == peer->peer_id)
+	if (peer_id == ATH12K_PEER_ID_INVALID)
+		return NULL;
+
+	if (peer_id & ATH12K_ML_PEER_ID_VALID) {
+		return ath12k_peer_find_by_ml_id(ab, peer_id);
+	} else {
+		list_for_each_entry(peer, &ab->peers, list) {
+			if (peer->peer_id == peer_id)
 			return peer;
+			}
 
 	return NULL;
 }
+}
+
+struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
+					    int peer_id)
+{
+	lockdep_assert_held(&ab->base_lock);
+
+	if (peer_id == ATH12K_PEER_ID_INVALID)
+		return NULL;
+
+	if (peer_id & ATH12K_ML_PEER_ID_VALID) {
+		return ath12k_peer_find_by_ml_id(ab, peer_id);
+	} else {
+		if (!ab->rhead_peer_id)
+			return NULL;
+
+		return rhashtable_lookup_fast(ab->rhead_peer_id, &peer_id,
+		  			      ab->rhash_peer_id_param);
+	}
+}
 
 bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id)
 {
@@ -92,18 +147,20 @@
 	spin_unlock_bh(&ab->base_lock);
 	return false;
 }
-
-struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab,
-					    int ast_hash)
+struct ath12k_peer *ath12k_peer_find_by_vdev_id(struct ath12k_base *ab,
+		                        int vdev_id)
 {
 	struct ath12k_peer *peer;
 
-	lockdep_assert_held(&ab->base_lock);
+	spin_lock_bh(&ab->base_lock);
 
-	list_for_each_entry(peer, &ab->peers, list)
-		if (ast_hash == peer->ast_hash)
+	list_for_each_entry(peer, &ab->peers, list) {
+		if (vdev_id == peer->vdev_id) {
+			spin_unlock_bh(&ab->base_lock);
 			return peer;
-
+		}
+	}
+	spin_unlock_bh(&ab->base_lock);
 	return NULL;
 }
 
@@ -113,17 +170,21 @@
 
 	spin_lock_bh(&ab->base_lock);
 
-	peer = ath12k_peer_find_by_id(ab, peer_id);
+	peer = ath12k_peer_find_list_by_id(ab, peer_id);
 	if (!peer) {
 		ath12k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
 			    peer_id);
 		goto exit;
 	}
 
-	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
+	ath12k_dbg(ab, ATH12K_DBG_PEER, "htt peer unmap vdev %d peer %pM id %d\n",
 		   peer->vdev_id, peer->addr, peer_id);
 
 	list_del(&peer->list);
+#ifdef CONFIG_ATH12K_SAWF
+	if (peer->sawf_ctx_peer.telemetry_peer_ctx)
+		ath12k_telemetry_peer_ctx_free(peer->sawf_ctx_peer.telemetry_peer_ctx);
+#endif
 	kfree(peer);
 	wake_up(&ab->peer_mapping_wq);
 
@@ -148,17 +209,75 @@
 		peer->ast_hash = ast_hash;
 		peer->hw_peer_id = hw_peer_id;
 		ether_addr_copy(peer->addr, mac_addr);
+#ifdef CONFIG_ATH12K_SAWF
+		if (ath12k_sawf_enable)
+			ath12k_sdwf_fill_hbucket_type(peer);
+#endif
 		list_add(&peer->list, &ab->peers);
 		wake_up(&ab->peer_mapping_wq);
 	}
 
-	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
+	ath12k_dbg(ab, ATH12K_DBG_PEER, "htt peer map vdev %d peer %pM id %d\n",
 		   vdev_id, mac_addr, peer_id);
 
 exit:
 	spin_unlock_bh(&ab->base_lock);
 }
 
+void ath12k_peer_mlo_map_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+	struct ath11k_htt_mlo_peer_map_msg *msg;
+	u16 ml_peer_id;
+	struct ath12k_peer *peer;
+	u16 mld_mac_h16;
+	u8 mld_addr[ETH_ALEN];
+
+	msg = (struct ath11k_htt_mlo_peer_map_msg *)skb->data;
+
+	ml_peer_id = FIELD_GET(ATH12K_HTT_MLO_PEER_MAP_INFO0_PEER_ID, msg->info0);
+
+	ml_peer_id |= ATH12K_ML_PEER_ID_VALID;
+
+	spin_lock_bh(&ab->base_lock);
+	peer = ath12k_peer_find_list_by_id(ab, ml_peer_id);
+
+	/* TODO a sync wait to check ml peer map success or delete
+	 * ml peer info in all link peers and make peer assoc failure
+	 * TBA after testing basic changes
+	 */
+	if (!peer) {
+		ath12k_warn(ab, "peer corresponding to ml peer id %d not found", ml_peer_id);
+		spin_unlock_bh(&ab->base_lock);
+		return;
+	}
+	mld_mac_h16 = FIELD_GET(ATH12K_HTT_MLO_PEER_MAP_MAC_ADDR_H16,
+				msg->mac_addr.mac_addr_h16);
+	ath12k_dp_get_mac_addr(msg->mac_addr.mac_addr_l32, mld_mac_h16, mld_addr);
+
+	WARN_ON(memcmp(mld_addr, peer->ml_addr, ETH_ALEN));
+
+	spin_unlock_bh(&ab->base_lock);
+
+	ath12k_dbg(ab, ATH12K_DBG_PEER, "htt MLO peer map peer %pM id %d\n",
+		   mld_addr, ml_peer_id);
+
+	/* TODO rx queue setup for the ML peer */
+}
+
+void ath12k_peer_mlo_unmap_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+	struct ath11k_htt_mlo_peer_unmap_msg *msg;
+	u16 ml_peer_id;
+
+	msg = (struct ath11k_htt_mlo_peer_unmap_msg *)skb->data;
+
+	ml_peer_id = FIELD_GET(ATH12K_HTT_MLO_PEER_UNMAP_PEER_ID, msg->info0);
+
+	ml_peer_id |= ATH12K_ML_PEER_ID_VALID;
+
+	ath12k_dbg(ab, ATH12K_DBG_PEER, "htt MLO peer unmap peer ml id %d\n", ml_peer_id);
+}
+
 static int ath12k_wait_for_peer_common(struct ath12k_base *ab, int vdev_id,
 				       const u8 *addr, bool expect_mapped)
 {
@@ -181,6 +300,80 @@
 	return 0;
 }
 
+static inline int ath12k_peer_rhash_insert(struct ath12k_base *ab,
+					   struct rhashtable *rtbl,
+					   struct rhash_head *rhead,
+					   struct rhashtable_params *params,
+					   void *key)
+{
+	struct ath12k_peer *tmp;
+
+	lockdep_assert_held(&ab->tbl_mtx_lock);
+
+	tmp = rhashtable_lookup_get_insert_fast(rtbl, rhead, *params);
+
+	if (!tmp)
+		return 0;
+	else if (IS_ERR(tmp))
+		return PTR_ERR(tmp);
+	else
+		return -EEXIST;
+}
+static inline int ath12k_peer_rhash_remove(struct ath12k_base *ab,
+                                           struct rhashtable *rtbl,
+                                           struct rhash_head *rhead,
+                                           struct rhashtable_params *params)
+{
+	int ret;
+
+	lockdep_assert_held(&ab->tbl_mtx_lock);
+
+	ret = rhashtable_remove_fast(rtbl, rhead, *params);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	return 0;
+}
+
+static int ath12k_peer_rhash_add(struct ath12k_base *ab, struct ath12k_peer *peer)
+{
+	int ret;
+
+	lockdep_assert_held(&ab->base_lock);
+	lockdep_assert_held(&ab->tbl_mtx_lock);
+
+	if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
+		return -EPERM;
+
+	if (peer->rhash_done)
+		return 0;
+
+	ret = ath12k_peer_rhash_insert(ab, ab->rhead_peer_id, &peer->rhash_id,
+				       &ab->rhash_peer_id_param, &peer->peer_id);
+	if (ret) {
+		ath12k_warn(ab, "failed to add peer %pM with id %d in rhash_id ret %d\n",
+			    peer->addr, peer->peer_id, ret);
+		return ret;
+	}
+
+	ret = ath12k_peer_rhash_insert(ab, ab->rhead_peer_addr, &peer->rhash_addr,
+				       &ab->rhash_peer_addr_param, &peer->addr);
+	if (ret) {
+		ath12k_warn(ab, "failed to add peer %pM with id %d in rhash_addr ret %d\n",
+			    peer->addr, peer->peer_id, ret);
+		goto err_clean;
+	}
+
+	peer->rhash_done = true;
+	return 0;
+
+err_clean:
+	ath12k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
+				 &ab->rhash_peer_id_param);
+	return ret;
+}
+
+
 void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id)
 {
 	struct ath12k_peer *peer, *tmp;
@@ -188,6 +381,7 @@
 
 	lockdep_assert_held(&ar->conf_mutex);
 
+	mutex_lock(&ab->tbl_mtx_lock);
 	spin_lock_bh(&ab->base_lock);
 	list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
 		if (peer->vdev_id != vdev_id)
@@ -195,13 +389,18 @@
 
 		ath12k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
 			    peer->addr, vdev_id);
-
+		ath12k_peer_rhash_delete(ab, peer);
 		list_del(&peer->list);
+#ifdef CONFIG_ATH12K_SAWF
+		if (peer->sawf_ctx_peer.telemetry_peer_ctx)
+			ath12k_telemetry_peer_ctx_free(peer->sawf_ctx_peer.telemetry_peer_ctx);
+#endif
 		kfree(peer);
 		ar->num_peers--;
 	}
 
 	spin_unlock_bh(&ab->base_lock);
+	mutex_unlock(&ab->tbl_mtx_lock);
 }
 
 static int ath12k_wait_for_peer_deleted(struct ath12k *ar, int vdev_id, const u8 *addr)
@@ -217,72 +416,251 @@
 
 	ret = ath12k_wait_for_peer_deleted(ar, vdev_id, addr);
 	if (ret) {
-		ath12k_warn(ar->ab, "failed wait for peer deleted");
+		ath12k_warn(ar->ab, "failed wait for peer deleted peer_addr : %pM\n", addr);
 		return ret;
 	}
 
 	time_left = wait_for_completion_timeout(&ar->peer_delete_done,
 						3 * HZ);
 	if (time_left == 0) {
-		ath12k_warn(ar->ab, "Timeout in receiving peer delete response\n");
+		ath12k_warn(ar->ab, "Timeout in receiving peer delete response peer_addr : %pM\n",
+			    addr);
 		return -ETIMEDOUT;
 	}
 
 	return 0;
 }
 
-int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
+int ath12k_peer_delete_send(struct ath12k *ar, u32 vdev_id, const u8 *addr)
 {
+	struct ath12k_peer *peer;
+	struct ath12k_base *ab = ar->ab;
 	int ret;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
+	mutex_lock(&ab->tbl_mtx_lock);
+	spin_lock_bh(&ab->base_lock);
+
+	peer = ath12k_peer_find_by_addr(ab, addr);
+	if (peer && peer->vdev_id == vdev_id)
+		ath12k_peer_rhash_delete(ab, peer);
+
+	if (!peer)
+		peer = ath12k_peer_find(ab, vdev_id, addr);
+
+	if (!peer) {
+		spin_unlock_bh(&ab->base_lock);
+		mutex_unlock(&ab->tbl_mtx_lock);
+
+		ath12k_warn(ab,
+			    "failed to find peer vdev_id %d addr %pM in delete\n",
+			    vdev_id, addr);
+		return -EINVAL;
+	}
+
+	spin_unlock_bh(&ab->base_lock);
+	mutex_unlock(&ab->tbl_mtx_lock);
+
+
 	reinit_completion(&ar->peer_delete_done);
 
 	ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
 	if (ret) {
-		ath12k_warn(ar->ab,
+		ath12k_warn(ab,
 			    "failed to delete peer vdev_id %d addr %pM ret %d\n",
 			    vdev_id, addr, ret);
 		return ret;
 	}
 
+	return 0;
+}
+
+static int __ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, const u8 *addr)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ret = ath12k_peer_delete_send(ar, vdev_id, addr);
+	if (ret)
+		return ret;
+
 	ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr);
 	if (ret)
 		return ret;
 
+	return 0;
+}
+
+int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, const u8 *addr)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ret = __ath12k_peer_delete(ar, vdev_id, addr);
+	if (ret)
+		return ret;
+
 	ar->num_peers--;
 
 	return 0;
 }
 
+int ath12k_ml_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta)
+{
+	struct ath12k_link_vif *arvif;
+	struct ath12k_link_sta *arsta;
+	struct ieee80211_sta *sta;
+	struct ath12k *ar;
+	int ret, err_ret = 0;
+	u8 link_id = 0;
+	struct ath12k_hw *ah = ahvif->ah;
+	unsigned long links;
+
+	lockdep_assert_held(&ah->conf_mutex);
+
+	sta = container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
+
+	if (!sta->mlo)
+		return -EINVAL;
+
+	/* FW expects delete of all link peers at once before waiting for reception
+	 * of peer unmap or delete responses
+	 */
+	links = sta->valid_links;
+	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif = ahvif->link[link_id];
+		arsta = ahsta->link[link_id];
+
+		if (!arvif || !arsta)
+			continue;
+
+		ar = arvif->ar;
+
+		if (!ar)
+			continue;
+		cancel_work_sync(&arsta->update_wk);
+
+		if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags) ||
+		    test_bit(ATH12K_FLAG_RECOVERY, &ar->ab->dev_flags) ||
+		    test_bit(ATH12K_FLAG_UMAC_RECOVERY_START, &ar->ab->dev_flags))
+			continue;
+
+
+		mutex_lock(&ar->conf_mutex);
+		ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr);
+
+		ret = ath12k_peer_delete_send(ar, arvif->vdev_id, arsta->addr);
+		if (ret) {
+			mutex_unlock(&ar->conf_mutex);
+			ath12k_warn(ar->ab,
+				    "failed to delete peer vdev_id %d addr %pM ret %d\n",
+				    arvif->vdev_id, arsta->addr, ret);
+			err_ret = ret;
+			continue;
+		}
+		mutex_unlock(&ar->conf_mutex);
+	}
+
+	/* Ensure all link peers are deleted and unmapped */
+	links = sta->valid_links;
+	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif = ahvif->link[link_id];
+		arsta = ahsta->link[link_id];
+
+		if (!arvif || !arsta)
+			continue;
+
+		ar = arvif->ar;
+
+		if (!ar)
+			continue;
+
+		if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags) ||
+		    test_bit(ATH12K_FLAG_RECOVERY, &ar->ab->dev_flags) ||
+		    test_bit(ATH12K_FLAG_UMAC_RECOVERY_START, &ar->ab->dev_flags))
+			continue;
+		mutex_lock(&ar->conf_mutex);
+		ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, arsta->addr);
+		if (ret) {
+			err_ret = ret;
+			mutex_unlock(&ar->conf_mutex);
+			continue;
+		}
+		ar->num_peers--;
+		mutex_unlock(&ar->conf_mutex);
+	}
+
+	return err_ret;
+}
+
 static int ath12k_wait_for_peer_created(struct ath12k *ar, int vdev_id, const u8 *addr)
 {
 	return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
 }
 
-int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif,
+int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
 		       struct ieee80211_sta *sta,
 		       struct ath12k_wmi_peer_create_arg *arg)
 {
 	struct ath12k_peer *peer;
-	int ret;
+	struct ath12k_sta *ahsta;
+	struct ath12k_link_sta *arsta;
+	u8 link_id = arvif->link_id;
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
+	struct ath12k_ml_peer *ml_peer;
+	int ret, fbret;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	if (ar->num_peers > (ar->max_num_peers - 1)) {
+	if (ar->num_peers >= (ar->max_num_peers - 1)) {
 		ath12k_warn(ar->ab,
 			    "failed to create peer due to insufficient peer entry resource in firmware\n");
 		return -ENOBUFS;
 	}
 
+	/* Check if a ML peer with same address as this link peer already
+	 * exists
+	 */
+	if (sta) {
+		ahsta = ath12k_sta_to_ahsta(sta);
+		spin_lock_bh(&ar->ah->data_lock);
+		ml_peer = ath12k_ml_peer_find(ar->ah, arg->peer_addr);
+		if (ml_peer && (!sta->mlo || ml_peer->id != ahsta->ml_peer_id)) {
+			spin_unlock_bh(&ar->ah->data_lock);
+			ath12k_warn(ar->ab,
+				    "failed to create link peer %pM due to conflicting address with already associated ML peer %pM with ml peer id %d\n",
+				    arg->peer_addr, ml_peer->addr,
+				    ml_peer->id);
+			return -EINVAL;
+		}
+		spin_unlock_bh(&ar->ah->data_lock);
+	}
+
+	mutex_lock(&ar->ab->tbl_mtx_lock);
 	spin_lock_bh(&ar->ab->base_lock);
 	peer = ath12k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, arg->peer_addr);
 	if (peer) {
+		ath12k_warn(ar->ab, "Peer %pM already found in pdev_idx %d vdev %d\n",
+			    arg->peer_addr, ar->pdev_idx, peer->vdev_id);
 		spin_unlock_bh(&ar->ab->base_lock);
+		mutex_unlock(&ar->ab->tbl_mtx_lock);
 		return -EINVAL;
 	}
+
+	/* In case of Split PHY and roaming scenario, pdev idx
+	 * might differ but both the pdev will share same rhash
+	 * table. In that case update the rhash table if peer is
+	 * already present
+	 */
+	peer = ath12k_peer_find_by_addr(ar->ab, arg->peer_addr);
+	if (peer)
+		ath12k_peer_rhash_delete(ar->ab, peer);
+
 	spin_unlock_bh(&ar->ab->base_lock);
+	mutex_unlock(&ar->ab->tbl_mtx_lock);
 
 	ret = ath12k_wmi_send_peer_create_cmd(ar, arg);
 	if (ret) {
@@ -297,46 +675,385 @@
 	if (ret)
 		return ret;
 
+	mutex_lock(&ar->ab->tbl_mtx_lock);
 	spin_lock_bh(&ar->ab->base_lock);
 
 	peer = ath12k_peer_find(ar->ab, arg->vdev_id, arg->peer_addr);
 	if (!peer) {
 		spin_unlock_bh(&ar->ab->base_lock);
+		mutex_unlock(&ar->ab->tbl_mtx_lock);
 		ath12k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
 			    arg->peer_addr, arg->vdev_id);
 
-		reinit_completion(&ar->peer_delete_done);
-
-		ret = ath12k_wmi_send_peer_delete_cmd(ar, arg->peer_addr,
-						      arg->vdev_id);
-		if (ret) {
-			ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
-				    arg->vdev_id, arg->peer_addr);
-			return ret;
+		goto cleanup;
 		}
-
-		ret = ath12k_wait_for_peer_delete_done(ar, arg->vdev_id,
-						       arg->peer_addr);
-		if (ret)
-			return ret;
-
-		return -ENOENT;
+	ret = ath12k_peer_rhash_add(ar->ab, peer);
+	if (ret) {
+		spin_unlock_bh(&ar->ab->base_lock);
+		mutex_unlock(&ar->ab->tbl_mtx_lock);
+		goto cleanup;
 	}
 
 	peer->pdev_idx = ar->pdev_idx;
 	peer->sta = sta;
 
-	if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+	if (vif->type == NL80211_IFTYPE_STATION) {
 		arvif->ast_hash = peer->ast_hash;
 		arvif->ast_idx = peer->hw_peer_id;
+		arvif->desc.info4 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO4_SEARCH_INDEX, arvif->ast_idx) |
+				    FIELD_PREP(HAL_TCL_DATA_CMD_INFO4_CACHE_SET_NUM, arvif->ast_hash);
 	}
 
 	peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
 	peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
+	peer->vif = vif;
+#ifdef CONFIG_MAC80211_PPE_SUPPORT
+	peer->ppe_vp_num = peer->vif->ppe_vp_num;
+#endif
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+	peer->ppe_vp_num = arvif->ppe_vp_num;
+#endif
+
+	if (sta) {
+		ahsta = ath12k_sta_to_ahsta(sta);
+		arsta = ahsta->link[link_id];
+		arsta->tcl_metadata |= HTT_TCL_META_DATA_GET(0,
+							     HTT_TCL_META_DATA_TYPE) |
+				       HTT_TCL_META_DATA_GET(peer->peer_id,
+				       			     HTT_TCL_META_DATA_PEER_ID);
+		peer->link_id = arsta->link_id;
+
+		/* set HTT extension valid bit to 0 by default */
+		arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
+
+		/* Fill ML info into created peer */
+		if (sta->mlo) {
+			peer->ml_peer_id = ahsta->ml_peer_id | ATH12K_ML_PEER_ID_VALID;
+			ether_addr_copy(peer->ml_addr, sta->addr);
+			peer->mlo = true;
+		} else {
+			peer->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
+			peer->mlo = false;
+		}
+	}
 
 	ar->num_peers++;
 
+	ath12k_dbg(ar->ab, ATH12K_DBG_PEER, "peer created %pM\n", arg->peer_addr);
+
 	spin_unlock_bh(&ar->ab->base_lock);
+	mutex_unlock(&ar->ab->tbl_mtx_lock);
+
+	return 0;
+
+cleanup:
+	fbret = __ath12k_peer_delete(ar, arg->vdev_id, arg->peer_addr);
+	if (fbret)
+		ath12k_warn(ar->ab, "failed peer %pM delete vdev_id %d fallback ret %d\n",
+			    arg->peer_addr, arg->vdev_id, fbret);
+
+	return ret;
+
+}
+
+static u16 ath12k_mac_alloc_ml_peer_id(struct ath12k_hw *ah)
+{
+
+	u16 ml_peer_id;
+
+	lockdep_assert_held(&ah->conf_mutex);
+
+	for (ml_peer_id = 0; ml_peer_id < ATH12K_MAX_MLO_PEERS; ml_peer_id++) {
+		if (test_bit(ml_peer_id, ah->free_ml_peer_id_map))
+			continue;
+
+		set_bit(ml_peer_id, ah->free_ml_peer_id_map);
+		break;
+	}
+
+	if (ml_peer_id == ATH12K_MAX_MLO_PEERS)
+		ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
+
+	ath12k_dbg(NULL, ATH12K_DBG_PEER, "Allocated ml_peer_id:%d", ml_peer_id);
+
+	return ml_peer_id;
+}
+
+int ath12k_ml_peer_create(struct ath12k_hw *ah, struct ieee80211_sta *sta)
+{
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_ml_peer *ml_peer;
+
+	lockdep_assert_held(&ah->conf_mutex);
+
+	if (!sta->mlo)
+		return -EINVAL;
+
+	spin_lock_bh(&ah->data_lock);
+	ml_peer = ath12k_ml_peer_find(ah, sta->addr);
+	if (ml_peer) {
+		spin_unlock_bh(&ah->data_lock);
+		ath12k_err(NULL, "ML peer(id=%d) exists already, unable to add new entry for %pM",
+			   ml_peer->id, sta->addr);
+		return -EEXIST;
+	}
+
+	ml_peer = kzalloc(sizeof(*ml_peer), GFP_ATOMIC);
+	if (!ml_peer) {
+		spin_unlock_bh(&ah->data_lock);
+		ath12k_err(NULL, "unable to allocate new ML peer for %pM",
+			   sta->addr);
+		return -ENOMEM;
+	}
+
+	ahsta->ml_peer_id = ath12k_mac_alloc_ml_peer_id(ah);
+
+	if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
+		kfree(ml_peer);
+		spin_unlock_bh(&ah->data_lock);
+		ath12k_err(NULL, "unable to allocate ml peer id for sta %pM", sta->addr);
+		return -ENOMEM;
+	}
+
+	ether_addr_copy(ml_peer->addr, sta->addr);
+	ml_peer->id = ahsta->ml_peer_id;
+	list_add(&ml_peer->list, &ah->ml_peers);
+	spin_unlock_bh(&ah->data_lock);
+
+	ath12k_dbg(NULL, ATH12K_DBG_PEER, "ML peer created for %pM id %d\n",
+		   sta->addr, ahsta->ml_peer_id);
+	return 0;
+}
+
+int ath12k_ml_peer_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta)
+{
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_ml_peer *ml_peer;
+
+	lockdep_assert_held(&ah->conf_mutex);
+	if (!sta->mlo)
+		return -EINVAL;
+
+	clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map);
+	ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
+
+	spin_lock_bh(&ah->data_lock);
+	ml_peer = ath12k_ml_peer_find(ah, sta->addr);
+	if (!ml_peer) {
+		spin_unlock_bh(&ah->data_lock);
+		ath12k_err(NULL, "ML peer for %pM not found", sta->addr);
+		return -EINVAL;
+	}
+
+	list_del(&ml_peer->list);
+	kfree(ml_peer);
+	spin_unlock_bh(&ah->data_lock);
+
+	ath12k_dbg(NULL, ATH12K_DBG_PEER, "ML peer deleted for %pM\n",
+		   sta->addr);
+	return 0;
+}
+
+int ath12k_peer_rhash_delete(struct ath12k_base *ab, struct ath12k_peer *peer)
+{
+	int ret;
+
+	lockdep_assert_held(&ab->base_lock);
+	lockdep_assert_held(&ab->tbl_mtx_lock);
+
+	if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
+		return -EPERM;
 
+	if (!peer->rhash_done)
 	return 0;
+
+	ret = ath12k_peer_rhash_remove(ab, ab->rhead_peer_addr, &peer->rhash_addr,
+				       &ab->rhash_peer_addr_param);
+	if (ret) {
+		ath12k_warn(ab, "failed to remove peer %pM id %d in rhash_addr ret %d\n",
+			    peer->addr, peer->peer_id, ret);
+		return ret;
+	}
+
+	ret = ath12k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
+				       &ab->rhash_peer_id_param);
+	if (ret) {
+		ath12k_warn(ab, "failed to remove peer %pM id %d in rhash_id ret %d\n",
+			    peer->addr, peer->peer_id, ret);
+		return ret;
+	}
+
+	peer->rhash_done = false;
+
+	return 0;
+}
+
+static int ath12k_peer_rhash_id_tbl_init(struct ath12k_base *ab)
+{
+	struct rhashtable_params *param;
+	struct rhashtable *rhash_id_tbl;
+	int ret;
+	size_t size;
+
+	lockdep_assert_held(&ab->tbl_mtx_lock);
+
+	if (ab->rhead_peer_id)
+		return 0;
+
+	size = sizeof(*ab->rhead_peer_id);
+	rhash_id_tbl = kzalloc(size, GFP_KERNEL);
+	if (!rhash_id_tbl) {
+		ath12k_warn(ab, "failed to init rhash id table due to no mem (size %zu)\n",
+			    size);
+		return -ENOMEM;
+	}
+
+	param = &ab->rhash_peer_id_param;
+
+	param->key_offset = offsetof(struct ath12k_peer, peer_id);
+	param->head_offset = offsetof(struct ath12k_peer, rhash_id);
+	param->key_len = sizeof_field(struct ath12k_peer, peer_id);
+	param->automatic_shrinking = true;
+	param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV;
+
+	ret = rhashtable_init(rhash_id_tbl, param);
+	if (ret) {
+		ath12k_warn(ab, "failed to init peer id rhash table %d\n", ret);
+		goto err_free;
+	}
+
+	spin_lock_bh(&ab->base_lock);
+
+	if (!ab->rhead_peer_id) {
+		ab->rhead_peer_id = rhash_id_tbl;
+	} else {
+		spin_unlock_bh(&ab->base_lock);
+		goto cleanup_tbl;
+	}
+
+	spin_unlock_bh(&ab->base_lock);
+
+	return 0;
+
+cleanup_tbl:
+	rhashtable_destroy(rhash_id_tbl);
+err_free:
+	kfree(rhash_id_tbl);
+
+	return ret;
+}
+
+static int ath12k_peer_rhash_addr_tbl_init(struct ath12k_base *ab)
+{
+	struct rhashtable_params *param;
+	struct rhashtable *rhash_addr_tbl;
+	int ret;
+	size_t size;
+
+	lockdep_assert_held(&ab->tbl_mtx_lock);
+
+	if (ab->rhead_peer_addr)
+		return 0;
+
+	size = sizeof(*ab->rhead_peer_addr);
+	rhash_addr_tbl = kzalloc(size, GFP_KERNEL);
+	if (!rhash_addr_tbl) {
+		ath12k_warn(ab, "failed to init rhash addr table due to no mem (size %zu)\n",
+			    size);
+		return -ENOMEM;
+	}
+
+	param = &ab->rhash_peer_addr_param;
+
+	param->key_offset = offsetof(struct ath12k_peer, addr);
+	param->head_offset = offsetof(struct ath12k_peer, rhash_addr);
+	param->key_len = sizeof_field(struct ath12k_peer, addr);
+	param->automatic_shrinking = true;
+	param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV;
+
+	ret = rhashtable_init(rhash_addr_tbl, param);
+	if (ret) {
+		ath12k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
+		goto err_free;
 }
+
+	spin_lock_bh(&ab->base_lock);
+
+	if (!ab->rhead_peer_addr) {
+		ab->rhead_peer_addr = rhash_addr_tbl;
+	} else {
+		spin_unlock_bh(&ab->base_lock);
+		goto cleanup_tbl;
+	}
+
+	spin_unlock_bh(&ab->base_lock);
+
+	return 0;
+
+cleanup_tbl:
+	rhashtable_destroy(rhash_addr_tbl);
+err_free:
+	kfree(rhash_addr_tbl);
+
+	return ret;
+}
+
+static inline void ath12k_peer_rhash_id_tbl_destroy(struct ath12k_base *ab)
+{
+	lockdep_assert_held(&ab->tbl_mtx_lock);
+
+	if (!ab->rhead_peer_id)
+		return;
+
+	rhashtable_destroy(ab->rhead_peer_id);
+	kfree(ab->rhead_peer_id);
+	ab->rhead_peer_id = NULL;
+}
+
+static inline void ath12k_peer_rhash_addr_tbl_destroy(struct ath12k_base *ab)
+{
+	lockdep_assert_held(&ab->tbl_mtx_lock);
+
+	if (!ab->rhead_peer_addr)
+		return;
+
+	rhashtable_destroy(ab->rhead_peer_addr);
+	kfree(ab->rhead_peer_addr);
+	ab->rhead_peer_addr = NULL;
+}
+
+int ath12k_peer_rhash_tbl_init(struct ath12k_base *ab)
+{
+	int ret;
+
+	mutex_lock(&ab->tbl_mtx_lock);
+
+	ret = ath12k_peer_rhash_id_tbl_init(ab);
+	if (ret)
+		goto out;
+
+	ret = ath12k_peer_rhash_addr_tbl_init(ab);
+	if (ret)
+		goto cleanup_tbl;
+
+	mutex_unlock(&ab->tbl_mtx_lock);
+
+	return 0;
+
+cleanup_tbl:
+	ath12k_peer_rhash_id_tbl_destroy(ab);
+out:
+	mutex_unlock(&ab->tbl_mtx_lock);
+	return ret;
+}
+
+void ath12k_peer_rhash_tbl_destroy(struct ath12k_base *ab)
+{
+	mutex_lock(&ab->tbl_mtx_lock);
+
+	ath12k_peer_rhash_addr_tbl_destroy(ab);
+	ath12k_peer_rhash_id_tbl_destroy(ab);
+
+	mutex_unlock(&ab->tbl_mtx_lock);
+}
+
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/peer.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/peer.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/peer.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/peer.h	2024-03-18 14:40:14.859741552 +0100
@@ -8,6 +8,7 @@
 #define ATH12K_PEER_H
 
 #include "dp_rx.h"
+#include "sawf.h"
 
 struct ppdu_user_delayba {
 	u16 sw_peer_id;
@@ -22,6 +23,11 @@
 struct ath12k_peer {
 	struct list_head list;
 	struct ieee80211_sta *sta;
+	struct ieee80211_vif *vif;
+#ifdef CONFIG_ATH12K_SAWF
+	struct ath12k_sawf_peer_ctx sawf_ctx_peer;
+	struct ath12k_sawf_stats sawf_stats;
+#endif /* CONFIG_ATH12K_SAWF */
 	int vdev_id;
 	u8 addr[ETH_ALEN];
 	int peer_id;
@@ -31,8 +37,19 @@
 
 	/* protected by ab->data_lock */
 	struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+
+	/* rx tid queue is setup once for primary link peer
+	 * in case of ML and cloned into partner peer data
+	 * but would be accessed (only if required)and safely
+	 * by ensuring primary parner is still valid
+	 */
 	struct ath12k_dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1];
 
+	/* peer id based rhashtable list pointer */
+	struct rhash_head rhash_id;
+	/* peer addr based rhashtable list pointer */
+	struct rhash_head rhash_addr;
+
 	/* Info used in MMIC verification of
 	 * RX fragments
 	 */
@@ -44,6 +61,38 @@
 	struct ppdu_user_delayba ppdu_stats_delayba;
 	bool delayba_flag;
 	bool is_authorized;
+
+	/* The below flag indicates whether
+	 * a peer has setup its datapath or not
+	 */
+	bool dp_setup_done;
+
+	bool mlo;
+	u16 ml_peer_id;
+	/* TODO: remove, or fill in, the info below as required during dp change */
+	/* for reference to ath12k_link_sta */
+	u8 link_id;
+
+	/* To ensure only certain work related to dp is done once */
+	bool primary_link;
+
+	/* To check if the peer entry is part of rhash table or not */
+	bool rhash_done;
+
+	/* any other ML info common for all partners can be added
+	 * here and would be same for all partner peers
+	 */
+	u8 ml_addr[ETH_ALEN];
+#if defined(CONFIG_MAC80211_PPE_SUPPORT) || defined(CONFIG_ATH12K_BONDED_DS_SUPPORT)
+	/* Duplicate PPE port number to avoid link vif lookup in rx data path */
+	int ppe_vp_num;
+#endif
+};
+
+struct ath12k_ml_peer {
+	struct list_head list;
+	u8 addr[ETH_ALEN];
+	u16 id;
 };
 
 void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id);
@@ -51,17 +100,50 @@
 			   u8 *mac_addr, u16 ast_hash, u16 hw_peer_id);
 struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
 				     const u8 *addr);
+struct ath12k_peer *ath12k_peer_find_list_by_id(struct ath12k_base *ab,
+						int peer_id);
 struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
 					     const u8 *addr);
 struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab, int peer_id);
 void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id);
-int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr);
-int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif,
+int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, const u8 *addr);
+int ath12k_ml_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta);
+int ath12k_ml_peer_create(struct ath12k_hw *ah, struct ieee80211_sta *sta);
+int ath12k_ml_peer_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta);
+int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
 		       struct ieee80211_sta *sta,
 		       struct ath12k_wmi_peer_create_arg *arg);
 int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
 				     const u8 *addr);
 bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id);
-struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab, int ast_hash);
-
+int ath12k_peer_rhash_tbl_init(struct ath12k_base *ab);
+void ath12k_peer_rhash_tbl_destroy(struct ath12k_base *ab);
+int ath12k_peer_rhash_delete(struct ath12k_base *ab, struct ath12k_peer *peer);
+void ath12k_peer_mlo_map_event(struct ath12k_base *ab, struct sk_buff *skb);
+void ath12k_peer_mlo_unmap_event(struct ath12k_base *ab, struct sk_buff *skb);
+int ath12k_peer_delete_send(struct ath12k *ar, u32 vdev_id, const u8 *addr);
+struct ath12k_peer *ath12k_peer_find_by_vdev_id(struct ath12k_base *ab,
+		                        int vdev_id);
+static inline
+struct ath12k_link_sta *ath12k_peer_get_link_sta(struct ath12k_base *ab,
+						 struct ath12k_peer *peer)
+{
+	struct ath12k_sta *ahsta;
+
+	if (!peer->sta)
+		return NULL;
+
+	ahsta = ath12k_sta_to_ahsta(peer->sta);
+	if (peer->ml_peer_id & ATH12K_ML_PEER_ID_VALID) {
+		if (!(ahsta->links_map & BIT(peer->link_id))) {
+			ath12k_warn(ab, "peer %pM id %d link_id %d can't found in STA link_map %lu\n",
+				    peer->addr, peer->peer_id, peer->link_id, ahsta->links_map);
+			return NULL;
+		}
+
+		return ahsta->link[peer->link_id];
+	} else {
+		return &ahsta->deflink;
+	}
+}
 #endif /* _PEER_H_ */
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/qmi.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/qmi.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/qmi.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/qmi.c	2024-04-11 13:36:13.766433759 +0200
@@ -8,15 +8,141 @@
 
 #include "qmi.h"
 #include "core.h"
+#include "hif.h"
+#include "pci.h"
 #include "debug.h"
+#include "coredump.h"
 #include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/ioport.h>
 #include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/devcoredump.h>
 
 #define SLEEP_CLOCK_SELECT_INTERNAL_BIT	0x02
 #define HOST_CSTATE_BIT			0x04
 #define PLATFORM_CAP_PCIE_GLOBAL_RESET	0x08
 #define ATH12K_QMI_MAX_CHUNK_SIZE	2097152
 
+bool ath12k_cold_boot_cal = 0;
+module_param_named(cold_boot_cal, ath12k_cold_boot_cal, bool, 0644);
+MODULE_PARM_DESC(cold_boot_cal,
+		 "Decrease the channel switch time but increase the driver load time (Default: true)");
+
+static struct qmi_elem_info qmi_wlfw_qdss_trace_config_download_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_wlanfw_qdss_trace_config_download_req_msg_v01,
+					   total_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_wlanfw_qdss_trace_config_download_req_msg_v01,
+					   total_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_wlanfw_qdss_trace_config_download_req_msg_v01,
+					   seg_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_wlanfw_qdss_trace_config_download_req_msg_v01,
+					   seg_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct qmi_wlanfw_qdss_trace_config_download_req_msg_v01,
+					   data_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct
+				qmi_wlanfw_qdss_trace_config_download_req_msg_v01,
+				data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLANFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(u8),
+		.array_type     = VAR_LEN_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct
+				qmi_wlanfw_qdss_trace_config_download_req_msg_v01,
+				data),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_wlanfw_qdss_trace_config_download_req_msg_v01,
+					   end_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_wlanfw_qdss_trace_config_download_req_msg_v01,
+					   end),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info qmi_wlanfw_qdss_trace_config_download_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct qmi_wlanfw_qdss_trace_config_download_resp_msg_v01,
+					   resp),
+		.ei_array       = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static bool ath12k_skip_caldata;
+module_param_named(skip_caldata, ath12k_skip_caldata, bool, 0444);
+MODULE_PARM_DESC(ath12k_skip_caldata, "Skip caldata download");
+
 static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
 	{
 		.data_type      = QMI_UNSIGNED_1_BYTE,
@@ -61,6 +187,73 @@
 	},
 };
 
+static struct qmi_elem_info qmi_wlanfw_qdss_trace_mode_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_wlanfw_qdss_trace_mode_req_msg_v01,
+				mode_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum qmi_wlanfw_qdss_trace_mode_enum_v01),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_wlanfw_qdss_trace_mode_req_msg_v01,
+				mode),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+				qmi_wlanfw_qdss_trace_mode_req_msg_v01,
+				option_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+				qmi_wlanfw_qdss_trace_mode_req_msg_v01,
+				option),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info qmi_wlanfw_qdss_trace_mode_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				qmi_wlanfw_qdss_trace_mode_resp_msg_v01,
+				resp),
+		.ei_array       = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
 static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
 	{
 		.data_type	= QMI_OPT_FLAG,
@@ -387,7 +580,7 @@
 					   mlo_capable_valid),
 	},
 	{
-		.data_type	= QMI_OPT_FLAG,
+		.data_type	= QMI_UNSIGNED_1_BYTE,
 		.elem_len	= 1,
 		.elem_size	= sizeof(u8),
 		.array_type	= NO_ARRAY,
@@ -505,6 +698,24 @@
 					   feature_list),
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x2E,
+		.offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+					   fw_cfg_support_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x2E,
+		.offset         = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+					   fw_cfg_support),
+	},
+	{
 		.data_type	= QMI_EOTI,
 		.array_type	= NO_ARRAY,
 		.tlv_type	= QMI_COMMON_TLV_TYPE,
@@ -528,6 +739,192 @@
 	},
 };
 
+struct qmi_elem_info wlanfw_cfg_download_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlanfw_cfg_download_req_msg_v01,
+					   file_type_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum wlanfw_cfg_type_v01),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlanfw_cfg_download_req_msg_v01,
+					   file_type),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlanfw_cfg_download_req_msg_v01,
+					   total_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct wlanfw_cfg_download_req_msg_v01,
+					   total_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlanfw_cfg_download_req_msg_v01,
+					   seg_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct wlanfw_cfg_download_req_msg_v01,
+					   seg_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlanfw_cfg_download_req_msg_v01,
+					   data_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlanfw_cfg_download_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLANFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(u8),
+		.array_type     = VAR_LEN_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct wlanfw_cfg_download_req_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlanfw_cfg_download_req_msg_v01,
+					   end_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct wlanfw_cfg_download_req_msg_v01,
+					   end),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info wlanfw_cfg_download_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlanfw_cfg_download_resp_msg_v01,
+					   resp),
+		.ei_array       = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info qmi_wlanfw_phy_cap_req_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info qmi_wlanfw_phy_cap_resp_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+					   num_phy_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+					   num_phy),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+					   board_id_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u32),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+					   board_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
 static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
 	{
 		.data_type	= QMI_OPT_FLAG,
@@ -747,6 +1144,60 @@
 					   cal_done_enable),
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x1C,
+		.offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+		                           qdss_trace_req_mem_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x1C,
+		.offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+		                           qdss_trace_req_mem_enable),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x1D,
+		.offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+		                           qdss_trace_save_enable_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x1D,
+		.offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+		                           qdss_trace_save_enable),
+	},
+	{
+                .data_type      = QMI_OPT_FLAG,
+                .elem_len       = 1,
+                .elem_size      = sizeof(u8),
+                .array_type     = NO_ARRAY,
+                .tlv_type       = 0x20,
+                .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                           m3_dump_upload_req_enable_valid),
+        },
+        {
+                .data_type      = QMI_UNSIGNED_1_BYTE,
+                .elem_len       = 1,
+                .elem_size      = sizeof(u8),
+                .array_type     = NO_ARRAY,
+                .tlv_type       = 0x20,
+                .offset         = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+                                           m3_dump_upload_req_enable),
+        },
+	{
 		.data_type	= QMI_EOTI,
 		.array_type	= NO_ARRAY,
 		.tlv_type	= QMI_COMMON_TLV_TYPE,
@@ -983,6 +1434,68 @@
 	},
 };
 
+static struct qmi_elem_info qmi_wlanfw_device_info_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info qmi_wlanfw_device_info_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+					   resp),
+		.ei_array       = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+					   bar_addr_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+					   bar_addr),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset		= offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+					   bar_size_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u32),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+					   bar_size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
 static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
 	{
 		.data_type	= QMI_UNSIGNED_4_BYTE,
@@ -1342,6 +1855,24 @@
 		.ei_array	= qmi_wlanfw_dev_mem_info_s_v01_ei,
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x25,
+		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+					   rxgainlut_support_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x25,
+		.offset         = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+					   rxgainlut_support),
+	},
+	{
 		.data_type	= QMI_EOTI,
 		.array_type	= NO_ARRAY,
 		.tlv_type	= QMI_COMMON_TLV_TYPE,
@@ -1879,6 +2410,85 @@
 	},
 };
 
+struct qmi_elem_info qmi_wlanfw_qdss_trace_save_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				qmi_wlanfw_qdss_trace_save_ind_msg_v01,
+				source),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				qmi_wlanfw_qdss_trace_save_ind_msg_v01,
+				total_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_wlanfw_qdss_trace_save_ind_msg_v01,
+				mem_seg_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_wlanfw_qdss_trace_save_ind_msg_v01,
+				mem_seg_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
+		.elem_size      = sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01),
+		.array_type     = VAR_LEN_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_wlanfw_qdss_trace_save_ind_msg_v01,
+				mem_seg),
+		.ei_array       = qmi_wlanfw_mem_seg_resp_s_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+				qmi_wlanfw_qdss_trace_save_ind_msg_v01,
+				file_name_valid),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_WLANFW_MAX_STR_LEN_V01 + 1,
+		.elem_size      = sizeof(char),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+				qmi_wlanfw_qdss_trace_save_ind_msg_v01,
+				file_name),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
 static struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
 	{
 		.data_type = QMI_EOTI,
@@ -1893,34 +2503,649 @@
 	},
 };
 
-static void ath12k_host_cap_parse_mlo(struct qmi_wlanfw_host_cap_req_msg_v01 *req)
+struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = {
 {
-	req->mlo_capable_valid = 1;
-	req->mlo_capable = 1;
-	req->mlo_chip_id_valid = 1;
-	req->mlo_chip_id = 0;
-	req->mlo_group_id_valid = 1;
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ini_req_msg_v01,
+					   enablefwlog_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ini_req_msg_v01,
+					   enablefwlog),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_ini_resp_msg_v01,
+					   resp),
+		.ei_array       = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+	},
+};
+
+struct qmi_elem_info qmi_wlanfw_mem_read_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct qmi_wlanfw_mem_read_req_msg_v01,
+					   offset),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct qmi_wlanfw_mem_read_req_msg_v01,
+					   mem_type),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct qmi_wlanfw_mem_read_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info qmi_wlanfw_mem_read_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_read_resp_msg_v01,
+					   resp),
+		.ei_array       = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_read_resp_msg_v01,
+					   data_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_read_resp_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLANFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(u8),
+		.array_type     = VAR_LEN_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_read_resp_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info qmi_wlanfw_mem_write_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_write_req_msg_v01,
+					   offset),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_write_req_msg_v01,
+					   mem_type),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_write_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLANFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(u8),
+		.array_type     = VAR_LEN_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_write_req_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info qmi_wlanfw_mem_write_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_write_resp_msg_v01,
+					   resp),
+		.ei_array       = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+int ath12k_qmi_mem_read(struct ath12k_base *ab, u32 mem_addr, void *mem_value, size_t count)
+{
+	struct qmi_wlanfw_mem_read_req_msg_v01 *req;
+	struct qmi_wlanfw_mem_read_resp_msg_v01 *resp;
+	struct qmi_txn txn = {};
+	int ret;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	req->offset = mem_addr;
+
+	/* Firmware uses mem type to map to various memory regions.
+	 * If this is set to 0, firmware uses automatic mapping of regions.
+	 * i.e, if mem address is given and mem_type is 0, firmware will
+	 * find under which memory region that address belongs
+	 */
+	req->mem_type = QMI_MEM_REGION_TYPE;
+	req->data_len = count;
+
+	ret = qmi_txn_init(&ab->qmi.handle, &txn,
+			   qmi_wlanfw_mem_read_resp_msg_v01_ei, resp);
+	if (ret < 0)
+		goto out;
+
+	ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+			       QMI_WLANFW_MEM_READ_REQ_V01,
+			       QMI_WLANFW_MEM_READ_REQ_MSG_V01_MAX_MSG_LEN,
+			       qmi_wlanfw_mem_read_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		ath12k_warn(ab, "Failed to send mem read request, err %d\n",
+			    ret);
+
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+	if (ret < 0)
+		goto out;
+
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		ath12k_warn(ab, "qmi mem read req failed, result: %d, err: %d\n",
+			    resp->resp.result, resp->resp.error);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!resp->data_valid || resp->data_len != req->data_len) {
+		ath12k_warn(ab, "qmi mem read is invalid\n");
+		ret = -EINVAL;
+		goto out;
+	}
+	memcpy(mem_value, resp->data, resp->data_len);
+
+out:
+	kfree(req);
+	kfree(resp);
+	return ret;
+}
+
+int ath12k_qmi_mem_write(struct ath12k_base *ab, u32 mem_addr, void* mem_value, size_t count)
+{
+	struct qmi_wlanfw_mem_write_req_msg_v01 *req;
+	struct qmi_wlanfw_mem_write_resp_msg_v01 *resp;
+	struct qmi_txn txn = {};
+	int ret;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	req->offset = mem_addr;
+	req->mem_type = QMI_MEM_REGION_TYPE;
+	req->data_len = count;
+	memcpy(req->data, mem_value, req->data_len);
+
+	ret = qmi_txn_init(&ab->qmi.handle, &txn,
+			   qmi_wlanfw_mem_write_resp_msg_v01_ei, resp);
+	if (ret < 0)
+		goto out;
+
+	ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+			       QMI_WLANFW_MEM_WRITE_REQ_V01,
+			       QMI_WLANFW_MEM_WRITE_REQ_MSG_V01_MAX_MSG_LEN,
+			       qmi_wlanfw_mem_write_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		ath12k_warn(ab, "Failed to send mem write request, err %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+	if (ret < 0)
+		goto out;
+
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		ath12k_warn(ab, "qmi mem write req failed, result: %d, err: %d\n",
+			    resp->resp.result, resp->resp.error);
+		ret = -EINVAL;
+		goto out;
+	}
+
+out:
+	kfree(req);
+	kfree(resp);
+	return ret;
+}
+
+static struct qmi_elem_info qmi_wlanfw_m3_dump_upload_req_ind_msg_v01_ei[] = {
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u32),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x01,
+		.offset = offsetof(struct qmi_wlanfw_m3_dump_upload_req_ind_msg_v01,
+				   pdev_id),
+	},
+	{
+		.data_type = QMI_UNSIGNED_8_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u64),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x02,
+		.offset = offsetof(struct qmi_wlanfw_m3_dump_upload_req_ind_msg_v01,
+				   addr),
+	},
+	{
+		.data_type = QMI_UNSIGNED_8_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u64),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x03,
+		.offset = offsetof(struct qmi_wlanfw_m3_dump_upload_req_ind_msg_v01,
+				   size),
+	},
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info qmi_wlanfw_m3_dump_upload_done_req_msg_v01_ei[] = {
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u32),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x01,
+		.offset = offsetof(struct
+				   qmi_wlanfw_m3_dump_upload_done_req_msg_v01,
+				   pdev_id),
+	},
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u32),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x02,
+		.offset = offsetof(struct
+				   qmi_wlanfw_m3_dump_upload_done_req_msg_v01,
+				   status),
+	},
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info qmi_wlanfw_m3_dump_upload_done_resp_msg_v01_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x02,
+		.offset = offsetof(struct qmi_wlanfw_m3_dump_upload_done_resp_msg_v01,
+				   resp),
+		.ei_array = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type = QMI_EOTI,
+		.array_type = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static int ath12k_qmi_send_qdss_trace_config_download_req(struct ath12k_base *ab,
+							  const u8 *buffer,
+							  unsigned int buffer_len)
+{
+	int ret = 0;
+	struct qmi_wlanfw_qdss_trace_config_download_req_msg_v01 *req;
+	struct qmi_wlanfw_qdss_trace_config_download_resp_msg_v01 resp;
+	struct qmi_txn txn;
+	const u8 *temp = buffer;
+	int  max_len = QMI_WLANFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_MSG_V01_MAX_LEN;
+	unsigned int  remaining;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	remaining = buffer_len;
+	while (remaining) {
+		memset(&resp, 0, sizeof(resp));
+		req->total_size_valid = 1;
+		req->total_size = buffer_len;
+		req->seg_id_valid = 1;
+		req->data_valid = 1;
+		req->end_valid = 1;
+
+		if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
+			req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
+		} else {
+			req->data_len = remaining;
+			req->end = 1;
+		}
+		memcpy(req->data, temp, req->data_len);
+
+		ret = qmi_txn_init(&ab->qmi.handle, &txn,
+				   qmi_wlanfw_qdss_trace_config_download_resp_msg_v01_ei,
+				   &resp);
+		if (ret < 0)
+			goto out;
+
+		ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+				       QMI_WLANFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_V01,
+				       max_len,
+				       qmi_wlfw_qdss_trace_config_download_req_msg_v01_ei,
+				       req);
+		if (ret < 0) {
+			ath12k_warn(ab, "Failed to send QDSS config download request = %d\n",
+				    ret);
+			qmi_txn_cancel(&txn);
+			goto out;
+		}
+
+		ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+		if (ret < 0)
+			goto out;
+
+		if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+			ath12k_warn(ab, "QDSS config download request failed, result: %d, err: %d\n",
+				    resp.resp.result, resp.resp.error);
+			ret = -EINVAL;
+			goto out;
+		}
+		remaining -= req->data_len;
+		temp += req->data_len;
+		req->seg_id++;
+	}
+
+out:
+	kfree(req);
+	return ret;
+}
+
+static int ath12k_qmi_send_qdss_config(struct ath12k_base *ab)
+{
+	struct device *dev = ab->dev;
+	const struct firmware *fw_entry;
+	char filename[ATH12K_QMI_MAX_QDSS_CONFIG_FILE_NAME_SIZE];
+	int ret;
+
+	snprintf(filename, sizeof(filename),
+		 "%s/%s/%s", ATH12K_FW_DIR, ab->hw_params->fw.dir,
+		 ATH12K_QMI_DEFAULT_QDSS_CONFIG_FILE_NAME);
+	ret = request_firmware(&fw_entry, filename, dev);
+	if (ret) {
+		/* for backward compatibility */
+		snprintf(filename, sizeof(filename),
+			 "%s", ATH12K_QMI_DEFAULT_QDSS_CONFIG_FILE_NAME);
+		ret = request_firmware(&fw_entry, filename, dev);
+		if (ret) {
+			ath12k_warn(ab, "qmi failed to load QDSS config: %s\n", filename);
+			return ret;
+		}
+	}
+
+	/* only dereference fw_entry after request_firmware() has succeeded */
+	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
+		   filename, fw_entry->size);
+
+	ret = ath12k_qmi_send_qdss_trace_config_download_req(ab, fw_entry->data,
+							     fw_entry->size);
+	if (ret < 0) {
+		ath12k_warn(ab, "qmi failed to load QDSS config to FW: %d\n", ret);
+		goto out;
+	}
+out:
+	release_firmware(fw_entry);
+	return ret;
+}
+
+static int ath12k_qmi_fill_mlo_host_caps(struct ath12k_base *ab,
+					 struct qmi_wlanfw_host_cap_req_msg_v01 *req)
+{
+	struct wlfw_host_mlo_chip_info_s_v01 *info;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_base *partner_ab;
+	int ret, i, j, link_id;
+
+	mutex_lock(&ag->mutex_lock);
+
+	if (!ag->mlo_capable) {
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "Skip MLO cap send for chip id %d since it's not MLO capable\n",
+			   ab->chip_id);
+		mutex_unlock(&ag->mutex_lock);
+		return 0;
+	}
+
+	if (ath12k_cold_boot_cal && ab->qmi.cal_done == 0 &&
+	    ab->hw_params->cold_boot_calib &&
+	    ab->qmi.cal_timeout == 0) {
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "Skip MLO cap send for chip id %d since it's in cold_boot\n",
+				ab->chip_id);
+		mutex_unlock(&ag->mutex_lock);
+		return 0;
+	}
+
+	if (ag->id == ATH12K_INVALID_GRP_ID || !ab->qmi.num_radios) {
+		ag->mlo_capable = false;
+
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "Skip MLO cap send for chip id %d due to group_id %d qmi num_radio %d\n",
+			   ab->chip_id, ag->id, ab->qmi.num_radios);
+		mutex_unlock(&ag->mutex_lock);
+		return 0;
+	}
+
+	if (ab->chip_id == ATH12K_INVALID_CHIP_ID) {
+		ath12k_err(ab, "failed to send MLO cap send due to Invalid chip id\n");
+
+		ret = -EINVAL;
+		goto out;
+	}
+
+	req->mlo_capable = true;
+	req->mlo_capable_valid = true;
+
+	req->mlo_chip_id = ab->chip_id;
+	req->mlo_chip_id_valid = true;
+
+	req->mlo_group_id = ag->id;
+	req->mlo_group_id_valid = true;
+
+	req->max_mlo_peer = ab->hw_params->max_mlo_peer;
+	req->max_mlo_peer_valid = true;
+
+	req->mlo_num_chips = ag->num_chip;
+	req->mlo_num_chips_valid = true;
+
+	link_id = 0;
+	for (i = 0; i < ag->num_chip; i++) {
+		info = &req->mlo_chip_info[i];
+		partner_ab = ag->ab[i];
+
+		if (partner_ab->chip_id == ATH12K_INVALID_CHIP_ID) {
+			ret = -EINVAL;
+			goto chip_cleanup;
+		}
+
+		info->chip_id = partner_ab->chip_id;
+
+		info->num_local_links = partner_ab->qmi.num_radios;
+
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "MLO chip id %d num_link %d\n",
+			    partner_ab->chip_id, info->num_local_links);
+
+		for (j = 0; j < info->num_local_links; j++) {
+			info->hw_link_id[j] = link_id;
+			info->valid_mlo_link_id[j] = true;
+
+			ath12k_dbg(ab, ATH12K_DBG_QMI, "MLO link id %d\n",
+				    info->hw_link_id[j]);
+
+			link_id++;
+		}
+	}
+
+	/* Disable MLO capable if there is no Multi-link in a group */
+	if (link_id <= 1)
+		ag->mlo_capable = false;
+
+	req->mlo_chip_info_valid = true;
+
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "MLO host cap chosen\n");
+
+	mutex_unlock(&ag->mutex_lock);
+	return 0;
+
+chip_cleanup:
+	for (i = i - 1; i >= 0; i--) {
+		info = &req->mlo_chip_info[i];
+
+		memset(info, 0, sizeof(*info));
+	}
+
+	req->mlo_num_chips = 0;
+	req->mlo_num_chips_valid = 0;
+
+	req->max_mlo_peer = 0;
+	req->max_mlo_peer_valid = 0;
 	req->mlo_group_id = 0;
-	req->max_mlo_peer_valid = 1;
-	/* Max peer number generally won't change for the same device
-	 * but needs to be synced with host driver.
-	 */
-	req->max_mlo_peer = 32;
-	req->mlo_num_chips_valid = 1;
-	req->mlo_num_chips = 1;
-	req->mlo_chip_info_valid = 1;
-	req->mlo_chip_info[0].chip_id = 0;
-	req->mlo_chip_info[0].num_local_links = 2;
-	req->mlo_chip_info[0].hw_link_id[0] = 0;
-	req->mlo_chip_info[0].hw_link_id[1] = 1;
-	req->mlo_chip_info[0].valid_mlo_link_id[0] = 1;
-	req->mlo_chip_info[0].valid_mlo_link_id[1] = 1;
+	req->mlo_group_id_valid = 0;
+	req->mlo_chip_id = 0;
+	req->mlo_chip_id_valid = 0;
+	req->mlo_capable = 0;
+	req->mlo_capable_valid = 0;
+
+	ag->mlo_capable = false;
+out:
+	mutex_unlock(&ag->mutex_lock);
+
+	return ret;
 }
 
 static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
 {
 	struct qmi_wlanfw_host_cap_req_msg_v01 req;
 	struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
+	struct ath12k_board_data bd;
+	struct device_node *root;
+	const char *model = NULL;
 	struct qmi_txn txn = {};
 	int ret = 0;
 
@@ -1934,16 +3159,39 @@
 	req.bdf_support_valid = 1;
 	req.bdf_support = 1;
 
+	if (ab->hw_params->m3_fw_support) {
 	req.m3_support_valid = 1;
 	req.m3_support = 1;
 	req.m3_cache_support_valid = 1;
 	req.m3_cache_support = 1;
+	} else {
+		req.m3_support_valid = 0;
+		req.m3_support = 0;
+		req.m3_cache_support_valid = 0;
+		req.m3_cache_support = 0;
+	}
 
 	req.cal_done_valid = 1;
 	req.cal_done = ab->qmi.cal_done;
 
+	if (ab->hw_params->send_platform_model) {
+		root = of_find_node_by_path("/");
+		if (root) {
+			model = of_get_property(root, "model", NULL);
+			if (model) {
+				req.platform_name_valid = 1;
+				strlcpy(req.platform_name, model,
+					QMI_WLANFW_MAX_PLATFORM_NAME_LEN_V01);
+				ath12k_info(ab, "Platform name: %s\n", req.platform_name);
+			}
+			of_node_put(root);
+		}
+	}
+
+	if (ab->hw_params->qmi_cnss_feature_bitmap) {
 	req.feature_list_valid = 1;
-	req.feature_list = BIT(CNSS_QDSS_CFG_MISS_V01);
+		req.feature_list = ab->hw_params->qmi_cnss_feature_bitmap;
+	}
 
 	/* BRINGUP: here we are piggybacking a lot of stuff using
 	 * internal_sleep_clock, should it be split?
@@ -1961,8 +3209,20 @@
 		 */
 		req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
 		req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
+	}
+
+	ret = ath12k_core_fetch_fw_cfg(ab, &bd);
+	if (!ret) {
+		req.fw_cfg_support_valid = 1;
+		req.fw_cfg_support = 1;
+	}
+	ab->fw_cfg_support = !!req.fw_cfg_support;
+	ath12k_core_free_bdf(ab, &bd);
 
-		ath12k_host_cap_parse_mlo(&req);
+	ret = ath12k_qmi_fill_mlo_host_caps(ab, &req);
+	if (ret < 0) {
+		ath12k_warn(ab, "Failed to get MLO capability, err = %d\n", ret);
+		goto out;
 	}
 
 	ret = qmi_txn_init(&ab->qmi.handle, &txn,
@@ -1994,6 +3254,53 @@
 	return ret;
 }
 
+static int ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
+{
+	struct qmi_wlanfw_phy_cap_req_msg_v01 req = { };
+	struct qmi_wlanfw_phy_cap_resp_msg_v01 resp = { };
+	struct qmi_txn txn = { };
+	int ret;
+
+	ret = qmi_txn_init(&ab->qmi.handle, &txn,
+			   qmi_wlanfw_phy_cap_resp_msg_v01_ei, &resp);
+	if (ret < 0)
+		goto out;
+
+	ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+			       QMI_WLANFW_PHY_CAP_REQ_V01,
+			       QMI_WLANFW_PHY_CAP_REQ_MSG_V01_MAX_LEN,
+			       qmi_wlanfw_phy_cap_req_msg_v01_ei, &req);
+	if (ret < 0) {
+		ath12k_warn(ab, "failed to send phy capability request, err = %d\n", ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+	if (ret < 0)
+		goto out;
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (!resp.num_phy_valid) {
+		ret = -ENODATA;
+		goto out;
+	}
+
+	ab->qmi.num_radios = resp.num_phy;
+
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "PHY capability resp valid %d num_phy %d valid %d board_id %d\n",
+		   resp.num_phy_valid, resp.num_phy,
+		   resp.board_id_valid, resp.board_id);
+	return 0;
+
+out:
+	ab->qmi.num_radios = ab->hw_params->num_local_link;
+	return ret;
+}
+
 static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab)
 {
 	struct qmi_wlanfw_ind_register_req_msg_v01 *req;
@@ -2024,6 +3331,14 @@
 	req->cal_done_enable = 1;
 	req->fw_init_done_enable_valid = 1;
 	req->fw_init_done_enable = 1;
+	req->qdss_trace_req_mem_enable_valid = 1;
+	req->qdss_trace_req_mem_enable = 1;
+	req->qdss_trace_save_enable_valid = 1;
+	req->qdss_trace_save_enable = 1;
+	req->qdss_trace_free_enable_valid = 1;
+	req->qdss_trace_free_enable = 1;
+	req->m3_dump_upload_req_enable_valid = 1;
+	req->m3_dump_upload_req_enable = 1;
 
 	req->pin_connect_result_enable_valid = 0;
 	req->pin_connect_result_enable = 0;
@@ -2082,7 +3397,7 @@
 	 * failure to firmware and firmware then request multiple blocks of
 	 * small chunk size memory.
 	 */
-	if (ab->qmi.target_mem_delayed) {
+	if (!ab->bus_params.fixed_mem_region && ab->qmi.target_mem_delayed) {
 		delayed = true;
 		ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi delays mem_request %d\n",
 			   ab->qmi.mem_seg_count);
@@ -2094,8 +3409,7 @@
 			req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr;
 			req->mem_seg[i].size = ab->qmi.target_mem[i].size;
 			req->mem_seg[i].type = ab->qmi.target_mem[i].type;
-			ath12k_dbg(ab, ATH12K_DBG_QMI,
-				   "qmi req mem_seg[%d] %pad %u %u\n", i,
+			ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi req mem_seg[%d] %pad %u %u\n", i,
 				   &ab->qmi.target_mem[i].paddr,
 				   ab->qmi.target_mem[i].size,
 				   ab->qmi.target_mem[i].type);
@@ -2140,11 +3454,117 @@
 	return ret;
 }
 
-static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
+/**
+ * ath12k_free_mlo_glb_per_chip_crash_info() - Free MLO per_chip crash info
+ * @global_chip_crash_info: Pointer to MLO Global per-chip crash info
+ *
+ * Return: None
+ */
+static void ath12k_free_mlo_glb_per_chip_crash_info(
+		struct ath12k_host_mlo_glb_chip_crash_info *global_chip_crash_info)
 {
-	int i;
+	/* kfree(NULL) is a no-op, so no NULL check is needed */
+	kfree(global_chip_crash_info->per_chip_crash_info);
+
+	global_chip_crash_info->per_chip_crash_info = NULL;
+}
+
+/**
+ * ath12k_free_mlo_mgmt_rx_reo_per_link_info() - Free Rx REO per-link info
+ * @snapshot_info: Pointer to MGMT Rx REO snapshot info
+ *
+ * Return: None
+ */
+void ath12k_free_mlo_mgmt_rx_reo_per_link_info
+	(struct ath12k_host_mlo_glb_rx_reo_snapshot_info *snapshot_info)
+{
+	if (snapshot_info) {
+		kfree(snapshot_info->link_info); /* kfree(NULL) is a no-op */
+		snapshot_info->link_info = NULL;
+	}
+}
+
+static void ath12k_qmi_free_mlo_mem_chunk(struct ath12k_base *ab,
+					  struct target_mem_chunk *chunk,
+					  int idx)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct target_mem_chunk *mlo_chunk;
+	bool fixed_mem = ab->bus_params.fixed_mem_region;
+	struct ath12k_host_mlo_mem_arena
+		*mlomem_arena_ctx = &ab->ag->mlomem_arena;
+
+	lockdep_assert_held(&ag->mutex_lock);
+
+	if (!ag->mlo_mem.init_done || ag->num_started)
+		return;
+
+	if (idx >= ARRAY_SIZE(ag->mlo_mem.chunk)) {
+		ath12k_warn(ab, "invalid idx %d for MLO memory chunk free\n", idx);
+		return;
+	}
+
+	mlo_chunk = &ag->mlo_mem.chunk[idx];
+
+	if (fixed_mem) {
+		if (mlo_chunk->v.ioaddr) {
+			iounmap(mlo_chunk->v.ioaddr);
+			mlo_chunk->v.ioaddr = NULL;
+		}
+	} else {
+		if (mlo_chunk->v.addr) {
+			dma_free_coherent(ab->dev,
+					  mlo_chunk->size,
+					  mlo_chunk->v.addr,
+					  mlo_chunk->paddr);
+			mlo_chunk->v.addr = NULL;
+		}
+	}
 
-	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+	mlo_chunk->paddr = 0;
+	mlo_chunk->size = 0;
+
+	if (fixed_mem)
+		chunk->v.ioaddr = NULL;
+	else
+		chunk->v.addr = NULL;
+	chunk->paddr = 0;
+	chunk->size = 0;
+
+	ag->mlo_mem.is_mlo_mem_avail = false;
+
+	/* We need to de-initialize when mlo memory is cleaned */
+	mutex_lock(&mlomem_arena_ctx->mutex_lock);
+	if (mlomem_arena_ctx->init_done) {
+		mlomem_arena_ctx->init_done = false;
+		ath12k_free_mlo_mgmt_rx_reo_per_link_info
+			(&mlomem_arena_ctx->rx_reo_snapshot_info);
+		ath12k_free_mlo_glb_per_chip_crash_info
+			(&mlomem_arena_ctx->global_chip_crash_info);
+	}
+	mutex_unlock(&mlomem_arena_ctx->mutex_lock);
+
+	return;
+}
+
+void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	int i, mlo_idx;
+
+	for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
+		if (ab->qmi.target_mem[i].type == MLO_GLOBAL_MEM_REGION_TYPE) {
+			ath12k_qmi_free_mlo_mem_chunk(ab,
+						      &ab->qmi.target_mem[i],
+						      mlo_idx);
+		} else {
+			if (ab->bus_params.fixed_mem_region) {
+				if (!ab->qmi.target_mem[i].v.ioaddr)
+					continue;
+
+				iounmap(ab->qmi.target_mem[i].v.ioaddr);
+				ab->qmi.target_mem[i].v.ioaddr = NULL;
+			} else {
 		if (!ab->qmi.target_mem[i].v.addr)
 			continue;
 		dma_free_coherent(ab->dev,
@@ -2154,26 +3574,18 @@
 		ab->qmi.target_mem[i].v.addr = NULL;
 	}
 }
+	}
 
-static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
-{
-	int i;
-	struct target_mem_chunk *chunk;
-
-	ab->qmi.target_mem_delayed = false;
-
-	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
-		chunk = &ab->qmi.target_mem[i];
+	if (!ag->num_started && ag->mlo_mem.init_done) {
+		memset(ag->mlo_mem.chunk, 0, sizeof(ag->mlo_mem.chunk));
+		ag->mlo_mem.rsv = NULL;
+		ag->mlo_mem.init_done = false;
+	}
+}
 
-		/* Allocate memory for the region and the functionality supported
-		 * on the host. For the non-supported memory region, host does not
-		 * allocate memory, assigns NULL and FW will handle this without crashing.
-		 */
-		switch (chunk->type) {
-		case HOST_DDR_REGION_TYPE:
-		case M3_DUMP_REGION_TYPE:
-		case PAGEABLE_MEM_REGION_TYPE:
-		case CALDB_MEM_REGION_TYPE:
+static int ath12k_qmi_alloc_chunk(struct ath12k_base *ab,
+				  struct target_mem_chunk *chunk)
+{
 			chunk->v.addr = dma_alloc_coherent(ab->dev,
 							   chunk->size,
 							   &chunk->paddr,
@@ -2186,12 +3598,67 @@
 						    chunk->size,
 						    chunk->type);
 					ath12k_qmi_free_target_mem_chunk(ab);
-					return 0;
+			return -EAGAIN;
 				}
 				ath12k_warn(ab, "memory allocation failure for %u size: %d\n",
 					    chunk->type, chunk->size);
 				return -ENOMEM;
 			}
+
+	return 0;
+}
+
+static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	int i, mlo_idx, ret;
+	struct target_mem_chunk *chunk, *mlo_chunk;
+
+	mutex_lock(&ag->mutex_lock);
+
+	if (!ag->mlo_mem.init_done) {
+		memset(ag->mlo_mem.chunk, 0, sizeof(ag->mlo_mem.chunk));
+		ag->mlo_mem.init_done = true;
+	}
+
+	ab->qmi.target_mem_delayed = false;
+
+	for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
+		chunk = &ab->qmi.target_mem[i];
+
+		/* Allocate memory for the region and the functionality supported
+		 * on the host. For the non-supported memory region, host does not
+		 * allocate memory, assigns NULL and FW will handle this without crashing.
+		 */
+		switch (chunk->type) {
+		case MLO_GLOBAL_MEM_REGION_TYPE:
+			mlo_chunk = &ag->mlo_mem.chunk[mlo_idx];
+			if (!mlo_chunk->paddr) {
+				mlo_chunk->size = chunk->size;
+				mlo_chunk->type = chunk->type;
+
+				ret = ath12k_qmi_alloc_chunk(ab, mlo_chunk);
+				if (ret) {
+					ret = (ret == -EAGAIN) ? 0 : ret;
+					goto out;
+				}
+
+				memset(mlo_chunk->v.addr, 0, mlo_chunk->size);
+			}
+			chunk->paddr = mlo_chunk->paddr;
+			chunk->v.addr = mlo_chunk->v.addr;
+			ag->mlo_mem.is_mlo_mem_avail = true;
+			mlo_idx++;
+			break;
+		case HOST_DDR_REGION_TYPE:
+		case M3_DUMP_REGION_TYPE:
+		case PAGEABLE_MEM_REGION_TYPE:
+		case CALDB_MEM_REGION_TYPE:
+			ret = ath12k_qmi_alloc_chunk(ab, chunk);
+			if (ret) {
+				ret = (ret == -EAGAIN) ? 0 : ret;
+				goto out;
+			}
 			break;
 		default:
 			ath12k_warn(ab, "memory type %u not supported\n",
@@ -2201,17 +3668,904 @@
 			break;
 		}
 	}
+
+	mutex_unlock(&ag->mutex_lock);
+	return 0;
+
+out:
+	ath12k_qmi_free_target_mem_chunk(ab);
+	mutex_unlock(&ag->mutex_lock);
+	return ret;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_process_tlv_header() - Process a given TLV header
+ * @data: Pointer to start of the TLV
+ * @remaining_len: Length (in bytes) remaining in the arena from @data pointer
+ * @expected_tag: Expected TLV tag
+ * @tlv_len: Address of TLV length variable to be populated. This API populates
+ * the entire length(payload + header) of the TLV into @tlv_len
+ * @tlv_tag: Address of TLV Tag variable to be populated.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int
+ath12k_mgmt_rx_reo_process_tlv_header(struct ath12k_base *ab,
+				      const u8 *data, size_t remaining_len,
+				      u32 expected_tag, u32 *tlv_len,
+				      u32 *tlv_tag)
+{
+	if (remaining_len < MLO_SHMEM_TLV_HDR_SIZE) {
+		ath12k_err(ab, "Not enough space(%zu) to read TLV header(%u)\n",
+			   remaining_len, (u32)MLO_SHMEM_TLV_HDR_SIZE);
+		return -EINVAL;
+	}
+	*tlv_len = MLO_SHMEMTLV_GET_TLVLEN(MLO_SHMEMTLV_GET_HDR(data));
+	*tlv_len += MLO_SHMEM_TLV_HDR_SIZE;
+	if (remaining_len < *tlv_len) {
+		ath12k_err(ab, "Not enough space(%zu) to read TLV payload(%u)\n",
+			   remaining_len, *tlv_len);
+		return -EINVAL;
+	}
+
+	*tlv_tag = MLO_SHMEMTLV_GET_TLVTAG(MLO_SHMEMTLV_GET_HDR(data));
+	if (*tlv_tag != expected_tag) {
+		ath12k_err(ab, "Unexpected TLV tag: %u is seen. Expected: %u\n",
+			   *tlv_tag,
+			   expected_tag);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
+/**
+ * ath12k_mgmt_rx_reo_extract_ath12k_mlo_glb_shmem_tlv() - extract ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_H_SHMEM
+ * TLV
+ * @data: Pointer to start of the TLV
+ * @remaining_len: Length (in bytes) remaining in the arena from @data pointer
+ * @shmem_params: Pointer to MLO Global shared memory parameters. Extracted
+ * information will be populated in this data structure.
+ *
+ * Return: On success, the number of bytes parsed. On failure, errno is returned.
+ */
+static int
+ath12k_mgmt_rx_reo_extract_ath12k_mlo_glb_shmem_tlv(struct ath12k_base *ab,
+					       u8 *data, size_t remaining_len,
+	struct ath12k_host_ath12k_mlo_glb_shmem_params *shmem_params)
+{
+	struct ath12k_mlo_glb_shmem *ptlv;
+	u32 tlv_len, tlv_tag;
+	u32 major_minor_version;
+
+	if (ath12k_mgmt_rx_reo_process_tlv_header(ab, data, remaining_len,
+						  ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_H_SHMEM,
+						  &tlv_len, &tlv_tag) < 0) {
+		ath12k_err(ab, "%s:process tlv hdr failed\n", __func__);
+		return -EINVAL;
+	}
+
+	ptlv = (struct ath12k_mlo_glb_shmem *)data;
+	major_minor_version = get_field_value_in_tlv(ptlv, major_minor_version,
+						     tlv_len);
+	shmem_params->major_version =
+		MLO_SHMEM_GLB_H_SHMEM_PARAM_MAJOR_VERSION_GET(
+				major_minor_version);
+	shmem_params->minor_version =
+		MLO_SHMEM_GLB_H_SHMEM_PARAM_MINOR_VERSION_GET(
+				major_minor_version);
+
+	return tlv_len;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_extract_mlo_glb_link_info_tlv() - extract global link info from shmem
+ * @data: Pointer to the first TLV in the arena
+ * @remaining_len: Length (in bytes) remaining in the arena from @data pointer
+ * @link_info: Pointer to which link info needs to be copied
+ *
+ * Return: On success, the number of bytes parsed. On failure, errno is returned.
+ */
+static int
+ath12k_mgmt_rx_reo_extract_mlo_glb_link_info_tlv(struct ath12k_base *ab,
+						 u8 *data,
+				  size_t remaining_len,
+				  u32 *link_info)
+{
+	struct mlo_glb_link_info *ptlv;
+	u32 tlv_len, tlv_tag;
+
+	if (ath12k_mgmt_rx_reo_process_tlv_header(ab, data, remaining_len,
+						  ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_LINK_INFO,
+						  &tlv_len, &tlv_tag) < 0) {
+		ath12k_err(ab, "%s:process tlv hdr failed\n", __func__);
+		return -EINVAL;
+	}
+
+	ptlv = (struct mlo_glb_link_info *)data;
+
+	*link_info = get_field_value_in_tlv(ptlv, link_info, tlv_len);
+
+	return tlv_len;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_process_mlo_glb_per_link_status_tlv() - process per link info
+ * @data: Pointer to the first TLV in the arena
+ * @remaining_len: Length (in bytes) remaining in the arena from @data pointer
+ *
+ * Return: On success, the number of bytes parsed. On failure, errno is returned.
+ */
+static int
+ath12k_mgmt_rx_reo_process_mlo_glb_per_link_status_tlv(struct ath12k_base *ab,
+						       u8 *data, size_t remaining_len)
+{
+	u32 tlv_len, tlv_tag;
+
+	if (ath12k_mgmt_rx_reo_process_tlv_header(ab, data, remaining_len,
+						  ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_LINK,
+						  &tlv_len, &tlv_tag) < 0) {
+		ath12k_err(ab, "%s:process tlv hdr failed\n", __func__);
+		return -EINVAL;
+	}
+
+	return tlv_len;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_parse_global_link_info() - parse global link info
+ * @data: Pointer to the first TLV in the arena
+ * @remaining_len: Length (in bytes) remaining in the arena from @data pointer
+ *
+ * Return: On success, the number of bytes parsed. On failure, errno is returned.
+ */
+static int
+ath12k_mgmt_rx_reo_parse_global_link_info(struct ath12k_base *ab, u8 *data, size_t remaining_len)
+{
+	int parsed_bytes, len;
+	u8 link;
+	u32 link_info;
+	u8 num_links;
+
+	/* Extract ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_LINK_INFO_TLV */
+	len = ath12k_mgmt_rx_reo_extract_mlo_glb_link_info_tlv(ab, data, remaining_len, &link_info);
+	ATH12K_VALIDATE_PARSED_DATA_POINTER(len, data, remaining_len);
+	parsed_bytes = len;
+
+	num_links = MLO_SHMEM_GLB_LINK_INFO_PARAM_NO_OF_LINKS_GET(link_info);
+
+	for (link = 0; link < num_links; link++) {
+		len = ath12k_mgmt_rx_reo_process_mlo_glb_per_link_status_tlv(ab, data, remaining_len);
+		ATH12K_VALIDATE_PARSED_DATA_POINTER(len, data,
+							   remaining_len);
+		parsed_bytes += len;
+	}
+
+	return parsed_bytes;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_get_num_links_from_valid_link_bitmap() - Get the number of valid links
+ * @valid_link_bmap: Link bit map where the valid links are set to 1
+ *
+ * Return: Number of valid links
+ */
+static u8
+ath12k_mgmt_rx_reo_get_num_links_from_valid_link_bitmap(u16 valid_link_bmap)
+{
+	u8 num_links = 0;
+
+	/* Find the number of set bits */
+	while (valid_link_bmap) {
+		num_links++;
+		valid_link_bmap &= (valid_link_bmap - 1);
+	}
+
+	return num_links;
+}
+
+static int
+ath12k_mgmt_mlo_global_per_chip_crash_info_tlv(struct ath12k_base *ab,
+					u8 *data,
+					size_t remaining_len, u8 cur_chip_id,
+					struct ath12k_host_mlo_glb_per_chip_crash_info *per_chip_crash_info)
+{
+	struct mlo_glb_per_chip_crash_info *ptlv;
+	u32 tlv_len, tlv_tag;
+	u8 *crash_reason, *recovery_mode;
+
+	if (ath12k_mgmt_rx_reo_process_tlv_header(ab, data, remaining_len,
+				ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_PER_CHIP_CRASH_INFO,
+				&tlv_len, &tlv_tag) < 0) {
+		return -EINVAL;
+	}
+
+	ptlv = (struct mlo_glb_per_chip_crash_info *)data;
+	per_chip_crash_info->chip_id = cur_chip_id;
+	crash_reason = (u8 *)get_field_pointer_in_tlv(ptlv, crash_reason, tlv_len);
+	recovery_mode = (u8 *)get_field_pointer_in_tlv(ptlv, recovery_mode, tlv_len);
+
+	per_chip_crash_info->crash_reason = (void *)crash_reason;
+	per_chip_crash_info->recovery_mode = (void *)recovery_mode;
+
+	return tlv_len;
+}
+
+static int
+ath12k_mgmt_mlo_global_chip_crash_info_tlv(struct ath12k_base *ab,
+				       u8 *data,
+				       size_t remaining_len,
+				       struct ath12k_host_mlo_glb_chip_crash_info *global_chip_crash_info)
+{
+	struct mlo_glb_chip_crash_info *ptlv;
+	u32 tlv_len, tlv_tag;
+	u32 chip_info;
+	u8 chip_map;
+
+	if (ath12k_mgmt_rx_reo_process_tlv_header(ab, data, remaining_len,
+			       ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_CHIP_CRASH_INFO,
+			       &tlv_len, &tlv_tag) < 0) {
+		return -EINVAL;
+	}
+
+	ptlv = (struct  mlo_glb_chip_crash_info *)data;
+	chip_info = get_field_value_in_tlv(ptlv, chip_info, tlv_len);
+	global_chip_crash_info->no_of_chips =
+			MLO_SHMEM_CHIP_CRASH_INFO_PARAM_NO_OF_CHIPS_GET(chip_info);
+	chip_map = MLO_SHMEM_CHIP_CRASH_INFO_PARAM_VALID_CHIP_BMAP_GET(chip_info);
+
+	memcpy(&global_chip_crash_info->valid_chip_bmap,
+	       &chip_map,
+	       min(sizeof(global_chip_crash_info->valid_chip_bmap),
+		   sizeof(chip_map)));
+
+	/* Allocate memory to extract per chip crash info */
+	global_chip_crash_info->per_chip_crash_info = kmalloc_array(
+						global_chip_crash_info->no_of_chips,
+						sizeof(*global_chip_crash_info->per_chip_crash_info),
+						GFP_KERNEL);
+
+	if (!global_chip_crash_info->per_chip_crash_info)
+	{
+		ath12k_warn(ab, "Couldn't allocate memory for per chip crash info!\n");
+		return -ENOBUFS;
+	}
+
+	return tlv_len;
+}
+
+static int
+ath12k_mgmt_mlo_global_chip_crash_info(struct ath12k_base *ab,
+				       u8 *data,
+				       size_t remaining_len,
+				       struct ath12k_host_mlo_glb_chip_crash_info *global_chip_crash_info)
+{
+	u8 chip_id;
+	s32 len, parsed_bytes;
+	unsigned long valid_chip_bmap, cur_chip_id;
+
+	len = ath12k_mgmt_mlo_global_chip_crash_info_tlv(ab, data, remaining_len,
+							global_chip_crash_info);
+	ATH12K_VALIDATE_PARSED_DATA_POINTER(len, data, remaining_len);
+	parsed_bytes = len;
+
+	if (len < 0)
+		return len;
+
+	memcpy(&valid_chip_bmap,
+	       &global_chip_crash_info->valid_chip_bmap,
+	       min(sizeof(valid_chip_bmap),
+		   sizeof(global_chip_crash_info->valid_chip_bmap)));
+
+	/* Extract per chip crash info */
+	for (chip_id = 0; chip_id < global_chip_crash_info->no_of_chips;
+	     chip_id++) {
+		cur_chip_id = find_first_bit(&valid_chip_bmap, 8);
+		__clear_bit(cur_chip_id, &valid_chip_bmap);
+
+		if (WARN_ON(cur_chip_id >= 8)) {
+			/* Exit gracefully */
+			return 0;
+		}
+
+		len = ath12k_mgmt_mlo_global_per_chip_crash_info_tlv(ab, data,
+						remaining_len, cur_chip_id,
+						&global_chip_crash_info->per_chip_crash_info[chip_id]);
+		if (len < 0) {
+			WARN_ON(1);
+			return len;
+		}
+
+		ATH12K_VALIDATE_PARSED_DATA_POINTER(len, data, remaining_len);
+		parsed_bytes += len;
+	}
+
+	return parsed_bytes;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_extract_mlo_glb_rx_reo_snapshot_info_tlv() - extract
+ * ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_RX_REO_SNAPSHOT_INFO TLV
+ * @data: Pointer to start of the TLV
+ * @remaining_len: Length (in bytes) remaining in the arena from @data pointer
+ * @snapshot_info: Pointer to MGMT Rx REO snapshot info. Extracted information
+ * will be populated in this data structure.
+ *
+ * Return: On success, the number of bytes parsed. On failure, errno is returned.
+ */
+static int
+ath12k_mgmt_rx_reo_extract_mlo_glb_rx_reo_snapshot_info_tlv(struct ath12k_base *ab,
+							    u8 *data, size_t remaining_len,
+	struct ath12k_host_mlo_glb_rx_reo_snapshot_info *snapshot_info)
+{
+	struct mlo_glb_rx_reo_snapshot_info *ptlv;
+	u32 tlv_len, tlv_tag;
+	u32 link_info;
+	u16 valid_link_bmap;
+
+	/* process ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_RX_REO_SNAPSHOT_INFO TLV */
+	if (ath12k_mgmt_rx_reo_process_tlv_header(ab, data, remaining_len,
+						  ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_RX_REO_SNAPSHOT_INFO,
+						  &tlv_len, &tlv_tag) < 0) {
+		ath12k_err(ab, "%s:process tlv hdr failed\n", __func__);
+		return -EINVAL;
+	}
+
+	ptlv = (struct mlo_glb_rx_reo_snapshot_info *)data;
+	link_info = get_field_value_in_tlv(ptlv, link_info, tlv_len);
+	valid_link_bmap =
+		MLO_SHMEM_GLB_LINK_INFO_PARAM_VALID_LINK_BMAP_GET(link_info);
+	snapshot_info->valid_link_bmap = valid_link_bmap;
+
+	if (is_field_present_in_tlv(ptlv, snapshot_ver_info, tlv_len)) {
+		u32 snapshot_ver_info;
+
+		snapshot_ver_info = get_field_value_in_tlv
+			(ptlv, snapshot_ver_info, tlv_len);
+		snapshot_info->hw_forwarded_snapshot_ver =
+			MLO_SHMEM_GLB_RX_REO_SNAPSHOT_PARAM_HW_FWD_SNAPSHOT_VER_GET
+			(snapshot_ver_info);
+		snapshot_info->fw_forwarded_snapshot_ver =
+			MLO_SHMEM_GLB_RX_REO_SNAPSHOT_PARAM_FW_FWD_SNAPSHOT_VER_GET
+			(snapshot_ver_info);
+		snapshot_info->fw_consumed_snapshot_ver =
+			MLO_SHMEM_GLB_RX_REO_SNAPSHOT_PARAM_FW_CONSUMED_SNAPSHOT_VER_GET
+			(snapshot_ver_info);
+	}
+
+	snapshot_info->num_links =
+		ath12k_mgmt_rx_reo_get_num_links_from_valid_link_bitmap(valid_link_bmap);
+	snapshot_info->link_info = kmalloc_array(snapshot_info->num_links,
+						 sizeof(*snapshot_info->link_info),
+			GFP_KERNEL);
+	if (!snapshot_info->link_info) {
+		ath12k_err(ab, "Couldn't allocate memory for rx_reo_per_link_info\n");
+		return -EINVAL;
+	}
+
+	return tlv_len;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_get_next_valid_link_id() - Get next valid link ID
+ * @valid_link_bmap: Link bit map where the valid links are set to 1
+ * @prev_link_id: Previous link ID
+ *
+ * Return: Next valid link ID if there are valid links after @prev_link_id,
+ * else -1
+ */
+static int
+ath12k_mgmt_rx_reo_get_next_valid_link_id(u16 valid_link_bmap, int prev_link_id)
+{
+	int cur_link_id;
+	u16 mask;
+	u8 maxbits = sizeof(valid_link_bmap) * 8;
+
+	cur_link_id = prev_link_id + 1;
+	mask = 1 << cur_link_id;
+
+	while (!(valid_link_bmap & mask)) {
+		cur_link_id++;
+		if (cur_link_id == maxbits)
+			return -EINVAL;
+		mask = mask << 1;
+	}
+
+	return cur_link_id;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_extract_mgmt_rx_reo_snapshot_tlv() - extract MGMT_RX_REO_SNAPSHOT TLV
+ * @data: Pointer to start of the TLV
+ * @remaining_len: Length (in bytes) remaining in the arena from @data pointer
+ * @address_ptr: Pointer to the snapshot address. This API will populate the
+ * snapshot address into the variable pointed by @address_ptr
+ *
+ * Return: On success, the number of bytes parsed. On failure, errno is returned.
+ */
+static int
+ath12k_mgmt_rx_reo_extract_mgmt_rx_reo_snapshot_tlv(struct ath12k_base *ab,
+						    u8 *data, size_t remaining_len,
+				 void **address_ptr)
+{
+	struct mgmt_rx_reo_snapshot *ptlv;
+	u32 tlv_len, tlv_tag;
+
+	/* process ATH12K_MLO_SHMEM_TLV_STRUCT_MGMT_RX_REO_SNAPSHOT TLV */
+	if (ath12k_mgmt_rx_reo_process_tlv_header(ab, data, remaining_len,
+						  ATH12K_MLO_SHMEM_TLV_STRUCT_MGMT_RX_REO_SNAPSHOT,
+						  &tlv_len, &tlv_tag) < 0) {
+		ath12k_err(ab, "%s:process tlv hdr failed\n", __func__);
+		return -EINVAL;
+	}
+
+	ptlv = (struct mgmt_rx_reo_snapshot *)data;
+	*address_ptr = get_field_pointer_in_tlv(ptlv, mgmt_rx_reo_snapshot_low,
+						tlv_len);
+
+	return tlv_len;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_extract_mlo_glb_rx_reo_per_link_info_tlv() - extract
+ * RX_REO_PER_LINK_SNAPSHOT_INFO TLV
+ * @data: Pointer to start of the TLV
+ * @remaining_len: Length (in bytes) remaining in the arena from @data pointer
+ * @link_info: Pointer to MGMT Rx REO per link info. Extracted information
+ * will be populated in this data structure.
+ *
+ * Return: On success, the number of bytes parsed. On failure, errno is returned.
+ */
+static int
+ath12k_mgmt_rx_reo_extract_mlo_glb_rx_reo_per_link_info_tlv(struct ath12k_base *ab,
+							    u8 *data, size_t remaining_len, u8 link_id,
+	struct ath12k_host_mlo_glb_rx_reo_per_link_info *link_info)
+{
+	struct mlo_glb_rx_reo_per_link_snapshot_info *ptlv;
+	u32 tlv_len, tlv_tag;
+	int len;
+	u8 *fw_consumed;
+	int parsed_bytes;
+
+	/* process ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_RX_REO_PER_LINK_SNAPSHOT_INFO TLV */
+	if (ath12k_mgmt_rx_reo_process_tlv_header(ab, data, remaining_len,
+						  ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_RX_REO_PER_LINK_SNAPSHOT_INFO,
+						  &tlv_len, &tlv_tag) < 0) {
+		ath12k_err(ab, "%s:process tlv hdr failed\n", __func__);
+		return -EINVAL;
+	}
+
+	ptlv = (struct mlo_glb_rx_reo_per_link_snapshot_info *)data;
+
+	link_info->link_id = link_id;
+
+	/*
+	 * Get the pointer to the fw_consumed snapshot within the TLV.
+	 * Note that snapshots are nested TLVs within link_snapshot_info TLV.
+	 */
+	data += offsetof(struct mlo_glb_rx_reo_per_link_snapshot_info,
+			fw_consumed);
+	fw_consumed = (u8 *)get_field_pointer_in_tlv(ptlv, fw_consumed,
+			tlv_len);
+	remaining_len -= offsetof(struct mlo_glb_rx_reo_per_link_snapshot_info,
+			fw_consumed);
+	parsed_bytes = offsetof(struct mlo_glb_rx_reo_per_link_snapshot_info,
+				fw_consumed);
+
+	/* extract fw_consumed snapshot */
+	len = ath12k_mgmt_rx_reo_extract_mgmt_rx_reo_snapshot_tlv(ab, data, remaining_len,
+								  &link_info->fw_consumed);
+	ATH12K_VALIDATE_PARSED_DATA_POINTER(len, data, remaining_len);
+	parsed_bytes += len;
+
+	/* extract fw_forwarded snapshot */
+	len = ath12k_mgmt_rx_reo_extract_mgmt_rx_reo_snapshot_tlv(ab, data, remaining_len,
+								  &link_info->fw_forwarded);
+	ATH12K_VALIDATE_PARSED_DATA_POINTER(len, data, remaining_len);
+	parsed_bytes += len;
+
+	/* extract hw_forwarded snapshot */
+	len = ath12k_mgmt_rx_reo_extract_mgmt_rx_reo_snapshot_tlv(ab, data, remaining_len,
+								  &link_info->hw_forwarded);
+	ATH12K_VALIDATE_PARSED_DATA_POINTER(len, data, remaining_len);
+	parsed_bytes += len;
+
+	/*
+	 * Return the length of link_snapshot_info TLV itself as the snapshots
+	 * are nested inside link_snapshot_info TLV and hence no need to add
+	 * their lengths separately.
+	 */
+	return tlv_len;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_extract_mlo_glb_rx_reo_snapshot_info() - extract MGMT Rx REO snapshot info
+ * @data: Pointer to start of ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_RX_REO_SNAPSHOT_INFO
+ * TLV
+ * @remaining_len: Length (in bytes) remaining in the arena from @data pointer
+ * @snapshot_info: Pointer to MGMT Rx REO snapshot info. Extracted information
+ * will be populated in this data structure.
+ *
+ * Return: On success, the number of bytes parsed. On failure, errno is returned.
+ */
+static int
+ath12k_mgmt_rx_reo_extract_mlo_glb_rx_reo_snapshot_info(struct ath12k_base *ab,
+							u8 *data, size_t remaining_len,
+	struct ath12k_host_mlo_glb_rx_reo_snapshot_info *snapshot_info)
+{
+	int parsed_bytes, len;
+	u8 link;
+	int cur_link_id, prev_link_id = -1;
+	u16 valid_link_bmap;
+
+	/* Extract ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_RX_REO_SNAPSHOT_INFO TLV */
+	len = ath12k_mgmt_rx_reo_extract_mlo_glb_rx_reo_snapshot_info_tlv(ab, data, remaining_len,
+									  snapshot_info);
+	ATH12K_VALIDATE_PARSED_DATA_POINTER(len, data, remaining_len);
+	parsed_bytes = len;
+
+	valid_link_bmap = snapshot_info->valid_link_bmap;
+	/* Foreach valid link */
+	for (link = 0; link < snapshot_info->num_links; ++link) {
+		cur_link_id = ath12k_mgmt_rx_reo_get_next_valid_link_id(valid_link_bmap,
+									prev_link_id);
+
+		BUG_ON(!(cur_link_id >= 0));
+
+		/* Extract per_link_info */
+		len  = ath12k_mgmt_rx_reo_extract_mlo_glb_rx_reo_per_link_info_tlv(ab,
+										   data, remaining_len, cur_link_id,
+				&snapshot_info->link_info[link]);
+		ATH12K_VALIDATE_PARSED_DATA_POINTER(len, data,
+							   remaining_len);
+		parsed_bytes += len;
+		prev_link_id = cur_link_id;
+	}
+
+	return parsed_bytes;
+}
+
+/**
+ * ath12k_qmi_parse_mlo_mem_arena() - Parse MLO Global shared memory arena
+ * @data: Pointer to the first TLV in the arena
+ * @remaining_len: Length (in bytes) remaining in the arena from @data pointer
+ * @mlomem_arena_ctx: Pointer to MLO Global shared memory arena context.
+ * Extracted information will be populated in this data structure.
+ *
+ * Return: On success, the number of bytes parsed. On failure, errno is returned.
+ */
+static int ath12k_qmi_parse_mlo_mem_arena(struct ath12k_base *ab,
+					  u8 *data, size_t remaining_len,
+	struct ath12k_host_mlo_mem_arena *mlomem_arena_ctx)
+{
+	int parsed_bytes;
+	int len;
+
+	if (!data)
+		return -EINVAL;
+
+	len = ath12k_mgmt_rx_reo_extract_ath12k_mlo_glb_shmem_tlv(ab, data, remaining_len,
+							     &mlomem_arena_ctx->shmem_params);
+	ATH12K_VALIDATE_PARSED_DATA_POINTER(len, data, remaining_len);
+	parsed_bytes = len;
+
+	len = ath12k_mgmt_rx_reo_extract_mlo_glb_rx_reo_snapshot_info(ab,
+								      data, remaining_len, &mlomem_arena_ctx->rx_reo_snapshot_info);
+	ATH12K_VALIDATE_PARSED_DATA_POINTER(len, data, remaining_len);
+	parsed_bytes += len;
+
+	len = ath12k_mgmt_rx_reo_parse_global_link_info(ab, data, remaining_len);
+	ATH12K_VALIDATE_PARSED_DATA_POINTER(len, data, remaining_len);
+	parsed_bytes += len;
+
+	len = ath12k_mgmt_mlo_global_chip_crash_info(ab, data, remaining_len,
+						     &mlomem_arena_ctx->global_chip_crash_info);
+	ATH12K_VALIDATE_PARSED_DATA_POINTER(len, data, remaining_len);
+	parsed_bytes += len;
+
+	return parsed_bytes;
+}
+
+int ath12k_qmi_mlo_global_snapshot_mem_init(struct ath12k_base *ab)
+{
+	struct ath12k_host_mlo_mem_arena *mlomem_arena_ctx = &ab->ag->mlomem_arena;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct target_mem_chunk *mlo_chunk;
+	int ret = 0, mlo_idx = 0;
+
+	if (!ag->mlo_mem.is_mlo_mem_avail)
+		return 0;
+
+	mlo_chunk = &ab->ag->mlo_mem.chunk[mlo_idx];
+
+	/* We need to initialize only for the first invocation */
+	mutex_lock(&mlomem_arena_ctx->mutex_lock);
+	if (mlomem_arena_ctx->init_done) {
+		mutex_unlock(&mlomem_arena_ctx->mutex_lock);
+		return 0;
+	}
+
+	if (ab->bus_params.fixed_mem_region)
+		ret = ath12k_qmi_parse_mlo_mem_arena(ab,
+						mlo_chunk->v.ioaddr,
+						mlo_chunk->size,
+						mlomem_arena_ctx);
+	else
+		ret = ath12k_qmi_parse_mlo_mem_arena(ab,
+						mlo_chunk->v.addr,
+						mlo_chunk->size,
+						mlomem_arena_ctx);
+
+	if (ret < 0) {
+		ath12k_err(ab, "parsing of mlo shared memory failed ret %d\n", ret);
+		ath12k_free_mlo_mgmt_rx_reo_per_link_info(
+				&mlomem_arena_ctx->rx_reo_snapshot_info);
+		ath12k_free_mlo_glb_per_chip_crash_info
+			(&mlomem_arena_ctx->global_chip_crash_info);
+		mutex_unlock(&mlomem_arena_ctx->mutex_lock);
+		return ret;
+	}
+
+	mlomem_arena_ctx->init_done = true;
+
+	mutex_unlock(&mlomem_arena_ctx->mutex_lock);
+
+	return 0;
+}
+
+#define MAX_TGT_MEM_MODES 5
+static int ath12k_qmi_assign_target_mem_chunk(struct ath12k_base *ab)
+{
+	struct device *dev = ab->dev;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct device_node *hremote_node = NULL, *mem_node, *dev_node = NULL;
+	struct resource res, m3_dump;
+	int host_ddr_sz, mlo_ddr_sz, sz, mlo_sz = 0;
+	int i, idx, mlo_idx, ret;
+	unsigned int bdf_location[MAX_TGT_MEM_MODES],
+		     caldb_location[MAX_TGT_MEM_MODES],
+		     caldb_size[1];
+
+	sz = ab->host_ddr_fixed_mem_off;
+	hremote_node = of_parse_phandle(dev->of_node, "memory-region", 0);
+	if (!hremote_node) {
+		ath12k_warn(ab, "qmi fail to get hremote_node\n");
+		return -EINVAL;
+	}
+
+	ret = of_address_to_resource(hremote_node, 0, &res);
+	of_node_put(hremote_node);
+	if (ret) {
+		ath12k_warn(ab, "qmi fail to get reg from hremote\n");
+		return ret;
+	}
+
+	mutex_lock(&ag->mutex_lock);
+
+	if (ag->mlo_mem.init_done)
+		goto skip_mlo_mem_init;
+
+	mem_node = of_find_node_by_name(NULL, "mlo_global_mem_0");
+	if (!mem_node) {
+		ath12k_warn(ab, "qmi fail to get MLO global memory node\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ag->mlo_mem.rsv = of_reserved_mem_lookup(mem_node);
+	of_node_put(mem_node);
+	if (!ag->mlo_mem.rsv) {
+		ath12k_warn(ab, "qmi fail to get MLO memory resource node %px\n",
+			    ag->mlo_mem.rsv);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	memset(ag->mlo_mem.chunk, 0, sizeof(ag->mlo_mem.chunk));
+	ag->mlo_mem.init_done = true;
+
+skip_mlo_mem_init:
+	mlo_ddr_sz = ag->mlo_mem.rsv->size;
+	host_ddr_sz = (res.end - res.start) + 1;
+
+	for (i = 0, idx = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
+		struct target_mem_chunk *mlo_chunk;
+		phys_addr_t paddr;
+		int remain_sz;
+
+		if (ab->qmi.target_mem[i].type == MLO_GLOBAL_MEM_REGION_TYPE) {
+			paddr = ag->mlo_mem.rsv->base + mlo_sz;
+			remain_sz = mlo_ddr_sz - mlo_sz;
+		} else {
+			paddr = res.start + sz;
+			remain_sz = host_ddr_sz - sz;
+		}
+
+		ab->qmi.target_mem[idx].paddr = paddr;
+
+		if (ab->qmi.target_mem[i].size > remain_sz) {
+			ath12k_warn(ab, "No fixed mem to assign for type %d\n",
+				    ab->qmi.target_mem[i].type);
+			ret = -EINVAL;
+			goto out;
+		}
+
+		switch (ab->qmi.target_mem[i].type) {
+		case HOST_DDR_REGION_TYPE:
+			ab->qmi.target_mem[idx].v.ioaddr =
+					ioremap(ab->qmi.target_mem[idx].paddr,
+						ab->qmi.target_mem[i].size);
+			ab->qmi.target_mem[idx].size =
+					ab->qmi.target_mem[i].size;
+			ab->qmi.target_mem[idx].type =
+					ab->qmi.target_mem[i].type;
+			sz += ab->qmi.target_mem[i].size;
+			idx++;
+			break;
+		case CALDB_MEM_REGION_TYPE:
+			if (ath12k_cold_boot_cal &&
+			    ab->hw_params->cold_boot_calib) {
+				if (ab->hif.bus == ATH12K_BUS_AHB ||
+				    ab->hif.bus == ATH12K_BUS_HYBRID) {
+					if (of_property_read_u32_array(dev->of_node,
+								       "qcom,caldb-addr", caldb_location,
+								       ARRAY_SIZE(caldb_location))) {
+						ath12k_err(ab, "CALDB_MEM_REGION Not defined in device_tree\n");
+						ret = -EINVAL;
+						goto out;
+					}
+
+					if (of_property_read_u32_array(dev->of_node,
+								       "qcom,caldb-size", caldb_size,
+								       ARRAY_SIZE(caldb_size))) {
+						ath12k_err(ab, "CALDB_SIZE Not defined in device_tree\n");
+						ret = -EINVAL;
+						goto out;
+					}
+
+					ab->qmi.target_mem[idx].paddr = caldb_location[ab->hw_params->fw_mem_mode];
+					ab->qmi.target_mem[i].size = caldb_size[0];
+
+					ab->qmi.target_mem[idx].v.ioaddr =
+						ioremap(ab->qmi.target_mem[idx].paddr,
+							ab->qmi.target_mem[i].size);
+				} else {
+					ab->qmi.target_mem[idx].v.ioaddr =
+						ioremap(ab->qmi.target_mem[idx].paddr,
+							ab->qmi.target_mem[i].size);
+					sz += ab->qmi.target_mem[i].size;
+				}
+			} else {
+				ab->qmi.target_mem[idx].paddr = 0;
+				ab->qmi.target_mem[idx].v.ioaddr = NULL;
+			}
+
+			ab->qmi.target_mem[idx].size =
+					ab->qmi.target_mem[i].size;
+			ab->qmi.target_mem[idx].type =
+					ab->qmi.target_mem[i].type;
+			idx++;
+			break;
+		case PAGEABLE_MEM_REGION_TYPE:
+			ab->qmi.target_mem[idx].v.ioaddr =
+					ioremap(ab->qmi.target_mem[idx].paddr,
+						ab->qmi.target_mem[i].size);
+			ab->qmi.target_mem[idx].size =
+					ab->qmi.target_mem[i].size;
+			ab->qmi.target_mem[idx].type =
+					ab->qmi.target_mem[i].type;
+			sz += ab->qmi.target_mem[i].size;
+			if (ret < 0)
+				ath12k_warn(ab,
+				            "qmi fail to update BHI table %d\n", ret);
+			idx++;
+			break;
+		case M3_DUMP_REGION_TYPE:
+			if (ab->hif.bus == ATH12K_BUS_AHB) {
+				dev_node = of_find_node_by_name(NULL, "m3_dump");
+				if (of_address_to_resource(dev_node, 0, &m3_dump)) {
+					ath12k_err(ab, "M3_MEM_REGION Not defined in device_tree\n");
+					ret = -EINVAL;
+					goto out;
+				} else {
+					ab->qmi.target_mem[idx].paddr = m3_dump.start;
+				}
+			} else if (ab->hif.bus == ATH12K_BUS_HYBRID) {
+				if (ab->userpd_id == USERPD_1)
+					dev_node = of_find_node_by_name(NULL, "m3_dump_qcn6432_1");
+				else if (ab->userpd_id == USERPD_2)
+					dev_node = of_find_node_by_name(NULL, "m3_dump_qcn6432_2");
+
+				if (of_address_to_resource(dev_node, 0, &m3_dump)) {
+					ath12k_err(ab, "M3_MEM_REGION Not defined in device_tree\n");
+					ret = -EINVAL;
+					goto out;
+				} else {
+					ab->qmi.target_mem[idx].paddr = m3_dump.start;
+				}
+
+			} else {
+				sz += ab->qmi.target_mem[i].size;
+			}
+			ab->qmi.target_mem[idx].v.ioaddr =
+					ioremap(ab->qmi.target_mem[idx].paddr,
+						ab->qmi.target_mem[i].size);
+			ab->qmi.target_mem[idx].size =
+					ab->qmi.target_mem[i].size;
+			ab->qmi.target_mem[idx].type =
+					ab->qmi.target_mem[i].type;
+			idx++;
+			break;
+		case MLO_GLOBAL_MEM_REGION_TYPE:
+			mlo_chunk = &ag->mlo_mem.chunk[mlo_idx];
+			if (!mlo_chunk->paddr) {
+				mlo_chunk->size = ab->qmi.target_mem[i].size;
+				mlo_chunk->type = ab->qmi.target_mem[i].type;
+				mlo_chunk->paddr = paddr;
+				mlo_chunk->v.ioaddr = ioremap(mlo_chunk->paddr,
+							      mlo_chunk->size);
+				memset_io(mlo_chunk->v.ioaddr, 0, mlo_chunk->size);
+			}
+
+			ab->qmi.target_mem[idx].paddr = mlo_chunk->paddr;
+			ab->qmi.target_mem[idx].v.ioaddr = mlo_chunk->v.ioaddr;
+			ab->qmi.target_mem[idx].size = mlo_chunk->size;
+			ab->qmi.target_mem[idx].type = mlo_chunk->type;
+			mlo_sz += mlo_chunk->size;
+			ag->mlo_mem.is_mlo_mem_avail = true;
+			idx++;
+			mlo_idx++;
+			break;
+		case BDF_MEM_REGION_TYPE:
+			if (of_property_read_u32_array(dev->of_node,
+						       "qcom,bdf-addr", bdf_location,
+						       ARRAY_SIZE(bdf_location))) {
+				ath12k_err(ab, "BDF_MEM_REGION Not defined in device_tree\n");
+				ret = -EINVAL;
+				goto out;
+			}
+			ab->qmi.target_mem[idx].paddr = bdf_location[ab->hw_params->fw_mem_mode];
+			ab->qmi.target_mem[idx].v.ioaddr =
+					ioremap(ab->qmi.target_mem[idx].paddr,
+						ab->qmi.target_mem[i].size);
+			ab->qmi.target_mem[idx].size =
+					ab->qmi.target_mem[i].size;
+			ab->qmi.target_mem[idx].type =
+					ab->qmi.target_mem[i].type;
+			idx++;
+			break;
+		default:
+			ath12k_warn(ab, "qmi ignore invalid mem req type %d\n",
+				    ab->qmi.target_mem[i].type);
+			break;
+		}
+
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi target mem seg idx %d i %d type %d size %d\n",
+			   idx, i, ab->qmi.target_mem[i].type, ab->qmi.target_mem[i].size);
+	}
+
+	mutex_unlock(&ag->mutex_lock);
+
+	ab->host_ddr_fixed_mem_off = sz;
+	ab->qmi.mem_seg_count = idx;
+
+	return 0;
+
+out:
+	ath12k_qmi_free_target_mem_chunk(ab);
+	mutex_unlock(&ag->mutex_lock);
+	return ret;
+}
+
 static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
 {
 	struct qmi_wlanfw_cap_req_msg_v01 req;
 	struct qmi_wlanfw_cap_resp_msg_v01 resp;
 	struct qmi_txn txn = {};
-	unsigned int board_id = ATH12K_BOARD_ID_DEFAULT;
+	struct device *dev = ab->dev;
+	unsigned int board_id = ATH12K_BOARD_ID_DEFAULT;
 	int ret = 0;
-	int i;
+	int r, i;
 
 	memset(&req, 0, sizeof(req));
 	memset(&resp, 0, sizeof(resp));
@@ -2249,10 +4603,19 @@
 		ab->qmi.target.chip_family = resp.chip_info.chip_family;
 	}
 
-	if (resp.board_info_valid)
+	if (!of_property_read_u32(dev->of_node, "qcom,board_id", &board_id) &&
+	    board_id != 0xFF)
+		ab->qmi.target.board_id = board_id;
+	else if (resp.board_info_valid)
 		ab->qmi.target.board_id = resp.board_info.board_id;
-	else
+	else {
+		u16 ov_board_id;
+
+		if (ab->hif.bus == ATH12K_BUS_PCI &&
+		    ath12k_pci_has_board_id_override(ab, &ov_board_id))
+			board_id = ov_board_id;
 		ab->qmi.target.board_id = board_id;
+	}
 
 	if (resp.soc_info_valid)
 		ab->qmi.target.soc_id = resp.soc_info.soc_id;
@@ -2281,6 +4644,11 @@
 		}
 	}
 
+	if (resp.rxgainlut_support_valid)
+		ab->rxgainlut_support = !!resp.rxgainlut_support;
+
+	ath12k_info(ab, "rxgainlut_support %u\n", ab->rxgainlut_support);
+
 	if (resp.eeprom_caldata_read_timeout_valid) {
 		ab->qmi.target.eeprom_caldata = resp.eeprom_caldata_read_timeout;
 		ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi cal data supported from eeprom\n");
@@ -2295,6 +4663,10 @@
 		    ab->qmi.target.fw_build_timestamp,
 		    ab->qmi.target.fw_build_id);
 
+	r = ath12k_core_check_dt(ab);
+	if (r)
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "DT bdf variant name not set.\n");
+
 out:
 	return ret;
 }
@@ -2306,6 +4678,7 @@
 	struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
 	struct qmi_txn txn = {};
 	const u8 *temp = data;
+	void __iomem *bdf_addr = NULL;
 	int ret;
 	u32 remaining = len;
 
@@ -2314,6 +4687,15 @@
 		return -ENOMEM;
 	memset(&resp, 0, sizeof(resp));
 
+	if (ab->bus_params.fixed_bdf_addr) {
+		bdf_addr = ioremap(ab->hw_params->bdf_addr, ab->hw_params->fw.board_size);
+		if (!bdf_addr) {
+			ath12k_warn(ab, "qmi ioremap error for BDF\n");
+			ret = -EIO;
+			goto out;
+		}
+	}
+
 	while (remaining) {
 		req->valid = 1;
 		req->file_id_valid = 1;
@@ -2334,7 +4716,8 @@
 			req->end = 1;
 		}
 
-		if (type == ATH12K_QMI_FILE_TYPE_EEPROM) {
+		if ((ab->bus_params.fixed_bdf_addr) ||
+		    (type == ATH12K_QMI_FILE_TYPE_EEPROM)) {
 			req->data_valid = 0;
 			req->end = 1;
 			req->data_len = ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE;
@@ -2342,6 +4725,13 @@
 			memcpy(req->data, temp, req->data_len);
 		}
 
+		if (ab->bus_params.fixed_bdf_addr) {
+			if (type == ATH12K_QMI_FILE_TYPE_CALDATA)
+				bdf_addr += ab->hw_params->fw.cal_offset;
+
+			memcpy_toio(bdf_addr, temp, len);
+		}
+
 		ret = qmi_txn_init(&ab->qmi.handle, &txn,
 				   qmi_wlanfw_bdf_download_resp_msg_v01_ei,
 				   &resp);
@@ -2371,7 +4761,8 @@
 			goto out;
 		}
 
-		if (type == ATH12K_QMI_FILE_TYPE_EEPROM) {
+		if (type == ATH12K_QMI_FILE_TYPE_EEPROM ||
+		    ab->bus_params.fixed_bdf_addr) {
 			remaining = 0;
 		} else {
 			remaining -= req->data_len;
@@ -2384,10 +4775,97 @@
 	}
 
 out:
+	if (ab->bus_params.fixed_bdf_addr)
+		iounmap(bdf_addr);
 	kfree(req);
 	return ret;
 }
 
+static int ath12k_qmi_fw_cfg_send_sync(struct ath12k_base *ab,
+				       const u8 *data, u32 len,
+				       enum wlanfw_cfg_type_v01 file_type)
+{
+	struct wlanfw_cfg_download_req_msg_v01 *req;
+	struct wlanfw_cfg_download_resp_msg_v01 *resp;
+	struct qmi_txn txn;
+	int ret = 0;
+	const u8 *temp = data;
+	unsigned int remaining = len;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	while (remaining) {
+		req->file_type_valid = 1;
+		req->file_type = file_type;
+		req->total_size_valid = 1;
+		req->total_size = remaining;
+		req->seg_id_valid = 1;
+		req->data_valid = 1;
+		req->end_valid = 1;
+
+		if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
+			req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
+		} else {
+			req->data_len = remaining;
+			req->end = 1;
+		}
+
+		memcpy(req->data, temp, req->data_len);
+
+		ret = qmi_txn_init(&ab->qmi.handle, &txn,
+				   wlanfw_cfg_download_resp_msg_v01_ei,
+				   resp);
+		if (ret < 0) {
+			ath12k_dbg(ab, ATH12K_DBG_QMI, "Failed to initialize txn for FW file download request, err: %d\n",
+				   ret);
+			goto err;
+		}
+
+		ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+				       QMI_WLANFW_CFG_DOWNLOAD_REQ_V01,
+				       WLANFW_CFG_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
+				       wlanfw_cfg_download_req_msg_v01_ei, req);
+		if (ret < 0) {
+			qmi_txn_cancel(&txn);
+			ath12k_dbg(ab, ATH12K_DBG_QMI, "Failed to send FW File download request, err: %d\n",
+				   ret);
+			goto err;
+		}
+
+		ret = qmi_txn_wait(&txn, ATH12K_QMI_WLANFW_TIMEOUT_MS);
+		if (ret < 0) {
+			ath12k_dbg(ab, ATH12K_DBG_QMI, "Failed to wait for response of FW File download request, err: %d\n",
+				   ret);
+			goto err;
+		}
+
+		if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+			ath12k_dbg(ab, ATH12K_DBG_QMI, "FW file download request failed, result: %d, err: %d\n",
+				   resp->resp.result, resp->resp.error);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		remaining -= req->data_len;
+		temp += req->data_len;
+		req->seg_id++;
+	}
+
+err:
+	kfree(req);
+	kfree(resp);
+
+	return ret;
+}
+
 static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
 				   enum ath12k_qmi_bdf_type type)
 {
@@ -2416,14 +4894,32 @@
 
 		break;
 	case ATH12K_QMI_BDF_TYPE_REGDB:
-		ret = ath12k_core_fetch_board_data_api_1(ab, &bd,
-							 ATH12K_REGDB_FILE_NAME);
+		ret = ath12k_core_fetch_regdb(ab, &bd);
 		if (ret) {
-			ath12k_warn(ab, "qmi failed to load regdb bin:\n");
+			ath12k_warn(ab, "qmi failed to load regdb:\n");
+			goto out;
+		}
+		type = ATH12K_QMI_BDF_TYPE_REGDB;
+		break;
+	case ATH12K_QMI_BDF_TYPE_RXGAINLUT:
+		ret = ath12k_core_fetch_rxgainlut(ab, &bd);
+		if (ret < 0) {
+			ath12k_warn(ab, "qmi failed to load rxgainlut\n");
 			goto out;
 		}
 		break;
 	case ATH12K_QMI_BDF_TYPE_CALIBRATION:
+		if (ath12k_skip_caldata) {
+			if (ath12k_ftm_mode) {
+				ath12k_warn(ab, "Skipping caldata download "
+					    "in FTM mode\n");
+				goto out;
+			}
+			ath12k_err(ab, "skip_caldata=1 module param is "
+				   "unsupported when FTM mode is disabled.\n");
+			ret = -EOPNOTSUPP;
+			goto out;
+		}
 
 		if (ab->qmi.target.eeprom_caldata) {
 			file_type = ATH12K_QMI_FILE_TYPE_EEPROM;
@@ -2436,11 +4932,21 @@
 			snprintf(filename, sizeof(filename), "cal-%s-%s.bin",
 				 ath12k_bus_str(ab->hif.bus), dev_name(dev));
 			fw_entry = ath12k_core_firmware_request(ab, filename);
+
 			if (!IS_ERR(fw_entry))
 				goto success;
 
-			fw_entry = ath12k_core_firmware_request(ab,
-								ATH12K_DEFAULT_CAL_FILE);
+			snprintf(filename, sizeof(filename), "%s", ATH12K_DEFAULT_CAL_FILE);
+
+			if (ab->userpd_id) {
+				snprintf(filename, sizeof(filename), "%s%d%s",
+					 ATH12K_QMI_DEF_CAL_FILE_PREFIX,
+					 ab->userpd_id,
+					 ATH12K_QMI_DEF_CAL_FILE_SUFFIX);
+			}
+
+			fw_entry = ath12k_core_firmware_request(ab, filename);
+
 			if (IS_ERR(fw_entry)) {
 				ret = PTR_ERR(fw_entry);
 				ath12k_warn(ab,
@@ -2527,7 +5033,7 @@
 {
 	struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
 
-	if (!m3_mem->vaddr)
+	if (!ab->hw_params->m3_fw_support || !m3_mem->vaddr)
 		return;
 
 	dma_free_coherent(ab->dev, m3_mem->size,
@@ -2546,14 +5052,18 @@
 	memset(&req, 0, sizeof(req));
 	memset(&resp, 0, sizeof(resp));
 
+	if (ab->hw_params->m3_fw_support) {
 	ret = ath12k_qmi_m3_load(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to load m3 firmware: %d", ret);
 		return ret;
 	}
-
 	req.addr = m3_mem->paddr;
 	req.size = m3_mem->size;
+	} else {
+		req.addr = 0;
+		req.size = 0;
+	}
 
 	ret = qmi_txn_init(&ab->qmi.handle, &txn,
 			   qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp);
@@ -2725,6 +5235,23 @@
 	return ret;
 }
 
+int ath12k_config_qdss(struct ath12k_base *ab)
+{
+	int ret;
+
+	/* Disabling qdss trace for FTM as it causes high evt latency in FW
+	 */
+	if (ab->fw_mode == ATH12K_FIRMWARE_MODE_FTM)
+		return 0;
+
+	ret = ath12k_qmi_send_qdss_config(ab);
+	if (ret < 0)
+		ath12k_warn(ab,
+			    "Failed to download QDSS config to FW: %d\n",
+			    ret);
+	return ret;
+}
+
 void ath12k_qmi_firmware_stop(struct ath12k_base *ab)
 {
 	int ret;
@@ -2756,6 +5283,162 @@
 	return 0;
 }
 
+static int ath12k_qmi_process_coldboot_calibration(struct ath12k_base *ab)
+{
+	int timeout;
+	int ret;
+
+	ret = ath12k_qmi_wlanfw_mode_send(ab, ATH12K_FIRMWARE_MODE_COLD_BOOT);
+	if (ret < 0) {
+		ath12k_warn(ab, "qmi failed to send wlan fw mode:%d\n", ret);
+		return ret;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "Coldboot calibration wait started\n");
+
+	timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
+				     (ab->qmi.cal_done  == 1),
+				     ATH12K_COLD_BOOT_FW_RESET_DELAY);
+	if (timeout <= 0) {
+		ath12k_warn(ab, "Coldboot Calibration failed - wait ended\n");
+		return 0;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "Coldboot calibration done\n");
+
+	return 0;
+}
+
+int ath12k_qmi_fwreset_from_cold_boot(struct ath12k_base *ab)
+{
+	int timeout;
+
+	if (ath12k_cold_boot_cal == 0 ||
+	    ab->hw_params->cold_boot_calib == 0) {
+		ath12k_info(ab, "Cold boot cal is not supported/enabled\n");
+		return 0;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "wait for cold boot done\n");
+
+	timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
+				     (ab->qmi.cal_done  == 1),
+				     ATH12K_COLD_BOOT_FW_RESET_DELAY);
+	if (timeout <= 0) {
+		ath12k_warn(ab, "Coldboot Calibration timed out\n");
+		/* set cal_timeout to switch to mission mode on firmware reset */
+		ab->qmi.cal_timeout = 1;
+	}
+
+	/* reset the firmware */
+	ath12k_info(ab, "power down to restart firmware in mission mode\n");
+	ath12k_qmi_firmware_stop(ab);
+	ath12k_hif_power_down(ab);
+	ath12k_info(ab, "power up to restart firmware in mission mode\n");
+	/* reset host fixed mem off to zero */
+	ab->host_ddr_fixed_mem_off = 0;
+	ath12k_hif_power_up(ab);
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "exit wait for cold boot done\n");
+	return 0;
+}
+EXPORT_SYMBOL(ath12k_qmi_fwreset_from_cold_boot);
+
+int ath12k_qmi_m3_dump_upload_done_ind_send(struct ath12k_base *ab,
+					    u32 pdev_id, int status)
+{
+	struct qmi_wlanfw_m3_dump_upload_done_req_msg_v01 *req;
+	struct qmi_wlanfw_m3_dump_upload_done_resp_msg_v01 *resp;
+	struct qmi_txn txn;
+	int ret;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	req->pdev_id = pdev_id;
+	req->status = status;
+
+	ret = qmi_txn_init(&ab->qmi.handle, &txn,
+			   qmi_wlanfw_m3_dump_upload_done_resp_msg_v01_ei, resp);
+	if (ret < 0)
+		goto out;
+
+	ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+			       QMI_WLFW_M3_DUMP_UPLOAD_DONE_REQ_V01,
+			       QMI_WLANFW_M3_DUMP_UPLOAD_DONE_REQ_MSG_V01_MAX_MSG_LEN,
+			       qmi_wlanfw_m3_dump_upload_done_req_msg_v01_ei,
+			       req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		ath12k_warn(ab, "Failed to send M3 dump upload done request, err %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+	if (ret < 0)
+		goto out;
+
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		ath12k_warn(ab, "qmi M3 upload done req failed, result: %d, err: %d\n",
+			    resp->resp.result, resp->resp.error);
+		ret = -EINVAL;
+		goto out;
+	}
+	ath12k_info(ab, "qmi m3 dump uploaded\n");
+
+out:
+	kfree(req);
+	kfree(resp);
+	return ret;
+}
+
+static void ath12k_qmi_event_m3_dump_upload_req(struct ath12k_qmi *qmi,
+						void *data)
+{
+	struct ath12k_base *ab = qmi->ab;
+	struct ath12k_qmi_m3_dump_upload_req_data *event_data = data;
+
+	ath12k_coredump_m3_dump(ab, event_data);
+}
+
+static void ath12k_qmi_qdss_mem_free(struct ath12k_qmi *qmi)
+{
+	struct ath12k_base *ab = qmi->ab;
+	int i;
+
+	for (i = 0; i < ab->qmi.qdss_mem_seg_len; i++) {
+		if (ab->qmi.qdss_mem[i].v.ioaddr) {
+			iounmap(ab->qmi.qdss_mem[i].v.ioaddr);
+			ab->qmi.qdss_mem[i].v.ioaddr = NULL;
+		}
+	}
+}
+
+static void ath12k_qmi_event_qdss_trace_save_hdlr(struct ath12k_qmi *qmi,
+						  void *data)
+{
+	struct ath12k_qmi_event_qdss_trace_save_data *event_data = data;
+	struct ath12k_base *ab = qmi->ab;
+
+	if (!ab->qmi.qdss_mem_seg_len) {
+		ath12k_warn(ab, "Memory for QDSS trace is not available\n");
+		return;
+	}
+
+	ath12k_coredump_qdss_dump(ab, event_data);
+
+	ath12k_qmi_qdss_mem_free(qmi);
+	ab->qmi.qdss_mem_seg_len = 0;
+	ab->is_qdss_tracing = false;
+}
+
 static int
 ath12k_qmi_driver_event_post(struct ath12k_qmi *qmi,
 			     enum ath12k_qmi_event_type type,
@@ -2779,9 +5462,75 @@
 	return 0;
 }
 
+void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab)
+{
+	struct ath12k_qmi *qmi = &ab->qmi;
+	bool block;
+
+	spin_lock(&qmi->event_lock);
+
+	block = ath12k_qmi_get_event_block(qmi);
+	if (block)
+		ath12k_qmi_set_event_block(qmi, false);
+
+	spin_unlock(&qmi->event_lock);
+
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "Trigger host cap for chip id %d\n",
+			ab->chip_id);
+
+	ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_HOST_CAP, NULL);
+}
+
+static bool ath12k_qmi_hw_group_host_cap_ready(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab;
+	int i;
+
+	lockdep_assert_held(&ag->mutex_lock);
+
+	for (i = 0; i < ag->num_chip; i++) {
+		ab = ag->ab[i];
+
+		if (!(ab && ab->qmi.num_radios != ATH12K_QMI_INVALID_RADIO))
+			return false;
+	}
+
+	return true;
+}
+
+static struct ath12k_base *
+ath12k_qmi_hw_group_find_blocked_chip(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab;
+	struct ath12k_qmi *qmi;
+	bool block;
+	int i;
+
+	lockdep_assert_held(&ag->mutex_lock);
+
+	for (i = 0; i < ag->num_chip; i++) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
+
+		qmi = &ab->qmi;
+
+		spin_lock(&qmi->event_lock);
+		block = ath12k_qmi_get_event_block(qmi);
+		spin_unlock(&qmi->event_lock);
+
+		if (block)
+			return ab;
+	}
+
+	return NULL;
+}
+
 static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
 {
-	struct ath12k_base *ab = qmi->ab;
+	struct ath12k_base *ab = qmi->ab, *block_ab;
+	struct ath12k_hw_group *ag = ab->ag;
+	bool host_cap_ready;
 	int ret;
 
 	ret = ath12k_qmi_fw_ind_register_send(ab);
@@ -2790,11 +5539,72 @@
 		return ret;
 	}
 
+	ath12k_qmi_phy_cap_send(ab);
+	if (ath12k_cold_boot_cal && ab->qmi.cal_done == 0 &&
+			ab->hw_params->cold_boot_calib &&
+			ab->qmi.cal_timeout == 0) {
+		/* Coldboot calibration mode */
+		ath12k_qmi_trigger_host_cap(ab);
+	} else {
+		spin_lock(&qmi->event_lock);
+		ath12k_qmi_set_event_block(&ab->qmi, true);
+		spin_unlock(&qmi->event_lock);
+
+		mutex_lock(&ag->mutex_lock);
+		host_cap_ready = ath12k_qmi_hw_group_host_cap_ready(ag);
+		if (host_cap_ready) {
+			block_ab = ath12k_qmi_hw_group_find_blocked_chip(ag);
+			if (block_ab)
+				ath12k_qmi_trigger_host_cap(block_ab);
+		}
+		mutex_unlock(&ag->mutex_lock);
+	}
+
+	return ret;
+}
+
+static int ath12k_qmi_fw_cfg(struct ath12k_base *ab)
+{
+	struct ath12k_board_data bd;
+	int ret;
+
+	ret = ath12k_core_fetch_fw_cfg(ab, &bd);
+	if (ret < 0) {
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi failed to fetch FW CFG file:%d\n", ret);
+		goto out;
+	}
+	ret = ath12k_qmi_fw_cfg_send_sync(ab, bd.data,
+					  bd.len, WLANFW_CFG_FILE_V01);
+	if (ret < 0)
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi failed to load FW CFG file\n");
+
+out:
+	ath12k_core_free_bdf(ab, &bd);
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi FW CFG download sequence completed, ret: %d\n",
+		   ret);
+
+	return ret;
+}
+
+static int ath12k_qmi_event_host_cap(struct ath12k_qmi *qmi)
+{
+	struct ath12k_base *ab = qmi->ab;
+	int ret;
+
 	ret = ath12k_qmi_host_cap_send(ab);
 	if (ret < 0) {
+		clear_bit(ATH12K_FLAG_QMI_HOST_CAP_SENT, &ab->dev_flags);
 		ath12k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret);
 		return ret;
 	}
+	set_bit(ATH12K_FLAG_QMI_HOST_CAP_SENT, &ab->dev_flags);
+
+	if (!ab->fw_cfg_support) {
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "FW CFG file is not supported\n");
+		return 0;
+	}
+
+	ret = ath12k_qmi_fw_cfg(ab);
 
 	return ret;
 }
@@ -2813,6 +5623,345 @@
 	return ret;
 }
 
+int ath12k_send_qdss_trace_mode_req(struct ath12k_base *ab,
+				    enum qmi_wlanfw_qdss_trace_mode_enum_v01 mode)
+{
+	int ret;
+	struct qmi_txn txn;
+	struct qmi_wlanfw_qdss_trace_mode_req_msg_v01 req = {};
+	struct qmi_wlanfw_qdss_trace_mode_resp_msg_v01 resp = {};
+
+	req.mode_valid = 1;
+	req.mode = mode;
+	req.option_valid = 1;
+	req.option = mode == QMI_WLANFW_QDSS_TRACE_OFF_V01 ?
+		     QMI_WLANFW_QDSS_STOP_ALL_TRACE : 0;
+	ret = qmi_txn_init(&ab->qmi.handle, &txn,
+			   qmi_wlanfw_qdss_trace_mode_resp_msg_v01_ei, &resp);
+	if (ret < 0)
+		return ret;
+
+	ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+			       QMI_WLANFW_QDSS_TRACE_MODE_REQ_V01,
+			       QMI_WLANFW_QDSS_TRACE_MODE_REQ_MSG_V01_MAX_LEN,
+			       qmi_wlanfw_qdss_trace_mode_req_msg_v01_ei, &req);
+	if (ret < 0) {
+		ath12k_warn(ab, "Failed to send QDSS trace mode request,err = %d\n", ret);
+		qmi_txn_cancel(&txn);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+	if (ret < 0)
+		goto out;
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		ath12k_warn(ab, "QDSS trace mode request failed, result: %d, err: %d\n",
+			    resp.resp.result, resp.resp.error);
+		ret = -EINVAL;
+		goto out;
+	}
+out:
+	return ret;
+}
+
+int ath12k_qmi_pci_alloc_qdss_mem(struct ath12k_qmi *qmi)
+{
+	struct ath12k_base *ab = qmi->ab;
+	struct device *dev = ab->dev;
+	struct resource res;
+	int ret;
+
+	if (ab->qmi.qdss_mem_seg_len > 1) {
+		ath12k_warn(ab, "%s: FW requests %d segments, max allowed is 1\n",
+			    __func__, ab->qmi.qdss_mem_seg_len);
+		return -EINVAL;
+	}
+
+	switch (ab->qmi.qdss_mem[0].type) {
+	case QDSS_ETR_MEM_REGION_TYPE:
+		if (ab->qmi.qdss_mem[0].size > QMI_Q6_QDSS_ETR_SIZE_QCN9274) {
+			ath12k_warn(ab, "%s: FW requests more memory 0x%x\n",
+				    __func__, ab->qmi.qdss_mem[0].size);
+			return -ENOMEM;
+		}
+
+		ab->hremote_node = of_parse_phandle(dev->of_node, "memory-region", 0);
+		if (!ab->hremote_node) {
+			ath12k_warn(ab, "qmi fail to get hremote_node for pci device\n");
+			return -ENODEV;
+		}
+
+		ret = of_address_to_resource(ab->hremote_node, 0, &res);
+		of_node_put(ab->hremote_node);
+		if (ret) {
+			ath12k_warn(ab, "qmi fail to get reg from hremote\n");
+			return ret;
+		}
+
+		if ((ab->host_ddr_fixed_mem_off + ab->qmi.qdss_mem[0].size) >
+			((res.end - res.start) + 1)) {
+			ath12k_warn(ab,
+				    "No Fixed mem to allocate for QDSS_ETR_MEM_REGION_TYPE");
+			return -EINVAL;
+		}
+
+		ab->qmi.qdss_mem[0].paddr = (phys_addr_t)res.start + ab->host_ddr_fixed_mem_off;
+		ab->qmi.qdss_mem[0].v.ioaddr =
+			ioremap(ab->qmi.qdss_mem[0].paddr,
+				ab->qmi.qdss_mem[0].size);
+		if (!ab->qmi.qdss_mem[0].v.ioaddr) {
+			ath12k_warn(ab, "WARNING etr-addr remap failed\n");
+			return -ENOMEM;
+		}
+		ab->host_ddr_fixed_mem_off += ab->qmi.qdss_mem[0].size;
+		break;
+	default:
+		ath12k_warn(ab, "qmi ignore invalid qdss mem req type %d\n",
+			    ab->qmi.qdss_mem[0].type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static
+struct device_node *ath12k_get_etr_dev_node(struct ath12k_base *ab)
+{
+	struct device_node *dev_node = NULL;
+
+	if (ab->userpd_id) {
+		if (ab->userpd_id == USERPD_1)
+			dev_node = of_find_node_by_name(NULL,
+							"q6_qcn6432_etr_1");
+		else if (ab->userpd_id == USERPD_2)
+			dev_node = of_find_node_by_name(NULL,
+							"q6_qcn6432_etr_2");
+	} else {
+		dev_node = of_find_node_by_name(NULL, "q6_etr_dump");
+	}
+
+	return dev_node;
+}
+
+int ath12k_qmi_qdss_mem_alloc(struct ath12k_qmi *qmi)
+{
+	int ret, i;
+	struct ath12k_base *ab = qmi->ab;
+	struct device_node *dev_node = NULL;
+	struct resource q6_etr;
+
+	switch (ab->hif.bus) {
+	case ATH12K_BUS_AHB:
+	case ATH12K_BUS_HYBRID:
+		dev_node = ath12k_get_etr_dev_node(ab);
+		if (!dev_node) {
+			ath12k_err(ab, "No q6_etr_dump available in dts\n");
+			return -ENODEV;
+		}
+		ret = of_address_to_resource(dev_node, 0, &q6_etr);
+		of_node_put(dev_node);
+		if (ret) {
+			ath12k_err(ab, "Failed to get resource for q6_etr_dump\n");
+			return ret;
+		}
+		for (i = 0; i < ab->qmi.qdss_mem_seg_len; i++) {
+			ab->qmi.qdss_mem[i].paddr = q6_etr.start;
+			ab->qmi.qdss_mem[i].size = resource_size(&q6_etr);
+			ab->qmi.qdss_mem[i].type = QDSS_ETR_MEM_REGION_TYPE;
+			ab->qmi.qdss_mem[i].v.ioaddr =
+				ioremap(ab->qmi.qdss_mem[i].paddr,
+					ab->qmi.qdss_mem[i].size);
+			if (!ab->qmi.qdss_mem[i].v.ioaddr) {
+				ath12k_err(ab, "Error: etr-addr remap failed\n");
+				return -ENOMEM;
+			}
+			ath12k_dbg(ab, ATH12K_DBG_QMI,
+				   "QDSS mem addr pa 0x%x va 0x%p, size 0x%x",
+				   (unsigned int)ab->qmi.qdss_mem[i].paddr,
+				   ab->qmi.qdss_mem[i].v.ioaddr,
+				   (unsigned int)ab->qmi.qdss_mem[i].size);
+		}
+		break;
+	case ATH12K_BUS_PCI:
+		ret = ath12k_qmi_pci_alloc_qdss_mem(qmi);
+		break;
+	default:
+		ath12k_warn(ab, "invalid bus type: %d", ab->hif.bus);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+int ath12k_qmi_qdss_trace_mem_info_send_sync(struct ath12k_base *ab)
+{
+	struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
+	struct qmi_wlanfw_respond_mem_resp_msg_v01 resp = {};
+	struct qmi_txn txn;
+	int ret, i;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	req->mem_seg_len = ab->qmi.qdss_mem_seg_len;
+
+	for (i = 0; i < req->mem_seg_len ; i++) {
+		req->mem_seg[i].addr = ab->qmi.qdss_mem[i].paddr;
+		req->mem_seg[i].size = ab->qmi.qdss_mem[i].size;
+		req->mem_seg[i].type = ab->qmi.qdss_mem[i].type;
+	}
+
+	ret = qmi_txn_init(&ab->qmi.handle, &txn,
+			   qmi_wlanfw_respond_mem_resp_msg_v01_ei, &resp);
+
+	if (ret < 0) {
+		ath12k_warn(ab, "Fail to initialize txn for QDSS trace mem request: err %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+			       QMI_WLFW_QDSS_TRACE_MEM_INFO_REQ_V01,
+			       QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
+			       qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
+
+	if (ret < 0) {
+		ath12k_warn(ab, "qmi failed to respond memory request, err = %d\n",
+			    ret);
+		qmi_txn_cancel(&txn);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn,
+			   msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+	if (ret < 0) {
+		ath12k_warn(ab, "qmi failed memory request, err = %d\n", ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		ath12k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
+			    resp.resp.result, resp.resp.error);
+		ret = -EINVAL;
+		goto out;
+	}
+out:
+	kfree(req);
+	return ret;
+}
+
+static void ath12k_qmi_event_qdss_trace_req_mem_hdlr(struct ath12k_qmi *qmi)
+{
+	int ret;
+	struct ath12k_base *ab = qmi->ab;
+
+	ret = ath12k_qmi_qdss_mem_alloc(qmi);
+	if (ret < 0) {
+		ath12k_err(ab, "failed to allocate memory for qdss:%d\n", ret);
+		return;
+	}
+
+	ret = ath12k_qmi_qdss_trace_mem_info_send_sync(ab);
+	if (ret < 0) {
+		ath12k_warn(ab,
+			    "qdss trace mem info send sync failed:%d\n", ret);
+		ath12k_qmi_qdss_mem_free(qmi);
+		return;
+	}
+	/* After qdss_trace_mem_info(QMI_WLFW_QDSS_TRACE_MEM_INFO_REQ_V01),
+	 * the firmware will take one second at max
+	 * for its configuration. We shouldn't send qdss_trace request
+	 * before that.
+	 */
+	msleep(1000);
+	ret = ath12k_send_qdss_trace_mode_req(ab, QMI_WLANFW_QDSS_TRACE_ON_V01);
+	if (ret < 0) {
+		ath12k_warn(ab, "Failed to enable QDSS trace: %d\n", ret);
+		ath12k_qmi_qdss_mem_free(qmi);
+		return;
+	}
+	ab->is_qdss_tracing = true;
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "QDSS configuration is completed and trace started\n");
+}
+
+static int ath12k_qmi_request_device_info(struct ath12k_base *ab)
+{
+	struct qmi_wlanfw_device_info_req_msg_v01 req;
+	struct qmi_wlanfw_device_info_resp_msg_v01 resp;
+	struct qmi_txn txn = {};
+	void *bar_addr_va = NULL;
+	int ret = 0;
+
+	/*device info message only supported for internal-PCI devices */
+	if (ab->hw_rev != ATH12K_HW_QCN6432_HW10)
+		return 0;
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	ret = qmi_txn_init(&ab->qmi.handle, &txn,
+			   qmi_wlanfw_device_info_resp_msg_v01_ei, &resp);
+	if (ret < 0)
+		goto out;
+
+	ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+			       QMI_WLANFW_DEVICE_INFO_REQ_V01,
+			       QMI_WLANFW_DEVICE_INFO_REQ_MSG_V01,
+			       qmi_wlanfw_device_info_req_msg_v01_ei, &req);
+	if (ret < 0) {
+		ath12k_warn(ab, "qmi failed to send target device info request, err = %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+	if (ret < 0) {
+		ath12k_warn(ab, "qmi failed target device info request %d\n", ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		ath12k_warn(ab, "qmi device info req failed, result: %d, err: %d\n",
+			    resp.resp.result, resp.resp.error);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!resp.bar_addr_valid || !resp.bar_size_valid) {
+		ath12k_warn(ab, "qmi device info response invalid, result: %d, err: %d\n",
+			    resp.resp.result, resp.resp.error);
+		ret = -EINVAL;
+		goto out;
+	}
+	if (!resp.bar_addr ||
+	    resp.bar_size != QCN6432_DEVICE_BAR_SIZE) {
+		ath12k_warn(ab, "qmi device info invalid addr and size, result: %d, err: %d\n",
+			    resp.resp.result, resp.resp.error);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	bar_addr_va = ioremap(resp.bar_addr, resp.bar_size);
+	if (!bar_addr_va) {
+		ath12k_warn(ab, "qmi device info ioremap failed\n");
+		ab->mem_len = 0;
+		ret = -EIO;
+		goto out;
+	}
+
+	ab->mem = bar_addr_va;
+	ab->mem_len = resp.bar_size;
+
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "Device BAR Info pa: 0x%llx, va: 0x%p, size: 0x%lx\n",
+		   resp.bar_addr, ab->mem, ab->mem_len);
+
+	ath12k_hif_config_static_window(ab);
+	return 0;
+out:
+	return ret;
+}
+
 static int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
 {
 	struct ath12k_base *ab = qmi->ab;
@@ -2824,11 +5973,19 @@
 		return ret;
 	}
 
+	ret = ath12k_qmi_request_device_info(ab);
+	if (ret < 0) {
+		ath12k_warn(ab, "qmi failed to req device info:%d\n", ret);
+		return ret;
+	}
+
+	if (!ab->bus_params.fixed_bdf_addr) {
 	ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_REGDB);
 	if (ret < 0) {
 		ath12k_warn(ab, "qmi failed to load regdb file:%d\n", ret);
 		return ret;
 	}
+	}
 
 	ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_ELF);
 	if (ret < 0) {
@@ -2836,6 +5993,12 @@
 		return ret;
 	}
 
+	if (ab->rxgainlut_support) {
+		ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_RXGAINLUT);
+		if (ret < 0)
+			ath12k_warn(ab, "qmi failed to load rxgainlut: %d\n", ret);
+	}
+
 	if (ab->hw_params->download_calib) {
 		ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_CALIBRATION);
 		if (ret < 0)
@@ -2873,16 +6036,26 @@
 	for (i = 0; i < qmi->mem_seg_count ; i++) {
 		ab->qmi.target_mem[i].type = msg->mem_seg[i].type;
 		ab->qmi.target_mem[i].size = msg->mem_seg[i].size;
+
 		ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi mem seg type %d size %d\n",
 			   msg->mem_seg[i].type, msg->mem_seg[i].size);
 	}
 
+	if (ab->bus_params.fixed_mem_region) {
+		ret = ath12k_qmi_assign_target_mem_chunk(ab);
+		if (ret) {
+			ath12k_warn(ab, "qmi failed to assign target memory: %d\n",
+				    ret);
+			return;
+		}
+	} else {
 	ret = ath12k_qmi_alloc_target_mem_chunk(ab);
 	if (ret) {
 		ath12k_warn(ab, "qmi failed to alloc target memory: %d\n",
 			    ret);
 		return;
 	}
+	}
 
 	ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_REQUEST_MEM, NULL);
 }
@@ -2911,6 +6084,171 @@
 	ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_FW_READY, NULL);
 }
 
+static void ath12k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi_hdl,
+						 struct sockaddr_qrtr *sq,
+						 struct qmi_txn *txn,
+						 const void *decoded)
+{
+	struct ath12k_qmi *qmi = container_of(qmi_hdl,
+					      struct ath12k_qmi, handle);
+	struct ath12k_base *ab = qmi->ab;
+
+	ab->qmi.cal_done = 1;
+	wake_up(&ab->qmi.cold_boot_waitq);
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi cold boot calibration done\n");
+}
+
+static void ath12k_qmi_m3_dump_upload_req_ind_cb(struct qmi_handle *qmi_hdl,
+						 struct sockaddr_qrtr *sq,
+						 struct qmi_txn *txn,
+						 const void *data)
+{
+	struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle);
+	struct ath12k_base *ab = qmi->ab;
+	const struct qmi_wlanfw_m3_dump_upload_req_ind_msg_v01 *msg = data;
+	struct ath12k_qmi_m3_dump_upload_req_data *event_data;
+
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi m3 dump memory request\n");
+
+	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
+	if (!event_data)
+		return;
+
+	event_data->pdev_id = msg->pdev_id;
+	event_data->addr = msg->addr;
+	event_data->size = msg->size;
+
+	ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_M3_DUMP_UPLOAD_REQ,
+				     event_data);
+}
+
+static void ath12k_wlfw_qdss_trace_req_mem_ind_cb(struct qmi_handle *qmi_hdl,
+						  struct sockaddr_qrtr *sq,
+						  struct qmi_txn *txn,
+						  const void *data)
+{
+	struct ath12k_qmi *qmi = container_of(qmi_hdl,
+					      struct ath12k_qmi,
+					      handle);
+	struct ath12k_base *ab = qmi->ab;
+	const struct qmi_wlanfw_request_mem_ind_msg_v01 *msg = data;
+	int i;
+
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "qdss trace request memory from firmware\n");
+	ab->qmi.qdss_mem_seg_len = msg->mem_seg_len;
+
+	if (msg->mem_seg_len > 1) {
+		ath12k_warn(ab, "%s: FW requests %d segments, overwriting it with 1",
+			    __func__, msg->mem_seg_len);
+		ab->qmi.qdss_mem_seg_len = 1;
+	}
+
+	for (i = 0; i < ab->qmi.qdss_mem_seg_len; i++) {
+		ab->qmi.qdss_mem[i].type = msg->mem_seg[i].type;
+		ab->qmi.qdss_mem[i].size = msg->mem_seg[i].size;
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi mem seg type %d size %d\n",
+			   msg->mem_seg[i].type, msg->mem_seg[i].size);
+	}
+
+	ath12k_qmi_driver_event_post(qmi,
+				     ATH12K_QMI_EVENT_QDSS_TRACE_REQ_MEM,
+				     NULL);
+}
+
+static void ath12k_wlfw_qdss_trace_save_ind_cb(struct qmi_handle *qmi_hdl,
+					       struct sockaddr_qrtr *sq,
+					       struct qmi_txn *txn,
+					       const void *data)
+{
+	struct ath12k_qmi *qmi = container_of(qmi_hdl,
+					      struct ath12k_qmi,
+					      handle);
+	struct ath12k_base *ab = qmi->ab;
+	const struct qmi_wlanfw_qdss_trace_save_ind_msg_v01 *ind_msg = data;
+	struct ath12k_qmi_event_qdss_trace_save_data *event_data;
+	int i;
+
+	if (ind_msg->source == 1)
+		return;
+
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "Received qdss trace save indication\n");
+	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
+
+	if (!event_data)
+		return;
+
+	if (ind_msg->mem_seg_valid) {
+		if (ind_msg->mem_seg_len > QDSS_TRACE_SEG_LEN_MAX) {
+			ath12k_err(ab, "Invalid seg len %u\n",
+				   ind_msg->mem_seg_len);
+			goto free_event_data;
+		}
+
+		event_data->mem_seg_len = ind_msg->mem_seg_len;
+		for (i = 0; i < ind_msg->mem_seg_len; i++) {
+			event_data->mem_seg[i].addr = ind_msg->mem_seg[i].addr;
+			event_data->mem_seg[i].size = ind_msg->mem_seg[i].size;
+		}
+	}
+
+	event_data->total_size = ind_msg->total_size;
+	ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_QDSS_TRACE_SAVE,
+				     event_data);
+	return;
+
+free_event_data:
+	kfree(event_data);
+}
+
+int ath12k_enable_fwlog(struct ath12k_base *ab)
+{
+	struct wlfw_ini_req_msg_v01 *req;
+	struct wlfw_ini_resp_msg_v01 resp = {};
+	struct qmi_txn txn = {};
+	int ret = 0;
+
+	if (!ab->hw_params->en_fwlog)
+		return 0;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	req->enablefwlog_valid = 1;
+	req->enablefwlog = 1;
+
+	ret = qmi_txn_init(&ab->qmi.handle, &txn, wlfw_ini_resp_msg_v01_ei, &resp);
+	if (ret < 0)
+		goto out;
+
+	ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+			       QMI_WLFW_INI_REQ_V01,
+			       WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN,
+			       wlfw_ini_req_msg_v01_ei, req);
+
+	if (ret < 0) {
+		ath12k_warn(ab, "Failed to send init request for enabling fwlog = %d\n",
+			    ret);
+		qmi_txn_cancel(&txn);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+	if (ret < 0) {
+		ath12k_warn(ab, "fwlog enable wait for resp failed: %d\n", ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		ath12k_warn(ab, "fwlog enable request failed, result: %d, err: %d\n",
+			    resp.resp.result, resp.resp.error);
+		ret = -EINVAL;
+	}
+out:
+	kfree(req);
+	return ret;
+}
+
 static const struct qmi_msg_handler ath12k_qmi_msg_handlers[] = {
 	{
 		.type = QMI_INDICATION,
@@ -2933,6 +6271,48 @@
 		.decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01),
 		.fn = ath12k_qmi_msg_fw_ready_cb,
 	},
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01,
+		.ei = qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei,
+		.decoded_size =
+			sizeof(struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01),
+		.fn = ath12k_qmi_msg_cold_boot_cal_done_cb,
+	},
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01,
+		.ei = qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei,
+		.decoded_size =
+			sizeof(struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01),
+		.fn = ath12k_qmi_msg_cold_boot_cal_done_cb,
+	},
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_WLFW_M3_DUMP_UPLOAD_REQ_IND_V01,
+		.ei = qmi_wlanfw_m3_dump_upload_req_ind_msg_v01_ei,
+		.decoded_size =
+			sizeof(struct qmi_wlanfw_m3_dump_upload_req_ind_msg_v01),
+		.fn = ath12k_qmi_m3_dump_upload_req_ind_cb,
+	},
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_WLFW_QDSS_TRACE_REQ_MEM_IND_V01,
+		.ei = qmi_wlanfw_request_mem_ind_msg_v01_ei,
+		.decoded_size =
+				sizeof(struct qmi_wlanfw_request_mem_ind_msg_v01),
+		.fn = ath12k_wlfw_qdss_trace_req_mem_ind_cb,
+	},
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_WLFW_QDSS_TRACE_SAVE_IND_V01,
+		.ei = qmi_wlanfw_qdss_trace_save_ind_msg_v01_ei,
+		.decoded_size =
+				sizeof(struct qmi_wlanfw_qdss_trace_save_ind_msg_v01),
+		.fn = ath12k_wlfw_qdss_trace_save_ind_cb,
+	},
+	/* new handlers go above; the empty terminator entry must stay last */
+	{ /* terminator entry */ }
 };
 
 static int ath12k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
@@ -2975,6 +6355,23 @@
 	.del_server = ath12k_qmi_ops_del_server,
 };
 
+static int ath12k_wait_for_gic_msi(struct ath12k_base *ab)
+{
+	int timeout;
+
+	if (ab->hw_rev != ATH12K_HW_QCN6432_HW10)
+		return 0;
+
+	timeout = wait_event_timeout(ab->ipci.gic_msi_waitq,
+				     (ab->ipci.gic_enabled == 1),
+				     ATH12K_RCV_GIC_MSI_HDLR_DELAY);
+	if (timeout <= 0) {
+		ath12k_warn(ab, "Receive gic msi handler timed out\n");
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
 static void ath12k_qmi_driver_event_work(struct work_struct *work)
 {
 	struct ath12k_qmi *qmi = container_of(work, struct ath12k_qmi,
@@ -2984,6 +6381,7 @@
 	int ret;
 
 	spin_lock(&qmi->event_lock);
+
 	while (!list_empty(&qmi->event_list)) {
 		event = list_first_entry(&qmi->event_list,
 					 struct ath12k_qmi_driver_event, list);
@@ -3001,7 +6399,7 @@
 			break;
 		case ATH12K_QMI_EVENT_SERVER_EXIT:
 			set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
-			set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
+			clear_bit(ATH12K_FLAG_WMI_INIT_DONE, &ab->dev_flags);
 			break;
 		case ATH12K_QMI_EVENT_REQUEST_MEM:
 			ret = ath12k_qmi_event_mem_request(qmi);
@@ -3015,19 +6413,53 @@
 			break;
 		case ATH12K_QMI_EVENT_FW_READY:
 			clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
-			if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+			if (test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE,
+				     &ab->dev_flags)) {
 				ath12k_hal_dump_srng_stats(ab);
 				queue_work(ab->workqueue, &ab->restart_work);
 				break;
 			}
 
+			if (ath12k_cold_boot_cal && ab->qmi.cal_done == 0 &&
+			    ab->hw_params->cold_boot_calib) {
+				ath12k_qmi_process_coldboot_calibration(ab);
+			} else {
 			clear_bit(ATH12K_FLAG_CRASH_FLUSH,
 				  &ab->dev_flags);
 			clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
-			ath12k_core_qmi_firmware_ready(ab);
-			set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+			ret = ath12k_wait_for_gic_msi(ab);
+			if (ret) {
+				ath12k_warn(ab, "failed to get qgic handler for dev %d ret: %d\n",
+					    ab->hw_rev, ret);
+				break;
+			}
+			ret = ath12k_core_qmi_firmware_ready(ab);
+			if (ret) {
+				ath12k_warn(ab, "failed to init after firmware ready: %d\n", ret);
+				set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
+				break;
+			}
+			set_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE,
+				&ab->dev_flags);
+			}
 
 			break;
+		case ATH12K_QMI_EVENT_QDSS_TRACE_REQ_MEM:
+			ath12k_qmi_event_qdss_trace_req_mem_hdlr(qmi);
+			break;
+		case ATH12K_QMI_EVENT_QDSS_TRACE_SAVE:
+			ath12k_qmi_event_qdss_trace_save_hdlr(qmi, event->data);
+			break;
+		case ATH12K_QMI_EVENT_COLD_BOOT_CAL_DONE:
+			break;
+		case ATH12K_QMI_EVENT_M3_DUMP_UPLOAD_REQ:
+			ath12k_qmi_event_m3_dump_upload_req(qmi, event->data);
+			break;
+		case ATH12K_QMI_EVENT_HOST_CAP:
+			ret = ath12k_qmi_event_host_cap(qmi);
+			if (ret < 0)
+				set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
+			break;
 		default:
 			ath12k_warn(ab, "invalid event type: %d", event->type);
 			break;
@@ -3048,7 +6480,7 @@
 	memset(&ab->qmi.target_mem, 0, sizeof(struct target_mem_chunk));
 	ab->qmi.ab = ab;
 
-	ab->qmi.target_mem_mode = ATH12K_QMI_TARGET_MEM_MODE_DEFAULT;
+	ab->qmi.target_mem_mode = ab->hw_params->fw_mem_mode;
 	ret = qmi_handle_init(&ab->qmi.handle, ATH12K_QMI_RESP_LEN_MAX,
 			      &ath12k_qmi_ops, ath12k_qmi_msg_handlers);
 	if (ret < 0) {
@@ -3056,8 +6488,7 @@
 		return ret;
 	}
 
-	ab->qmi.event_wq = alloc_workqueue("ath12k_qmi_driver_event",
-					   WQ_UNBOUND, 1);
+	ab->qmi.event_wq = alloc_ordered_workqueue("ath12k_qmi_driver_event", 0);
 	if (!ab->qmi.event_wq) {
 		ath12k_err(ab, "failed to allocate workqueue\n");
 		return -EFAULT;
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/qmi.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/qmi.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/qmi.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/qmi.h	2024-03-18 14:40:14.859741552 +0100
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH12K_QMI_H
@@ -15,33 +15,61 @@
 #define ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE	64
 #define ATH12K_QMI_CALDB_ADDRESS		0x4BA00000
 #define ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01	128
-#define ATH12K_QMI_WLFW_NODE_ID_BASE		0x07
 #define ATH12K_QMI_WLFW_SERVICE_ID_V01		0x45
 #define ATH12K_QMI_WLFW_SERVICE_VERS_V01	0x01
 #define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01	0x02
 #define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_WCN7850 0x1
 
 #define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274	0x07
+#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ5332	0x2
+#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN6432	0x60
 #define ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01	32
 #define ATH12K_QMI_RESP_LEN_MAX			8192
 #define ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01	52
-#define ATH12K_QMI_CALDB_SIZE			0x480000
+#define ATH12K_QMI_CALDB_SIZE			0x800000
 #define ATH12K_QMI_BDF_EXT_STR_LENGTH		0x20
 #define ATH12K_QMI_FW_MEM_REQ_SEGMENT_CNT	3
 #define ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01 4
 #define ATH12K_QMI_DEVMEM_CMEM_INDEX	0
+#define ATH12K_QMI_MAX_QDSS_CONFIG_FILE_NAME_SIZE 64
+#define ATH12K_QMI_DEFAULT_QDSS_CONFIG_FILE_NAME "qdss_trace_config.bin"
+#define ATH12K_QMI_M3_DUMP_SIZE			0x100000
 
 #define QMI_WLFW_REQUEST_MEM_IND_V01		0x0035
 #define QMI_WLFW_FW_MEM_READY_IND_V01		0x0037
+#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01	0x0021
 #define QMI_WLFW_FW_READY_IND_V01		0x0038
+#define QMI_WLFW_QDSS_TRACE_REQ_MEM_IND_V01     0x003F
+#define QMI_Q6_QDSS_ETR_SIZE_QCN9274            0x100000
+#define QMI_WLFW_QDSS_TRACE_SAVE_IND_V01        0x0041
+#define QMI_Q6_QDSS_ETR_OFFSET_QCN9274		0x2500000
+#define QMI_WLFW_M3_DUMP_UPLOAD_REQ_IND_V01	0x004D
+#define QMI_WLFW_M3_DUMP_UPLOAD_DONE_REQ_V01	0x004E
 
 #define QMI_WLANFW_MAX_DATA_SIZE_V01		6144
 #define ATH12K_FIRMWARE_MODE_OFF		4
-#define ATH12K_QMI_TARGET_MEM_MODE_DEFAULT	0
+#define ATH12K_COLD_BOOT_FW_RESET_DELAY		(60 * HZ)
 
 #define ATH12K_BOARD_ID_DEFAULT	0xFF
 
+#define ATH12K_QMI_INVALID_RADIO	0xFF
+#define QCN6432_DEVICE_BAR_SIZE		0x200000
+#define ATH12K_RCV_GIC_MSI_HDLR_DELAY		(3 * HZ)
+
+enum ath12k_target_mem_mode {
+	ATH12K_QMI_TARGET_MEM_MODE_DEFAULT = 0,
+	ATH12K_QMI_TARGET_MEM_MODE_512M,
+};
+
+/* userpd_id in multi pd arch */
+enum userpd_id {
+	USERPD_0 = 0,
+	USERPD_1,
+	USERPD_2,
+};
+
 struct ath12k_base;
+struct ath12k_host_mlo_glb_rx_reo_snapshot_info;
 
 enum ath12k_qmi_file_type {
 	ATH12K_QMI_FILE_TYPE_BDF_GOLDEN	= 0,
@@ -55,6 +83,7 @@
 	ATH12K_QMI_BDF_TYPE_ELF			= 1,
 	ATH12K_QMI_BDF_TYPE_REGDB		= 4,
 	ATH12K_QMI_BDF_TYPE_CALIBRATION		= 5,
+	ATH12K_QMI_BDF_TYPE_RXGAINLUT		= 7,
 };
 
 enum ath12k_qmi_event_type {
@@ -63,12 +92,18 @@
 	ATH12K_QMI_EVENT_REQUEST_MEM,
 	ATH12K_QMI_EVENT_FW_MEM_READY,
 	ATH12K_QMI_EVENT_FW_READY,
+	ATH12K_QMI_EVENT_COLD_BOOT_CAL_START,
+	ATH12K_QMI_EVENT_COLD_BOOT_CAL_DONE,
 	ATH12K_QMI_EVENT_REGISTER_DRIVER,
 	ATH12K_QMI_EVENT_UNREGISTER_DRIVER,
 	ATH12K_QMI_EVENT_RECOVERY,
 	ATH12K_QMI_EVENT_FORCE_FW_ASSERT,
 	ATH12K_QMI_EVENT_POWER_UP,
 	ATH12K_QMI_EVENT_POWER_DOWN,
+	ATH12K_QMI_EVENT_QDSS_TRACE_REQ_MEM = 15,
+	ATH12K_QMI_EVENT_QDSS_TRACE_SAVE,
+	ATH12K_QMI_EVENT_M3_DUMP_UPLOAD_REQ = 18,
+	ATH12K_QMI_EVENT_HOST_CAP,
 	ATH12K_QMI_EVENT_MAX,
 };
 
@@ -78,6 +113,13 @@
 	void *data;
 };
 
+struct ath12k_qmi_m3_dump_data {
+	u32 pdev_id;
+	u32 size;
+	u64 timestamp;
+	char *addr;
+};
+
 struct ath12k_qmi_ce_cfg {
 	const struct ce_pipe_config *tgt_ce;
 	int tgt_ce_len;
@@ -138,22 +180,90 @@
 	struct ath12k_qmi_ce_cfg ce_cfg;
 	struct target_mem_chunk target_mem[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
 	u32 mem_seg_count;
+	struct target_mem_chunk qdss_mem[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+	u32 qdss_mem_seg_len;
 	u32 target_mem_mode;
 	bool target_mem_delayed;
 	u8 cal_done;
+	u8 cal_timeout;
+	bool block_event;	/* protect by event spinlock */
+	u8 num_radios;
 	struct target_info target;
 	struct m3_mem_region m3_mem;
 	unsigned int service_ins_id;
+	wait_queue_head_t cold_boot_waitq;
 	struct dev_mem_info dev_mem[ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01];
 };
 
-#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN		261
+struct ath12k_qmi_m3_dump_upload_req_data {
+	u32 pdev_id;
+	u64 addr;
+	u64 size;
+};
+
+#define QMI_WLANFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_MSG_V01_MAX_LEN 6167
+#define QMI_WLANFW_QDSS_TRACE_CONFIG_DOWNLOAD_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_V01 0x0044
+#define QMI_WLANFW_QDSS_TRACE_CONFIG_DOWNLOAD_RESP_V01 0x0044
+
+struct qmi_wlanfw_qdss_trace_config_download_req_msg_v01 {
+	u8 total_size_valid;
+	u32 total_size;
+	u8 seg_id_valid;
+	u32 seg_id;
+	u8 data_valid;
+	u32 data_len;
+	u8 data[QMI_WLANFW_MAX_DATA_SIZE_V01];
+	u8 end_valid;
+	u8 end;
+};
+
+struct qmi_wlanfw_qdss_trace_config_download_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_QDSS_TRACE_MODE_REQ_V01 0x0045
+#define QMI_WLANFW_QDSS_TRACE_MODE_REQ_MSG_V01_MAX_LEN 18
+#define QMI_WLANFW_QDSS_TRACE_MODE_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_QDSS_TRACE_MODE_RESP_V01 0x0045
+#define QMI_WLANFW_QDSS_STOP_ALL_TRACE 0x01
+
+enum qmi_wlanfw_qdss_trace_mode_enum_v01 {
+	WLFW_QDSS_TRACE_MODE_ENUM_MIN_VAL_V01 = INT_MIN,
+	QMI_WLANFW_QDSS_TRACE_OFF_V01 = 0,
+	QMI_WLANFW_QDSS_TRACE_ON_V01 = 1,
+	WLFW_QDSS_TRACE_MODE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+struct qmi_wlanfw_qdss_trace_mode_req_msg_v01 {
+	u8 mode_valid;
+	enum qmi_wlanfw_qdss_trace_mode_enum_v01 mode;
+	u8 option_valid;
+	u64 option;
+};
+
+struct qmi_wlanfw_qdss_trace_mode_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN		355
+
+struct qmi_wlanfw_m3_dump_upload_done_req_msg_v01 {
+	u32 pdev_id;
+	u32 status;
+};
+
+struct qmi_wlanfw_m3_dump_upload_done_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
 #define QMI_WLANFW_HOST_CAP_REQ_V01			0x0034
 #define QMI_WLANFW_HOST_CAP_RESP_MSG_V01_MAX_LEN	7
 #define QMI_WLFW_HOST_CAP_RESP_V01			0x0034
 #define QMI_WLFW_MAX_NUM_GPIO_V01			32
 #define QMI_WLANFW_MAX_PLATFORM_NAME_LEN_V01		64
 #define QMI_WLANFW_MAX_HOST_DDR_RANGE_SIZE_V01		3
+#define QDSS_ETR_MEM_REGION_TYPE                        0x6
 
 struct qmi_wlanfw_host_ddr_range {
 	u64 start;
@@ -165,6 +275,7 @@
 	BDF_MEM_REGION_TYPE = 0x2,
 	M3_DUMP_REGION_TYPE = 0x3,
 	CALDB_MEM_REGION_TYPE = 0x4,
+	MLO_GLOBAL_MEM_REGION_TYPE = 0x8,
 	PAGEABLE_MEM_REGION_TYPE = 0x9,
 };
 
@@ -189,6 +300,7 @@
 enum ath12k_qmi_cnss_feature {
 	CNSS_FEATURE_MIN_ENUM_VAL_V01 = INT_MIN,
 	CNSS_QDSS_CFG_MISS_V01 = 3,
+	CNSS_PCIE_PERST_NO_PULL_V01 = 4,
 	CNSS_MAX_FEATURE_V01 = 64,
 	CNSS_FEATURE_MAX_ENUM_VAL_V01 = INT_MAX,
 };
@@ -243,13 +355,31 @@
 	struct wlfw_host_mlo_chip_info_s_v01 mlo_chip_info[QMI_WLFW_MAX_NUM_MLO_CHIPS_V01];
 	u8 feature_list_valid;
 	u64 feature_list;
-
+	u8 fw_cfg_support_valid;
+	u8 fw_cfg_support;
 };
 
 struct qmi_wlanfw_host_cap_resp_msg_v01 {
 	struct qmi_response_type_v01 resp;
 };
 
+#define QMI_WLANFW_PHY_CAP_REQ_MSG_V01_MAX_LEN		0
+#define QMI_WLANFW_PHY_CAP_REQ_V01			0x0057
+#define QMI_WLANFW_PHY_CAP_RESP_MSG_V01_MAX_LEN		18
+#define QMI_WLANFW_PHY_CAP_RESP_V01			0x0057
+
+struct qmi_wlanfw_phy_cap_req_msg_v01 {
+	char placeholder;
+};
+
+struct qmi_wlanfw_phy_cap_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 num_phy_valid;
+	u8 num_phy;
+	u8 board_id_valid;
+	u32 board_id;
+};
+
 #define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN		54
 #define QMI_WLANFW_IND_REGISTER_REQ_V01				0x0020
 #define QMI_WLANFW_IND_REGISTER_RESP_MSG_V01_MAX_LEN		18
@@ -281,6 +411,14 @@
 	u8 xo_cal_enable;
 	u8 cal_done_enable_valid;
 	u8 cal_done_enable;
+	u8 qdss_trace_req_mem_enable_valid;
+	u8 qdss_trace_req_mem_enable;
+	u8 qdss_trace_save_enable_valid;
+	u8 qdss_trace_save_enable;
+	u8 qdss_trace_free_enable_valid;
+	u8 qdss_trace_free_enable;
+	u8 m3_dump_upload_req_enable_valid;
+	u8 m3_dump_upload_req_enable;
 };
 
 struct qmi_wlanfw_ind_register_resp_msg_v01 {
@@ -296,6 +434,7 @@
 #define QMI_WLANFW_RESPOND_MEM_REQ_V01			0x0036
 #define QMI_WLANFW_RESPOND_MEM_RESP_V01			0x0036
 #define QMI_WLANFW_MAX_NUM_MEM_CFG_V01			2
+#define QMI_WLFW_QDSS_TRACE_MEM_INFO_REQ_V01            0x0040
 #define QMI_WLANFW_MAX_STR_LEN_V01                      16
 
 struct qmi_wlanfw_mem_cfg_s_v01 {
@@ -312,6 +451,11 @@
 	QMI_WLANFW_MEM_M3_V01 = 3,
 	QMI_WLANFW_MEM_CAL_V01 = 4,
 	QMI_WLANFW_MEM_DPD_V01 = 5,
+	QMI_WLANFW_MEM_QDSS_V01 = 6,
+	QMI_WLANFW_MEM_HANG_DATA_V01 = 7,
+	QMI_WLANFW_MEM_MLO_GLOBAL_V01 = 8,
+	QMI_WLANFW_MEM_PAGEABLE_V01 = 9,
+	QMI_WLANFW_MEM_AFC_V01 = 10,
 	WLANFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
 };
 
@@ -351,10 +495,16 @@
 	char placeholder;
 };
 
+struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01 {
+	char placeholder;
+};
+
 #define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN	0
 #define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN	207
 #define QMI_WLANFW_CAP_REQ_V01			0x0024
 #define QMI_WLANFW_CAP_RESP_V01			0x0024
+#define QMI_WLANFW_DEVICE_INFO_REQ_V01		0x004C
+#define QMI_WLANFW_DEVICE_INFO_REQ_MSG_V01	0
 
 enum qmi_wlanfw_pipedir_enum_v01 {
 	QMI_WLFW_PIPEDIR_NONE_V01 = 0,
@@ -460,12 +610,26 @@
 	enum qmi_wlanfw_rd_card_chain_cap_v01 rd_card_chain_cap;
 	u8 dev_mem_info_valid;
 	struct qmi_wlanfw_dev_mem_info_s_v01 dev_mem[ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01];
+	u8 rxgainlut_support_valid;
+	u8 rxgainlut_support;
 };
 
 struct qmi_wlanfw_cap_req_msg_v01 {
 	char placeholder;
 };
 
+struct qmi_wlanfw_device_info_req_msg_v01 {
+	char placeholder;
+};
+
+struct qmi_wlanfw_device_info_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u64 bar_addr;
+	u32 bar_size;
+	u8 bar_addr_valid;
+	u8 bar_size_valid;
+};
+
 #define QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN	6182
 #define QMI_WLANFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_LEN	7
 #define QMI_WLANFW_BDF_DOWNLOAD_RESP_V01		0x0025
@@ -500,6 +664,8 @@
 #define QMI_WLANFW_M3_INFO_RESP_V01		0x003C
 #define QMI_WLANFW_M3_INFO_REQ_V01		0x003C
 
+#define QMI_WLANFW_M3_DUMP_UPLOAD_DONE_REQ_MSG_V01_MAX_MSG_LEN	14
+
 struct qmi_wlanfw_m3_info_req_msg_v01 {
 	u64 addr;
 	u32 size;
@@ -558,6 +724,110 @@
 	struct qmi_response_type_v01 resp;
 };
 
+struct qmi_wlanfw_qdss_trace_save_ind_msg_v01 {
+	u32 source;
+	u32 total_size;
+	u8 mem_seg_valid;
+	u32 mem_seg_len;
+	struct qmi_wlanfw_mem_seg_resp_s_v01
+			mem_seg[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+	u8 file_name_valid;
+	char file_name[QMI_WLANFW_MAX_STR_LEN_V01 + 1];
+};
+
+#define QDSS_TRACE_SEG_LEN_MAX 32
+
+struct qdss_trace_mem_seg {
+	u64 addr;
+	u32 size;
+};
+
+struct ath12k_qmi_event_qdss_trace_save_data {
+	u32 total_size;
+	u32 mem_seg_len;
+	struct qdss_trace_mem_seg mem_seg[QDSS_TRACE_SEG_LEN_MAX];
+};
+
+#define QMI_MEM_REGION_TYPE                             0
+#define QMI_WLANFW_MEM_WRITE_REQ_V01                    0x0031
+#define QMI_WLANFW_MEM_WRITE_REQ_MSG_V01_MAX_MSG_LEN    6163
+#define QMI_WLANFW_MEM_READ_REQ_V01                     0x0030
+#define QMI_WLANFW_MEM_READ_REQ_MSG_V01_MAX_MSG_LEN     21
+
+struct qmi_wlanfw_mem_read_req_msg_v01 {
+	u32 offset;
+	u32 mem_type;
+	u32 data_len;
+};
+
+struct qmi_wlanfw_mem_read_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 data_valid;
+	u32 data_len;
+	u8 data[QMI_WLANFW_MAX_DATA_SIZE_V01];
+};
+
+struct qmi_wlanfw_mem_write_req_msg_v01 {
+	u32 offset;
+	u32 mem_type;
+	u32 data_len;
+	u8 data[QMI_WLANFW_MAX_DATA_SIZE_V01];
+};
+
+struct qmi_wlanfw_mem_write_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+int ath12k_qmi_mem_read(struct ath12k_base *ab, u32 mem_addr, void *mem_value, size_t count);
+
+int ath12k_qmi_mem_write(struct ath12k_base *ab, u32 mem_addr, void *mem_value, size_t count);
+
+#define QMI_WLFW_INI_REQ_V01 0x002F
+#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
+
+struct wlfw_ini_req_msg_v01 {
+	u8 enablefwlog_valid;
+	u8 enablefwlog;
+};
+
+struct wlfw_ini_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_m3_dump_upload_req_ind_msg_v01 {
+	u32 pdev_id;
+	u64 addr;
+	u64 size;
+};
+
+#define QMI_WLANFW_CFG_DOWNLOAD_REQ_V01 0x0056
+#define QMI_WLANFW_CFG_DOWNLOAD_RESP_V01 0x0056
+enum wlanfw_cfg_type_v01 {
+	WLANFW_CFG_TYPE_MIN_VAL_V01 = INT_MIN,
+	WLANFW_CFG_FILE_V01 = 0,
+	WLANFW_CFG_TYPE_MAX_VAL_V01 = INT_MAX,
+};
+
+#define WLANFW_CFG_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6174
+struct wlanfw_cfg_download_req_msg_v01 {
+	u8 file_type_valid;
+	enum wlanfw_cfg_type_v01 file_type;
+	u8 total_size_valid;
+	u32 total_size;
+	u8 seg_id_valid;
+	u32 seg_id;
+	u8 data_valid;
+	u32 data_len;
+	u8 data[QMI_WLANFW_MAX_DATA_SIZE_V01];
+	u8 end_valid;
+	u8 end;
+};
+
+#define WLANFW_CFG_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+struct wlanfw_cfg_download_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
 int ath12k_qmi_firmware_start(struct ath12k_base *ab,
 			      u32 mode);
 void ath12k_qmi_firmware_stop(struct ath12k_base *ab);
@@ -566,4 +836,29 @@
 void ath12k_qmi_deinit_service(struct ath12k_base *ab);
 int ath12k_qmi_init_service(struct ath12k_base *ab);
 
+int ath12k_send_qdss_trace_mode_req(struct ath12k_base *ab,
+				    enum qmi_wlanfw_qdss_trace_mode_enum_v01 mode);
+int ath12k_qmi_fwreset_from_cold_boot(struct ath12k_base *ab);
+int ath12k_enable_fwlog(struct ath12k_base *ab);
+int ath12k_qmi_m3_dump_upload_done_ind_send(struct ath12k_base *ab,
+					    u32 pdev_id, int status);
+void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab);
+void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab);
+int ath12k_qmi_mlo_global_snapshot_mem_init(struct ath12k_base *ab);
+void ath12k_free_mlo_mgmt_rx_reo_per_link_info(
+		struct ath12k_host_mlo_glb_rx_reo_snapshot_info *snapshot_info);
+
+static inline void ath12k_qmi_set_event_block(struct ath12k_qmi *qmi, bool block)
+{
+	lockdep_assert_held(&qmi->event_lock);
+
+	qmi->block_event = block;
+}
+
+static inline bool ath12k_qmi_get_event_block(struct ath12k_qmi *qmi)
+{
+	lockdep_assert_held(&qmi->event_lock);
+
+	return qmi->block_event;
+}
 #endif
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/reg.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/reg.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/reg.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/reg.c	2024-01-26 16:32:21.055710360 +0100
@@ -28,11 +28,37 @@
 	}
 };
 
-static bool ath12k_regdom_changes(struct ath12k *ar, char *alpha2)
+enum wmi_reg_6g_ap_type
+ath12k_ieee80211_ap_pwr_type_convert(enum ieee80211_ap_reg_power power_type)
+{
+	switch (power_type) {
+	case IEEE80211_REG_LPI_AP:
+		return WMI_REG_INDOOR_AP;
+	case IEEE80211_REG_SP_AP:
+		return WMI_REG_STD_POWER_AP;
+	case IEEE80211_REG_VLP_AP:
+		return WMI_REG_VLP_AP;
+	default:
+		return WMI_REG_MAX_AP_TYPE;
+	}
+}
+
+static void ath12k_copy_reg_rule(struct ath12k_reg_freq *ath12k_reg_freq,
+				 struct ath12k_reg_rule *reg_rule)
+{
+	if (!ath12k_reg_freq->start_freq)
+		ath12k_reg_freq->start_freq = reg_rule->start_freq;
+
+	if ((!ath12k_reg_freq->end_freq) ||
+	    (ath12k_reg_freq->end_freq < reg_rule->end_freq))
+		ath12k_reg_freq->end_freq = reg_rule->end_freq;
+}
+
+static bool ath12k_regdom_changes(struct ath12k_hw *ah, char *alpha2)
 {
 	const struct ieee80211_regdomain *regd;
 
-	regd = rcu_dereference_rtnl(ar->hw->wiphy->regd);
+	regd = rcu_dereference_rtnl(ah->hw->wiphy->regd);
 	/* This can happen during wiphy registration where the previous
 	 * user request is received before we update the regd received
 	 * from firmware.
@@ -44,38 +70,11 @@
 }
 
 static void
-ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+ath12k_reg_send_request(struct ath12k *ar, struct regulatory_request *request)
 {
-	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
 	struct ath12k_wmi_init_country_arg arg;
-	struct ath12k *ar = hw->priv;
 	int ret;
 
-	ath12k_dbg(ar->ab, ATH12K_DBG_REG,
-		   "Regulatory Notification received for %s\n", wiphy_name(wiphy));
-
-	/* Currently supporting only General User Hints. Cell base user
-	 * hints to be handled later.
-	 * Hints from other sources like Core, Beacons are not expected for
-	 * self managed wiphy's
-	 */
-	if (!(request->initiator == NL80211_REGDOM_SET_BY_USER &&
-	      request->user_reg_hint_type == NL80211_USER_REG_HINT_USER)) {
-		ath12k_warn(ar->ab, "Unexpected Regulatory event for this wiphy\n");
-		return;
-	}
-
-	if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS)) {
-		ath12k_dbg(ar->ab, ATH12K_DBG_REG,
-			   "Country Setting is not allowed\n");
-		return;
-	}
-
-	if (!ath12k_regdom_changes(ar, request->alpha2)) {
-		ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Country is already set\n");
-		return;
-	}
-
 	/* Set the country code to the firmware and wait for
 	 * the WMI_REG_CHAN_LIST_CC EVENT for updating the
 	 * reg info
@@ -84,18 +83,104 @@
 	memcpy(&arg.cc_info.alpha2, request->alpha2, 2);
 	arg.cc_info.alpha2[2] = 0;
 
+	ar->ab->regd_change_user_request[ar->pdev_idx] = true;
 	ret = ath12k_wmi_send_init_country_cmd(ar, &arg);
 	if (ret)
 		ath12k_warn(ar->ab,
 			    "INIT Country code set to fw failed : %d\n", ret);
 }
 
+static bool ath12k_reg_validate_pdev_state(struct ath12k *ar)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_pdev *pdev;
+	struct ath12k *tmp_ar;
+	int i;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		if (!pdev)
+			continue;
+
+		tmp_ar = pdev->ar;
+		if (tmp_ar) {
+			mutex_lock(&tmp_ar->conf_mutex);
+			if ((tmp_ar->state == ATH12K_STATE_ON ||
+			    tmp_ar->state == ATH12K_STATE_RESTARTED) &&
+			    tmp_ar->num_started_vdevs) {
+				if (tmp_ar == ar)
+					ath12k_warn(ab, "%s has active interface, please bring down to set country code",
+						    wiphy_name(ar->ah->hw->wiphy));
+				mutex_unlock(&tmp_ar->conf_mutex);
+				return false;
+			}
+			mutex_unlock(&tmp_ar->conf_mutex);
+		}
+	}
+	return true;
+}
+
+static bool ath12k_reg_validate_ah_state(struct ath12k_hw *ah)
+{
+	struct ath12k *ar = ah->radio;
+	int i;
+
+	lockdep_assert_held(&ah->conf_mutex);
+
+	for (i = 0; i < ah->num_radio; i++, ar++) {
+		/* The SET_INIT_COUNTRY command should not be sent to firmware while any vdev is active
+		 * Also it does not make sense to give the command for certain pdev's alone.
+		 * Hence check all the pdev's if any have an active vdev before sending the command.
+		 */
+		if (!ath12k_reg_validate_pdev_state(ar))
+			return false;
+	}
+
+	return true;
+}
+
+void ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar;
+	int i;
+
+	/* Currently supporting only General User Hints. Cell base user
+	 * hints to be handled later.
+	 * Hints from other sources like Core, Beacons are not expected for
+	 * self managed wiphy's
+	 */
+	if (!(request->initiator == NL80211_REGDOM_SET_BY_USER &&
+	      request->user_reg_hint_type == NL80211_USER_REG_HINT_USER))
+		return;
+
+	if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
+		return;
+
+	mutex_lock(&ah->conf_mutex);
+	if (!ath12k_regdom_changes(ah, request->alpha2))
+		goto exit;
+
+	if (!ath12k_reg_validate_ah_state(ah))
+		goto exit;
+
+	ah->regd_updated = false;
+	ar = ah->radio;
+
+	/* Send the reg change request to all the radios */
+	for (i = 0; i < ah->num_radio; i++, ar++)
+		ath12k_reg_send_request(ar, request);
+exit:
+	mutex_unlock(&ah->conf_mutex);
+}
+
 int ath12k_reg_update_chan_list(struct ath12k *ar)
 {
 	struct ieee80211_supported_band **bands;
 	struct ath12k_wmi_scan_chan_list_arg *arg;
 	struct ieee80211_channel *channel;
-	struct ieee80211_hw *hw = ar->hw;
+	struct ieee80211_hw *hw = ar->ah->hw;
 	struct ath12k_wmi_channel_arg *ch;
 	enum nl80211_band band;
 	int num_channels = 0;
@@ -103,7 +188,7 @@
 
 	bands = hw->wiphy->bands;
 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
-		if (!bands[band])
+		if (!(ar->mac.sbands[band].channels && bands[band]))
 			continue;
 
 		for (i = 0; i < bands[band]->n_channels; i++) {
@@ -111,12 +196,20 @@
 			    IEEE80211_CHAN_DISABLED)
 				continue;
 
+			if (band == NL80211_BAND_5GHZ)
+				if (bands[band]->channels[i].center_freq <
+				    ar->chan_info.low_freq ||
+				    bands[band]->channels[i].center_freq >
+				    ar->chan_info.high_freq)
+					continue;
 			num_channels++;
 		}
 	}
 
-	if (WARN_ON(!num_channels))
-		return -EINVAL;
+	if (!num_channels) {
+		ath12k_warn(ar->ab, "pdev is not supported for this country\n");
+		return -ENOTSUPP;
+	}
 
 	arg = kzalloc(struct_size(arg, channel, num_channels), GFP_KERNEL);
 
@@ -129,7 +222,7 @@
 	ch = arg->channel;
 
 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
-		if (!bands[band])
+		if (!(ar->mac.sbands[band].channels && bands[band]))
 			continue;
 
 		for (i = 0; i < bands[band]->n_channels; i++) {
@@ -138,6 +231,13 @@
 			if (channel->flags & IEEE80211_CHAN_DISABLED)
 				continue;
 
+			if (band == NL80211_BAND_5GHZ)
+				if (bands[band]->channels[i].center_freq <
+				    ar->chan_info.low_freq ||
+				    bands[band]->channels[i].center_freq >
+				    ar->chan_info.high_freq)
+					continue;
+
 			/* TODO: Set to true/false based on some condition? */
 			ch->allow_ht = true;
 			ch->allow_vht = true;
@@ -197,18 +297,97 @@
 		       sizeof(struct ieee80211_reg_rule));
 }
 
+int ath12k_reg_get_num_chans_in_band(struct ath12k *ar,
+				     struct ieee80211_supported_band *band,
+				     u32 freq_low, u32 freq_high)
+{
+	int i, count = 0;
+
+	if (!(freq_low && freq_high) || !band)
+		return 0;
+
+	for (i = 0; i < band->n_channels; i++) {
+		if (band->channels[i].center_freq >= freq_low &&
+		    band->channels[i].center_freq <= freq_high)
+			count++;
+	}
+
+	return count;
+}
+
 int ath12k_regd_update(struct ath12k *ar, bool init)
 {
 	struct ieee80211_regdomain *regd, *regd_copy = NULL;
 	int ret, regd_len, pdev_id;
 	struct ath12k_base *ab;
+	struct ath12k_hw *ah;
+	struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
+	int i;
+	u32 phy_id, freq_low, freq_high, supported_bands, band;
 
 	ab = ar->ab;
+	ah = ar->ah;
+
+	mutex_lock(&ah->conf_mutex);
+
+	supported_bands = ar->pdev->cap.supported_bands;
+	if (supported_bands & WMI_HOST_WLAN_2G_CAP)
+		band = NL80211_BAND_2GHZ;
+	else if (supported_bands & WMI_HOST_WLAN_5G_CAP && !ar->supports_6ghz)
+		band = NL80211_BAND_5GHZ;
+	else if (supported_bands & WMI_HOST_WLAN_5G_CAP && ar->supports_6ghz)
+		band = NL80211_BAND_6GHZ;
+
+	reg_cap = &ab->hal_reg_cap[ar->pdev_idx];
+
+	if (ab->hw_params->single_pdev_only && !ar->supports_6ghz) {
+		phy_id = ar->pdev->cap.band[band].phy_id;
+		reg_cap = &ab->hal_reg_cap[phy_id];
+	}
+
+	/* Possible that due to reg change, current limits for supported
+	 * frequency changed. Update that
+	 */
+	if (supported_bands & WMI_HOST_WLAN_2G_CAP) {
+		freq_low = max(reg_cap->low_2ghz_chan, ab->reg_freq_2g.start_freq);
+		freq_high = min(reg_cap->high_2ghz_chan, ab->reg_freq_2g.end_freq);
+	} else if (supported_bands & WMI_HOST_WLAN_5G_CAP && !ar->supports_6ghz) {
+		freq_low = max(reg_cap->low_5ghz_chan, ab->reg_freq_5g.start_freq);
+		freq_high = min(reg_cap->high_5ghz_chan, ab->reg_freq_5g.end_freq);
+	} else if (supported_bands & WMI_HOST_WLAN_5G_CAP && ar->supports_6ghz) {
+		freq_low = max(reg_cap->low_5ghz_chan, ab->reg_freq_6g.start_freq);
+		freq_high = min(reg_cap->high_5ghz_chan, ab->reg_freq_6g.end_freq);
+	}
+
+	ar->chan_info.low_freq = freq_low;
+	ar->chan_info.high_freq = freq_high;
+	ar->chan_info.num_channels = ath12k_reg_get_num_chans_in_band(ar,
+								      &ar->mac.sbands[band],
+								      freq_low,
+								      freq_high);
+
+	ath12k_dbg(ab, ATH12K_DBG_REG, "pdev %u reg updated freq limits %u->%u MHz no of channel %u\n",
+		   ar->pdev->pdev_id, ar->chan_info.low_freq,
+		   ar->chan_info.high_freq, ar->chan_info.num_channels);
+
+	/* Since FW provides reg rules which are similar for 2G/5G pdev
+	 * but since 6G pdev has superset of all rules including rules for
+	 * all bands, we prefer 6G pdev. If 6GHz pdev was part of the
+	 * ath12k_hw, wait for the 6GHz pdev, else pick the first pdev
+	 * which calls this function and use its regd to update global
+	 * hw regd. The regd_updated flag set at the end will not allow
+	 * any further updates.
+	 */
+	if ((ah->supported_band_mask & BIT(NL80211_BAND_6GHZ)) && !ar->supports_6ghz) {
+		mutex_unlock(&ah->conf_mutex);
+		return 0;
+	}
+
 	pdev_id = ar->pdev_idx;
 
 	spin_lock_bh(&ab->base_lock);
 
-	if (init) {
+	if (init && !ab->new_regd[pdev_id]) {
 		/* Apply the regd received during init through
 		 * WMI_REG_CHAN_LIST_CC event. In case of failure to
 		 * receive the regd, initialize with a default world
@@ -245,25 +424,38 @@
 		goto err;
 	}
 
-	rtnl_lock();
-	wiphy_lock(ar->hw->wiphy);
-	ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
-	wiphy_unlock(ar->hw->wiphy);
-	rtnl_unlock();
+	ret = regulatory_set_wiphy_regd(ah->hw->wiphy, regd_copy);
 
 	kfree(regd_copy);
 
 	if (ret)
 		goto err;
 
+	ah->regd_updated = true;
+
+	/* Apply the new regd to all the radios, this is expected to be received only once
+	 * since we check for ah->regd_updated and allow here only once
+	 */
+	ar = ah->radio;
+	ab = ar->ab;
+
+	for (i = 0; i < ah->num_radio; i++) {
+		ab = ar->ab;
+
 	if (ar->state == ATH12K_STATE_ON) {
 		ret = ath12k_reg_update_chan_list(ar);
-		if (ret)
+			if (ret == -ENOTSUPP)
+				continue;
+			else if (ret)
 			goto err;
 	}
+		ar++;
+	}
 
+	mutex_unlock(&ah->conf_mutex);
 	return 0;
 err:
+	mutex_unlock(&ah->conf_mutex);
 	ath12k_warn(ab, "failed to perform regd update : %d\n", ret);
 	return ret;
 }
@@ -276,16 +468,45 @@
 	case ATH12K_DFS_REG_CN:
 		return NL80211_DFS_FCC;
 	case ATH12K_DFS_REG_ETSI:
-	case ATH12K_DFS_REG_KR:
 		return NL80211_DFS_ETSI;
 	case ATH12K_DFS_REG_MKK:
 	case ATH12K_DFS_REG_MKK_N:
+	case ATH12K_DFS_REG_KR:
 		return NL80211_DFS_JP;
 	default:
 		return NL80211_DFS_UNSET;
 	}
 }
 
+static u32 ath12k_update_bw_reg_flags(u16 max_bw)
+{
+	u32 flags = 0;
+	switch (max_bw) {
+	case 20:
+		flags = (NL80211_RRF_NO_HT40 |
+			NL80211_RRF_NO_80MHZ |
+			NL80211_RRF_NO_160MHZ |
+			NL80211_RRF_NO_320MHZ);
+		break;
+	case 40:
+		flags = (NL80211_RRF_NO_80MHZ |
+			NL80211_RRF_NO_160MHZ |
+			NL80211_RRF_NO_320MHZ);
+		break;
+	case 80:
+		flags = (NL80211_RRF_NO_160MHZ |
+			NL80211_RRF_NO_320MHZ);
+		break;
+	case 160:
+		flags = NL80211_RRF_NO_320MHZ;
+		break;
+	default:
+		break;
+	}
+
+	return flags;
+}
+
 static u32 ath12k_map_fw_reg_flags(u16 reg_flags)
 {
 	u32 flags = 0;
@@ -314,128 +535,22 @@
 	return flags;
 }
 
-static bool
-ath12k_reg_can_intersect(struct ieee80211_reg_rule *rule1,
-			 struct ieee80211_reg_rule *rule2)
-{
-	u32 start_freq1, end_freq1;
-	u32 start_freq2, end_freq2;
-
-	start_freq1 = rule1->freq_range.start_freq_khz;
-	start_freq2 = rule2->freq_range.start_freq_khz;
-
-	end_freq1 = rule1->freq_range.end_freq_khz;
-	end_freq2 = rule2->freq_range.end_freq_khz;
-
-	if ((start_freq1 >= start_freq2 &&
-	     start_freq1 < end_freq2) ||
-	    (start_freq2 > start_freq1 &&
-	     start_freq2 < end_freq1))
-		return true;
-
-	/* TODO: Should we restrict intersection feasibility
-	 *  based on min bandwidth of the intersected region also,
-	 *  say the intersected rule should have a  min bandwidth
-	 * of 20MHz?
-	 */
-
-	return false;
-}
-
-static void ath12k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
-				       struct ieee80211_reg_rule *rule2,
-				       struct ieee80211_reg_rule *new_rule)
-{
-	u32 start_freq1, end_freq1;
-	u32 start_freq2, end_freq2;
-	u32 freq_diff, max_bw;
-
-	start_freq1 = rule1->freq_range.start_freq_khz;
-	start_freq2 = rule2->freq_range.start_freq_khz;
-
-	end_freq1 = rule1->freq_range.end_freq_khz;
-	end_freq2 = rule2->freq_range.end_freq_khz;
-
-	new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1,
-						    start_freq2);
-	new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2);
-
-	freq_diff = new_rule->freq_range.end_freq_khz -
-			new_rule->freq_range.start_freq_khz;
-	max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz,
-		       rule2->freq_range.max_bandwidth_khz);
-	new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff);
-
-	new_rule->power_rule.max_antenna_gain =
-		min_t(u32, rule1->power_rule.max_antenna_gain,
-		      rule2->power_rule.max_antenna_gain);
-
-	new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp,
-					      rule2->power_rule.max_eirp);
-
-	/* Use the flags of both the rules */
-	new_rule->flags = rule1->flags | rule2->flags;
-
-	/* To be safe, lts use the max cac timeout of both rules */
-	new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
-				     rule2->dfs_cac_ms);
-}
-
-static struct ieee80211_regdomain *
-ath12k_regd_intersect(struct ieee80211_regdomain *default_regd,
-		      struct ieee80211_regdomain *curr_regd)
+static u32 ath12k_map_fw_phy_flags(u32 phy_flags)
 {
-	u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules;
-	struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule;
-	struct ieee80211_regdomain *new_regd = NULL;
-	u8 i, j, k;
+	u32 flags = 0;
 
-	num_old_regd_rules = default_regd->n_reg_rules;
-	num_curr_regd_rules = curr_regd->n_reg_rules;
-	num_new_regd_rules = 0;
+	if (phy_flags & ATH12K_REG_PHY_BITMAP_NO11AX)
+		flags |= NL80211_RRF_NO_HE;
 
-	/* Find the number of intersecting rules to allocate new regd memory */
-	for (i = 0; i < num_old_regd_rules; i++) {
-		old_rule = default_regd->reg_rules + i;
-		for (j = 0; j < num_curr_regd_rules; j++) {
-			curr_rule = curr_regd->reg_rules + j;
+	if (phy_flags & ATH12K_REG_PHY_BITMAP_NO11BE)
+		flags |= NL80211_RRF_NO_EHT;
 
-			if (ath12k_reg_can_intersect(old_rule, curr_rule))
-				num_new_regd_rules++;
-		}
+	return flags;
 	}
 
-	if (!num_new_regd_rules)
-		return NULL;
-
-	new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules *
-			sizeof(struct ieee80211_reg_rule)),
-			GFP_ATOMIC);
-
-	if (!new_regd)
-		return NULL;
-
-	/* We set the new country and dfs region directly and only trim
-	 * the freq, power, antenna gain by intersecting with the
-	 * default regdomain. Also MAX of the dfs cac timeout is selected.
-	 */
-	new_regd->n_reg_rules = num_new_regd_rules;
-	memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2));
-	new_regd->dfs_region = curr_regd->dfs_region;
-	new_rule = new_regd->reg_rules;
-
-	for (i = 0, k = 0; i < num_old_regd_rules; i++) {
-		old_rule = default_regd->reg_rules + i;
-		for (j = 0; j < num_curr_regd_rules; j++) {
-			curr_rule = curr_regd->reg_rules + j;
-
-			if (ath12k_reg_can_intersect(old_rule, curr_rule))
-				ath12k_reg_intersect_rules(old_rule, curr_rule,
-							   (new_rule + k++));
-		}
-	}
-	return new_regd;
-}
+/* TODO: API ath12k_reg_can_intersect() to handle whether two
+ * ieee80211_reg_rules can intersect or not based on their
+ * frequency range and power mode. */
 
 static const char *
 ath12k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region)
@@ -473,13 +588,15 @@
 static void
 ath12k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
 		       u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
-		       u32 reg_flags)
+		       s8 psd, u32 reg_flags,
+		       int pwr_mode)
 {
 	reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
 	reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
 	reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
 	reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
 	reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
+	reg_rule->psd = psd;
 	reg_rule->flags = reg_flags;
 }
 
@@ -501,7 +618,7 @@
 	ath12k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq,
 			       ETSI_WEATHER_RADAR_BAND_LOW, bw,
 			       reg_rule->ant_gain, reg_rule->reg_power,
-			       flags);
+			       reg_rule->psd_eirp, flags, 0);
 
 	ath12k_dbg(ab, ATH12K_DBG_REG,
 		   "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
@@ -523,7 +640,7 @@
 	ath12k_reg_update_rule(regd->reg_rules + i,
 			       ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw,
 			       reg_rule->ant_gain, reg_rule->reg_power,
-			       flags);
+			       reg_rule->psd_eirp, flags, 0);
 
 	regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
 
@@ -548,7 +665,7 @@
 	ath12k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH,
 			       reg_rule->end_freq, bw,
 			       reg_rule->ant_gain, reg_rule->reg_power,
-			       flags);
+			       reg_rule->psd_eirp, flags, 0);
 
 	ath12k_dbg(ab, ATH12K_DBG_REG,
 		   "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
@@ -562,15 +679,17 @@
 
 struct ieee80211_regdomain *
 ath12k_reg_build_regd(struct ath12k_base *ab,
-		      struct ath12k_reg_info *reg_info, bool intersect)
+		      struct ath12k_reg_info *reg_info,
+		      enum ieee80211_ap_reg_power power_type)
 {
-	struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
+	struct ieee80211_regdomain *new_regd = NULL;
 	struct ath12k_reg_rule *reg_rule;
 	u8 i = 0, j = 0, k = 0;
 	u8 num_rules;
 	u16 max_bw;
-	u32 flags;
+	u32 flags = 0;
 	char alpha2[3];
+	int pwr_mode;
 
 	num_rules = reg_info->num_5g_reg_rules + reg_info->num_2g_reg_rules;
 
@@ -582,50 +701,59 @@
 		num_rules += reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP];
 
 	if (!num_rules)
-		goto ret;
+		return new_regd;
 
 	/* Add max additional rules to accommodate weather radar band */
 	if (reg_info->dfs_region == ATH12K_DFS_REG_ETSI)
 		num_rules += 2;
 
-	tmp_regd = kzalloc(sizeof(*tmp_regd) +
+	new_regd = kzalloc(sizeof(*new_regd) +
 			   (num_rules * sizeof(struct ieee80211_reg_rule)),
 			   GFP_ATOMIC);
-	if (!tmp_regd)
-		goto ret;
+	if (!new_regd)
+		return new_regd;
 
-	memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+	memcpy(new_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
 	memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
 	alpha2[2] = '\0';
-	tmp_regd->dfs_region = ath12k_map_fw_dfs_region(reg_info->dfs_region);
+	new_regd->dfs_region = ath12k_map_fw_dfs_region(reg_info->dfs_region);
 
 	ath12k_dbg(ab, ATH12K_DBG_REG,
 		   "\r\nCountry %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
-		   alpha2, ath12k_reg_get_regdom_str(tmp_regd->dfs_region),
+		   alpha2, ath12k_reg_get_regdom_str(new_regd->dfs_region),
 		   reg_info->dfs_region, num_rules);
 	/* Update reg_rules[] below. Firmware is expected to
 	 * send these rules in order(2G rules first and then 5G)
 	 */
-	for (; i < num_rules; i++) {
+	for (i = 0, j = 0; i < num_rules; i++) {
 		if (reg_info->num_2g_reg_rules &&
 		    (i < reg_info->num_2g_reg_rules)) {
 			reg_rule = reg_info->reg_rules_2g_ptr + i;
 			max_bw = min_t(u16, reg_rule->max_bw,
 				       reg_info->max_bw_2g);
-			flags = 0;
+			flags = ath12k_update_bw_reg_flags(reg_info->max_bw_2g);
+			pwr_mode = 0;
+			ath12k_copy_reg_rule(&ab->reg_freq_2g, reg_rule);
 		} else if (reg_info->num_5g_reg_rules &&
 			   (j < reg_info->num_5g_reg_rules)) {
 			reg_rule = reg_info->reg_rules_5g_ptr + j++;
 			max_bw = min_t(u16, reg_rule->max_bw,
 				       reg_info->max_bw_5g);
-
+			flags = NL80211_RRF_AUTO_BW | ath12k_update_bw_reg_flags(reg_info->max_bw_5g);
 			/* FW doesn't pass NL80211_RRF_AUTO_BW flag for
 			 * BW Auto correction, we can enable this by default
 			 * for all 5G rules here. The regulatory core performs
 			 * BW correction if required and applies flags as
 			 * per other BW rule flags we pass from here
 			 */
-			flags = NL80211_RRF_AUTO_BW;
+			if ((reg_rule->start_freq == 5490) && (reg_rule->end_freq == 5730))
+				flags &= ~NL80211_RRF_NO_320MHZ;
+
+			pwr_mode = 0;
+			if (reg_rule->end_freq <= ATH12K_MAX_5G_FREQ)
+				ath12k_copy_reg_rule(&ab->reg_freq_5g, reg_rule);
+			else if (reg_rule->start_freq >= ATH12K_MIN_6G_FREQ)
+				ath12k_copy_reg_rule(&ab->reg_freq_6g, reg_rule);
 		} else if (reg_info->is_ext_reg_event &&
 			   reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] &&
 			(k < reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP])) {
@@ -633,17 +761,25 @@
 			max_bw = min_t(u16, reg_rule->max_bw,
 				       reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP]);
 			flags = NL80211_RRF_AUTO_BW;
+			if (reg_rule->psd_flag)
+				flags |= NL80211_RRF_PSD;
+
+			if (reg_rule->end_freq <= ATH12K_MAX_6G_FREQ)
+				ath12k_copy_reg_rule(&ab->reg_freq_6g, reg_rule);
+			else if (reg_rule->start_freq >= ATH12K_MIN_6G_FREQ)
+				ath12k_copy_reg_rule(&ab->reg_freq_6g, reg_rule);
 		} else {
 			break;
 		}
 
 		flags |= ath12k_map_fw_reg_flags(reg_rule->flags);
+		flags |= ath12k_map_fw_phy_flags(reg_info->phybitmap);
 
-		ath12k_reg_update_rule(tmp_regd->reg_rules + i,
+		ath12k_reg_update_rule(new_regd->reg_rules + i,
 				       reg_rule->start_freq,
 				       reg_rule->end_freq, max_bw,
 				       reg_rule->ant_gain, reg_rule->reg_power,
-				       flags);
+				       reg_rule->psd_eirp, flags, pwr_mode);
 
 		/* Update dfs cac timeout if the dfs domain is ETSI and the
 		 * new rule covers weather radar band.
@@ -654,7 +790,7 @@
 		    reg_info->dfs_region == ATH12K_DFS_REG_ETSI &&
 		    (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW &&
 		    reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)){
-			ath12k_reg_update_weather_radar_band(ab, tmp_regd,
+			ath12k_reg_update_weather_radar_band(ab, new_regd,
 							     reg_rule, &i,
 							     flags, max_bw);
 			continue;
@@ -664,37 +800,20 @@
 			ath12k_dbg(ab, ATH12K_DBG_REG, "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d) (%d, %d)\n",
 				   i + 1, reg_rule->start_freq, reg_rule->end_freq,
 				   max_bw, reg_rule->ant_gain, reg_rule->reg_power,
-				   tmp_regd->reg_rules[i].dfs_cac_ms,
+				   new_regd->reg_rules[i].dfs_cac_ms,
 				   flags, reg_rule->psd_flag, reg_rule->psd_eirp);
 		} else {
 			ath12k_dbg(ab, ATH12K_DBG_REG,
 				   "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
 				   i + 1, reg_rule->start_freq, reg_rule->end_freq,
 				   max_bw, reg_rule->ant_gain, reg_rule->reg_power,
-				   tmp_regd->reg_rules[i].dfs_cac_ms,
+				   new_regd->reg_rules[i].dfs_cac_ms,
 				   flags);
 		}
 	}
 
-	tmp_regd->n_reg_rules = i;
+	new_regd->n_reg_rules = i;
 
-	if (intersect) {
-		default_regd = ab->default_regd[reg_info->phy_id];
-
-		/* Get a new regd by intersecting the received regd with
-		 * our default regd.
-		 */
-		new_regd = ath12k_regd_intersect(default_regd, tmp_regd);
-		kfree(tmp_regd);
-		if (!new_regd) {
-			ath12k_warn(ab, "Unable to create intersected regdomain\n");
-			goto ret;
-		}
-	} else {
-		new_regd = tmp_regd;
-	}
-
-ret:
 	return new_regd;
 }
 
@@ -715,12 +834,6 @@
 	}
 }
 
-void ath12k_reg_init(struct ath12k *ar)
-{
-	ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
-	ar->hw->wiphy->reg_notifier = ath12k_reg_notifier;
-}
-
 void ath12k_reg_free(struct ath12k_base *ab)
 {
 	int i;
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/reg.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/reg.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/reg.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/reg.h	2024-01-19 17:01:19.873847249 +0100
@@ -25,6 +25,17 @@
 	ATH12K_DFS_REG_UNDEF,
 };
 
+/* Phybitmap supported in Firmware */
+enum ath12k_reg_phy_bitmap {
+	ATH12K_REG_PHY_BITMAP_NO11A	 = 0x0001,
+	ATH12K_REG_PHY_BITMAP_NO11B      = 0x0002,
+	ATH12K_REG_PHY_BITMAP_NO11G      = 0x0004,
+	ATH12K_REG_PHY_BITMAP_NO11N      = 0x0008,
+	ATH12K_REG_PHY_BITMAP_NO11AC     = 0x0010,
+	ATH12K_REG_PHY_BITMAP_NO11AX     = 0x0020,
+	ATH12K_REG_PHY_BITMAP_NO11BE     = 0x0040,
+};
+
 enum ath12k_reg_cc_code {
 	REG_SET_CC_STATUS_PASS = 0,
 	REG_CURRENT_ALPHA2_NOT_FOUND = 1,
@@ -42,7 +53,7 @@
 	u8 ant_gain;
 	u16 flags;
 	bool psd_flag;
-	u16 psd_eirp;
+	s8 psd_eirp;
 };
 
 struct ath12k_reg_info {
@@ -83,13 +94,19 @@
 		[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
 };
 
-void ath12k_reg_init(struct ath12k *ar);
+void ath12k_reg_notifier(struct wiphy *wiphy,
+			 struct regulatory_request *request);
 void ath12k_reg_free(struct ath12k_base *ab);
 void ath12k_regd_update_work(struct work_struct *work);
 struct ieee80211_regdomain *ath12k_reg_build_regd(struct ath12k_base *ab,
 						  struct ath12k_reg_info *reg_info,
-						  bool intersect);
+						  enum ieee80211_ap_reg_power power_type);
+enum wmi_reg_6g_ap_type
+ath12k_ieee80211_ap_pwr_type_convert(enum ieee80211_ap_reg_power power_type);
 int ath12k_regd_update(struct ath12k *ar, bool init);
 int ath12k_reg_update_chan_list(struct ath12k *ar);
+int ath12k_reg_get_num_chans_in_band(struct ath12k *ar,
+				     struct ieee80211_supported_band *band,
+				     u32 freq_low, u32 freq_high);
 
 #endif
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/rx_desc.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/rx_desc.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/rx_desc.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/rx_desc.h	2024-01-19 17:01:19.873847249 +0100
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #ifndef ATH12K_RX_DESC_H
 #define ATH12K_RX_DESC_H
@@ -59,6 +59,8 @@
 #define RX_MPDU_START_INFO3_AST_LOOKUP_VALID		BIT(13)
 #define RX_MPDU_START_INFO3_RANGING			BIT(14)
 
+#define RX_MPDU_START_SW_PEER_ID_PEER                  GENMASK(13, 0)
+
 #define RX_MPDU_START_INFO4_MPDU_FCTRL_VALID		BIT(0)
 #define RX_MPDU_START_INFO4_MPDU_DUR_VALID		BIT(1)
 #define RX_MPDU_START_INFO4_MAC_ADDR1_VALID		BIT(2)
@@ -117,6 +119,28 @@
 #define RX_MPDU_START_INFO8_AUTH_TO_SEND_WDS		BIT(0)
 
 struct rx_mpdu_start_qcn9274 {
+	__le32 info1;
+	__le32 pn[4];
+	__le32 info2;
+	__le32 peer_meta_data;
+	__le16 info3;
+	__le16 phy_ppdu_id;
+	__le16 ast_index;
+	__le16 sw_peer_id;
+	__le32 info4;
+	__le32 info5;
+	__le32 info6;
+	__le16 frame_ctrl;
+	__le16 duration;
+	u8 addr1[ETH_ALEN];
+	u8 addr2[ETH_ALEN];
+	u8 addr3[ETH_ALEN];
+	__le16 seq_ctrl;
+	u8 addr4[ETH_ALEN];
+	__le16 qos_ctrl;
+} __packed;
+
+struct rx_mpdu_start_wcn7850 {
 	__le32 info0;
 	__le32 reo_queue_desc_lo;
 	__le32 info1;
@@ -582,6 +606,8 @@
 	RX_MSDU_START_PKT_TYPE_11N,
 	RX_MSDU_START_PKT_TYPE_11AC,
 	RX_MSDU_START_PKT_TYPE_11AX,
+	RX_MSDU_START_PKT_TYPE_11BA,
+	RX_MSDU_START_PKT_TYPE_11BE,
 };
 
 enum rx_msdu_start_sgi {
@@ -608,6 +634,10 @@
 	RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
 };
 
+#define RX_MSDU_END_64_TLV_TAG			GENMASK(9, 1)
+#define RX_MSDU_END_64_TLV_LEN			GENMASK(21, 10)
+#define RX_MSDU_END_64_TLV_SRC_LINK_ID		GENMASK(24, 22)
+
 #define RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER	GENMASK(1, 0)
 #define RX_MSDU_END_INFO0_SW_FRAME_GRP_ID	GENMASK(8, 2)
 
@@ -661,7 +691,7 @@
 
 #define RX_MSDU_END_INFO9_SERVICE_CODE		GENMASK(14, 6)
 #define RX_MSDU_END_INFO9_PRIORITY_VALID	BIT(15)
-#define RX_MSDU_END_INFO9_INRA_BSS		BIT(16)
+#define RX_MSDU_END_INFO9_INTRA_BSS		BIT(16)
 #define RX_MSDU_END_INFO9_DEST_CHIP_ID		GENMASK(18, 17)
 #define RX_MSDU_END_INFO9_MCAST_ECHO		BIT(19)
 #define RX_MSDU_END_INFO9_WDS_LEARN_EVENT	BIT(20)
@@ -737,6 +767,22 @@
 #define RX_MSDU_END_INFO14_MSDU_DONE		BIT(31)
 
 struct rx_msdu_end_qcn9274 {
+	__le64 msdu_end_tag;
+	__le16 sa_sw_peer_id;
+	__le16 info5;
+	__le16 sa_idx;
+	__le16 da_idx_or_sw_peer_id;
+	__le32 info10;
+	__le32 info11;
+	__le32 info12;
+	__le32 flow_id_toeplitz;
+	__le32 ppdu_start_timestamp_63_32;
+	__le32 phy_meta_data;
+	__le32 info13;
+	__le32 info14;
+} __packed;
+
+struct rx_msdu_end_wcn7850 {
 	__le16 info0;
 	__le16 phy_ppdu_id;
 	__le16 ip_hdr_cksum;
@@ -1410,10 +1456,10 @@
 
 struct hal_rx_desc_wcn7850 {
 	__le64 msdu_end_tag;
-	struct rx_msdu_end_qcn9274 msdu_end;
+	struct rx_msdu_end_wcn7850 msdu_end;
 	u8 rx_padding0[RX_BE_PADDING0_BYTES];
 	__le64 mpdu_start_tag;
-	struct rx_mpdu_start_qcn9274 mpdu_start;
+	struct rx_mpdu_start_wcn7850 mpdu_start;
 	struct rx_pkt_hdr_tlv	 pkt_hdr_tlv;
 	u8 msdu_payload[];
 };
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/wmi.c linux-6.4-fbx/drivers/net/wireless/ath/ath12k/wmi.c
--- linux-6.4/drivers/net/wireless/ath/ath12k/wmi.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/wmi.c	2024-04-19 16:04:28.961735885 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #include <linux/skbuff.h>
 #include <linux/ctype.h>
@@ -19,6 +19,7 @@
 #include "mac.h"
 #include "hw.h"
 #include "peer.h"
+#include "testmode.h"
 
 struct ath12k_wmi_svc_ready_parse {
 	bool wmi_svc_bitmap_done;
@@ -38,6 +39,9 @@
 	u32 max_bssid_rx_filters;
 	u32 num_hw_modes;
 	u32 num_phy;
+	u32 num_chainmask_tables;
+	struct ath12k_chainmask_table
+		chainmask_table[ATH12K_MAX_CHAINMASK_TABLES];
 };
 
 struct ath12k_wmi_svc_rdy_ext_parse {
@@ -56,6 +60,7 @@
 	bool hw_mode_done;
 	bool mac_phy_done;
 	bool ext_hal_reg_done;
+	u32 n_mac_phy_chainmask_combo;
 	bool mac_phy_chainmask_combo_done;
 	bool mac_phy_chainmask_cap_done;
 	bool oem_dma_ring_cap_done;
@@ -63,8 +68,11 @@
 };
 
 struct ath12k_wmi_svc_rdy_ext2_parse {
+	struct ath12k_service_ext2_param param;
 	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
 	bool dma_ring_cap_done;
+	bool spectral_bin_scaling_done;
+	bool mac_phy_caps_ext_done;
 };
 
 struct ath12k_wmi_rdy_parse {
@@ -81,6 +89,25 @@
 	bool meta_data_done;
 };
 
+struct wmi_pdev_sscan_fw_param_parse {
+	struct ath12k_wmi_pdev_sscan_fw_cmd_fixed_param fixed;
+	struct ath12k_wmi_pdev_sscan_fft_bin_index *bin;
+	struct ath12k_wmi_pdev_sscan_chan_info ch_info;
+	struct ath12k_wmi_pdev_sscan_per_detector_info *det_info;
+	bool bin_entry_done;
+	bool det_info_entry_done;
+
+};
+
+struct wmi_spectral_capabilities_parse {
+	struct ath12k_wmi_spectral_scan_bw_capabilities *sscan_bw_caps;
+	struct ath12k_wmi_spectral_fft_size_capabilities *fft_size_caps;
+	bool sscan_bw_caps_entry_done;
+	bool fft_size_caps_entry_done;
+	u32 num_bw_caps_entry;
+	u32 num_fft_size_caps_entry;
+};
+
 struct ath12k_wmi_tlv_policy {
 	size_t min_len;
 };
@@ -89,11 +116,18 @@
 	const struct ath12k_wmi_mgmt_rx_params *fixed;
 	const u8 *frame_buf;
 	bool frame_buf_done;
+	struct ath12k_wmi_mgmt_rx_reo_params *reo_params;
+	struct ath12k_wmi_mgmt_rx_fw_consumed_hdr *fw_consumed_reo_params;
+	struct ath12k_mgmt_rx_cu_arg cu_params;
+	bool mgmt_ml_info_done;
+	bool bpcc_buf_done;
 };
 
 static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
 	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
 	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
+	[WMI_TAG_ARRAY_STRUCT] = { .min_len = 0 },
+	[WMI_TAG_ARRAY_INT16] = { .min_len = 0 },
 	[WMI_TAG_SERVICE_READY_EVENT] = {
 		.min_len = sizeof(struct wmi_service_ready_event) },
 	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
@@ -134,6 +168,8 @@
 		.min_len = sizeof(struct wmi_service_available_event) },
 	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
 		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
+	[WMI_TAG_STATS_EVENT] = {
+		.min_len = sizeof(struct wmi_stats_event) },
 	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
 		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
 	[WMI_TAG_HOST_SWFDA_EVENT] = {
@@ -142,8 +178,31 @@
 		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
 	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
 		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
+	[WMI_TAG_MUEDCA_PARAMS_CONFIG_EVENT] = {
+		.min_len = sizeof(struct wmi_pdev_update_muedca_event) },
+	[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
+		.min_len = sizeof(struct wmi_twt_add_dialog_event) },
+	[WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
+		.min_len = sizeof(struct wmi_obss_color_collision_event) },
+	[WMI_TAG_CTRL_PATH_EVENT_FIXED_PARAM] = {
+		.min_len = sizeof(struct wmi_cp_stats_event_fixed_param) },
+	[WMI_TAG_OFFCHAN_DATA_TX_COMPL_EVENT] = {
+		.min_len = sizeof(struct wmi_offchan_data_tx_compl_event) },
 };
 
+static const u32 eml_trans_timeout[EMLCAP_TIMEOUT_MAX] = {
+					EMLCAP_TRANSTIMEOUT_0,
+					EMLCAP_TRANSTIMEOUT_1,
+					EMLCAP_TRANSTIMEOUT_2,
+					EMLCAP_TRANSTIMEOUT_3,
+					EMLCAP_TRANSTIMEOUT_4,
+					EMLCAP_TRANSTIMEOUT_5,
+					EMLCAP_TRANSTIMEOUT_6,
+					EMLCAP_TRANSTIMEOUT_7,
+					EMLCAP_TRANSTIMEOUT_8,
+					EMLCAP_TRANSTIMEOUT_9,
+					EMLCAP_TRANSTIMEOUT_10};
+
 static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
 {
 	return le32_encode_bits(cmd, WMI_TLV_TAG) |
@@ -155,9 +214,188 @@
 	return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
 }
 
+static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle,
+				      const void *ptr,
+				      struct ath12k_service_ext2_param *param)
+{
+	const struct wmi_service_ready_ext2_event *ev = ptr;
+
+	if (!ev)
+		return -EINVAL;
+
+	param->reg_db_version = ev->reg_db_version;
+	param->hw_min_max_tx_power_2g =	ev->hw_min_max_tx_power_2g;
+	param->hw_min_max_tx_power_5g = ev->hw_min_max_tx_power_5g;
+	param->chwidth_num_peer_caps = ev->chwidth_num_peer_caps;
+	param->ru_punct_supp_bw = ev->ru_punct_supp_bw;
+	param->max_user_per_ppdu_ofdma = ev->max_user_per_ppdu_ofdma;
+	param->max_user_per_ppdu_mumimo = ev->max_user_per_ppdu_mumimo;
+	param->target_cap_flags = ev->target_cap_flags;
+	param->max_msduq_per_tid = ev->max_num_msduq_supported_per_tid;
+	param->default_msduq_per_tid = ev->default_num_msduq_supported_per_tid;
+	return 0;
+}
+
+static int ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
+						 struct wmi_mac_phy_caps_ext *caps,
+						 struct ath12k_pdev *pdev,
+						 struct ath12k_wmi_svc_rdy_ext2_parse *data)
+{
+	struct ath12k_band_cap *cap_band;
+	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
+
+	if (pdev_cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
+		memcpy(cap_band->eht_cap_mac_info, &caps->eht_cap_mac_info_2G,
+		       sizeof(u32) * PSOC_HOST_MAX_MAC_SIZE);
+		memcpy(cap_band->eht_cap_phy_info, &caps->eht_cap_phy_info_2G,
+		       sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+		cap_band->eht_mcs_20_only = caps->eht_supp_mcs_ext_2G[0];
+		cap_band->eht_mcs_80 = caps->eht_supp_mcs_ext_2G[1];
+		cap_band->eht_mcs_160 = 0xFFFFFFFF;
+		cap_band->eht_mcs_320 = 0xFFFFFFFF;
+		memcpy(&cap_band->eht_ppet, &caps->eht_ppet2G,
+		       sizeof(struct ath12k_ppe_threshold));
+		cap_band->eht_cap_info_internal = caps->eht_cap_info_internal;
+	}
+
+	if (pdev_cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
+		memcpy(cap_band->eht_cap_mac_info, &caps->eht_cap_mac_info_5G,
+		       sizeof(u32) * PSOC_HOST_MAX_MAC_SIZE);
+		memcpy(cap_band->eht_cap_phy_info, &caps->eht_cap_phy_info_5G,
+		       sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+		cap_band->eht_mcs_20_only = caps->eht_supp_mcs_ext_5G[0];
+		cap_band->eht_mcs_80 = caps->eht_supp_mcs_ext_5G[1];
+		cap_band->eht_mcs_160 = caps->eht_supp_mcs_ext_5G[2];
+		cap_band->eht_mcs_320 = caps->eht_supp_mcs_ext_5G[3];
+		memcpy(&cap_band->eht_ppet, &caps->eht_ppet5G,
+		       sizeof(struct ath12k_ppe_threshold));
+		cap_band->eht_cap_info_internal = caps->eht_cap_info_internal;
+
+		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
+		memcpy(cap_band->eht_cap_mac_info, &caps->eht_cap_mac_info_5G,
+		       sizeof(u32) * PSOC_HOST_MAX_MAC_SIZE);
+		memcpy(cap_band->eht_cap_phy_info, &caps->eht_cap_phy_info_5G,
+		       sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+		cap_band->eht_mcs_20_only = caps->eht_supp_mcs_ext_5G[0];
+		cap_band->eht_mcs_80 = caps->eht_supp_mcs_ext_5G[1];
+		cap_band->eht_mcs_160 = caps->eht_supp_mcs_ext_5G[2];
+		cap_band->eht_mcs_320 = caps->eht_supp_mcs_ext_5G[3];
+		memcpy(&cap_band->eht_ppet, &caps->eht_ppet5G,
+		       sizeof(struct ath12k_ppe_threshold));
+		cap_band->eht_cap_info_internal = caps->eht_cap_info_internal;
+	}
+
+	pdev_cap->eml_cap = caps->eml_cap_u.eml_capability;
+	pdev_cap->mld_cap = caps->mld_cap_u.mld_capability;
+
+	if (data)
+		pdev_cap->ru_punct_supp_bw = data->param.ru_punct_supp_bw;
+
+	return 0;
+}
+
+static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
+					   u16 len, const void *ptr, void *data)
+{
+	struct wmi_mac_phy_caps_ext *caps = (struct wmi_mac_phy_caps_ext *)ptr;
+	int i, ret;
+
+	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
+		return -EPROTO;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		if (ab->pdevs[i].pdev_id == caps->u.wmi_pdev_to_link_map.pdev_id)
+			break;
+	}
+
+	if (i == ab->num_radios)
+		return -EINVAL;
+
+	ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i],
+						    data);
+	if (ret) {
+		ath12k_warn(ab,
+			    "failed to extract mac phy caps ext, pdev_id:%d\n",
+			    ab->pdevs[i].pdev_id);
+		return ret;
+	}
+
+	return 0;
+}
+
 void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
 			     struct ath12k_wmi_resource_config_arg *config)
 {
+	struct ath12k_hw_group *ag = ab->ag;
+
+	config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
+
+	if (ab->num_radios == 2) {
+		config->num_peers = TARGET_NUM_PEERS(DBS);
+		config->num_tids = TARGET_NUM_TIDS(DBS);
+	} else if (ab->num_radios == 3) {
+		config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
+		config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
+	} else {
+		/* Control should not reach here */
+		config->num_peers = TARGET_NUM_PEERS(SINGLE);
+		config->num_tids = TARGET_NUM_TIDS(SINGLE);
+	}
+	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
+	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
+	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
+	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
+	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+
+	if (test_bit(ATH12K_FLAG_RAW_MODE, &ag->dev_flags))
+		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
+	else
+		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+
+	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
+	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
+	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
+	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
+	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
+	config->rx_skip_defrag_timeout_dup_detection_check =
+		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+	config->vow_config = TARGET_VOW_CONFIG;
+	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
+	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
+	config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
+	config->rx_batchmode = TARGET_RX_BATCHMODE;
+	/* Indicates host supports peer map v3 and unmap v2 support */
+	config->peer_map_unmap_version = 0x32;
+	config->twt_ap_pdev_count = ab->num_radios;
+	config->twt_ap_sta_count = 1000;
+	config->ema_max_vap_cnt = ab->num_radios;
+	config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
+	config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
+
+	if (test_bit(WMI_TLV_SERVICE_SAWF_LEVEL0, ab->wmi_ab.svc_map))
+		config->sawf = true;
+	if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
+		config->dp_peer_meta_data_ver = TARGET_RX_PEER_METADATA_VER_V1B;
+
+}
+
+void ath12k_wmi_init_ipq5332(struct ath12k_base *ab,
+			     struct ath12k_wmi_resource_config_arg *config)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+
 	config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
 
 	if (ab->num_radios == 2) {
@@ -182,7 +420,7 @@
 	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
 	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
 
-	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
+	if (test_bit(ATH12K_FLAG_RAW_MODE, &ag->dev_flags))
 		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
 	else
 		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
@@ -208,8 +446,18 @@
 	config->peer_map_unmap_version = 0x32;
 	config->twt_ap_pdev_count = ab->num_radios;
 	config->twt_ap_sta_count = 1000;
+	config->ema_max_vap_cnt = ab->num_radios;
+	config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
+	config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
+
+	if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
+		config->dp_peer_meta_data_ver = TARGET_RX_PEER_METADATA_VER_V1B;
+
+	if (test_bit(WMI_TLV_SERVICE_SAWF_LEVEL0, ab->wmi_ab.svc_map))
+		config->sawf = true;
 }
 
+
 void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
 			     struct ath12k_wmi_resource_config_arg *config)
 {
@@ -254,6 +502,10 @@
 	config->num_multicast_filter_entries = 0x20;
 	config->num_wow_filters = 0x16;
 	config->num_keep_alive_pattern = 0;
+
+	if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
+		config->dp_peer_meta_data_ver = TARGET_RX_PEER_METADATA_VER_V1A;
+
 }
 
 #define PRIMAP(_hw_mode_) \
@@ -270,6 +522,69 @@
 	PRIMAP(WMI_HOST_HW_MODE_MAX),
 };
 
+
+enum wmi_host_channel_width
+ath12k_wmi_get_host_chan_width(u32 width)
+{
+	enum wmi_host_channel_width host_width;
+
+	switch (width) {
+	case NL80211_CHAN_WIDTH_20_NOHT:
+	case NL80211_CHAN_WIDTH_20:
+		host_width = WMI_HOST_CHAN_WIDTH_20;
+		break;
+	case NL80211_CHAN_WIDTH_40:
+		host_width = WMI_HOST_CHAN_WIDTH_40;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		host_width = WMI_HOST_CHAN_WIDTH_80;
+		break;
+	case NL80211_CHAN_WIDTH_160:
+		host_width = WMI_HOST_CHAN_WIDTH_160;
+		break;
+	case NL80211_CHAN_WIDTH_80P80:
+		host_width = WMI_HOST_CHAN_WIDTH_80P80;
+		break;
+	case NL80211_CHAN_WIDTH_320:
+		host_width = WMI_HOST_CHAN_WIDTH_320;
+		break;
+	default:
+		host_width = WMI_HOST_CHAN_WIDTH_MAX;
+		break;
+	}
+
+	return host_width;
+}
+
+enum wmi_host_channel_width ath12k_wmi_get_host_chan_switch_width(u32 width)
+{
+	enum wmi_host_channel_width host_width;
+
+	switch (width) {
+	case IEEE80211_STA_RX_BW_20:
+		host_width = WMI_HOST_CHAN_WIDTH_20;
+		break;
+	case IEEE80211_STA_RX_BW_40:
+		host_width = WMI_HOST_CHAN_WIDTH_40;
+		break;
+	case IEEE80211_STA_RX_BW_80:
+		host_width = WMI_HOST_CHAN_WIDTH_80;
+		break;
+	case IEEE80211_STA_RX_BW_160:
+		host_width = WMI_HOST_CHAN_WIDTH_160;
+		break;
+	case IEEE80211_STA_RX_BW_320:
+		host_width = WMI_HOST_CHAN_WIDTH_320;
+		break;
+	default:
+		ath12k_warn(NULL, "invalid bandwidth %d switching back to 20 MHz\n", width);
+		host_width = WMI_HOST_CHAN_WIDTH_20;
+		break;
+	}
+
+	return host_width;
+}
+
 static int
 ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
 		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
@@ -281,6 +596,7 @@
 	u16 tlv_tag, tlv_len;
 	int ret;
 
+
 	while (len > 0) {
 		if (len < sizeof(*tlv)) {
 			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
@@ -390,21 +706,41 @@
 {
 	struct ath12k_wmi_base *wmi_sc = wmi->wmi_ab;
 	int ret = -EOPNOTSUPP;
+	struct ath12k_base *ab = wmi_sc->ab;
+
+	if (!(test_bit(ATH12K_FLAG_WMI_INIT_DONE, &wmi_sc->ab->dev_flags)) &&
+		cmd_id != WMI_INIT_CMDID)
+		return -ESHUTDOWN;
 
 	might_sleep();
 
+	if (ab->hw_params->credit_flow) {
 	wait_event_timeout(wmi_sc->tx_credits_wq, ({
 		ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
 
-		if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags))
+			if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH,
+					    &wmi_sc->ab->dev_flags))
 			ret = -ESHUTDOWN;
-
 		(ret != -EAGAIN);
 	}), WMI_SEND_TIMEOUT_HZ);
+	} else {
+		wait_event_timeout(wmi->tx_ce_desc_wq, ({
+			ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
+			if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH,
+			    		    &wmi_sc->ab->dev_flags))
+				ret = -ESHUTDOWN;
+
+			(ret != -ENOBUFS);
+			}), WMI_SEND_TIMEOUT_HZ);
+	}
 
 	if (ret == -EAGAIN)
 		ath12k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id);
 
+	if (ret == -ENOBUFS)
+		ath12k_warn(wmi_sc->ab, "ce desc not available for wmi command %d\n",
+			    cmd_id);
+
 	return ret;
 }
 
@@ -472,14 +808,18 @@
 	mac_caps = wmi_mac_phy_caps + phy_idx;
 
 	pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
+	pdev->hw_link_id = mac_caps->hw_link_id;
 	pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
 	pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
+	pdev_cap->chainmask_table_id = mac_caps->chainmask_table_id;
 
 	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
 	 * band to band for a single radio, need to see how this should be
 	 * handled.
 	 */
 	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
+		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_2g);
+		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_2g);
 		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
 		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
 	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
@@ -488,6 +828,10 @@
 		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
 		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
 		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
+		pdev_cap->nss_ratio_enabled =
+			WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_caps->nss_ratio);
+		pdev_cap->nss_ratio_info =
+			WMI_NSS_RATIO_INFO_GET(mac_caps->nss_ratio);
 	} else {
 		return -EINVAL;
 	}
@@ -567,6 +911,225 @@
 	return 0;
 }
 
+static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab,
+						struct sk_buff *skb)
+{
+	const void **tb;
+	const struct wmi_mlo_setup_complete_event_fixed_param *ev;
+	struct ath12k *ar = NULL;
+	struct ath12k_pdev *pdev;
+	int ret, i;
+
+	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath12k_warn(ab,
+			    "failed to parse mlo setup complete event tlv %d\n",
+			    ret);
+		return;
+	}
+
+	ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT];
+	if (!ev) {
+		ath12k_warn(ab, "failed to fetch mlo setup complete event\n");
+		kfree(tb);
+		return;
+	}
+
+	if (ev->status)
+		ath12k_warn(ab, "mlo setup, pdev id %u, status %u\n",
+			    ev->pdev_id, ev->status);
+
+	if (ev->pdev_id > ab->num_radios)
+		goto skip_lookup;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		if (pdev && pdev->pdev_id == ev->pdev_id) {
+			ar = pdev->ar;
+			break;
+		}
+	}
+
+skip_lookup:
+	if (!ar) {
+		ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n",
+			    ev->pdev_id, ev->status);
+		goto out;
+	}
+
+	ar->mlo_setup_status = ev->status;
+	if (ab->hw_params->hal_ops->hal_get_tqm_scratch_reg)
+		ab->hw_params->hal_ops->hal_get_tqm_scratch_reg(ab, &ar->delta_tqm);
+	complete(&ar->mlo_setup_done);
+
+out:
+	kfree(tb);
+}
+
+static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
+					       struct sk_buff *skb)
+{
+	const void **tb;
+	const struct wmi_mlo_teardown_complete_fixed_param *ev;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_pdev *pdev;
+	struct ath12k *ar;
+	int ret, i, j;
+	bool complete_flag = true;
+
+	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath12k_warn(ab,
+			    "failed to parse teardown complete event tlv %d\n",
+			    ret);
+		return;
+	}
+
+	ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE];
+	if (!ev) {
+		ath12k_warn(ab, "failed to fetch teardown complete event\n");
+		kfree(tb);
+		return;
+	}
+
+	if (ev->status)
+		ath12k_warn(ab, "mlo teardown, pdev id %u, status %u\n",
+			    ev->pdev_id, ev->status);
+
+	kfree(tb);
+
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+	if (!ar) {
+		ath12k_warn(ab, "invalid pdev id in teardown complete ev %d",
+			    ev->pdev_id);
+		return;
+	}
+	ar->mlo_complete_event = true;
+
+	for (i = 0; i < ag->num_chip; i++) {
+		ab = ag->ab[i];
+
+		for (j = 0; j < ab->num_radios; j++) {
+			pdev = &ab->pdevs[j];
+			ar = pdev->ar;
+
+			if (!ar->mlo_complete_event)
+				complete_flag = false;
+		}
+	}
+
+	if (complete_flag && ag->recovery_mode == ATH12K_MLO_RECOVERY_MODE1)
+		complete(&ag->umac_reset_complete);
+}
+
+static void ath12k_wmi_process_mvr_event(struct ath12k_base *ab, u32 *vdev_id_bm,
+					 u32 num_vdev_bm)
+{
+	struct ath12k *ar = NULL;
+	struct ath12k_link_vif *arvif = NULL;
+	u32 bit_pos;
+	unsigned long vdev_bitmap;
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "wmi mvr resp num_vdev_bm %d vdev_id_bm[0]=0x%x vdev_id_bm[1]=0x%x\n",
+		   num_vdev_bm, vdev_id_bm[0],
+		   (num_vdev_bm == WMI_MVR_RESP_VDEV_BM_MAX_LEN ?
+				   vdev_id_bm[1] : 0x00));
+
+	/* 31-0 bits processing */
+	vdev_bitmap = vdev_id_bm[0];
+
+	for (bit_pos = 0; bit_pos < 32; bit_pos++) {
+
+		if (!(vdev_bitmap & BIT(bit_pos)))
+			continue;
+
+		arvif = ath12k_mac_get_arvif_by_vdev_id(ab, bit_pos);
+		if (!arvif) {
+			ar = ath12k_mac_get_ar_by_vdev_id(ab, bit_pos);
+			if (!(ar && bit_pos == ar->monitor_vdev_id)) {
+				ath12k_warn(ab, "wmi mvr resp for unknown vdev %d", bit_pos);
+				continue;
+			}
+		} else {
+			arvif->mvr_processing = false;
+		}
+		ath12k_dbg(ab, ATH12K_DBG_WMI,
+			   "wmi mvr vdev %d restarted\n", bit_pos);
+	}
+
+	/* TODO: 63-32 bits processing
+	 * Add support to parse bitmap once support for
+	 * TARGET_NUM_VDEVS > 32 is added
+	 */
+
+	if (arvif)
+		ar = arvif->ar;
+
+	if (ar)
+		complete(&ar->mvr_complete);
+}
+
+static int ath12k_wmi_tlv_mvr_event_parse(struct ath12k_base *ab,
+					  u16 tag, u16 len,
+					  const void *ptr, void *data)
+{
+	struct wmi_pdev_mvr_resp_event_parse *parse = data;
+	struct wmi_pdev_mvr_resp_event_fixed_param *fixed_param;
+
+	switch(tag) {
+	case WMI_TAG_MULTIPLE_VDEV_RESTART_RESPONSE_EVENT:
+		fixed_param = (struct wmi_pdev_mvr_resp_event_fixed_param *)ptr;
+
+		if (fixed_param->status) {
+			ath12k_warn(ab, "wmi mvr resp event status %u\n",
+				    fixed_param->status);
+			return -EINVAL;
+		}
+
+		memcpy(&parse->fixed_param, fixed_param,
+		       sizeof(struct wmi_pdev_mvr_resp_event_fixed_param));
+		break;
+	case WMI_TAG_ARRAY_UINT32:
+		if ((len > WMI_MVR_RESP_VDEV_BM_MAX_LEN_BYTES) || (len == 0)) {
+			ath12k_warn(ab, "wmi invalid vdev id len in mvr resp %u\n",
+				    len);
+			return -EINVAL;
+		}
+
+		parse->num_vdevs_bm = len / sizeof(u32);
+		memcpy(parse->vdev_id_bm, ptr, len);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static void ath12k_wmi_event_mvr_response(struct ath12k_base *ab,
+					  struct sk_buff *skb)
+{
+	struct wmi_pdev_mvr_resp_event_parse parse = {};
+	int ret;
+
+	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+				  ath12k_wmi_tlv_mvr_event_parse,
+				  &parse);
+	if (ret) {
+		ath12k_warn(ab, "wmi failed to parse mvr response tlv %d\n",
+			    ret);
+		return;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi mvr resp for pdev %d\n",
+		   parse.fixed_param.pdev_id);
+
+	ath12k_wmi_process_mvr_event(ab, parse.vdev_id_bm, parse.num_vdevs_bm);
+}
+
 static int
 ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
 				const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
@@ -720,18 +1283,25 @@
 }
 
 int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
-			 struct sk_buff *frame)
+			 struct sk_buff *frame, bool link_agnostic)
 {
 	struct ath12k_wmi_pdev *wmi = ar->wmi;
 	struct wmi_mgmt_send_cmd *cmd;
+	struct wmi_mlo_mgmt_send_params *ml_params;
 	struct wmi_tlv *frame_tlv;
 	struct sk_buff *skb;
 	u32 buf_len;
 	int ret, len;
+	void *ptr;
+	struct wmi_tlv *tlv;
 
 	buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);
 
-	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
+	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, sizeof(u32));
+
+	if (link_agnostic)
+		len += sizeof(struct wmi_mgmt_send_params) +
+				TLV_HDR_SIZE + sizeof(*ml_params);
 
 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
 	if (!skb)
@@ -750,10 +1320,34 @@
 	cmd->tx_params_valid = 0;
 
 	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
-	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);
+	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, roundup(buf_len, sizeof(u32)));
 
 	memcpy(frame_tlv->value, frame->data, buf_len);
 
+	if (!link_agnostic)
+		goto send;
+
+	ptr = skb->data + sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, sizeof(u32));
+
+	tlv = ptr;
+
+	/* Tx params not used currently */
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TX_SEND_PARAMS) |
+		      FIELD_PREP(WMI_TLV_LEN, sizeof(struct wmi_mgmt_send_params) - TLV_HDR_SIZE);
+	ptr += sizeof(struct wmi_mgmt_send_params);
+
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+		      FIELD_PREP(WMI_TLV_LEN, sizeof(*ml_params));
+	ptr += TLV_HDR_SIZE;
+
+	ml_params = ptr;
+	ml_params->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MLO_TX_SEND_PARAMS) |
+				FIELD_PREP(WMI_TLV_LEN, sizeof(*ml_params) - TLV_HDR_SIZE);
+
+	ml_params->hw_link_id = WMI_MLO_MGMT_TID;
+
+send:
 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
 	if (ret) {
 		ath12k_warn(ar->ab,
@@ -764,23 +1358,125 @@
 	return ret;
 }
 
+static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
+				    struct ieee80211_tx_info *info)
+{
+	u32 freq = 0;
+
+	if (ar->scan.is_roc &&
+	    (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+		freq = ar->scan.roc_freq;
+
+	return freq;
+}
+
+/* On a big-endian host, Copy Engine byte swapping is enabled.
+ * When the Copy Engine performs the byte swap, the host must byte
+ * swap again to get/put buffer content in the correct byte order.
+ */
+void ath12k_ce_byte_swap(void *mem, u32 len)
+{
+        int i;
+
+        if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
+                if (!mem)
+                        return;
+
+                for (i = 0; i < (len / 4); i++) {
+                        *(u32 *)mem = swab32(*(u32 *)mem);
+                        mem += 4;
+                }
+        }
+}
+
+/* Send an off-channel management frame to firmware. When the driver receives
+ * a packet with the off-channel tx flag enabled, this API sends the
+ * packet to firmware with WMI command WMI_TAG_OFFCHAN_DATA_TX_SEND_CMD
+ * for off-chan tx.
+ */
+int ath12k_wmi_offchan_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
+				 struct sk_buff *frame)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
+	struct wmi_mgmt_send_cmd *cmd;
+	struct wmi_tlv *frame_tlv;
+	struct sk_buff *skb;
+	u32 buf_len, buf_len_padded;
+	int ret, len;
+	void *ptr;
+	struct wmi_tlv *tlv;
+
+	buf_len = min(frame->len, (unsigned int)WMI_MGMT_SEND_DOWNLD_LEN);
+	buf_len_padded = roundup(buf_len, sizeof(u32));
+
+	len = sizeof(*cmd) + sizeof(*frame_tlv) + buf_len_padded +
+	      sizeof(struct wmi_mgmt_send_params);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	ptr = skb->data;
+	cmd = ptr;
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OFFCHAN_DATA_TX_SEND_CMD,
+						 sizeof(*cmd));
+	cmd->vdev_id = cpu_to_le32(vdev_id);
+	cmd->desc_id = cpu_to_le32(buf_id);
+	cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
+	cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
+	cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
+	cmd->frame_len = cpu_to_le32(frame->len);
+	cmd->buf_len = cpu_to_le32(buf_len);
+	cmd->tx_params_valid = 1;
+	ptr += sizeof(*cmd);
+
+	frame_tlv = ptr;
+	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len_padded);
+	ptr += sizeof(*frame_tlv);
+
+	memcpy(ptr, frame->data, buf_len);
+	ath12k_ce_byte_swap(ptr, buf_len);
+	ptr += buf_len_padded;
+
+	tlv = ptr;
+	/* Tx params not used currently */
+	tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TX_SEND_PARAMS,
+					     sizeof(struct wmi_mgmt_send_params));
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_OFFCHAN_DATA_TX_SEND_CMDID);
+
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to submit WMI_OFFCHAN_DATA_TX_SEND_CMDID cmd\n");
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
+
 int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
 			   struct ath12k_wmi_vdev_create_arg *args)
 {
 	struct ath12k_wmi_pdev *wmi = ar->wmi;
 	struct wmi_vdev_create_cmd *cmd;
+	struct wmi_vdev_create_mlo_params *ml_params;
 	struct sk_buff *skb;
 	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
 	struct wmi_tlv *tlv;
 	int ret, len;
 	void *ptr;
+	bool is_ml_vdev;
+
+	is_ml_vdev = is_valid_ether_addr(args->mld_addr);
 
 	/* It can be optimized my sending tx/rx chain configuration
 	 * only for supported bands instead of always sending it for
 	 * both the bands.
 	 */
 	len = sizeof(*cmd) + TLV_HDR_SIZE +
-		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
+		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
+		(is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);
 
 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
 	if (!skb)
@@ -796,6 +1492,8 @@
 	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
 	cmd->pdev_id = cpu_to_le32(args->pdev_id);
 	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
+	cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
+	cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
 	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
 
 	ptr = skb->data + sizeof(*cmd);
@@ -824,6 +1522,23 @@
 	txrx_streams->supported_rx_streams =
 				 args->chains[NL80211_BAND_5GHZ].rx;
 
+	if (is_ml_vdev) {
+		ptr = skb->data + sizeof(*cmd) + TLV_HDR_SIZE +
+			(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
+		tlv = ptr;
+		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+			      FIELD_PREP(WMI_TLV_LEN, sizeof(*ml_params));
+
+		ptr += TLV_HDR_SIZE;
+		ml_params = ptr;
+
+		ml_params->tlv_header =
+			FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MLO_VDEV_CREATE_PARAMS) |
+			FIELD_PREP(WMI_TLV_LEN, sizeof(ml_params->mld_macaddr.addr));
+
+		ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
+	}
+
 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
 		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
 		   args->if_id, args->type, args->subtype,
@@ -855,7 +1570,8 @@
 						 sizeof(*cmd));
 	cmd->vdev_id = cpu_to_le32(vdev_id);
 
-	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d num_peers : %d\n",
+		   vdev_id, ar->num_peers);
 
 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
 	if (ret) {
@@ -883,7 +1599,7 @@
 						 sizeof(*cmd));
 	cmd->vdev_id = cpu_to_le32(vdev_id);
 
-	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);
+	ath12k_dbg(ar->ab, ATH12K_DBG_SET(WMI, L1), "WMI vdev stop id 0x%x\n", vdev_id);
 
 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
 	if (ret) {
@@ -911,55 +1627,173 @@
 						 sizeof(*cmd));
 	cmd->vdev_id = cpu_to_le32(vdev_id);
 
-	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);
+	ath12k_dbg(ar->ab, ATH12K_DBG_SET(WMI, L1), "WMI vdev down id 0x%x\n", vdev_id);
 
 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
 	if (ret) {
-		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
+		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd vdev id : %d\n",
+			    vdev_id);
 		dev_kfree_skb(skb);
 	}
 
 	return ret;
 }
 
+static inline bool
+ath12k_wmi_check_device_present(u32 width_device,
+				u32 center_freq_device,
+				u32 center_freq_oper)
+{
+	return (center_freq_device && width_device &&
+		center_freq_device != center_freq_oper);
+}
+
+static u32 ath12k_wmi_set_ru_punc_bitmap_device(u32 oper_freq,
+						enum nl80211_chan_width width_device,
+						u32 device_freq,
+						u32 oper_ru_punct_bitmap)
+{
+	u32 device_bitmap = oper_ru_punct_bitmap;
+
+	if (oper_freq == device_freq || oper_freq < device_freq)
+		return device_bitmap;
+
+	switch (width_device) {
+	case NL80211_CHAN_WIDTH_160:
+		if (oper_freq > device_freq)
+			device_bitmap = (oper_ru_punct_bitmap << 4) | 0x0000FF0F;
+		break;
+	case NL80211_CHAN_WIDTH_320:
+		if (oper_freq > device_freq)
+			device_bitmap = (oper_ru_punct_bitmap << 8) | 0x000000FF;
+		break;
+	default:
+		return device_bitmap;
+	}
+
+	return device_bitmap;
+}
+
+static void ath12k_wmi_set_wmi_channel_device(struct ath12k_wmi_channel_params *chan_device,
+					      struct wmi_channel_arg *channel,
+					      u32 cf_device, u32 width_device)
+{
+	enum wmi_phy_mode mode_device;
+
+	memset(chan_device, 0, sizeof(*chan_device));
+
+	chan_device->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
+							 sizeof(*chan_device));
+	chan_device->mhz = cpu_to_le32(channel->freq);
+	chan_device->band_center_freq1 = cpu_to_le32(cf_device);
+
+	if (width_device == NL80211_CHAN_WIDTH_320) {
+		mode_device = MODE_11BE_EHT320;
+		if (channel->freq > chan_device->band_center_freq1)
+			chan_device->band_center_freq1 = cf_device + 80;
+		else
+			chan_device->band_center_freq1 = cf_device - 80;
+		chan_device->band_center_freq2 = cf_device;
+	} else if (width_device == NL80211_CHAN_WIDTH_160) {
+		mode_device = MODE_11BE_EHT160;
+		if (channel->freq > chan_device->band_center_freq1)
+			chan_device->band_center_freq1 = cf_device + 40;
+		else
+			chan_device->band_center_freq1 = cf_device - 40;
+		chan_device->band_center_freq2 = cf_device;
+	} else if (width_device == NL80211_CHAN_WIDTH_80) {
+		mode_device = MODE_11BE_EHT80;
+	} else if (width_device == NL80211_CHAN_WIDTH_40) {
+		mode_device = MODE_11BE_EHT40;
+	} else {
+		mode_device = MODE_UNKNOWN;
+	}
+
+	chan_device->info |= le32_encode_bits(mode_device, WMI_CHAN_INFO_MODE);
+	if (channel->passive)
+		chan_device->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
+	if (channel->allow_ibss)
+		chan_device->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
+	if (channel->allow_ht)
+		chan_device->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
+	if (channel->allow_vht)
+		chan_device->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
+	if (channel->allow_he)
+		chan_device->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
+	if (channel->ht40plus)
+		chan_device->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
+	if (channel->chan_radar)
+		chan_device->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
+	if (channel->freq2_radar)
+		chan_device->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);
+
+	chan_device->reg_info_1 = le32_encode_bits(channel->max_power,
+						   WMI_CHAN_REG_INFO1_MAX_PWR) |
+				  le32_encode_bits(channel->max_reg_power,
+						   WMI_CHAN_REG_INFO1_MAX_REG_PWR);
+
+	chan_device->reg_info_2 = le32_encode_bits(channel->max_antenna_gain,
+						   WMI_CHAN_REG_INFO2_ANT_MAX) |
+				  le32_encode_bits(channel->max_power,
+						   WMI_CHAN_REG_INFO2_MAX_TX_PWR);
+}
+
 static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
-				       struct wmi_vdev_start_req_arg *arg)
+				       struct wmi_channel_arg chan_arg)
 {
+	u32 center_freq1 = chan_arg.band_center_freq1;
 	memset(chan, 0, sizeof(*chan));
 
-	chan->mhz = cpu_to_le32(arg->freq);
-	chan->band_center_freq1 = cpu_to_le32(arg->band_center_freq1);
-	if (arg->mode == MODE_11AC_VHT80_80)
-		chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
+	chan->mhz = chan_arg.freq;
+	chan->band_center_freq1 = chan_arg.band_center_freq1;
+	if (chan_arg.mode == MODE_11BE_EHT320) {
+		if (chan_arg.freq > chan_arg.band_center_freq1)
+			chan->band_center_freq1 = center_freq1 + 80;
 	else
+			chan->band_center_freq1 = center_freq1 - 80;
+
+		chan->band_center_freq2 = chan_arg.band_center_freq1;
+
+	} else if (chan_arg.mode == MODE_11BE_EHT160) {
+		if (chan_arg.freq > chan_arg.band_center_freq1)
+			chan->band_center_freq1 = center_freq1 + 40;
+		else
+			chan->band_center_freq1 = center_freq1 - 40;
+
+		chan->band_center_freq2 = chan_arg.band_center_freq1;
+
+	} else if (chan_arg.mode == MODE_11BE_EHT80_80) {
+		chan->band_center_freq2 = chan_arg.band_center_freq2;
+	}else{
 		chan->band_center_freq2 = 0;
+	}
 
-	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
-	if (arg->passive)
+	chan->info |= u32_encode_bits(chan_arg.mode, WMI_CHAN_INFO_MODE);
+	if (chan_arg.passive)
 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
-	if (arg->allow_ibss)
+	if (chan_arg.allow_ibss)
 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
-	if (arg->allow_ht)
+	if (chan_arg.allow_ht)
 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
-	if (arg->allow_vht)
+	if (chan_arg.allow_vht)
 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
-	if (arg->allow_he)
+	if (chan_arg.allow_he)
 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
-	if (arg->ht40plus)
+	if (chan_arg.ht40plus)
 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
-	if (arg->chan_radar)
+	if (chan_arg.chan_radar)
 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
-	if (arg->freq2_radar)
+	if (chan_arg.freq2_radar)
 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);
 
-	chan->reg_info_1 = le32_encode_bits(arg->max_power,
+	chan->reg_info_1 = le32_encode_bits(chan_arg.max_power,
 					    WMI_CHAN_REG_INFO1_MAX_PWR) |
-		le32_encode_bits(arg->max_reg_power,
+		le32_encode_bits(chan_arg.max_reg_power,
 				 WMI_CHAN_REG_INFO1_MAX_REG_PWR);
 
-	chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
+	chan->reg_info_2 = le32_encode_bits(chan_arg.max_antenna_gain,
 					    WMI_CHAN_REG_INFO2_ANT_MAX) |
-		le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
+		le32_encode_bits(chan_arg.max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
 }
 
 int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
@@ -967,16 +1801,32 @@
 {
 	struct ath12k_wmi_pdev *wmi = ar->wmi;
 	struct wmi_vdev_start_request_cmd *cmd;
+	struct wmi_vdev_start_mlo_params *ml_params;
+	struct wmi_partner_link_info *partner_info;
 	struct sk_buff *skb;
 	struct ath12k_wmi_channel_params *chan;
 	struct wmi_tlv *tlv;
+	struct ath12k_wmi_channel_params *chan_device;
 	void *ptr;
-	int ret, len;
+	int ret, len, ml_arg_size = 0;
+	u8 i;
+	struct ath12k_hw_group *ag = ar->ab->ag;
+	bool device_params_present = false;
 
 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
 		return -EINVAL;
 
-	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
+	if (!restart && arg->ml.enabled)
+		ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
+			      TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
+
+	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE + ml_arg_size;
+
+	device_params_present = ath12k_wmi_check_device_present(arg->width_device,
+								arg->center_freq_device,
+								arg->channel.band_center_freq1);
+	if (device_params_present)
+		len += TLV_HDR_SIZE + sizeof(*chan_device);
 
 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
 	if (!skb)
@@ -995,6 +1845,16 @@
 	cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
 	cmd->regdomain = cpu_to_le32(arg->regdomain);
 	cmd->he_ops = cpu_to_le32(arg->he_ops);
+	cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
+	cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);
+
+	if (device_params_present) {
+		arg->ru_punct_bitmap = ath12k_wmi_set_ru_punc_bitmap_device(arg->channel.freq,
+									    arg->width_device,
+									    arg->center_freq_device,
+									    arg->ru_punct_bitmap);
+	}
+	cmd->ru_punct_bitmap = cpu_to_le32(arg->ru_punct_bitmap);
 
 	if (!restart) {
 		if (arg->ssid) {
@@ -1009,10 +1869,13 @@
 
 	cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);
 
+	if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ag->dev_flags))
+		cmd->flags |= cpu_to_le32(WMI_VDEV_START_HW_ENCRYPTION_DISABLED);
+
 	ptr = skb->data + sizeof(*cmd);
 	chan = ptr;
 
-	ath12k_wmi_put_wmi_channel(chan, arg);
+	ath12k_wmi_put_wmi_channel(chan, arg->channel);
 
 	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
 						  sizeof(*chan));
@@ -1024,12 +1887,74 @@
 	/* Note: This is a nested TLV containing:
 	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
 	 */
-
 	ptr += sizeof(*tlv);
 
+	if (ml_arg_size) {
+		tlv = ptr;
+		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+				  FIELD_PREP(WMI_TLV_LEN, sizeof(*ml_params));
+		ptr += TLV_HDR_SIZE;
+
+		ml_params = ptr;
+
+		ml_params->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MLO_VDEV_START_PARAMS) |
+					 FIELD_PREP(WMI_TLV_LEN, sizeof(*ml_params) - TLV_HDR_SIZE);
+
+		ml_params->flags = FIELD_PREP(ATH12K_WMI_FLAG_MLO_ENABLED,
+					      arg->ml.enabled) |
+				   FIELD_PREP(ATH12K_WMI_FLAG_MLO_ASSOC_LINK,
+					      arg->ml.assoc_link) |
+				   FIELD_PREP(ATH12K_WMI_FLAG_MLO_MCAST_VDEV,
+					      arg->ml.mcast_link) |
+				   FIELD_PREP(ATH12K_WMI_FLAG_MLO_LINK_ADD,
+					      arg->ml.link_add);
+
+		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
+			   arg->vdev_id, ml_params->flags);
+
+		ptr += sizeof(*ml_params);
+
+		tlv = ptr;
+		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+				  FIELD_PREP(WMI_TLV_LEN, arg->ml.num_partner_links *
+						sizeof(*partner_info));
+		ptr += TLV_HDR_SIZE;
+
+		partner_info = ptr;
+
+		for (i = 0; i < arg->ml.num_partner_links; i++) {
+			partner_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+							      WMI_TAG_MLO_PARTNER_LINK_PARAMS) |
+						   FIELD_PREP(WMI_TLV_LEN, sizeof(*partner_info) -
+							      TLV_HDR_SIZE);
+
+			partner_info->vdev_id = arg->ml.partner_info[i].vdev_id;
+			partner_info->hw_link_id = arg->ml.partner_info[i].hw_link_id;
+			ether_addr_copy(partner_info->vdev_addr.addr, arg->ml.partner_info[i].addr);
+
+			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr%pM\n",
+				   partner_info->vdev_id, partner_info->hw_link_id,
+				   partner_info->vdev_addr.addr);
+			partner_info++;
+		}
+	}
+
+	if (device_params_present) {
+		tlv = ptr;
+		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+						 sizeof(*chan_device));
+		ptr += TLV_HDR_SIZE;
+
+		chan_device = ptr;
+		ath12k_wmi_set_wmi_channel_device(chan_device, &arg->channel,
+						  arg->center_freq_device,
+						  arg->width_device);
+		ptr += sizeof(*chan_device);
+	}
+
 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
 		   restart ? "restart" : "start", arg->vdev_id,
-		   arg->freq, arg->mode);
+		   arg->channel.freq, arg->channel.mode);
 
 	if (restart)
 		ret = ath12k_wmi_cmd_send(wmi, skb,
@@ -1046,7 +1971,7 @@
 	return ret;
 }
 
-int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+int ath12k_wmi_vdev_up(struct ath12k *ar, struct vdev_up_params *params)
 {
 	struct ath12k_wmi_pdev *wmi = ar->wmi;
 	struct wmi_vdev_up_cmd *cmd;
@@ -1061,14 +1986,17 @@
 
 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
 						 sizeof(*cmd));
-	cmd->vdev_id = cpu_to_le32(vdev_id);
-	cmd->vdev_assoc_id = cpu_to_le32(aid);
-
-	ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+	cmd->vdev_id = cpu_to_le32(params->vdev_id);
+	cmd->vdev_assoc_id = cpu_to_le16(params->aid);
+	ether_addr_copy(cmd->vdev_bssid.addr, params->bssid);
+	cmd->profile_idx = cpu_to_le32(params->profile_idx);
+	cmd->profile_count = cpu_to_le32(params->profile_count);
+	if (params->tx_bssid)
+		ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid);
 
 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
 		   "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
-		   vdev_id, aid, bssid);
+		   params->vdev_id, params->aid, params->bssid);
 
 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
 	if (ret) {
@@ -1085,9 +2013,14 @@
 	struct ath12k_wmi_pdev *wmi = ar->wmi;
 	struct wmi_peer_create_cmd *cmd;
 	struct sk_buff *skb;
-	int ret;
+	int ret, len;
+	struct wmi_peer_create_mlo_params *ml_param;
+	void *ptr;
+	struct wmi_tlv *tlv;
 
-	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
 	if (!skb)
 		return -ENOMEM;
 
@@ -1099,9 +2032,22 @@
 	cmd->peer_type = cpu_to_le32(arg->peer_type);
 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
 
-	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
-		   "WMI peer create vdev_id %d peer_addr %pM\n",
-		   arg->vdev_id, arg->peer_addr);
+	ptr = skb->data + sizeof(*cmd);
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+		      FIELD_PREP(WMI_TLV_LEN, sizeof(*ml_param));
+	ptr += TLV_HDR_SIZE;
+
+	ml_param = ptr;
+	ml_param->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MLO_PEER_CREATE_PARAMS) |
+				FIELD_PREP(WMI_TLV_LEN, sizeof(*ml_param) - TLV_HDR_SIZE);
+
+	ml_param->flags = FIELD_PREP(ATH12K_WMI_FLAG_MLO_ENABLED, arg->ml_enabled);
+	
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_PEER,
+		   "WMI peer create vdev_id %d peer_addr %pM ml_enabled %d\n",
+		   arg->vdev_id, arg->peer_addr, arg->ml_enabled);
 
 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
 	if (ret) {
@@ -1131,13 +2077,162 @@
 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
 	cmd->vdev_id = cpu_to_le32(vdev_id);
 
-	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
-		   "WMI peer delete vdev_id %d peer_addr %pM\n",
-		   vdev_id,  peer_addr);
+	ath12k_dbg(ar->ab, ATH12K_DBG_PEER,
+		   "WMI peer delete vdev_id %d peer_addr %pM num_peer : %d\n",
+		   vdev_id,  peer_addr, ar->num_peers);
 
 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
 	if (ret) {
-		ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
+		ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd"
+			   " peer_addr %pM num_peer : %d\n",
+			    peer_addr, ar->num_peers);
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+/* ath12k_wmi_config_peer_ppeds_routing() - program PPE direct-switch routing
+ * for one peer.
+ *
+ * Sends WMI_PEER_CONFIG_PPE_DS_CMDID carrying the peer MAC, owning vdev,
+ * service code, priority-valid flag and source info so firmware can steer
+ * this peer's traffic through the PPE-DS path.
+ *
+ * Returns 0 on success or the ath12k_wmi_cmd_send() error.  On failure the
+ * skb is freed here; on success ownership passes to the WMI layer.
+ */
+int ath12k_wmi_config_peer_ppeds_routing(struct ath12k *ar,
+					 const u8 *peer_addr, u8 vdev_id,
+					 u32 service_code, u32 priority_valid,
+					 u32 src_info, bool ppe_routing_enable)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct sk_buff *skb;
+	struct wmi_peer_config_ppeds_cmd *cmd;
+	int ret;
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd =(struct wmi_peer_config_ppeds_cmd *)skb->data;
+	cmd->tlv_header  = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CONFIG_PPEDS_ROUTING,
+			   sizeof(*cmd));
+
+	/* all u32 command fields go out in firmware (little-endian) order */
+	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+	cmd->vdev_id = cpu_to_le32(vdev_id);
+	cmd->ppe_routing_enable = cpu_to_le32(ppe_routing_enable);
+	cmd->service_code = cpu_to_le32(service_code);
+	cmd->priority_valid = cpu_to_le32(priority_valid);
+	cmd->src_info = cpu_to_le32(src_info);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CONFIG_PPE_DS_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to send WMI_PEER_CONFIG_PPE_DS cmd"
+			   " peer_addr %pM num_peer : %d\n",
+			    peer_addr, ar->num_peers);
+		dev_kfree_skb(skb);
+	}
+	/* debug trace is emitted even on send failure (follows the warn) */
+	ath12k_dbg(ar->ab, ATH12K_DBG_PPE,
+		   "ppe ds routing cmd peer_addr %pM vdev_id %d service_code %d " \
+		   "priority_valid %d src_info %d ppe_routing_enable %d \n",
+		   peer_addr, vdev_id, service_code, priority_valid,
+		   src_info, ppe_routing_enable);
+
+	return ret;
+}
+
+/* ath12k_wmi_send_vdev_set_intra_bss_cmd() - enable/disable intra-BSS
+ * forwarding for a vdev.
+ *
+ * @vdev_id: target vdev
+ * @enable: non-zero to enable intra-BSS forwarding
+ *
+ * Returns 0 on success or the ath12k_wmi_cmd_send() error; the skb is freed
+ * here on send failure.
+ */
+int
+ath12k_wmi_send_vdev_set_intra_bss_cmd(struct ath12k *ar,
+				       u32 vdev_id, u32 enable)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct ath12k_vdev_set_intra_bss_cmd *cmd;
+	struct sk_buff *skb;
+	int ret;
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct ath12k_vdev_set_intra_bss_cmd *)skb->data;
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_INTRA_BSS_PARAMS,
+						 sizeof(*cmd));
+	/* convert to firmware byte order, matching the peer variant of this
+	 * command and every other WMI command in this file (the original
+	 * assigned raw host-endian values)
+	 */
+	cmd->vdev_id = cpu_to_le32(vdev_id);
+	cmd->enable = cpu_to_le32(enable);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_INTRA_BSS_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to send WMI_VDEV_SET_INTRA_BSS_CMDID\n");
+		dev_kfree_skb(skb);
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "WMI vdev id 0x%x set intra bss %s\n",
+		   vdev_id, enable ? "enable" : "disable");
+
+	return ret;
+}
+
+/* ath12k_wmi_set_peer_intra_bss_cmd() - enable/disable intra-BSS forwarding
+ * for a single peer.
+ *
+ * @vdev_id: vdev the peer belongs to
+ * @peer_addr: peer MAC address
+ * @enable: non-zero to enable
+ *
+ * Returns 0 on success or the ath12k_wmi_cmd_send() error; the skb is freed
+ * here on send failure.
+ */
+int ath12k_wmi_set_peer_intra_bss_cmd(struct ath12k *ar, u32 vdev_id, const u8 *peer_addr,
+				      u32 enable)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_peer_set_intra_bss_cmd *cmd;
+	struct sk_buff *skb;
+	int ret;
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	/* cast must match the declared command type; the original cast to
+	 * struct wmi_peer_set_intra_bss_param_cmd disagreed with the
+	 * declaration above
+	 */
+	cmd = (struct wmi_peer_set_intra_bss_cmd *)skb->data;
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_INTRA_BSS_PARAMS,
+						 sizeof(*cmd));
+	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+	cmd->vdev_id = cpu_to_le32(vdev_id);
+	cmd->enable = cpu_to_le32(enable);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_INTRA_BSS_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_INTRA_BSS_CMD\n");
+		dev_kfree_skb(skb);
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "WMI vdev %d peer 0x%pM set intra_bss %s\n",
+		   vdev_id, peer_addr, enable ? "enable" : "disable");
+
+	return ret;
+}
+#endif /* CONFIG_ATH12K_PPE_DS_SUPPORT */
+
+/* ath12k_wmi_send_pdev_pkt_route() - program pdev packet-routing (CCE/PPE)
+ * rules in firmware via WMI_PDEV_UPDATE_PKT_ROUTING_CMDID.
+ *
+ * NOTE(review): unlike most WMI commands in this file, the fields below are
+ * assigned without cpu_to_le32() — confirm whether struct
+ * wmi_pdev_pkt_route_cmd uses native-endian fields, or add conversions
+ * before this runs on a big-endian host.
+ */
+int ath12k_wmi_send_pdev_pkt_route(struct ath12k *ar,
+				   struct ath12k_wmi_pkt_route_param *param)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_pdev_pkt_route_cmd *cmd;
+	struct sk_buff *skb;
+	int ret;
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_pdev_pkt_route_cmd *)skb->data;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+			WMI_TAG_PDEV_UPDATE_PKT_ROUTING_CMD) |
+			FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+	cmd->pdev_id = ar->pdev->pdev_id;
+	cmd->opcode = param->opcode;
+	cmd->route_type_bmap = param->route_type_bmap;
+	cmd->dst_ring = param->dst_ring;
+	cmd->meta_data = param->meta_data;
+	cmd->dst_ring_handler = param->dst_ring_handler;
+
+	/* NOTE(review): format string lacks a space before
+	 * "dst_ring_handler", so the two values run together in the log
+	 */
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "CCE PPE WMI pdev pkt route opcode %d route_bmap %d dst_ring %d meta_data %d" \
+		   "dst_ring_handler %d\n", param->opcode, param->route_type_bmap,
+		   param->dst_ring, param->meta_data, param->dst_ring_handler);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_UPDATE_PKT_ROUTING_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			"failed to send WMI_PDEV_UPDATE_PKT_ROUTING cmd\n");
 		dev_kfree_skb(skb);
 	}
 
@@ -1331,7 +2426,7 @@
 }
 
 int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
-			      u32 param_value, u8 pdev_id)
+			      s32 param_value, u8 pdev_id)
 {
 	struct ath12k_wmi_pdev *wmi = ar->wmi;
 	struct wmi_pdev_set_param_cmd *cmd;
@@ -1349,7 +2444,7 @@
 	cmd->param_id = cpu_to_le32(param_id);
 	cmd->param_value = cpu_to_le32(param_value);
 
-	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+	ath12k_dbg(ar->ab, ATH12K_DBG_SET(WMI, L2),
 		   "WMI pdev set param %d pdev id %d value %d\n",
 		   param_id, pdev_id, param_value);
 
@@ -1474,6 +2569,7 @@
 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
 						 sizeof(*cmd));
 	cmd->req_type = cpu_to_le32(type);
+	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
 
 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
 		   "WMI bss chan info req type %d\n", type);
@@ -1557,12 +2653,12 @@
 	return ret;
 }
 
-int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
+int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms, bool nowait)
 {
 	struct ath12k_wmi_pdev *wmi = ar->wmi;
 	struct wmi_force_fw_hang_cmd *cmd;
 	struct sk_buff *skb;
-	int ret, len;
+	int ret = 0, len;
 
 	len = sizeof(*cmd);
 
@@ -1577,8 +2673,10 @@
 	cmd->type = cpu_to_le32(type);
 	cmd->delay_time_ms = cpu_to_le32(delay_time_ms);
 
+	if (nowait)
+		ath12k_wmi_cmd_send_nowait(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
+	else
 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
-
 	if (ret) {
 		ath12k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID");
 		dev_kfree_skb(skb);
@@ -1620,6 +2718,142 @@
 	return ret;
 }
 
+/* ath12k_wmi_send_stats_request_cmd() - ask firmware for stats.
+ *
+ * @stats_id: WMI stats bitmask selecting which stats to fetch
+ * @vdev_id: vdev scope for the request
+ * @pdev_id: pdev scope for the request
+ *
+ * Returns 0 on success or the ath12k_wmi_cmd_send() error; the message is
+ * freed here if the send fails.
+ */
+int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
+				      u32 vdev_id, u32 pdev_id)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_request_stats_cmd *cmd;
+	struct sk_buff *msg;
+	int ret;
+
+	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+	if (!msg)
+		return -ENOMEM;
+
+	cmd = (struct wmi_request_stats_cmd *)msg->data;
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD,
+						 sizeof(*cmd));
+	cmd->stats_id = cpu_to_le32(stats_id);
+	cmd->vdev_id = cpu_to_le32(vdev_id);
+	cmd->pdev_id = cpu_to_le32(pdev_id);
+
+	ret = ath12k_wmi_cmd_send(wmi, msg, WMI_REQUEST_STATS_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
+		dev_kfree_skb(msg);
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "WMI request stats 0x%x vdev id %d pdev id %d\n",
+		   stats_id, vdev_id, pdev_id);
+
+	return ret;
+}
+
+/* ath12k_wmi_copy_coex_config() - copy the type-specific subset of BT-coex
+ * parameters from @coex_config into the WMI command, logging what was set.
+ *
+ * Only the fields relevant to coex_config->config_type are copied; unknown
+ * types are silently ignored.  Re-indented with tabs per kernel coding
+ * style (the original block used spaces); behavior unchanged apart from a
+ * spelling fix ("alogrithm" -> "algorithm") in one debug message.
+ */
+static void ath12k_wmi_copy_coex_config(struct ath12k *ar, struct wmi_coex_config_cmd *cmd,
+					struct coex_config_arg *coex_config)
+{
+	switch (coex_config->config_type) {
+	case WMI_COEX_CONFIG_BTC_ENABLE:
+		cmd->coex_enable = coex_config->coex_enable;
+		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+			   "WMI coex config type %u vdev id %d"
+			   " coex_enable %u\n",
+			   coex_config->config_type,
+			   coex_config->vdev_id,
+			   coex_config->coex_enable);
+		break;
+	case WMI_COEX_CONFIG_WLAN_PKT_PRIORITY:
+		cmd->wlan_pkt_type = coex_config->wlan_pkt_type;
+		cmd->wlan_pkt_weight = coex_config->wlan_pkt_weight;
+		cmd->bt_pkt_weight = coex_config->bt_pkt_weight;
+		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+			   "WMI coex config type %u vdev id %d"
+			   " wlan pkt type 0x%x wlan pkt weight %u"
+			   " bt pkt weight %u\n",
+			   coex_config->config_type,
+			   coex_config->vdev_id,
+			   coex_config->wlan_pkt_type,
+			   coex_config->wlan_pkt_weight,
+			   coex_config->bt_pkt_weight);
+		break;
+	case WMI_COEX_CONFIG_PTA_INTERFACE:
+		cmd->pta_num = coex_config->pta_num;
+		cmd->coex_mode = coex_config->coex_mode;
+		cmd->bt_txrx_time = coex_config->bt_txrx_time;
+		cmd->bt_priority_time = coex_config->bt_priority_time;
+		cmd->pta_algorithm = coex_config->pta_algorithm;
+		cmd->pta_priority = coex_config->pta_priority;
+		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+			   "WMI coex config type %u vdev id %d"
+			   " pta num %u coex mode 0x%x"
+			   " bt_txrx_time 0x%x"
+			   " bt_priority_time 0x%x pta algorithm 0x%x"
+			   " pta priority 0x%x\n",
+			   coex_config->config_type,
+			   coex_config->vdev_id,
+			   coex_config->pta_num,
+			   coex_config->coex_mode,
+			   coex_config->bt_txrx_time,
+			   coex_config->bt_priority_time,
+			   coex_config->pta_algorithm,
+			   coex_config->pta_priority);
+		break;
+	case WMI_COEX_CONFIG_AP_TDM:
+		cmd->duty_cycle = coex_config->duty_cycle;
+		cmd->wlan_duration = coex_config->wlan_duration;
+		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+			   "WMI coex config type %u vdev id %d"
+			   " duty_cycle %u wlan_duration %u\n",
+			   coex_config->config_type,
+			   coex_config->vdev_id,
+			   coex_config->duty_cycle,
+			   coex_config->wlan_duration);
+		break;
+	case WMI_COEX_CONFIG_FORCED_ALGO:
+		cmd->coex_algo = coex_config->coex_algo;
+		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+			   "WMI coex config type %u vdev id %d"
+			   " coex_algorithm %u\n",
+			   coex_config->config_type,
+			   coex_config->vdev_id,
+			   coex_config->coex_algo);
+		break;
+	default:
+		break;
+	}
+}
+
+/* ath12k_send_coex_config_cmd() - send a BT-coex configuration to firmware.
+ *
+ * Builds WMI_COEX_CONFIG_CMDID from @coex_config; the type-specific fields
+ * are filled in by ath12k_wmi_copy_coex_config().  Re-indented with tabs
+ * per kernel coding style (the original block used spaces).
+ *
+ * Returns 0 on success or the ath12k_wmi_cmd_send() error; the skb is freed
+ * here on send failure.
+ */
+int ath12k_send_coex_config_cmd(struct ath12k *ar,
+				struct coex_config_arg *coex_config)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_coex_config_cmd *cmd;
+	struct sk_buff *skb;
+	int ret;
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_coex_config_cmd *)skb->data;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_COEX_CONFIG_CMD) |
+			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+	/* NOTE(review): assigned without cpu_to_le32(), unlike most WMI
+	 * commands in this file — confirm the struct fields are native
+	 * endian before relying on this on big-endian hosts.
+	 */
+	cmd->vdev_id = coex_config->vdev_id;
+	cmd->config_type = coex_config->config_type;
+	ath12k_wmi_copy_coex_config(ar, cmd, coex_config);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_COEX_CONFIG_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to send WMI_COEX_CONFIG_CMD cmd\n");
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
+
 int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
 {
 	struct ath12k_wmi_pdev *wmi = ar->wmi;
@@ -1681,9 +2915,167 @@
 	return ret;
 }
 
+/* ath12k_wmi_bcn_fill_ml_info() - fill the beacon-template ML-info TLV with
+ * critical-update (CU) vdev bitmaps for this link.
+ *
+ * @non_tx_ml_vdev_bmap: bitmap of non-transmitting MLD vdevs sharing this
+ * tx vdev; ORed into the CU maps when elem-id add/modify applies (currently
+ * disabled, see FIXME below).
+ *
+ * On any early exit (no bss_conf) the function still writes zeroed CU maps
+ * via the err_fill_ml_info label, so ml_info is always fully initialized.
+ */
+static void ath12k_wmi_bcn_fill_ml_info(struct ath12k_link_vif *arvif,
+					struct wmi_bcn_tmpl_ml_info *ml_info,
+					u64 non_tx_ml_vdev_bmap)
+{
+	struct ath12k_base *ab = arvif->ar->ab;
+	struct ieee80211_bss_conf *link_conf;
+	/* u32 vdev_id = arvif->vdev_id; */
+	unsigned long vdev_map_cat1 = 0;
+	unsigned long vdev_map_cat2 = 0;
+
+	/* bss_conf is RCU-protected; we only need it under the lock */
+	rcu_read_lock();
+
+	link_conf = ath12k_get_link_bss_conf(arvif);
+	if (!link_conf) {
+		rcu_read_unlock();
+		goto err_fill_ml_info;
+	}
+
+	rcu_read_unlock();
+
+	ml_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_ML_INFO_CMD,
+						     sizeof(*ml_info));
+	ml_info->hw_link_id = cpu_to_le32(arvif->ar->pdev->hw_link_id);
+
+	/* if this is cu cat 1 for tx vdev, then it applies to all
+	 * non-tx vdevs as well set all non-tx mld's vdev id in bitmap
+	 */
+
+	/* FIXME: disabled to make it compile without mlo */
+	/* NOTE(review): with the code below commented out the CU maps are
+	 * always zero and every call trips this WARN — intended as a loud
+	 * placeholder until MLO support lands, confirm before shipping.
+	 */
+	WARN(1, "mlo not supported");
+	/* if (link_conf->elemid_added) { */
+	/* 	set_bit(vdev_id, &vdev_map_cat1); */
+	/* 	vdev_map_cat1 |= non_tx_ml_vdev_bmap; */
+	/* } */
+
+	/* if (link_conf->elemid_modified) { */
+	/* 	set_bit(vdev_id, &vdev_map_cat2); */
+	/* 	vdev_map_cat2 |= non_tx_ml_vdev_bmap; */
+	/* } */
+
+err_fill_ml_info:
+	/* split the 64-bit CU bitmaps into le32 lo/hi halves for the TLV */
+	ml_info->cu_vdev_map_cat1_lo =
+			   cpu_to_le32(ATH12K_GET_LOWER_32_BITS(vdev_map_cat1));
+	ml_info->cu_vdev_map_cat1_hi =
+			   cpu_to_le32(ATH12K_GET_UPPER_32_BITS(vdev_map_cat1));
+	ml_info->cu_vdev_map_cat2_lo =
+			   cpu_to_le32(ATH12K_GET_LOWER_32_BITS(vdev_map_cat2));
+	ml_info->cu_vdev_map_cat2_hi =
+			   cpu_to_le32(ATH12K_GET_UPPER_32_BITS(vdev_map_cat2));
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "wmi CU filled ml info cat1_lo=0x%x cat1_hi=0x%x cat2_lo=0x%x cat2_hi=0x%x non_tx_ml_bmap=%llu\n",
+		   ml_info->cu_vdev_map_cat1_lo, ml_info->cu_vdev_map_cat1_hi,
+		   ml_info->cu_vdev_map_cat2_lo, ml_info->cu_vdev_map_cat2_hi,
+		   non_tx_ml_vdev_bmap);
+}
+
+/* ath12k_wmi_fill_cu_arg() - gather critical-update (CU) parameters for the
+ * beacon template of @arvif.
+ *
+ * Scans the radio's vif list for non-transmitting ML vdevs that share this
+ * tx vdev, then allocates and fills cu_arg->ml_info (one entry per impacted
+ * link).  Does nothing for a non-ML vif with no non-tx ML partners.
+ *
+ * Ownership: the ml_info allocation is consumed (and freed) later by
+ * ath12k_wmi_append_critical_update_params(); on alloc failure num_ml_info
+ * is reset to 0 so callers/consumers skip it.
+ */
+static void ath12k_wmi_fill_cu_arg(struct ath12k_link_vif *arvif,
+				   struct wmi_critical_update_arg *cu_arg)
+{
+	struct ath12k_base *ab = arvif->ar->ab;
+	struct ath12k_link_vif *arvif_iter;
+	struct wmi_bcn_tmpl_ml_info *ml_info;
+	int i;
+	unsigned long non_tx_ml_vdev_bmap = 0;
+	struct ath12k *ar = arvif->ar;
+	struct ath12k_prb_resp_tmpl_ml_info *ar_ml_info;
+
+	list_for_each_entry(arvif_iter, &arvif->ar->arvifs, list) {
+		if (arvif_iter != arvif && arvif_iter->tx_vdev_id == arvif->vdev_id &&
+		    ath12k_mac_is_ml_arvif(arvif_iter))
+			set_bit(arvif_iter->vdev_id, &non_tx_ml_vdev_bmap);
+	}
+	if (!ath12k_mac_is_ml_arvif(arvif) && !non_tx_ml_vdev_bmap)
+		return;
+
+	/* Fill ML params
+	 * ML params should be filled for all partner links
+	 */
+	cu_arg->num_ml_params = 0;
+	/* TODO: Fill ML params. Will work without this info too */
+
+	/* Fill ML info
+	 * ML info should be filled for impacted link only
+	 */
+	cu_arg->num_ml_info = 1;
+	/* kcalloc: idiomatic array allocation; no cast of the void * return
+	 * (the original cast kzalloc()'s result, which kernel style forbids)
+	 */
+	cu_arg->ml_info = kcalloc(cu_arg->num_ml_info, sizeof(*ml_info),
+				  GFP_KERNEL);
+
+	if (!cu_arg->ml_info) {
+		ath12k_warn(ab, "wmi failed to get memory for ml info");
+		cu_arg->num_ml_info = 0;
+	} else {
+		for (i = 0; i < cu_arg->num_ml_info; i++) {
+			ml_info = &cu_arg->ml_info[i];
+			ath12k_wmi_bcn_fill_ml_info(arvif, ml_info, non_tx_ml_vdev_bmap);
+			/* Retain copy of CU vdev bitmap. Which are used to
+			 * update cu_vdev_map in 20TU probe response template.
+			 */
+			if (ar->supports_6ghz) {
+				ar_ml_info = &arvif->ml_info;
+				ar_ml_info->hw_link_id = ml_info->hw_link_id;
+				ar_ml_info->cu_vdev_map_cat1_lo = ml_info->cu_vdev_map_cat1_lo;
+				ar_ml_info->cu_vdev_map_cat1_hi = ml_info->cu_vdev_map_cat1_hi;
+				ar_ml_info->cu_vdev_map_cat2_lo = ml_info->cu_vdev_map_cat2_lo;
+				ar_ml_info->cu_vdev_map_cat2_hi = ml_info->cu_vdev_map_cat2_hi;
+			}
+		}
+	}
+}
+
+/* ath12k_wmi_append_critical_update_params() - append the CU ML-params and
+ * ML-info TLV arrays to a beacon-template command buffer.
+ *
+ * @ptr: write cursor into the (pre-sized) command skb
+ * @cu_arg: CU data produced by ath12k_wmi_fill_cu_arg()
+ *
+ * Returns the advanced write cursor.  Both TLV arrays are always emitted,
+ * possibly zero-length, since firmware expects the headers.
+ *
+ * Ownership: this function FREES cu_arg->ml_params / cu_arg->ml_info after
+ * copying them — callers must not reuse those pointers afterwards.
+ */
+static void *
+ath12k_wmi_append_critical_update_params(struct ath12k *ar, u32 vdev_id,
+					 void *ptr,
+					 struct wmi_critical_update_arg *cu_arg)
+{
+	struct wmi_bcn_tmpl_ml_params *ml_params;
+	struct wmi_bcn_tmpl_ml_info *ml_info;
+	void *start = ptr;
+	struct wmi_tlv *tlv;
+	size_t ml_params_len = cu_arg->num_ml_params * sizeof(*ml_params);
+	size_t ml_info_len = cu_arg->num_ml_info * sizeof(*ml_info);
+	int i;
+
+	/* Add ML params */
+	tlv = (struct wmi_tlv *)ptr;
+	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, ml_params_len);
+	ml_params = (struct wmi_bcn_tmpl_ml_params *)tlv->value;
+
+	for (i = 0; i < cu_arg->num_ml_params; i++)
+		memcpy(&ml_params[i], &cu_arg->ml_params[i],
+		       sizeof(*ml_params));
+
+	/* count guard is redundant (kfree(NULL) is a no-op) but harmless */
+	if (cu_arg->num_ml_params)
+		kfree(cu_arg->ml_params);
+
+	ptr += TLV_HDR_SIZE + ml_params_len;
+
+	/* Add ML info */
+	tlv = (struct wmi_tlv *)ptr;
+	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, ml_info_len);
+	ml_info = (struct wmi_bcn_tmpl_ml_info *)tlv->value;
+
+	for (i = 0; i < cu_arg->num_ml_info; i++)
+		memcpy(&ml_info[i], &cu_arg->ml_info[i],
+		       sizeof(*ml_info));
+
+	if (cu_arg->num_ml_info)
+		kfree(cu_arg->ml_info);
+
+	ptr += TLV_HDR_SIZE + ml_info_len;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi %ld bytes of additional data filled for CU\n",
+		    (unsigned long)(ptr - start));
+	return ptr;
+}
+
 int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
 			struct ieee80211_mutable_offsets *offs,
-			struct sk_buff *bcn)
+			struct sk_buff *bcn, u32 ema_params)
 {
 	struct ath12k_wmi_pdev *wmi = ar->wmi;
 	struct wmi_bcn_tmpl_cmd *cmd;
@@ -1693,12 +3085,32 @@
 	void *ptr;
 	int ret, len;
 	size_t aligned_len = roundup(bcn->len, 4);
+	struct ath12k_link_vif *arvif = ath12k_mac_get_arvif(ar, vdev_id);
+	struct wmi_critical_update_arg cu_arg = {
+						 .num_ml_params = 0,
+						 .ml_params = NULL,
+						 .num_ml_info = 0,
+						 .ml_info = NULL,
+						};
+
+	if (WARN_ON(!arvif))
+		return -EINVAL;
+
+	ath12k_wmi_fill_cu_arg(arvif, &cu_arg);
 
-	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
+	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len +
+	      TLV_HDR_SIZE + (sizeof(struct wmi_bcn_tmpl_ml_params) * cu_arg.num_ml_params) +
+	      TLV_HDR_SIZE + (sizeof(struct wmi_bcn_tmpl_ml_info) * cu_arg.num_ml_info);
 
 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
-	if (!skb)
+	if (!skb) {
+		if (cu_arg.num_ml_params)
+			kfree(cu_arg.ml_params);
+		if (cu_arg.num_ml_info)
+			kfree(cu_arg.ml_info);
+
 		return -ENOMEM;
+	}
 
 	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
@@ -1707,7 +3119,12 @@
 	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
 	cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
 	cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
+	cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF);
 	cmd->buf_len = cpu_to_le32(bcn->len);
+	cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
+	cmd->ema_params = cpu_to_le32(ema_params);
+	cmd->feature_enable_bitmap = cpu_to_le32(u32_encode_bits(arvif->beacon_prot,
+						WMI_BEACON_PROTECTION_EN_BIT));
 
 	ptr = skb->data + sizeof(*cmd);
 
@@ -1724,6 +3141,11 @@
 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
 	memcpy(tlv->value, bcn->data, bcn->len);
 
+	ptr += (TLV_HDR_SIZE + aligned_len);
+
+	ptr = ath12k_wmi_append_critical_update_params(ar, vdev_id, ptr,
+						       &cu_arg);
+
 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
@@ -1805,6 +3227,8 @@
 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
 		if (arg->bw_160)
 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
+		if (arg->bw_320)
+			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
 
 		/* Typically if STBC is enabled for VHT it should be enabled
 		 * for HT as well
@@ -1832,6 +3256,8 @@
 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
 		if (arg->twt_responder)
 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
+		if (arg->eht_flag)
+			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT);			
 	}
 
 	/* Suppress authorization for all AUTH modes that need 4-way handshake
@@ -1842,7 +3268,7 @@
 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
 	if (arg->need_ptk_4_way) {
 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
-		if (!hw_crypto_disabled)
+		if (!hw_crypto_disabled && arg->is_assoc)
 			cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
 	}
 	if (arg->need_gtk_2_way)
@@ -1869,19 +3295,54 @@
 		cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
 }
 
+/* ath12k_wmi_get_emlsr_pad_delay() - decode the EMLSR padding-delay
+ * subfield of the EML capabilities into microseconds.
+ *
+ * Returns 0 and writes *emlsr_pad_delay_us, or -EINVAL for a NULL output
+ * pointer or an out-of-range subfield value.
+ */
+static int ath12k_wmi_get_emlsr_pad_delay(u8 emlsr_pad_delay,
+					  u32 *emlsr_pad_delay_us)
+{
+	/* static const: the lookup table need not be rebuilt on the stack
+	 * for every call
+	 */
+	static const u16 pad_delay_us[EMLSR_DELAY_MAX] = {0, 32, 64, 128, 256};
+
+	if (!emlsr_pad_delay_us)
+		return -EINVAL;
+
+	if (emlsr_pad_delay >= EMLSR_DELAY_MAX)
+		return -EINVAL;
+
+	*emlsr_pad_delay_us = pad_delay_us[emlsr_pad_delay];
+	return 0;
+}
+
+/* ath12k_get_emlsr_tran_delay() - decode the EMLSR transition-delay
+ * subfield of the EML capabilities into microseconds.
+ *
+ * Returns 0 and writes *emlsr_trans_delay_us, or -EINVAL for a NULL output
+ * pointer or an out-of-range subfield value.
+ */
+static int ath12k_get_emlsr_tran_delay(u8 emlsr_trans_delay,
+				       u32 *emlsr_trans_delay_us)
+{
+	/* static const: avoid rebuilding the table on every call */
+	static const u16 delay_us[EMLSR_TRANS_DELAY_MAX] = {0, 16, 32, 64, 128, 256};
+
+	if (!emlsr_trans_delay_us)
+		return -EINVAL;
+
+	if (emlsr_trans_delay >= EMLSR_TRANS_DELAY_MAX)
+		return -EINVAL;
+
+	*emlsr_trans_delay_us = delay_us[emlsr_trans_delay];
+	return 0;
+}
+
 int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
 				   struct ath12k_wmi_peer_assoc_arg *arg)
 {
 	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct ath12k_hw_group *ag = ar->ab->ag;
 	struct wmi_peer_assoc_complete_cmd *cmd;
 	struct ath12k_wmi_vht_rate_set_params *mcs;
 	struct ath12k_wmi_he_rate_set_params *he_mcs;
+	struct wmi_eht_rate_set *eht_mcs;
+	struct wmi_peer_assoc_mlo_params *ml_params;
+	struct wmi_peer_assoc_mlo_partner_info *partner_info;
 	struct sk_buff *skb;
 	struct wmi_tlv *tlv;
 	void *ptr;
 	u32 peer_legacy_rates_align;
 	u32 peer_ht_rates_align;
 	int i, ret, len;
+	bool emlsr_support;
 
 	peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
 					  sizeof(u32));
@@ -1892,7 +3353,11 @@
 	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
 	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
 	      sizeof(*mcs) + TLV_HDR_SIZE +
-	      (sizeof(*he_mcs) * arg->peer_he_mcs_count);
+	      (sizeof(*he_mcs) * arg->peer_he_mcs_count) +
+	      TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count) +
+	      TLV_HDR_SIZE + sizeof(*ml_params) +
+	      TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
+	
 
 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
 	if (!skb)
@@ -1908,10 +3373,11 @@
 
 	cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
 	cmd->peer_associd = cpu_to_le32(arg->peer_associd);
+	cmd->ru_punct_bitmap = cpu_to_le32(arg->ru_punct_bitmap);
 
 	ath12k_wmi_copy_peer_flags(cmd, arg,
 				   test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
-					    &ar->ab->dev_flags));
+					    &ag->dev_flags));
 
 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);
 
@@ -1939,6 +3405,17 @@
 		cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
 			cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);
 
+	/* Update 11be capabilities */
+	memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac),
+		       arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac),
+		       0);
+	memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy),
+		       arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy),
+		       0);
+	memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet),
+		       &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0);
+	cmd->peer_eht_ops = arg->peer_eht_ops;
+
 	/* Update peer legacy rate information */
 	ptr += sizeof(*cmd);
 
@@ -2000,13 +3477,118 @@
 		he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
 							    sizeof(*he_mcs));
 
-		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
-		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
+		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
+		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
 		ptr += sizeof(*he_mcs);
 	}
 
+	/* MLO params */
+	emlsr_support = FIELD_GET(IEEE80211_EML_CAP_EMLSR_SUPP, arg->ml.eml_caps);
+
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+		      FIELD_PREP(WMI_TLV_LEN, sizeof(*ml_params));
+	ptr += TLV_HDR_SIZE;
+
+	ml_params = ptr;
+	ml_params->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MLO_PEER_ASSOC_PARAMS) |
+				FIELD_PREP(WMI_TLV_LEN, sizeof(*ml_params) - TLV_HDR_SIZE);
+
+	ml_params->flags = FIELD_PREP(ATH12K_WMI_FLAG_MLO_ENABLED,
+				      arg->ml.enabled) |
+			   FIELD_PREP(ATH12K_WMI_FLAG_MLO_ASSOC_LINK,
+				      arg->ml.assoc_link) |
+			   FIELD_PREP(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC,
+				      arg->ml.primary_umac) |
+			   FIELD_PREP(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID,
+				      arg->ml.logical_link_idx_valid) |
+			   FIELD_PREP(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID,
+				      arg->ml.peer_id_valid) |
+			   FIELD_PREP(ATH12K_WMI_FLAG_MLO_EMLSR_SUPPORT,
+				      emlsr_support);
+
+	ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
+	ml_params->logical_link_idx = arg->ml.logical_link_idx;
+	ml_params->ml_peer_id = arg->ml.ml_peer_id;
+	ml_params->ieee_link_id = arg->ml.ieee_link_id;
+
+	/* emlsr params */
+	if (emlsr_support) {
+		u8 timeout;
+		/*get emlsr padding delay subfield, if invalid use 0us*/
+		ath12k_wmi_get_emlsr_pad_delay(
+					FIELD_GET(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY,
+						  arg->ml.eml_caps),
+					&ml_params->emlsr_padding_delay_us);
+		/*get emlsr trans delay subfield value, if invalid use 0us*/
+		ath12k_get_emlsr_tran_delay(FIELD_GET(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY,
+						      arg->ml.eml_caps),
+					    &ml_params->emlsr_trans_delay_us);
+
+		timeout = FIELD_GET(IEEE80211_EML_CAP_TRANSITION_TIMEOUT,
+				    arg->ml.eml_caps);
+		if (timeout < EMLCAP_TIMEOUT_MAX)
+			ml_params->emlsr_trans_timeout_us = eml_trans_timeout[timeout];
+		else
+			ath12k_warn(ar->ab, "Invalid emlsr trans timeout \n");
+	}
+	ptr += sizeof(*ml_params);
+
+	/* Loop through the EHT rate set.
+	 */
+	len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+		      FIELD_PREP(WMI_TLV_LEN, len);
+	ptr += TLV_HDR_SIZE;
+
+	for (i = 0; i < arg->peer_eht_mcs_count; i++) {
+		eht_mcs = ptr;
+		eht_mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+						 WMI_TAG_EHT_RATE_SET) |
+				      FIELD_PREP(WMI_TLV_LEN,
+						 sizeof(*eht_mcs) - TLV_HDR_SIZE);
+		eht_mcs->rx_mcs_set = arg->peer_eht_rx_mcs_set[i];
+		eht_mcs->tx_mcs_set = arg->peer_eht_tx_mcs_set[i];
+		ptr += sizeof(*eht_mcs);
+	}
+
+	/* fill ML Partner links */
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+		      FIELD_PREP(WMI_TLV_LEN,
+				 arg->ml.num_partner_links * sizeof(*partner_info));
+	ptr += TLV_HDR_SIZE;
+
+	for (i = 0; i < arg->ml.num_partner_links; i++) {
+		partner_info = ptr;
+		partner_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+						      WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC) |
+					   FIELD_PREP(WMI_TLV_LEN,
+						 sizeof(*partner_info) - TLV_HDR_SIZE);
+		partner_info->vdev_id = arg->ml.partner_info[i].vdev_id;
+		partner_info->hw_link_id = arg->ml.partner_info[i].hw_link_id;
+
+		partner_info->flags = FIELD_PREP(ATH12K_WMI_FLAG_MLO_ENABLED, 1) |
+				      FIELD_PREP(ATH12K_WMI_FLAG_MLO_ASSOC_LINK,
+						 arg->ml.partner_info[i].assoc_link) |
+				      FIELD_PREP(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC,
+						 arg->ml.partner_info[i].primary_umac) |
+				      FIELD_PREP(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID,
+						 arg->ml.partner_info[i].logical_link_idx_valid);
+		partner_info->logical_link_idx = arg->ml.partner_info[i].logical_link_idx;
+		ptr += sizeof(*partner_info);
+	}
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to send WMI_PEER_ASSOC_CMDID\n");
+		dev_kfree_skb(skb);
+	}
+
 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
-		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
+		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x eht ops %x ml flags %x mld_addr %pM logical_link_idx %u ml peer id %d ieee_link_id %u num_partner_links %d\n",
 		   cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
 		   cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
 		   cmd->peer_listen_intval, cmd->peer_ht_caps,
@@ -2016,14 +3598,14 @@
 		   cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
 		   cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
 		   cmd->peer_he_cap_phy[2],
-		   cmd->peer_bw_rxnss_override);
-
-	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
-	if (ret) {
-		ath12k_warn(ar->ab,
-			    "failed to send WMI_PEER_ASSOC_CMDID\n");
-		dev_kfree_skb(skb);
-	}
+		   cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
+		   cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
+		   cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
+		   cmd->peer_eht_cap_phy[2], cmd->peer_eht_ops,
+		   ml_params->flags, ml_params->mld_addr.addr,
+		   ml_params->logical_link_idx, ml_params->ml_peer_id,
+		   ml_params->ieee_link_id,
+		   arg->ml.num_partner_links);
 
 	return ret;
 }
@@ -2033,16 +3615,16 @@
 {
 	/* setup commonly used values */
 	arg->scan_req_id = 1;
-	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
-	arg->dwell_time_active = 50;
-	arg->dwell_time_active_2g = 0;
+	arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
+	arg->dwell_time_active = 175;
+	arg->dwell_time_active_2g = 175;
 	arg->dwell_time_passive = 150;
-	arg->dwell_time_active_6g = 40;
-	arg->dwell_time_passive_6g = 30;
+	arg->dwell_time_active_6g = 70;
+	arg->dwell_time_passive_6g = 70;
 	arg->min_rest_time = 50;
 	arg->max_rest_time = 500;
 	arg->repeat_probe_time = 0;
-	arg->probe_spacing_time = 0;
+	arg->probe_spacing_time = 100;
 	arg->idle_time = 0;
 	arg->max_scan_time = 20000;
 	arg->probe_delay = 5;
@@ -2051,7 +3633,8 @@
 				  WMI_SCAN_EVENT_BSS_CHANNEL |
 				  WMI_SCAN_EVENT_FOREIGN_CHAN |
 				  WMI_SCAN_EVENT_DEQUEUED;
-	arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+	arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT |
+			   WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS;
 	arg->num_bssid = 1;
 
 	/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
@@ -2145,7 +3728,7 @@
 	void *ptr;
 	int i, ret, len;
 	u32 *tmp_ptr;
-	u8 extraie_len_with_pad = 0;
+	u16 extraie_len_with_pad = 0;
 	struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
 	struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;
 
@@ -2164,7 +3747,7 @@
 		len += sizeof(*bssid) * arg->num_bssid;
 
 	len += TLV_HDR_SIZE;
-	if (arg->extraie.len)
+	if (arg->extraie.len && arg->extraie.len <= 0xFFFF)
 		extraie_len_with_pad =
 			roundup(arg->extraie.len, sizeof(u32));
 	len += extraie_len_with_pad;
@@ -2266,7 +3849,7 @@
 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
 	ptr += TLV_HDR_SIZE;
 
-	if (arg->extraie.len)
+	if (extraie_len_with_pad)
 		memcpy(ptr, arg->extraie.ptr,
 		       arg->extraie.len);
 
@@ -2311,6 +3894,69 @@
 	return ret;
 }
 
+int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
+                                       u32 vdev_id,
+                                       struct ath12k_reg_tpc_power_info *param)
+{
+        struct ath12k_wmi_pdev *wmi = ar->wmi;
+        struct wmi_vdev_set_tpc_power_cmd *cmd;
+        struct wmi_vdev_ch_power_info *ch;
+        struct sk_buff *skb;
+        struct wmi_tlv *tlv;
+        u8 *ptr;
+        int i, ret, len;
+
+        len = sizeof(*cmd) + TLV_HDR_SIZE;
+        len += (sizeof(struct wmi_vdev_ch_power_info) * param->num_pwr_levels);
+
+        skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+        if (!skb)
+                return -ENOMEM;
+
+        ptr = skb->data;
+
+        cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
+        cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_TPC_POWER_CMD) |
+                          FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+        cmd->vdev_id = vdev_id;
+        cmd->psd_power = param->is_psd_power;
+        cmd->eirp_power = param->eirp_power;
+        cmd->power_type_6ghz = param->power_type_6g;
+        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+                   "wmi TPC vdev_id: %d is_psd_power: %d eirp_power: %d power_type_6g: %d\n",
+                   vdev_id, param->is_psd_power, param->eirp_power, param->power_type_6g);
+
+        ptr += sizeof(*cmd);
+        tlv = (struct wmi_tlv *)ptr;
+        tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+                      FIELD_PREP(WMI_TLV_LEN, param->num_pwr_levels * sizeof(*ch));
+
+        ptr += TLV_HDR_SIZE;
+        ch = (struct wmi_vdev_ch_power_info *)ptr;
+
+        for (i = 0; i < param->num_pwr_levels; i++, ch++) {
+                ch->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+                                            WMI_TAG_VDEV_CH_POWER_INFO) |
+                                FIELD_PREP(WMI_TLV_LEN,
+                                           sizeof(*ch) - TLV_HDR_SIZE);
+
+                ch->chan_cfreq = param->chan_power_info[i].chan_cfreq;
+                ch->tx_power = param->chan_power_info[i].tx_power;
+
+                ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+                           "wmi TPC chan_cfreq: %d , tx_power: %d\n",
+                           ch->chan_cfreq, ch->tx_power);
+        }
+
+        ret = ath12k_wmi_cmd_send(wmi, skb,
+                                  WMI_VDEV_SET_TPC_POWER_CMDID);
+        if (ret) {
+                ath12k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n");
+                dev_kfree_skb(skb);
+        }
+        return ret;
+}
+
 int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
 				  struct ath12k_wmi_scan_cancel_arg *arg)
 {
@@ -2453,6 +4099,8 @@
 						  WMI_CHAN_REG_INFO1_REG_CLS);
 			*reg2 |= le32_encode_bits(channel_arg->antennamax,
 						  WMI_CHAN_REG_INFO2_ANT_MAX);
+			*reg2 |= le32_encode_bits(channel_arg->maxregpower,
+						  WMI_CHAN_REG_INFO2_MAX_TX_PWR);
 
 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
 				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
@@ -2525,7 +4173,7 @@
 		wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
 		wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);
 
-		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		ath12k_dbg(ar->ab, ATH12K_DBG_SET(WMI, L3),
 			   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
 			   ac, wmm_param->aifs, wmm_param->cwmin,
 			   wmm_param->cwmax, wmm_param->txoplimit,
@@ -2715,6 +4363,52 @@
 	return ret;
 }
 
+int ath12k_wmi_pdev_peer_pktlog_filter(struct ath12k *ar, u8 *addr, u8 enable)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_pdev_pktlog_filter_cmd *cmd;
+	struct wmi_pdev_pktlog_filter_info *info;
+	struct sk_buff *skb;
+	struct wmi_tlv *tlv;
+	void *ptr;
+	int ret, len;
+
+	len = sizeof(*cmd) + sizeof(*info) + TLV_HDR_SIZE;
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_pdev_pktlog_filter_cmd *)skb->data;
+
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD,
+						 sizeof(*cmd));
+
+	cmd->pdev_id = cpu_to_le32(DP_HW2SW_MACID(ar->pdev->pdev_id));
+	cmd->num_mac = cpu_to_le32(1);
+	cmd->enable = cpu_to_le32(enable);
+
+	ptr = skb->data + sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*info));
+
+	ptr += TLV_HDR_SIZE;
+	info = ptr;
+
+	ether_addr_copy(info->peer_macaddr.addr, addr);
+	info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO,
+						  sizeof(*info));
+
+	ret = ath12k_wmi_cmd_send(wmi, skb,
+				  WMI_PDEV_PKTLOG_FILTER_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_FILTER_CMDID\n");
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
+
 int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
 				     struct ath12k_wmi_init_country_arg *arg)
 {
@@ -2768,6 +4462,125 @@
 }
 
 int
+ath12k_wmi_send_thermal_mitigation_cmd(struct ath12k *ar,
+				       struct ath12k_wmi_thermal_mitigation_arg *arg)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_therm_throt_config_request_cmd *cmd;
+	struct wmi_therm_throt_level_config_info *lvl_conf;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	int i, ret, len;
+
+	len = sizeof(*cmd) + TLV_HDR_SIZE + THERMAL_LEVELS * sizeof(*lvl_conf);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_therm_throt_config_request_cmd *)skb->data;
+
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_THERM_THROT_CONFIG_REQUEST,
+						 sizeof(*cmd));
+
+	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+	cmd->enable = cpu_to_le32(arg->enable);
+	cmd->dc = cpu_to_le32(arg->dc);
+	cmd->dc_per_event = cpu_to_le32(arg->dc_per_event);
+	cmd->therm_throt_levels = cpu_to_le32(THERMAL_LEVELS);
+
+	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+					 THERMAL_LEVELS * sizeof(*lvl_conf));
+
+	lvl_conf = (struct wmi_therm_throt_level_config_info *)(skb->data +
+								sizeof(*cmd) +
+								TLV_HDR_SIZE);
+	for (i = 0; i < THERMAL_LEVELS; i++) {
+		lvl_conf->tlv_header =
+			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO,
+					       sizeof(*lvl_conf));
+
+		lvl_conf->temp_lwm = arg->levelconf[i].tmplwm;
+		lvl_conf->temp_hwm = arg->levelconf[i].tmphwm;
+		lvl_conf->dc_off_percent = arg->levelconf[i].dcoffpercent;
+		lvl_conf->prio = arg->levelconf[i].priority;
+		lvl_conf++;
+	}
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_THERM_THROT_SET_CONF_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to send THERM_THROT_SET_CONF cmd\n");
+		dev_kfree_skb(skb);
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "WMI vdev set thermal throt pdev_id %d enable %d dc %d dc_per_event %x levels %d\n",
+		   ar->pdev->pdev_id, arg->enable, arg->dc,
+		   arg->dc_per_event, THERMAL_LEVELS);
+
+	return ret;
+}
+
+int ath12k_wmi_pdev_pktlog_enable(struct ath12k *ar, u32 pktlog_filter)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_pktlog_enable_cmd *cmd;
+	struct sk_buff *skb;
+	int ret;
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_pktlog_enable_cmd *)skb->data;
+
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_PKTLOG_ENABLE_CMD,
+						 sizeof(*cmd));
+
+	cmd->pdev_id = cpu_to_le32(DP_HW2SW_MACID(ar->pdev->pdev_id));
+	cmd->evlist = cpu_to_le32(pktlog_filter);
+	cmd->enable = cpu_to_le32(ATH12K_WMI_PKTLOG_ENABLE_FORCE);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb,
+				  WMI_PDEV_PKTLOG_ENABLE_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
+
+int ath12k_wmi_pdev_pktlog_disable(struct ath12k *ar)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_pktlog_disable_cmd *cmd;
+	struct sk_buff *skb;
+	int ret;
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_pktlog_disable_cmd *)skb->data;
+
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_PKTLOG_DISABLE_CMD,
+						 sizeof(*cmd));
+
+	cmd->pdev_id = cpu_to_le32(DP_HW2SW_MACID(ar->pdev->pdev_id));
+
+	ret = ath12k_wmi_cmd_send(wmi, skb,
+				  WMI_PDEV_PKTLOG_DISABLE_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_DISABLE_CMDID\n");
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
+
+int
 ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
 {
 	struct ath12k_wmi_pdev *wmi = ar->wmi;
@@ -2811,11 +4624,12 @@
 	/* TODO add MBSSID support */
 	cmd->mbss_support = 0;
 
-	ret = ath12k_wmi_cmd_send(wmi, skb,
-				  WMI_TWT_ENABLE_CMDID);
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID);
 	if (ret) {
 		ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
 		dev_kfree_skb(skb);
+	} else {
+		ar->twt_enabled = 1;
 	}
 	return ret;
 }
@@ -2840,11 +4654,587 @@
 						 len);
 	cmd->pdev_id = cpu_to_le32(pdev_id);
 
-	ret = ath12k_wmi_cmd_send(wmi, skb,
-				  WMI_TWT_DISABLE_CMDID);
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_TWT_DISABLE_CMDID);
 	if (ret) {
 		ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
 		dev_kfree_skb(skb);
+	} else {
+		ar->twt_enabled = 0;
+	}
+	return ret;
+}
+
+int ath12k_wmi_send_twt_add_dialog_cmd(struct ath12k *ar,
+				       struct wmi_twt_add_dialog_params *params)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct ath12k_base *ab = wmi->wmi_ab->ab;
+	struct wmi_twt_add_dialog_params_cmd *cmd;
+	struct sk_buff *skb;
+	int ret, len;
+
+	len = sizeof(*cmd);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_twt_add_dialog_params_cmd *)skb->data;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ADD_DIALOG_CMD) |
+			  FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+	cmd->vdev_id = params->vdev_id;
+	ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+	cmd->dialog_id = params->dialog_id;
+	cmd->wake_intvl_us = params->wake_intvl_us;
+	cmd->wake_intvl_mantis = params->wake_intvl_mantis;
+	cmd->wake_dura_us = params->wake_dura_us;
+	cmd->sp_offset_us = params->sp_offset_us;
+	cmd->flags = params->twt_cmd;
+	if (params->flag_bcast)
+		cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_BCAST;
+	if (params->flag_trigger)
+		cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_TRIGGER;
+	if (params->flag_flow_type)
+		cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE;
+	if (params->flag_protection)
+		cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_PROTECTION;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "wmi add twt dialog vdev %u dialog id %u wake interval %u mantissa %u wake duration %u service period offset %u flags 0x%x\n",
+		   cmd->vdev_id, cmd->dialog_id, cmd->wake_intvl_us,
+		   cmd->wake_intvl_mantis, cmd->wake_dura_us, cmd->sp_offset_us,
+		   cmd->flags);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_TWT_ADD_DIALOG_CMDID);
+
+	if (ret) {
+		ath12k_warn(ab,
+			    "failed to send wmi command to add twt dialog: %d",
+			    ret);
+		dev_kfree_skb(skb);
+	}
+	return ret;
+}
+
+int
+ath12k_wmi_send_wmi_ctrl_stats_cmd(struct ath12k *ar,
+		struct wmi_ctrl_path_stats_cmd_param *param)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct ath12k_base *ab = wmi->wmi_ab->ab;
+	u32 pdev_id_array[2] = {0};
+	int len, ret;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	struct wmi_ctrl_path_stats_cmd_param *cmd;
+	void *ptr;
+	u32 stats_id = 0;
+#ifdef CONFIG_ATH12K_DEBUGFS
+	unsigned long time_left;
+#endif
+
+	switch (param->stats_id) {
+	case WMI_REQ_CTRL_PATH_PDEV_TX_STAT:
+	case WMI_REQ_CTRL_PATH_CAL_STAT:
+	case WMI_REQ_CTRL_PATH_BTCOEX_STAT:
+		pdev_id_array[0] = ar->pdev->pdev_id;
+		stats_id = (1 << param->stats_id);
+		break;
+	case WMI_REQ_CTRL_PATH_AWGN_STAT:
+		if (ar->supports_6ghz) {
+			pdev_id_array[0] = ar->pdev->pdev_id;
+			stats_id = (1 << param->stats_id);
+		} else {
+			ath12k_warn(ab,
+			  "Stats id %d awgn stats are only supported for 6GHz",
+			  param->stats_id);
+			return -EIO;
+		}
+		break;
+		/* Add case for newly wmi ctrl path stats here */
+	default:
+		ath12k_warn(ab, "Unsupported stats id %d", param->stats_id);
+		return -EIO;
+		break;
+	}
+
+	len = sizeof(*cmd) +
+		TLV_HDR_SIZE + sizeof(u32) +
+		TLV_HDR_SIZE + TLV_HDR_SIZE;
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (void *)skb->data;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+			WMI_CTRL_PATH_STATS_CMD_FIXED_PARAM) |
+		FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+	cmd->stats_id = stats_id;
+	cmd->req_id = param->req_id;
+	cmd->action = param->action;
+
+	ptr = skb->data + sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+		FIELD_PREP(WMI_TLV_LEN, sizeof(u32));
+	ptr += TLV_HDR_SIZE;
+	memcpy(ptr, pdev_id_array, sizeof(pdev_id_array));
+	ptr += sizeof(u32);
+
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+		FIELD_PREP(WMI_TLV_LEN, 0);
+	ptr += TLV_HDR_SIZE;
+
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+		FIELD_PREP(WMI_TLV_LEN, 0);
+	ptr += TLV_HDR_SIZE;
+
+#ifdef CONFIG_ATH12K_DEBUGFS
+	if (param->action == WMI_REQUEST_CTRL_PATH_STAT_GET)
+		reinit_completion(&ar->debug.wmi_ctrl_path_stats_rcvd);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb,
+			WMI_REQUEST_CTRL_PATH_STATS_CMDID);
+	if (ret) {
+		dev_kfree_skb(skb);
+		ath12k_warn(ab, "Failed to send WMI_REQUEST_CTRL_PATH_STATS_CMDID: %d", ret);
+	} else {
+		if (param->action == WMI_REQUEST_CTRL_PATH_STAT_GET) {
+			time_left = wait_for_completion_timeout(
+					&ar->debug.wmi_ctrl_path_stats_rcvd,
+					WMI_CTRL_STATS_READY_TIMEOUT_HZ * HZ);
+			if (time_left == 0) {
+				ath12k_warn(ab, "timeout in receiving wmi ctrl path stats\n");
+				return -ETIMEDOUT;
+			}
+		}
+	}
+#endif
+
+	return ret;
+}
+
+int ath12k_wmi_send_twt_del_dialog_cmd(struct ath12k *ar,
+				       struct wmi_twt_del_dialog_params *params)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct ath12k_base *ab = wmi->wmi_ab->ab;
+	struct wmi_twt_del_dialog_params_cmd *cmd;
+	struct sk_buff *skb;
+	int ret, len;
+
+	len = sizeof(*cmd);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_twt_del_dialog_params_cmd *)skb->data;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DEL_DIALOG_CMD) |
+			  FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+	cmd->vdev_id = params->vdev_id;
+	ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+	cmd->dialog_id = params->dialog_id;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "wmi delete twt dialog vdev %u dialog id %u\n",
+		   cmd->vdev_id, cmd->dialog_id);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_TWT_DEL_DIALOG_CMDID);
+	if (ret) {
+		ath12k_warn(ab,
+			    "failed to send wmi command to delete twt dialog: %d",
+			    ret);
+		dev_kfree_skb(skb);
+	}
+	return ret;
+}
+
+#ifdef CONFIG_ATH12K_DEBUGFS
+void ath12k_wmi_crl_path_stats_list_free(struct ath12k *ar, struct list_head *head)
+{
+	struct wmi_ctrl_path_stats_list *stats, *tmp;
+
+	lockdep_assert_held(&ar->wmi_ctrl_path_stats_lock);
+	list_for_each_entry_safe(stats, tmp, head, list) {
+		kfree(stats->stats_ptr);
+		list_del(&stats->list);
+		kfree(stats);
+	}
+}
+
+int wmi_print_ctrl_path_pdev_tx_stats_tlv(struct ath12k_base *ab, u16 len, const void *ptr, void *data)
+{
+	struct wmi_ctrl_path_stats_ev_parse_param *stats_buff = (struct wmi_ctrl_path_stats_ev_parse_param *)data;
+	struct wmi_ctrl_path_pdev_stats *pdev_stats_skb = (struct wmi_ctrl_path_pdev_stats *)ptr;
+	struct wmi_ctrl_path_pdev_stats *pdev_stats = NULL;
+	struct wmi_ctrl_path_stats_list *stats = kzalloc(sizeof(struct wmi_ctrl_path_stats_list), GFP_ATOMIC);
+	struct ath12k *ar = NULL;
+
+	if (!stats)
+		return -ENOMEM;
+
+	pdev_stats = kzalloc(sizeof(*pdev_stats), GFP_ATOMIC);
+	if (!pdev_stats) {
+		kfree(stats);
+		return -ENOMEM;
+	}
+
+	memcpy(pdev_stats, pdev_stats_skb, sizeof(struct wmi_ctrl_path_pdev_stats));
+	stats->stats_ptr = pdev_stats;
+	list_add_tail(&stats->list, &stats_buff->list);
+
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_stats_skb->pdev_id + 1);
+	if (!ar) {
+		ath12k_warn(ab, "Failed to get ar for wmi ctrl stats\n");
+		kfree(pdev_stats);
+		kfree(stats);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ar->wmi_ctrl_path_stats_lock);
+	ath12k_wmi_crl_path_stats_list_free(ar, &ar->debug.wmi_list);
+	mutex_unlock(&ar->wmi_ctrl_path_stats_lock);
+	ar->debug.wmi_ctrl_path_stats_tagid = WMI_CTRL_PATH_PDEV_STATS;
+	stats_buff->ar = ar;
+	return 0;
+}
+
+int wmi_print_ctrl_path_cal_stats_tlv(struct ath12k_base *ab, u16 len,
+				      const void *ptr, void *data)
+{
+	struct wmi_ctrl_path_stats_ev_parse_param *stats_buff = (struct wmi_ctrl_path_stats_ev_parse_param *)data;
+	struct wmi_ctrl_path_cal_stats *cal_stats_skb = (struct wmi_ctrl_path_cal_stats *)ptr;
+	struct wmi_ctrl_path_cal_stats *cal_stats = NULL;
+	struct wmi_ctrl_path_stats_list *stats = kzalloc(sizeof(struct wmi_ctrl_path_stats_list), GFP_ATOMIC);
+	struct ath12k *ar = NULL;
+
+	if (!stats)
+		return -ENOMEM;
+
+	cal_stats = kzalloc(sizeof(*cal_stats), GFP_ATOMIC);
+	if (!cal_stats) {
+		kfree(stats);
+		return -ENOMEM;
+	}
+
+	memcpy(cal_stats, cal_stats_skb, sizeof(struct wmi_ctrl_path_cal_stats));
+	stats->stats_ptr = cal_stats;
+	list_add_tail(&stats->list, &stats_buff->list);
+
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, cal_stats_skb->pdev_id + 1);
+	if (!ar) {
+		ath12k_warn(ab, "Failed to get ar for wmi ctrl cal stats\n");
+		kfree(cal_stats);
+		kfree(stats);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ar->wmi_ctrl_path_stats_lock);
+	ath12k_wmi_crl_path_stats_list_free(ar, &ar->debug.wmi_list);
+	mutex_unlock(&ar->wmi_ctrl_path_stats_lock);
+	ar->debug.wmi_ctrl_path_stats_tagid = WMI_CTRL_PATH_CAL_STATS;
+	stats_buff->ar = ar;
+	return 0;
+}
+
+int wmi_print_ctrl_path_btcoex_stats_tlv(struct ath12k_base *ab, u16 len,
+					 const void *ptr, void *data)
+{
+	struct wmi_ctrl_path_stats_ev_parse_param *stats_buff =
+				(struct wmi_ctrl_path_stats_ev_parse_param *)data;
+	struct wmi_ctrl_path_btcoex_stats *btcoex_stats_skb =
+				(struct wmi_ctrl_path_btcoex_stats *)ptr;
+	struct wmi_ctrl_path_btcoex_stats *btcoex_stats = NULL;
+	struct wmi_ctrl_path_stats_list *stats;
+	struct ath12k *ar = NULL;
+
+	stats = kzalloc(sizeof(*stats), GFP_ATOMIC);
+	if (!stats)
+		return -ENOMEM;
+
+	btcoex_stats = kzalloc(sizeof(*btcoex_stats), GFP_ATOMIC);
+	if (!btcoex_stats) {
+		kfree(stats);
+		return -ENOMEM;
+	}
+
+	memcpy(btcoex_stats, btcoex_stats_skb, sizeof(*btcoex_stats));
+	stats->stats_ptr = btcoex_stats;
+	list_add_tail(&stats->list, &stats_buff->list);
+
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, btcoex_stats_skb->pdev_id + 1);
+	if (!ar) {
+		ath12k_warn(ab, "Failed to get ar for wmi ctrl btcoex stats\n");
+		kfree(btcoex_stats);
+		kfree(stats);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ar->wmi_ctrl_path_stats_lock);
+	ath12k_wmi_crl_path_stats_list_free(ar, &ar->debug.wmi_list);
+	mutex_unlock(&ar->wmi_ctrl_path_stats_lock);
+	ar->debug.wmi_ctrl_path_stats_tagid = WMI_CTRL_PATH_BTCOEX_STATS;
+	stats_buff->ar = ar;
+	return 0;
+}
+
+int wmi_print_ctrl_path_awgn_stats_tlv(struct ath12k_base *ab, u16 len,
+				       const void *ptr, void *data)
+{
+	struct wmi_ctrl_path_stats_ev_parse_param *stats_buff =
+			    (struct wmi_ctrl_path_stats_ev_parse_param *)data;
+	struct wmi_ctrl_path_awgn_stats *awgn_stats_skb, *awgn_stats = NULL;
+	struct wmi_ctrl_path_stats_list *stats;
+	struct ath12k *ar = NULL;
+	int i;
+
+	awgn_stats_skb = (struct wmi_ctrl_path_awgn_stats *)ptr;
+
+	for (i = 0; i < ATH12K_GROUP_MAX_RADIO; i++) {
+		ar = ab->ag->hw_links[i];
+		if (!ar) {
+			ath12k_warn(ab, "Failed to get ar for wmi ctrl awgn stats\n");
+			return -EINVAL;
+		}
+
+		if (ar->supports_6ghz)
+			break;
+	}
+
+	stats = kzalloc(sizeof(*stats), GFP_ATOMIC);
+	if (!stats)
+		return -ENOMEM;
+
+	awgn_stats = kzalloc(sizeof(*awgn_stats), GFP_ATOMIC);
+
+	if (!awgn_stats) {
+		kfree(stats);
+		return -ENOMEM;
+	}
+
+	memcpy(awgn_stats, awgn_stats_skb, sizeof(*awgn_stats));
+	stats->stats_ptr = awgn_stats;
+	list_add_tail(&stats->list, &stats_buff->list);
+
+	mutex_lock(&ar->wmi_ctrl_path_stats_lock);
+	ath12k_wmi_crl_path_stats_list_free(ar, &ar->debug.wmi_list);
+	mutex_unlock(&ar->wmi_ctrl_path_stats_lock);
+	ar->debug.wmi_ctrl_path_stats_tagid = WMI_CTRL_PATH_AWGN_STATS;
+	stats_buff->ar = ar;
+
+	return 0;
+}
+
+static int ath12k_wmi_ctrl_stats_subtlv_parser(struct ath12k_base *ab,
+					       u16 tag, u16 len,
+					       const void *ptr, void *data)
+{
+	int ret = 0;
+
+	switch (tag) {
+	case WMI_CTRL_PATH_STATS_EV_FIXED_PARAM:
+		break;
+	case WMI_CTRL_PATH_PDEV_STATS:
+		ret = wmi_print_ctrl_path_pdev_tx_stats_tlv(ab, len, ptr, data);
+		break;
+	case WMI_CTRL_PATH_CAL_STATS:
+		ret = wmi_print_ctrl_path_cal_stats_tlv(ab, len, ptr, data);
+		break;
+	case WMI_CTRL_PATH_BTCOEX_STATS:
+		ret = wmi_print_ctrl_path_btcoex_stats_tlv(ab, len, ptr, data);
+		break;
+	case WMI_CTRL_PATH_AWGN_STATS:
+		ret = wmi_print_ctrl_path_awgn_stats_tlv(ab, len, ptr, data);
+		break;
+		/* Add case for newly wmi ctrl path added stats here */
+	default:
+		ath12k_warn(ab,
+			    "Received invalid tag for wmi ctrl path stats in subtlvs, tag : 0x%x\n",
+			    tag);
+		return -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int ath12k_wmi_ctrl_stats_event_parser(struct ath12k_base *ab,
+				u16 tag, u16 len,
+				const void *ptr, void *data)
+{
+	int ret = 0;
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi ctrl path stats tag 0x%x of len %d rcvd\n",
+			tag, len);
+
+	switch (tag) {
+	case WMI_CTRL_PATH_STATS_EV_FIXED_PARAM:
+		/* Fixed param is already processed*/
+		break;
+	case WMI_TAG_ARRAY_STRUCT:
+		/* len 0 is expected for array of struct when there
+		 * is no content of that type to pack inside that tlv
+		 */
+		if (len == 0)
+			return 0;
+
+		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+					  ath12k_wmi_ctrl_stats_subtlv_parser,
+					  data);
+		break;
+	default:
+		ath12k_warn(ab, "Received invalid tag for wmi ctrl path stats\n");
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static void ath12k_wmi_ctrl_path_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+	int ret;
+	const struct wmi_tlv *tlv;
+	struct wmi_ctrl_path_stats_ev_param *fixed_param;
+	u16 tlv_tag;
+	u8 *ptr = skb->data;
+	struct ath12k *ar = NULL;
+	struct wmi_ctrl_path_stats_ev_parse_param param;
+	INIT_LIST_HEAD(&param.list);
+
+	if (!skb->data) {
+		ath12k_warn(ab, "No data present in wmi ctrl stats event\n");
+		return;
+	}
+
+	if (skb->len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
+		ath12k_warn(ab, "wmi ctrl stats event size invalid\n");
+		return;
+	}
+
+	param.ar = NULL;
+
+	tlv = (struct wmi_tlv *)ptr;
+	tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
+	ptr += sizeof(*tlv);
+
+	if (tlv_tag == WMI_CTRL_PATH_STATS_EV_FIXED_PARAM)
+		fixed_param = (struct wmi_ctrl_path_stats_ev_param *)ptr;
+	else
+		ath12k_warn(ab, "wmi ctrl Stats received without fixed param tlv at start\n");
+
+	if (!fixed_param) {
+		ath12k_warn(ab, "wmi ctrl Stats received fixed param is NULL\n");
+		goto free;
+	}
+
+	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+			ath12k_wmi_ctrl_stats_event_parser,
+			&param);
+	if (ret) {
+		ath12k_warn(ab, "failed to parse wmi_ctrl_path_stats tlv: %d\n", ret);
+		goto free;
+	}
+
+	ar = param.ar;
+	if (!ar)
+		return;
+
+	mutex_lock(&ar->wmi_ctrl_path_stats_lock);
+	list_splice_tail_init(&param.list, &ar->debug.wmi_list);
+	mutex_unlock(&ar->wmi_ctrl_path_stats_lock);
+
+	if (!fixed_param->more) {
+		complete(&ar->debug.wmi_ctrl_path_stats_rcvd);
+		ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi ctrl path stats completed");
+	}
+free:
+	ath12k_wmi_crl_path_stats_list_free(ar, &param.list);
+}
+#endif
+
+int ath12k_wmi_send_twt_pause_dialog_cmd(struct ath12k *ar,
+					 struct wmi_twt_pause_dialog_params *params)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct ath12k_base *ab = wmi->wmi_ab->ab;
+	struct wmi_twt_pause_dialog_params_cmd *cmd;
+	struct sk_buff *skb;
+	int ret, len;
+
+	len = sizeof(*cmd);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_twt_pause_dialog_params_cmd *)skb->data;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+				     WMI_TAG_TWT_PAUSE_DIALOG_CMD) |
+			  FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+	cmd->vdev_id = params->vdev_id;
+	ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+	cmd->dialog_id = params->dialog_id;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "wmi pause twt dialog vdev %u dialog id %u\n",
+		   cmd->vdev_id, cmd->dialog_id);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_TWT_PAUSE_DIALOG_CMDID);
+	if (ret) {
+		ath12k_warn(ab,
+			    "failed to send wmi command to pause twt dialog: %d",
+			    ret);
+		dev_kfree_skb(skb);
+	}
+	return ret;
+}
+
+int ath12k_wmi_send_twt_resume_dialog_cmd(struct ath12k *ar,
+					  struct wmi_twt_resume_dialog_params *params)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct ath12k_base *ab = wmi->wmi_ab->ab;
+	struct wmi_twt_resume_dialog_params_cmd *cmd;
+	struct sk_buff *skb;
+	int ret, len;
+
+	len = sizeof(*cmd);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_twt_resume_dialog_params_cmd *)skb->data;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+				     WMI_TAG_TWT_RESUME_DIALOG_CMD) |
+			  FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+	cmd->vdev_id = params->vdev_id;
+	ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+	cmd->dialog_id = params->dialog_id;
+	cmd->sp_offset_us = params->sp_offset_us;
+	cmd->next_twt_size = params->next_twt_size;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "wmi resume twt dialog vdev %u dialog id %u service period offset %u next twt subfield size %u\n",
+		   cmd->vdev_id, cmd->dialog_id, cmd->sp_offset_us,
+		   cmd->next_twt_size);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_TWT_RESUME_DIALOG_CMDID);
+	if (ret) {
+		ath12k_warn(ab,
+			    "failed to send wmi command to resume twt dialog: %d",
+			    ret);
+		dev_kfree_skb(skb);
 	}
 	return ret;
 }
@@ -2925,6 +5315,232 @@
 	return ret;
 }
 
+int ath12k_wmi_pdev_set_srg_bss_color_bitmap(struct ath12k *ar, u32 *bitmap)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct ath12k_base *ab = wmi->wmi_ab->ab;
+	struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+	struct sk_buff *skb;
+	int ret, len;
+
+	len = sizeof(*cmd);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+				     WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD) |
+			  FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+	cmd->pdev_id = ar->pdev->pdev_id;
+	memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "obss pd pdev_id %d bss color bitmap %08x %08x\n",
+		   cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb,
+				  WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID);
+	if (ret) {
+		ath12k_warn(ab,
+			    "Failed to send WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID");
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
+
+int
+ath12k_wmi_pdev_set_srg_patial_bssid_bitmap(struct ath12k *ar, u32 *bitmap)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct ath12k_base *ab = wmi->wmi_ab->ab;
+	struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+	struct sk_buff *skb;
+	int ret, len;
+
+	len = sizeof(*cmd);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+	cmd->tlv_header =
+		FIELD_PREP(WMI_TLV_TAG,
+			   WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD) |
+		FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+	cmd->pdev_id = ar->pdev->pdev_id;
+	memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "obss pd pdev_id %d partial bssid bitmap %08x %08x\n",
+		   cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb,
+				  WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID);
+	if (ret) {
+		ath12k_warn(ab,
+			    "Failed to send WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID");
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
+
+int
+ath12k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath12k *ar, u32 *bitmap)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct ath12k_base *ab = wmi->wmi_ab->ab;
+	struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+	struct sk_buff *skb;
+	int ret, len;
+
+	len = sizeof(*cmd);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+	cmd->tlv_header =
+		FIELD_PREP(WMI_TLV_TAG,
+			   WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) |
+		FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+	cmd->pdev_id = ar->pdev->pdev_id;
+	memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "obss pd srg pdev_id %d bss color enable bitmap %08x %08x\n",
+		   cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb,
+				  WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID);
+	if (ret) {
+		ath12k_warn(ab,
+			    "Failed to send WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID");
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
+
+int
+ath12k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath12k *ar, u32 *bitmap)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct ath12k_base *ab = wmi->wmi_ab->ab;
+	struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+	struct sk_buff *skb;
+	int ret, len;
+
+	len = sizeof(*cmd);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+	cmd->tlv_header =
+		FIELD_PREP(WMI_TLV_TAG,
+			   WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) |
+		FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+	cmd->pdev_id = ar->pdev->pdev_id;
+	memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "obss pd srg pdev_id %d bssid enable bitmap %08x %08x\n",
+		   cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb,
+				  WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID);
+	if (ret) {
+		ath12k_warn(ab,
+			    "Failed to send WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID");
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
+
+int
+ath12k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath12k *ar, u32 *bitmap)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct ath12k_base *ab = wmi->wmi_ab->ab;
+	struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+	struct sk_buff *skb;
+	int ret, len;
+
+	len = sizeof(*cmd);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+	cmd->tlv_header =
+		FIELD_PREP(WMI_TLV_TAG,
+			   WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) |
+		FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+	cmd->pdev_id = ar->pdev->pdev_id;
+	memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "obss pd non_srg pdev_id %d bss color enable bitmap %08x %08x\n",
+		   cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb,
+				  WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID);
+	if (ret) {
+		ath12k_warn(ab,
+			    "Failed to send WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID");
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
+
+int
+ath12k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath12k *ar, u32 *bitmap)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct ath12k_base *ab = wmi->wmi_ab->ab;
+	struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+	struct sk_buff *skb;
+	int ret, len;
+
+	len = sizeof(*cmd);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+	cmd->tlv_header =
+		FIELD_PREP(WMI_TLV_TAG,
+			   WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) |
+		FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+	cmd->pdev_id = ar->pdev->pdev_id;
+	memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "obss pd non_srg pdev_id %d bssid enable bitmap %08x %08x\n",
+		   cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb,
+				  WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID);
+	if (ret) {
+		ath12k_warn(ab,
+			    "Failed to send WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID");
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
+
 int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
 						bool enable)
 {
@@ -3000,6 +5616,41 @@
 	return ret;
 }
 
+/* Append the MLO critical-update (CU) ML-info TLV to a probe response
+ * template command buffer.
+ *
+ * @ptr points at the tail of the command being built; a WMI_TAG_ARRAY_STRUCT
+ * TLV containing one wmi_prb_resp_tmpl_ml_info_params is written there from
+ * the per-link state cached in arvif->ml_info, which is then zeroed (the CU
+ * bitmap/BPCC values are consumed once per template push).
+ *
+ * Returns the advanced write pointer, or @ptr unchanged if no link vif is
+ * found for @vdev_id (nothing is appended in that case).
+ */
+static void *
+ath12k_wmi_append_prb_resp_cu_params(struct ath12k *ar, u32 vdev_id, void *ptr)
+{
+	struct wmi_prb_resp_tmpl_ml_info_params *ml_info;
+	struct ath12k_prb_resp_tmpl_ml_info *ar_ml_info;
+	void *start = ptr;
+	struct wmi_tlv *tlv;
+	struct ath12k_link_vif *arvif = ath12k_mac_get_arvif(ar, vdev_id);
+	size_t ml_info_len = sizeof(*ml_info);
+
+	if (!arvif)
+		return ptr;
+
+	/* Add ML info */
+	tlv = ptr;
+	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, ml_info_len);
+	ml_info = (struct wmi_prb_resp_tmpl_ml_info_params *)tlv->value;
+
+	ar_ml_info = &arvif->ml_info;
+	ml_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_RESP_TMPL_ML_INFO_CMD,
+						     sizeof(*ml_info));
+	ml_info->hw_link_id = cpu_to_le32(ar_ml_info->hw_link_id);
+	ml_info->cu_vdev_map_cat1_lo = cpu_to_le32(ar_ml_info->cu_vdev_map_cat1_lo);
+	ml_info->cu_vdev_map_cat1_hi = cpu_to_le32(ar_ml_info->cu_vdev_map_cat1_hi);
+	ml_info->cu_vdev_map_cat2_lo = cpu_to_le32(ar_ml_info->cu_vdev_map_cat2_lo);
+	ml_info->cu_vdev_map_cat2_hi = cpu_to_le32(ar_ml_info->cu_vdev_map_cat2_hi);
+
+	ptr += TLV_HDR_SIZE + sizeof(*ml_info);
+	/* Reset CU bitmap and bpcc values */
+	memset(&arvif->ml_info, 0, sizeof(arvif->ml_info));
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi %ld bytes of additional data filled for prb resp CU\n",
+		   (unsigned long)(ptr - start));
+	return ptr;
+}
+
 int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
 			       struct sk_buff *tmpl)
 {
@@ -3008,18 +5659,23 @@
 	struct wmi_tlv *tlv;
 	struct sk_buff *skb;
 	void *ptr;
-	int ret, len;
+	int ret, len, mlinfo_tlv_len = 0;
 	size_t aligned_len = roundup(tmpl->len, 4);
+	struct ath12k_link_vif *arvif = ath12k_mac_get_arvif(ar, vdev_id);
 
 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
 		   "WMI vdev %i set probe response template\n", vdev_id);
 
-	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
+	if (ath12k_mac_is_ml_arvif(arvif))
+		mlinfo_tlv_len = TLV_HDR_SIZE + sizeof(struct wmi_prb_resp_tmpl_ml_info_params);
+
+	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len + mlinfo_tlv_len;
 
 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
-	if (!skb)
+	if (!skb) {
+		memset(&arvif->ml_info, 0, sizeof(arvif->ml_info));
 		return -ENOMEM;
-
+	}
 	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
 						 sizeof(*cmd));
@@ -3040,6 +5696,10 @@
 	tlv = ptr;
 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
 	memcpy(tlv->value, tmpl->data, tmpl->len);
+	ptr += (TLV_HDR_SIZE + aligned_len);
+
+	if (ath12k_mac_is_ml_arvif(arvif))
+		ptr = ath12k_wmi_append_prb_resp_cu_params(ar, vdev_id, ptr);
 
 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
 	if (ret) {
@@ -3176,13 +5836,25 @@
 	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
 	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
 	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
-	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config);
+	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config) | WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
 	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
 	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
 	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
 	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
-	wmi_cfg->host_service_flags =
-		cpu_to_le32(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
+	wmi_cfg->flags2 = WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET |
+			  WMI_RSRC_CFG_FLAGS2_INTRABSS_MEC_WDS_LEARNING_DISABLE |
+			  u32_encode_bits(tg_cfg->dp_peer_meta_data_ver,
+					  WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
+#ifdef CONFIG_ATH12K_SAWF
+	wmi_cfg->flags2 |= (tg_cfg->sawf & ath12k_sawf_enable) ?
+			   (WMI_RSRC_CFG_FLAGS2_SAWF_CONFIG_ENABLE_SET) : (0);
+#endif /* CONFIG_ATH12K_SAWF */
+	wmi_cfg->host_service_flags &= ~(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
+	wmi_cfg->host_service_flags |= 1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT;
+	wmi_cfg->host_service_flags |= 1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REO_QREF_SUPPORT_BIT;
+	wmi_cfg->host_service_flags |= 1 << WMI_RSRC_CFG_HOST_SVC_FLAG_FULL_BW_NOL_SUPPORT_BIT;
+	wmi_cfg->ema_max_vap_cnt = tg_cfg->ema_max_vap_cnt;
+	wmi_cfg->ema_max_profile_period = tg_cfg->ema_max_profile_period;
 }
 
 static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
@@ -3196,6 +5868,8 @@
 	struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
 	struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
 	struct wmi_tlv *tlv;
+	struct device *dev = ab->dev;
+	bool three_way_coex_enabled = false;
 	size_t ret, len;
 	void *ptr;
 	u32 hw_mode_len = 0;
@@ -3220,6 +5894,10 @@
 	ptr = skb->data + sizeof(*cmd);
 	cfg = ptr;
 
+	three_way_coex_enabled = of_property_read_bool(dev->of_node, "qcom,btcoex");
+	if (three_way_coex_enabled)
+		cfg->flag1 |= WMI_RSRC_CFG_FLAG1_THREE_WAY_COEX_CONFIG_OVERRIDE_SUPPORT;
+
 	ath12k_wmi_copy_resource_config(cfg, &arg->res_cfg);
 
 	cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
@@ -3390,6 +6068,12 @@
 	struct ath12k_wmi_base *wmi_sc = &ab->wmi_ab;
 	struct ath12k_wmi_init_cmd_arg arg = {};
 
+	if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
+		     ab->wmi_ab.svc_map))
+		arg.res_cfg.is_reg_cc_ext_event_supported = true;
+	if (test_bit(WMI_TLV_SERVICE_RADAR_FLAGS_SUPPORT, ab->wmi_ab.svc_map))
+		arg.res_cfg.is_full_bw_nol_feature_supported = true;
+
 	ab->hw_params->wmi_init(ab, &arg.res_cfg);
 
 	arg.num_mem_chunks = wmi_sc->num_mem_chunks;
@@ -3509,7 +6193,7 @@
 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
 						 sizeof(*cmd));
 
-	cmd->pdev_id = cpu_to_le32(DP_SW2HW_MACID(arg->pdev_id));
+	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
 	cmd->module_id = cpu_to_le32(arg->module_id);
 	cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
 	cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
@@ -3633,6 +6317,7 @@
 	struct ath12k_dbring_buf_release_event param;
 	int ret;
 
+
 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
 				  ath12k_wmi_dma_buf_parse,
 				  &arg);
@@ -3654,6 +6339,102 @@
 	}
 }
 
+
+/* Parse the flat array of WMI_TAG_MAC_PHY_CHAINMASK_CAPABILITY entries that
+ * follows the chainmask-combo TLVs in the extended service-ready event.
+ *
+ * The capability entries for all tables arrive back to back; @cmask_caps is
+ * advanced once per (table, entry) pair, so the per-table counts filled in by
+ * the combo parser must match the firmware payload.  After copying the raw
+ * entries, each pdev's adfs_chain_mask is derived from the table whose id
+ * matches pdev_cap->chainmask_table_id.
+ *
+ * Returns 0 on success, -EINVAL on missing/over-range table info, -EPROTO on
+ * an unexpected TLV tag.
+ * NOTE(review): @len is unused, and if no table id matches a pdev the second
+ * loop falls through with the last table — presumably firmware guarantees a
+ * match; verify against caller.
+ */
+static int ath12k_wmi_tlv_mac_phy_chainmask_caps(struct ath12k_base *soc,
+						 u16 len, const void *ptr, void *data)
+{
+	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+	struct wmi_mac_phy_chainmask_caps *cmask_caps = (struct wmi_mac_phy_chainmask_caps *)ptr;
+	struct ath12k_chainmask_table *cmask_table;
+	struct ath12k_pdev_cap *pdev_cap;
+	u32 tag;
+	int i, j;
+
+	if (!svc_rdy_ext->hw_mode_caps)
+		return -EINVAL;
+
+	if ((!svc_rdy_ext->arg.num_chainmask_tables) ||
+	    (svc_rdy_ext->arg.num_chainmask_tables > ATH12K_MAX_CHAINMASK_TABLES))
+		return -EINVAL;
+
+	for (i = 0; i < svc_rdy_ext->arg.num_chainmask_tables; i++) {
+		cmask_table = &svc_rdy_ext->arg.chainmask_table[i];
+
+		for (j = 0; j < cmask_table->num_valid_chainmasks; j++) {
+			tag = FIELD_GET(WMI_TLV_TAG, cmask_caps->tlv_header);
+
+			if (tag != WMI_TAG_MAC_PHY_CHAINMASK_CAPABILITY)
+                                return -EPROTO;
+
+                        cmask_table->cap_list[j].chainmask = cmask_caps->chainmask;
+                        cmask_table->cap_list[j].supported_caps = cmask_caps->supported_flags;
+                        cmask_caps++;
+			ath12k_dbg(soc, ATH12K_DBG_WMI,"[id %d] chainmask %x supported_caps %x",
+				   cmask_table->table_id, cmask_table->cap_list[j].chainmask,
+				   cmask_table->cap_list[j].supported_caps);
+		}
+	}
+
+	/* Derive each pdev's agile-DFS chain mask from its matching table. */
+	for (i = 0; i < soc->num_radios; i++) {
+		pdev_cap = &soc->pdevs[i].cap;
+		for (j = 0; j < svc_rdy_ext->n_mac_phy_chainmask_combo; j++) {
+			cmask_table = &svc_rdy_ext->arg.chainmask_table[j];
+			if (cmask_table->table_id == pdev_cap->chainmask_table_id)
+				break;
+		}
+		for (j = 0; j < cmask_table->num_valid_chainmasks; j++) {
+                        if (cmask_table->cap_list[j].supported_caps & WMI_SUPPORT_CHAIN_MASK_ADFS)
+				pdev_cap->adfs_chain_mask |= (1 << cmask_table->cap_list[j].chainmask);
+		}
+		ath12k_dbg(soc, ATH12K_DBG_WMI, "updated adfs chain mask %lx for pdev %d",
+			   pdev_cap->adfs_chain_mask, i);
+	}
+	return 0;
+}
+
+/* Free every cap_list allocated by the chainmask-combo parser and NULL the
+ * pointers so a repeated call (success and error paths both invoke this) is
+ * safe.  No-op when no chainmask tables were reported.
+ */
+static void ath12k_wmi_free_chainmask_caps(struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext)
+{
+	int i;
+
+	if (!svc_rdy_ext->arg.num_chainmask_tables)
+		return;
+
+	for (i = 0; i < svc_rdy_ext->arg.num_chainmask_tables; i++) {
+		if (!svc_rdy_ext->arg.chainmask_table[i].cap_list)
+			continue;
+		kfree(svc_rdy_ext->arg.chainmask_table[i].cap_list);
+		svc_rdy_ext->arg.chainmask_table[i].cap_list = NULL;
+	}
+}
+
+/* TLV-iterator callback for one WMI_TAG_MAC_PHY_CHAINMASK_COMBO entry.
+ *
+ * Records the combo's table id and entry count in the next free slot of
+ * arg.chainmask_table (indexed by n_mac_phy_chainmask_combo, which is
+ * incremented on success) and allocates its cap_list, later filled by
+ * ath12k_wmi_tlv_mac_phy_chainmask_caps() and released by
+ * ath12k_wmi_free_chainmask_caps().
+ *
+ * Returns 0 on success, -EPROTO on a wrong tag, -ENOBUFS when firmware
+ * sends more combos than advertised tables, -ENOMEM on allocation failure.
+ */
+static int ath12k_wmi_tlv_mac_phy_chainmask_combo_parse(struct ath12k_base *soc,
+							u16 tag, u16 len,
+							const void *ptr, void *data)
+{
+	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+	struct wmi_mac_phy_chainmask_combo *cmask_combo = (struct wmi_mac_phy_chainmask_combo *) ptr;
+	u32 i = svc_rdy_ext->n_mac_phy_chainmask_combo;
+	struct ath12k_chainmask_table *cmask_table;
+
+	if (tag != WMI_TAG_MAC_PHY_CHAINMASK_COMBO)
+		return -EPROTO;
+
+	if (svc_rdy_ext->n_mac_phy_chainmask_combo >= svc_rdy_ext->arg.num_chainmask_tables)
+		return -ENOBUFS;
+
+	cmask_table = &svc_rdy_ext->arg.chainmask_table[i];
+	cmask_table->table_id = cmask_combo->chainmask_table_id;
+	cmask_table->num_valid_chainmasks = cmask_combo->num_valid_chainmask;
+	/* GFP_ATOMIC: called from the WMI event path, no sleeping allowed. */
+	cmask_table->cap_list = kcalloc(cmask_combo->num_valid_chainmask,
+					sizeof(struct ath12k_chainmask_caps),
+					GFP_ATOMIC);
+	if (!svc_rdy_ext->arg.chainmask_table[i].cap_list)
+		return -ENOMEM;
+
+	svc_rdy_ext->n_mac_phy_chainmask_combo++;
+	return 0;
+}
+
 static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
 					 u16 tag, u16 len,
 					 const void *ptr, void *data)
@@ -3954,6 +6735,7 @@
 		svc_rdy_ext->hw_caps = ptr;
 		svc_rdy_ext->arg.num_hw_modes =
 			le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
+		svc_rdy_ext->arg.num_chainmask_tables = le32_to_cpu(svc_rdy_ext->hw_caps->num_chainmask_tables);
 		break;
 
 	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
@@ -3988,8 +6770,21 @@
 
 			svc_rdy_ext->ext_hal_reg_done = true;
 		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
+			svc_rdy_ext->n_mac_phy_chainmask_combo = 0;
+			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+						  ath12k_wmi_tlv_mac_phy_chainmask_combo_parse,
+						  svc_rdy_ext);
+			if (ret) {
+				ath12k_warn(ab, "failed to parse chainmask combo tlv %d\n", ret);
+				return ret;
+			}
 			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
 		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
+			ret = ath12k_wmi_tlv_mac_phy_chainmask_caps(ab, len, ptr, svc_rdy_ext);
+			if (ret) {
+				ath12k_warn(ab, "failed to parse chainmask caps tlv %d\n", ret);
+				return ret;
+			}
 			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
 		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
 			svc_rdy_ext->oem_dma_ring_cap_done = true;
@@ -4027,9 +6822,12 @@
 		complete(&ab->wmi_ab.service_ready);
 
 	kfree(svc_rdy_ext.mac_phy_caps);
+	ath12k_wmi_free_chainmask_caps(&svc_rdy_ext);
 	return 0;
 
 err:
+	kfree(svc_rdy_ext.mac_phy_caps);
+	ath12k_wmi_free_chainmask_caps(&svc_rdy_ext);
 	ath12k_wmi_free_dbring_caps(ab);
 	return ret;
 }
@@ -4038,10 +6836,23 @@
 					 u16 tag, u16 len,
 					 const void *ptr, void *data)
 {
+	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
 	struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
 	int ret;
 
 	switch (tag) {
+	case WMI_TAG_SERVICE_READY_EXT2_EVENT:
+		ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr,
+						 &parse->param);
+		if (ret) {
+			ath12k_warn(ab, "unable to extract ext params\n");
+			return ret;
+		}
+		ab->max_msduq_per_tid = parse->param.max_msduq_per_tid;
+		ab->default_msduq_per_tid = parse->param.default_msduq_per_tid;
+		ab->chwidth_num_peer_caps = parse->param.chwidth_num_peer_caps;
+		break;
+
 	case WMI_TAG_ARRAY_STRUCT:
 		if (!parse->dma_ring_cap_done) {
 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
@@ -4050,6 +6861,22 @@
 				return ret;
 
 			parse->dma_ring_cap_done = true;
+		} else if (!parse->spectral_bin_scaling_done) {
+			/* To-Do: This is a place-holder as WMI tag for
+			 * spectral scaling is before
+			 * WMI_TAG_MAC_PHY_CAPABILITIES_EXT
+			 */
+			parse->spectral_bin_scaling_done = true;
+		} else if (!parse->mac_phy_caps_ext_done) {
+			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+						  ath12k_wmi_tlv_mac_phy_caps_ext,
+						  parse);
+			if (ret) {
+				ath12k_warn(ab, "failed to parse tlv %d\n", ret);
+				return ret;
+			}
+
+			parse->mac_phy_caps_ext_done = true;
 		}
 		break;
 	default:
@@ -4062,7 +6889,7 @@
 static int ath12k_service_ready_ext2_event(struct ath12k_base *ab,
 					   struct sk_buff *skb)
 {
-	struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { };
+	struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { 0 };
 	int ret;
 
 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
@@ -4152,6 +6979,140 @@
 	return reg_rule_ptr;
 }
 
+/* Map an ath12k_reg_cc_code country-code status to its printable name
+ * for WMI debug logging.
+ */
+static const char *ath12k_cc_status_to_str(enum ath12k_reg_cc_code code)
+{
+	switch (code) {
+	case REG_SET_CC_STATUS_PASS:
+		return "REG_SET_CC_STATUS_PASS";
+	case REG_CURRENT_ALPHA2_NOT_FOUND:
+		return "REG_CURRENT_ALPHA2_NOT_FOUND";
+	case REG_INIT_ALPHA2_NOT_FOUND:
+		return "REG_INIT_ALPHA2_NOT_FOUND";
+	case REG_SET_CC_CHANGE_NOT_ALLOWED:
+		return "REG_SET_CC_CHANGE_NOT_ALLOWED";
+	case REG_SET_CC_STATUS_NO_MEMORY:
+		return "REG_SET_CC_STATUS_NO_MEMORY";
+	case REG_SET_CC_STATUS_FAIL:
+		return "REG_SET_CC_STATUS_FAIL";
+	default:
+		return "unknown cc status";
+	}
+}
+
+/* Map a 6 GHz super-domain id to its printable name for debug logging. */
+static const char *ath12k_super_reg_6g_to_str(enum reg_super_domain_6g domain_id)
+{
+	switch (domain_id) {
+	case FCC1_6G:
+		return "FCC1_6G";
+	case ETSI1_6G:
+		return "ETSI1_6G";
+	case ETSI2_6G:
+		return "ETSI2_6G";
+	case APL1_6G:
+		return "APL1_6G";
+	case FCC1_6G_CL:
+		return "FCC1_6G_CL";
+	default:
+		return "unknown domain id";
+	}
+}
+
+/* Map a 6 GHz regulatory client type to its printable name for debug logs. */
+static const char *ath12k_6g_client_type_to_str(enum wmi_reg_6g_client_type type)
+{
+	switch (type) {
+	case WMI_REG_DEFAULT_CLIENT:
+		return "DEFAULT CLIENT";
+	case WMI_REG_SUBORDINATE_CLIENT:
+		return "SUBORDINATE CLIENT";
+	default:
+		return "unknown client type";
+	}
+}
+
+/* Map a 6 GHz regulatory AP power type to its printable name for debug logs. */
+static const char *ath12k_6g_ap_type_to_str(enum wmi_reg_6g_ap_type type)
+{
+	switch (type) {
+	case WMI_REG_INDOOR_AP:
+		return "INDOOR AP";
+	case WMI_REG_STD_POWER_AP:
+		return "STANDARD POWER AP";
+	case WMI_REG_VLP_AP:
+		return "VERY LOW POWER AP";
+	default:
+		return "unknown AP type";
+	}
+}
+
+/* Map a 6 GHz sub-domain id to its printable name for debug logging. */
+static const char *ath12k_sub_reg_6g_to_str(enum reg_subdomains_6g sub_id)
+{
+	switch (sub_id) {
+	case FCC1_CLIENT_LPI_REGULAR_6G:
+		return "FCC1_CLIENT_LPI_REGULAR_6G";
+	case FCC1_CLIENT_SP_6G:
+		return "FCC1_CLIENT_SP_6G";
+	case FCC1_AP_LPI_6G:
+		return "FCC1_AP_LPI_6G/FCC1_CLIENT_LPI_SUBORDINATE";
+	case FCC1_AP_SP_6G:
+		return "FCC1_AP_SP_6G";
+	case ETSI1_LPI_6G:
+		return "ETSI1_LPI_6G";
+	case ETSI1_VLP_6G:
+		return "ETSI1_VLP_6G";
+	case ETSI2_LPI_6G:
+		return "ETSI2_LPI_6G";
+	case ETSI2_VLP_6G:
+		return "ETSI2_VLP_6G";
+	case APL1_LPI_6G:
+		return "APL1_LPI_6G";
+	case APL1_VLP_6G:
+		return "APL1_VLP_6G";
+	case EMPTY_6G:
+		return "N/A";
+	default:
+		return "unknown sub reg id";
+	}
+}
+
+/* Dump @num_reg_rules regulatory rules starting at @reg_rule_ptr to the
+ * WMI debug log, prefixed with @prev (e.g. "2g"/"5g" or an AP/client type).
+ */
+static void ath12k_print_reg_rule(struct ath12k_base *ab, const char *prev,
+				  u32 num_reg_rules,
+				  const struct ath12k_reg_rule *reg_rule_ptr)
+{
+	const struct ath12k_reg_rule *reg_rule = reg_rule_ptr;
+	u32 count;
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI, "%s reg rules number %d\n", prev, num_reg_rules);
+
+	for (count = 0; count < num_reg_rules; count++) {
+		ath12k_dbg(ab, ATH12K_DBG_WMI,
+			   "reg rule %d: (%d - %d @ %d) (%d, %d) (FLAGS %d) (psd flag %d EIRP %d dB/MHz)\n",
+			   count + 1, reg_rule->start_freq, reg_rule->end_freq,
+			   reg_rule->max_bw, reg_rule->ant_gain, reg_rule->reg_power,
+			   reg_rule->flags, reg_rule->psd_flag, reg_rule->psd_eirp);
+		reg_rule++;
+	}
+}
+
+/* Count entries in a firmware 5 GHz ext reg-rule list whose frequency span
+ * actually lies inside the 6 GHz band (ATH12K_MIN_6G_FREQ..ATH12K_MAX_6G_FREQ).
+ * Some firmware wrongly duplicates 6 GHz rules into the 5 GHz list; the
+ * caller uses this count to drop them.
+ */
+static u8
+ath12k_invalid_5g_reg_ext_rules_from_wmi(u32 num_reg_rules,
+					 struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
+{
+	u8 num_invalid_5g_rules = 0;
+	u32 count, start_freq, end_freq;
+
+	for (count = 0; count < num_reg_rules; count++) {
+		start_freq = FIELD_GET(REG_RULE_START_FREQ,
+				       wmi_reg_rule[count].freq_info);
+		end_freq = FIELD_GET(REG_RULE_END_FREQ,
+				     wmi_reg_rule[count].freq_info);
+
+		if (start_freq >= ATH12K_MIN_6G_FREQ &&
+		    end_freq <= ATH12K_MAX_6G_FREQ)
+			num_invalid_5g_rules++;
+	}
+
+	return num_invalid_5g_rules;
+}
+
 static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
 						   struct sk_buff *skb,
 						   struct ath12k_reg_info *reg_info)
@@ -4163,9 +7124,11 @@
 	u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
 	u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
 	u32 total_reg_rules = 0;
-	int ret, i, j;
+	int ret, i, j, skip_6g_rules_in_5g_rules = 0;
 
-	ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+                   "%s: status_code %s", __func__,
+                   ath12k_cc_status_to_str(reg_info->status_code));
 
 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
 	if (IS_ERR(tb)) {
@@ -4190,6 +7153,18 @@
 	reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
 		le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);
 
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+                  "6g reg info client type %s rnr_tpe_usable %d unspecified_ap_usable %d AP sub domain: lpi %s , sp %s , vlp %s\n",
+                  ath12k_6g_client_type_to_str(reg_info->client_type),
+                  reg_info->rnr_tpe_usable,
+                  reg_info->unspecified_ap_usable,
+                  ath12k_sub_reg_6g_to_str
+                  (ev->domain_code_6g_ap_lpi),
+                  ath12k_sub_reg_6g_to_str
+                  (ev->domain_code_6g_ap_sp),
+                  ath12k_sub_reg_6g_to_str
+                  (ev->domain_code_6g_ap_vlp));
+
 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
 		reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
 			le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
@@ -4197,6 +7172,14 @@
 			le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
 		reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
 			le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
+		ath12k_dbg(ab, ATH12K_DBG_WMI,
+                  "6g AP BW: lpi %d - %d sp %d - %d vlp %d - %d\n",
+                  ev->min_bw_6g_ap_lpi,
+                  ev->max_bw_6g_ap_lpi,
+                  ev->min_bw_6g_ap_sp,
+                  ev->max_bw_6g_ap_sp,
+                  ev->min_bw_6g_ap_vlp,
+                  ev->max_bw_6g_ap_vlp);
 	}
 
 	num_2g_reg_rules = reg_info->num_2g_reg_rules;
@@ -4255,20 +7238,6 @@
 
 	memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
 
-	/* FIXME: Currently FW includes 6G reg rule also in 5G rule
-	 * list for country US.
-	 * Having same 6G reg rule in 5G and 6G rules list causes
-	 * intersect check to be true, and same rules will be shown
-	 * multiple times in iw cmd. So added hack below to avoid
-	 * parsing 6G rule from 5G reg rule list, and this can be
-	 * removed later, after FW updates to remove 6G reg rule
-	 * from 5G rules list.
-	 */
-	if (memcmp(reg_info->alpha2, "US", 2) == 0) {
-		reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
-		num_5g_reg_rules = reg_info->num_5g_reg_rules;
-	}
-
 	reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
 	reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
 	reg_info->num_phy = le32_to_cpu(ev->num_phy);
@@ -4326,10 +7295,15 @@
 	}
 
 	ath12k_dbg(ab, ATH12K_DBG_WMI,
-		   "%s:cc_ext %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d",
+		   "%s: status_code %s", __func__,
+                   ath12k_cc_status_to_str(reg_info->status_code));
+
+        ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "%s:cc_ext %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap: 0x%x",
 		   __func__, reg_info->alpha2, reg_info->dfs_region,
 		   reg_info->min_bw_2g, reg_info->max_bw_2g,
-		   reg_info->min_bw_5g, reg_info->max_bw_5g);
+		   reg_info->min_bw_5g, reg_info->max_bw_5g,
+		   reg_info->phybitmap);
 
 	ath12k_dbg(ab, ATH12K_DBG_WMI,
 		   "num_2g_reg_rules %d num_5g_reg_rules %d",
@@ -4368,10 +7342,35 @@
 			ath12k_warn(ab, "Unable to Allocate memory for 2g rules\n");
 			return -ENOMEM;
 		}
+		ath12k_print_reg_rule(ab, "2g",
+                                     num_2g_reg_rules,
+                                     reg_info->reg_rules_2g_ptr);
+	}
+	ext_wmi_reg_rule += num_2g_reg_rules;
+
+	/* FIXME: Currently FW includes 6G reg rule also in 5G rule
+         * list for few countries.
+         * Having same 6G reg rule in 5G and 6G rules list causes
+         * intersect check to be true, and same rules will be shown
+         * multiple times in iw cmd. So added logic below to avoid
+         * parsing 6G rule from 5G reg rule list, and this can be
+         * removed later, after FW updates to remove 6G reg rule
+         * from 5G rules list.
+         */
+        skip_6g_rules_in_5g_rules = ath12k_invalid_5g_reg_ext_rules_from_wmi(num_5g_reg_rules,
+                                                                             ext_wmi_reg_rule);
+
+        if(skip_6g_rules_in_5g_rules) {
+                ath12k_dbg(ab, ATH12K_DBG_WMI,
+                           "CC: %s 5g reg rules number %d from fw, %d number of invalid 5g rules",
+                           reg_info->alpha2, reg_info->num_5g_reg_rules,
+                           skip_6g_rules_in_5g_rules);
+
+                num_5g_reg_rules = num_5g_reg_rules - skip_6g_rules_in_5g_rules;
+                reg_info->num_5g_reg_rules = num_5g_reg_rules;
 	}
 
 	if (num_5g_reg_rules) {
-		ext_wmi_reg_rule += num_2g_reg_rules;
 		reg_info->reg_rules_5g_ptr =
 			create_ext_reg_rules_from_wmi(num_5g_reg_rules,
 						      ext_wmi_reg_rule);
@@ -4381,9 +7380,18 @@
 			ath12k_warn(ab, "Unable to Allocate memory for 5g rules\n");
 			return -ENOMEM;
 		}
+		ath12k_print_reg_rule(ab, "5g",
+                                     num_5g_reg_rules,
+                                     reg_info->reg_rules_5g_ptr);
 	}
 
-	ext_wmi_reg_rule += num_5g_reg_rules;
+	/* We have adjusted the number of 5g reg rules via the hack above.
+         * Here, we adjust that many extra rules which came with 5g reg rules
+         * (for cc: US)
+         *
+         * NOTE: skip_6g_rules_in_5g_rules will be 0 for rest other cases.
+         */
+        ext_wmi_reg_rule += num_5g_reg_rules + skip_6g_rules_in_5g_rules;
 
 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
 		reg_info->reg_rules_6g_ap_ptr[i] =
@@ -4396,10 +7404,17 @@
 			return -ENOMEM;
 		}
 
+		ath12k_print_reg_rule(ab, ath12k_6g_ap_type_to_str(i),
+                                     num_6g_reg_rules_ap[i],
+                                     reg_info->reg_rules_6g_ap_ptr[i]);
+
 		ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
 	}
 
 	for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
+		ath12k_dbg(ab, ATH12K_DBG_WMI,
+                          "AP type %s", ath12k_6g_ap_type_to_str(j));
+
 		for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
 			reg_info->reg_rules_6g_client_ptr[j][i] =
 				create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
@@ -4411,6 +7426,10 @@
 				return -ENOMEM;
 			}
 
+			ath12k_print_reg_rule(ab, ath12k_6g_client_type_to_str(i),
+                                             num_6g_reg_rules_cl[j][i],
+                                             reg_info->reg_rules_6g_client_ptr[j][i]);
+
 			ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
 		}
 	}
@@ -4436,8 +7455,9 @@
 
 	reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);
 
-	ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
-		   reg_info->client_type, reg_info->domain_code_6g_super_id);
+	ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client type %s 6g super domain %s",
+                  ath12k_6g_client_type_to_str(reg_info->client_type),
+                  ath12k_super_reg_6g_to_str(reg_info->domain_code_6g_super_id));
 
 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");
 
@@ -4560,11 +7580,46 @@
 	return 0;
 }
 
+/* Sub-TLV iterator callback for the WMI_TAG_ARRAY_STRUCT section of a
+ * mgmt-rx event.  On WMI_TAG_MLO_MGMT_RX_CU_PARAMS it unpacks four
+ * little-endian words into eight per-hw-link critical-update vdev maps
+ * (low/high halves of each word) and marks mgmt_ml_info_done so the
+ * caller knows CU data is present.  Unknown tags are skipped; always
+ * returns 0.
+ */
+static int ath12k_wmi_mgmt_rx_sub_tlv_parse(struct ath12k_base *ab,
+					    u16 tag, u16 len,
+					    const void *ptr, void *data)
+{
+	struct wmi_tlv_mgmt_rx_parse *parse = data;
+	struct ath12k_mgmt_rx_cu_arg *rx_cu_params;
+	struct ath12k_wmi_mgmt_rx_cu_params *rx_cu_params_tlv;
+
+	switch (tag) {
+	case WMI_TAG_MLO_MGMT_RX_CU_PARAMS:
+		rx_cu_params = &parse->cu_params;
+		rx_cu_params_tlv = (struct ath12k_wmi_mgmt_rx_cu_params *)ptr;
+		rx_cu_params->cu_vdev_map[0] =
+			le32_get_bits(rx_cu_params_tlv->cu_vdev_map_1, CU_VDEV_MAP_LB);
+		rx_cu_params->cu_vdev_map[1] =
+			le32_get_bits(rx_cu_params_tlv->cu_vdev_map_1, CU_VDEV_MAP_HB);
+		rx_cu_params->cu_vdev_map[2] =
+			le32_get_bits(rx_cu_params_tlv->cu_vdev_map_2, CU_VDEV_MAP_LB);
+		rx_cu_params->cu_vdev_map[3] =
+			le32_get_bits(rx_cu_params_tlv->cu_vdev_map_2, CU_VDEV_MAP_HB);
+		rx_cu_params->cu_vdev_map[4] =
+			le32_get_bits(rx_cu_params_tlv->cu_vdev_map_3, CU_VDEV_MAP_LB);
+		rx_cu_params->cu_vdev_map[5] =
+			le32_get_bits(rx_cu_params_tlv->cu_vdev_map_3, CU_VDEV_MAP_HB);
+		rx_cu_params->cu_vdev_map[6] =
+			le32_get_bits(rx_cu_params_tlv->cu_vdev_map_4, CU_VDEV_MAP_LB);
+		rx_cu_params->cu_vdev_map[7] =
+			le32_get_bits(rx_cu_params_tlv->cu_vdev_map_4, CU_VDEV_MAP_HB);
+		parse->mgmt_ml_info_done = true;
+		break;
+	}
+	return 0;
+}
+
 static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
 					u16 tag, u16 len,
 					const void *ptr, void *data)
 {
 	struct wmi_tlv_mgmt_rx_parse *parse = data;
+	int ret;
 
 	switch (tag) {
 	case WMI_TAG_MGMT_RX_HDR:
@@ -4574,12 +7629,130 @@
 		if (!parse->frame_buf_done) {
 			parse->frame_buf = ptr;
 			parse->frame_buf_done = true;
+		} else if (!parse->bpcc_buf_done) {
+			if (len == 0)
+				break;
+			parse->cu_params.bpcc_bufp = (void *)ptr;
+			parse->bpcc_buf_done = true;
 		}
 		break;
+	case WMI_TAG_MLO_MGMT_RX_REO_PARAMS:
+		parse->reo_params = (struct ath12k_wmi_mgmt_rx_reo_params *)ptr;
+		break;
+	case WMI_TAG_MLO_MGMT_RX_FW_CONSUMED_HDR:
+		parse->fw_consumed_reo_params = (struct ath12k_wmi_mgmt_rx_fw_consumed_hdr *)ptr;
+		break;
+	case WMI_TAG_ARRAY_STRUCT:
+		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+					  ath12k_wmi_mgmt_rx_sub_tlv_parse, parse);
+		if (ret) {
+			ath12k_warn(ab, "failed to parse mgmt rx sub tlv %d\n", ret);
+			return ret;
+		}
+		break;
+	}
+	return 0;
+}
+
+/* Extract management-rx reorder (REO) parameters from a firmware-consumed
+ * mgmt frame event into @hdr.
+ *
+ * Runs the generic mgmt-rx TLV parser over @skb, requires a
+ * WMI_TAG_MLO_MGMT_RX_FW_CONSUMED_HDR TLV, and fills hdr->pdev_id plus
+ * hdr->reo_params (valid flag and packet counter unpacked from
+ * mgmt_pkt_ctr_info, global timestamp, PPDU duration, and the derived
+ * start/end timestamps).
+ *
+ * Returns 0 on success, the parser's error, or -EINVAL when the
+ * fw-consumed header TLV is absent.
+ */
+static int ath12k_pull_fw_consumed_mgmt_rx_params_tlv(struct ath12k_base *ab,
+					  struct sk_buff *skb,
+					  struct ath12k_wmi_mgmt_rx_arg *hdr)
+{
+	struct wmi_tlv_mgmt_rx_parse parse = { };
+	int ret;
+	struct ath12k_wmi_mgmt_rx_fw_consumed_hdr *fw_con_reo_params_tlv = NULL;
+
+	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+			ath12k_wmi_tlv_mgmt_rx_parse,
+			&parse);
+	if (ret) {
+		ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
+		return ret;
 	}
+
+
+	fw_con_reo_params_tlv = parse.fw_consumed_reo_params;
+
+	if (!fw_con_reo_params_tlv) {
+		ret = -EINVAL;
+		ath12k_warn(ab, "no fw_consumed_reo_params\n");
+		return ret;
+	}
+
+	hdr->pdev_id = fw_con_reo_params_tlv->pdev_id;
+	hdr->reo_params.valid = FIELD_GET(WMI_MGMT_RX_FW_CONSUMED_PARAM_MGMT_PKT_CTR_VALID_GET,
+			fw_con_reo_params_tlv->mgmt_pkt_ctr_info);
+	hdr->reo_params.global_timestamp = fw_con_reo_params_tlv->global_timestamp;
+	hdr->reo_params.mgmt_pkt_ctr = FIELD_GET(WMI_MGMT_RX_FW_CONSUMED_PARAM_MGMT_PKT_CTR_GET,
+			fw_con_reo_params_tlv->mgmt_pkt_ctr_info);
+	hdr->reo_params.duration_us = fw_con_reo_params_tlv->rx_ppdu_duration_us;
+	/* End timestamp is derived: start (= global timestamp) + duration. */
+	hdr->reo_params.start_timestamp = hdr->reo_params.global_timestamp;
+	hdr->reo_params.end_timestamp = hdr->reo_params.start_timestamp +
+		hdr->reo_params.duration_us;
+
+	ath12k_dbg(ab, ATH12K_DBG_RX_REO, "FW Consumed : Mgmt Re-order ingress: valid %u global_ts %u pkt_ctr %u\n",
+			hdr->reo_params.valid,
+			hdr->reo_params.global_timestamp,
+			hdr->reo_params.mgmt_pkt_ctr);
+
+	return 0;
+}
 
+/* Return the index of the first allocated vdev in ar->allocated_vdev_map at
+ * or after bit position @pos.  Returns MAX_AP_MLDS_PER_LINK when no set bit
+ * is found in the scanned range (callers must treat that as "none").
+ */
+static u32 ath12k_get_ar_next_vdev_pos(struct ath12k *ar, u32 pos)
+{
+	bool bit;
+	u32 i = 0;
+
+	for (i = pos; i < MAX_AP_MLDS_PER_LINK; i++) {
+		bit = ar->allocated_vdev_map & (1LL << i);
+		if (bit)
+			break;
+	}
+	return i;
+}
+
+/* Propagate MLO critical-update (CU) info from a mgmt-rx event to each
+ * affected up-and-running ML link vif.
+ *
+ * For every hw link in the hw group, walks that radio's allocated vdevs
+ * (via ath12k_get_ar_next_vdev_pos) and computes the per-vdev critical
+ * flag from cu_params->cu_vdev_map plus a pointer into the BPCC buffer.
+ * Caller must hold rcu_read_lock() (hw_links is rcu_dereference'd here).
+ * Returns early if no BPCC buffer accompanied the event.
+ *
+ * NOTE(review): delivery to mac80211 is stubbed out — WARN(1) fires and
+ * the ieee80211_critical_update() call is commented out, so this function
+ * currently computes but does not apply the CU state.
+ */
+static void ath12k_update_cu_params(struct ath12k_base *ab,
+				    struct ath12k_mgmt_rx_cu_arg *cu_params)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k *ar;
+	struct ath12k_link_vif *arvif;
+	bool critical_flag;
+	u8 hw_link_id;
+	u8 *bpcc_ptr, *bpcc_bufp;
+	u32 vdev_id, pos = 0;
+	int i;
+
+	if (!cu_params->bpcc_bufp)
+		return;
+	/* Iterate over all the valid hw links */
+	for (hw_link_id = 0; hw_link_id < CU_MAX_MLO_LINKS; hw_link_id++) {
+		ar = rcu_dereference(ag->hw_links[hw_link_id]);
+		if (!ar)
+			continue;
+		pos = 0;
+		for (i = 0; i < ar->num_created_vdevs; i++) {
+			pos = ath12k_get_ar_next_vdev_pos(ar, pos);
+			vdev_id = pos;
+			pos++;
+			arvif = ath12k_mac_get_arvif(ar, vdev_id);
+			if (!arvif)
+				continue;
+			if (arvif->is_up && arvif->ahvif->vif->valid_links) {
+				critical_flag = cu_params->cu_vdev_map[hw_link_id] & (1 << i);
+				bpcc_bufp = cu_params->bpcc_bufp;
+				/* BPCC buffer is laid out as one byte per
+				 * (link, vdev slot) pair.
+				 */
+				bpcc_ptr = bpcc_bufp +
+					((hw_link_id * MAX_AP_MLDS_PER_LINK) + i);
+				WARN(1, "mlo not supported");
+				/* ieee80211_critical_update(arvif->ahvif->vif, */
+				/* 			  arvif->link_id, */
+				/* 			  critical_flag, */
+				/* 			  *bpcc_ptr); */
+			}
+		}
+	}
+}
+
 static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
 					  struct sk_buff *skb,
 					  struct ath12k_wmi_mgmt_rx_arg *hdr)
@@ -4588,7 +7761,9 @@
 	const struct ath12k_wmi_mgmt_rx_params *ev;
 	const u8 *frame;
 	int i, ret;
+	struct ath12k_wmi_mgmt_rx_reo_params *reo_params_tlv = NULL;
 
+	memset(&parse, 0, sizeof(parse));
 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
 				  ath12k_wmi_tlv_mgmt_rx_parse,
 				  &parse);
@@ -4600,6 +7775,13 @@
 	ev = parse.fixed;
 	frame = parse.frame_buf;
 
+	reo_params_tlv = parse.reo_params;
+	if (!reo_params_tlv) {
+		ret = -EINVAL;
+		ath12k_warn(ab, "no reo_params_tlv\n");
+		return ret;
+	}
+
 	if (!ev || !frame) {
 		ath12k_warn(ab, "failed to fetch mgmt rx hdr");
 		return -EPROTO;
@@ -4620,6 +7802,29 @@
 	for (i = 0; i < ATH_MAX_ANTENNA; i++)
 		hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);
 
+	hdr->reo_params.pdev_id = ev->pdev_id;
+
+	hdr->reo_params.valid = FIELD_GET(WMI_MGMT_RX_REO_PARAM_MGMT_PKT_CTR_VALID_GET,
+			reo_params_tlv->mgmt_pkt_ctr_link_info);
+	hdr->reo_params.global_timestamp = reo_params_tlv->global_timestamp;
+	hdr->reo_params.mgmt_pkt_ctr = FIELD_GET(WMI_MGMT_RX_REO_PARAM_MGMT_PKT_CTR_GET,
+			reo_params_tlv->mgmt_pkt_ctr_link_info);
+	hdr->reo_params.duration_us = reo_params_tlv->rx_ppdu_duration_us;
+	hdr->reo_params.start_timestamp = hdr->reo_params.global_timestamp;
+	hdr->reo_params.end_timestamp = hdr->reo_params.start_timestamp +
+		hdr->reo_params.duration_us;
+
+	ath12k_dbg(ab, ATH12K_DBG_RX_REO, "Mgmt Re-order ingress: channel %d valid %u global_ts %u pkt_ctr %u\n",
+			hdr->channel,
+			hdr->reo_params.valid,
+			hdr->reo_params.global_timestamp,
+			hdr->reo_params.mgmt_pkt_ctr);
+
+	if (parse.mgmt_ml_info_done) {
+		rcu_read_lock();
+		ath12k_update_cu_params(ab, &parse.cu_params);
+		rcu_read_unlock();
+	}
 	if (skb->len < (frame - skb->data) + hdr->buf_len) {
 		ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
 		return -EPROTO;
@@ -4635,12 +7840,20 @@
 }
 
 static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
-				    u32 status)
+				    u32 status,
+				    u32 ack_rssi)
 {
 	struct sk_buff *msdu;
 	struct ieee80211_tx_info *info;
 	struct ath12k_skb_cb *skb_cb;
+	struct ieee80211_hdr *hdr;
+	struct ieee80211_vif *vif;
+	struct ath12k_vif *ahvif;
+	struct ath12k_mgmt_frame_stats *mgmt_stats;
+	u16 frm_type;
+	int num_mgmt;
 
+	spin_lock_bh(&ar->data_lock);
 	spin_lock_bh(&ar->txmgmt_idr_lock);
 	msdu = idr_find(&ar->txmgmt_idr, desc_id);
 
@@ -4648,6 +7861,7 @@
 		ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
 			    desc_id);
 		spin_unlock_bh(&ar->txmgmt_idr_lock);
+		spin_unlock_bh(&ar->data_lock);
 		return -ENOENT;
 	}
 
@@ -4657,16 +7871,47 @@
 	skb_cb = ATH12K_SKB_CB(msdu);
 	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 
+	hdr = (struct ieee80211_hdr *)msdu->data;
+
+	if (ieee80211_is_mgmt(hdr->frame_control)) {
+		frm_type = FIELD_GET(IEEE80211_FCTL_STYPE, hdr->frame_control);
+		vif = skb_cb->vif;
+
+		if (!vif) {
+			ath12k_warn(ar->ab, "failed to find vif to update txcompl mgmt stats\n");
+			goto skip_mgmt_stats;
+		}
+
+		ahvif = ath12k_vif_to_ahvif(vif);
+		mgmt_stats = &ahvif->mgmt_stats;
+
+		if (!status)
+			mgmt_stats->tx_compl_succ[frm_type]++;
+		else
+			mgmt_stats->tx_compl_fail[frm_type]++;
+	}
+
+skip_mgmt_stats:
+	spin_unlock_bh(&ar->data_lock);
+
 	info = IEEE80211_SKB_CB(msdu);
-	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
+	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status) {
 		info->flags |= IEEE80211_TX_STAT_ACK;
+		info->status.ack_signal = ack_rssi;
+		info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+	}
+
+	ieee80211_tx_status_irqsafe(ar->ah->hw, msdu);
 
-	ieee80211_tx_status_irqsafe(ar->hw, msdu);
+	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
 
 	/* WARN when we received this event without doing any mgmt tx */
-	if (atomic_dec_if_positive(&ar->num_pending_mgmt_tx) < 0)
+	if (num_mgmt < 0)
 		WARN_ON_ONCE(1);
 
+	if (atomic_read(&ar->flush_request) && !num_mgmt)
+		wake_up(&ar->tx_empty_waitq);
+
 	return 0;
 }
 
@@ -4695,6 +7940,70 @@
 	param->pdev_id = ev->pdev_id;
 	param->desc_id = ev->desc_id;
 	param->status = ev->status;
+	param->ppdu_id = ev->ppdu_id;
+	param->ack_rssi = ev->ack_rssi;
+
+	kfree(tb);
+	return 0;
+}
+
+static void wmi_process_offchan_tx_comp(struct ath12k *ar, u32 desc_id,
+					u32 status)
+{
+	struct sk_buff *msdu;
+	struct ath12k_skb_cb *skb_cb;
+	struct ieee80211_tx_info *info;
+
+	spin_lock_bh(&ar->data_lock);
+	spin_lock_bh(&ar->txmgmt_idr_lock);
+	msdu = idr_find(&ar->txmgmt_idr, desc_id);
+
+	if (!msdu) {
+		spin_unlock_bh(&ar->txmgmt_idr_lock);
+		spin_unlock_bh(&ar->data_lock);
+		ath12k_warn(ar->ab, "received offchan tx compl for invalid msdu_id: %d\n",
+			    desc_id);
+		return;
+	}
+
+	idr_remove(&ar->txmgmt_idr, desc_id);
+	spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+	skb_cb = ATH12K_SKB_CB(msdu);
+	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+	spin_unlock_bh(&ar->data_lock);
+
+	info = IEEE80211_SKB_CB(msdu);
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && !status)
+		info->flags |= IEEE80211_TX_STAT_ACK;
+
+	ieee80211_tx_status_irqsafe(ar->ah->hw, msdu);
+}
+
+static int ath12k_pull_offchan_tx_compl_param_tlv(struct ath12k_base *ab,
+						  struct sk_buff *skb,
+						  struct wmi_offchan_data_tx_compl_event *params)
+{
+	const void **tb;
+	const struct wmi_offchan_data_tx_compl_event *ev;
+	int ret;
+
+	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TAG_OFFCHAN_DATA_TX_COMPL_EVENT];
+	if (!ev) {
+		ath12k_warn(ab, "failed to fetch offchan tx compl ev\n");
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	*params = *ev;
 
 	kfree(tb);
 	return 0;
@@ -4714,6 +8023,8 @@
 		break;
 	case ATH12K_SCAN_STARTING:
 		ar->scan.state = ATH12K_SCAN_RUNNING;
+		if (ar->scan.is_roc)
+			ieee80211_ready_on_channel(ar->ah->hw);
 		complete(&ar->scan.started);
 		break;
 	}
@@ -4795,7 +8106,10 @@
 		break;
 	case ATH12K_SCAN_RUNNING:
 	case ATH12K_SCAN_ABORTING:
-		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+		ar->scan_channel = ieee80211_get_channel(ar->ah->hw->wiphy,
+							 freq);
+		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+			complete(&ar->scan.on_channel);
 		break;
 	}
 }
@@ -4896,7 +8210,9 @@
 		return -EPROTO;
 	}
 
-	arg->mac_addr = ev->peer_macaddr.addr;
+	ether_addr_copy(arg->mac_addr, ev->peer_macaddr.addr);
+	arg->reason = __le32_to_cpu(ev->reason);
+	arg->rssi = __le32_to_cpu(ev->rssi);
 
 	kfree(tb);
 	return 0;
@@ -4934,13 +8250,14 @@
 static int freq_to_idx(struct ath12k *ar, int freq)
 {
 	struct ieee80211_supported_band *sband;
+	struct ieee80211_hw *hw = ar->ah->hw;
 	int band, ch, idx = 0;
 
 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
 		if (!ar->mac.sbands[band].channels)
 			continue;
 
-		sband = ar->hw->wiphy->bands[band];
+		sband = hw->wiphy->bands[band];
 		if (!sband)
 			continue;
 
@@ -5091,31 +8408,573 @@
 	return 0;
 }
 
-static int
-ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, u8 *evt_buf,
-			 u32 len, const struct wmi_pdev_temperature_event *ev)
+static void ath12k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
+					    struct ath12k_fw_stats_pdev *dst)
+{
+	dst->ch_noise_floor = src->chan_nf;
+	dst->tx_frame_count = src->tx_frame_count;
+	dst->rx_frame_count = src->rx_frame_count;
+	dst->rx_clear_count = src->rx_clear_count;
+	dst->cycle_count = src->cycle_count;
+	dst->phy_err_count = src->phy_err_count;
+	dst->chan_tx_power = src->chan_tx_pwr;
+}
+
+static void
+ath12k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
+			      struct ath12k_fw_stats_pdev *dst)
+{
+	dst->comp_queued = src->comp_queued;
+	dst->comp_delivered = src->comp_delivered;
+	dst->msdu_enqued = src->msdu_enqued;
+	dst->mpdu_enqued = src->mpdu_enqued;
+	dst->wmm_drop = src->wmm_drop;
+	dst->local_enqued = src->local_enqued;
+	dst->local_freed = src->local_freed;
+	dst->hw_queued = src->hw_queued;
+	dst->hw_reaped = src->hw_reaped;
+	dst->underrun = src->underrun;
+	dst->tx_abort = src->tx_abort;
+	dst->mpdus_requed = src->mpdus_requed;
+	dst->tx_ko = src->tx_ko;
+	dst->data_rc = src->data_rc;
+	dst->self_triggers = src->self_triggers;
+	dst->sw_retry_failure = src->sw_retry_failure;
+	dst->illgl_rate_phy_err = src->illgl_rate_phy_err;
+	dst->pdev_cont_xretry = src->pdev_cont_xretry;
+	dst->pdev_tx_timeout = src->pdev_tx_timeout;
+	dst->pdev_resets = src->pdev_resets;
+	dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure;
+	dst->phy_underrun = src->phy_underrun;
+	dst->txop_ovf = src->txop_ovf;
+}
+
+static void ath12k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
+					  struct ath12k_fw_stats_pdev *dst)
+{
+	dst->mid_ppdu_route_change = src->mid_ppdu_route_change;
+	dst->status_rcvd = src->status_rcvd;
+	dst->r0_frags = src->r0_frags;
+	dst->r1_frags = src->r1_frags;
+	dst->r2_frags = src->r2_frags;
+	dst->r3_frags = src->r3_frags;
+	dst->htt_msdus = src->htt_msdus;
+	dst->htt_mpdus = src->htt_mpdus;
+	dst->loc_msdus = src->loc_msdus;
+	dst->loc_mpdus = src->loc_mpdus;
+	dst->oversize_amsdu = src->oversize_amsdu;
+	dst->phy_errs = src->phy_errs;
+	dst->phy_err_drop = src->phy_err_drop;
+	dst->mpdu_errs = src->mpdu_errs;
+}
+
+static void
+ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats *src,
+			   struct ath12k_fw_stats_vdev *dst)
+{
+	int i;
+
+	dst->vdev_id = src->vdev_id;
+	dst->beacon_snr = src->beacon_snr;
+	dst->data_snr = src->data_snr;
+	dst->num_rx_frames = src->num_rx_frames;
+	dst->num_rts_fail = src->num_rts_fail;
+	dst->num_rts_success = src->num_rts_success;
+	dst->num_rx_err = src->num_rx_err;
+	dst->num_rx_discard = src->num_rx_discard;
+	dst->num_tx_not_acked = src->num_tx_not_acked;
+
+	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
+		dst->num_tx_frames[i] = src->num_tx_frames[i];
+
+	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
+		dst->num_tx_frames_retries[i] = src->num_tx_frames_retries[i];
+
+	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
+		dst->num_tx_frames_failures[i] = src->num_tx_frames_failures[i];
+
+	for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
+		dst->tx_rate_history[i] = src->tx_rate_history[i];
+
+	for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
+		dst->beacon_rssi_history[i] = src->beacon_rssi_history[i];
+}
+
+static void
+ath12k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src,
+			  struct ath12k_fw_stats_bcn *dst)
+{
+	dst->vdev_id = src->vdev_id;
+	dst->tx_bcn_succ_cnt = src->tx_bcn_succ_cnt;
+	dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt;
+}
+
+int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb,
+			     struct ath12k_fw_stats *stats)
 {
 	const void **tb;
-	int ret;
+	const struct wmi_stats_event *ev;
+	const void *data;
+	int i, ret;
+	struct ath12k *ar;
+	u32 len = skb->len;
 
-	tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, len, GFP_ATOMIC);
 	if (IS_ERR(tb)) {
 		ret = PTR_ERR(tb);
 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
 		return ret;
 	}
 
-	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
-	if (!ev) {
-		ath12k_warn(ab, "failed to fetch pdev temp ev");
+	ev = tb[WMI_TAG_STATS_EVENT];
+	data = tb[WMI_TAG_ARRAY_BYTE];
+	if (!ev || !data) {
+		ath12k_warn(ab, "failed to fetch update stats ev");
 		kfree(tb);
 		return -EPROTO;
 	}
 
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "wmi stats update ev pdev_id %d pdev %i vdev %i bcn %i\n",
+		   ev->pdev_id,
+		   ev->num_pdev_stats, ev->num_vdev_stats,
+		   ev->num_bcn_stats);
+
+	stats->pdev_id = le32_to_cpu(ev->pdev_id);
+	stats->stats_id = 0;
+
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+	if (!ar) {
+		kfree(tb); /* free parsed TLV table on this error path too */
+		return -EPROTO; }
+
+	for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) {
+		const struct wmi_pdev_stats *src;
+		struct ath12k_fw_stats_pdev *dst;
+
+		src = data;
+		if (len < sizeof(*src)) {
+			kfree(tb);
+			return -EPROTO;
+		}
+
+		stats->stats_id = WMI_REQUEST_PDEV_STAT;
+
+		data += sizeof(*src);
+		len -= sizeof(*src);
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath12k_wmi_pull_pdev_stats_base(&src->base, dst);
+		ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+		ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+		list_add_tail(&dst->list, &stats->pdevs);
+	}
+
+	for (i = 0; i < le32_to_cpu(ev->num_vdev_stats); i++) {
+		const struct wmi_vdev_stats *src;
+		struct ath12k_fw_stats_vdev *dst;
+
+		src = data;
+		if (len < sizeof(*src)) {
+			kfree(tb);
+			return -EPROTO;
+		}
+
+		stats->stats_id = WMI_REQUEST_VDEV_STAT;
+
+		data += sizeof(*src);
+		len -= sizeof(*src);
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath12k_wmi_pull_vdev_stats(src, dst);
+		list_add_tail(&dst->list, &stats->vdevs);
+	}
+
+	for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) {
+		const struct wmi_bcn_stats *src;
+		struct ath12k_fw_stats_bcn *dst;
+
+		src = data;
+		if (len < sizeof(*src)) {
+			kfree(tb);
+			return -EPROTO;
+		}
+
+		stats->stats_id = WMI_REQUEST_BCN_STAT;
+
+		data += sizeof(*src);
+		len -= sizeof(*src);
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath12k_wmi_pull_bcn_stats(src, dst);
+		list_add_tail(&dst->list, &stats->bcn);
+	}
+
 	kfree(tb);
 	return 0;
 }
 
+size_t ath12k_wmi_fw_stats_num_vdevs(struct list_head *head)
+{
+	struct ath12k_fw_stats_vdev *i;
+	size_t num = 0;
+
+	list_for_each_entry(i, head, list)
+		++num;
+
+	return num;
+}
+
+static size_t ath12k_wmi_fw_stats_num_bcn(struct list_head *head)
+{
+	struct ath12k_fw_stats_bcn *i;
+	size_t num = 0;
+
+	list_for_each_entry(i, head, list)
+		++num;
+
+	return num;
+}
+
+static void
+ath12k_wmi_fw_pdev_base_stats_fill(const struct ath12k_fw_stats_pdev *pdev,
+				   char *buf, u32 *length, u64 fw_soc_drop_cnt)
+{
+	u32 len = *length;
+	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n",
+			"ath12k PDEV stats");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+			"=================");
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"Channel noise floor", pdev->ch_noise_floor);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"Channel TX power", pdev->chan_tx_power);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"TX frame count", pdev->tx_frame_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"RX frame count", pdev->rx_frame_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"RX clear count", pdev->rx_clear_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"Cycle count", pdev->cycle_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"PHY error count", pdev->phy_err_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10llu\n",
+			"soc drop count", fw_soc_drop_cnt);
+
+	*length = len;
+}
+
+static void
+ath12k_wmi_fw_pdev_tx_stats_fill(const struct ath12k_fw_stats_pdev *pdev,
+				 char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+			 "ath12k PDEV TX stats");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+			 "====================");
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HTT cookies queued", pdev->comp_queued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HTT cookies disp.", pdev->comp_delivered);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDU queued", pdev->msdu_enqued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDU queued", pdev->mpdu_enqued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDUs dropped", pdev->wmm_drop);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Local enqued", pdev->local_enqued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Local freed", pdev->local_freed);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HW queued", pdev->hw_queued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PPDUs reaped", pdev->hw_reaped);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Num underruns", pdev->underrun);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PPDUs cleaned", pdev->tx_abort);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDUs requeued", pdev->mpdus_requed);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "Excessive retries", pdev->tx_ko);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "HW rate", pdev->data_rc);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "Sched self triggers", pdev->self_triggers);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "Dropped due to SW retries",
+			 pdev->sw_retry_failure);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "Illegal rate phy errors",
+			 pdev->illgl_rate_phy_err);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "PDEV continuous xretry", pdev->pdev_cont_xretry);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "TX timeout", pdev->pdev_tx_timeout);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "PDEV resets", pdev->pdev_resets);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "Stateless TIDs alloc failures",
+			 pdev->stateless_tid_alloc_failure);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "PHY underrun", pdev->phy_underrun);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "MPDU is more than txop limit", pdev->txop_ovf);
+	*length = len;
+}
+
+static void
+ath12k_wmi_fw_pdev_rx_stats_fill(const struct ath12k_fw_stats_pdev *pdev,
+				 char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+			 "ath12k PDEV RX stats");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+			 "====================");
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Mid PPDU route change",
+			 pdev->mid_ppdu_route_change);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Tot. number of statuses", pdev->status_rcvd);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on rings 0", pdev->r0_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on rings 1", pdev->r1_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on rings 2", pdev->r2_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on rings 3", pdev->r3_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDUs delivered to HTT", pdev->htt_msdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDUs delivered to HTT", pdev->htt_mpdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDUs delivered to stack", pdev->loc_msdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDUs delivered to stack", pdev->loc_mpdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Oversized AMSUs", pdev->oversize_amsdu);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PHY errors", pdev->phy_errs);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PHY errors drops", pdev->phy_err_drop);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+	*length = len;
+}
+
+static void
+ath12k_wmi_fw_vdev_stats_fill(struct ath12k *ar,
+			      const struct ath12k_fw_stats_vdev *vdev,
+			      char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+	struct ath12k_link_vif *arvif = ath12k_mac_get_arvif(ar, vdev->vdev_id);
+	u8 *vif_macaddr;
+	int i;
+
+	/* VDEV stats has all the active VDEVs of other PDEVs as well,
+	 * ignoring those not part of requested PDEV
+	 */
+	if (!arvif)
+		return;
+
+	vif_macaddr = arvif->addr;
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			 "VDEV ID", vdev->vdev_id);
+	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+			 "VDEV MAC address", vif_macaddr);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			 "beacon snr", vdev->beacon_snr);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			 "data snr", vdev->data_snr);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			 "num rx frames", vdev->num_rx_frames);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			 "num rts fail", vdev->num_rts_fail);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			 "num rts success", vdev->num_rts_success);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			 "num rx err", vdev->num_rx_err);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			 "num rx discard", vdev->num_rx_discard);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			 "num tx not acked", vdev->num_tx_not_acked);
+	len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
+			 "rx msdu byte cnt", arvif->vdev_stats.rx_msdu_byte_cnt);
+	len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
+			 "rx msdu pkt cnt", arvif->vdev_stats.rx_msdu_pkt_cnt);
+	len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
+			 "ack msdu byte cnt", arvif->vdev_stats.tx_msdu_byte_cnt);
+	len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
+			 "ack msdu pkt cnt", arvif->vdev_stats.tx_msdu_pkt_cnt);
+	len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
+			 "retry byte cnt", arvif->vdev_stats.tx_retry_byte_cnt);
+	len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
+			 "retry pkt cnt", arvif->vdev_stats.tx_retry_pkt_cnt);
+	len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
+			 "drop byte cnt", arvif->vdev_stats.tx_drop_byte_cnt);
+	len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
+			 "drop pkt cnt", arvif->vdev_stats.tx_drop_pkt_cnt);
+	len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
+			 "age out drop byte cnt",
+			 arvif->vdev_stats.tx_msdu_ttl_byte_cnt);
+	len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
+			 "age out drop pkt cnt",
+			 arvif->vdev_stats.tx_msdu_ttl_pkt_cnt);
+
+	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
+		len += scnprintf(buf + len, buf_len - len,
+				"%25s [%02d] %u\n",
+				"num tx frames", i,
+				vdev->num_tx_frames[i]);
+
+	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
+		len += scnprintf(buf + len, buf_len - len,
+				"%25s [%02d] %u\n",
+				"num tx frames retries", i,
+				vdev->num_tx_frames_retries[i]);
+
+	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
+		len += scnprintf(buf + len, buf_len - len,
+				"%25s [%02d] %u\n",
+				"num tx frames failures", i,
+				vdev->num_tx_frames_failures[i]);
+
+	for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
+		len += scnprintf(buf + len, buf_len - len,
+				"%25s [%02d] 0x%08x\n",
+				"tx rate history", i,
+				vdev->tx_rate_history[i]);
+
+	for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
+		len += scnprintf(buf + len, buf_len - len,
+				"%25s [%02d] %u\n",
+				"beacon rssi history", i,
+				vdev->beacon_rssi_history[i]);
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	*length = len;
+}
+
+static void
+ath12k_wmi_fw_bcn_stats_fill(struct ath12k *ar,
+			     const struct ath12k_fw_stats_bcn *bcn,
+			     char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+	struct ath12k_link_vif *arvif = ath12k_mac_get_arvif(ar, bcn->vdev_id);
+	u8 *vdev_macaddr;
+
+	if (!arvif) {
+		ath12k_warn(ar->ab, "invalid vdev id %d in bcn stats",
+			    bcn->vdev_id);
+		return;
+	}
+
+	vdev_macaddr = arvif->addr;
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			 "VDEV ID", bcn->vdev_id);
+	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+			 "VDEV MAC address", vdev_macaddr);
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+			 "================");
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			 "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			 "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	*length = len;
+}
+
+void ath12k_wmi_fw_stats_fill(struct ath12k *ar,
+			      struct ath12k_fw_stats *fw_stats,
+			      u32 stats_id, char *buf)
+{
+	u32 len = 0;
+	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+	const struct ath12k_fw_stats_pdev *pdev;
+	const struct ath12k_fw_stats_vdev *vdev;
+	const struct ath12k_fw_stats_bcn *bcn;
+	size_t num_bcn;
+
+	spin_lock_bh(&ar->data_lock);
+
+	if (stats_id == WMI_REQUEST_PDEV_STAT) {
+		pdev = list_first_entry_or_null(&fw_stats->pdevs,
+						struct ath12k_fw_stats_pdev, list);
+		if (!pdev) {
+			ath12k_warn(ar->ab, "failed to get pdev stats\n");
+			goto unlock;
+		}
+
+		ath12k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len,
+						   ar->ab->fw_soc_drop_count);
+		ath12k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+		ath12k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+	}
+
+	if (stats_id == WMI_REQUEST_VDEV_STAT) {
+		len += scnprintf(buf + len, buf_len - len, "\n");
+		len += scnprintf(buf + len, buf_len - len, "%30s\n",
+				 "ath12k VDEV stats");
+		len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				 "=================");
+
+		list_for_each_entry(vdev, &fw_stats->vdevs, list)
+			ath12k_wmi_fw_vdev_stats_fill(ar, vdev, buf, &len);
+	}
+
+	if (stats_id == WMI_REQUEST_BCN_STAT) {
+		num_bcn = ath12k_wmi_fw_stats_num_bcn(&fw_stats->bcn);
+
+		len += scnprintf(buf + len, buf_len - len, "\n");
+		len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+				 "ath12k Beacon stats", num_bcn);
+		len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				 "===================");
+
+		list_for_each_entry(bcn, &fw_stats->bcn, list)
+			ath12k_wmi_fw_bcn_stats_fill(ar, bcn, buf, &len);
+	}
+
+unlock:
+	spin_unlock_bh(&ar->data_lock);
+
+	if (len >= buf_len)
+		buf[len - 1] = 0;
+	else
+		buf[len] = 0;
+}
+
 static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
 {
 	/* try to send pending beacons first. they take priority */
@@ -5125,42 +8984,47 @@
 static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
 				       struct sk_buff *skb)
 {
+	struct ath12k_wmi_pdev *wmi = NULL;
+	u32 i;
+	u8 wmi_ep_count;
+	u8 eid;
+
+	eid = ATH12K_SKB_CB(skb)->eid;
 	dev_kfree_skb(skb);
-}
 
-static bool ath12k_reg_is_world_alpha(char *alpha)
-{
-	return alpha[0] == '0' && alpha[1] == '0';
-}
+	if (eid >= ATH12K_HTC_EP_COUNT)
+		return;
 
-static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
-{
-	struct ath12k_reg_info *reg_info = NULL;
-	struct ieee80211_regdomain *regd = NULL;
-	bool intersect = false;
-	int ret = 0, pdev_idx, i, j;
-	struct ath12k *ar;
+	wmi_ep_count = ab->htc.wmi_ep_count;
+	if (wmi_ep_count > ab->hw_params->max_radios)
+		return;
 
-	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
-	if (!reg_info) {
-		ret = -ENOMEM;
-		goto fallback;
+	for (i = 0; i < ab->htc.wmi_ep_count; i++) {
+		if (ab->wmi_ab.wmi[i].eid == eid) {
+			wmi = &ab->wmi_ab.wmi[i];
+			break;
+		}
 	}
 
-	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
-
-	if (ret) {
-		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
-		goto fallback;
+	if (wmi)
+		wake_up(&wmi->tx_ce_desc_wq);
 	}
 
+static int ath12k_reg_handle_chan_list(struct ath12k_base *ab,
+				       struct ath12k_reg_info *reg_info,
+				       enum ieee80211_ap_reg_power power_type)
+{
+	struct ieee80211_regdomain *regd;
+	int pdev_idx;
+	struct ath12k *ar;
+
 	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
 		/* In case of failure to set the requested ctry,
 		 * fw retains the current regd. We print a failure info
 		 * and return from here.
 		 */
 		ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n");
-		goto mem_free;
+		return -EINVAL;
 	}
 
 	pdev_idx = reg_info->phy_id;
@@ -5172,7 +9036,7 @@
 		 */
 		if (ab->hw_params->single_pdev_only &&
 		    pdev_idx < ab->hw_params->num_rxmda_per_pdev)
-			goto mem_free;
+			goto retfail;
 		else
 			goto fallback;
 	}
@@ -5183,26 +9047,20 @@
 	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
 	    !memcmp(ab->default_regd[pdev_idx]->alpha2,
 		    reg_info->alpha2, 2))
-		goto mem_free;
+		goto retfail;
 
-	/* Intersect new rules with default regd if a new country setting was
-	 * requested, i.e a default regd was already set during initialization
-	 * and the regd coming from this event has a valid country info.
-	 */
-	if (ab->default_regd[pdev_idx] &&
-	    !ath12k_reg_is_world_alpha((char *)
-		ab->default_regd[pdev_idx]->alpha2) &&
-	    !ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
-		intersect = true;
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "wmi handle chan list power type %d\n", power_type);
 
-	regd = ath12k_reg_build_regd(ab, reg_info, intersect);
+	regd = ath12k_reg_build_regd(ab, reg_info, power_type);
 	if (!regd) {
 		ath12k_warn(ab, "failed to build regd from reg_info\n");
 		goto fallback;
 	}
 
 	spin_lock(&ab->base_lock);
-	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+	if (ab->default_regd[pdev_idx] &&
+	    ab->regd_change_user_request[pdev_idx]) {
 		/* Once mac is registered, ar is valid and all CC events from
 		 * fw is considered to be received due to user requests
 		 * currently.
@@ -5213,7 +9071,8 @@
 		ar = ab->pdevs[pdev_idx].ar;
 		kfree(ab->new_regd[pdev_idx]);
 		ab->new_regd[pdev_idx] = regd;
-		ieee80211_queue_work(ar->hw, &ar->regd_update_work);
+		queue_work(ab->workqueue, &ar->regd_update_work);
+
 	} else {
 		/* Multiple events for the same *ar is not expected. But we
 		 * can still clear any previously stored default_regd if we
@@ -5227,7 +9086,7 @@
 	ab->dfs_region = reg_info->dfs_region;
 	spin_unlock(&ab->base_lock);
 
-	goto mem_free;
+	return 0;
 
 fallback:
 	/* Fallback to older reg (by sending previous country setting
@@ -5239,17 +9098,42 @@
 	 */
 	/* TODO: This is rare, but still should also be handled */
 	WARN_ON(1);
+retfail:
+	return -EINVAL;
+}
+
+
+static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+	struct ath12k_reg_info *reg_info;
+	int ret, i, j;
+
+	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
+	if (!reg_info)
+		return -ENOMEM;
+
+	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
+
+	if (ret) {
+		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
+		goto mem_free;
+	}
+
+	ret = ath12k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_UNSET_AP);
+	if (ret) {
+		ath12k_warn(ab, "failed to process regulatory info from received event\n");
+		goto mem_free;
+	}
 mem_free:
 	if (reg_info) {
 		kfree(reg_info->reg_rules_2g_ptr);
 		kfree(reg_info->reg_rules_5g_ptr);
 		if (reg_info->is_ext_reg_event) {
-			for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
+			for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
 				kfree(reg_info->reg_rules_6g_ap_ptr[i]);
-
-			for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
-				for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
-					kfree(reg_info->reg_rules_6g_client_ptr[j][i]);
+				for (j = 0; j < WMI_REG_MAX_CLIENT_TYPE; j++)
+					kfree(reg_info->reg_rules_6g_client_ptr[i][j]);
+			}
 		}
 		kfree(reg_info);
 	}
@@ -5277,7 +9161,7 @@
 
 		ether_addr_copy(ab->mac_addr,
 				fixed_param.ready_event_min.mac_addr.addr);
-		ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
+		ab->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum;
 		ab->wmi_ready = true;
 		break;
 	case WMI_TAG_ARRAY_FIXED_STRUCT:
@@ -5290,6 +9174,16 @@
 		for (i = 0; i < ab->num_radios; i++) {
 			pdev = &ab->pdevs[i];
 			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
+			if (!memcmp(pdev->mac_addr, "\x00\x03\x7f", 3)) {
+				/* randomly assigned mac address
+				 * chosen by firmware, but the problem
+				 * is that it will choose the same one
+				 * for all PCIe cards and we will get
+				 * duplicate addresses, at least make
+				 * sure to mark them locally assigned
+				 * so userspace will override them */
+				pdev->mac_addr[0] |= (1 << 1);
+			}
 		}
 		ab->pdevs_macaddr_valid = true;
 		break;
@@ -5327,7 +9221,7 @@
 	}
 
 	rcu_read_lock();
-	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
+	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_del_resp.vdev_id);
 	if (!ar) {
 		ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
 			    peer_del_resp.vdev_id);
@@ -5337,7 +9231,7 @@
 
 	complete(&ar->peer_delete_done);
 	rcu_read_unlock();
-	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
+	ath12k_dbg(ab, ATH12K_DBG_PEER, "peer delete resp for vdev id %d addr %pM\n",
 		   peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
 }
 
@@ -5380,6 +9274,14 @@
 		return "dfs violation";
 	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
 		return "invalid regdomain";
+	case WMI_VDEV_START_RESPONSE_INVALID_BAND:
+		return "invalid band";
+	case WMI_VDEV_START_RESPONSE_INVALID_PREFERRED_TX_RX_STREAMS:
+		/** Invalid preferred tx/rx streams */
+		return "invalid TX/RX streams";
+	case WMI_VDEV_START_RESPONSE_INVALID_TX_VAP_CONFIG:
+		/** Invalid tx_vap config in VDEV start */
+		return "invalid tx vap config";
 	default:
 		return "unknown";
 	}
@@ -5397,7 +9299,7 @@
 	}
 
 	rcu_read_lock();
-	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
+	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_start_resp.vdev_id);
 	if (!ar) {
 		ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
 			    vdev_start_resp.vdev_id);
@@ -5406,8 +9308,9 @@
 	}
 
 	ar->last_wmi_vdev_start_status = 0;
+	ar->max_allowed_tx_power = vdev_start_resp.max_allowed_tx_power;
 
-	status = le32_to_cpu(vdev_start_resp.status);
+	status = vdev_start_resp.status;
 
 	if (WARN_ON_ONCE(status)) {
 		ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
@@ -5425,6 +9328,7 @@
 
 static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
 {
+	struct ath12k_link_vif *arvif;
 	u32 vdev_id, tx_status;
 
 	if (ath12k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
@@ -5432,6 +9336,16 @@
 		ath12k_warn(ab, "failed to extract bcn tx status");
 		return;
 	}
+
+	rcu_read_lock();
+	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id);
+	if (!arvif) {
+		ath12k_warn(ab, "invalid vdev id %d in bcn_tx_status",
+			    vdev_id);
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
 }
 
 static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
@@ -5460,52 +9374,2394 @@
 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
 }
 
+/**
+ * ath12k_mgmt_rx_reo_compare_pkt_ctrs_gte() - Compare given mgmt packet counters
+ * @ctr1: Management packet counter1
+ * @ctr2: Management packet counter2
+ *
+ * We can't directly use the comparison operator here because the counters can
+ * overflow. But these counters have a property that the difference between
+ * them can never be greater than half the range of the data type.
+ * We can make use of this condition to detect which one is actually greater.
+ *
+ * Return: true if @ctr1 is greater than or equal to @ctr2, else false
+ */
+static inline bool
+ath12k_mgmt_rx_reo_compare_pkt_ctrs_gte(u16 ctr1, u16 ctr2)
+{
+	u16 delta = ctr1 - ctr2;
+
+	return delta <= ATH12K_MGMT_RX_REO_PKT_CTR_HALF_RANGE;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_subtract_pkt_ctrs() - Subtract given mgmt packet counters
+ * @ctr1: Management packet counter1
+ * @ctr2: Management packet counter2
+ *
+ * We can't directly use the subtract operator here because the counters can
+ * overflow. But these counters have a property that the difference between
+ * them can never be greater than half the range of the data type.
+ * We can make use of this condition to detect which one is actually greater and
+ * return the difference accordingly.
+ *
+ * Return: Difference between @ctr1 and @ctr2
+ */
+static inline int
+ath12k_mgmt_rx_reo_subtract_pkt_ctrs(u16 ctr1, u16 ctr2)
+{
+	u16 delta = ctr1 - ctr2;
+
+	/**
+	 * if delta is greater than half the range (i.e, ctr1 is actually
+	 * smaller than ctr2), then the result should be a negative number.
+	 * subtracting the entire range should give the correct value.
+	 */
+	if (delta > ATH12K_MGMT_RX_REO_PKT_CTR_HALF_RANGE)
+		return delta - ATH12K_MGMT_RX_REO_PKT_CTR_FULL_RANGE;
+
+	return delta;
+}
+
+#define ATH12K_MGMT_RX_REO_GLOBAL_TS_HALF_RANGE (0x80000000)
+/**
+ * ath12k_mgmt_rx_reo_compare_global_timestamps_gte() - Compare given global timestamps
+ * @ts1: Global timestamp1
+ * @ts2: Global timestamp2
+ *
+ * We can't directly use the comparison operator here because the timestamps can
+ * overflow. But these timestamps have a property that the difference between
+ * them can never be greater than half the range of the data type.
+ * We can make use of this condition to detect which one is actually greater.
+ *
+ * Return: true if @ts1 is greater than or equal to @ts2, else false
+ */
+static inline bool
+ath12k_mgmt_rx_reo_compare_global_timestamps_gte(u32 ts1, u32 ts2)
+{
+	u32 delta = ts1 - ts2;
+
+	return delta <= ATH12K_MGMT_RX_REO_GLOBAL_TS_HALF_RANGE;
+}
+
+#define ATH12K_RX_REO_REORD_MAX_DELTA	0xFFFF
+/**
+ * ath12k_wlan_mgmt_rx_reo_update_host_snapshot() - Update Host snapshot with the MGMT
+ * Rx REO parameters.
+ * @desc: pointer to frame descriptor
+ *
+ * Return: 0 on Success, Error value on failure of operation
+ */
+static int
+ath12k_wlan_mgmt_rx_reo_update_host_snapshot(struct ath12k *ar,
+					     struct ath12k_mgmt_rx_reo_frame_descriptor *desc)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
+	struct ath12k_mgmt_rx_reo_snapshot_params *host_ss;
+	struct ath12k_mgmt_rx_reo_params *reo_params;
+	int pkt_ctr_delta;
+
+	if (!desc) {
+		ath12k_err(ab, "Mgmt Rx REO frame descriptor null\n");
+		return -EINVAL;
+	}
+
+	if (!desc->rx_params) {
+		ath12k_err(ab, "Mgmt Rx params NULL\n");
+		return -EINVAL;
+	}
+
+	reo_params = &desc->rx_params->reo_params;
+
+	rx_reo_pdev_ctx = &ar->rx_reo_pdev_ctx;
+	if (!rx_reo_pdev_ctx) {
+		ath12k_err(ab, "Mgmt Rx REO context empty for pdev\n");
+		return -EINVAL;
+	}
+
+	/* FW should send valid REO parameters */
+	if (!reo_params->valid) {
+		ath12k_err(ab, "Mgmt Rx REO params is invalid\n");
+		return -EINVAL;
+	}
+
+	host_ss = &rx_reo_pdev_ctx->host_snapshot;
+
+	if (!host_ss->valid) {
+		desc->pkt_ctr_delta = 1;
+		goto update_host_ss;
+	}
+
+	if (ath12k_mgmt_rx_reo_compare_pkt_ctrs_gte(host_ss->mgmt_pkt_ctr,
+						    reo_params->mgmt_pkt_ctr)) {
+		ath12k_err(ab, "Cur frame ctr < last frame ctr for link = %u",
+			   reo_params->link_id);
+		goto failure_debug;
+	}
+
+	pkt_ctr_delta = ath12k_mgmt_rx_reo_subtract_pkt_ctrs(reo_params->mgmt_pkt_ctr,
+							     host_ss->mgmt_pkt_ctr);
+	BUG_ON(!(pkt_ctr_delta > 0));
+	desc->pkt_ctr_delta = pkt_ctr_delta;
+
+	if (pkt_ctr_delta == 1)
+		goto update_host_ss;
+
+	/* Under back pressure scenarios, FW may drop management Rx frame
+	 * WMI events. So holes in the management packet counter is expected.
+	 * Add a debug print and optional assert to track the holes.
+	 */
+	ath12k_dbg(ab, ATH12K_DBG_RX_REO, "---- Rx reo reordering(this info is seen since pkt_ctr_delta > 0) ----\n");
+	ath12k_dbg(ab, ATH12K_DBG_RX_REO, "pkt_ctr_delta = %u\n", pkt_ctr_delta);
+	ath12k_dbg(ab, ATH12K_DBG_RX_REO, "Cur frame valid = %u, pkt_ctr = %u, ts =%u\n",
+		   reo_params->valid, reo_params->mgmt_pkt_ctr,
+			reo_params->global_timestamp);
+	ath12k_dbg(ab, ATH12K_DBG_RX_REO, "Last frame valid = %u, pkt_ctr = %u, ts =%u\n",
+		   host_ss->valid, host_ss->mgmt_pkt_ctr,
+			host_ss->global_timestamp);
+
+	if (pkt_ctr_delta > ATH12K_RX_REO_REORD_MAX_DELTA) {
+		ath12k_err(ab, "pkt ctr delta %u > thresh %u for link %u",
+			   pkt_ctr_delta, ATH12K_RX_REO_REORD_MAX_DELTA,
+				reo_params->link_id);
+		goto failure_debug;
+	}
+
+update_host_ss:
+	ath12k_dbg(ab, ATH12K_DBG_RX_REO, "Last frame valid = %u, pkt_ctr = %u, ts =%u\n",
+		   host_ss->valid, host_ss->mgmt_pkt_ctr,
+			host_ss->global_timestamp);
+
+	host_ss->valid = true;
+	host_ss->global_timestamp = reo_params->global_timestamp;
+	host_ss->mgmt_pkt_ctr = reo_params->mgmt_pkt_ctr;
+
+	return 0;
+
+failure_debug:
+	ath12k_err(ab, "Cur frame valid = %u, pkt_ctr = %u, ts =%u\n",
+		   reo_params->valid, reo_params->mgmt_pkt_ctr,
+			reo_params->global_timestamp);
+	ath12k_err(ab, "Last frame valid = %u, pkt_ctr = %u, ts =%u\n",
+		   host_ss->valid, host_ss->mgmt_pkt_ctr,
+			host_ss->global_timestamp);
+	BUG_ON(1);
+
+	return -EINVAL;
+}
+
+/**
+ * ath12k_wmi_mgmt_rx_reo_read_snapshot_raw() - Read raw value of management
+ * rx-reorder snapshot
+ * @snapshot_address: snapshot address
+ * @ath12k_mgmt_rx_reo_snapshot_low: Pointer to lower 32 bits of snapshot value
+ * @ath12k_mgmt_rx_reo_snapshot_high: Pointer to higher 32 bits of snapshot value
+ * @snapshot_version: snapshot version
+ *
+ * Read raw value of management rx-reorder snapshots.
+ *
+ * Return: 0 on Success, Error value on failure
+ */
+static int
+ath12k_wmi_mgmt_rx_reo_read_snapshot_raw
+			(struct ath12k_mgmt_rx_reo_shared_snapshot *snapshot_address,
+			 u32 *ath12k_mgmt_rx_reo_snapshot_low,
+			 u32 *ath12k_mgmt_rx_reo_snapshot_high,
+			 u8 snapshot_version,
+			 struct ath12k_mgmt_rx_reo_shared_snapshot *raw_snapshot)
+{
+	u32 prev_snapshot_low;
+	u32 prev_snapshot_high;
+	u32 cur_snapshot_low;
+	u32 cur_snapshot_high;
+	u8 retry_count = 0;
+
+	if (snapshot_version == 1) {
+		*ath12k_mgmt_rx_reo_snapshot_low =
+			snapshot_address->u_low.ath12k_mgmt_rx_reo_snapshot_low;
+		*ath12k_mgmt_rx_reo_snapshot_high =
+			snapshot_address->u_high.ath12k_mgmt_rx_reo_snapshot_high;
+		raw_snapshot->u_low.ath12k_mgmt_rx_reo_snapshot_low =
+			*ath12k_mgmt_rx_reo_snapshot_low;
+		raw_snapshot->u_high.ath12k_mgmt_rx_reo_snapshot_high =
+			*ath12k_mgmt_rx_reo_snapshot_high;
+		return 0;
+	}
+
+	prev_snapshot_low = snapshot_address->u_low.ath12k_mgmt_rx_reo_snapshot_low;
+	prev_snapshot_high = snapshot_address->u_high.ath12k_mgmt_rx_reo_snapshot_high;
+	raw_snapshot->u_low.ath12k_mgmt_rx_reo_snapshot_low = prev_snapshot_low;
+	raw_snapshot->u_high.ath12k_mgmt_rx_reo_snapshot_high = prev_snapshot_high;
+
+	for (; retry_count < (ATH12K_MGMT_RX_REO_SNAPSHOT_B2B_READ_SWAR_RETRY_LIMIT - 1);
+			retry_count++) {
+		cur_snapshot_low = snapshot_address->u_low.ath12k_mgmt_rx_reo_snapshot_low;
+		cur_snapshot_high = snapshot_address->u_high.ath12k_mgmt_rx_reo_snapshot_high;
+
+		raw_snapshot[retry_count + 1].u_low.ath12k_mgmt_rx_reo_snapshot_low =
+			cur_snapshot_low;
+		raw_snapshot[retry_count + 1].u_high.ath12k_mgmt_rx_reo_snapshot_high =
+			cur_snapshot_high;
+
+		if (prev_snapshot_low == cur_snapshot_low &&
+		    prev_snapshot_high == cur_snapshot_high)
+			break;
+
+		prev_snapshot_low = cur_snapshot_low;
+		prev_snapshot_high = cur_snapshot_high;
+	}
+
+	BUG_ON(retry_count ==
+			(ATH12K_MGMT_RX_REO_SNAPSHOT_B2B_READ_SWAR_RETRY_LIMIT - 1));
+
+	*ath12k_mgmt_rx_reo_snapshot_low = cur_snapshot_low;
+	*ath12k_mgmt_rx_reo_snapshot_high = cur_snapshot_high;
+
+	return 0;
+}
+
+/**
+ * Helper macros/functions for params GET/SET of different hw version
+ * of the mgmt_rx_reo_snapshot
+ */
+
+static inline u8
+ath12k_mlo_shmem_mgmt_rx_reo_snapshot_valid_get(struct ath12k_base *ab,
+						u32 ath12k_mgmt_rx_reo_snapshot_low,
+						u8 snapshot_ver)
+{
+	if ((snapshot_ver != ATH12K_MGMT_RX_REO_SNAPSHOT_VERSION_TIMESTAMP_REDUNDANCY) &&
+	    (snapshot_ver != ATH12K_MGMT_RX_REO_SNAPSHOT_VERSION_PKT_CTR_REDUNDANCY)) {
+		ath12k_err(ab, "Check this error snapshot ver %d\n", snapshot_ver);
+		BUG_ON(1);
+	}
+
+	return ATH12K_MLO_SHMEM_GET_BITS(ath12k_mgmt_rx_reo_snapshot_low, 31, 1);
+}
+
+static inline u32
+ath12k_mlo_shmem_mgmt_rx_reo_snapshot_global_timestamp_get(
+		struct ath12k_base *ab,
+	u32 ath12k_mgmt_rx_reo_snapshot_low,
+	u32 ath12k_mgmt_rx_reo_snapshot_high,
+	u8 snapshot_ver)
+{
+	if (snapshot_ver == ATH12K_MGMT_RX_REO_SNAPSHOT_VERSION_TIMESTAMP_REDUNDANCY) {
+		return ath12k_mgmt_rx_reo_snapshot_high;
+	} else if (snapshot_ver == ATH12K_MGMT_RX_REO_SNAPSHOT_VERSION_PKT_CTR_REDUNDANCY) {
+		return
+			((ATH12K_MLO_SHMEM_GET_BITS(ath12k_mgmt_rx_reo_snapshot_high, 15, 17) << 15) |
+			 ATH12K_MLO_SHMEM_GET_BITS(ath12k_mgmt_rx_reo_snapshot_low, 0, 15));
+	} else {
+		ath12k_err(ab, "Check this error snapshot ver %d\n", snapshot_ver);
+		BUG_ON(1);
+		return 0;
+	}
+}
+
+static inline u16
+mlo_shmem_mgmt_rx_reo_snapshot_mgmt_pkt_ctr_get(
+		struct ath12k_base *ab,
+	u32 ath12k_mgmt_rx_reo_snapshot_low, u8 snapshot_ver)
+{
+	if (snapshot_ver == ATH12K_MGMT_RX_REO_SNAPSHOT_VERSION_TIMESTAMP_REDUNDANCY) {
+		return ATH12K_MLO_SHMEM_GET_BITS(ath12k_mgmt_rx_reo_snapshot_low, 0, 16);
+	} else if (snapshot_ver == ATH12K_MGMT_RX_REO_SNAPSHOT_VERSION_PKT_CTR_REDUNDANCY) {
+		return ATH12K_MLO_SHMEM_GET_BITS(ath12k_mgmt_rx_reo_snapshot_low, 15, 16);
+	} else {
+		ath12k_err(ab, "Check this error snapshot ver %d\n", snapshot_ver);
+		BUG_ON(1);
+		return 0;
+	}
+}
+
+#define ATH12K_MLO_SHMEM_MGMT_RX_REO_SNAPSHOT_MGMT_PKT_CTR_REDUNDANT_GET( \
+	ath12k_mgmt_rx_reo_snapshot_high) \
+	ATH12K_MLO_SHMEM_GET_BITS(ath12k_mgmt_rx_reo_snapshot_high, 0, 15)
+
+#define ATH12K_MLO_SHMEM_MGMT_RX_REO_SNAPSHOT_GLOBAL_TIMESTAMP_REDUNDANT_GET( \
+	ath12k_mgmt_rx_reo_snapshot_low) \
+	ATH12K_MLO_SHMEM_GET_BITS(ath12k_mgmt_rx_reo_snapshot_low, 16, 15)
+
+static inline bool
+ath12k_mlo_shmem_mgmt_rx_reo_snapshot_check_consistency(
+		struct ath12k_base *ab,
+		u32 ath12k_mgmt_rx_reo_snapshot_low,
+		u32 ath12k_mgmt_rx_reo_snapshot_high,
+		u8 snapshot_ver)
+{
+	if (snapshot_ver == ATH12K_MGMT_RX_REO_SNAPSHOT_VERSION_TIMESTAMP_REDUNDANCY) {
+		u32 global_timestamp;
+		u32 global_timestamp_redundant;
+
+		global_timestamp =
+			ath12k_mlo_shmem_mgmt_rx_reo_snapshot_global_timestamp_get(ab,
+					ath12k_mgmt_rx_reo_snapshot_low,
+					ath12k_mgmt_rx_reo_snapshot_high,
+					snapshot_ver);
+		global_timestamp_redundant =
+			ATH12K_MLO_SHMEM_MGMT_RX_REO_SNAPSHOT_GLOBAL_TIMESTAMP_REDUNDANT_GET(
+					ath12k_mgmt_rx_reo_snapshot_low);
+
+		return
+			(ATH12K_MLO_SHMEM_GET_BITS(global_timestamp, 0, 15) ==
+			 ATH12K_MLO_SHMEM_GET_BITS(global_timestamp_redundant, 0, 15));
+	} else if (snapshot_ver == ATH12K_MGMT_RX_REO_SNAPSHOT_VERSION_PKT_CTR_REDUNDANCY) {
+		u16 mgmt_pkt_ctr;
+		u16 mgmt_pkt_ctr_redundant;
+
+		mgmt_pkt_ctr = mlo_shmem_mgmt_rx_reo_snapshot_mgmt_pkt_ctr_get(ab,
+				ath12k_mgmt_rx_reo_snapshot_low, snapshot_ver);
+		mgmt_pkt_ctr_redundant =
+			ATH12K_MLO_SHMEM_MGMT_RX_REO_SNAPSHOT_MGMT_PKT_CTR_REDUNDANT_GET(
+					ath12k_mgmt_rx_reo_snapshot_high);
+
+		return
+			(ATH12K_MLO_SHMEM_GET_BITS(mgmt_pkt_ctr, 0, 15) ==
+			 ATH12K_MLO_SHMEM_GET_BITS(mgmt_pkt_ctr_redundant, 0, 15));
+	} else {
+		ath12k_err(ab, "Check this error snapshot ver %d\n", snapshot_ver);
+		BUG_ON(1);
+		return 0;
+	}
+}
+
+/**
+ * ath12k_mgmt_rx_reo_snapshot_get_mgmt_pkt_ctr() - Get the management packet counter
+ * from an MGMT Rx REO snapshot
+ * @snapshot_low: lower 32-bits of the snapshot
+ * @snapshot_version: snapshot version
+ *
+ * Return: Management packet counter of the snapshot
+ */
+static u16 ath12k_mgmt_rx_reo_snapshot_get_mgmt_pkt_ctr(
+		struct ath12k_base *ab,
+		u32 mgmt_rx_reo_snapshot_low,
+			      u8 snapshot_version)
+{
+	if (snapshot_version == MGMT_RX_REO_SNAPSHOT_VERSION_TIMESTAMP_REDUNDANCY) {
+		return ATH12K_MLO_SHMEM_GET_BITS(mgmt_rx_reo_snapshot_low, 0, 16);
+	} else if (snapshot_version == MGMT_RX_REO_SNAPSHOT_VERSION_PKT_CTR_REDUNDANCY) {
+		return ATH12K_MLO_SHMEM_GET_BITS(mgmt_rx_reo_snapshot_low, 15, 16);
+	} else {
+		ath12k_err(ab, "Check this error snapshot ver %d\n", snapshot_version);
+		BUG_ON(1);
+		return 0;
+	}
+}
+
+/**
+ * ath12k_wmi_mgmt_rx_reo_read_snapshot() - Read management rx-reorder snapshot
+ * @pdev: pdev pointer
+ * @snapshot_info: Snapshot info
+ * @id: Snapshot ID
+ * @snapshot_value: Pointer to snapshot value
+ *
+ * Read management rx-reorder snapshots from target.
+ *
+ * Return: 0 on Success, Error value on failure
+ */
+static int
+ath12k_wmi_mgmt_rx_reo_read_snapshot(
+			struct ath12k *ar,
+			struct ath12k_mgmt_rx_reo_snapshot_info *snapshot_info,
+			enum ath12k_mgmt_rx_reo_shared_snapshot_id id,
+			struct ath12k_mgmt_rx_reo_snapshot_params *snapshot_value,
+			struct ath12k_mgmt_rx_reo_shared_snapshot (*raw_snapshot)
+			[ATH12K_MGMT_RX_REO_SNAPSHOT_B2B_READ_SWAR_RETRY_LIMIT])
+{
+	struct ath12k_base *ab = ar->ab;
+	bool snapshot_valid;
+	u16 mgmt_pkt_ctr;
+	u32 global_timestamp;
+	u32 ath12k_mgmt_rx_reo_snapshot_low;
+	u32 ath12k_mgmt_rx_reo_snapshot_high;
+	u8 retry_count;
+	int status;
+	struct ath12k_mgmt_rx_reo_shared_snapshot *snapshot_address;
+	u8 snapshot_version;
+
+	if (!snapshot_info) {
+		ath12k_err(ab, "Mgmt Rx REO snapshot info null\n");
+		return -EINVAL;
+	}
+
+	snapshot_address = snapshot_info->address;
+	if (!snapshot_address) {
+		ath12k_err(ab, "Mgmt Rx REO snapshot address null\n");
+		return -EINVAL;
+	}
+
+	snapshot_version = snapshot_info->version;
+
+	if (!snapshot_value) {
+		ath12k_err(ab, "Mgmt Rx REO snapshot null\n");
+		return -EINVAL;
+	}
+
+	memset(snapshot_value, 0, sizeof(*snapshot_value));
+
+	switch (id) {
+	case ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW:
+	case ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED:
+	case ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED:
+		retry_count = 0;
+		for (; retry_count < ATH12K_MGMT_RX_REO_SNAPSHOT_READ_RETRY_LIMIT;
+				retry_count++) {
+			status = ath12k_wmi_mgmt_rx_reo_read_snapshot_raw
+				(snapshot_address,
+				 &ath12k_mgmt_rx_reo_snapshot_low,
+				 &ath12k_mgmt_rx_reo_snapshot_high,
+				 snapshot_version,
+				 raw_snapshot[retry_count]);
+
+			if (status) {
+				ath12k_err(ab, "Failed to read snapshot %d status %d\n",
+						id, status);
+				return -EINVAL;
+			}
+
+			snapshot_valid = ath12k_mlo_shmem_mgmt_rx_reo_snapshot_valid_get(ab,
+					ath12k_mgmt_rx_reo_snapshot_low,
+					snapshot_version);
+
+			if (!snapshot_valid) {
+				ath12k_dbg(ab, ATH12K_DBG_RX_REO, "Invalid REO snapshot value");
+				snapshot_value->valid = false;
+				snapshot_value->mgmt_pkt_ctr =
+					ath12k_mgmt_rx_reo_snapshot_get_mgmt_pkt_ctr
+					(ab, ath12k_mgmt_rx_reo_snapshot_low,
+					 snapshot_version);
+				snapshot_value->global_timestamp =
+					ath12k_mlo_shmem_mgmt_rx_reo_snapshot_global_timestamp_get
+					(ab, ath12k_mgmt_rx_reo_snapshot_low,
+					 ath12k_mgmt_rx_reo_snapshot_high,
+					 snapshot_version);
+				snapshot_value->retry_count = retry_count + 1;
+				return 0;
+			}
+
+			if (ath12k_mlo_shmem_mgmt_rx_reo_snapshot_check_consistency
+					(ar->ab, ath12k_mgmt_rx_reo_snapshot_low,
+					 ath12k_mgmt_rx_reo_snapshot_high,
+					 snapshot_version)) {
+				global_timestamp =
+					ath12k_mlo_shmem_mgmt_rx_reo_snapshot_global_timestamp_get
+					(ab, ath12k_mgmt_rx_reo_snapshot_low,
+					 ath12k_mgmt_rx_reo_snapshot_high,
+					 snapshot_version);
+				mgmt_pkt_ctr =
+					mlo_shmem_mgmt_rx_reo_snapshot_mgmt_pkt_ctr_get
+					(ab, ath12k_mgmt_rx_reo_snapshot_low,
+					 snapshot_version);
+				break;
+			}
+			ath12k_info(ab, "Inconsistent snapshot %d, version=%u, low=0x%x, high=0x%x, retry=%u\n",
+					id, snapshot_version,
+					ath12k_mgmt_rx_reo_snapshot_low,
+					ath12k_mgmt_rx_reo_snapshot_high,
+					retry_count);
+		}
+
+		if (retry_count == ATH12K_MGMT_RX_REO_SNAPSHOT_READ_RETRY_LIMIT) {
+			ath12k_err(ab, "Read retry limit, id = %d, ver = %u\n",
+					id, snapshot_version);
+			BUG_ON(1);
+			return -EINVAL;
+		}
+
+		snapshot_value->valid = true;
+		snapshot_value->mgmt_pkt_ctr = mgmt_pkt_ctr;
+		snapshot_value->global_timestamp = global_timestamp;
+		snapshot_value->retry_count = retry_count + 1;
+		status = 0;
+		break;
+
+	default:
+		ath12k_err(ab, "Invalid snapshot id %d\n", id);
+		status = -EINVAL;
+		break;
+	}
+
+	return status;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_print_snapshots() - Print all snapshots related
+ * to management Rx reorder module
+ * @mac_hw_ss: MAC HW snapshot
+ * @fw_forwarded_ss: FW forwarded snapshot
+ * @fw_consumed_ss: FW consumed snapshot
+ * @host_ss: Host snapshot
+ *
+ * return: int
+ */
+static int
+ath12k_mgmt_rx_reo_print_snapshots
+	    (struct ath12k_base *ab,
+			 struct ath12k_mgmt_rx_reo_snapshot_params *mac_hw_ss,
+	     struct ath12k_mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
+	     struct ath12k_mgmt_rx_reo_snapshot_params *fw_consumed_ss,
+	     struct ath12k_mgmt_rx_reo_snapshot_params *host_ss)
+{
+	ath12k_dbg(ab, ATH12K_DBG_RX_REO, "HW SS: valid = %u, ctr = %u, ts = %u\n",
+		   mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
+			mac_hw_ss->global_timestamp);
+	ath12k_dbg(ab, ATH12K_DBG_RX_REO, "FW forwarded SS: valid = %u, ctr = %u, ts = %u\n",
+		   fw_forwarded_ss->valid,
+			fw_forwarded_ss->mgmt_pkt_ctr,
+			fw_forwarded_ss->global_timestamp);
+	ath12k_dbg(ab, ATH12K_DBG_RX_REO, "FW consumed SS: valid = %u, ctr = %u, ts = %u\n",
+		   fw_consumed_ss->valid,
+			fw_consumed_ss->mgmt_pkt_ctr,
+			fw_consumed_ss->global_timestamp);
+	ath12k_dbg(ab, ATH12K_DBG_RX_REO, "HOST SS: valid = %u, ctr = %u, ts = %u\n",
+		   host_ss->valid, host_ss->mgmt_pkt_ctr,
+			host_ss->global_timestamp);
+
+	return 0;
+}
+
+/**
+ * ath12k_wmi_mgmt_rx_reo_invalidate_stale_snapshots() - Invalidate stale management
+ * Rx REO snapshots
+ * @mac_hw_ss: MAC HW snapshot
+ * @fw_forwarded_ss: FW forwarded snapshot
+ * @fw_consumed_ss: FW consumed snapshot
+ * @host_ss: Host snapshot
+ * @link: link ID
+ *
+ * return: int
+ */
+static int
+ath12k_wmi_mgmt_rx_reo_invalidate_stale_snapshots
+	    (struct ath12k *ar,
+			 struct ath12k_mgmt_rx_reo_snapshot_params *mac_hw_ss,
+	     struct ath12k_mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
+	     struct ath12k_mgmt_rx_reo_snapshot_params *fw_consumed_ss,
+	     struct ath12k_mgmt_rx_reo_snapshot_params *host_ss,
+	     u8 link)
+{
+	struct ath12k_base *ab = ar->ab;
+
+	if (!mac_hw_ss->valid)
+		return 0;
+
+	if (host_ss->valid) {
+		if (!ath12k_mgmt_rx_reo_compare_global_timestamps_gte
+				(mac_hw_ss->global_timestamp,
+				 host_ss->global_timestamp) ||
+				!ath12k_mgmt_rx_reo_compare_pkt_ctrs_gte
+				(mac_hw_ss->mgmt_pkt_ctr,
+				 host_ss->mgmt_pkt_ctr)) {
+			ath12k_mgmt_rx_reo_print_snapshots(ab, mac_hw_ss, fw_forwarded_ss,
+							   fw_consumed_ss, host_ss);
+			ath12k_dbg(ab, ATH12K_DBG_RX_REO, "Invalidate host snapshot, link %u",
+				   link);
+			host_ss->valid = false;
+		}
+	}
+
+	if (fw_forwarded_ss->valid) {
+		if (!ath12k_mgmt_rx_reo_compare_global_timestamps_gte
+				(mac_hw_ss->global_timestamp,
+				 fw_forwarded_ss->global_timestamp) ||
+				!ath12k_mgmt_rx_reo_compare_pkt_ctrs_gte
+				(mac_hw_ss->mgmt_pkt_ctr,
+				 fw_forwarded_ss->mgmt_pkt_ctr)) {
+			ath12k_mgmt_rx_reo_print_snapshots(ab, mac_hw_ss, fw_forwarded_ss,
+							   fw_consumed_ss, host_ss);
+			ath12k_dbg(ab, ATH12K_DBG_RX_REO, "Invalidate FW forwarded SS, link %u\n",
+				   link);
+			fw_forwarded_ss->valid = false;
+		}
+	}
+
+	if (host_ss->valid && fw_forwarded_ss->valid &&
+	    (ath12k_mgmt_rx_reo_compare_global_timestamps_gte
+			 (host_ss->global_timestamp,
+			  fw_forwarded_ss->global_timestamp) !=
+			 ath12k_mgmt_rx_reo_compare_pkt_ctrs_gte
+			 (host_ss->mgmt_pkt_ctr,
+			  fw_forwarded_ss->mgmt_pkt_ctr))) {
+		ath12k_mgmt_rx_reo_print_snapshots(ab, mac_hw_ss, fw_forwarded_ss,
+						   fw_consumed_ss, host_ss);
+		ath12k_dbg(ab, ATH12K_DBG_RX_REO, "Invalidate FW forwarded SS, link %u",
+			   link);
+		fw_forwarded_ss->valid = false;
+	}
+
+	if (fw_consumed_ss->valid) {
+		if (!ath12k_mgmt_rx_reo_compare_global_timestamps_gte
+				(mac_hw_ss->global_timestamp,
+				 fw_consumed_ss->global_timestamp) ||
+				!ath12k_mgmt_rx_reo_compare_pkt_ctrs_gte
+				(mac_hw_ss->mgmt_pkt_ctr,
+				 fw_consumed_ss->mgmt_pkt_ctr)) {
+			ath12k_mgmt_rx_reo_print_snapshots(ab, mac_hw_ss, fw_forwarded_ss,
+							   fw_consumed_ss, host_ss);
+			ath12k_dbg(ab, ATH12K_DBG_RX_REO, "Invalidate FW consumed SS, link %u\n",
+				   link);
+			fw_consumed_ss->valid = false;
+		}
+		if (host_ss->valid && fw_consumed_ss->valid &&
+		    (ath12k_mgmt_rx_reo_compare_global_timestamps_gte
+				 (host_ss->global_timestamp,
+				  fw_consumed_ss->global_timestamp) !=
+				 ath12k_mgmt_rx_reo_compare_pkt_ctrs_gte
+				 (host_ss->mgmt_pkt_ctr,
+				  fw_consumed_ss->mgmt_pkt_ctr))) {
+			ath12k_mgmt_rx_reo_print_snapshots(ab, mac_hw_ss, fw_forwarded_ss,
+							   fw_consumed_ss, host_ss);
+			ath12k_dbg(ab, ATH12K_DBG_RX_REO, "Invalidate FW consumed SS, link %u",
+				   link);
+			fw_consumed_ss->valid = false;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * mgmt_rx_reo_snapshots_check_sanity() - Check the sanity of management
+ * Rx REO snapshots
+ * @mac_hw_ss: MAC HW snapshot
+ * @fw_forwarded_ss: FW forwarded snapshot
+ * @fw_consumed_ss: FW consumed snapshot
+ * @host_ss: Host snapshot
+ *
+ * return: int
+ */
+static int
+mgmt_rx_reo_snapshots_check_sanity
+			(struct ath12k *ar,
+			struct ath12k_mgmt_rx_reo_snapshot_params *mac_hw_ss,
+			struct ath12k_mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
+			struct ath12k_mgmt_rx_reo_snapshot_params *fw_consumed_ss,
+			struct ath12k_mgmt_rx_reo_snapshot_params *host_ss)
+{
+	int status;
+	struct ath12k_base *ab = ar->ab;
+
+	if (!mac_hw_ss->valid) {
+		if (fw_forwarded_ss->valid || fw_consumed_ss->valid ||
+		    host_ss->valid) {
+			ath12k_err(ab, "MAC HW SS is invalid\n");
+			status = -EINVAL;
+			goto fail;
+		}
+
+		return 0;
+	}
+
+	if (!fw_forwarded_ss->valid && !fw_consumed_ss->valid) {
+		if (host_ss->valid) {
+			ath12k_err(ab, "FW forwarded and consumed SS invalid\n");
+			status = -EINVAL;
+			goto fail;
+		}
+
+		return 0;
+	}
+
+	if (fw_forwarded_ss->valid) {
+		if (!ath12k_mgmt_rx_reo_compare_global_timestamps_gte
+				(mac_hw_ss->global_timestamp,
+				 fw_forwarded_ss->global_timestamp)) {
+			ath12k_err(ab, "TS: MAC HW SS < FW forwarded SS\n");
+			status = -EINVAL;
+			goto fail;
+		}
+
+		if (!ath12k_mgmt_rx_reo_compare_pkt_ctrs_gte
+				(mac_hw_ss->mgmt_pkt_ctr,
+				 fw_forwarded_ss->mgmt_pkt_ctr)) {
+			ath12k_err(ab, "PKT CTR: MAC HW SS < FW forwarded SS\n");
+			status = -EINVAL;
+			goto fail;
+		}
+	}
+
+	if (fw_consumed_ss->valid) {
+		if (!ath12k_mgmt_rx_reo_compare_global_timestamps_gte
+				(mac_hw_ss->global_timestamp,
+				 fw_consumed_ss->global_timestamp)) {
+			ath12k_err(ab, "TS: MAC HW SS < FW consumed SS\n");
+			status = -EINVAL;
+			goto fail;
+		}
+
+		if (!ath12k_mgmt_rx_reo_compare_pkt_ctrs_gte
+				(mac_hw_ss->mgmt_pkt_ctr,
+				 fw_consumed_ss->mgmt_pkt_ctr)) {
+			ath12k_err(ab, "PKT CTR: MAC HW SS < FW consumed SS\n");
+			status = -EINVAL;
+			goto fail;
+		}
+	}
+
+	if (host_ss->valid) {
+		if (!ath12k_mgmt_rx_reo_compare_global_timestamps_gte
+				(mac_hw_ss->global_timestamp,
+				 host_ss->global_timestamp)) {
+			ath12k_err(ab, "TS: MAC HW SS < host SS\n");
+			status = -EINVAL;
+			goto fail;
+		}
+
+		if (!ath12k_mgmt_rx_reo_compare_pkt_ctrs_gte
+				(mac_hw_ss->mgmt_pkt_ctr,
+				 host_ss->mgmt_pkt_ctr)) {
+			ath12k_err(ab, "PKT CTR: MAC HW SS < host SS\n");
+			status = -EINVAL;
+			goto fail;
+		}
+
+		if (fw_forwarded_ss->valid && !fw_consumed_ss->valid) {
+			if (!ath12k_mgmt_rx_reo_compare_global_timestamps_gte
+					(fw_forwarded_ss->global_timestamp,
+					 host_ss->global_timestamp)) {
+				ath12k_err(ab, "TS: FW forwarded < host SS\n");
+				status = -EINVAL;
+				goto fail;
+			}
+
+			if (!ath12k_mgmt_rx_reo_compare_pkt_ctrs_gte
+					(fw_forwarded_ss->mgmt_pkt_ctr,
+					 host_ss->mgmt_pkt_ctr)) {
+				ath12k_err(ab, "CTR: FW forwarded < host SS\n");
+				status = -EINVAL;
+				goto fail;
+			}
+		}
+
+		if (fw_consumed_ss->valid && !fw_forwarded_ss->valid) {
+			if (!ath12k_mgmt_rx_reo_compare_global_timestamps_gte
+					(fw_consumed_ss->global_timestamp,
+					 host_ss->global_timestamp)) {
+				ath12k_err(ab, "TS: FW consumed < host SS\n");
+				status = -EINVAL;
+				goto fail;
+			}
+
+			if (!ath12k_mgmt_rx_reo_compare_pkt_ctrs_gte
+					(fw_consumed_ss->mgmt_pkt_ctr,
+					 host_ss->mgmt_pkt_ctr)) {
+				ath12k_err(ab, "CTR: FW consumed < host SS\n");
+				status = -EINVAL;
+				goto fail;
+			}
+		}
+
+		if (fw_forwarded_ss->valid && fw_consumed_ss->valid) {
+			if (!ath12k_mgmt_rx_reo_compare_global_timestamps_gte
+					(fw_consumed_ss->global_timestamp,
+					 host_ss->global_timestamp) &&
+					!ath12k_mgmt_rx_reo_compare_global_timestamps_gte
+					(fw_forwarded_ss->global_timestamp,
+					 host_ss->global_timestamp)) {
+				ath12k_err(ab, "TS: FW consumed/forwarded < host\n");
+				status = -EINVAL;
+				goto fail;
+			}
+
+			if (!ath12k_mgmt_rx_reo_compare_pkt_ctrs_gte
+					(fw_consumed_ss->mgmt_pkt_ctr,
+					 host_ss->mgmt_pkt_ctr) &&
+					!ath12k_mgmt_rx_reo_compare_pkt_ctrs_gte
+					(fw_forwarded_ss->mgmt_pkt_ctr,
+					 host_ss->mgmt_pkt_ctr)) {
+				ath12k_err(ab, "CTR: FW consumed/forwarded < host\n");
+				status = -EINVAL;
+				goto fail;
+			}
+		}
+	}
+
+	return 0;
+
+fail:
+	ath12k_dbg(ab, ATH12K_DBG_RX_REO, "HW SS: valid = %u, ctr = %u, ts = %u",
+		   mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
+			mac_hw_ss->global_timestamp);
+	ath12k_dbg(ab, ATH12K_DBG_RX_REO, "FW forwarded SS: valid = %u, ctr = %u, ts = %u",
+		   fw_forwarded_ss->valid,
+			fw_forwarded_ss->mgmt_pkt_ctr,
+			fw_forwarded_ss->global_timestamp);
+	ath12k_dbg(ab, ATH12K_DBG_RX_REO, "FW consumed SS: valid = %u, ctr = %u, ts = %u",
+		   fw_consumed_ss->valid,
+			fw_consumed_ss->mgmt_pkt_ctr,
+			fw_consumed_ss->global_timestamp);
+	ath12k_dbg(ab, ATH12K_DBG_RX_REO, "HOST SS: valid = %u, ctr = %u, ts = %u",
+		   host_ss->valid, host_ss->mgmt_pkt_ctr,
+			host_ss->global_timestamp);
+
+	return status;
+}
+
+/**
+ * ath12k_wmi_mgmt_rx_reorder_process_calculate_wait_count() - Calculates the number of
+ * frames an incoming frame should wait for before it gets delivered.
+ * @in_frame_pdev: pdev on which this frame is received
+ * @desc: frame Descriptor
+ *
+ * Each frame carries a MGMT pkt number which is local to that link, and a
+ * timestamp which is global across all the links. MAC HW and FW also captures
+ * the same details of the last frame that they have seen. Host also maintains
+ * the details of the last frame it has seen. In total, there are 4 snapshots.
+ * 1. MAC HW snapshot - latest frame seen at MAC HW
+ * 2. FW forwarded snapshot- latest frame forwarded to the Host
+ * 3. FW consumed snapshot - latest frame consumed by the FW
+ * 4. Host/FW consumed snapshot - latest frame seen by the Host
+ * By using all these snapshots, this function tries to compute the wait count
+ * for a given incoming frame on all links.
+ *
+ * Return: 1 on success otherwise -1 on failure
+ */
+static int
+ath12k_wmi_mgmt_rx_reorder_process_calculate_wait_count(
+		struct ath12k *ar,
+		struct ath12k_mgmt_rx_reo_frame_descriptor *desc)
+{
+	int status;
+	u8 hw_link_id;
+	s8 in_frame_link;
+	int frames_pending, delta_fwd_host;
+	u8 snapshot_id;
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k *hw_link;
+	struct ath12k_mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
+	struct ath12k_mgmt_rx_reo_pdev_info *in_frame_rx_reo_pdev_ctx;
+	struct ath12k_mgmt_rx_reo_snapshot_info *snapshot_info;
+	struct ath12k_mgmt_rx_reo_snapshot_params snapshot_params
+				[ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
+	struct ath12k_mgmt_rx_reo_snapshot_params *mac_hw_ss, *fw_forwarded_ss,
+					    *fw_consumed_ss, *host_ss;
+	struct ath12k_mgmt_rx_reo_params *in_frame_params;
+	struct ath12k_mgmt_rx_reo_wait_count *wait_count;
+
+	in_frame_params = &desc->rx_params->reo_params;
+
+	wait_count = &desc->wait_count;
+
+	/* Get the hw link ID of incoming frame */
+	in_frame_link = ar->pdev->hw_link_id;
+	in_frame_rx_reo_pdev_ctx = &ar->rx_reo_pdev_ctx;
+
+	if (!in_frame_rx_reo_pdev_ctx) {
+		ath12k_err(ab, "Reo context null for incoming frame\n");
+		return -EINVAL;
+	}
+	memset(in_frame_rx_reo_pdev_ctx->raw_snapshots, 0,
+	       sizeof(in_frame_rx_reo_pdev_ctx->raw_snapshots));
+
+	/* Iterate over all the valid hw links */
+	for (hw_link_id = 0; hw_link_id < ATH12K_WMI_MLO_MAX_LINKS; hw_link_id++) {
+		/* No need wait for any frames on an invalid hw_link_id */
+		if (!ab->ag->hw_links[hw_link_id]) {
+			frames_pending = 0;
+			goto update_pending_frames;
+		}
+
+		hw_link = ab->ag->hw_links[hw_link_id];
+
+		/* No need to wait for any frames if the pdev is not found */
+		if (!hw_link) {
+			ath12k_dbg(ab, ATH12K_DBG_RX_REO, "pdev is null for hw_link_id %d\n", hw_link_id);
+			frames_pending = 0;
+			goto update_pending_frames;
+		}
+
+		rx_reo_pdev_ctx = &hw_link->rx_reo_pdev_ctx;
+		if (!rx_reo_pdev_ctx) {
+			ath12k_err(ab, "Mgmt reo context empty for hw_link %pK\n",
+				   hw_link);
+			return -EINVAL;
+		}
+
+		if (!rx_reo_pdev_ctx->init_complete) {
+			ath12k_dbg(ab, ATH12K_DBG_RX_REO, "REO init in progress for hw_link_id %d",
+				   hw_link_id);
+			frames_pending = 0;
+			goto update_pending_frames;
+		}
+
+		host_ss = &rx_reo_pdev_ctx->host_snapshot;
+
+		desc->host_snapshot[hw_link_id] = rx_reo_pdev_ctx->host_snapshot;
+
+		ath12k_dbg(ab, ATH12K_DBG_RX_REO,
+			   "hw_link_id = %u HOST SS: valid = %u, ctr = %u, ts = %u",
+				 hw_link_id, host_ss->valid, host_ss->mgmt_pkt_ctr,
+				 host_ss->global_timestamp);
+
+		snapshot_id = 0;
+		/* Read all the shared snapshots */
+		while (snapshot_id <
+			ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
+			snapshot_info = &rx_reo_pdev_ctx->
+				host_target_shared_snapshot_info[snapshot_id];
+
+			memset(&snapshot_params[snapshot_id], 0,
+			       sizeof(snapshot_params[snapshot_id]));
+
+			status = ath12k_wmi_mgmt_rx_reo_read_snapshot(
+					hw_link, snapshot_info, snapshot_id,
+					&snapshot_params[snapshot_id],
+					in_frame_rx_reo_pdev_ctx->raw_snapshots
+					[hw_link_id][snapshot_id]);
+
+			/* Read operation shouldn't fail */
+			if (status) {
+				ath12k_err(ab, "snapshot(%d) read failed on hw_link_id (%d) status %d\n",
+					   snapshot_id, hw_link_id, status);
+				return status;
+			}
+
+			/* If snapshot is valid, save it in the pdev context */
+			if (snapshot_params[snapshot_id].valid) {
+				rx_reo_pdev_ctx->
+				   last_valid_shared_snapshot[snapshot_id] =
+				   snapshot_params[snapshot_id];
+			}
+			desc->shared_snapshots[hw_link_id][snapshot_id] =
+						snapshot_params[snapshot_id];
+
+			snapshot_id++;
+		}
+
+		mac_hw_ss = &snapshot_params
+				[ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
+		fw_forwarded_ss = &snapshot_params
+				[ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED];
+		fw_consumed_ss = &snapshot_params
+				[ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
+
+		status = ath12k_wmi_mgmt_rx_reo_invalidate_stale_snapshots(ar, mac_hw_ss,
+									   fw_forwarded_ss,
+								fw_consumed_ss,
+								host_ss, hw_link_id);
+		if (status) {
+			ath12k_err(ab, "Failed to invalidate SS for hw_link_id %u\n",
+				   hw_link_id);
+			return status;
+		}
+
+		desc->shared_snapshots[hw_link_id][ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW] =
+								*mac_hw_ss;
+		desc->shared_snapshots[hw_link_id][ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED] =
+								*fw_forwarded_ss;
+		desc->shared_snapshots[hw_link_id][ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED] =
+								*fw_consumed_ss;
+		desc->host_snapshot[hw_link_id] = *host_ss;
+
+		status = mgmt_rx_reo_snapshots_check_sanity
+			(ar, mac_hw_ss, fw_forwarded_ss, fw_consumed_ss, host_ss);
+		if (status) {
+			ath12k_err(ab, "Snapshot sanity for hw_link_id %u failed\n",
+				   hw_link_id);
+			return status;
+		}
+
+		ath12k_dbg(ab, ATH12K_DBG_RX_REO, "hw_link_id = %u HW SS: valid = %u, ctr = %u, ts = %u",
+			   hw_link_id, mac_hw_ss->valid,
+				 mac_hw_ss->mgmt_pkt_ctr,
+				 mac_hw_ss->global_timestamp);
+		ath12k_dbg(ab, ATH12K_DBG_RX_REO, "hw_link_id = %u FW forwarded SS: valid = %u, ctr = %u, ts = %u",
+			   hw_link_id, fw_forwarded_ss->valid,
+				 fw_forwarded_ss->mgmt_pkt_ctr,
+				 fw_forwarded_ss->global_timestamp);
+		ath12k_dbg(ab, ATH12K_DBG_RX_REO, "hw_link_id = %u FW consumed SS: valid = %u, ctr = %u, ts = %u",
+			   hw_link_id, fw_consumed_ss->valid,
+				 fw_consumed_ss->mgmt_pkt_ctr,
+				 fw_consumed_ss->global_timestamp);
+
+		/* No need wait for any frames on the same hw_link_id */
+		if (hw_link_id == in_frame_link) {
+			frames_pending = 0;
+			goto update_pending_frames;
+		}
+
+		/**
+		 * If MAC HW snapshot is invalid, the hw_link_id has not started
+		 * receiving management frames. Set wait count to zero.
+		 */
+		if (!mac_hw_ss->valid) {
+			frames_pending = 0;
+			goto update_pending_frames;
+		}
+
+		/**
+		 * If host snapshot is invalid, wait for MAX number of frames.
+		 * When any frame in this hw_link_id arrives at host, actual wait
+		 * counts will be updated.
+		 */
+		if (!host_ss->valid) {
+			wait_count->per_link_count[hw_link_id] = UINT_MAX;
+			wait_count->total_count += UINT_MAX;
+			goto print_wait_count;
+		}
+
+		/**
+		 * If MAC HW snapshot sequence number and host snapshot
+		 * sequence number are same, all the frames received by
+		 * this hw_link_id are processed by host. No need to wait for
+		 * any frames from this hw_link_id.
+		 */
+		if (!ath12k_mgmt_rx_reo_subtract_pkt_ctrs(mac_hw_ss->mgmt_pkt_ctr,
+							  host_ss->mgmt_pkt_ctr)) {
+			frames_pending = 0;
+			goto update_pending_frames;
+		}
+
+		/**
+		 * Ideally, the incoming frame has to wait for only those frames
+		 * (on other links) which meet all the below criterion.
+		 * 1. Frame's timestamp is less than incoming frame's
+		 * 2. Frame is supposed to be consumed by the Host
+		 * 3. Frame is not yet seen by the Host.
+		 * We may not be able to compute the exact optimal wait count
+		 * because HW/FW provides a limited assist.
+		 * This reorder process tries to get the best estimate of wait
+		 * count by not waiting for those frames where we have a conclusive
+		 * evidence that we don't have to wait for those frames.
+		 */
+
+		/**
+		 * If this link has already seen a frame whose timestamp is
+		 * greater than or equal to incoming frame's timestamp,
+		 * then no need to wait for any frames on this link.
+		 * If the total wait count becomes zero, then the policy on
+		 * whether to deliver such a frame to upper layers is handled
+		 * separately.
+		 */
+		if (ath12k_mgmt_rx_reo_compare_global_timestamps_gte(
+				host_ss->global_timestamp,
+				in_frame_params->global_timestamp)) {
+			frames_pending = 0;
+			goto update_pending_frames;
+		}
+
+		/**
+		 * For starters, we only have to wait for the frames that are
+		 * seen by MAC HW but not yet seen by Host. The frames which
+		 * reach MAC HW later are guaranteed to have a timestamp
+		 * greater than incoming frame's timestamp.
+		 */
+		frames_pending = ath12k_mgmt_rx_reo_subtract_pkt_ctrs(
+					mac_hw_ss->mgmt_pkt_ctr,
+					host_ss->mgmt_pkt_ctr);
+		BUG_ON(!(frames_pending >= 0));
+
+		if (frames_pending &&
+		    ath12k_mgmt_rx_reo_compare_global_timestamps_gte
+					(mac_hw_ss->global_timestamp,
+					 in_frame_params->global_timestamp)) {
+			/**
+			 * Last frame seen at MAC HW has timestamp greater than
+			 * or equal to incoming frame's timestamp. So no need to
+			 * wait for that last frame, but we can't conclusively
+			 * say anything about timestamp of frames before the
+			 * last frame, so try to wait for all of those frames.
+			 */
+			frames_pending--;
+			BUG_ON(!(frames_pending >= 0));
+
+			if (fw_consumed_ss->valid &&
+			    ath12k_mgmt_rx_reo_compare_global_timestamps_gte(
+				fw_consumed_ss->global_timestamp,
+				in_frame_params->global_timestamp)) {
+				/**
+				 * Last frame consumed by the FW has timestamp
+				 * greater than or equal to incoming frame's.
+				 * That means all the frames from
+				 * fw_consumed_ss->mgmt_pkt_ctr to
+				 * mac_hw->mgmt_pkt_ctr will have timestamp
+				 * greater than or equal to incoming frame's and
+				 * hence, no need to wait for those frames.
+				 * We just need to wait for frames from
+				 * host_ss->mgmt_pkt_ctr to
+				 * fw_consumed_ss->mgmt_pkt_ctr-1. This is a
+				 * better estimate over the above estimate,
+				 * so update frames_pending.
+				 */
+				frames_pending =
+				  ath12k_mgmt_rx_reo_subtract_pkt_ctrs(
+				      fw_consumed_ss->mgmt_pkt_ctr,
+				      host_ss->mgmt_pkt_ctr) - 1;
+
+				BUG_ON(!(frames_pending >= 0));
+
+				/**
+				 * Last frame forwarded to Host has timestamp
+				 * less than incoming frame's. That means all
+				 * the frames starting from
+				 * fw_forwarded_ss->mgmt_pkt_ctr+1 to
+				 * fw_consumed_ss->mgmt_pkt_ctr are consumed by
+				 * the FW and hence, no need to wait for those
+				 * frames. We just need to wait for frames
+				 * from host_ss->mgmt_pkt_ctr to
+				 * fw_forwarded_ss->mgmt_pkt_ctr. This is a
+				 * better estimate over the above estimate,
+				 * so update frames_pending.
+				 */
+				if (fw_forwarded_ss->valid &&
+				    !ath12k_mgmt_rx_reo_compare_global_timestamps_gte(
+					fw_forwarded_ss->global_timestamp,
+					in_frame_params->global_timestamp)) {
+					frames_pending =
+					  ath12k_mgmt_rx_reo_subtract_pkt_ctrs(
+					      fw_forwarded_ss->mgmt_pkt_ctr,
+					      host_ss->mgmt_pkt_ctr);
+
+					/**
+					 * frames_pending can be negative in
+					 * cases whene there are no frames
+					 * getting forwarded to the Host. No
+					 * need to wait for any frames in that
+					 * case.
+					 */
+					if (frames_pending < 0)
+						frames_pending = 0;
+				}
+			}
+
+			/**
+			 * Last frame forwarded to Host has timestamp greater
+			 * than or equal to incoming frame's. That means all the
+			 * frames from fw_forwarded->mgmt_pkt_ctr to
+			 * mac_hw->mgmt_pkt_ctr will have timestamp greater than
+			 * or equal to incoming frame's and hence, no need to
+			 * wait for those frames. We may have to just wait for
+			 * frames from host_ss->mgmt_pkt_ctr to
+			 * fw_forwarded_ss->mgmt_pkt_ctr-1
+			 */
+			if (fw_forwarded_ss->valid &&
+			    ath12k_mgmt_rx_reo_compare_global_timestamps_gte(
+				fw_forwarded_ss->global_timestamp,
+				in_frame_params->global_timestamp)) {
+				delta_fwd_host =
+				  ath12k_mgmt_rx_reo_subtract_pkt_ctrs(
+				    fw_forwarded_ss->mgmt_pkt_ctr,
+				    host_ss->mgmt_pkt_ctr) - 1;
+
+				BUG_ON(!(delta_fwd_host >= 0));
+
+				/**
+				 * This will be a better estimate over the one
+				 * we computed using mac_hw_ss but this may or
+				 * may not be a better estimate over the
+				 * one we computed using fw_consumed_ss.
+				 * When timestamps of both fw_consumed_ss and
+				 * fw_forwarded_ss are greater than incoming
+				 * frame's but timestamp of fw_consumed_ss is
+				 * smaller than fw_forwarded_ss, then
+				 * frames_pending will be smaller than
+				 * delta_fwd_host, the reverse will be true in
+				 * other cases. Instead of checking for all
+				 * those cases, just waiting for the minimum
+				 * among these two should be sufficient.
+				 */
+				frames_pending = min(frames_pending,
+						     delta_fwd_host);
+				BUG_ON(!(frames_pending >= 0));
+			}
+		}
+
+update_pending_frames:
+		BUG_ON(!(frames_pending >= 0));
+
+		wait_count->per_link_count[hw_link_id] = frames_pending;
+		wait_count->total_count += frames_pending;
+
+print_wait_count:
+		ath12k_dbg(ab, ATH12K_DBG_RX_REO,
+			   "hw_link_id = %u wait count: per link = 0x%x, total = 0x%llx",
+			   hw_link_id, wait_count->per_link_count[hw_link_id],
+			   wait_count->total_count);
+	}
+	return 0;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_prepare_list_entry() - Prepare a list entry from the management
+ * frame received.
+ * @ab: base driver context (used for logging and hw_links lookup)
+ * @frame_desc: Pointer to the frame descriptor
+ * @entry: Pointer to the list entry
+ *
+ * This API prepares the reorder list entry corresponding to a management frame
+ * to be consumed by host. This entry would be inserted at the appropriate
+ * position in the reorder list.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int
+ath12k_mgmt_rx_reo_prepare_list_entry(struct ath12k_base *ab,
+				      const struct ath12k_mgmt_rx_reo_frame_descriptor *frame_desc,
+	struct mgmt_rx_reo_list_entry **entry)
+{
+	struct mgmt_rx_reo_list_entry *list_entry;
+	struct ath12k_mgmt_rx_reo_params *reo_params =
+				&frame_desc->rx_params->reo_params;
+	struct ath12k *ar;
+	u8 link_id;
+
+	link_id = reo_params->link_id;
+
+	/* Reject out-of-range link IDs before indexing hw_links[] */
+	if (link_id >= ATH12K_GROUP_MAX_RADIO) {
+		ath12k_warn(ab, "%s:invalid hw link id %d\n", __func__, link_id);
+		return -EINVAL;
+	}
+
+	ar = rcu_dereference(ab->ag->hw_links[link_id]);
+	if (!ar) {
+		ath12k_err(ab, "ar corresponding to link %u is null\n",
+			   link_id);
+		return -EINVAL;
+	}
+
+	list_entry = kmalloc(sizeof(*list_entry), GFP_ATOMIC);
+	if (!list_entry)
+		return -ENOMEM;
+
+	/* Copy the frame descriptor's state into the new entry; both the
+	 * current and the initial wait counts start from the descriptor's
+	 * computed wait count.
+	 */
+	list_entry->ar = ar;
+	list_entry->nbuf = frame_desc->nbuf;
+	list_entry->rx_params = frame_desc->rx_params;
+	list_entry->wait_count = frame_desc->wait_count;
+	list_entry->initial_wait_count = frame_desc->wait_count;
+	memcpy(list_entry->shared_snapshots, frame_desc->shared_snapshots,
+	       min(sizeof(list_entry->shared_snapshots),
+		   sizeof(frame_desc->shared_snapshots)));
+	memcpy(list_entry->host_snapshot, frame_desc->host_snapshot,
+	       min(sizeof(list_entry->host_snapshot),
+		   sizeof(frame_desc->host_snapshot)));
+	list_entry->status = 0;
+	if (list_entry->wait_count.total_count)
+		list_entry->status |=
+			ATH12K_MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
+
+	*entry = list_entry;
+
+	return 0;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_is_stale_frame() - API to check whether the given management frame
+ * is stale
+ * @ab: base driver context (used for logging)
+ * @ts_last_released_frame: pointer to global time stamp of the last frame
+ * removed from the reorder list
+ * @frame_desc: pointer to frame descriptor
+ *
+ * This API checks whether the current management frame under processing is
+ * stale. Any frame older than the last frame delivered to upper layer is a
+ * stale frame. This could happen when we have to deliver frames out of order
+ * due to time out or list size limit. The frames which arrive late at host and
+ * with time stamp lesser than the last delivered frame are stale frames and
+ * they need to be handled differently.
+ *
+ * Return: 0 on success, non-zero on failure. On success "is_stale" and
+ * "is_parallel_rx" members of
+ * @frame_desc will be filled with proper values.
+ */
+static int
+ath12k_mgmt_rx_reo_is_stale_frame(struct ath12k_base *ab,
+				  struct ath12k_mgmt_rx_reo_global_ts_info *ts_last_released_frame,
+	struct ath12k_mgmt_rx_reo_frame_descriptor *frame_desc)
+{
+	struct ath12k_mgmt_rx_reo_params *reo_params
+				= &frame_desc->rx_params->reo_params;
+	u32 cur_frame_start_ts;
+	u32 cur_frame_end_ts;
+
+	if (!ts_last_released_frame) {
+		ath12k_err(ab, "Last released frame time stamp info is null\n");
+		return -EINVAL;
+	}
+
+	/* NOTE(review): frame_desc was already dereferenced above to compute
+	 * reo_params, so this NULL check comes too late to protect that use.
+	 */
+	if (!frame_desc) {
+		ath12k_err(ab, "Frame descriptor is null\n");
+		return -EINVAL;
+	}
+
+	frame_desc->is_stale = false;
+	frame_desc->is_parallel_rx = false;
+
+	/* Nothing has been released yet, so no frame can be stale */
+	if (!ts_last_released_frame->valid)
+		return 0;
+
+	cur_frame_start_ts = reo_params->start_timestamp;
+	cur_frame_end_ts = reo_params->end_timestamp;
+
+	/* Stale if this frame started strictly before the last released one */
+	frame_desc->is_stale =
+		!ath12k_mgmt_rx_reo_compare_global_timestamps_gte(cur_frame_start_ts,
+				ts_last_released_frame->start_ts);
+
+	/* A frame whose Rx window overlaps the last released frame's window
+	 * (started earlier but ended later) is a parallel Rx, not stale.
+	 */
+	if (ath12k_mgmt_rx_reo_compare_global_timestamps_gte
+			(ts_last_released_frame->start_ts, cur_frame_start_ts) &&
+			ath12k_mgmt_rx_reo_compare_global_timestamps_gte
+			(cur_frame_end_ts, ts_last_released_frame->end_ts)) {
+		frame_desc->is_parallel_rx = true;
+		frame_desc->is_stale = false;
+	}
+
+	return 0;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_update_wait_count() - Update the wait count for a frame based
+ * on the wait count of a frame received after that on air.
+ * @wait_count_old_frame: Pointer to the wait count structure for the old frame.
+ * @wait_count_new_frame: Pointer to the wait count structure for the new frame.
+ *
+ * This API optimizes the wait count of a frame based on the wait count of
+ * a frame received after that on air. Old frame refers to the frame received
+ * first on the air and new frame refers to the frame received after that.
+ * We use the following fundamental idea. Wait counts for old frames can't be
+ * more than wait counts for the new frame. Use this to optimize the wait count
+ * for the old frames. Per link wait count of an old frame is minimum of the
+ * per link wait count of the old frame and new frame.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int
+ath12k_mgmt_rx_reo_update_wait_count(
+	struct ath12k_mgmt_rx_reo_wait_count *wait_count_old_frame,
+	const struct ath12k_mgmt_rx_reo_wait_count *wait_count_new_frame)
+{
+	u8 link_id;
+
+	for (link_id = 0; link_id < ATH12K_WMI_MLO_MAX_LINKS; link_id++) {
+		if (wait_count_old_frame->per_link_count[link_id]) {
+			u32 temp_wait_count;
+			u32 wait_count_diff;
+
+			/* Clamp the old frame's per-link count to the newer
+			 * frame's count, then subtract the reduction from the
+			 * old frame's total count to keep it consistent.
+			 */
+			temp_wait_count =
+				wait_count_old_frame->per_link_count[link_id];
+			wait_count_old_frame->per_link_count[link_id] =
+				min(wait_count_old_frame->
+						per_link_count[link_id],
+						wait_count_new_frame->
+						per_link_count[link_id]);
+			wait_count_diff = temp_wait_count -
+				wait_count_old_frame->per_link_count[link_id];
+
+			wait_count_old_frame->total_count -= wait_count_diff;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_update_list() - Modify the reorder list when a frame is received
+ * @ab: base driver context (used for logging)
+ * @reo_list: Pointer to reorder list
+ * @frame_desc: Pointer to frame descriptor
+ * @is_queued: Whether this frame is queued in the REO list
+ *
+ * API to update the reorder list on every management frame reception.
+ * This API does the following things.
+ *   a) Update the wait counts for all the frames in the reorder list with
+ *      global time stamp <= current frame's global time stamp. We use the
+ *      following principle for updating the wait count in this case.
+ *      Let A and B be two management frames with global time stamp of A <=
+ *      global time stamp of B. Let WAi and WBi be the wait count of A and B
+ *      for link i, then WAi <= WBi. Hence we can optimize WAi as
+ *      min(WAi, WBi).
+ *   b) If the current frame is to be consumed by host, insert it in the
+ *      reorder list such that the list is always sorted in the increasing order
+ *      of global time stamp. Update the wait count of the current frame based
+ *      on the frame next to it in the reorder list (if any).
+ *   c) Update the wait count of the frames in the reorder list with global
+ *      time stamp > current frame's global time stamp. Let the current frame
+ *      belong to link "l". Then link "l"'s wait count can be reduced by one for
+ *      all the frames in the reorder list with global time stamp > current
+ *      frame's global time stamp.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int
+ath12k_mgmt_rx_reo_update_list(
+			struct ath12k_base *ab,
+			struct mgmt_rx_reo_list *reo_list,
+	    struct ath12k_mgmt_rx_reo_frame_descriptor *frame_desc,
+	    bool *is_queued)
+{
+	struct mgmt_rx_reo_list_entry *cur_entry;
+	struct mgmt_rx_reo_list_entry *least_greater_entry = NULL;
+	bool least_greater_entry_found = false;
+	int status;
+	u32 new_frame_global_ts;
+	struct mgmt_rx_reo_list_entry *new_entry = NULL;
+	u16 list_insertion_pos = 0;
+	struct ath12k_mgmt_rx_reo_params *reo_params;
+
+	if (!is_queued)
+		return -EINVAL;
+
+	*is_queued = false;
+
+	if (!reo_list) {
+		ath12k_err(ab, "Mgmt Rx reo list is null\n");
+		return -EINVAL;
+	}
+
+	if (!frame_desc) {
+		ath12k_err(ab, "Mgmt frame descriptor is null\n");
+		return -EINVAL;
+	}
+
+	if (frame_desc->rx_params) {
+		reo_params = &frame_desc->rx_params->reo_params;
+		new_frame_global_ts = reo_params->global_timestamp;
+	} else {
+		/* NOTE(review): rx_params is known to be NULL on this path, so
+		 * the %p below always prints a null pointer.
+		 */
+		ath12k_err(ab, "null rx_param %p\n", frame_desc->rx_params);
+		return -EINVAL;
+	}
+
+	/* Prepare the list entry before acquiring lock */
+	if (frame_desc->type == ATH12K_MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
+	    frame_desc->reo_required) {
+		status = ath12k_mgmt_rx_reo_prepare_list_entry(ab, frame_desc, &new_entry);
+		if (status) {
+			ath12k_err(ab, "Failed to prepare list entry %d\n", status);
+			return -EINVAL;
+		}
+	}
+
+	spin_lock_bh(&reo_list->list_lock);
+
+	frame_desc->list_size_rx = reo_list->count;
+
+	status = ath12k_mgmt_rx_reo_is_stale_frame(ab, &reo_list->ts_last_released_frame,
+						   frame_desc);
+	if (status)
+		goto exit_free_entry;
+
+	/* Step a): walk entries not newer than the incoming frame, shrinking
+	 * their wait counts; stop at the first entry with a greater timestamp
+	 * (the insertion point).
+	 */
+	list_for_each_entry(cur_entry, &reo_list->list, node) {
+		u32 cur_entry_global_ts;
+		struct ath12k_mgmt_rx_reo_params *cur_reo_params =
+				&cur_entry->rx_params->reo_params;
+
+		cur_entry_global_ts = cur_reo_params->global_timestamp;
+
+		if (!ath12k_mgmt_rx_reo_compare_global_timestamps_gte(
+					new_frame_global_ts, cur_entry_global_ts)) {
+			least_greater_entry = cur_entry;
+			least_greater_entry_found = true;
+			break;
+		}
+
+		/* If the frame is stale at this point while its global time
+		 * stamp appears valid, the reordering did not happen
+		 * correctly; investigate the reorder logic.
+		 */
+		BUG_ON(!(!frame_desc->is_stale || cur_entry->is_parallel_rx));
+
+		list_insertion_pos++;
+
+		status = ath12k_mgmt_rx_reo_update_wait_count(
+				&cur_entry->wait_count,
+				&frame_desc->wait_count);
+		if (status)
+			goto exit_free_entry;
+
+		if (cur_entry->wait_count.total_count == 0)
+			cur_entry->status &=
+				~ATH12K_MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
+	}
+
+	/* Step b): insert the new entry at the computed position, keeping the
+	 * list sorted by global time stamp.
+	 */
+	if (frame_desc->type == ATH12K_MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
+	    !frame_desc->is_stale && frame_desc->reo_required) {
+		if (least_greater_entry_found) {
+			status = ath12k_mgmt_rx_reo_update_wait_count(
+					&new_entry->wait_count,
+					&least_greater_entry->wait_count);
+
+			if (status)
+				goto exit_free_entry;
+
+			frame_desc->wait_count = new_entry->wait_count;
+
+			if (new_entry->wait_count.total_count == 0)
+				new_entry->status &=
+					~ATH12K_MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
+		}
+
+		new_entry->insertion_ts = ktime_to_us(ktime_get());
+		new_entry->ingress_timestamp = frame_desc->ingress_timestamp;
+		new_entry->is_parallel_rx = frame_desc->is_parallel_rx;
+		frame_desc->list_insertion_pos = list_insertion_pos;
+
+		if (least_greater_entry_found)
+			list_add_tail(&new_entry->node, &least_greater_entry->node);
+		else
+			list_add_tail(&new_entry->node, &reo_list->list);
+
+		reo_list->count++;
+
+		*is_queued = true;
+
+		if (new_entry->wait_count.total_count == 0)
+			frame_desc->zero_wait_count_rx = true;
+
+		/* Frame at the head with nothing to wait for can be delivered
+		 * immediately.
+		 */
+		if (frame_desc->zero_wait_count_rx &&
+		    list_first_entry_or_null(&reo_list->list,
+					     struct mgmt_rx_reo_list_entry,
+					node) == new_entry)
+			frame_desc->immediate_delivery = true;
+	}
+
+	/* Step c): for entries newer than the incoming frame, decrement their
+	 * wait count on the incoming frame's link by the packet counter delta.
+	 */
+	if (least_greater_entry_found) {
+		cur_entry = least_greater_entry;
+
+		list_for_each_entry_from(cur_entry, &reo_list->list, node) {
+			u8 frame_link_id;
+			struct ath12k_mgmt_rx_reo_wait_count *wait_count;
+
+			frame_link_id = reo_params->link_id;
+			wait_count = &cur_entry->wait_count;
+			if (wait_count->per_link_count[frame_link_id]) {
+				u32 old_wait_count;
+				u32 new_wait_count;
+				u32 wait_count_diff;
+				u16 pkt_ctr_delta;
+
+				pkt_ctr_delta = frame_desc->pkt_ctr_delta;
+				old_wait_count =
+					wait_count->per_link_count[frame_link_id];
+
+				if (old_wait_count >= pkt_ctr_delta)
+					new_wait_count = old_wait_count -
+						pkt_ctr_delta;
+				else
+					new_wait_count = 0;
+
+				wait_count_diff = old_wait_count -
+					new_wait_count;
+
+				wait_count->per_link_count[frame_link_id] =
+					new_wait_count;
+				wait_count->total_count -= wait_count_diff;
+
+				if (wait_count->total_count == 0)
+					cur_entry->status &=
+						~ATH12K_MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
+			}
+		}
+	}
+
+	status = 0;
+
+exit_free_entry:
+	/* Cleanup the entry if it is not queued */
+	if (new_entry && !*is_queued) {
+		/**
+		 * New entry created is not inserted to reorder list, free
+		 * the entry and release the reference
+		 */
+		kfree(new_entry);
+	}
+
+	spin_unlock_bh(&reo_list->list_lock);
+
+	return status;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_list_is_ready_to_send_up_entry() - API to check whether the
+ * list entry can be send to upper layers.
+ * @reo_list: Pointer to reorder list
+ * @entry: List entry
+ *
+ * An entry is releasable when the list has exceeded its maximum size, the
+ * entry is no longer waiting for frames on other links, it has aged out, or
+ * it is older than the latest aged-out frame.
+ *
+ * Return: true if the entry can be released to upper layers, false otherwise
+ */
+static bool
+ath12k_mgmt_rx_reo_list_is_ready_to_send_up_entry(struct mgmt_rx_reo_list *reo_list,
+						  struct mgmt_rx_reo_list_entry *entry)
+{
+	if (!reo_list || !entry)
+		return false;
+
+	return (reo_list->count > reo_list->max_list_size) ||
+		!ATH12K_MGMT_RX_REO_LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(
+				entry) || ATH12K_MGMT_RX_REO_LIST_ENTRY_IS_AGED_OUT(entry) ||
+		ATH12K_MGMT_RX_REO_LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME
+		(entry);
+}
+
+/**
+ * ath12k_mgmt_rx_reo_list_entry_get_release_reason() - Helper API to get the reason
+ * for releasing the reorder list entry to upper layer.
+ * @entry: List entry
+ *
+ * This API expects the caller to acquire the spin lock protecting the reorder
+ * list.
+ *
+ * Return: Reason for releasing the frame (bitmask of
+ * MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_* flags; 0 if @entry is NULL).
+ */
+static u8
+ath12k_mgmt_rx_reo_list_entry_get_release_reason(struct mgmt_rx_reo_list_entry *entry)
+{
+	u8 release_reason = 0;
+
+	if (!entry)
+		return 0;
+
+	if (ATH12K_MGMT_RX_REO_LIST_ENTRY_IS_MAX_SIZE_EXCEEDED(entry))
+		release_reason |=
+			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_LIST_MAX_SIZE_EXCEEDED;
+
+	if (!ATH12K_MGMT_RX_REO_LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(entry))
+		release_reason |=
+			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT;
+
+	if (ATH12K_MGMT_RX_REO_LIST_ENTRY_IS_AGED_OUT(entry))
+		release_reason |=
+			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_AGED_OUT;
+
+	if (ATH12K_MGMT_RX_REO_LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME(entry))
+		release_reason |=
+			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME;
+
+	return release_reason;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_is_potential_premature_delivery() - Helper API to check
+ * whether the current frame getting delivered to upper layer is a premature
+ * delivery
+ * @release_reason: release reason
+ *
+ * A delivery is premature when the frame is being released for any reason
+ * other than its wait count having dropped to zero (e.g. age-out or list
+ * size limit).
+ *
+ * Return: true for a premature delivery
+ */
+static bool
+ath12k_mgmt_rx_reo_is_potential_premature_delivery(u8 release_reason)
+{
+	return !(release_reason &
+			MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT);
+}
+
+/**
+ * ath12k_mgmt_rx_reo_list_entry_send_up() - API to send the frame to the upper layer.
+ * @reo_list: Pointer to reorder list
+ * @entry: List entry
+ *
+ * API to send the frame to the upper layer. This API has to be called only
+ * for entries which can be released to upper layer. It is the caller's
+ * responsibility to ensure that entry can be released (by using API
+ * ath12k_mgmt_rx_reo_list_is_ready_to_send_up_entry). This API is called after
+ * acquiring the lock which serializes the frame delivery to the upper layers.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int
+ath12k_mgmt_rx_reo_list_entry_send_up(struct mgmt_rx_reo_list *reo_list,
+			       struct mgmt_rx_reo_list_entry *entry)
+{
+	struct ath12k *ar = entry->ar;
+	u8 release_reason;
+	u8 link_id;
+	u32 entry_global_ts;
+	int status;
+	struct ath12k_mgmt_rx_reo_context *reo_context;
+	struct ath12k_mgmt_rx_reo_params *reo_params =
+		&entry->rx_params->reo_params;
+
+	/* pdev is going away; drop the frame instead of delivering it */
+	if (unlikely(!rcu_access_pointer(ar->ab->pdevs_active[ar->pdev_idx]))) {
+		dev_kfree_skb(entry->nbuf);
+		goto free;
+	}
+
+	reo_context = container_of(reo_list, struct ath12k_mgmt_rx_reo_context,
+				   reo_list);
+
+	link_id = reo_params->link_id;
+	entry_global_ts = reo_params->global_timestamp;
+
+	release_reason = ath12k_mgmt_rx_reo_list_entry_get_release_reason(entry);
+
+	//TODO remove BUG_ON once the implementation is stablized
+	/* NOTE(review): "!release_reason != 0" is just "!release_reason"; this
+	 * asserts that a releasable entry always has a nonzero release reason.
+	 */
+	BUG_ON(!release_reason != 0);
+
+	entry->is_delivered = false;
+	entry->is_premature_delivery = false;
+	entry->release_reason = release_reason;
+
+	if (ath12k_mgmt_rx_reo_is_potential_premature_delivery(release_reason))
+		entry->is_premature_delivery = true;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_RX_REO,
+			"Mgmt Re-order egress: channel %d valid %u global_ts %u pkt_ctr %u is_parallel_rx %d\n",
+			entry->rx_params->channel,
+			entry->rx_params->reo_params.valid,
+			entry->rx_params->reo_params.global_timestamp,
+			entry->rx_params->reo_params.mgmt_pkt_ctr,
+			entry->is_parallel_rx);
+
+	ieee80211_rx_ni(entry->ar->ah->hw, entry->nbuf);
+	/* believing rx stack takes care of skb */
+	entry->is_delivered = true;
+
+free:
+	kfree(entry->rx_params);
+	/* nbuf was consumed by ieee80211_rx_ni() above (or freed via
+	 * dev_kfree_skb on the inactive-pdev path) and rx_params was freed
+	 * here; clear the stale pointers explicitly.
+	 */
+	entry->nbuf = NULL;
+	entry->rx_params = NULL;
+	status = 0;
+
+	return status;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_list_release_entries() - Release entries from the reorder list
+ * @ab: base driver context (used for logging)
+ * @reo_context: Pointer to management Rx reorder context
+ *
+ * This API releases the entries from the reorder list based on the following
+ * conditions.
+ *   a) Entries with total wait count equal to 0
+ *   b) Entries which are timed out or entries with global time stamp <= global
+ *      time stamp of the latest frame which is timed out. We can only release
+ *      the entries in the increasing order of the global time stamp.
+ *      So all the entries with global time stamp <= global time stamp of the
+ *      latest timed out frame has to be released.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int
+ath12k_mgmt_rx_reo_list_release_entries(struct ath12k_base *ab,
+					struct ath12k_mgmt_rx_reo_context *reo_context)
+{
+	struct mgmt_rx_reo_list *reo_list;
+	struct ath12k_mgmt_rx_reo_params *reo_params;
+	int status;
+
+	if (!reo_context) {
+		ath12k_err(ab, "reo context is null\n");
+		return -EINVAL;
+	}
+
+	reo_list = &reo_context->reo_list;
+
+	/* Serialize frame delivery to upper layers across callers */
+	spin_lock(&reo_context->frame_release_lock);
+
+	while (1) {
+		struct mgmt_rx_reo_list_entry *first_entry;
+		/* TODO yield if release_count > THRESHOLD */
+		u16 release_count = 0;
+		struct ath12k_mgmt_rx_reo_global_ts_info *ts_last_released_frame =
+			&reo_list->ts_last_released_frame;
+		u32 entry_global_ts;
+
+		spin_lock_bh(&reo_list->list_lock);
+
+		first_entry = list_first_entry_or_null(
+				&reo_list->list, struct mgmt_rx_reo_list_entry, node);
+
+		if (!first_entry) {
+			status = 0;
+			goto exit_unlock_list_lock;
+		}
+
+		if (!ath12k_mgmt_rx_reo_list_is_ready_to_send_up_entry(reo_list,
+								       first_entry)) {
+			status = 0;
+			goto exit_unlock_list_lock;
+		}
+
+		if (reo_list->count > reo_list->max_list_size)
+			first_entry->status |=
+				ATH12K_MGMT_RX_REO_STATUS_LIST_MAX_SIZE_EXCEEDED;
+
+		list_del_init(&first_entry->node);
+		reo_list->count--;
+
+		/* NOTE(review): "status" has not been assigned on this path in
+		 * the current loop iteration, so this reads an indeterminate
+		 * value on the first pass — TODO confirm and initialize.
+		 */
+		if (status) {
+			status = -EINVAL;
+			goto exit_unlock_list_lock;
+		}
+		first_entry->removal_ts = ktime_to_us(ktime_get());
+
+		/**
+		 * Last released frame global time stamp is invalid means that
+		 * current frame is the first frame to be released to the
+		 * upper layer from the reorder list. Blindly update the last
+		 * released frame global time stamp to the current frame's
+		 * global time stamp and set the valid to true.
+		 * If the last released frame global time stamp is valid and
+		 * current frame's global time stamp is >= last released frame
+		 * global time stamp, deliver the current frame to upper layer
+		 * and update the last released frame global time stamp.
+		 */
+		reo_params = &first_entry->rx_params->reo_params;
+		entry_global_ts = reo_params->global_timestamp;
+
+		if (!ts_last_released_frame->valid ||
+		    ath12k_mgmt_rx_reo_compare_global_timestamps_gte(
+					entry_global_ts, ts_last_released_frame->global_ts)) {
+			ts_last_released_frame->global_ts = entry_global_ts;
+			ts_last_released_frame->start_ts =
+				reo_params->start_timestamp;
+			ts_last_released_frame->end_ts =
+				reo_params->end_timestamp;
+			ts_last_released_frame->valid = true;
+			ts_last_released_frame->expiry_time = jiffies + ATH12K_MGMT_RX_REO_GLOBAL_MGMT_RX_INACTIVITY_TIMEOUT;
+
+		} else {
+			/**
+			 * This should never happen. All the frames older than
+			 * the last frame released from the reorder list will be
+			 * discarded at the entry to reorder process itself.
+			 */
+			BUG_ON(!first_entry->is_parallel_rx);
+		}
+
+		spin_unlock_bh(&reo_list->list_lock);
+
+		status = ath12k_mgmt_rx_reo_list_entry_send_up(reo_list,
+							first_entry);
+		if (status) {
+			status = -EINVAL;
+			kfree(first_entry);
+			goto exit_unlock_frame_release_lock;
+		}
+
+		kfree(first_entry);
+		release_count++;
+	}
+
+	/* NOTE(review): unreachable — the while (1) loop above only exits via
+	 * goto.
+	 */
+	status = 0;
+	goto exit_unlock_frame_release_lock;
+
+exit_unlock_list_lock:
+	spin_unlock_bh(&reo_list->list_lock);
+exit_unlock_frame_release_lock:
+	spin_unlock(&reo_context->frame_release_lock);
+
+	return status;
+}
+
+/**
+ * ath12k_wmi_mgmt_rx_reorder_process_entry() - Run the management Rx reorder
+ * pipeline for one frame/event descriptor
+ * @ar: Radio (link) on which the frame/event arrived
+ * @desc: Frame descriptor; must carry valid rx_params
+ * @is_queued: Output; set to true when the descriptor was queued on the
+ *	       reorder list (ownership of rx_params/nbuf transfers to the
+ *	       list), false when the caller retains ownership
+ *
+ * Updates the host snapshot, computes the wait counts and inserts the entry
+ * into the reorder list under rx_reorder_entry_lock, then releases any
+ * entries that became ready to go to the upper layer.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int
+ath12k_wmi_mgmt_rx_reorder_process_entry(struct ath12k *ar,
+					 struct ath12k_mgmt_rx_reo_frame_descriptor *desc,
+			    bool *is_queued)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_mgmt_rx_reo_context *reo_ctx;
+	int ret;
+
+	if (!is_queued)
+		return -EINVAL;
+
+	*is_queued = false;
+
+	if (!desc || !desc->rx_params) {
+		ath12k_err(ab, "MGMT Rx REO descriptor or rx params are null\n");
+		return -EINVAL;
+	}
+
+	reo_ctx = &ab->ag->rx_reo;
+	if (!reo_ctx) {
+		ath12k_err(ab, "REO context is NULL\n");
+		return -EINVAL;
+	}
+
+	if (!reo_ctx->init_done || !reo_ctx->timer_init_done) {
+		ath12k_err(ab, "MGMT Rx REO timer is not initialised\n");
+		return -EINVAL;
+	}
+	/**
+	 * Critical Section = Host snapshot update + Calculation of wait
+	 * counts + Update reorder list. Following section describes the
+	 * motivation for making this a critical section.
+	 * Lets take an example of 2 links (Link A & B) and each has received
+	 * a management frame A1 and B1 such that MLO global time stamp of A1 <
+	 * MLO global time stamp of B1. Host is concurrently executing
+	 * "ath12k_wmi_mgmt_rx_reorder_process_entry" for A1 and B1 in 2 different CPUs.
+	 *
+	 * A lock less version of this API("ath12k_wmi_mgmt_rx_reorder_process_entry_v1") is
+	 * as follows.
+	 *
+	 * ath12k_wmi_mgmt_rx_reorder_process_entry()
+	 * {
+	 *     Host snapshot update
+	 *     Calculation of wait counts
+	 *     Update reorder list
+	 *     Release to upper layer
+	 * }
+	 *
+	 * We may run into race conditions under the following sequence of
+	 * operations.
+	 *
+	 * 1. Host snapshot update for link A in context of frame A1
+	 * 2. Host snapshot update for link B in context of frame B1
+	 * 3. Calculation of wait count for frame B1
+	 *        link A wait count =  0
+	 *        link B wait count =  0
+	 * 4. Update reorder list with frame B1
+	 * 5. Release B1 to upper layer
+	 * 6. Calculation of wait count for frame A1
+	 *        link A wait count =  0
+	 *        link B wait count =  0
+	 * 7. Update reorder list with frame A1
+	 * 8. Release A1 to upper layer
+	 *
+	 * This leads to incorrect behaviour as B1 goes to upper layer before
+	 * A1.
+	 *
+	 * To prevent this lets make Host snapshot update + Calculate wait count
+	 * a critical section by adding locks. The updated version of the API
+	 * ("ath12k_wmi_mgmt_rx_reorder_process_entry_v2") is as follows.
+	 *
+	 * ath12k_wmi_mgmt_rx_reorder_process_entry()
+	 * {
+	 *     LOCK
+	 *         Host snapshot update
+	 *         Calculation of wait counts
+	 *     UNLOCK
+	 *     Update reorder list
+	 *     Release to upper layer
+	 * }
+	 *
+	 * With this API also We may run into race conditions under the
+	 * following sequence of operations.
+	 *
+	 * 1. Host snapshot update for link A in context of frame A1 +
+	 *    Calculation of wait count for frame A1
+	 *        link A wait count =  0
+	 *        link B wait count =  0
+	 * 2. Host snapshot update for link B in context of frame B1 +
+	 *    Calculation of wait count for frame B1
+	 *        link A wait count =  0
+	 *        link B wait count =  0
+	 * 4. Update reorder list with frame B1
+	 * 5. Release B1 to upper layer
+	 * 7. Update reorder list with frame A1
+	 * 8. Release A1 to upper layer
+	 *
+	 * This also leads to incorrect behaviour as B1 goes to upper layer
+	 * before A1.
+	 *
+	 *
+	 * To prevent this, let's make Host snapshot update + Calculate wait
+	 * count + Update reorder list a critical section by adding locks.
+	 * The updated version of the API ("ath12k_wmi_mgmt_rx_reorder_process_entry_final")
+	 * is as follows.
+	 *
+	 * ath12k_wmi_mgmt_rx_reorder_process_entry()
+	 * {
+	 *     LOCK
+	 *         Host snapshot update
+	 *         Calculation of wait counts
+	 *         Update reorder list
+	 *     UNLOCK
+	 *     Release to upper layer
+	 * }
+	 */
+	spin_lock_bh(&reo_ctx->rx_reorder_entry_lock);
+
+	if (unlikely(!desc->rx_params->reo_params.valid)) {
+		ath12k_warn(ab, "Valid bit is not set for mgmt frame\n");
+		/* fix: 'ret' was previously returned uninitialized here */
+		ret = -EINVAL;
+		goto failure;
+	}
+
+	if ((desc->type == ATH12K_MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME ||
+	     desc->type == ATH12K_MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME) &&
+			!desc->rx_params->reo_params.duration_us) {
+		ath12k_warn(ab, "Rx reo duration of the frame is not valid %d\n",
+			    desc->rx_params->reo_params.duration_us);
+		/* fix: 'ret' was previously returned uninitialized here */
+		ret = -EINVAL;
+		goto failure;
+	}
+
+	/* Update the Host snapshot */
+	ret = ath12k_wlan_mgmt_rx_reo_update_host_snapshot(ar, desc);
+	if (ret)
+		goto failure;
+
+	/* Compute wait count for this frame/event */
+	ret = ath12k_wmi_mgmt_rx_reorder_process_calculate_wait_count(ar, desc);
+	if (ret)
+		goto failure;
+
+	/* Update the REO list */
+	ret = ath12k_mgmt_rx_reo_update_list(ab, &reo_ctx->reo_list, desc, is_queued);
+	if (ret)
+		goto failure;
+
+	spin_unlock_bh(&reo_ctx->rx_reorder_entry_lock);
+	/* Finally, release the entries for which pending frame is received */
+	return ath12k_mgmt_rx_reo_list_release_entries(ab, reo_ctx);
+
+failure:
+	spin_unlock_bh(&reo_ctx->rx_reorder_entry_lock);
+	return ret;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_list_ageout_timer_handler() - Periodic ageout timer handler
+ * @timer: Timer embedded in struct mgmt_rx_reo_list (ageout_timer)
+ *
+ * This is the handler for periodic ageout timer used to timeout entries in the
+ * reorder list.
+ *
+ * Return: void
+ */
+static void
+ath12k_mgmt_rx_reo_list_ageout_timer_handler(struct timer_list *timer)
+
+{
+	struct mgmt_rx_reo_list *reo_list = from_timer(reo_list, timer, ageout_timer);
+	struct ath12k_base *ab = reo_list->ab;
+	struct mgmt_rx_reo_list_entry *cur_entry;
+	u64 cur_ts;
+	int status;
+	struct ath12k_mgmt_rx_reo_context *reo_context = &ab->ag->rx_reo;
+	/**
+	 * Stores the pointer to the entry in reorder list for the latest aged
+	 * out frame. Latest aged out frame is the aged out frame in reorder
+	 * list which has the largest global time stamp value.
+	 */
+	struct mgmt_rx_reo_list_entry *latest_aged_out_entry = NULL;
+
+	spin_lock_bh(&reo_list->list_lock);
+
+	cur_ts = ktime_to_us(ktime_get());
+
+	if (list_empty(&reo_list->list))
+		goto out;
+
+	/* First pass: mark every entry older than the timeout as AGED_OUT;
+	 * the last one marked is the latest aged-out entry.
+	 */
+	list_for_each_entry(cur_entry, &reo_list->list, node) {
+		if (cur_ts - cur_entry->insertion_ts >=
+				reo_list->list_entry_timeout_us) {
+			latest_aged_out_entry = cur_entry;
+			cur_entry->status |= ATH12K_MGMT_RX_REO_STATUS_AGED_OUT;
+		}
+	}
+
+	/* Second pass: everything ahead of the latest aged-out entry can be
+	 * released along with it.
+	 */
+	if (latest_aged_out_entry) {
+		list_for_each_entry(cur_entry, &reo_list->list, node) {
+			if (cur_entry == latest_aged_out_entry)
+				break;
+			cur_entry->status |= ATH12K_MGMT_RX_REO_STATUS_OLDER_THAN_LATEST_AGED_OUT_FRAME;
+		}
+	}
+
+
+out:
+	spin_unlock_bh(&reo_list->list_lock);
+
+	if (latest_aged_out_entry) {
+		status = ath12k_mgmt_rx_reo_list_release_entries(ab, reo_context);
+		if (status) {
+			ath12k_err(ab, "Failed to release entries, ret = %d\n",
+				   status);
+			/* NOTE(review): returning here leaves the periodic
+			 * timer un-armed, permanently stopping ageout —
+			 * confirm this is intentional.
+			 */
+			return;
+		}
+	}
+
+	mod_timer(&reo_list->ageout_timer, jiffies +
+			msecs_to_jiffies(ATH12K_MGMT_RX_REO_AGEOUT_TIMER_PERIOD_MS));
+}
+
+/**
+ * ath12k_mgmt_rx_reo_global_mgmt_rx_inactivity_timer_handler() - Timer handler
+ * for global management Rx inactivity timer
+ * @timer: Timer embedded in struct mgmt_rx_reo_list
+ *	   (global_mgmt_rx_inactivity_timer)
+ *
+ * This is the timer handler for tracking management Rx inactivity across
+ * links. When no frame has been released within the expiry window, the
+ * last-released-frame timestamp info is cleared (valid = false) so that the
+ * next released frame re-seeds it.
+ *
+ * Return: void
+ */
+static void
+ath12k_mgmt_rx_reo_global_mgmt_rx_inactivity_timer_handler(struct timer_list *timer)
+
+{
+	struct mgmt_rx_reo_list *reo_list = from_timer(reo_list, timer, global_mgmt_rx_inactivity_timer);
+	struct ath12k_base *ab = reo_list->ab;
+	struct ath12k_mgmt_rx_reo_context *reo_context = &ab->ag->rx_reo;
+	struct ath12k_mgmt_rx_reo_global_ts_info *ts_last_released_frame;
+
+	ts_last_released_frame = &reo_list->ts_last_released_frame;
+
+	/* NOTE(review): frame_release_lock is taken with plain spin_lock here
+	 * and in the release path, while list_lock uses the _bh variant —
+	 * confirm all frame_release_lock users run in BH context.
+	 */
+	spin_lock(&reo_context->frame_release_lock);
+	spin_lock_bh(&reo_list->list_lock);
+
+	if (time_after(jiffies, ts_last_released_frame->expiry_time))
+		memset(ts_last_released_frame, 0, sizeof(*ts_last_released_frame));
+
+	spin_unlock_bh(&reo_list->list_lock);
+	spin_unlock(&reo_context->frame_release_lock);
+
+	/* Self re-arming periodic timer */
+	mod_timer(&reo_list->global_mgmt_rx_inactivity_timer, jiffies +
+		 ATH12K_MGMT_RX_REO_GLOBAL_MGMT_RX_INACTIVITY_TIMEOUT);
+}
+
+/**
+ * ath12k_mgmt_rx_reo_list_init() - Initialize the management rx-reorder list
+ * @ab: ath12k base the list belongs to
+ * @reo_list: Pointer to reorder list
+ *
+ * API to initialize the management rx-reorder list. Sets up (but does not
+ * arm) the ageout timer and the global mgmt Rx inactivity timer.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int
+ath12k_mgmt_rx_reo_list_init(struct ath12k_base *ab, struct mgmt_rx_reo_list *reo_list)
+{
+	reo_list->max_list_size = ATH12K_MGMT_RX_REO_LIST_MAX_SIZE;
+	reo_list->list_entry_timeout_us = ATH12K_MGMT_RX_REO_LIST_TIMEOUT_US;
+	reo_list->count = 0;
+	reo_list->ab = ab;
+	INIT_LIST_HEAD(&reo_list->list);
+	spin_lock_init(&reo_list->list_lock);
+
+	timer_setup(&reo_list->ageout_timer,
+		    ath12k_mgmt_rx_reo_list_ageout_timer_handler, 0);
+
+	/* No frame has been released to the upper layer yet */
+	reo_list->ts_last_released_frame.valid = false;
+
+	timer_setup(&reo_list->global_mgmt_rx_inactivity_timer,
+		    ath12k_mgmt_rx_reo_global_mgmt_rx_inactivity_timer_handler, 0);
+
+	return 0;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_flush_reorder_list() - Flush all entries in the reorder list
+ * @ab: ath12k base (used for error logging)
+ * @reo_list: Pointer to reorder list
+ *
+ * API to flush all the entries of the reorder list. This API would acquire
+ * the lock protecting the list. Each entry's frame buffer and rx params are
+ * freed along with the entry itself.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int
+ath12k_mgmt_rx_reo_flush_reorder_list(struct ath12k_base *ab, struct mgmt_rx_reo_list *reo_list)
+{
+	struct mgmt_rx_reo_list_entry *cur_entry, *temp;
+
+	if (!reo_list) {
+		ath12k_err(ab, "reorder list is null\n");
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&reo_list->list_lock);
+
+	list_for_each_entry_safe(cur_entry, temp, &reo_list->list, node) {
+		list_del(&cur_entry->node);
+		/* Entry owns its skb and rx params; release both */
+		dev_kfree_skb(cur_entry->nbuf);
+		kfree(cur_entry->rx_params);
+		kfree(cur_entry);
+	}
+
+	spin_unlock_bh(&reo_list->list_lock);
+
+	return 0;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_list_deinit() - De-initialize the management rx-reorder list
+ * @ab: ath12k base (used for error logging)
+ * @reo_list: Pointer to reorder list
+ *
+ * API to de-initialize the management rx-reorder list: stops the timers
+ * armed on the list and frees all queued entries.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int
+ath12k_mgmt_rx_reo_list_deinit(struct ath12k_base *ab, struct mgmt_rx_reo_list *reo_list)
+{
+	int status;
+
+	/* Both timer handlers re-arm themselves and dereference reo_list;
+	 * stop them synchronously before tearing the list down so a late
+	 * expiry cannot run against a flushed list. Safe even if the timers
+	 * were never armed.
+	 */
+	del_timer_sync(&reo_list->ageout_timer);
+	del_timer_sync(&reo_list->global_mgmt_rx_inactivity_timer);
+
+	status = ath12k_mgmt_rx_reo_flush_reorder_list(ab, reo_list);
+	if (status) {
+		ath12k_err(ab, "Failed to flush the reorder list\n");
+		return status;
+	}
+
+	return 0;
+}
+
+
+/**
+ * ath12k_mgmt_rx_reo_init_context() - Initialize the management Rx reorder
+ * context of the HW group
+ * @ab: ath12k base
+ *
+ * No-op unless MLO memory is available and mgmt Rx reorder is enabled for
+ * the HW group. Idempotent: returns early when init_done is already set.
+ *
+ * Return: 0 on success
+ */
+int ath12k_mgmt_rx_reo_init_context(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_mgmt_rx_reo_context *reo_context = &ag->rx_reo;
+
+	if (!(ag->mlo_mem.is_mlo_mem_avail && ag->mgmt_rx_reorder))
+		return 0;
+
+	if (reo_context->init_done)
+		return 0;
+
+	spin_lock_init(&reo_context->rx_reorder_entry_lock);
+	spin_lock_init(&reo_context->frame_release_lock);
+
+	ath12k_mgmt_rx_reo_list_init(ab, &reo_context->reo_list);
+
+	reo_context->init_done = true;
+
+	return 0;
+}
+
+/**
+ * ath12k_mgmt_rx_reo_deinit_context() - De-initialize the management Rx
+ * reorder context of the HW group
+ * @ab: ath12k base
+ *
+ * Counterpart of ath12k_mgmt_rx_reo_init_context(). No-op unless mgmt Rx
+ * reorder was enabled and the context was initialized.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+int
+ath12k_mgmt_rx_reo_deinit_context(struct ath12k_base *ab)
+{
+	int status;
+	struct ath12k_mgmt_rx_reo_context *reo_context = &ab->ag->rx_reo;
+	struct ath12k_hw_group *ag = ab->ag;
+
+	if (!(ag->mlo_mem.is_mlo_mem_avail && ag->mgmt_rx_reorder))
+		return 0;
+
+	status = ath12k_mgmt_rx_reo_list_deinit(ab, &reo_context->reo_list);
+	if (status) {
+		ath12k_err(ab, "Failed to de-initialize mgmt Rx reo list\n");
+		return status;
+	}
+
+	reo_context->init_done = false;
+
+	return status;
+}
+
+/* Handler for the FW-consumed mgmt Rx event of the MLO mgmt Rx reorder
+ * machinery: firmware consumed the frame itself (so there is no skb), but
+ * its reorder parameters still flow through the reorder list so cross-link
+ * ordering stays consistent. No-op unless MLO memory and mgmt Rx reorder
+ * are enabled for the HW group.
+ */
+static void ath12k_fw_consumed_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_wmi_mgmt_rx_arg *rx_ev;
+	struct ath12k *ar;
+	struct ath12k_mgmt_rx_reo_frame_descriptor desc = {0};
+	bool is_queued = false;
+	int ret;
+
+	if (!(ag->mlo_mem.is_mlo_mem_avail && ag->mgmt_rx_reorder))
+		return;
+
+	rx_ev = kmalloc(sizeof(*rx_ev), GFP_ATOMIC);
+	if (!rx_ev) {
+		ath12k_err(ab, "failed to allocate rx event\n");
+		return;
+	}
+
+	if (ath12k_pull_fw_consumed_mgmt_rx_params_tlv(ab, skb, rx_ev) != 0) {
+		ath12k_warn(ab, "failed to extract mgmt rx event");
+		kfree(rx_ev);
+		return;
+	}
+
+	rcu_read_lock();
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev->pdev_id);
+
+	if (!ar) {
+		ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
+			    rx_ev->pdev_id);
+		goto exit;
+	}
+
+	rx_ev->reo_params.link_id = rx_ev->pdev_id;
+
+	/* Populate frame descriptor */
+	/* fix: this event reports a firmware-consumed frame; the descriptor
+	 * type was wrongly copy/pasted as HOST_CONSUMED_FRAME.
+	 */
+	desc.type = ATH12K_MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME;
+	desc.nbuf = NULL; /* No frame buffer */
+	desc.rx_params = rx_ev;
+	desc.ingress_timestamp = ktime_to_us(ktime_get());
+	desc.list_size_rx = -1;
+	desc.list_insertion_pos = -1;
+
+	desc.frame_type = IEEE80211_FTYPE_MGMT;
+	desc.frame_subtype = 0xFF;
+
+	desc.reo_required = false;
+
+	ret = ath12k_wmi_mgmt_rx_reorder_process_entry(ar, &desc, &is_queued);
+	if (ret) {
+		ath12k_warn(ab, "Failed to execute MGMT REO reorder process\n");
+		goto exit;
+	}
+
+	/* FW-consumed descriptors carry no skb and must never be queued */
+	if (is_queued)
+		WARN_ON(1);
+
+exit:
+	/* if the frame is queued dont free it here
+	 * it will be taken care by the mgmt rx reorder
+	 * process
+	 */
+	if (!is_queued)
+		kfree(rx_ev);
+
+	rcu_read_unlock();
+}
+
 static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
 {
-	struct ath12k_wmi_mgmt_rx_arg rx_ev = {0};
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_wmi_mgmt_rx_arg *rx_ev;
 	struct ath12k *ar;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct ieee80211_hdr *hdr;
 	u16 fc;
 	struct ieee80211_supported_band *sband;
+	struct ath12k_peer *peer;
+	struct ieee80211_vif *vif;
+	struct ath12k_vif *ahvif;
+	struct ath12k_mgmt_frame_stats *mgmt_stats;
+	u16 frm_type = 0;
+	struct ath12k_mgmt_rx_reo_frame_descriptor desc = {0};
+	bool is_queued = false;
+	int ret;
 
-	if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
+	rx_ev = kmalloc(sizeof(*rx_ev), GFP_ATOMIC);
+	if (!rx_ev) {
+		dev_kfree_skb(skb);
+		ath12k_err(ab, "failed to allocate rx event\n");
+		return;
+	}
+
+
+	if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, rx_ev) != 0) {
 		ath12k_warn(ab, "failed to extract mgmt rx event");
 		dev_kfree_skb(skb);
+		kfree(rx_ev);
 		return;
 	}
 
 	memset(status, 0, sizeof(*status));
 
 	ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
-		   rx_ev.status);
+		   rx_ev->status);
 
 	rcu_read_lock();
-	ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev->pdev_id);
 
 	if (!ar) {
 		ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
-			    rx_ev.pdev_id);
+			    rx_ev->pdev_id);
 		dev_kfree_skb(skb);
 		goto exit;
 	}
 
 	if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) ||
-	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
+	    (rx_ev->status & (WMI_RX_STATUS_ERR_DECRYPT |
 			     WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
 			     WMI_RX_STATUS_ERR_CRC))) {
 		dev_kfree_skb(skb);
 		goto exit;
 	}
 
-	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
+	if (rx_ev->status & WMI_RX_STATUS_ERR_MIC)
 		status->flag |= RX_FLAG_MMIC_ERROR;
 
-	if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) {
+	if (rx_ev->chan_freq >= ATH12K_MIN_6G_FREQ &&
+	    rx_ev->chan_freq <= ATH12K_MAX_6G_FREQ) {
 		status->band = NL80211_BAND_6GHZ;
-	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
+		status->freq = rx_ev->chan_freq;
+	} else if (rx_ev->channel >= 1 && rx_ev->channel <= 14) {
 		status->band = NL80211_BAND_2GHZ;
-	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
+	} else if (rx_ev->channel >= 36 && rx_ev->channel <= ATH12K_MAX_5G_CHAN) {
 		status->band = NL80211_BAND_5GHZ;
 	} else {
 		/* Shouldn't happen unless list of advertised channels to
@@ -5516,20 +11772,50 @@
 		goto exit;
 	}
 
-	if (rx_ev.phy_mode == MODE_11B &&
+	if (rx_ev->phy_mode == MODE_11B &&
 	    (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
 		ath12k_dbg(ab, ATH12K_DBG_WMI,
 			   "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
 
 	sband = &ar->mac.sbands[status->band];
 
-	status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
+	if (status->band != NL80211_BAND_6GHZ)
+		status->freq = ieee80211_channel_to_frequency(rx_ev->channel,
 						      status->band);
-	status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
-	status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
+
+	status->signal = rx_ev->snr + ar->rssi_offsets.rssi_offset;
+	status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev->rate / 100);
 
 	hdr = (struct ieee80211_hdr *)skb->data;
 	fc = le16_to_cpu(hdr->frame_control);
+	frm_type = FIELD_GET(IEEE80211_FCTL_STYPE, fc);
+
+	spin_lock_bh(&ab->base_lock);
+
+	peer = ath12k_peer_find_by_addr(ab, hdr->addr1);
+	if(!peer)
+		peer = ath12k_peer_find_by_addr(ab, hdr->addr3);
+	if (!peer) {
+		spin_unlock_bh(&ab->base_lock);
+		goto skip_mgmt_stats;
+	}
+
+	vif = peer->vif;
+
+	spin_unlock_bh(&ab->base_lock);
+
+	if (!vif)
+		goto skip_mgmt_stats;
+
+	spin_lock_bh(&ar->data_lock);
+
+	ahvif = ath12k_vif_to_ahvif(vif);
+	mgmt_stats = &ahvif->mgmt_stats;
+	mgmt_stats->rx_cnt[frm_type]++;
+
+	spin_unlock_bh(&ar->data_lock);
+
+skip_mgmt_stats:
 
 	/* Firmware is guaranteed to report all essential management frames via
 	 * WMI while it can deliver some extra via HTT. Since there can be
@@ -5556,6 +11842,43 @@
 	 *	ath12k_mac_handle_beacon(ar, skb);
 	 */
 
+	if (!(ag->mlo_mem.is_mlo_mem_avail && ag->mgmt_rx_reorder))
+		goto pass_up;
+
+	if (!rx_ev->reo_params.valid) {
+		ath12k_warn(ab, "Invalid MGMT rx REO param for link %u\n",
+			    ar->pdev->hw_link_id);
+		goto pass_up;
+	}
+
+	rx_ev->reo_params.link_id = ar->pdev->hw_link_id;
+
+	/* Populate frame descriptor */
+	desc.type = ATH12K_MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME;
+	desc.nbuf = skb;
+	desc.rx_params = rx_ev;
+	desc.ingress_timestamp = ktime_to_us(ktime_get());
+	desc.list_size_rx = -1;
+	desc.list_insertion_pos = -1;
+
+	desc.frame_type = FIELD_GET(IEEE80211_FCTL_FTYPE, fc);
+	desc.frame_subtype = frm_type;
+
+	desc.reo_required = true;
+	ret = ath12k_wmi_mgmt_rx_reorder_process_entry(ar, &desc, &is_queued);
+	if (ret) {
+		ath12k_warn(ab, "Failed to execute MGMT REO reorder process\n");
+		dev_kfree_skb(skb);
+		goto exit;
+	}
+
+	/**
+	 *  If frame is queued, we shouldn't free up rx params
+	 */
+	if (is_queued)
+		goto exit;
+
+pass_up:
 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
 		   "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
 		   skb, skb->len,
@@ -5566,9 +11889,16 @@
 		   status->freq, status->band, status->signal,
 		   status->rate_idx);
 
-	ieee80211_rx_ni(ar->hw, skb);
+	ieee80211_rx_ni(ar->ah->hw, skb);
 
 exit:
+	/* if the frame is queued dont free it here
+	 * it will be taken care by the mgmt rx reorder
+	 * process
+	 */
+	if (!is_queued)
+		kfree(rx_ev);
+
 	rcu_read_unlock();
 }
 
@@ -5591,7 +11921,8 @@
 	}
 
 	wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
-				 le32_to_cpu(tx_compl_param.status));
+				 le32_to_cpu(tx_compl_param.status),
+				 le32_to_cpu(tx_compl_param.ack_rssi));
 
 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
 		   "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
@@ -5602,8 +11933,42 @@
 	rcu_read_unlock();
 }
 
-static struct ath12k *ath12k_get_ar_on_scan_abort(struct ath12k_base *ab,
-						  u32 vdev_id)
+/* WMI off-channel data tx completion handler: looks up the radio by pdev id
+ * and forwards the descriptor id and status to wmi_process_offchan_tx_comp().
+ */
+static void ath12k_offchan_tx_completion_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+	struct wmi_offchan_data_tx_compl_event offchan_tx_cmpl_params = {0};
+	u32 desc_id;
+	u32 pdev_id;
+	u32 status;
+	struct ath12k *ar;
+
+	if (ath12k_pull_offchan_tx_compl_param_tlv(ab, skb, &offchan_tx_cmpl_params) != 0) {
+		/* fix: message was copy/pasted from the mgmt tx compl handler */
+		ath12k_warn(ab, "failed to extract offchan data tx compl event");
+		return;
+	}
+	status  = __le32_to_cpu(offchan_tx_cmpl_params.status);
+	pdev_id = __le32_to_cpu(offchan_tx_cmpl_params.pdev_id);
+	desc_id = __le32_to_cpu(offchan_tx_cmpl_params.desc_id);
+
+	rcu_read_lock();
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+	if (!ar) {
+		ath12k_warn(ab, "invalid pdev id %d in offchan_tx_compl_event\n",
+			    pdev_id);
+		goto exit;
+	}
+
+	wmi_process_offchan_tx_comp(ar, desc_id, status);
+
+	ath12k_dbg(ab, ATH12K_DBG_MGMT,
+		   "off chan tx compl ev pdev_id %d, desc_id %d, status %d",
+		   pdev_id, desc_id, status);
+exit:
+	rcu_read_unlock();
+}
+
+static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
+						  u32 vdev_id,
+						  enum ath12k_scan_state state)
 {
 	int i;
 	struct ath12k_pdev *pdev;
@@ -5615,7 +11980,7 @@
 			ar = pdev->ar;
 
 			spin_lock_bh(&ar->data_lock);
-			if (ar->scan.state == ATH12K_SCAN_ABORTING &&
+			if (ar->scan.state == state &&
 			    ar->scan.vdev_id == vdev_id) {
 				spin_unlock_bh(&ar->data_lock);
 				return ar;
@@ -5645,9 +12010,13 @@
 	 * aborting scan's vdev id matches this event info.
 	 */
 	if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
-	    le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED)
-		ar = ath12k_get_ar_on_scan_abort(ab, le32_to_cpu(scan_ev.vdev_id));
-	else
+	    le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) {
+		ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
+						 ATH12K_SCAN_ABORTING);
+		if(!ar)
+			ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
+							 ATH12K_SCAN_RUNNING);
+	} else
 		ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
 
 	if (!ar) {
@@ -5688,6 +12057,8 @@
 		ath12k_wmi_event_scan_start_failed(ar);
 		break;
 	case WMI_SCAN_EVENT_DEQUEUED:
+		__ath12k_mac_scan_finish(ar);
+		break;
 	case WMI_SCAN_EVENT_PREEMPTED:
 	case WMI_SCAN_EVENT_RESTARTED:
 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
@@ -5706,6 +12077,7 @@
 	struct ieee80211_sta *sta;
 	struct ath12k_peer *peer;
 	struct ath12k *ar;
+	unsigned int link_id;
 
 	if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
 		ath12k_warn(ab, "failed to extract peer sta kickout event");
@@ -5731,16 +12103,27 @@
 		goto exit;
 	}
 
-	sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+	if (peer->mlo)
+		sta = ieee80211_find_sta_by_link_addrs(ar->ah->hw, arg.mac_addr,
+						       NULL, &link_id);
+	else
+		sta = ieee80211_find_sta_by_ifaddr(ar->ah->hw,
 					   arg.mac_addr, NULL);
 	if (!sta) {
-		ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
-			    arg.mac_addr);
+		ath12k_warn(ab, "Spurious quick kickout for %sSTA %pM\n",
+			    peer->mlo ? "MLO " : "", arg.mac_addr);
 		goto exit;
 	}
 
-	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
-		   arg.mac_addr);
+	if (peer->mlo && peer->link_id != link_id) {
+		ath12k_warn(ab,
+			    "Spurious quick kickout for MLO STA %pM with invalid link_id, peer: %d, sta: %d\n",
+			    arg.mac_addr, peer->link_id, link_id);
+		goto exit;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_PEER, "peer sta kickout event %pM reason:%d",
+		   arg.mac_addr, arg.reason);
 
 	ieee80211_report_low_ack(sta, 10);
 
@@ -5928,12 +12311,14 @@
 	survey->noise     = le32_to_cpu(bss_ch_info_ev.noise_floor);
 	survey->time      = div_u64(total, cc_freq_hz);
 	survey->time_busy = div_u64(busy, cc_freq_hz);
-	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
+	survey->time_rx   = div_u64(rx, cc_freq_hz);
 	survey->time_tx   = div_u64(tx, cc_freq_hz);
+	survey->time_bss_rx = div_u64(rx_bss, cc_freq_hz);
 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
 			     SURVEY_INFO_TIME |
 			     SURVEY_INFO_TIME_BUSY |
 			     SURVEY_INFO_TIME_RX |
+			     SURVEY_INFO_TIME_BSS_RX |
 			     SURVEY_INFO_TIME_TX);
 exit:
 	spin_unlock_bh(&ar->data_lock);
@@ -5979,31 +12364,27 @@
 	rcu_read_unlock();
 }
 
-static void ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
+static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
+					  u16 tag, u16 tag_len,
+					  const void *ptr,
+					  void *data)
 {
-	const void **tb;
 	const struct wmi_service_available_event *ev;
-	int ret;
+	u32 *wmi_ext2_service_bitmap;
 	int i, j;
+	u16 expected_len;
+	u16 wmi_max_ext2_service_words;
 
-	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
-	if (IS_ERR(tb)) {
-		ret = PTR_ERR(tb);
-		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
-		return;
-	}
-
-	ev = tb[WMI_TAG_SERVICE_AVAILABLE_EVENT];
-	if (!ev) {
-		ath12k_warn(ab, "failed to fetch svc available ev");
-		kfree(tb);
-		return;
+	expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32);
+	if (tag_len < expected_len) {
+		ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n",
+			    tag_len, tag);
+		return -EINVAL;
 	}
 
-	/* TODO: Use wmi_service_segment_offset information to get the service
-	 * especially when more services are advertised in multiple service
-	 * available events.
-	 */
+	switch (tag) {
+	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
+		ev = (struct wmi_service_available_event *)ptr;
 	for (i = 0, j = WMI_MAX_SERVICE;
 	     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
 	     i++) {
@@ -6015,11 +12396,42 @@
 	}
 
 	ath12k_dbg(ab, ATH12K_DBG_WMI,
-		   "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x",
-		   ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1],
-		   ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]);
+			   "wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x",
+			   ev->wmi_service_segment_bitmap[0],
+			   ev->wmi_service_segment_bitmap[1],
+			   ev->wmi_service_segment_bitmap[2],
+			   ev->wmi_service_segment_bitmap[3]);
+		break;
+	case WMI_TAG_ARRAY_UINT32:
+		wmi_ext2_service_bitmap = (u32 *)ptr;
+		wmi_max_ext2_service_words = tag_len / sizeof(u32);
+		for (i = 0, j = WMI_MAX_EXT_SERVICE;
+		     i < wmi_max_ext2_service_words && j < WMI_MAX_EXT2_SERVICE;
+		     i++) {
+			do {
+				if (wmi_ext2_service_bitmap[i] &
+				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+					set_bit(j, ab->wmi_ab.svc_map);
+			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+		}
 
-	kfree(tb);
+		ath12k_dbg(ab, ATH12K_DBG_WMI,
+			   "wmi_ext2_service_bitmap 0x%04x 0x%04x 0x%04x 0x%04x",
+			   wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
+			   wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
+		break;
+	}
+	return 0;
+}
+
+/* WMI service-available event handler: walks the event TLVs with
+ * ath12k_wmi_tlv_services_parser, which populates ab->wmi_ab.svc_map with
+ * the extended (and ext2) service bits advertised by firmware.
+ */
+static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+	int ret;
+
+	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+				  ath12k_wmi_tlv_services_parser,
+				  NULL);
+	return ret;
 }
 
 static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
@@ -6052,6 +12464,57 @@
 
+/* WMI update-stats event handler: pulls firmware stats from the event,
+ * then either splices pdev stats straight onto ar->fw_stats (PDEV stat
+ * requests may come from .get_txpower or debugfs) or hands the parsed
+ * stats to the debugfs processing path, and signals waiters via
+ * ar->fw_stats_complete.
+ */
 static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
 {
+	struct ath12k_fw_stats stats = {};
+	struct ath12k *ar;
+	int ret;
+
+	INIT_LIST_HEAD(&stats.pdevs);
+	INIT_LIST_HEAD(&stats.vdevs);
+	INIT_LIST_HEAD(&stats.bcn);
+
+	ret = ath12k_wmi_pull_fw_stats(ab, skb, &stats);
+	if (ret) {
+		ath12k_warn(ab, "failed to pull fw stats: %d\n", ret);
+		goto free;
+	}
+
+	rcu_read_lock();
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
+	if (!ar) {
+		rcu_read_unlock();
+		/* note: ret is 0 on this path, so the "%d" below prints 0 */
+		ath12k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
+			    stats.pdev_id, ret);
+		goto free;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+
+	/* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via
+	 * debugfs fw stats. Therfore, processing it separately.
+	 */
+	if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
+		list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
+		ar->fw_stats_done = true;
+		goto complete;
+	}
+
+	/* WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT are currently requested
+	 * only via debugfs fw stats. Hence, processing these two in debugfs context
+	 */
+	ath12k_debugfs_fw_stats_process(ar, &stats);
+
+complete:
+	complete(&ar->fw_stats_complete);
+	/* NOTE(review): complete() runs while data_lock is held, and the RCU
+	 * read section ends before data_lock is dropped — confirm this
+	 * ordering is intentional.
+	 */
+	rcu_read_unlock();
+	spin_unlock_bh(&ar->data_lock);
+
+	/* Since the stats's pdev, vdev and beacon list are spliced and reinitialised
+	 * at this point, no need to free the individual list.
+	 */
+	return;
+
+free:
+	ath12k_fw_stats_free(&stats);
 }
 
 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
@@ -6098,11 +12561,8 @@
 					  const u32 *vdev_ids)
 {
 	int i;
-	struct ath12k_vif *arvif;
-
-	/* Finish CSA once the switch count becomes NULL */
-	if (ev->current_switch_count)
-		return;
+	struct ath12k_link_vif *arvif;
+	struct ieee80211_bss_conf *link_conf;
 
 	rcu_read_lock();
 	for (i = 0; i < le32_to_cpu(ev->num_vdevs); i++) {
@@ -6113,9 +12573,24 @@
 				    vdev_ids[i]);
 			continue;
 		}
+		link_conf = rcu_dereference(arvif->ahvif->vif->link_conf[arvif->link_id]);
+ 
+		if (!link_conf)
+			continue;
 
-		if (arvif->is_up && arvif->vif->bss_conf.csa_active)
-			ieee80211_csa_finish(arvif->vif);
+		/* FIXME some changes are expected for ML vifs */
+		if (arvif->is_up && (link_conf->csa_active || link_conf->color_change_active)) {
+			if (!ev->current_switch_count) {
+				if (link_conf->csa_active)
+					ieee80211_csa_finish_mlo(arvif->ahvif->vif, arvif->link_id);
+			} else if (ev->current_switch_count > 1) {
+				ieee80211_beacon_update_cntdwn_mlo(arvif->ahvif->vif,
+							       arvif->link_id);
+			} else {
+				if (link_conf->color_change_active)
+					ieee80211_color_change_finish_mlo(arvif->ahvif->vif, arvif->link_id);
+			}
+		}
 	}
 	rcu_read_unlock();
 }
@@ -6198,32 +12673,530 @@
 	if (ar->dfs_block_radar_events)
 		ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
 	else
-		ieee80211_radar_detected(ar->hw);
+		ieee80211_radar_detected(ar->ah->hw);
 
 exit:
 	kfree(tb);
 }
+static int ath12k_wmi_awgn_intf_subtlv_parser(struct ath12k_base *ab,
+                                              u16 tag, u16 len,
+                                              const void *ptr, void *data)
+{
+        int ret = 0;
+        struct wmi_dcs_awgn_info *awgn_info;
+
+        switch (tag) {
+        case WMI_TAG_DCS_AWGN_INT_TYPE:
+                awgn_info = (struct wmi_dcs_awgn_info *)ptr;
+
+                ath12k_dbg(ab, ATH12K_DBG_WMI,
+                           "AWGN Info: channel width: %d, chan freq: %d, center_freq0: %d, center_freq1: %d, bw_intf_bitmap: %d\n",
+                           awgn_info->channel_width, awgn_info->chan_freq, awgn_info->center_freq0, awgn_info->center_freq1,
+                           awgn_info->chan_bw_interference_bitmap);
+                memcpy(data, awgn_info, sizeof(*awgn_info));
+                break;
+        default:
+                ath12k_warn(ab,
+                            "Received invalid tag for wmi dcs interference in subtlvs\n");
+                return -EINVAL;
+                break;
+        }
+
+        return ret;
+}
+
+static int ath12k_wmi_dcs_awgn_event_parser(struct ath12k_base *ab,
+                                            u16 tag, u16 len,
+                                            const void *ptr, void *data)
+{
+        int ret = 0;
+
+        ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi dcs awgn event tag 0x%x of len %d rcvd\n",
+                   tag, len);
+
+        switch (tag) {
+        case WMI_TAG_DCS_INTERFERENCE_EVENT:
+                /* Fixed param is already processed */
+                break;
+        case WMI_TAG_ARRAY_STRUCT:
+                /* len 0 is expected for array of struct when there
+                 * is no content of that type to pack inside that tlv
+                 */
+                if (len == 0)
+                        return 0;
+                ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+                                          ath12k_wmi_awgn_intf_subtlv_parser,
+                                          data);
+                break;
+        default:
+                ath12k_warn(ab, "Received invalid tag for wmi dcs interference event\n");
+                ret = -EINVAL;
+                break;
+        }
+
+        return ret;
+}
+
+bool ath12k_wmi_validate_dcs_awgn_info(struct ath12k *ar, struct wmi_dcs_awgn_info *awgn_info)
+{
+        spin_lock_bh(&ar->data_lock);
+
+        if (!ar->rx_channel) {
+                spin_unlock_bh(&ar->data_lock);
+                return false;
+        }
+
+        if (awgn_info->chan_freq != ar->rx_channel->center_freq) {
+                spin_unlock_bh(&ar->data_lock);
+                ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+                           "dcs interference event received with wrong channel %d",awgn_info->chan_freq);
+                return false;
+        }
+        spin_unlock_bh(&ar->data_lock);
+
+        switch (awgn_info->channel_width) {
+        case WMI_HOST_CHAN_WIDTH_20:
+                if (awgn_info->chan_bw_interference_bitmap > WMI_DCS_SEG_PRI20) {
+                        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+                                   "dcs interference event received with wrong chan width bmap %d for 20MHz",
+                                   awgn_info->chan_bw_interference_bitmap);
+                        return false;
+                }
+                break;
+        case WMI_HOST_CHAN_WIDTH_40:
+                if (awgn_info->chan_bw_interference_bitmap > WMI_DCS_SEG_SEC20) {
+                        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+                                   "dcs interference event received with wrong chan width bmap %d for 40MHz",
+                                   awgn_info->chan_bw_interference_bitmap);
+                        return false;
+                }
+                break;
+        case WMI_HOST_CHAN_WIDTH_80:
+                if (awgn_info->chan_bw_interference_bitmap > WMI_DCS_SEG_SEC40) {
+                        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+                                   "dcs interference event received with wrong chan width bmap %d for 80MHz",
+                                   awgn_info->chan_bw_interference_bitmap);
+                        return false;
+                }
+                break;
+        case WMI_HOST_CHAN_WIDTH_160:
+        case WMI_HOST_CHAN_WIDTH_80P80:
+                if (awgn_info->chan_bw_interference_bitmap > WMI_DCS_SEG_SEC80) {
+                        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+                                   "dcs interference event received with wrong chan width bmap %d for 80P80/160MHz",
+                                   awgn_info->chan_bw_interference_bitmap);
+                        return false;
+                }
+                break;
+	case WMI_HOST_CHAN_WIDTH_320:
+		if (awgn_info->chan_bw_interference_bitmap > WMI_DCS_SEG_SEC160) {
+			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+				   "dcs interference event received with wrong chan width bmap %d for 320MHz",
+				   awgn_info->chan_bw_interference_bitmap);
+			return false;
+		}
+		break;
+        default:
+                ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+                           "dcs interference event received with unknown channel width %d",
+                           awgn_info->channel_width);
+                return false;
+        }
+        return true;
+}
+
+static void
+ath12k_wmi_dcs_awgn_interference_event(struct ath12k_base *ab,
+                                       struct sk_buff *skb)
+{
+        const struct wmi_dcs_interference_ev *dcs_intf_ev;
+        struct wmi_dcs_awgn_info awgn_info = {};
+        struct cfg80211_chan_def *chandef;
+        struct ath12k_mac_any_chandef_arg arg;
+        struct ath12k *ar;
+        const struct wmi_tlv *tlv;
+	struct ath12k_hw *ah;
+        u16 tlv_tag;
+        u8 *ptr;
+        int ret;
+
+        if (!test_bit(WMI_TLV_SERVICE_DCS_AWGN_INT_SUPPORT, ab->wmi_ab.svc_map)) {
+                ath12k_warn(ab, "firmware doesn't support awgn interference, so dropping dcs interference ev\n");
+                return;
+        }
+
+        ptr = skb->data;
+
+        if (skb->len < (sizeof(*dcs_intf_ev) + TLV_HDR_SIZE)) {
+                ath12k_warn(ab, "dcs interference event size invalid\n");
+                return;
+        }
+
+        tlv = (struct wmi_tlv *)ptr;
+        tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
+        ptr += sizeof(*tlv);
+
+        if (tlv_tag == WMI_TAG_DCS_INTERFERENCE_EVENT) {
+                dcs_intf_ev = (struct wmi_dcs_interference_ev*)ptr;
+
+                ath12k_dbg(ab, ATH12K_DBG_WMI,
+                           "pdev awgn detected on pdev %d, interference type %d\n",
+                           dcs_intf_ev->pdev_id, dcs_intf_ev->interference_type);
+
+                if (dcs_intf_ev->interference_type != WMI_DCS_AWGN_INTF) {
+                        ath12k_warn(ab, "interference type is not awgn\n");
+                        return;
+                }
+        } else {
+                ath12k_warn(ab, "dcs interference event received with wrong tag\n");
+                return;
+        }
+
+        ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+                                  ath12k_wmi_dcs_awgn_event_parser,
+                                  &awgn_info);
+        if (ret) {
+                ath12k_warn(ab, "failed to parse awgn tlv %d\n", ret);
+                return;
+        }
+
+        rcu_read_lock();
+        ar = ath12k_mac_get_ar_by_pdev_id(ab, dcs_intf_ev->pdev_id);
+        if (!ar) {
+                ath12k_warn(ab, "awgn detected in invalid pdev id(%d)\n",
+                            dcs_intf_ev->pdev_id);
+                goto exit;
+        }
+	if (!ar->supports_6ghz) {
+                ath12k_warn(ab, "pdev does not supports 6G, so dropping dcs interference event\n");
+                goto exit;
+        }
+
+	spin_lock_bh(&ar->data_lock);
+        if (ar->awgn_intf_handling_in_prog) {
+		spin_unlock_bh(&ar->data_lock);
+                rcu_read_unlock();
+		return;
+	}
+	spin_unlock_bh(&ar->data_lock);
+
+        if (!ath12k_wmi_validate_dcs_awgn_info(ar, &awgn_info)) {
+                ath12k_warn(ab, "Invalid DCS AWGN TLV - Skipping event");
+                goto exit;
+        }
+
+	ah = ar->ah;
+
+        ath12k_info(ab, "Interface(pdev %d) : AWGN interference detected\n",
+                    dcs_intf_ev->pdev_id);
+
+	arg.ar = ar;
+	arg.def = NULL;
+
+        ieee80211_iter_chan_contexts_atomic(ah->hw, ath12k_mac_get_any_chandef_iter,
+                                            &arg);
+        chandef = arg.def;
+        if (!chandef) {
+                ath12k_warn(ab, "chandef is not available\n");
+                goto exit;
+        }
+        ar->awgn_chandef = *chandef;
+
+	/* ieee80211_awgn_detected(ah->hw, awgn_info.chan_bw_interference_bitmap, */
+	/* 			chandef->chan); */
+
+        spin_lock_bh(&ar->data_lock);
+        ar->awgn_intf_handling_in_prog = true;
+        ar->chan_bw_interference_bitmap = awgn_info.chan_bw_interference_bitmap;
+        spin_unlock_bh(&ar->data_lock);
+
+        ath12k_dbg(ab, ATH12K_DBG_WMI, "AWGN : Interference handling started\n");
+exit:
+        rcu_read_unlock();
+}
+
+#ifdef CONFIG_ATH12K_PKTLOG
+static void
+ath12k_wmi_pktlog_decode_info(struct ath12k_base *ab,
+                                  struct sk_buff *skb)
+{
+	struct ath12k *ar;
+        const void **tb;
+	int ret;
+	struct ath_pktlog *pktlog;
+	struct ath_pl_fw_info *pktlog_info;
+
+	if (!test_bit(WMI_TLV_SERVICE_PKTLOG_DECODE_INFO_SUPPORT, ab->wmi_ab.svc_map)) {
+                ath12k_warn(ab, "firmware doesn't support pktlog decode info support\n");
+                return;
+        }
+	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+        if (IS_ERR(tb)) {
+                ret = PTR_ERR(tb);
+                ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+                return;
+        }
+	pktlog_info = tb[WMI_TAG_PDEV_PKTLOG_DECODE_INFO];
+        if (!pktlog_info) {
+                ath12k_warn(ab, "failed to fetch pktlog debug info");
+                kfree(tb);
+                return;
+        }
+
+	pktlog_info->pdev_id = DP_SW2HW_MACID(pktlog_info->pdev_id);
+        ath12k_dbg(ab, ATH12K_DBG_WMI,
+                   "pktlog pktlog_defs_json_version: %d", pktlog_info->pktlog_defs_json_version);
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+                   "pktlog software_image: %s", pktlog_info->software_image);
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+                   "pktlog chip_info: %s", pktlog_info->chip_info);
+	 ath12k_dbg(ab, ATH12K_DBG_WMI,
+                   "pktlog pdev_id: %d", pktlog_info->pdev_id);
+
+        ar = ath12k_mac_get_ar_by_pdev_id(ab, pktlog_info->pdev_id);
+        if (!ar) {
+                ath12k_warn(ab, "invalid pdev id in pktlog decode info %d", pktlog_info->pdev_id);
+                kfree(tb);
+                return;
+        }
+	pktlog = &ar->debug.pktlog;
+	pktlog->fw_version_record = 1;
+	if (pktlog->buf == NULL) {
+		ath12k_warn(ab, "failed to initialize, start pktlog\n");
+		kfree(tb);
+		return;
+	}
+	pktlog->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM_FW_VERSION_SUPPORT;
+	memcpy(pktlog->buf->bufhdr.software_image, pktlog_info->software_image, sizeof(pktlog_info->software_image));
+        memcpy(pktlog->buf->bufhdr.chip_info, pktlog_info->chip_info, sizeof(pktlog_info->chip_info));
+        pktlog->buf->bufhdr.pktlog_defs_json_version = pktlog_info->pktlog_defs_json_version;
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+                   "pktlog new magic_num: 0x%x\n", pktlog->buf->bufhdr.magic_num);
+	kfree(tb);
+}
+#endif
+
+static int
+ath12k_wmi_rssi_dbm_conv_subtlv_parser(struct ath12k_base *ab,
+				      u16 tag, u16 len,
+				      const void *ptr, void *data)
+{
+	struct wmi_rssi_dbm_conv_offsets *rssi_offsets =
+		(struct wmi_rssi_dbm_conv_offsets *) data;
+	struct wmi_rssi_dbm_conv_param_info *param_info;
+	struct wmi_rssi_dbm_conv_temp_offset *temp_offset_info;
+	int i, ret = 0;
+	s8 min_nf = 0;
+
+	switch (tag) {
+	case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO:
+		if (len != sizeof(*param_info)) {
+			ath12k_warn(ab, "wmi rssi dbm conv subtlv 0x%x invalid len rcvd",
+				    tag);
+			return -EINVAL;
+		}
+		param_info = (struct wmi_rssi_dbm_conv_param_info *)ptr;
+
+		/* Use the minimum pri20 noise floor across the active chains
+		 * instead of all sub-bands. */
+		for (i = 0; i < MAX_NUM_ANTENNA; i++) {
+			if (param_info->curr_rx_chainmask & (0x01 << i))
+				min_nf = min(param_info->nf_hw_dbm[i][0], min_nf);
+		}
+		rssi_offsets->min_nf_dbm = min_nf;
+		break;
+	case WMI_TAG_RSSI_DBM_CONVERSION_TEMP_OFFSET_INFO:
+		if (len != sizeof(*temp_offset_info)) {
+			ath12k_warn(ab, "wmi rssi dbm conv subtlv 0x%x invalid len rcvd",
+				    tag);
+			return -EINVAL;
+		}
+		temp_offset_info = (struct wmi_rssi_dbm_conv_temp_offset *)ptr;
+		rssi_offsets->rssi_temp_offset = temp_offset_info->rssi_temp_offset;
+		break;
+	default:
+		ath12k_warn(ab, "Received invalid sub-tag for wmi rssi dbm conversion\n");
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static int
+ath12k_wmi_rssi_dbm_conv_event_parser(struct ath12k_base *ab,
+				      u16 tag, u16 len,
+				      const void *ptr, void *data)
+{
+	int ret = 0;
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi rssi dbm conv tag 0x%x of len %d rcvd",
+		   tag, len);
+	switch (tag) {
+	case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM:
+		/* Fixed param is already processed */
+		break;
+	case WMI_TAG_ARRAY_STRUCT:
+		/* len 0 is expected for array of struct when there
+		 * is no content of that type inside that tlv
+		 */
+		if (len == 0)
+			return ret;
+		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+					  ath12k_wmi_rssi_dbm_conv_subtlv_parser,
+					  data);
+		break;
+	default:
+		ath12k_warn(ab, "Received invalid tag for wmi rssi dbm conv interference event\n");
+		ret = -EINVAL;
+		break;
+
+	}
+
+	return ret;
+}
+
+static struct
+ath12k *ath12k_wmi_rssi_dbm_process_fixed_param( struct ath12k_base *ab,
+						 u8 *ptr, size_t len)
+{
+	struct ath12k *ar;
+	const struct wmi_tlv *tlv;
+	struct wmi_rssi_dbm_conv_event_fixed_param *fixed_param;
+	u16 tlv_tag;
+
+	if(!ptr) {
+		ath12k_warn(ab, "No data present in rssi dbm conv event\n");
+		return NULL;
+	}
+
+	if (len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
+		ath12k_warn(ab, "rssi dbm conv event size invalid\n");
+		return NULL;
+	}
+
+	tlv = (struct wmi_tlv *)ptr;
+	tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
+	ptr += sizeof(*tlv);
+
+	if (tlv_tag == WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM) {
+		fixed_param = (struct wmi_rssi_dbm_conv_event_fixed_param *)ptr;
+
+		ar = ath12k_mac_get_ar_by_pdev_id(ab, fixed_param->pdev_id);
+		if (!ar) {
+			ath12k_warn(ab, "Failed to get ar for rssi dbm conv event\n");
+			return NULL;
+		}
+	} else {
+		ath12k_warn(ab, "rssi dbm conv event received without fixed param tlv at start\n");
+		return NULL;
+	}
+
+	return ar;
+}
+
+static void ath12k_wmi_rssi_dbm_conversion_param_info(struct ath12k_base *ab,
+						      struct sk_buff *skb)
+{
+	struct ath12k *ar;
+	struct wmi_rssi_dbm_conv_offsets *rssi_offsets;
+	int ret, i;
+
+	/* if pdevs are not active ignore the event */
+	for (i = 0; i < ab->num_radios; i++) {
+		if (!ab->pdevs_active[i])
+			return;
+	}
+
+	ar = ath12k_wmi_rssi_dbm_process_fixed_param(ab, skb->data,
+						     skb->len);
+	if(!ar) {
+		ath12k_warn(ab, "failed to get ar from rssi dbm conversion event\n");
+		return;
+	}
+
+	rssi_offsets = &ar->rssi_offsets;
+	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+				  ath12k_wmi_rssi_dbm_conv_event_parser,
+				  rssi_offsets);
+	if (ret) {
+		ath12k_warn(ab, "Unable to parse rssi dbm conversion event\n");
+		return;
+	}
+
+	rssi_offsets->rssi_offset = rssi_offsets->min_nf_dbm +
+				    rssi_offsets->rssi_temp_offset;
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "RSSI offset updated, current offset is %d\n",
+		   rssi_offsets->rssi_offset);
+}
+
+static void ath12k_wmi_tm_event_segmented(struct ath12k_base *ab, u32 cmd_id,
+					struct sk_buff *skb)
+{
+	const void **tb;
+	const struct wmi_ftm_event_msg *ev;
+	u16 length;
+	int ret;
+
+	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath12k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
+		return;
+	}
+
+	ev = tb[WMI_TAG_ARRAY_BYTE];
+	if (!ev) {
+		ath12k_warn(ab, "failed to fetch ftm msg\n");
+		kfree(tb);
+		return;
+	}
+
+	length = skb->len - TLV_HDR_SIZE;
+	ret = ath12k_tm_process_event(ab, cmd_id, ev, length);
+	if (ret)
+		ath12k_warn(ab, "Failed to process ftm event\n");
+	kfree(tb);
+}
 
 static void
 ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
 				  struct sk_buff *skb)
 {
 	struct ath12k *ar;
-	struct wmi_pdev_temperature_event ev = {0};
+	const void **tb;
+	int ret;
+	const struct wmi_pdev_temperature_event *ev;
+
+	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+    if (IS_ERR(tb)) {
+	    ret = PTR_ERR(tb);
+       ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+       return;
+    }
 
-	if (ath12k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
-		ath12k_warn(ab, "failed to extract pdev temperature event");
+    ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
+    if (!ev) {
+        ath12k_warn(ab, "failed to fetch pdev temp ev");
+        kfree(tb);
 		return;
 	}
 
 	ath12k_dbg(ab, ATH12K_DBG_WMI,
-		   "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
+		   	"pdev temperature ev temp %d pdev_id %d\n", ev->temp,
+		     ev->pdev_id);
 
-	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
 	if (!ar) {
-		ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
+		ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d",
+			  ev->pdev_id);
 		return;
 	}
+
+	ath12k_thermal_event_temperature(ar, ev->temp);
+
+	kfree(tb);
 }
 
 static void ath12k_fils_discovery_event(struct ath12k_base *ab,
@@ -6288,6 +13261,1056 @@
 	kfree(tb);
 }
 
+static int ath12k_tpc_get_reg_pwr(struct ath12k_base *ab,
+				  struct wmi_tpc_stats_event *tpc_stats,
+				  struct wmi_max_reg_power_fixed_param *ev)
+{
+	int total_size;
+	struct wmi_max_reg_power_allowed *reg_pwr;
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "Received reg power array type %d length %d for tpc stats\n",
+		   ev->reg_power_type, ev->reg_array_len);
+
+	switch (ev->reg_power_type) {
+	case TPC_STATS_REG_PWR_ALLOWED_TYPE:
+		reg_pwr = &tpc_stats->max_reg_allowed_power;
+		break;
+	default:
+		goto out;
+	}
+
+	/* Each entry is 2 bytes wide, hence the product of the dimensions is multiplied by 2 */
+	total_size = (ev->d1 * ev->d2 * ev->d3 * ev->d4 * 2);
+	if (ev->reg_array_len != total_size) {
+		ath12k_warn(ab,
+			    "Total size and reg_array_len doesn't match for tpc stats\n");
+		return -EINVAL;
+	}
+
+	memcpy(&reg_pwr->tpc_reg_pwr, ev, sizeof(struct wmi_max_reg_power_fixed_param));
+
+	reg_pwr->reg_pwr_array = kzalloc(reg_pwr->tpc_reg_pwr.reg_array_len,
+					 GFP_ATOMIC);
+	if (!reg_pwr->reg_pwr_array)
+		return -ENOMEM;
+
+	tpc_stats->tlvs_rcvd |= WMI_TPC_REG_PWR_ALLOWED;
+out:
+	return 0;
+}
+
+static int ath12k_tpc_get_rate_array(struct ath12k_base *ab,
+				     struct wmi_tpc_stats_event *tpc_stats,
+				     struct wmi_tpc_rates_array_fixed_param *ev)
+{
+	u32 flag = 0;
+	struct wmi_tpc_rates_array *rates_array;
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "Received rates array type %d length %d for tpc stats\n",
+		   ev->rate_array_type, ev->rate_array_len);
+
+	switch (ev->rate_array_type) {
+	case ATH12K_TPC_STATS_RATES_ARRAY1:
+		rates_array = &tpc_stats->rates_array1;
+		flag = WMI_TPC_RATES_ARRAY1;
+		break;
+	case ATH12K_TPC_STATS_RATES_ARRAY2:
+		rates_array = &tpc_stats->rates_array2;
+		flag = WMI_TPC_RATES_ARRAY2;
+		break;
+	default:
+		ath12k_warn(ab,
+			    "Received invalid type of rates array for tpc stats\n");
+		return -EINVAL;
+	}
+	memcpy(&rates_array->tpc_rates_array, ev, sizeof(struct wmi_tpc_rates_array_fixed_param));
+	rates_array->rate_array = kzalloc(rates_array->tpc_rates_array.rate_array_len,
+					  GFP_ATOMIC);
+	if (!rates_array->rate_array)
+		return -ENOMEM;
+
+	tpc_stats->tlvs_rcvd |= flag;
+	return 0;
+}
+
+static int ath12k_tpc_get_ctl_pwr_tbl(struct ath12k_base *ab,
+				      struct wmi_tpc_stats_event *tpc_stats,
+				      struct wmi_tpc_ctl_pwr_fixed_param *ev)
+{
+	int total_size, ret = 0;
+	u32 flag = 0;
+	struct wmi_tpc_ctl_pwr_table *ctl_array;
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "Received ctl array type %d length %d for tpc stats\n",
+		   ev->ctl_array_type, ev->ctl_array_len);
+
+	switch (ev->ctl_array_type) {
+	case ATH12K_TPC_STATS_CTL_ARRAY:
+		ctl_array = &tpc_stats->ctl_array;
+		flag = WMI_TPC_CTL_PWR_ARRAY;
+		break;
+	default:
+		ath12k_warn(ab,
+			    "Received invalid type of ctl pwr table for tpc stats\n");
+		return -EINVAL;
+	}
+
+	total_size = (ev->d1 * ev->d2 * ev->d3 * ev->d4);
+	if (ev->ctl_array_len != total_size) {
+		ath12k_warn(ab,
+			    "Total size and ctl_array_len doesn't match for tpc stats\n");
+		return -EINVAL;
+	}
+
+	memcpy(&ctl_array->tpc_ctl_pwr, ev, sizeof(struct wmi_tpc_ctl_pwr_fixed_param));
+
+	ctl_array->ctl_pwr_table = kzalloc(ctl_array->tpc_ctl_pwr.ctl_array_len,
+					   GFP_ATOMIC);
+	if (!ctl_array->ctl_pwr_table)
+		return -ENOMEM;
+
+	tpc_stats->tlvs_rcvd |= flag;
+	return ret;
+}
+
+static int ath12k_wmi_tpc_stats_subtlv_parser(struct ath12k_base *ab,
+					      u16 tag, u16 len,
+					      const void *ptr, void *data)
+{
+	int ret = 0;
+	struct wmi_tpc_stats_event *tpc_stats = (struct wmi_tpc_stats_event *)data;
+	struct wmi_tpc_configs *tpc_config;
+	struct wmi_max_reg_power_fixed_param *tpc_reg_pwr;
+	struct wmi_tpc_rates_array_fixed_param *tpc_rates_array;
+	struct wmi_tpc_ctl_pwr_fixed_param *tpc_ctl_pwr;
+
+	if (!tpc_stats) {
+		ath12k_warn(ab, "tpc stats memory unavailable\n");
+		return -EINVAL;
+	}
+
+	switch (tag) {
+	case WMI_TAG_TPC_STATS_CONFIG_EVENT:
+		tpc_config = (struct wmi_tpc_configs *)ptr;
+		memcpy(&tpc_stats->tpc_config, tpc_config,
+		       sizeof(struct wmi_tpc_configs));
+		break;
+
+	case WMI_TAG_TPC_STATS_REG_PWR_ALLOWED:
+		tpc_reg_pwr = (struct wmi_max_reg_power_fixed_param *)ptr;
+		ret = ath12k_tpc_get_reg_pwr(ab, tpc_stats, tpc_reg_pwr);
+		break;
+
+	case WMI_TAG_TPC_STATS_RATES_ARRAY:
+		tpc_rates_array = (struct wmi_tpc_rates_array_fixed_param *)ptr;
+		ret = ath12k_tpc_get_rate_array(ab, tpc_stats, tpc_rates_array);
+		break;
+
+	case WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT:
+		tpc_ctl_pwr = (struct wmi_tpc_ctl_pwr_fixed_param *)ptr;
+		ret = ath12k_tpc_get_ctl_pwr_tbl(ab, tpc_stats, tpc_ctl_pwr);
+		break;
+
+	default:
+		ath12k_warn(ab,
+			    "Received invalid tag for tpc stats in subtlvs\n");
+		return -EINVAL;
+	}
+	return ret;
+}
+
+static int ath12k_wmi_tpc_stats_copy_buffer(struct ath12k_base *ab,
+					    const void *ptr, u16 tag, u16 len,
+					    struct wmi_tpc_stats_event *tpc_stats)
+{
+	s16 *reg_rates_src;
+	s8 *ctl_src;
+	s16 *dst_ptr;
+	s8 *dst_ptr_ctl;
+	int ret = 0;
+
+	if (tag == WMI_TAG_ARRAY_INT16)
+		reg_rates_src = (s16 *)ptr;
+	else
+		ctl_src = (s8 *)ptr;
+
+	switch (tpc_stats->event_count) {
+	case ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT:
+		if (tpc_stats->tlvs_rcvd & WMI_TPC_REG_PWR_ALLOWED) {
+			dst_ptr = tpc_stats->max_reg_allowed_power.reg_pwr_array;
+			memcpy(dst_ptr, reg_rates_src,
+			       tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len);
+			reg_rates_src += tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len;
+		}
+		break;
+	case ATH12K_TPC_STATS_RATES_EVENT1:
+		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY1) {
+			dst_ptr = tpc_stats->rates_array1.rate_array;
+			memcpy(dst_ptr, reg_rates_src,
+			       tpc_stats->rates_array1.tpc_rates_array.rate_array_len);
+			reg_rates_src += tpc_stats->rates_array1.tpc_rates_array.rate_array_len;
+		}
+		break;
+	case ATH12K_TPC_STATS_RATES_EVENT2:
+		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY2) {
+			dst_ptr = tpc_stats->rates_array2.rate_array;
+			memcpy(dst_ptr, reg_rates_src,
+			       tpc_stats->rates_array2.tpc_rates_array.rate_array_len);
+			reg_rates_src += tpc_stats->rates_array2.tpc_rates_array.rate_array_len;
+		}
+		break;
+	case ATH12K_TPC_STATS_CTL_TABLE_EVENT:
+		if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
+			dst_ptr_ctl = tpc_stats->ctl_array.ctl_pwr_table;
+			memcpy(dst_ptr_ctl, ctl_src,
+			       tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len);
+			ctl_src += tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len;
+		}
+		break;
+	}
+	return ret;
+}
+
+static int ath12k_wmi_tpc_stats_event_parser(struct ath12k_base *ab,
+					     u16 tag, u16 len,
+					     const void *ptr, void *data)
+{
+	struct wmi_tpc_stats_event *tpc_stats = (struct wmi_tpc_stats_event *)data;
+	int ret = 0;
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI, "tpc stats tag 0x%x of len %d rcvd\n",
+		   tag, len);
+	switch (tag) {
+	case WMI_TAG_CTRL_PATH_EVENT_FIXED_PARAM:
+		/* Fixed param is already processed */
+		break;
+
+	case WMI_TAG_ARRAY_STRUCT:
+		/* len 0 is expected for array of struct when there
+		 * is no content of that type to pack inside that tlv
+		 */
+		if (len == 0)
+			return 0;
+		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+					  ath12k_wmi_tpc_stats_subtlv_parser,
+					  tpc_stats);
+		break;
+
+	case WMI_TAG_ARRAY_INT16:
+		if (len == 0)
+			return 0;
+		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
+						       WMI_TAG_ARRAY_INT16,
+						       len, tpc_stats);
+		break;
+
+	case WMI_TAG_ARRAY_BYTE:
+		if (len == 0)
+			return 0;
+		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
+						       WMI_TAG_ARRAY_BYTE,
+						       len, tpc_stats);
+		break;
+
+	default:
+		ath12k_warn(ab, "Received invalid tag for tpc stats\n");
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar)
+{
+	struct wmi_tpc_stats_event *tpc_stats = ar->tpc_stats;
+
+	lockdep_assert_held(&ar->data_lock);
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc stats mem free\n");
+	if (tpc_stats) {
+		kfree(tpc_stats->max_reg_allowed_power.reg_pwr_array);
+		kfree(tpc_stats->rates_array1.rate_array);
+		kfree(tpc_stats->rates_array2.rate_array);
+		kfree(tpc_stats->ctl_array.ctl_pwr_table);
+		kfree(tpc_stats);
+		ar->tpc_stats = NULL;
+	}
+}
+
+static struct ath12k *ath12k_wmi_tpc_process_fixed_param(struct ath12k_base *ab,
+							 u8 *ptr, size_t len)
+{
+	struct ath12k *ar;
+	const struct wmi_tlv *tlv;
+	struct wmi_cp_stats_event_fixed_param *fixed_param;
+	struct wmi_tpc_stats_event *tpc_stats;
+	u16 tlv_tag;
+
+	if (!ptr) {
+		ath12k_warn(ab, "No data present in tpc stats event\n");
+		return NULL;
+	}
+
+	if (len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
+		ath12k_warn(ab, "tpc stats event size invalid\n");
+		return NULL;
+	}
+
+	tlv = (struct wmi_tlv *)ptr;
+	tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
+	ptr += sizeof(*tlv);
+
+	if (tlv_tag == WMI_TAG_CTRL_PATH_EVENT_FIXED_PARAM) {
+		fixed_param = (struct wmi_cp_stats_event_fixed_param *)ptr;
+
+		ar = ath12k_mac_get_ar_by_pdev_id(ab, fixed_param->pdev_id);
+		if (!ar) {
+			ath12k_warn(ab, "Failed to get ar for tpc stats\n");
+			return NULL;
+		}
+	} else {
+		ath12k_warn(ab, "TPC Stats received without fixed param tlv at start\n");
+		return NULL;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+	if (!ar->tpc_request) {
+		/* Event was received either without a request or after the
+		 * timeout; if memory is already allocated, free it.
+		 */
+		if (ar->tpc_stats) {
+			ath12k_warn(ab, "Freeing memory for tpc_stats\n");
+			ath12k_wmi_free_tpc_stats_mem(ar);
+		}
+		spin_unlock_bh(&ar->data_lock);
+		return NULL;
+	}
+
+	if (fixed_param->event_count == 0) {
+		if (ar->tpc_stats) {
+			ath12k_warn(ab,
+				    "Invalid tpc memory present\n");
+			spin_unlock_bh(&ar->data_lock);
+			return NULL;
+		}
+		ar->tpc_stats =
+			    kzalloc(sizeof(struct wmi_tpc_stats_event),
+				    GFP_ATOMIC);
+	}
+
+	if (!ar->tpc_stats) {
+		ath12k_warn(ab,
+			    "Failed to allocate memory for tpc stats\n");
+		spin_unlock_bh(&ar->data_lock);
+		return NULL;
+	}
+
+	tpc_stats = ar->tpc_stats;
+
+	if (!(fixed_param->event_count == 0)) {
+		if (fixed_param->event_count != tpc_stats->event_count + 1) {
+			ath12k_warn(ab,
+				    "Invalid tpc event received\n");
+			spin_unlock_bh(&ar->data_lock);
+			return NULL;
+		}
+	}
+	tpc_stats->pdev_id = fixed_param->pdev_id;
+	tpc_stats->event_count = fixed_param->event_count;
+	tpc_stats->end_of_event = fixed_param->end_of_event;
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "tpc stats event_count %d\n",
+		   tpc_stats->event_count);
+
+	spin_unlock_bh(&ar->data_lock);
+
+	return ar;
+}
+
+static void ath12k_process_tpc_stats(struct ath12k_base *ab,
+				     struct sk_buff *skb)
+{
+	int ret;
+	struct ath12k *ar;
+	struct wmi_tpc_stats_event *tpc_stats = NULL;
+
+	rcu_read_lock();
+	ar = ath12k_wmi_tpc_process_fixed_param(ab, skb->data, skb->len);
+	if (!ar) {
+		ath12k_warn(ab, "Failed to get ar for tpc event\n");
+		goto exit;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+	tpc_stats = ar->tpc_stats;
+	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+				  ath12k_wmi_tpc_stats_event_parser,
+				  tpc_stats);
+	if (ret) {
+		if (tpc_stats) {
+			ath12k_warn(ab, "Freeing memory for tpc_stats\n");
+			ath12k_wmi_free_tpc_stats_mem(ar);
+		}
+		spin_unlock_bh(&ar->data_lock);
+		ath12k_warn(ab, "failed to parse tpc_stats tlv: %d\n", ret);
+		goto exit;
+	}
+
+	if (tpc_stats && tpc_stats->end_of_event)
+		complete(&ar->tpc_complete);
+
+	spin_unlock_bh(&ar->data_lock);
+
+exit:
+	rcu_read_unlock();
+}
+
+int ath12k_wmi_pdev_get_tpc_table_cmdid(struct ath12k *ar)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_request_ctrl_path_stats_cmd_fixed_param *cmd;
+	struct sk_buff *skb;
+	struct wmi_tlv *tlv;
+	void *ptr;
+	u32 *pdev_ids, buf_len;
+	int ret;
+
+	buf_len = sizeof(*cmd) + sizeof(u32) + TLV_HDR_SIZE;
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+	if (!skb)
+		return -ENOMEM;
+	cmd = (struct wmi_request_ctrl_path_stats_cmd_fixed_param *)skb->data;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CTRL_PATH_CMD_FIXED_PARAM) |
+			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+	cmd->stats_id_mask = WMI_REQ_CTRL_PATH_PDEV_TX_STAT;
+	cmd->action = WMI_REQUEST_CTRL_PATH_STAT_GET;
+	cmd->subid = ar->tpc_stats_type;
+
+	ptr = skb->data + sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+		      FIELD_PREP(WMI_TLV_LEN, sizeof(u32));
+
+	ptr += TLV_HDR_SIZE;
+
+	pdev_ids = ptr;
+	pdev_ids[0] = ar->pdev->pdev_id;
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CTRL_PATH_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to submit WMI_REQUEST_STATS_CTRL_PATH_CMDID\n");
+		dev_kfree_skb(skb);
+		return ret;
+	}
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI get TPC STATS sent on pdev %d\n",
+		   ar->pdev->pdev_id);
+
+	return ret;
+}
+
+static void ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+	const struct wmi_tlv *tlv;
+	u16 tlv_tag, tlv_len;
+	u32 *dev_id;
+	u8 *data;
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
+	tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
+
+	if (tlv_tag == WMI_TAG_ARRAY_BYTE) {
+		data = skb->data + sizeof(struct wmi_tlv);
+		dev_id = (uint32_t *)data;
+		*dev_id = ab->hw_params->hw_rev + ATH12K_DIAG_HW_ID_OFFSET;
+	} else {
+		ath12k_warn(ab, "WMI Diag Event missing required tlv\n");
+		return;
+	}
+
+	ath12k_fwlog_write(ab, data, tlv_len);
+}
+
+#define make_min_max(max,min) (u32_encode_bits(max, 0xf0) | u32_encode_bits(min, 0xf))
+
+static void
+ath12k_wmi_pdev_update_muedca_params_status_event(struct ath12k_base *ab,
+						  struct sk_buff *skb)
+{
+	const void **tb;
+	const struct wmi_pdev_update_muedca_event *ev;
+	struct ieee80211_mu_edca_param_set *params;
+	struct ath12k *ar;
+	int ret;
+
+	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+		return;
+	}
+
+	ev = tb[WMI_TAG_MUEDCA_PARAMS_CONFIG_EVENT];
+	if (!ev) {
+		ath12k_warn(ab, "failed to fetch pdev update muedca params ev");
+		goto mem_free;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "Update MU-EDCA parameters for pdev:%d\n", ev->pdev_id);
+
+	rcu_read_lock();
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+	if (!ar) {
+		ath12k_warn(ab, "MU-EDCA parameter change in invalid pdev %d\n",
+			    ev->pdev_id);
+		goto unlock;
+	}
+
+	params = kzalloc(sizeof(*params), GFP_ATOMIC);
+	if (!params) {
+		ath12k_warn(ab,
+			    "Failed to allocate memory for updated MU-EDCA Parameters");
+		goto unlock;
+	}
+
+	params->ac_be.aifsn = ev->aifsn[WMI_AC_BE];
+	params->ac_be.ecw_min_max = make_min_max(ev->ecwmax[WMI_AC_BE],
+						 ev->ecwmin[WMI_AC_BE]);
+	params->ac_be.mu_edca_timer = ev->muedca_expiration_time[WMI_AC_BE];
+
+	params->ac_bk.aifsn = ev->aifsn[WMI_AC_BK];
+	params->ac_bk.ecw_min_max = make_min_max(ev->ecwmax[WMI_AC_BK],
+						 ev->ecwmin[WMI_AC_BK]);
+	params->ac_bk.mu_edca_timer = ev->muedca_expiration_time[WMI_AC_BK];
+
+	params->ac_vi.aifsn = ev->aifsn[WMI_AC_VI];
+	params->ac_vi.ecw_min_max = make_min_max(ev->ecwmax[WMI_AC_VI],
+						 ev->ecwmin[WMI_AC_VI]);
+	params->ac_vi.mu_edca_timer = ev->muedca_expiration_time[WMI_AC_VI];
+
+	params->ac_vo.aifsn = ev->aifsn[WMI_AC_VO];
+	params->ac_vo.ecw_min_max = make_min_max(ev->ecwmax[WMI_AC_VO],
+						 ev->ecwmin[WMI_AC_VO]);
+	params->ac_vo.mu_edca_timer = ev->muedca_expiration_time[WMI_AC_VO];
+
+	/* cfg80211_update_muedca_params_event(ar->ah->hw->wiphy, params, GFP_ATOMIC); */
+
+	kfree(params);
+
+unlock:
+	rcu_read_unlock();
+mem_free:
+	kfree(tb);
+}
+
+static const char *ath12k_wmi_twt_add_dialog_event_status(u32 status)
+{
+	switch (status) {
+	case WMI_ADD_TWT_STATUS_OK:
+		return "ok";
+	case WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED:
+		return "twt disabled";
+	case WMI_ADD_TWT_STATUS_USED_DIALOG_ID:
+		return "dialog id in use";
+	case WMI_ADD_TWT_STATUS_INVALID_PARAM:
+		return "invalid parameters";
+	case WMI_ADD_TWT_STATUS_NOT_READY:
+		return "not ready";
+	case WMI_ADD_TWT_STATUS_NO_RESOURCE:
+		return "resource unavailable";
+	case WMI_ADD_TWT_STATUS_NO_ACK:
+		return "no ack";
+	case WMI_ADD_TWT_STATUS_NO_RESPONSE:
+		return "no response";
+	case WMI_ADD_TWT_STATUS_DENIED:
+		return "denied";
+	case WMI_ADD_TWT_STATUS_UNKNOWN_ERROR:
+		fallthrough;
+	default:
+		return "unknown error";
+	}
+}
+
+int ath12k_wmi_dbglog_cfg(struct ath12k *ar, u32 param, u64 value)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_dbglog_config_cmd_fixed_param *cmd;
+	struct sk_buff *skb;
+	struct wmi_tlv *tlv;
+	u32 module_id_bitmap;
+	int ret, len;
+
+	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(module_id_bitmap);
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+	cmd = (struct wmi_dbglog_config_cmd_fixed_param *)skb->data;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DEBUG_LOG_CONFIG_CMD) |
+			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+	cmd->dbg_log_param = param;
+
+	tlv = (struct wmi_tlv *)((u8 *) cmd + sizeof(*cmd));
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+		      FIELD_PREP(WMI_TLV_LEN, sizeof(u32));
+
+	switch (param) {
+	case WMI_DEBUG_LOG_PARAM_LOG_LEVEL:
+	case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE:
+	case WMI_DEBUG_LOG_PARAM_VDEV_DISABLE:
+	case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP:
+		cmd->value = value;
+		break;
+	case WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP:
+	case WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP:
+		cmd->value = value;
+		module_id_bitmap = value >> 32;
+		memcpy(tlv->value, &module_id_bitmap, sizeof(module_id_bitmap));
+		break;
+	default:
+		dev_kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DBGLOG_CFG_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to send WMI_DBGLOG_CFG_CMDID\n");
+		dev_kfree_skb(skb);
+	}
+	return ret;
+}
+
+static void ath12k_wmi_twt_add_dialog_event(struct ath12k_base *ab,
+					    struct sk_buff *skb)
+{
+	const void **tb;
+	const struct wmi_twt_add_dialog_event *ev;
+	int ret;
+
+	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath12k_warn(ab,
+			    "failed to parse wmi twt add dialog status event tlv: %d\n",
+			    ret);
+		return;
+	}
+
+	ev = tb[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT];
+	if (!ev) {
+		ath12k_warn(ab, "failed to fetch twt add dialog wmi event\n");
+		goto exit;
+	}
+
+	if (ev->status)
+		ath12k_warn(ab,
+			    "wmi add twt dialog event vdev %d dialog id %d status %s\n",
+			    ev->vdev_id, ev->dialog_id,
+			    ath12k_wmi_twt_add_dialog_event_status(ev->status));
+
+exit:
+	kfree(tb);
+}
+
+static void
+ath12k_wmi_obss_color_collision_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+	const void **tb;
+	const struct wmi_obss_color_collision_event *ev;
+	struct ath12k_link_vif *arvif;
+	struct ath12k *ar;
+	int ret;
+
+	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+		return;
+	}
+
+	rcu_read_lock();
+	ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT];
+	if (!ev) {
+		ath12k_warn(ab, "failed to fetch obss color collision ev");
+		goto unlock;
+	}
+
+	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
+	if (!arvif)
+	{
+		ath12k_warn(ab, "failed to find arvif with vdev id %d in obss_color_collision_event\n",
+				ev->vdev_id);
+		goto unlock;
+	}
+
+	switch (ev->evt_type) {
+	case WMI_BSS_COLOR_COLLISION_DETECTION:
+		ar = arvif->ar;
+		arvif->obss_color_bitmap = ev->obss_color_bitmap;
+
+		ath12k_dbg(ab, ATH12K_DBG_WMI,
+				"OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
+				ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
+		rcu_read_unlock();
+		ieee80211_queue_work(ar->ah->hw, &arvif->update_obss_color_notify_work);
+		goto exit;
+	case WMI_BSS_COLOR_COLLISION_DISABLE:
+	case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY:
+	case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE:
+		goto exit;
+	default:
+		ath12k_warn(ab, "received unknown obss color collision detection event\n");
+		goto exit;
+	}
+
+unlock:
+	rcu_read_unlock();
+exit:
+	kfree(tb);
+}
+
+static void ath12k_process_ocac_complete_event(struct ath12k_base *ab,
+                struct sk_buff *skb)
+{
+        const void **tb;
+        const struct wmi_vdev_adfs_ocac_complete_event_fixed_param *ev;
+        struct ath12k *ar;
+        int ret;
+
+        tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+        if (IS_ERR(tb)) {
+                ret = PTR_ERR(tb);
+                ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+                return;
+        }
+
+        ev = tb[WMI_TAG_VDEV_ADFS_OCAC_COMPLETE_EVENT];
+
+        if (!ev) {
+                ath12k_warn(ab, "failed to fetch ocac completed ev");
+                kfree(tb);
+                return;
+        }
+
+        ath12k_dbg(ab, ATH12K_DBG_WMI,
+                   "pdev dfs ocac complete event on pdev %d, chan freq %d, "
+		   "chan_width %d, status %d, freq %d, freq1 %d, freq2 %d",
+                   ev->vdev_id, ev->chan_freq, ev->chan_width,
+                   ev->status, ev->center_freq, ev->center_freq1,
+		   ev->center_freq2);
+
+        ar = ath12k_mac_get_ar_by_vdev_id(ab, ev->vdev_id);
+
+        if (!ar) {
+		ath12k_warn(ab, "OCAC complete event in invalid vdev %d\n",
+                            ev->vdev_id);
+                goto exit;
+        }
+
+        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,"aDFS ocac complete event in vdev %d\n",
+                   ev->vdev_id);
+
+        if (ev->status) {
+            ath12k_mac_background_dfs_event(ar, ATH12K_BGDFS_ABORT);
+	} else {
+		memset(&ar->agile_chandef, 0, sizeof(struct cfg80211_chan_def));
+		ar->agile_chandef.chan = NULL;
+	}
+exit:
+        kfree(tb);
+}
+
+static int ath12k_wmi_pdev_sscan_fft_bin_index_parse(struct ath12k_base *soc,
+						     u16 tag, u16 len,
+						     const void *ptr, void *data)
+{
+	if (tag != WMI_TAG_PDEV_SSCAN_FFT_BIN_INDEX)
+		return -EPROTO;
+	return 0;
+}
+
+static int ath12k_wmi_pdev_sscan_per_detector_info_parse(struct ath12k_base *soc,
+							 u16 tag, u16 len,
+							 const void *ptr, void *data)
+{
+	if (tag != WMI_TAG_PDEV_SSCAN_PER_DETECTOR_INFO)
+		return -EPROTO;
+
+	return 0;
+}
+
+static int ath12k_wmi_tlv_sscan_fw_parse(struct ath12k_base *ab,
+					 u16 tag, u16 len,
+					 const void *ptr, void *data)
+{
+	struct wmi_pdev_sscan_fw_param_parse *parse = data;
+	int ret;
+
+	switch (tag) {
+
+	case WMI_TAG_PDEV_SSCAN_FW_CMD_FIXED_PARAM:
+		memcpy(&parse->fixed, ptr,
+		       sizeof(struct ath12k_wmi_pdev_sscan_fw_cmd_fixed_param));
+		parse->fixed.pdev_id = DP_HW2SW_MACID(parse->fixed.pdev_id);
+		break;
+	case WMI_TAG_ARRAY_STRUCT:
+	       if (!parse->bin_entry_done) {
+		       parse->bin = (struct ath12k_wmi_pdev_sscan_fft_bin_index *)ptr;
+
+		       ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+						 ath12k_wmi_pdev_sscan_fft_bin_index_parse,
+						 parse);
+
+		       if (ret) {
+			       ath12k_warn(ab, "failed to parse fft bin index %d\n",
+					   ret);
+			       return ret;
+		       }
+
+		       parse->bin_entry_done = true;
+	       } else if (!parse->det_info_entry_done) {
+		       parse->det_info = (struct ath12k_wmi_pdev_sscan_per_detector_info *)ptr;
+
+		       ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+						 ath12k_wmi_pdev_sscan_per_detector_info_parse,
+						 parse);
+
+		       if (ret) {
+			       ath12k_warn(ab, "failed to parse detector info %d\n",
+					   ret);
+			       return ret;
+		       }
+		       parse->det_info_entry_done = true;
+	       }
+	       break;
+	case WMI_TAG_PDEV_SSCAN_CHAN_INFO:
+	       memcpy(&parse->ch_info, ptr,
+		      sizeof(struct ath12k_wmi_pdev_sscan_chan_info));
+	       parse->bin_entry_done = true;
+	       break;
+	default:
+	       break;
+	}
+	return 0;
+
+}
+
+static void
+ath12k_wmi_pdev_sscan_fw_param_event(struct ath12k_base *ab,
+				     struct sk_buff *skb)
+{
+	struct ath12k *ar;
+	struct wmi_pdev_sscan_fw_param_parse parse = { };
+	struct wmi_pdev_sscan_fw_param_event param;
+	int ret;
+	u8 pdev_idx;
+
+	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+				  ath12k_wmi_tlv_sscan_fw_parse,
+				  &parse);
+
+	if (ret) {
+		ath12k_warn(ab, "failed to parse pdev sscan fw tlv %d\n", ret);
+		return;
+	}
+
+	param.fixed             = parse.fixed;
+	param.bin		= parse.bin;
+	param.ch_info		= parse.ch_info;
+	param.det_info		= parse.det_info;
+
+	pdev_idx = param.fixed.pdev_id;
+	ar = ab->pdevs[pdev_idx].ar;
+
+#ifdef CONFIG_ATH12K_SPECTRAL
+	ar->spectral.ch_width = param.ch_info.operating_bw;
+#endif
+
+}
+
+static int
+ath12k_wmi_spectral_scan_bw_cap_parse(struct ath12k_base *soc,
+				     u16 tag, u16 len,
+				     const void *ptr, void *data)
+{
+	struct wmi_spectral_capabilities_parse *parse = data;
+	if (tag != WMI_TAG_SPECTRAL_SCAN_BW_CAPABILITIES)
+		return -EPROTO;
+	parse->num_bw_caps_entry++;
+	return 0;
+}
+
+static int
+ath12k_wmi_spectral_fft_size_cap_parse(struct ath12k_base *soc,
+				       u16 tag, u16 len,
+				       const void *ptr, void *data)
+{
+	struct wmi_spectral_capabilities_parse *parse = data;
+	if (tag != WMI_TAG_SPECTRAL_FFT_SIZE_CAPABILITIES)
+		return -EPROTO;
+
+	parse->num_fft_size_caps_entry++;
+	return 0;
+}
+
+static int
+ath12k_wmi_tlv_spectral_cap_parse(struct ath12k_base *ab,
+				  u16 tag, u16 len,
+				  const void *ptr, void *data)
+{
+	struct wmi_spectral_capabilities_parse *parse = data;
+	int ret;
+
+	if (tag == WMI_TAG_ARRAY_STRUCT) {
+		if (!parse->sscan_bw_caps_entry_done) {
+			parse->num_bw_caps_entry = 0;
+			parse->sscan_bw_caps = (struct ath12k_wmi_spectral_scan_bw_capabilities *)ptr;
+			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+					ath12k_wmi_spectral_scan_bw_cap_parse,
+					parse);
+			if (ret) {
+				ath12k_warn(ab, "failed to parse scan bw cap %d\n",
+					    ret);
+				return ret;
+			}
+			parse->sscan_bw_caps_entry_done = true;
+		} else if (!parse->fft_size_caps_entry_done) {
+			parse->num_fft_size_caps_entry = 0;
+			parse->fft_size_caps = (struct ath12k_wmi_spectral_fft_size_capabilities *)ptr;
+			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+					ath12k_wmi_spectral_fft_size_cap_parse,
+					parse);
+			if (ret) {
+				ath12k_warn(ab, "failed to parse fft size cap %d\n",
+					    ret);
+				return ret;
+			}
+			parse->fft_size_caps_entry_done = true;
+		}
+	}
+	return 0;
+}
+
+static void
+ath12k_wmi_spectral_capabilities_event(struct ath12k_base *ab,
+				       struct sk_buff *skb)
+{
+	struct wmi_spectral_capabilities_parse parse = { };
+	struct wmi_spectral_capabilities_event param;
+	struct ath12k *ar = NULL;
+	int ret, size;
+	u8 pdev_id, i;
+	struct ath12k_pdev *pdev;
+
+	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+				  ath12k_wmi_tlv_spectral_cap_parse,
+				  &parse);
+	if (ret) {
+		ath12k_warn(ab, "failed to parse spectral capabilities tlv %d\n", ret);
+		return;
+	}
+
+	param.sscan_bw_caps = parse.sscan_bw_caps;
+	param.fft_size_caps = parse.fft_size_caps;
+	param.num_bw_caps_entry = parse.num_bw_caps_entry;
+	param.num_fft_size_caps_entry = parse.num_fft_size_caps_entry;
+
+	pdev_id = param.sscan_bw_caps->pdev_id;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		if (pdev && pdev->pdev_id == pdev_id) {
+			ar = pdev->ar;
+			break;
+		}
+	}
+
+	if (!ar) {
+		ath12k_dbg(ab, ATH12K_DBG_WMI,
+			   "ar is NULL for pdev_id %d use default spectral fft size 7",
+			   pdev_id);
+		return;
+	}
+	size = sizeof(struct ath12k_wmi_spectral_fft_size_capabilities)*
+					param.num_fft_size_caps_entry;
+
+#ifdef CONFIG_ATH12K_SPECTRAL
+	ar->spectral.spectral_cap.fft_size_caps = kzalloc(size, GFP_ATOMIC);
+	if (!ar->spectral.spectral_cap.fft_size_caps) {
+		ath12k_warn(ab, "Failed to allocate memory");
+		return;
+	}
+	memcpy(ar->spectral.spectral_cap.fft_size_caps,
+		param.fft_size_caps, size);
+	ar->spectral.spectral_cap.num_bw_caps_entry = param.num_bw_caps_entry;
+	ar->spectral.spectral_cap.num_fft_size_caps_entry = param.num_fft_size_caps_entry;
+#endif
+
+}
+
+int ath12k_wmi_pdev_get_ani_level(struct ath12k *ar, u32 cmd_id, u8 pdev_id)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_pdev_get_ani_level_cmd *cmd;
+	struct sk_buff *skb;
+	int ret;
+	enum wmi_tlv_tag tlv_tag;
+
+	tlv_tag = WMI_TAG_PDEV_GET_ANI_OFDM_CONFIG_CMD;
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_pdev_get_ani_level_cmd *)skb->data;
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(tlv_tag, sizeof(*cmd));
+	cmd->pdev_id = cpu_to_le32(pdev_id);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, cmd_id);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_ANI_CONFIG cmd\n");
+		dev_kfree_skb(skb);
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI_PDEV_GET_ANI_CONFIG cmdid %d for pdev_id: %d\n",
+		   cmd_id, pdev_id);
+	return ret;
+}
+
+static void ath12k_wmi_event_ani_ofdm_level(struct ath12k_base *ab,
+					    struct sk_buff *skb)
+{
+	const struct wmi_pdev_ani_event *ev;
+	struct ath12k *ar;
+	u8 pdev_id;
+	bool pdev_id_valid;
+
+	ev = (struct wmi_pdev_ani_event *)skb->data;
+
+	pdev_id_valid = WMI_ANI_EVENT_PDEV_ID_VALID & le32_to_cpu(ev->pdev_id_bitmap);
+	if (!pdev_id_valid) {
+		ath12k_warn(ab, "WMI_PDEV_ANI_OFDM_LEVEL_EVENT is not supported\n");
+		return;
+	}
+
+	pdev_id = le32_get_bits(ev->pdev_id_bitmap, WMI_ANI_EVENT_PDEV_ID);
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+	if (!ar) {
+		ath12k_warn(ab, "Invalid pdev_id received for ANI OFDM level\n");
+		return;
+	}
+
+	ar->ani_ofdm_level = le32_to_cpu(ev->ani_level);
+	complete(&ar->ani_ofdm_event);
+}
+
 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
 {
 	struct wmi_cmd_hdr *cmd_hdr;
@@ -6380,11 +14403,26 @@
 	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
 		ath12k_probe_resp_tx_status_event(ab, skb);
 		break;
+	case WMI_TWT_ADD_DIALOG_EVENTID:
+		ath12k_wmi_twt_add_dialog_event(ab, skb);
+		break;
+	case WMI_PDEV_SSCAN_FW_PARAM_EVENTID:
+		ath12k_wmi_pdev_sscan_fw_param_event(ab, skb);
+		break;
+	case WMI_SPECTRAL_CAPABILITIES_EVENTID:
+		ath12k_wmi_spectral_capabilities_event(ab, skb);
+		break;
+	case WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID:
+		ath12k_offchan_tx_completion_event(ab, skb);
+		break;
 	/* add Unsupported events here */
 	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
 	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
 	case WMI_TWT_ENABLE_EVENTID:
 	case WMI_TWT_DISABLE_EVENTID:
+	case WMI_TWT_DEL_DIALOG_EVENTID:
+	case WMI_TWT_PAUSE_DIALOG_EVENTID:
+	case WMI_TWT_RESUME_DIALOG_EVENTID:
 	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
 		ath12k_dbg(ab, ATH12K_DBG_WMI,
 			   "ignoring unsupported event 0x%x\n", id);
@@ -6395,6 +14433,58 @@
 	case WMI_VDEV_DELETE_RESP_EVENTID:
 		ath12k_vdev_delete_resp_event(ab, skb);
 		break;
+	case WMI_DIAG_EVENTID:
+		ath12k_wmi_diag_event(ab, skb);
+		break;
+	case WMI_DCS_INTERFERENCE_EVENTID:
+                ath12k_wmi_dcs_awgn_interference_event(ab, skb);
+                break;
+	case WMI_PDEV_PKTLOG_DECODE_INFO_EVENTID:
+#ifdef CONFIG_ATH12K_PKTLOG
+		ath12k_wmi_pktlog_decode_info(ab, skb);
+#endif
+		break;
+	case WMI_CTRL_PATH_STATS_EVENTID:
+#ifdef CONFIG_ATH12K_DEBUGFS
+		ath12k_wmi_ctrl_path_stats_event(ab, skb);
+#endif
+		break;
+	case WMI_PDEV_RSSI_DBM_CONVERSION_PARAMS_INFO_EVENTID:
+		ath12k_wmi_rssi_dbm_conversion_param_info(ab, skb);
+		break;
+	case WMI_MUEDCA_PARAMS_CONFIG_EVENTID:
+		ath12k_wmi_pdev_update_muedca_params_status_event(ab, skb);
+		break;
+	case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
+		ath12k_wmi_obss_color_collision_event(ab, skb);
+		break;
+	case WMI_PDEV_UTF_EVENTID:
+		if (test_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags))
+			ath12k_wmi_tm_event_segmented(ab, id, skb);
+		else
+			ath12k_tm_wmi_event_unsegmented(ab, id, skb);
+		break;
+	case WMI_STATS_CTRL_PATH_EVENTID:
+		ath12k_process_tpc_stats(ab, skb);
+		break;
+	case WMI_MLO_SETUP_COMPLETE_EVENTID:
+		ath12k_wmi_event_mlo_setup_complete(ab, skb);
+		break;
+	case WMI_MLO_TEARDOWN_COMPLETE_EVENTID:
+		ath12k_wmi_event_teardown_complete(ab, skb);
+		break;
+	case WMI_PDEV_MULTIPLE_VDEV_RESTART_RESP_EVENTID:
+		ath12k_wmi_event_mvr_response(ab, skb);
+		break;
+	case WMI_VDEV_ADFS_OCAC_COMPLETE_EVENTID:
+		ath12k_process_ocac_complete_event(ab, skb);
+		break;
+	case WMI_MGMT_RX_FW_CONSUMED_EVENTID:
+		ath12k_fw_consumed_mgmt_rx_event(ab, skb);
+		break;
+	case WMI_PDEV_ANI_OFDM_LEVEL_EVENTID:
+		ath12k_wmi_event_ani_ofdm_level(ab, skb);
+		break;
 	/* TODO: Add remaining events */
 	default:
 		ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
@@ -6433,6 +14523,7 @@
 	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
 	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
 	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
+	init_waitqueue_head(&ab->wmi_ab.wmi[pdev_idx].tx_ce_desc_wq);
 
 	return 0;
 }
@@ -6495,15 +14586,17 @@
 	return ret;
 }
 
-int ath12k_wmi_simulate_radar(struct ath12k *ar)
+int ath12k_wmi_simulate_radar(struct ath12k *ar, u32 radar_params)
 {
-	struct ath12k_vif *arvif;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_vif *ahvif;
 	u32 dfs_args[DFS_MAX_TEST_ARGS];
 	struct wmi_unit_test_cmd wmi_ut;
 	bool arvif_found = false;
 
 	list_for_each_entry(arvif, &ar->arvifs, list) {
-		if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+		ahvif = arvif->ahvif;
+		if (arvif->is_started && ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
 			arvif_found = true;
 			break;
 		}
@@ -6514,11 +14607,10 @@
 
 	dfs_args[DFS_TEST_CMDID] = 0;
 	dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
-	/* Currently we could pass segment_id(b0 - b1), chirp(b2)
-	 * freq offset (b3 - b10) to unit test. For simulation
-	 * purpose this can be set to 0 which is valid.
+	/* Currently we pass segment_id(b0 - b1), chirp(b2)
+	 * freq offset (b3 - b10), detector_id(b11 - b12) to unit test.
 	 */
-	dfs_args[DFS_TEST_RADAR_PARAM] = 0;
+	dfs_args[DFS_TEST_RADAR_PARAM] = radar_params;
 
 	wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
 	wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
@@ -6530,6 +14622,76 @@
 	return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
 }
 
+int ath12k_wmi_simulate_awgn(struct ath12k *ar, u32 chan_bw_interference_bitmap)
+{
+        struct ath12k_link_vif *arvif;
+	struct ath12k_vif *ahvif;
+        u32 awgn_args[WMI_AWGN_MAX_TEST_ARGS];
+        struct wmi_unit_test_cmd wmi_ut;
+        bool arvif_found = false;
+
+        if (!test_bit(WMI_TLV_SERVICE_DCS_AWGN_INT_SUPPORT, ar->ab->wmi_ab.svc_map)) {
+                ath12k_warn(ar->ab, "firmware doesn't support awgn interference, so can't simulate it\n");
+                return -EOPNOTSUPP;
+        }
+
+        list_for_each_entry(arvif, &ar->arvifs, list) {
+		ahvif = arvif->ahvif;
+                if (arvif->is_started && ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
+                        arvif_found = true;
+                        break;
+                }
+        }
+
+        if (!arvif_found)
+                return -EINVAL;
+
+        awgn_args[WMI_AWGN_TEST_AWGN_INT] = WMI_UNIT_TEST_AWGN_INTF_TYPE;
+        awgn_args[WMI_AWGN_TEST_BITMAP] = chan_bw_interference_bitmap;
+
+        wmi_ut.vdev_id = arvif->vdev_id;
+        wmi_ut.module_id = WMI_AWGN_UNIT_TEST_MODULE;
+        wmi_ut.num_args = WMI_AWGN_MAX_TEST_ARGS;
+        wmi_ut.diag_token = WMI_AWGN_UNIT_TEST_TOKEN;
+
+        ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+                   "Triggering AWGN Simulation, interference bitmap : 0x%x\n",
+                   chan_bw_interference_bitmap);
+
+        return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, awgn_args);
+}
+
+int ath12k_wmi_pdev_m3_dump_enable(struct ath12k *ar, u32 enable)
+{
+	struct ath12k_link_vif *arvif;
+	u32 m3_args[WMI_M3_MAX_TEST_ARGS];
+	struct wmi_unit_test_cmd wmi_ut;
+	bool arvif_found = false;
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		if (arvif->is_started) {
+			arvif_found = true;
+			break;
+		}
+	}
+
+	if (!arvif_found)
+		return -EINVAL;
+
+	m3_args[WMI_M3_TEST_CMDID] = WMI_DBG_ENABLE_M3_SSR;
+	m3_args[WMI_M3_TEST_ENABLE] = enable;
+
+	wmi_ut.vdev_id = arvif->vdev_id;
+	wmi_ut.module_id = WMI_M3_UNIT_TEST_MODULE;
+	wmi_ut.num_args = WMI_M3_MAX_TEST_ARGS;
+	wmi_ut.diag_token = WMI_M3_UNIT_TEST_TOKEN;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "%s M3 SSR dump\n",
+		   enable ? "Enabling" : "Disabling");
+
+	return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, m3_args);
+}
+
 int ath12k_wmi_connect(struct ath12k_base *ab)
 {
 	u32 i;
@@ -6554,14 +14716,14 @@
 }
 
 int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
-			   u8 pdev_id)
+			   u8 pdev_idx)
 {
 	struct ath12k_wmi_pdev *wmi_handle;
 
-	if (pdev_id >= ab->hw_params->max_radios)
+	if (pdev_idx >= ab->hw_params->max_radios)
 		return -EINVAL;
 
-	wmi_handle = &ab->wmi_ab.wmi[pdev_id];
+	wmi_handle = &ab->wmi_ab.wmi[pdev_idx];
 
 	wmi_handle->wmi_ab = &ab->wmi_ab;
 
@@ -6602,5 +14764,732 @@
 	for (i = 0; i < ab->htc.wmi_ep_count; i++)
 		ath12k_wmi_pdev_detach(ab, i);
 
+	clear_bit(ATH12K_FLAG_WMI_INIT_DONE, &ab->dev_flags);
 	ath12k_wmi_free_dbring_caps(ab);
 }
+
+int ath12k_wmi_pdev_ap_ps_cmd_send(struct ath12k *ar, u8 pdev_id,
+				   u32 param_value)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_pdev_ap_ps_cmd *cmd;
+	struct sk_buff *skb;
+	int ret;
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_pdev_ap_ps_cmd *)skb->data;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+				     WMI_TAG_PDEV_GREEN_AP_PS_ENABLE_CMD) |
+			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+	cmd->pdev_id = pdev_id;
+	cmd->param_value = param_value;
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to send ap ps enable/disable cmd %d\n",ret);
+		dev_kfree_skb(skb);
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "wmi pdev ap ps set pdev id %d value %d\n",
+		   pdev_id, param_value);
+
+	return ret;
+}
+
+int ath12k_wmi_mlo_setup(struct ath12k *ar,
+			 struct wmi_mlo_setup_params *mlo_params)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_mlo_setup_cmd_fixed_param *cmd;
+	struct sk_buff *skb;
+	struct wmi_tlv *tlv;
+	u32 *plinks;
+	u32 num_links;
+	int i, ret, len;
+
+	num_links = mlo_params->num_partner_links;
+
+	len = sizeof(*cmd) + TLV_HDR_SIZE + (num_links * sizeof(u32));
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_mlo_setup_cmd_fixed_param *)skb->data;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MLO_SETUP_CMD) |
+			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+	cmd->mld_group_id = mlo_params->group_id;
+	cmd->pdev_id = ar->pdev->pdev_id;
+
+	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+		      FIELD_PREP(WMI_TLV_LEN, num_links);
+
+	plinks = (u32 *)tlv->value;
+	for (i = 0; i < num_links; i++)
+		plinks[i] = mlo_params->partner_link_id[i];
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to submit WMI_MLO_SETUP_CMDID cmd\n");
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
+
+int ath12k_wmi_mlo_ready(struct ath12k *ar)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_mlo_ready_cmd_fixed_param *cmd;
+	struct sk_buff *skb;
+	int ret, len;
+
+	len = sizeof(*cmd);
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_mlo_ready_cmd_fixed_param *)skb->data;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MLO_READY_CMD) |
+			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+	cmd->pdev_id = ar->pdev->pdev_id;
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to submit WMI_MLO_READY_CMDID cmd\n");
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
+
+int ath12k_wmi_mlo_teardown(struct ath12k *ar, bool umac_reset)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_mlo_teardown_fixed_param *cmd;
+	struct sk_buff *skb;
+	int ret, len;
+
+	len = sizeof(*cmd);
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_mlo_teardown_fixed_param *)skb->data;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MLO_TEARDOWN_CMD) |
+			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+	cmd->pdev_id = ar->pdev->pdev_id;
+	cmd->reason_code = WMI_MLO_TEARDOWN_SSR_REASON;
+	cmd->umac_reset = umac_reset;
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to submit WMI MLO teardown cmd\n");
+		dev_kfree_skb(skb);
+	}
+
+	ar->mlo_complete_event = false;
+	return ret;
+}
+
+bool ath12k_wmi_is_mvr_supported(struct ath12k_base *ab)
+{
+	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
+
+	return test_bit(WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART,
+			 wmi_ab->svc_map) &&
+		test_bit(WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART_RESPONSE_SUPPORT,
+			 wmi_ab->svc_map);
+}
+
+int ath12k_wmi_pdev_multiple_vdev_restart(struct ath12k *ar,
+					  struct wmi_pdev_multiple_vdev_restart_req_arg *arg)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_pdev_multiple_vdev_restart_request_cmd *cmd;
+	struct ath12k_wmi_channel_params *chan;
+	struct wmi_tlv *tlv;
+	struct ath12k_wmi_channel_params *chan_device;
+	u32 num_vdev_ids;
+	__le32 *vdev_ids;
+	size_t vdev_ids_len;
+	struct sk_buff *skb;
+	void *ptr;
+	int ret, len, i;
+	bool device_params_present = false;
+
+	if (WARN_ON(arg->vdev_ids.id_len > TARGET_NUM_VDEVS))
+		return -EINVAL;
+
+	num_vdev_ids = arg->vdev_ids.id_len;
+	vdev_ids_len = num_vdev_ids * sizeof(__le32);
+
+	len = sizeof(*cmd) + TLV_HDR_SIZE + vdev_ids_len +
+	      sizeof(*chan) + TLV_HDR_SIZE + TLV_HDR_SIZE +
+	      TLV_HDR_SIZE;
+
+	device_params_present = ath12k_wmi_check_device_present(arg->width_device,
+								arg->center_freq_device,
+								arg->channel.band_center_freq1);
+
+	if (device_params_present)
+		len += TLV_HDR_SIZE + sizeof(*chan_device);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_pdev_multiple_vdev_restart_request_cmd *)skb->data;
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMD,
+						 sizeof(*cmd));
+	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+	cmd->num_vdevs = cpu_to_le32(arg->vdev_ids.id_len);
+
+	if (device_params_present) {
+		arg->ru_punct_bitmap =
+		ath12k_wmi_set_ru_punc_bitmap_device(arg->channel.freq, arg->width_device,
+						     arg->center_freq_device,
+						     arg->ru_punct_bitmap);
+	}
+	cmd->puncture_20mhz_bitmap = cpu_to_le32(arg->ru_punct_bitmap);
+
+	cmd->flags = cpu_to_le32(WMI_MVR_RESPONSE_SUPPORT_EXPECTED);
+
+	ptr = skb->data + sizeof(*cmd);
+	tlv = (struct wmi_tlv *)ptr;
+
+	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, vdev_ids_len);
+	vdev_ids = (__le32 *)tlv->value;
+
+	for (i = 0; i < num_vdev_ids; i++)
+		vdev_ids[i] = cpu_to_le32(arg->vdev_ids.id[i]);
+
+	ptr += TLV_HDR_SIZE + vdev_ids_len;
+	chan = (struct ath12k_wmi_channel_params *)ptr;
+
+	ath12k_wmi_put_wmi_channel(chan, arg->channel);
+
+	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL, sizeof(*chan));
+	ptr += sizeof(*chan);
+
+	/* Zero length TLVs for phymode_list, preferred_tx_stream_list
+	 * and preferred_rx_stream_list which are mandatory if any of
+	 * the following TLVs are to be sent to target.
+	 */
+	tlv = ptr;
+	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
+	ptr += sizeof(*tlv);
+	tlv = ptr;
+	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
+	ptr += sizeof(*tlv);
+	tlv = ptr;
+	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
+	ptr += sizeof(*tlv);
+
+	if (device_params_present) {
+		tlv = ptr;
+		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+						 sizeof(*chan_device));
+		ptr += TLV_HDR_SIZE;
+		chan_device = ptr;
+		ath12k_wmi_set_wmi_channel_device(chan_device, &arg->channel,
+						  arg->center_freq_device,
+						  arg->width_device);
+		ptr += sizeof(*chan_device);
+	}
+
+	ret = ath12k_wmi_cmd_send(wmi, skb,
+				  WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "wmi failed to send mvr command (%d)\n",
+			    ret);
+		dev_kfree_skb(skb);
+		return ret;
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "wmi mvr cmd sent num_vdevs %d freq %d\n",
+		   num_vdev_ids, arg->channel.freq);
+
+	return ret;
+}
+
+int ath12k_wmi_vdev_adfs_ch_cfg_cmd_send(struct ath12k *ar,
+					 u32 vdev_id,
+					 struct cfg80211_chan_def *def)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_vdev_adfs_ch_cfg_cmd *cmd;
+	struct sk_buff *skb;
+	int ret = 0;
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_vdev_adfs_ch_cfg_cmd *)skb->data;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_ADFS_CH_CFG_CMD) |
+			  FIELD_PREP(WMI_TLV_LEN,
+				     sizeof(struct wmi_vdev_adfs_ch_cfg_cmd) - TLV_HDR_SIZE);
+	cmd->vdev_id = vdev_id;
+
+	if (ar->ab->dfs_region == ATH12K_DFS_REG_ETSI) {
+		cmd->ocac_mode = WMI_ADFS_MODE_QUICK_OCAC;
+		cmd->min_duration_ms = cfg80211_chandef_dfs_cac_time(ar->ah->hw->wiphy,
+								     def /*,
+									   true, false */);
+
+
+		if (cmd->min_duration_ms == MIN_WEATHER_RADAR_CHAN_PRECAC_TIMEOUT)
+			cmd->max_duration_ms = MAX_WEATHER_RADAR_CHAN_PRECAC_TIMEOUT;
+		else
+			cmd->max_duration_ms = MAX_PRECAC_TIMEOUT;
+	} else if (ar->ab->dfs_region == ATH12K_DFS_REG_FCC) {
+		cmd->ocac_mode = WMI_ADFS_MODE_QUICK_RCAC;
+		cmd->min_duration_ms = MIN_RCAC_TIMEOUT;
+		cmd->max_duration_ms = MAX_RCAC_TIMEOUT;
+	}
+
+	cmd->chan_freq = def->chan->center_freq;
+	cmd->chan_width = ath12k_wmi_get_host_chan_width(def->width);
+	cmd->center_freq1 = def->center_freq1;
+	cmd->center_freq2 = def->center_freq2;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "Send adfs channel cfg command for vdev id %d "
+		   "mode as %d min duration %d chan_freq %d chan_width %d\n"
+		   "center_freq1 %d center_freq2 %d", cmd->vdev_id,
+		   cmd->ocac_mode, cmd->min_duration_ms, cmd->chan_freq,
+		   cmd->chan_width, cmd->center_freq1, cmd->center_freq2);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_ADFS_CH_CFG_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to send WMI_VDEV_ADFS_CH_CFG_CMDID\n");
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
+
+int ath12k_wmi_vdev_adfs_ocac_abort_cmd_send(struct ath12k *ar, u32 vdev_id)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_vdev_adfs_ocac_abort_cmd *cmd;
+	struct sk_buff *skb;
+	int ret = 0;
+
+	if (!ar->agile_chandef.chan) {
+		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+			   "Currently, agile CAC is not active on any channel."
+			   "Ignore abort");
+		return ret;
+	}
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_vdev_adfs_ocac_abort_cmd *)skb->data;
+	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_ADFS_OCAC_ABORT_CMD) |
+			  FIELD_PREP(WMI_TLV_LEN,
+				     sizeof(struct wmi_vdev_adfs_ocac_abort_cmd) - TLV_HDR_SIZE);
+
+	cmd->vdev_id = vdev_id;
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_ADFS_OCAC_ABORT_CMDID);
+
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to send WMI_VDEV_ADFS_ABORT_CMD\n");
+		dev_kfree_skb(skb);
+		return ret;
+	}
+	return ret;
+}
+
+#ifdef CONFIG_ATH12K_SAWF
+
+int ath12k_wmi_svc_config_send(struct ath12k *ar, struct ath12k_sawf_svc_params *param)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_sawf_svc_cfg_cmd_fixed_param *cmd;
+	struct sk_buff *skb;
+	int len, ret;
+
+	len = sizeof(*cmd);
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_sawf_svc_cfg_cmd_fixed_param *)skb->data;
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SAWF_SERVICE_CLASS_CFG_CMD_FIXED_PARAM,
+						 sizeof(*cmd));
+
+	/* Valid svc_id advertised to user is in the range of 1 to 128.
+	 * However, firmware is maintaining svc_id range from 0 to 127.
+	 * So sending svc_id - 1, to address this firmware limitation.
+	 */
+	cmd->svc_class_id = cpu_to_le32(param->svc_id - 1);
+	cmd->min_thruput_kbps = cpu_to_le32(param->min_throughput_rate);
+	cmd->max_thruput_kbps = cpu_to_le32(param->max_throughput_rate);
+	cmd->burst_size_bytes = cpu_to_le32(param->burst_size);
+	cmd->svc_interval_ms = cpu_to_le32(param->service_interval);
+	cmd->delay_bound_ms = cpu_to_le32(param->delay_bound);
+	cmd->time_to_live_ms = cpu_to_le32(param->msdu_ttl);
+	cmd->priority = cpu_to_le32(param->priority);
+	cmd->tid = cpu_to_le32(param->tid);
+	cmd->msdu_loss_rate_ppm = cpu_to_le32(param->msdu_rate_loss);
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "Service class configure: svc_id: %u, min_throughput: %u, "
+		   "max_throughput: %u, burst_size: %u, svc_interval: %u, "
+		   "delay_bound: %u, TTL: %u, priority: %u, tid: %u, msdu_loss_rate: %u",
+		   cmd->svc_class_id, cmd->min_thruput_kbps, cmd->max_thruput_kbps,
+		   cmd->burst_size_bytes, cmd->svc_interval_ms, cmd->delay_bound_ms,
+		   cmd->time_to_live_ms, cmd->priority, cmd->tid,
+		   cmd->msdu_loss_rate_ppm);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SAWF_SERVICE_CLASS_CFG_CMDID);
+	if (ret) {
+		ath12k_err(ar->ab,
+			    "failed to config/reconfig the service class param\n");
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
+
+int ath12k_wmi_svc_send_disable(struct ath12k *ar, u32 svc_id)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_sawf_svc_disable_cmd_fixed_param *cmd;
+	struct sk_buff *skb;
+	int len, ret;
+
+	len = sizeof(*cmd);
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_sawf_svc_disable_cmd_fixed_param *)skb->data;
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SAWF_SERVICE_CLASS_DISABLE_CMD_FIXED_PARAM,
+						 sizeof(*cmd));
+
+	/* Valid svc_id advertised to user is in the range of 1 to 128.
+	 * However, firmware is maintaining svc_id range from 0 to 127.
+	 * So sending svc_id - 1, to address this firmware limitation.
+	 */
+	cmd->svc_class_id = cpu_to_le32(svc_id - 1);
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "Service class disable: svc_id: %u",
+		   cmd->svc_class_id);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SAWF_SERVICE_CLASS_DISABLE_CMDID);
+	if (ret) {
+		ath12k_err(ar->ab,
+			    "failed to disable service class: %u\n", svc_id);
+		dev_kfree_skb(skb);
+	}
+
+	return ret;
+}
+
+#endif /* CONFIG_ATH12K_SAWF */
+
+static void ath12k_wmi_put_peer_list(struct ath12k_base *ab,
+				     struct wmi_chan_width_peer_list *peer_list,
+				     struct wmi_chan_width_peer_arg *peer_arg,
+				     u32 num_peers, int start_idx)
+{
+	struct wmi_chan_width_peer_list *itr;
+	struct wmi_chan_width_peer_arg *arg_itr;
+	int i;
+	u32 host_chan_width;
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "wmi peer channel width switch command peer list\n");
+
+	for (i = 0; i < num_peers; i++) {
+		itr = &peer_list[i];
+		arg_itr = &peer_arg[start_idx + i];
+
+		itr->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHAN_WIDTH_PEER_LIST,
+							 sizeof(*itr));
+		ether_addr_copy(itr->mac_addr.addr, arg_itr->mac_addr.addr);
+		host_chan_width = ath12k_wmi_get_host_chan_switch_width(arg_itr->chan_width);
+		itr->chan_width = cpu_to_le32(host_chan_width);
+		itr->puncture_20mhz_bitmap = cpu_to_le32(arg_itr->puncture_20mhz_bitmap);
+
+		ath12k_dbg(ab, ATH12K_DBG_WMI,
+			   "   (%u) width %u addr %pM punct_bitmap 0x%x host chan_width: %d\n",
+			   i + 1, arg_itr->chan_width, arg_itr->mac_addr.addr,
+			   arg_itr->puncture_20mhz_bitmap, host_chan_width);
+	}
+}
+
+static int ath12k_wmi_peer_chan_width_switch(struct ath12k *ar,
+					     struct wmi_peer_chan_width_switch_arg *arg)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_peer_chan_width_switch_req_cmd *cmd;
+	struct wmi_chan_width_peer_list *peer_list;
+	struct wmi_tlv *tlv;
+	u32 num_peers;
+	size_t peer_list_len;
+	struct sk_buff *skb;
+	void *ptr;
+	int ret, len;
+
+	num_peers = arg->num_peers;
+
+	if (WARN_ON(num_peers > ab->chwidth_num_peer_caps))
+		return -EINVAL;
+
+	peer_list_len = num_peers * sizeof(*peer_list);
+
+	len = sizeof(*cmd) + TLV_HDR_SIZE + peer_list_len;
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_peer_chan_width_switch_req_cmd *)skb->data;
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CHAN_WIDTH_SWITCH_CMD,
+						 sizeof(*cmd));
+	cmd->num_peers = cpu_to_le32(num_peers);
+	cmd->vdev_var = cpu_to_le32(arg->vdev_var);
+
+	ptr = skb->data + sizeof(*cmd);
+	tlv = (struct wmi_tlv *)ptr;
+
+	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, peer_list_len);
+	peer_list = (struct wmi_chan_width_peer_list *)tlv->value;
+
+	ath12k_wmi_put_peer_list(ab, peer_list, arg->peer_arg, num_peers,
+				 arg->start_idx);
+
+	ptr += peer_list_len;
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CHAN_WIDTH_SWITCH_CMDID);
+	if (ret) {
+		ath12k_warn(ab, "wmi failed to send peer chan width switch command (%d)\n",
+			    ret);
+		dev_kfree_skb(skb);
+		return ret;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "wmi peer chan width switch cmd sent num_peers %d \n",
+		   num_peers);
+
+	return ret;
+}
+
+void ath12k_wmi_set_peers_chan_width(struct ath12k_link_vif *arvif,
+				     struct wmi_chan_width_peer_arg *peer_arg,
+				     int num, u8 start_idx)
+{
+	struct ath12k *ar = arvif->ar;
+	struct wmi_chan_width_peer_arg *arg;
+	int i, err;
+
+	for (i = 0; i < num; i++) {
+		arg = &peer_arg[start_idx + i];
+
+		/* for bandwidth upgrade phymode should be set already. Safe
+		 * to directly set channel width
+		 */
+		err = ath12k_wmi_set_peer_param(ar, arg->mac_addr.addr,
+						arvif->vdev_id, WMI_PEER_CHWIDTH,
+						arg->chan_width);
+		if (err) {
+			ath12k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
+				    arg->mac_addr.addr, arg->chan_width, err);
+			continue;
+		}
+
+		/* for bandwidth downgrade, phymode should be set after setting
+		 * channel width
+		 */
+		if (!arg->is_upgrade) {
+			err = ath12k_wmi_set_peer_param(ar, arg->mac_addr.addr,
+							arvif->vdev_id, WMI_PEER_PHYMODE,
+							arg->peer_phymode);
+			if (err)
+				ath12k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n",
+					    arg->mac_addr.addr, arg->peer_phymode, err);
+		}
+	}
+}
+
+void ath12k_wmi_peer_chan_width_switch_work(struct work_struct *work)
+{
+	struct ath12k_link_vif *arvif = container_of(work, struct ath12k_link_vif,
+						     peer_ch_width_switch_work);
+	struct ath12k *ar = arvif->ar;
+	struct ath12k_peer_ch_width_switch_data *data;
+	struct wmi_peer_chan_width_switch_arg arg;
+	struct wmi_chan_width_peer_arg *peer_arg;
+	unsigned long time_left = 0;
+	int count_left, curr_count, max_count_per_cmd = ar->ab->chwidth_num_peer_caps;
+	int cmd_num = 0, ret, i;
+
+	mutex_lock(&ar->conf_mutex);
+
+	/* possible that the worker got scheduled after complete was triggered. In
+	 * this case we don't wait for timeout */
+	if (arvif->peer_ch_width_switch_data->count == arvif->num_stations)
+		goto send_cmd;
+
+	mutex_unlock(&ar->conf_mutex);
+
+	time_left = wait_for_completion_timeout(&arvif->peer_ch_width_switch_send,
+						ATH12K_PEER_CH_WIDTH_SWITCH_TIMEOUT_HZ);
+	if (time_left == 0) {
+		/* Even though timeout occurred, we would send the command for the peers
+		 * for which we received sta rc update event, hence not returning */
+		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+			   "timed out waiting for all peers in peer channel width switch\n");
+	}
+
+	mutex_lock(&ar->conf_mutex);
+
+send_cmd:
+
+	data = arvif->peer_ch_width_switch_data;
+
+	spin_lock_bh(&ar->data_lock);
+	arg.vdev_var = arvif->vdev_id;
+	spin_unlock_bh(&ar->data_lock);
+
+	arg.vdev_var |= ATH12K_PEER_VALID_VDEV_ID | ATH12K_PEER_PUNCT_BITMAP_VALID;
+	arg.peer_arg = data->peer_arg;
+
+	count_left = data->count;
+
+	while (count_left > 0) {
+		if (count_left <= max_count_per_cmd)
+			curr_count = count_left;
+		else
+			curr_count = max_count_per_cmd;
+
+		count_left -= curr_count;
+
+		cmd_num++;
+
+		arg.num_peers = curr_count;
+		arg.start_idx = (cmd_num - 1) * max_count_per_cmd;
+
+		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+			   "wmi peer channel width switch command num %u\n",
+			   cmd_num);
+
+		ret = ath12k_wmi_peer_chan_width_switch(ar, &arg);
+		if (ret) {
+			/* fallback */
+			ath12k_wmi_set_peers_chan_width(arvif, arg.peer_arg, arg.num_peers,
+							arg.start_idx);
+		}
+	}
+
+	for (i = 0; i < data->count; i++) {
+		peer_arg = &data->peer_arg[i];
+
+		/* for bandwidth upgrade phymode is set from worker scheduler */
+		if (peer_arg->is_upgrade)
+			continue;
+
+		/* for bandwidth downgrade phymode needs to be set */
+		ret = ath12k_wmi_set_peer_param(ar, peer_arg->mac_addr.addr,
+						arvif->vdev_id, WMI_PEER_PHYMODE,
+						peer_arg->peer_phymode);
+		if (ret)
+			ath12k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n",
+				    peer_arg->mac_addr.addr, peer_arg->peer_phymode, ret);
+	}
+
+	kfree(arvif->peer_ch_width_switch_data);
+	arvif->peer_ch_width_switch_data = NULL;
+	mutex_unlock(&ar->conf_mutex);
+}
+
+int ath12k_wmi_set_latency(struct ath12k *ar, struct ath12k_latency_params *params)
+{
+	struct sk_buff *skb;
+	struct wmi_tlv *tlv;
+	struct ath12k_wmi_pdev *wmi;
+	struct wmi_peer_tid_latency_cmd *cmd;
+	struct wmi_tid_latency_params *tid_lat;
+	void *data;
+	int len, ret, num_peer = 1, sawf_ul_param = 1;
+	u8 dl_enable, ul_enable;
+
+	wmi = ar->wmi;
+	len = sizeof(*cmd) + TLV_HDR_SIZE + (num_peer * sizeof(*tid_lat));
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb) {
+		ath12k_warn(ar->ab, "Memory is not available\n");
+		return -ENOMEM;
+	}
+
+	if (params->direction == WMI_LATENCY_DIR_UL) {
+		ul_enable = 1;
+		dl_enable = 0;
+	} else if (params->direction == WMI_LATENCY_DIR_DL) {
+		ul_enable = 0;
+		dl_enable = 1;
+	} else {
+		ath12k_warn(ar->ab, "Direct link is not supported\n");
+		dev_kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	data = skb->data;
+	cmd = data;
+	cmd->tlv_header = ath12k_wmi_tlv_hdr(WMI_TAG_PEER_TID_LATENCY_CONFIG_FIXED_PARAM,
+					     (sizeof(*cmd) - TLV_HDR_SIZE));
+	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+
+	data += sizeof(*cmd);
+	tlv = data;
+	len = sizeof(*tid_lat) * num_peer;
+
+	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
+
+	data += TLV_HDR_SIZE;
+	tid_lat = data;
+
+	tid_lat->tlv_header = ath12k_wmi_tlv_hdr(WMI_TAG_TID_LATENCY_INFO,
+						 (sizeof(*tid_lat) - TLV_HDR_SIZE));
+
+	tid_lat->service_interval = cpu_to_le32(params->service_interval);
+	tid_lat->burst_size_diff = cpu_to_le32(params->burst_size);
+	tid_lat->max_latency = cpu_to_le32(params->delay_bound);
+	tid_lat->min_tput = cpu_to_le32(params->min_data_rate);
+
+	ether_addr_copy(tid_lat->destmac.addr, params->peer_mac);
+
+	tid_lat->latency_tid_info =
+		le32_encode_bits(params->user_priority, WMI_LATENCY_TID_INFO_SCS_TID_NUM) |
+		le32_encode_bits(params->ac, WMI_LATENCY_TID_INFO_SCS_AC) |
+		le32_encode_bits(dl_enable, WMI_LATENCY_TID_INFO_SCS_DL_EN) |
+		le32_encode_bits(ul_enable, WMI_LATENCY_TID_INFO_SCS_UL_EN) |
+		le32_encode_bits(params->req_type, WMI_LATENCY_TID_INFO_SCS_BURST_SZ_SUM) |
+		le32_encode_bits(sawf_ul_param, WMI_LATENCY_TID_INFO_SCS_SAWF_UL_PARAM);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_TID_LATENCY_CONFIG_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to submit WMI_PEER_TID_LATENCY_CONFIG_CMDID cmd %d\n",
+			    ret);
+		dev_kfree_skb(skb);
+	}
+	return ret;
+}
diff -ruw linux-6.4/drivers/net/wireless/ath/ath12k/wmi.h linux-6.4-fbx/drivers/net/wireless/ath/ath12k/wmi.h
--- linux-6.4/drivers/net/wireless/ath/ath12k/wmi.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/wmi.h	2024-03-18 14:40:14.867741770 +0100
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH12K_WMI_H
@@ -24,6 +24,12 @@
 
 struct ath12k_base;
 struct ath12k;
+struct ath12k_fw_stats;
+struct ath12k_reg_tpc_power_info;
+struct ath12k_sawf_svc_params;
+struct ath12k_latency_params;
+
+#define PSOC_HOST_MAX_NUM_SS (8)
 
 /* There is no signed version of __le32, so for a temporary solution come
  * up with our own version. The idea is from fs/ntfs/endian.h.
@@ -72,6 +78,31 @@
 	u8 value[];
 } __packed;
 
+struct wmi_vdev_ch_power_info {
+        u32 tlv_header;
+        u32 chan_cfreq; /* Channel center frequency (MHz) */
+        /* Unit: dBm, either PSD/EIRP power for this frequency or
+         * incremental for non-PSD BW
+         */
+        u32 tx_power;
+} __packed;
+
+struct wmi_vdev_set_tpc_power_cmd {
+        u32 tlv_header;
+        u32 vdev_id;
+        u32 psd_power; /* Value: 0 or 1, is PSD power or not */
+        u32 eirp_power; /* Maximum EIRP power (dBm units), valid only if power is PSD */
+        u32 power_type_6ghz; /* Type: WMI_6GHZ_REG_TYPE, used for halphy CTL lookup */
+        /* This fixed_param TLV is followed by the below TLVs:
+         * num_pwr_levels of wmi_vdev_ch_power_info
+         * For non-psd power, the power values are for 20, 40, and till
+         * BSS BW power levels.
+         * The num_pwr_levels will be checked by sw how many elements present
+         * in the variable-length array.
+         */
+} __packed;
+
+
 #define WMI_TLV_LEN	GENMASK(15, 0)
 #define WMI_TLV_TAG	GENMASK(31, 16)
 #define TLV_HDR_SIZE	sizeof_field(struct wmi_tlv, header)
@@ -175,6 +206,58 @@
 #define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
 
 #define WMI_BA_MODE_BUFFER_SIZE_256  3
+#define WMI_BA_MODE_BUFFER_SIZE_1024 6
+
+#define ATH12K_VALIDATE_PARSED_DATA_POINTER(parsed_bytes, data, \
+			   remaining_len) \
+do { \
+	if ((parsed_bytes) < 0 || (parsed_bytes > remaining_len)) { \
+		ath12k_err(ab, "TLV extraction failed\n"); \
+		return -EINVAL; \
+	} \
+	(data) += (parsed_bytes); \
+	(remaining_len) -= (parsed_bytes); \
+} while (0)
+
+/**
+ * is_field_present_in_tlv() - Check whether a given field is present
+ * in a given TLV
+ * @ptlv: Pointer to start of the TLV
+ * @field_name: name of the field in the TLV structure
+ * @tlv_len: Length of the TLV
+ *
+ * Return: true if the field is present within the TLV,
+ * else false
+ */
+#define is_field_present_in_tlv(ptlv, field_name, tlv_len) \
+	(offsetof(typeof(*(ptlv)), field_name) < (tlv_len) ? \
+	true : false)
+
+/**
+ * get_field_value_in_tlv() - Get the value of a given field in a given TLV
+ * @ptlv: Pointer to start of the TLV
+ * @field_name: name of the field in the TLV structure
+ * @tlv_len: Length of the TLV
+ *
+ * Return: Value of the given field if the offset of the field within the TLV
+ * structure is less than the TLV length, else 0.
+ */
+#define get_field_value_in_tlv(ptlv, field_name, tlv_len) \
+	(offsetof(typeof(*(ptlv)), field_name) < (tlv_len) ? \
+	(ptlv)->field_name : 0)
+
+/**
+ * get_field_pointer_in_tlv() - Get the address of a given field in a given TLV
+ * @ptlv: Pointer to start of the TLV
+ * @field_name: name of the field in the TLV structure
+ * @tlv_len: Length of the TLV
+ *
+ * Return: Address of the given field if the offset of the field within the
+ * TLV structure is less than the TLV length, else NULL.
+ */
+#define get_field_pointer_in_tlv(ptlv, field_name, tlv_len) \
+	(offsetof(typeof(*(ptlv)), field_name) < (tlv_len) ? \
+	&(ptlv)->field_name : NULL)
 
 /* HW mode config type replicated from FW header
  * @WMI_HOST_HW_MODE_SINGLE: Only one PHY is active.
@@ -227,6 +310,22 @@
 	WMI_HOST_WLAN_2G_5G_CAP	= 3,
 };
 
+/* Parameters used for WMI_VDEV_PARAM_AUTORATE_MISC_CFG command.
+ * Used only for HE auto rate mode.
+ */
+enum {
+	/* HE LTF related configuration */
+	WMI_HE_AUTORATE_LTF_1X = BIT(0),
+	WMI_HE_AUTORATE_LTF_2X = BIT(1),
+	WMI_HE_AUTORATE_LTF_4X = BIT(2),
+
+	/* HE GI related configuration */
+	WMI_AUTORATE_400NS_GI = BIT(8),
+	WMI_AUTORATE_800NS_GI = BIT(9),
+	WMI_AUTORATE_1600NS_GI = BIT(10),
+	WMI_AUTORATE_3200NS_GI = BIT(11),
+};
+
 enum wmi_cmd_group {
 	/* 0 to 2 are reserved */
 	WMI_GRP_START = 0x3,
@@ -292,6 +391,9 @@
 	WMI_GRP_TWT            = 0x3e,
 	WMI_GRP_MOTION_DET     = 0x3f,
 	WMI_GRP_SPATIAL_REUSE  = 0x40,
+	WMI_GRP_LATENCY        = 0x47,
+	WMI_GRP_MLO	       = 0x48,
+	WMI_GRP_SAWF           = 0x49,
 };
 
 #define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
@@ -361,6 +463,17 @@
 	WMI_PDEV_DMA_RING_CFG_REQ_CMDID,
 	WMI_PDEV_HE_TB_ACTION_FRM_CMDID,
 	WMI_PDEV_PKTLOG_FILTER_CMDID,
+	WMI_PDEV_SET_RAP_CONFIG_CMDID,
+	WMI_PDEV_DSM_FILTER_CMDID,
+	WMI_PDEV_FRAME_INJECT_CMDID,
+	WMI_PDEV_TBTT_OFFSET_SYNC_CMDID,
+	WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID,
+	WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID,
+	WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
+	WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
+	WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
+	WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
+	WMI_PDEV_GET_TPC_STATS_CMDID,
 	WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV),
 	WMI_VDEV_DELETE_CMDID,
 	WMI_VDEV_START_REQUEST_CMDID,
@@ -388,6 +501,38 @@
 	WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
 	WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
 	WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID,
+        /** WMI commands related to dbg arp stats */
+        WMI_VDEV_SET_ARP_STAT_CMDID,
+        WMI_VDEV_GET_ARP_STAT_CMDID,
+        /** get tx power for the current vdev */
+        WMI_VDEV_GET_TX_POWER_CMDID,
+        /* limit STA offchannel activity */
+        WMI_VDEV_LIMIT_OFFCHAN_CMDID,
+        /** To set custom software retries per-AC for vdev */
+        WMI_VDEV_SET_CUSTOM_SW_RETRY_TH_CMDID,
+        /** To set chainmask configuration for vdev */
+        WMI_VDEV_CHAINMASK_CONFIG_CMDID,
+        WMI_VDEV_GET_BCN_RECEPTION_STATS_CMDID,
+        /* request LTE-Coex info */
+        WMI_VDEV_GET_MWS_COEX_INFO_CMDID,
+        /** delete all peer (excluding bss peer) */
+        WMI_VDEV_DELETE_ALL_PEER_CMDID,
+        /* To set bss max idle time related parameters */
+        WMI_VDEV_BSS_MAX_IDLE_TIME_CMDID,
+        /** Indicates FW to trigger Audio sync  */
+        WMI_VDEV_AUDIO_SYNC_TRIGGER_CMDID,
+        /** Gives Qtimer value  to FW  */
+        WMI_VDEV_AUDIO_SYNC_QTIMER_CMDID,
+        /** Preferred channel list for each vdev */
+        WMI_VDEV_SET_PCL_CMDID,
+        /** VDEV_GET_BIG_DATA_CMD IS DEPRECATED - DO NOT USE */
+        WMI_VDEV_GET_BIG_DATA_CMDID,
+        /** Get per vdev BIG DATA stats phase 2 */
+        WMI_VDEV_GET_BIG_DATA_P2_CMDID,
+        /** set TPC PSD/non-PSD power */
+        WMI_VDEV_SET_TPC_POWER_CMDID,
+	WMI_VDEV_IGMP_OFFLOAD_CMDID,
+	WMI_VDEV_SET_INTRA_BSS_CMDID,
 	WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER),
 	WMI_PEER_DELETE_CMDID,
 	WMI_PEER_FLUSH_TIDS_CMDID,
@@ -410,6 +555,16 @@
 	WMI_PEER_REORDER_QUEUE_REMOVE_CMDID,
 	WMI_PEER_SET_RX_BLOCKSIZE_CMDID,
 	WMI_PEER_ANTDIV_INFO_REQ_CMDID,
+	WMI_PEER_RESERVED0_CMDID,
+	WMI_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMDID,
+	WMI_PEER_TID_CONFIGURATIONS_CMDID,
+	WMI_PEER_CFR_CAPTURE_CMDID,
+	WMI_PEER_CHAN_WIDTH_SWITCH_CMDID,
+	WMI_PEER_TX_PN_REQUEST_CMDID,
+	WMI_PEER_UNMAP_RESPONSE_CMDID,
+	WMI_PEER_CONFIG_VLAN_CMDID,
+	WMI_PEER_CONFIG_PPE_DS_CMDID,
+	WMI_PEER_SET_INTRA_BSS_CMDID,
 	WMI_BCN_TX_CMDID = WMI_TLV_CMD(WMI_GRP_MGMT),
 	WMI_PDEV_SEND_BCN_CMDID,
 	WMI_BCN_TMPL_CMDID,
@@ -424,6 +579,9 @@
 	WMI_BSS_COLOR_CHANGE_ENABLE_CMDID,
 	WMI_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMDID,
 	WMI_FILS_DISCOVERY_TMPL_CMDID,
+	WMI_QOS_NULL_FRAME_TX_SEND_CMDID,
+	/** WMI CMD to receive the management filter criteria from the host for RX REO */
+	WMI_MGMT_RX_REO_FILTER_CONFIGURATION_CMDID,
 	WMI_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
 	WMI_ADDBA_SEND_CMDID,
 	WMI_ADDBA_STATUS_CMDID,
@@ -519,6 +677,11 @@
 	WMI_REQUEST_RCPI_CMDID,
 	WMI_REQUEST_PEER_STATS_INFO_CMDID,
 	WMI_REQUEST_RADIO_CHAN_STATS_CMDID,
+	WMI_REQUEST_WLM_STATS_CMDID,
+	WMI_REQUEST_CTRL_PATH_STATS_CMDID,
+	WMI_REQUEST_UNIFIED_LL_GET_STA_CMDID,
+	WMI_REQUEST_THERMAL_STATS_CMDID,
+	WMI_REQUEST_STATS_CTRL_PATH_CMDID,
 	WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_ARP_NS_OFL),
 	WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
 	WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
@@ -669,6 +832,22 @@
 	WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID =
 				WMI_TLV_CMD(WMI_GRP_SPATIAL_REUSE),
 	WMI_PDEV_OBSS_PD_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMDID,
+	/** WMI commands specific to Tid level Latency config **/
+	/** VDEV Latency Config command */
+	WMI_VDEV_TID_LATENCY_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_LATENCY),
+	/** TID Latency Request command */
+	WMI_PEER_TID_LATENCY_CONFIG_CMDID,
+	WMI_MLO_LINK_SET_ACTIVE_CMDID = WMI_TLV_CMD(WMI_GRP_MLO),
+	WMI_MLO_SETUP_CMDID,
+	WMI_MLO_READY_CMDID,
+	WMI_MLO_TEARDOWN_CMDID,
+	WMI_MLO_PEER_TID_TO_LINK_MAP_CMDID,
+	/** Service Aware WiFi (SAWF) **/
+	/** configure or reconfigure the parameters for a service class **/
+	WMI_SAWF_SERVICE_CLASS_CFG_CMDID = WMI_TLV_CMD(WMI_GRP_SAWF),
+	/** disable a service class **/
+	WMI_SAWF_SERVICE_CLASS_DISABLE_CMDID,
+
 };
 
 enum wmi_tlv_event_id {
@@ -676,6 +855,9 @@
 	WMI_READY_EVENTID,
 	WMI_SERVICE_AVAILABLE_EVENTID,
 	WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+	WMI_PDEV_SSCAN_FW_PARAM_EVENTID,
+	WMI_SSCAN_EVT_MESSAGE_EVENTID,
+	WMI_SPECTRAL_CAPABILITIES_EVENTID,
 	WMI_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_CMD(WMI_GRP_PDEV),
 	WMI_CHAN_INFO_EVENTID,
 	WMI_PHYERR_EVENTID,
@@ -710,6 +892,15 @@
 	WMI_PDEV_RAP_INFO_EVENTID,
 	WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID,
 	WMI_SERVICE_READY_EXT2_EVENTID,
+	WMI_PDEV_MULTIPLE_VDEV_RESTART_RESP_EVENTID,
+	WMI_PDEV_GET_TPC_STATS_EVENTID,
+	WMI_PDEV_GET_DPD_STATUS_EVENTID,
+	WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID,
+	WMI_PDEV_SET_HALPHY_CAL_BMAP_EVENTID,
+	WMI_PDEV_AOA_PHASEDELTA_EVENTID,
+	WMI_PDEV_FIPS_EXTEND_EVENTID,
+	WMI_PDEV_PKTLOG_DECODE_INFO_EVENTID,
+	WMI_PDEV_RSSI_DBM_CONVERSION_PARAMS_INFO_EVENTID,
 	WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV),
 	WMI_VDEV_STOPPED_EVENTID,
 	WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
@@ -751,6 +942,9 @@
 	WMI_TBTTOFFSET_EXT_UPDATE_EVENTID,
 	WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID,
 	WMI_HOST_FILS_DISCOVERY_EVENTID,
+	WMI_HOST_FILS_V2_DISCOVERY_EVENTID,
+	WMI_QOS_NULL_FRAME_TX_COMPLETION_EVENTID,
+	WMI_MGMT_RX_FW_CONSUMED_EVENTID,
 	WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
 	WMI_TX_ADDBA_COMPLETE_EVENTID,
 	WMI_BA_RSP_SSN_EVENTID,
@@ -781,6 +975,9 @@
 	WMI_UPDATE_RCPI_EVENTID,
 	WMI_PEER_STATS_INFO_EVENTID,
 	WMI_RADIO_CHAN_STATS_EVENTID,
+	WMI_WLM_STATS_EVENTID,
+	WMI_CTRL_PATH_STATS_EVENTID,
+	WMI_STATS_CTRL_PATH_EVENTID,
 	WMI_NLO_MATCH_EVENTID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
 	WMI_NLO_SCAN_COMPLETE_EVENTID,
 	WMI_APFIND_EVENTID,
@@ -817,6 +1014,7 @@
 	WMI_READ_DATA_FROM_FLASH_EVENTID,
 	WMI_REPORT_RX_AGGR_FAILURE_EVENTID,
 	WMI_PKGID_EVENTID,
+	WMI_MUEDCA_PARAMS_CONFIG_EVENTID = 0x1d01e,
 	WMI_GPIO_INPUT_EVENTID = WMI_TLV_CMD(WMI_GRP_GPIO),
 	WMI_UPLOADH_EVENTID,
 	WMI_CAPTUREH_EVENTID,
@@ -849,6 +1047,8 @@
 	WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
 	WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
 	WMI_SAP_OFL_DEL_STA_EVENTID,
+	WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID =
+					WMI_EVT_GRP_START_ID(WMI_GRP_OBSS_OFL),
 	WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB),
 	WMI_OCB_GET_TSF_TIMER_RESP_EVENTID,
 	WMI_DCC_GET_STATS_RESP_EVENTID,
@@ -878,6 +1078,9 @@
 	WMI_TWT_DEL_DIALOG_EVENTID,
 	WMI_TWT_PAUSE_DIALOG_EVENTID,
 	WMI_TWT_RESUME_DIALOG_EVENTID,
+	WMI_MLO_LINK_SET_ACTIVE_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MLO),
+	WMI_MLO_SETUP_COMPLETE_EVENTID,
+	WMI_MLO_TEARDOWN_COMPLETE_EVENTID,
 };
 
 enum wmi_tlv_pdev_param {
@@ -1024,6 +1227,12 @@
 	WMI_PDEV_PARAM_RADIO_CHAN_STATS_ENABLE,
 	WMI_PDEV_PARAM_RADIO_DIAGNOSIS_ENABLE,
 	WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+	WMI_PDEV_PARAM_SET_CONG_CTRL_MAX_MSDUS = 0xa6,
+	WMI_PDEV_PARAM_SUB_CHANNEL_MARKING = 0xb0,
+	WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD = 0xbc,
+	WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC = 0xbe,
+	WMI_PDEV_PARAM_ENABLE_SR_PROHIBIT = 0xc6,
+	WMI_PDEV_PARAM_MPD_USERPD_SSR = 0xce,
 };
 
 enum wmi_tlv_vdev_param {
@@ -1136,13 +1345,25 @@
 	WMI_VDEV_PARAM_HE_RANGE_EXT,
 	WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE,
 	WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
+	WMI_VDEV_PARAM_HE_LTF = 0x74,
 	WMI_VDEV_PARAM_BA_MODE = 0x7e,
+	WMI_VDEV_PARAM_AUTORATE_MISC_CFG = 0x80,
 	WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
+	WMI_VDEV_PARAM_UL_FIXED_RATE,
 	WMI_VDEV_PARAM_6GHZ_PARAMS = 0x99,
 	WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
 	WMI_VDEV_PARAM_BSS_COLOR,
 	WMI_VDEV_PARAM_SET_HEMU_MODE,
 	WMI_VDEV_PARAM_HEOPS_0_31 = 0x8003,
+	WMI_VDEV_PARAM_OBSSPD,
+	WMI_VDEV_PARAM_SET_EHT_MU_MODE,
+	WMI_VDEV_PARAM_EHT_LTF,
+	WMI_VDEV_PARAM_UL_EHT_LTF,
+	WMI_VDEV_PARAM_EHT_DCM,
+	WMI_VDEV_PARAM_EHT_RANGE_EXT,
+	WMI_VDEV_PARAM_NON_DATA_EHT_RANGE_EXT,
+	WMI_VDEV_PARAM_FIXED_PUNCTURE_PATTERN,
+	WMI_VDEV_PARAM_EHTOPS_0_31,
 };
 
 enum wmi_tlv_peer_flags {
@@ -1175,6 +1396,7 @@
 	WMI_TAG_ARRAY_BYTE,
 	WMI_TAG_ARRAY_STRUCT,
 	WMI_TAG_ARRAY_FIXED_STRUCT,
+	WMI_TAG_ARRAY_INT16,
 	WMI_TAG_LAST_ARRAY_ENUM = 31,
 	WMI_TAG_SERVICE_READY_EVENT,
 	WMI_TAG_HAL_REG_CAPABILITIES,
@@ -1920,10 +2142,76 @@
 	/* TODO add all the missing cmds */
 	WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD = 0x301,
 	WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO,
+	WMI_TAG_MUEDCA_PARAMS_CONFIG_EVENT = 0x32a,
+	WMI_TAG_SERVICE_READY_EXT2_EVENT = 0x334,
 	WMI_TAG_FILS_DISCOVERY_TMPL_CMD = 0x344,
+	WMI_TAG_MULTIPLE_VDEV_RESTART_RESPONSE_EVENT = 0x365,
 	WMI_TAG_MAC_PHY_CAPABILITIES_EXT = 0x36F,
+	WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD = 0x37b,
+	WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD,
+	WMI_TAG_PDEV_SSCAN_FW_CMD_FIXED_PARAM = 0x37f,
+	WMI_TAG_PDEV_SSCAN_FFT_BIN_INDEX,
+	WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD = 0x381,
+	WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
+	WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD,
+	WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
+	/* TODO add all the missing cmds */
+	WMI_CTRL_PATH_STATS_CMD_FIXED_PARAM =
+		WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD + 4,
+	WMI_CTRL_PATH_STATS_EV_FIXED_PARAM,
+	WMI_CTRL_PATH_PDEV_STATS,
 	WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
 	WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
+	WMI_TAG_VDEV_SET_TPC_POWER_CMD = 0x3B5,
+	WMI_TAG_VDEV_CH_POWER_INFO,
+	WMI_TAG_PEER_TID_LATENCY_CONFIG_FIXED_PARAM = 0x3B9,
+	WMI_TAG_TID_LATENCY_INFO,
+	WMI_CTRL_PATH_CAL_STATS = 0x3BC,
+	WMI_CTRL_PATH_BTCOEX_STATS = 0x3FD,
+	WMI_CTRL_PATH_AWGN_STATS = 0x3F9,
+	WMI_TAG_EHT_RATE_SET = 0x3C4,
+	WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5,
+	WMI_TAG_MLO_TX_SEND_PARAMS,
+	WMI_TAG_MLO_PARTNER_LINK_PARAMS,
+	WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC,
+	WMI_TAG_MLO_SETUP_CMD,
+	WMI_TAG_MLO_SETUP_COMPLETE_EVENT,
+	WMI_TAG_MLO_READY_CMD,
+	WMI_TAG_MLO_TEARDOWN_CMD,
+	WMI_TAG_MLO_TEARDOWN_COMPLETE,
+	WMI_TAG_MLO_PEER_ASSOC_PARAMS = 0x3D0,
+	WMI_TAG_MLO_MGMT_RX_REO_PARAMS = 0x3D2,
+	WMI_TAG_MLO_MGMT_RX_FW_CONSUMED_HDR = 0x3D3,
+	WMI_TAG_MLO_MGMT_RX_REO_FILTER_CFG_CMD = 0x3D4,
+	WMI_TAG_MLO_PEER_CREATE_PARAMS = 0x3D5,
+	WMI_TAG_MLO_VDEV_START_PARAMS = 0x3D6,
+	WMI_TAG_MLO_VDEV_CREATE_PARAMS = 0x3D7,
+	WMI_TAG_VDEV_SET_INTRA_BSS_PARAMS = 0x3EE,
+	WMI_TAG_PEER_SET_INTRA_BSS_PARAMS,
+	WMI_TAG_PDEV_PKTLOG_DECODE_INFO = 0x414,
+	WMI_TAG_TPC_STATS_GET_CMD = 0x38B,
+	WMI_TAG_TPC_STATS_EVENT_FIXED_PARAM,
+	WMI_TAG_TPC_STATS_CONFIG_EVENT,
+	WMI_TAG_TPC_STATS_REG_PWR_ALLOWED,
+	WMI_TAG_TPC_STATS_RATES_ARRAY,
+	WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT,
+	WMI_TAG_BCN_TMPL_ML_PARAMS_CMD = 0x3E6,
+	WMI_TAG_PEER_CONFIG_PPEDS_ROUTING = 0x3EA,
+	WMI_TAG_SAWF_SERVICE_CLASS_CFG_CMD_FIXED_PARAM = 0x40A,
+	WMI_TAG_SAWF_SERVICE_CLASS_DISABLE_CMD_FIXED_PARAM = 0x40B,
+	WMI_TAG_SPECTRAL_SCAN_BW_CAPABILITIES = 0x415,
+	WMI_TAG_SPECTRAL_FFT_SIZE_CAPABILITIES,
+	WMI_TAG_PDEV_SSCAN_CHAN_INFO = 0X417,
+	WMI_TAG_PDEV_SSCAN_PER_DETECTOR_INFO,
+	WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM = 0x427,
+	WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO,
+	WMI_TAG_RSSI_DBM_CONVERSION_TEMP_OFFSET_INFO,
+	WMI_TAG_BCN_TMPL_ML_INFO_CMD = 0x436,
+	WMI_TAG_MLO_MGMT_RX_CU_PARAMS = 0x439,
+	WMI_TAG_CTRL_PATH_CMD_FIXED_PARAM = 0x442,
+	WMI_TAG_CTRL_PATH_EVENT_FIXED_PARAM,
+	WMI_TAG_PDEV_DFS_RADAR_FLAGS = 0x4b4,
+	WMI_TAG_PRB_RESP_TMPL_ML_INFO_CMD = 0x460,
 	WMI_TAG_MAX
 };
 
@@ -2147,8 +2435,28 @@
 	WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
 	WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
 	WMI_TLV_SERVICE_EXT2_MSG = 220,
-
-	WMI_MAX_EXT_SERVICE
+	WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART_RESPONSE_SUPPORT = 235,
+	WMI_TLV_SERVICE_5_9GHZ_SUPPORT = 247,
+	WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT = 249,
+
+	WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT = 253,
+	WMI_MAX_EXT_SERVICE = 256,
+
+	WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT = 280,
+	WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT = 281,
+	WMI_TLV_SERVICE_DCS_AWGN_INT_SUPPORT = 286,
+	WMI_TLV_SERVICE_BE = 289,
+	WMI_TLV_SERVICE_SAWF_LEVEL0 = 311,
+	WMI_TLV_SERVICE_PKTLOG_DECODE_INFO_SUPPORT = 320,
+	WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT = 365,
+	WMI_TLV_SERVICE_RADAR_FLAGS_SUPPORT = 390,
+	WMI_TLV_SERVICE_SW_PROG_DFS_SUPPORT = 395,
+	WMI_MAX_EXT2_SERVICE,
+};
+
+enum wmi_unit_test_cmdid {
+	/* TODO: Add the remaining cmd ids if needed */
+	WMI_DBG_ENABLE_M3_SSR = 36,
 };
 
 enum {
@@ -2196,6 +2504,14 @@
 	WMI_VDEV_PREAMBLE_SHORT = 2,
 };
 
+/** 6GHZ params **/
+/* Control to enable/disable beacon tx in non-HT duplicate */
+#define WMI_VDEV_6GHZ_BITMAP_NON_HT_DUPLICATE_BEACON		        BIT(0)
+/* Control to enable/disable broadcast probe response tx in non-HT duplicate */
+#define WMI_VDEV_6GHZ_BITMAP_NON_HT_DUPLICATE_BCAST_PROBE_RSP		BIT(1)
+/* Control to enable/disable FILS discovery frame tx in non-HT duplicate */
+#define WMI_VDEV_6GHZ_BITMAP_NON_HT_DUPLICATE_FD_FRAME		        BIT(2)
+
 enum wmi_peer_smps_state {
 	WMI_PEER_SMPS_PS_NONE =	0,
 	WMI_PEER_SMPS_STATIC  = 1,
@@ -2207,6 +2523,8 @@
 	WMI_PEER_CHWIDTH_40MHZ = 1,
 	WMI_PEER_CHWIDTH_80MHZ = 2,
 	WMI_PEER_CHWIDTH_160MHZ = 3,
+	WMI_PEER_CHWIDTH_320MHZ = 4,
+	WMI_PEER_CHWIDTH_MAX = 9,
 };
 
 enum wmi_beacon_gen_mode {
@@ -2217,11 +2535,31 @@
 enum wmi_direct_buffer_module {
 	WMI_DIRECT_BUF_SPECTRAL = 0,
 	WMI_DIRECT_BUF_CFR = 1,
+	WMI_CONFIG_MODULE_CV_UPLOAD = 2,
 
 	/* keep it last */
 	WMI_DIRECT_BUF_MAX
 };
 
+/* enum wmi_nss_ratio - NSS ratio received from FW during service ready ext
+ *			event
+ * WMI_NSS_RATIO_1BY2_NSS -Max nss of 160MHz is equals to half of the max nss
+ *			   of 80MHz
+ * WMI_NSS_RATIO_3BY4_NSS - Max nss of 160MHz is equals to 3/4 of the max nss
+ *			    of 80MHz
+ * WMI_NSS_RATIO_1_NSS - Max nss of 160MHz is equals to the max nss of 80MHz
+ * WMI_NSS_RATIO_2_NSS - Max nss of 160MHz is equals to two times the max
+ *			 nss of 80MHz
+ */
+
+enum wmi_nss_ratio {
+	WMI_NSS_RATIO_1BY2_NSS = 0x0,
+	WMI_NSS_RATIO_3BY4_NSS = 0x1,
+	WMI_NSS_RATIO_1_NSS = 0x2,
+	WMI_NSS_RATIO_2_NSS = 0x3,
+};
+
+
 struct ath12k_wmi_pdev_band_arg {
 	u32 pdev_id;
 	u32 start_freq;
@@ -2234,6 +2572,7 @@
 	u32 ppet16_ppet8_ru3_ru0[WMI_MAX_NUM_SS];
 };
 
+#define PSOC_HOST_MAX_MAC_SIZE (2)
 #define PSOC_HOST_MAX_PHY_SIZE (3)
 #define ATH12K_11B_SUPPORT                 BIT(0)
 #define ATH12K_11G_SUPPORT                 BIT(1)
@@ -2255,6 +2594,77 @@
 	u32 high_5ghz_chan;
 };
 
+#define WMI_MAX_EHTCAP_MAC_SIZE  2
+#define WMI_MAX_EHTCAP_PHY_SIZE  3
+#define WMI_MAX_EHTCAP_RATE_SET  3
+
+enum {
+	WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP       = 0x00000001,
+	WMI_HOST_VDEV_FLAGS_TRANSMIT_AP         = 0x00000002,
+	WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP     = 0x00000004,
+	WMI_HOST_VDEV_FLAGS_EMA_MODE            = 0x00000008,
+	WMI_HOST_VDEV_FLAGS_SCAN_MODE_VAP       = 0x00000010,
+};
+
+/* HW mode MLO capability flags
+ * WMI_HOST_MLO_CAP_FLAG_NONE: Do not support MLO for the specific HW mode
+ * WMI_HOST_MLO_CAP_FLAG_NON_STR_IN_DBS: Support STR MLO when DBS for the specific HW mode
+ * WMI_HOST_MLO_CAP_FLAG_STR_IN_DBS: Support Non-STR MLO when DBS for the specific HW mode
+ * WMI_HOST_MLO_CAP_FLAG_NON_STR_IN_SBS: Support STR MLO when SBS for the specific HW mode
+ * WMI_HOST_MLO_CAP_FLAG_STR_IN_SBS: Support Non-STR MLO when SBS for the specific HW mode
+ * WMI_HOST_MLO_CAP_FLAG_STR: Support STR for the specific HW mode
+ * WMI_HOST_MLO_CAP_FLAG_EMLSR: Support eMLSR mode
+ */
+#define WMI_HOST_MLO_CAP_FLAG_NONE		0x0
+#define WMI_HOST_MLO_CAP_FLAG_NON_STR_IN_DBS	0x1
+#define WMI_HOST_MLO_CAP_FLAG_STR_IN_DBS	0x2
+#define WMI_HOST_MLO_CAP_FLAG_NON_STR_IN_SBS	0x4
+#define WMI_HOST_MLO_CAP_FLAG_STR_IN_SBS	0x8
+#define WMI_HOST_MLO_CAP_FLAG_STR		0x10
+#define WMI_HOST_MLO_CAP_FLAG_EMLSR		0x20
+
+/*
+ * 0 – index indicated EHT-MCS map for 20Mhz only sta (4 bytes valid)
+ * 1 – index for <= 80MHz bw  (only 3 bytes are valid and other is reserved)
+ * 2 – index for == 160Mhz bw (only 3 bytes are valid and other is reserved)
+ * 3 – index for == 320Mhz bw (only 3 bytes are valid and other is reserved)
+ */
+#define WMI_MAX_EHT_SUPP_MCS_2G_SIZE  2
+#define WMI_MAX_EHT_SUPP_MCS_5G_SIZE  4
+
+#define WMI_EHTCAP_TXRX_MCS_NSS_IDX_80          0
+#define WMI_EHTCAP_TXRX_MCS_NSS_IDX_160         1
+#define WMI_EHTCAP_TXRX_MCS_NSS_IDX_320         2
+
+#define WMI_MCS_NSS_MAP_0_7	GENMASK(3, 0)
+#define WMI_MCS_NSS_MAP_8_9	GENMASK(7, 4)
+#define WMI_MCS_NSS_MAP_10_11	GENMASK(11, 8)
+#define WMI_MCS_NSS_MAP_12_13	GENMASK(15, 12)
+
+/*
+ * 0 – index indicated EHT-MCS map for 20Mhz only sta (4 bytes valid)
+ * 1 – index for <= 80MHz bw  (only 3 bytes are valid and other is reserved)
+ * 2 – index for == 160Mhz bw (only 3 bytes are valid and other is reserved)
+ * 3 – index for == 320Mhz bw (only 3 bytes are valid and other is reserved)
+ */
+#define WMI_MAX_EHT_SUPP_MCS_2G_SIZE  2
+#define WMI_MAX_EHT_SUPP_MCS_5G_SIZE  4
+
+struct ath12k_service_ext2_param {
+	u32 reg_db_version;
+	u32 hw_min_max_tx_power_2g;
+	u32 hw_min_max_tx_power_5g;
+	u32 chwidth_num_peer_caps;
+	u32 ru_punct_supp_bw;
+	u32 max_user_per_ppdu_ofdma;
+	u32 max_user_per_ppdu_mumimo;
+	u32 target_cap_flags;
+	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
+	u32 max_num_linkview_peers;
+	u32 max_msduq_per_tid;
+	u32 default_msduq_per_tid;
+};
+
 #define WMI_HOST_MAX_PDEV 3
 
 struct ath12k_wmi_host_mem_chunk_params {
@@ -2333,6 +2743,12 @@
 	u32 sched_params;
 	u32 twt_ap_pdev_count;
 	u32 twt_ap_sta_count;
+	bool is_reg_cc_ext_event_supported;
+	u32 ema_max_vap_cnt;
+	u32 ema_max_profile_period;
+	u8  dp_peer_meta_data_ver;
+	bool sawf;
+	bool is_full_bw_nol_feature_supported;
 };
 
 struct ath12k_wmi_init_cmd_arg {
@@ -2367,6 +2783,34 @@
 	__le32 ppet16_ppet8_ru3_ru0[WMI_MAX_NUM_SS];
 } __packed;
 
+struct ath12k_ppe_threshold {
+	u32 numss_m1;
+	u32 ru_bit_mask;
+	u32 ppet16_ppet8_ru3_ru0[PSOC_HOST_MAX_NUM_SS];
+};
+
+struct ath12k_chainmask_caps {
+	u32 supported_caps;
+	u32 chainmask;
+};
+
+struct ath12k_chainmask_table {
+	u32 table_id;
+	u32 num_valid_chainmasks;
+	struct ath12k_chainmask_caps *cap_list;
+};
+
+#define ATH12K_MAX_CHAINMASK_TABLES 5
+
+struct wmi_ppe_threshold {
+	u32 numss_m1; /** NSS - 1*/
+	union {
+		u32 ru_count;
+		u32 ru_mask;
+	} __packed;
+	u32 ppet16_ppet8_ru3_ru0[WMI_MAX_NUM_SS];
+} __packed;
+
 #define HW_BD_INFO_SIZE       5
 
 struct ath12k_wmi_abi_version_params {
@@ -2385,6 +2829,14 @@
 } __packed;
 
 #define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4
+#define WMI_RSRC_CFG_HOST_SVC_FLAG_REO_QREF_SUPPORT_BIT   12
+#define WMI_RSRC_CFG_HOST_SVC_FLAG_FULL_BW_NOL_SUPPORT_BIT 14
+#define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5,4)
+#define WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET      BIT(9)
+#define WMI_RSRC_CFG_FLAGS2_INTRABSS_MEC_WDS_LEARNING_DISABLE	BIT(15)
+#define WMI_RSRC_CFG_FLAG1_THREE_WAY_COEX_CONFIG_OVERRIDE_SUPPORT BIT(25)
+#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64	BIT(5)
+#define WMI_RSRC_CFG_FLAGS2_SAWF_CONFIG_ENABLE_SET	  BIT(13)
 
 struct ath12k_wmi_resource_config_params {
 	__le32 tlv_header;
@@ -2516,6 +2968,9 @@
 	__le32 num_chainmask_tables;
 } __packed;
 
+#define WMI_HW_MODE_INFO0_CONFIG_TYPE          GENMASK(26, 0)
+#define WMI_HW_MODE_INFO0_MLO_CAP_FLAG         GENMASK(31, 27)
+
 struct ath12k_wmi_hw_mode_cap_params {
 	__le32 tlv_header;
 	__le32 hw_mode_id;
@@ -2523,11 +2978,30 @@
 	__le32 hw_mode_config_type;
 } __packed;
 
+struct wmi_mac_phy_chainmask_combo {
+	u32 chainmask_table_id;
+	u32 num_valid_chainmask;
+} __packed;
+
+struct wmi_mac_phy_chainmask_caps {
+	u32 tlv_header;
+	u32 supported_flags;
+	u32 chainmask;
+} __packed;
+
+
 #define WMI_MAX_HECAP_PHY_SIZE                 (3)
+#define WMI_NSS_RATIO_ENABLE_DISABLE_BITPOS    BIT(0)
+#define WMI_NSS_RATIO_ENABLE_DISABLE_GET(_val) \
+	FIELD_GET(WMI_NSS_RATIO_ENABLE_DISABLE_BITPOS, _val)
+#define WMI_NSS_RATIO_INFO_BITPOS              GENMASK(4, 1)
+#define WMI_NSS_RATIO_INFO_GET(_val) \
+	FIELD_GET(WMI_NSS_RATIO_INFO_BITPOS, _val)
 
 struct ath12k_wmi_mac_phy_caps_params {
 	__le32 hw_mode_id;
-	__le32 pdev_id;
+	__le16 pdev_id;
+	__le16 hw_link_id;
 	__le32 phy_id;
 	__le32 supported_flags;
 	__le32 supported_bands;
@@ -2557,6 +3031,12 @@
 	__le32 he_cap_info_2g_ext;
 	__le32 he_cap_info_5g_ext;
 	__le32 he_cap_info_internal;
+	__le32 wireless_modes;
+	__le32 low_2ghz_chan_freq;
+	__le32 high_2ghz_chan_freq;
+	__le32 low_5ghz_chan_freq;
+	__le32 high_5ghz_chan_freq;
+	__le32 nss_ratio;
 } __packed;
 
 struct ath12k_wmi_hal_reg_caps_ext_params {
@@ -2577,6 +3057,108 @@
 	__le32 num_phy;
 } __packed;
 
+struct wmi_service_ready_ext2_event {
+	u32 reg_db_version;
+	u32 hw_min_max_tx_power_2g;
+	u32 hw_min_max_tx_power_5g;
+	u32 chwidth_num_peer_caps;
+	u32 ru_punct_supp_bw;
+	u32 max_user_per_ppdu_ofdma;
+	u32 max_user_per_ppdu_mumimo;
+	u32 target_cap_flags;
+	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
+	u32 max_num_linkview_peers;
+	u32 max_num_msduq_supported_per_tid;
+	u32 default_num_msduq_supported_per_tid;
+} __packed;
+
+enum wmi_spectral_scaling_formula_id {
+	NO_SCALING = 0,
+	AGC_GAIN_RSSI_CORR_BASED = 1,
+};
+
+struct wmi_spectral_bin_scaling {
+	u32 pdev_id;
+	enum wmi_spectral_scaling_formula_id formula_id;
+	u32 low_level_offset;
+	u32 high_level_offset;
+	u32 rssi_thr;
+	u32 default_agc_max_gain;
+} __packed;
+
+enum wmi_eht_mcs_support {
+	WMI_EHT_MCS_SUPPORT_0_9   = 0,
+	WMI_EHT_MCS_SUPPORT_0_11  = 1,
+	WMI_EHT_MCS_SUPPORT_0_13  = 2,
+	WMI_EHT_MCS_NOT_SUPPORTED = 3,
+};
+
+#define EML_INFO_EMLSR_SUPPORT		BIT(0)
+#define EML_INFO_EMLSR_PADDING_DELAY	GENMASK(3, 1)
+#define EML_INFO_EMLSR_TRANSITION_DELAY	GENMASK(6, 4)
+#define EML_INFO_EMLMR_SUPPORT		BIT(7)
+#define EML_INFO_EMLMR_PADDING_DELAY	GENMASK(10, 8)
+#define EML_INFO_TRANSITION_TIMEOUT	GENMASK(14, 11)
+
+enum wmi_mld_cap_tid_to_link_map {
+	WMI_MLD_MAP_TID_TO_LINK_NONE,
+	WMI_MLD_MAP_EACH_TID_TO_SAME_OR_DIFF_LINK_SET,
+	WMI_MLD_MAP_ALL_TID_TO_SAME_LINK_SET,
+};
+
+#define MLD_INFO_MAX_SIMULTANEOUS_LINK		GENMASK(3, 0)
+#define MLD_INFO_SRS_SUPPORT			BIT(4)
+#define MLD_INFO_TID_TO_LINK_MAP_SUPPORT	GENMASK(6, 5)
+#define MLD_INFO_FREQ_SEPARATION_STR		GENMASK(11, 7)
+#define MLD_INFO_AAR_SUPPORT			BIT(12)
+
+struct wmi_mac_phy_caps_ext {
+	u32 hw_mode_id;
+	union {
+		struct {
+			u32 pdev_id:16,
+			    hw_link_id:16;
+		} wmi_pdev_to_link_map;
+		u32 pdev_id;
+	} u;
+	u32 phy_id;
+	u32 wireless_modes_ext;
+	u32 eht_cap_mac_info_2G[WMI_MAX_EHTCAP_MAC_SIZE];
+	u32 eht_cap_mac_info_5G[WMI_MAX_EHTCAP_MAC_SIZE];
+	u32 rsvd0[2];
+	u32 eht_cap_phy_info_2G[WMI_MAX_EHTCAP_PHY_SIZE];
+	u32 eht_cap_phy_info_5G[WMI_MAX_EHTCAP_PHY_SIZE];
+	struct wmi_ppe_threshold eht_ppet2G;
+	struct wmi_ppe_threshold eht_ppet5G;
+	u32 eht_cap_info_internal;
+	u32 eht_supp_mcs_ext_2G[WMI_MAX_EHT_SUPP_MCS_2G_SIZE];
+	u32 eht_supp_mcs_ext_5G[WMI_MAX_EHT_SUPP_MCS_5G_SIZE];
+	union {
+		struct {
+			u32 emlsr_support:1,
+			    emlsr_padding_delay:3,
+			    emlsr_transition_delay:3,
+			    emlmr_support:1,
+			    emlmr_delay:3,
+			    transition_timeout:4,
+			    reserved: 17;
+		};
+		u32 eml_capability;
+	} eml_cap_u;
+	union {
+		struct {
+			u32 max_num_simultaneous_links:4,
+			    srs_support:1,
+			    tid_to_link_negotiation_support:2,
+			    freq_separation_str:5,
+			    aar_support:1,
+			    reserved2: 19;
+        	};
+		u32 mld_capability;
+	} mld_cap_u;
+} __packed;
+
+
 /* 2 word representation of MAC addr */
 struct ath12k_wmi_mac_addr_params {
 	u8 addr[ETH_ALEN];
@@ -2622,7 +3204,10 @@
 		u8 rx;
 	} chains[NUM_NL80211_BANDS];
 	u32 pdev_id;
+	u32 mbssid_flags;
+	u32 mbssid_tx_vdev_id;
 	u8 if_stats_id;
+	u8 mld_addr[ETH_ALEN];
 };
 
 #define ATH12K_MAX_VDEV_STATS_ID	0x30
@@ -2636,6 +3221,9 @@
 	struct ath12k_wmi_mac_addr_params vdev_macaddr;
 	__le32 num_cfg_txrx_streams;
 	__le32 pdev_id;
+	__le32 mbssid_flags;
+	__le32 mbssid_tx_vdev_id;
+	__le32 vdev_stats_id_valid;
 	__le32 vdev_stats_id;
 } __packed;
 
@@ -2646,19 +3234,66 @@
 	u32 supported_rx_streams;
 } __packed;
 
+/* 2 word representation of MAC addr */
+struct wmi_mac_addr {
+	union {
+		u8 addr[6];
+		struct {
+			u32 word0;
+			u32 word1;
+		} __packed;
+	} __packed;
+} __packed;
+
+struct wmi_vdev_create_mlo_params {
+	u32 tlv_header;
+	struct wmi_mac_addr mld_macaddr;
+} __packed;
+
+#define ATH12K_WMI_FLAG_MLO_ENABLED			BIT(0)
+#define ATH12K_WMI_FLAG_MLO_ASSOC_LINK			BIT(1)
+#define ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC		BIT(2)
+#define ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID	BIT(3)
+#define ATH12K_WMI_FLAG_MLO_PEER_ID_VALID		BIT(4)
+#define ATH12K_WMI_FLAG_MLO_MCAST_VDEV			BIT(5)
+#define ATH12K_WMI_FLAG_MLO_EMLSR_SUPPORT		BIT(6)
+#define ATH12K_WMI_FLAG_MLO_FORCED_INACTIVE		BIT(7)
+#define ATH12K_WMI_FLAG_MLO_LINK_ADD			BIT(8)
+
+struct wmi_vdev_start_mlo_params {
+	u32 tlv_header;
+	u32 flags;
+} __packed;
+
+struct wmi_partner_link_info {
+	u32 tlv_header;
+	u32 vdev_id;
+	u32 hw_link_id;
+	struct wmi_mac_addr vdev_addr;
+} __packed;
+
 struct wmi_vdev_delete_cmd {
 	__le32 tlv_header;
 	__le32 vdev_id;
 } __packed;
 
+struct vdev_up_params {
+	__le32 vdev_id;
+	__le16 aid;
+	const u8 *bssid;
+	__le32 profile_idx;
+	__le32 profile_count;
+	u8 *tx_bssid;
+} __packed;
+
 struct wmi_vdev_up_cmd {
 	__le32 tlv_header;
 	__le32 vdev_id;
 	__le32 vdev_assoc_id;
 	struct ath12k_wmi_mac_addr_params vdev_bssid;
-	struct ath12k_wmi_mac_addr_params trans_bssid;
+	struct ath12k_wmi_mac_addr_params tx_vdev_bssid;
 	__le32 profile_idx;
-	__le32 profile_num;
+	__le32 profile_count;
 } __packed;
 
 struct wmi_vdev_stop_cmd {
@@ -2674,6 +3309,7 @@
 #define WMI_VDEV_START_HIDDEN_SSID  BIT(0)
 #define WMI_VDEV_START_PMF_ENABLED  BIT(1)
 #define WMI_VDEV_START_LDPC_RX_ENABLED BIT(3)
+#define WMI_VDEV_START_HW_ENCRYPTION_DISABLED BIT(4)
 
 #define ATH12K_WMI_SSID_LEN 32
 
@@ -2682,7 +3318,7 @@
 	u8 ssid[ATH12K_WMI_SSID_LEN];
 } __packed;
 
-#define ATH12K_VDEV_SETUP_TIMEOUT_HZ (1 * HZ)
+#define ATH12K_VDEV_SETUP_TIMEOUT_HZ (10 * HZ)
 
 struct wmi_vdev_start_request_cmd {
 	__le32 tlv_header;
@@ -2701,6 +3337,11 @@
 	__le32 he_ops;
 	__le32 cac_duration_ms;
 	__le32 regdomain;
+	__le32  min_data_rate;
+	__le32 mbssid_flags;
+	__le32 mbssid_tx_vdev_id;
+	__le32  eht_ops;
+	__le32  ru_punct_bitmap;
 } __packed;
 
 #define MGMT_TX_DL_FRM_LEN		     64
@@ -2729,6 +3370,25 @@
 	u8  reg_class_id;
 };
 
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+struct wmi_peer_config_ppeds_cmd {
+	__le32 tlv_header;
+	struct wmi_mac_addr peer_macaddr;
+	__le32 ppe_routing_enable;
+	__le32 service_code;
+	__le32 priority_valid;
+	__le32 src_info;
+	__le32 vdev_id;
+};
+
+enum wmi_ppeds_routing_type {
+	WMI_PPE_ROUTING_DISABLED = 0,
+	WMI_AST_USE_PPE_ENABLED  = 1,
+	WMI_AST_USE_PPE_DISABLED = 2,
+	WMI_PPE_ROUTING_TYPE_MAX,
+};
+#endif
+
 enum wmi_phy_mode {
 	MODE_11A        = 0,
 	MODE_11G        = 1,   /* 11b/g Mode */
@@ -2754,12 +3414,20 @@
 	MODE_11AX_HE20_2G = 21,
 	MODE_11AX_HE40_2G = 22,
 	MODE_11AX_HE80_2G = 23,
-	MODE_UNKNOWN = 24,
-	MODE_MAX = 24
+	MODE_11BE_EHT20 = 24,
+	MODE_11BE_EHT40 = 25,
+	MODE_11BE_EHT80 = 26,
+	MODE_11BE_EHT80_80 = 27,
+	MODE_11BE_EHT160 = 28,
+	MODE_11BE_EHT160_160 = 29,
+	MODE_11BE_EHT320 = 30,
+	MODE_11BE_EHT20_2G = 31,
+	MODE_11BE_EHT40_2G = 32,
+	MODE_UNKNOWN = 33,
+	MODE_MAX = 33
 };
 
-struct wmi_vdev_start_req_arg {
-	u32 vdev_id;
+struct wmi_channel_arg {
 	u32 freq;
 	u32 band_center_freq1;
 	u32 band_center_freq2;
@@ -2776,6 +3444,113 @@
 	u32 max_reg_power;
 	u32 max_antenna_gain;
 	enum wmi_phy_mode mode;
+};
+
+static inline const char *ath12k_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+	switch (mode) {
+	case MODE_11A:
+		return "11a";
+	case MODE_11G:
+		return "11g";
+	case MODE_11B:
+		return "11b";
+	case MODE_11GONLY:
+		return "11gonly";
+	case MODE_11NA_HT20:
+		return "11na-ht20";
+	case MODE_11NG_HT20:
+		return "11ng-ht20";
+	case MODE_11NA_HT40:
+		return "11na-ht40";
+	case MODE_11NG_HT40:
+		return "11ng-ht40";
+	case MODE_11AC_VHT20:
+		return "11ac-vht20";
+	case MODE_11AC_VHT40:
+		return "11ac-vht40";
+	case MODE_11AC_VHT80:
+		return "11ac-vht80";
+	case MODE_11AC_VHT160:
+		return "11ac-vht160";
+	case MODE_11AC_VHT80_80:
+		return "11ac-vht80+80";
+	case MODE_11AC_VHT20_2G:
+		return "11ac-vht20-2g";
+	case MODE_11AC_VHT40_2G:
+		return "11ac-vht40-2g";
+	case MODE_11AC_VHT80_2G:
+		return "11ac-vht80-2g";
+	case MODE_11AX_HE20:
+		return "11ax-he20";
+	case MODE_11AX_HE40:
+		return "11ax-he40";
+	case MODE_11AX_HE80:
+		return "11ax-he80";
+	case MODE_11AX_HE80_80:
+		return "11ax-he80+80";
+	case MODE_11AX_HE160:
+		return "11ax-he160";
+	case MODE_11AX_HE20_2G:
+		return "11ax-he20-2g";
+	case MODE_11AX_HE40_2G:
+		return "11ax-he40-2g";
+	case MODE_11AX_HE80_2G:
+		return "11ax-he80-2g";
+	case MODE_11BE_EHT20:
+		return "11be-eht20";
+	case MODE_11BE_EHT40:
+		return "11be-eht40";
+	case MODE_11BE_EHT80:
+		return "11be-eht80";
+	case MODE_11BE_EHT80_80:
+		return "11be-eht80+80";
+	case MODE_11BE_EHT160:
+		return "11be-eht160";
+	case MODE_11BE_EHT160_160:
+		return "11be-eht160+160";
+	case MODE_11BE_EHT320:
+		return "11be-eht320";
+	case MODE_11BE_EHT20_2G:
+		return "11be-eht20-2g";
+	case MODE_11BE_EHT40_2G:
+		return "11be-eht40-2g";
+	case MODE_UNKNOWN:
+		/* skip */
+		break;
+
+		/* no default handler to allow compiler to check that the
+		 *          * enum is fully handled
+		 *                   */
+	}
+
+	return "<unknown>";
+}
+
+#define ATH12K_WMI_MLO_MAX_LINKS 3
+
+struct wmi_ml_partner_info {
+	u32 vdev_id;
+	u32 hw_link_id;
+	u8 addr[ETH_ALEN];
+	bool assoc_link;
+	bool primary_umac;
+	bool logical_link_idx_valid;
+	u32 logical_link_idx;
+};
+
+struct wmi_ml_arg {
+	bool enabled;
+	bool assoc_link;
+	bool mcast_link;
+	bool link_add;
+	u8 num_partner_links;
+	struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
+};
+
+struct wmi_vdev_start_req_arg {
+	u32 vdev_id;
+	struct wmi_channel_arg channel;
 	u32 bcn_intval;
 	u32 dtim_period;
 	u8 *ssid;
@@ -2791,12 +3566,20 @@
 	u32 pref_rx_streams;
 	u32 pref_tx_streams;
 	u32 num_noa_descriptors;
+	u32 min_data_rate;
+	u32 mbssid_flags;
+	u32 mbssid_tx_vdev_id;
+	u32 ru_punct_bitmap;
+	struct wmi_ml_arg ml;
+	u32 width_device;
+	u32 center_freq_device;
 };
 
 struct ath12k_wmi_peer_create_arg {
 	const u8 *peer_addr;
 	u32 peer_type;
 	u32 vdev_id;
+	bool ml_enabled;
 };
 
 struct ath12k_wmi_pdev_set_regdomain_arg {
@@ -2809,6 +3592,27 @@
 	u32 pdev_id;
 };
 
+/* Defines various options for routing policy */
+enum wmi_pdev_dest_ring_handler_type {
+	ATH12K_WMI_PKTROUTE_USE_CCE  = 0,
+	ATH12K_WMI_PKTROUTE_USE_ASPT = 1,
+	ATH12K_WMI_PKTROUTE_USE_FSE  = 2,
+	ATH12K_WMI_PKTROUTE_USE_CCE2 = 3,
+};
+
+enum ath12k_wmi_pkt_route_opcode {
+	ATH12K_WMI_PKTROUTE_ADD,
+	ATH12K_WMI_PKTROUTE_DEL,
+};
+
+struct ath12k_wmi_pkt_route_param {
+	enum ath12k_wmi_pkt_route_opcode opcode;
+	u32 route_type_bmap;
+	u32 dst_ring_handler;
+	u32 dst_ring;
+	u32 meta_data;
+};
+
 struct ath12k_wmi_rx_reorder_queue_remove_arg {
 	u8 *peer_macaddr;
 	u16 vdev_id;
@@ -2832,17 +3636,19 @@
 #define REG_RULE_MAX_BW				0x0000ffff
 #define REG_RULE_REG_PWR			0x00ff0000
 #define REG_RULE_ANT_GAIN			0xff000000
-#define REG_RULE_PSD_INFO			BIT(2)
-#define REG_RULE_PSD_EIRP			0xffff0000
+#define REG_RULE_PSD_INFO                       BIT(0)
+#define REG_RULE_PSD_EIRP                       0xff0000
 
 #define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
 #define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
 #define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
 #define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
 
-#define HECAP_PHYDWORD_0	0
-#define HECAP_PHYDWORD_1	1
-#define HECAP_PHYDWORD_2	2
+#define HE_PHYCAP_BYTE_0	0
+#define HE_PHYCAP_BYTE_1	1
+#define HE_PHYCAP_BYTE_2	2
+#define HE_PHYCAP_BYTE_3	3
+#define HE_PHYCAP_BYTE_4	4
 
 #define HECAP_PHY_SU_BFER		BIT(31)
 #define HECAP_PHY_SU_BFEE		BIT(0)
@@ -2876,8 +3682,31 @@
 #define HE_DL_MUOFDMA_ENABLE	1
 #define HE_UL_MUOFDMA_ENABLE	1
 #define HE_DL_MUMIMO_ENABLE	1
+#define HE_UL_MUMIMO_ENABLE	1
 #define HE_MU_BFEE_ENABLE	1
 #define HE_SU_BFEE_ENABLE	1
+#define HE_MU_BFER_ENABLE	1
+#define HE_SU_BFER_ENABLE	1
+
+#define EHT_MODE_SU_TX_BFEE		BIT(0)
+#define EHT_MODE_SU_TX_BFER		BIT(1)
+#define EHT_MODE_MU_TX_BFEE		BIT(2)
+#define EHT_MODE_MU_TX_BFER		BIT(3)
+#define EHT_MODE_DL_OFDMA		BIT(4)
+#define EHT_MODE_UL_OFDMA		BIT(5)
+#define EHT_MODE_MUMIMO			BIT(6)
+#define EHT_MODE_DL_OFDMA_TXBF		BIT(7)
+#define EHT_MODE_DL_OFDMA_MUMIMO	BIT(8)
+#define EHT_MODE_UL_OFDMA_MUMIMO	BIT(9)
+
+#define EHT_DL_MUOFDMA_ENABLE    1
+#define EHT_UL_MUOFDMA_ENABLE    1
+#define EHT_DL_MUMIMO_ENABLE     1
+#define EHT_UL_MUMIMO_ENABLE     1
+#define EHT_MU_BFEE_ENABLE       1
+#define EHT_SU_BFEE_ENABLE       1
+#define EHT_MU_BFER_ENABLE       1
+#define EHT_SU_BFER_ENABLE       1
 
 #define HE_VHT_SOUNDING_MODE_ENABLE		1
 #define HE_SU_MU_SOUNDING_MODE_ENABLE		1
@@ -2901,6 +3730,11 @@
 	WMI_PEER_TYPE_TDLS = 2,
 };
 
+struct wmi_peer_create_mlo_params {
+	u32 tlv_header;
+	u32 flags;
+} __packed;
+
 struct wmi_peer_create_cmd {
 	__le32 tlv_header;
 	__le32 vdev_id;
@@ -2938,6 +3772,18 @@
 	WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR,
 };
 
+struct wmi_pdev_ap_ps_cmd {
+	u32 tlv_header;
+	u32 pdev_id;
+	u32 param_value;
+};
+
+struct wmi_pdev_get_ani_level_cmd {
+	__le32 tlv_header;
+	__le32 pdev_id;
+	__le32 param_id;
+} __packed;
+
 struct wmi_pdev_set_param_cmd {
 	__le32 tlv_header;
 	__le32 pdev_id;
@@ -2966,6 +3812,7 @@
 	__le32 tlv_header;
 	/* ref wmi_bss_chan_info_req_type */
 	__le32 req_type;
+	__le32 pdev_id;
 } __packed;
 
 struct wmi_ap_ps_peer_cmd {
@@ -2994,6 +3841,16 @@
 	__le32 dfs_domain;
 } __packed;
 
+struct wmi_pdev_pkt_route_cmd {
+	u32 tlv_header;
+	u32 pdev_id;
+	u32 opcode;
+	u32 route_type_bmap;
+	u32 dst_ring;
+	u32 meta_data;
+	u32 dst_ring_handler;
+} __packed;
+
 struct wmi_peer_set_param_cmd {
 	__le32 tlv_header;
 	__le32 vdev_id;
@@ -3002,6 +3859,21 @@
 	__le32 param_value;
 } __packed;
 
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+struct wmi_peer_set_intra_bss_cmd {
+	__le32 tlv_header;
+	struct wmi_mac_addr peer_macaddr;
+	__le32 vdev_id;
+	__le32 enable;
+} __packed;
+
+struct ath12k_vdev_set_intra_bss_cmd {
+    u32 tlv_header;
+    u32 vdev_id;
+    u32 enable;
+} __packed;
+#endif
+
 struct wmi_peer_flush_tids_cmd {
 	__le32 tlv_header;
 	__le32 vdev_id;
@@ -3030,7 +3902,6 @@
 
 #define WLAN_SCAN_MAX_NUM_SSID          10
 #define WLAN_SCAN_MAX_NUM_BSSID         10
-#define WLAN_SCAN_MAX_NUM_CHANNELS      40
 
 struct ath12k_wmi_element_info_arg {
 	u32 len;
@@ -3213,17 +4084,17 @@
 			    scan_f_filter_prb_req:1,
 			    scan_f_bypass_dfs_chn:1,
 			    scan_f_continue_on_err:1,
+			    scan_f_promisc_mode:1,
+			    scan_f_force_active_dfs_chn:1,
+			    scan_f_add_tpc_ie_in_probe:1,
+			    scan_f_add_ds_ie_in_probe:1,
+			    scan_f_add_spoofed_mac_in_probe:1,
 			    scan_f_offchan_mgmt_tx:1,
 			    scan_f_offchan_data_tx:1,
-			    scan_f_promisc_mode:1,
 			    scan_f_capture_phy_err:1,
 			    scan_f_strict_passive_pch:1,
 			    scan_f_half_rate:1,
 			    scan_f_quarter_rate:1,
-			    scan_f_force_active_dfs_chn:1,
-			    scan_f_add_tpc_ie_in_probe:1,
-			    scan_f_add_ds_ie_in_probe:1,
-			    scan_f_add_spoofed_mac_in_probe:1,
 			    scan_f_add_rand_seq_in_probe:1,
 			    scan_f_en_ie_whitelist_in_probe:1,
 			    scan_f_forced:1,
@@ -3239,7 +4110,7 @@
 	u32 num_bssid;
 	u32 num_ssids;
 	u32 n_probes;
-	u32 chan_list[WLAN_SCAN_MAX_NUM_CHANNELS];
+	u32 *chan_list;
 	u32 notify_scan_events;
 	struct cfg80211_ssid ssid[WLAN_SCAN_MAX_NUM_SSID];
 	struct ath12k_wmi_mac_addr_params bssid_list[WLAN_SCAN_MAX_NUM_BSSID];
@@ -3347,6 +4218,29 @@
 #define WMI_CHAN_REG_INFO2_ANT_MAX	GENMASK(7, 0)
 #define WMI_CHAN_REG_INFO2_MAX_TX_PWR	GENMASK(15, 8)
 
+enum reg_subdomains_6g {
+       EMPTY_6G = 0x0,
+       FCC1_CLIENT_LPI_REGULAR_6G = 0x01,
+       FCC1_CLIENT_SP_6G = 0x02,
+       FCC1_AP_LPI_6G = 0x03,
+       FCC1_CLIENT_LPI_SUBORDINATE = FCC1_AP_LPI_6G,
+       FCC1_AP_SP_6G = 0x04,
+       ETSI1_LPI_6G = 0x10,
+       ETSI1_VLP_6G = 0x11,
+       ETSI2_LPI_6G = 0x12,
+       ETSI2_VLP_6G = 0x13,
+       APL1_LPI_6G = 0x20,
+       APL1_VLP_6G = 0x21,
+};
+
+enum reg_super_domain_6g {
+       FCC1_6G = 0x01,
+       ETSI1_6G = 0x02,
+       ETSI2_6G = 0x03,
+       APL1_6G = 0x04,
+       FCC1_6G_CL = 0x05,
+ };
+
 struct ath12k_wmi_channel_params {
 	__le32 tlv_header;
 	__le32 mhz;
@@ -3369,6 +4263,12 @@
 #define ATH12K_WMI_FW_HANG_ASSERT_TYPE 1
 #define ATH12K_WMI_FW_HANG_DELAY 0
 
+enum wmi_fw_hang_recovery_mode_type {
+	ATH12K_WMI_DISABLE_FW_RECOVERY = 200,
+	ATH12K_WMI_FW_HANG_RECOVERY_MODE0,
+	ATH12K_WMI_FW_HANG_RECOVERY_MODE1,
+};
+
 /* type, 0:unused 1: ASSERT 2: not respond detect command
  * delay_time_ms, the simulate will delay time
  */
@@ -3386,14 +4286,66 @@
 	__le32 param_value;
 } __packed;
 
+enum wmi_stats_id {
+	WMI_REQUEST_PEER_STAT			= BIT(0),
+	WMI_REQUEST_AP_STAT			= BIT(1),
+	WMI_REQUEST_PDEV_STAT			= BIT(2),
+	WMI_REQUEST_VDEV_STAT			= BIT(3),
+	WMI_REQUEST_BCNFLT_STAT			= BIT(4),
+	WMI_REQUEST_VDEV_RATE_STAT		= BIT(5),
+	WMI_REQUEST_INST_STAT			= BIT(6),
+	WMI_REQUEST_MIB_STAT			= BIT(7),
+	WMI_REQUEST_RSSI_PER_CHAIN_STAT		= BIT(8),
+	WMI_REQUEST_CONGESTION_STAT		= BIT(9),
+	WMI_REQUEST_PEER_EXTD_STAT		= BIT(10),
+	WMI_REQUEST_BCN_STAT			= BIT(11),
+	WMI_REQUEST_BCN_STAT_RESET		= BIT(12),
+	WMI_REQUEST_PEER_EXTD2_STAT		= BIT(13),
+};
+
+struct wmi_request_stats_cmd {
+	__le32 tlv_header;
+	/* enum wmi_stats_id */
+	__le32 stats_id;
+	__le32 vdev_id;
+	struct ath12k_wmi_mac_addr_params peer_macaddr;
+	__le32 pdev_id;
+} __packed;
+
 struct wmi_get_pdev_temperature_cmd {
 	__le32 tlv_header;
 	__le32 param;
 	__le32 pdev_id;
 } __packed;
 
+#define MAX_WMI_UTF_LEN 252
+
+struct wmi_ftm_seg_hdr {
+	u32 len;
+	u32 msgref;
+	u32 segmentinfo;
+	u32 pdev_id;
+} __packed;
+
+struct wmi_ftm_cmd {
+	u32 tlv_header;
+	struct wmi_ftm_seg_hdr seg_hdr;
+	u8 data[];
+} __packed;
+
+struct wmi_ftm_event_msg {
+	struct wmi_ftm_seg_hdr seg_hdr;
+	u8 data[];
+} __packed;
 #define WMI_BEACON_TX_BUFFER_SIZE	512
 
+#define WMI_BEACON_EMA_PARAM_PERIODICITY_SHIFT         0
+#define WMI_BEACON_EMA_PARAM_TMPL_IDX_SHIFT            8
+#define WMI_BEACON_EMA_PARAM_FIRST_TMPL_SHIFT          16
+#define WMI_BEACON_EMA_PARAM_LAST_TMPL_SHIFT           24
+
+#define WMI_BEACON_PROTECTION_EN_BIT	BIT(0)
+
 struct wmi_bcn_tmpl_cmd {
 	__le32 tlv_header;
 	__le32 vdev_id;
@@ -3404,6 +4356,11 @@
 	__le32 csa_event_bitmap;
 	__le32 mbssid_ie_offset;
 	__le32 esp_ie_offset;
+	__le32 csc_switch_count_offset;
+	__le32 csc_event_bitmap;
+	__le32 mu_edca_ie_offset;
+	__le32 feature_enable_bitmap;
+	__le32 ema_params;
 } __packed;
 
 struct wmi_vdev_install_key_cmd {
@@ -3454,7 +4411,39 @@
 	u8 rates[WMI_MAX_SUPPORTED_RATES];
 };
 
+#define EMLSR_DELAY_MAX 5
+#define EMLSR_TRANS_DELAY_MAX 6
+#define EMLCAP_TIMEOUT_MAX 11
+#define IEEE80211_TU_TO_USEC(x)        ((x) << 10)  /* (x)X1024 */
+#define EMLCAP_TRANSTIMEOUT_0  0
+#define EMLCAP_TRANSTIMEOUT_1  (1U << 7)
+#define EMLCAP_TRANSTIMEOUT_2  (1U << 8)
+#define EMLCAP_TRANSTIMEOUT_3  (1U << 9)
+#define EMLCAP_TRANSTIMEOUT_4  IEEE80211_TU_TO_USEC(1)
+#define EMLCAP_TRANSTIMEOUT_5  IEEE80211_TU_TO_USEC((1U << 1))
+#define EMLCAP_TRANSTIMEOUT_6  IEEE80211_TU_TO_USEC((1U << 2))
+#define EMLCAP_TRANSTIMEOUT_7  IEEE80211_TU_TO_USEC((1U << 3))
+#define EMLCAP_TRANSTIMEOUT_8  IEEE80211_TU_TO_USEC((1U << 4))
+#define EMLCAP_TRANSTIMEOUT_9  IEEE80211_TU_TO_USEC((1U << 5))
+#define EMLCAP_TRANSTIMEOUT_10 IEEE80211_TU_TO_USEC((1U << 6))
+
+struct peer_assoc_mlo_params {
+	bool enabled;
+	bool assoc_link;
+	bool primary_umac;
+	bool peer_id_valid;
+	bool logical_link_idx_valid;
+	u8 mld_addr[ETH_ALEN];
+	u32 logical_link_idx;
+	u32 ml_peer_id;
+	u32 ieee_link_id;
+	u8 num_partner_links;
+	struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
+	u32 eml_caps;
+};
+
 struct ath12k_wmi_peer_assoc_arg {
+	struct wmi_mac_addr peer_macaddr;
 	u32 vdev_id;
 	u32 peer_new_assoc;
 	u32 peer_associd;
@@ -3487,6 +4476,7 @@
 	bool bw_40;
 	bool bw_80;
 	bool bw_160;
+	bool bw_320;
 	bool stbc_flag;
 	bool ldpc_flag;
 	bool static_mimops_flag;
@@ -3514,8 +4504,39 @@
 	bool twt_responder;
 	bool twt_requester;
 	struct ath12k_wmi_ppe_threshold_arg peer_ppet;
+	bool eht_flag;
+	u32 peer_eht_cap_mac[WMI_MAX_EHTCAP_MAC_SIZE];
+	u32 peer_eht_cap_phy[WMI_MAX_EHTCAP_PHY_SIZE];
+	u32 peer_eht_mcs_count;
+	u32 peer_eht_rx_mcs_set[WMI_MAX_EHTCAP_RATE_SET];
+	u32 peer_eht_tx_mcs_set[WMI_MAX_EHTCAP_RATE_SET];
+	u32 peer_eht_ops;
+	struct ath12k_ppe_threshold peer_eht_ppet;
+	u32 ru_punct_bitmap;
+	bool is_assoc;
+	struct peer_assoc_mlo_params ml;
 };
 
+struct wmi_peer_assoc_mlo_partner_info {
+	u32 tlv_header;
+	u32 vdev_id;
+	u32 hw_link_id;
+	u32 flags;
+	u32 logical_link_idx;
+} __packed;
+
+struct wmi_peer_assoc_mlo_params {
+	u32 tlv_header;
+	u32 flags;
+	struct wmi_mac_addr mld_addr;
+	u32 logical_link_idx;
+	u32 ml_peer_id;
+	u32 ieee_link_id;
+	u32 emlsr_trans_timeout_us;
+	u32 emlsr_trans_delay_us;
+	u32 emlsr_padding_delay_us;
+} __packed;
+
 struct wmi_peer_assoc_complete_cmd {
 	__le32 tlv_header;
 	struct ath12k_wmi_mac_addr_params peer_macaddr;
@@ -3545,8 +4566,23 @@
 	__le32 peer_he_cap_info_internal;
 	__le32 min_data_rate;
 	__le32 peer_he_caps_6ghz;
+	__le32 sta_type;
+	__le32 bss_max_idle_option;
+	__le32 auth_mode;
+	__le32 peer_flags_ext;
+	__le32 ru_punct_bitmap;
+	__le32 peer_eht_cap_mac[WMI_MAX_EHTCAP_MAC_SIZE];
+	__le32 peer_eht_cap_phy[WMI_MAX_EHTCAP_PHY_SIZE];
+	__le32 peer_eht_ops;
+	struct wmi_ppe_threshold peer_eht_ppet;
 } __packed;
 
+struct stats_request_params {
+	u32 stats_id;
+	u32 vdev_id;
+	u32 pdev_id;
+};
+
 struct wmi_stop_scan_cmd {
 	__le32 tlv_header;
 	__le32 requestor;
@@ -3569,6 +4605,7 @@
 	__le32 pdev_id;
 } __packed;
 
+#define WMI_MLO_MGMT_TID		0xFFFFFFFF
 #define WMI_MGMT_SEND_DOWNLD_LEN	64
 
 #define WMI_TX_PARAMS_DWORD0_POWER		GENMASK(7, 0)
@@ -3596,6 +4633,18 @@
 	/* This TLV is followed by struct wmi_mgmt_frame */
 
 	/* Followed by struct wmi_mgmt_send_params */
+	/* Followed by struct wmi_mlo_mgmt_send_params */
+} __packed;
+
+struct wmi_mlo_mgmt_send_params {
+	u32 tlv_header;
+	u32 hw_link_id;
+} __packed;
+
+struct wmi_mgmt_send_params {
+	u32 tlv_header;
+	u32 tx_param_dword0;
+	u32 tx_param_dword1;
 } __packed;
 
 struct wmi_sta_powersave_mode_cmd {
@@ -3673,6 +4722,39 @@
 	} cc_info;
 } __packed;
 
+#define THERMAL_LEVELS  1
+struct tt_level_config {
+	u32 tmplwm;
+	u32 tmphwm;
+	u32 dcoffpercent;
+	u32 priority;
+};
+
+struct ath12k_wmi_thermal_mitigation_arg {
+	u32 pdev_id;
+	u32 enable;
+	u32 dc;
+	u32 dc_per_event;
+	struct tt_level_config levelconf[THERMAL_LEVELS];
+};
+
+struct wmi_therm_throt_config_request_cmd {
+	__le32 tlv_header;
+	__le32 pdev_id;
+	__le32 enable;
+	__le32 dc;
+	__le32 dc_per_event;
+	__le32 therm_throt_levels;
+} __packed;
+
+struct wmi_therm_throt_level_config_info {
+	__le32 tlv_header;
+	u32 temp_lwm;
+	u32 temp_hwm;
+	u32 dc_off_percent;
+	u32 prio;
+} __packed;
+
 struct wmi_delba_send_cmd {
 	__le32 tlv_header;
 	__le32 vdev_id;
@@ -3704,6 +4786,36 @@
 	struct ath12k_wmi_mac_addr_params peer_macaddr;
 } __packed;
 
+struct wmi_pdev_pktlog_filter_info {
+	__le32 tlv_header;
+	struct ath12k_wmi_mac_addr_params peer_macaddr;
+} __packed;
+
+struct wmi_pdev_pktlog_filter_cmd {
+	__le32 tlv_header;
+	__le32 pdev_id;
+	__le32 enable;
+	__le32 filter_type;
+	__le32 num_mac;
+} __packed;
+
+enum ath12k_wmi_pktlog_enable {
+	ATH12K_WMI_PKTLOG_ENABLE_AUTO  = 0,
+	ATH12K_WMI_PKTLOG_ENABLE_FORCE = 1,
+};
+
+struct wmi_pktlog_enable_cmd {
+	__le32 tlv_header;
+	__le32 pdev_id;
+	__le32 evlist; /* WMI_PKTLOG_EVENT */
+	__le32 enable;
+} __packed;
+
+struct wmi_pktlog_disable_cmd {
+	__le32 tlv_header;
+	__le32 pdev_id;
+} __packed;
+
 #define DFS_PHYERR_UNIT_TEST_CMD 0
 #define DFS_UNIT_TEST_MODULE	0x2b
 #define DFS_UNIT_TEST_TOKEN	0xAA
@@ -3721,6 +4833,26 @@
 	u32 radar_param;
 };
 
+#define WMI_AWGN_UNIT_TEST_MODULE 0x18
+#define WMI_AWGN_UNIT_TEST_TOKEN  0
+#define WMI_UNIT_TEST_AWGN_INTF_TYPE 1
+#define WMI_UNIT_TEST_AWGN_PRIMARY_20 0x01
+
+enum wmi_awgn_test_args_idx {
+        WMI_AWGN_TEST_AWGN_INT,
+        WMI_AWGN_TEST_BITMAP,
+        WMI_AWGN_MAX_TEST_ARGS,
+};
+
+#define WMI_M3_UNIT_TEST_MODULE	0x22
+#define WMI_M3_UNIT_TEST_TOKEN	0
+
+enum wmi_m3_test_args_idx {
+	WMI_M3_TEST_CMDID,
+	WMI_M3_TEST_ENABLE,
+	WMI_M3_MAX_TEST_ARGS,
+};
+
 struct wmi_unit_test_cmd {
 	__le32 tlv_header;
 	__le32 vdev_id;
@@ -3757,6 +4889,9 @@
 #define WMI_PEER_160MHZ		0x40000000
 #define WMI_PEER_SAFEMODE_EN	0x80000000
 
+#define WMI_PEER_EXT_EHT        0x00000001
+#define WMI_PEER_EXT_320MHZ     0x00000002
+
 struct ath12k_wmi_vht_rate_set_params {
 	__le32 tlv_header;
 	__le32 rx_max_rate;
@@ -3768,6 +4903,14 @@
 
 struct ath12k_wmi_he_rate_set_params {
 	__le32 tlv_header;
+	/* MCS at which the peer can receive */
+	__le32 rx_mcs_set;
+	/* MCS at which the peer can transmit */
+	__le32 tx_mcs_set;
+} __packed;
+
+struct wmi_eht_rate_set {
+	__le32 tlv_header;
 	__le32 rx_mcs_set;
 	__le32 tx_mcs_set;
 } __packed;
@@ -3775,7 +4918,6 @@
 #define MAX_REG_RULES 10
 #define REG_ALPHA2_LEN 2
 #define MAX_6G_REG_RULES 5
-#define REG_US_5G_NUM_REG_RULES 4
 
 enum wmi_start_event_param {
 	WMI_VDEV_START_RESP_EVENT = 0,
@@ -3796,6 +4938,7 @@
 	};
 	__le32 cfgd_tx_streams;
 	__le32 cfgd_rx_streams;
+	s32 max_allowed_tx_power;
 } __packed;
 
 /* VDEV start response status codes */
@@ -3805,6 +4948,10 @@
 	WMI_VDEV_START_RESPONSE_NOT_SUPPORTED = 2,
 	WMI_VDEV_START_RESPONSE_DFS_VIOLATION = 3,
 	WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN = 4,
+	/** Band unsupported by current hw mode in VDEV start */
+	WMI_VDEV_START_RESPONSE_INVALID_BAND = 5,
+	WMI_VDEV_START_RESPONSE_INVALID_PREFERRED_TX_RX_STREAMS = 6, /** Invalid preferred tx/rx streams */
+	WMI_VDEV_START_RESPONSE_INVALID_TX_VAP_CONFIG = 7, /** Invalid tx_vap config in VDEV start */
 };
 
 enum wmi_reg_6g_ap_type {
@@ -3846,6 +4993,11 @@
 
 #define WMI_REG_CLIENT_MAX 4
 
+enum wmi_reg_chan_list_cmd_type {
+	WMI_REG_CHAN_LIST_CC_ID = 0,
+	WMI_REG_CHAN_LIST_CC_EXT_ID = 1,
+};
+
 struct wmi_reg_chan_list_cc_ext_event {
 	__le32 status_code;
 	__le32 phy_id;
@@ -3918,7 +5070,6 @@
 } __packed;
 
 struct wmi_pdev_bss_chan_info_event {
-	__le32 pdev_id;
 	__le32 freq;	/* Units in MHz */
 	__le32 noise_floor;	/* units are dBm */
 	/* rx clear - how often the channel was unused */
@@ -3936,6 +5087,7 @@
 	/*rx_cycle cnt for my bss in 64bits format */
 	__le32 rx_bss_cycle_count_low;
 	__le32 rx_bss_cycle_count_high;
+	__le32 pdev_id;
 } __packed;
 
 #define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0
@@ -3977,6 +5129,177 @@
 	__le32 tx_status;
 } __packed;
 
+struct wmi_pdev_stats_base {
+	s32 chan_nf;
+	u32 tx_frame_count; /* Cycles spent transmitting frames */
+	u32 rx_frame_count; /* Cycles spent receiving frames */
+	u32 rx_clear_count; /* Total channel busy time, evidently */
+	u32 cycle_count; /* Total on-channel time */
+	u32 phy_err_count;
+	u32 chan_tx_pwr;
+} __packed;
+
+struct wmi_pdev_stats_extra {
+	u32 ack_rx_bad;
+	u32 rts_bad;
+	u32 rts_good;
+	u32 fcs_bad;
+	u32 no_beacons;
+	u32 mib_int_count;
+} __packed;
+
+struct wmi_pdev_stats_tx {
+	/* Num HTT cookies queued to dispatch list */
+	s32 comp_queued;
+
+	/* Num HTT cookies dispatched */
+	s32 comp_delivered;
+
+	/* Num MSDU queued to WAL */
+	s32 msdu_enqued;
+
+	/* Num MPDU queue to WAL */
+	s32 mpdu_enqued;
+
+	/* Num MSDUs dropped by WMM limit */
+	s32 wmm_drop;
+
+	/* Num Local frames queued */
+	s32 local_enqued;
+
+	/* Num Local frames done */
+	s32 local_freed;
+
+	/* Num queued to HW */
+	s32 hw_queued;
+
+	/* Num PPDU reaped from HW */
+	s32 hw_reaped;
+
+	/* Num underruns */
+	s32 underrun;
+
+	/* Num PPDUs cleaned up in TX abort */
+	s32 tx_abort;
+
+	/* Num MPDUs requeued by SW */
+	s32 mpdus_requed;
+
+	/* excessive retries */
+	u32 tx_ko;
+
+	/* data hw rate code */
+	u32 data_rc;
+
+	/* Scheduler self triggers */
+	u32 self_triggers;
+
+	/* frames dropped due to excessive sw retries */
+	u32 sw_retry_failure;
+
+	/* illegal rate phy errors  */
+	u32 illgl_rate_phy_err;
+
+	/* wal pdev continuous xretry */
+	u32 pdev_cont_xretry;
+
+	/* wal pdev tx timeouts */
+	u32 pdev_tx_timeout;
+
+	/* wal pdev resets  */
+	u32 pdev_resets;
+
+	/* frames dropped due to non-availability of stateless TIDs */
+	u32 stateless_tid_alloc_failure;
+
+	/* PhY/BB underrun */
+	u32 phy_underrun;
+
+	/* MPDU is more than txop limit */
+	u32 txop_ovf;
+} __packed;
+
+struct wmi_pdev_stats_rx {
+	/* Cnts any change in ring routing mid-ppdu */
+	s32 mid_ppdu_route_change;
+
+	/* Total number of statuses processed */
+	s32 status_rcvd;
+
+	/* Extra frags on rings 0-3 */
+	s32 r0_frags;
+	s32 r1_frags;
+	s32 r2_frags;
+	s32 r3_frags;
+
+	/* MSDUs / MPDUs delivered to HTT */
+	s32 htt_msdus;
+	s32 htt_mpdus;
+
+	/* MSDUs / MPDUs delivered to local stack */
+	s32 loc_msdus;
+	s32 loc_mpdus;
+
+	/* AMSDUs that have more MSDUs than the status ring size */
+	s32 oversize_amsdu;
+
+	/* Number of PHY errors */
+	s32 phy_errs;
+
+	/* Number of PHY errors drops */
+	s32 phy_err_drop;
+
+	/* Number of mpdu errors - FCS, MIC, ENC etc. */
+	s32 mpdu_errs;
+} __packed;
+
+struct wmi_pdev_stats {
+	struct wmi_pdev_stats_base base;
+	struct wmi_pdev_stats_tx tx;
+	struct wmi_pdev_stats_rx rx;
+} __packed;
+
+#define WLAN_MAX_AC 4
+#define MAX_TX_RATE_VALUES 10
+#define MAX_TX_RATE_VALUES 10 /* FIXME: duplicate of the identical define above; drop one */
+
+struct wmi_vdev_stats {
+	u32 vdev_id;
+	u32 beacon_snr;
+	u32 data_snr;
+	u32 num_tx_frames[WLAN_MAX_AC];		/* per access category */
+	u32 num_rx_frames;
+	u32 num_tx_frames_retries[WLAN_MAX_AC];	/* per access category */
+	u32 num_tx_frames_failures[WLAN_MAX_AC];	/* per access category */
+	u32 num_rts_fail;
+	u32 num_rts_success;
+	u32 num_rx_err;
+	u32 num_rx_discard;
+	u32 num_tx_not_acked;
+	u32 tx_rate_history[MAX_TX_RATE_VALUES];
+	u32 beacon_rssi_history[MAX_TX_RATE_VALUES];	/* depth reuses MAX_TX_RATE_VALUES */
+} __packed;
+
+struct wmi_bcn_stats {
+	u32 vdev_id;		/* vdev these beacon counters belong to */
+	u32 tx_bcn_succ_cnt;	/* beacons successfully transmitted */
+	u32 tx_bcn_outage_cnt;	/* beacon transmit outages (failures) */
+} __packed;
+
+struct wmi_stats_event {
+	__le32 stats_id;		/* presumably a bitmask of stat types present — verify vs FW */
+	__le32 num_pdev_stats;		/* count of wmi_pdev_stats records */
+	__le32 num_vdev_stats;		/* count of wmi_vdev_stats records */
+	__le32 num_peer_stats;
+	__le32 num_bcnflt_stats;
+	__le32 num_chan_stats;
+	__le32 num_mib_stats;
+	__le32 pdev_id;
+	__le32 num_bcn_stats;		/* count of wmi_bcn_stats records */
+	__le32 num_peer_extd_stats;
+	__le32 num_peer_extd2_stats;
+} __packed;
+
 struct wmi_pdev_ctl_failsafe_chk_event {
 	__le32 pdev_id;
 	__le32 ctl_failsafe_status;
@@ -4001,10 +5324,83 @@
 	a_sle32 sidx;
 } __packed;
 
+struct wmi_pdev_radar_flags_param {
+	__le32 radar_flags;
+} __packed;
+#define WMI_PDEV_RADAR_FLAGS_FULL_BW_NOL_MARK_BIT 0
+
+#define WMI_DCS_AWGN_INTF       0x04
+
+struct wmi_dcs_awgn_info {
+	u32 channel_width;
+	u32 chan_freq;
+	u32 center_freq0;
+	u32 center_freq1;
+	u32 chan_bw_interference_bitmap;
+} __packed;
+
+struct wmi_dcs_interference_ev {
+	u32 interference_type;
+	u32 pdev_id;
+} __packed;
+
+enum wmi_host_channel_width {
+	WMI_HOST_CHAN_WIDTH_20      = 0,
+	WMI_HOST_CHAN_WIDTH_40      = 1,
+	WMI_HOST_CHAN_WIDTH_80      = 2,
+	WMI_HOST_CHAN_WIDTH_160     = 3,
+	WMI_HOST_CHAN_WIDTH_80P80   = 4,
+	WMI_HOST_CHAN_WIDTH_5       = 5,
+	WMI_HOST_CHAN_WIDTH_10      = 6,
+	WMI_HOST_CHAN_WIDTH_165     = 7,
+	WMI_HOST_CHAN_WIDTH_160P160 = 8,
+	WMI_HOST_CHAN_WIDTH_320     = 9,
+	/* keep last */
+	WMI_HOST_CHAN_WIDTH_MAX	  = 0xF,
+};
+
+enum wmi_dcs_interference_chan_segment {
+	WMI_DCS_SEG_PRI20                 = 0x1,
+	WMI_DCS_SEG_SEC20                 = 0x2,
+	WMI_DCS_SEG_SEC40_LOW             = 0x4,
+	WMI_DCS_SEG_SEC40_UP              = 0x8,
+	WMI_DCS_SEG_SEC40                 = 0xC,
+	WMI_DCS_SEG_SEC80_LOW             = 0x10,
+	WMI_DCS_SEG_SEC80_LOW_UP          = 0x20,
+	WMI_DCS_SEG_SEC80_UP_LOW          = 0x40,
+	WMI_DCS_SEG_SEC80_UP              = 0x80,
+	WMI_DCS_SEG_SEC80                 = 0xF0,
+	WMI_DCS_SEG_SEC160_LOW            = 0x0100,
+	WMI_DCS_SEG_SEC160_LOW_UP         = 0x0200,
+	WMI_DCS_SEG_SEC160_LOW_UP_UP      = 0x0400,
+	WMI_DCS_SEG_SEC160_LOW_UP_UP_UP   = 0x0800,
+	WMI_DCS_SEG_SEC160_UP_LOW_LOW_LOW = 0x1000,
+	WMI_DCS_SEG_SEC160_UP_LOW_LOW     = 0x2000,
+	WMI_DCS_SEG_SEC160_UP_LOW         = 0x4000,
+	WMI_DCS_SEG_SEC160_UP             = 0x8000,
+	WMI_DCS_SEG_SEC160                = 0xFF00,
+};
+
 struct wmi_pdev_temperature_event {
 	/* temperature value in Celsius degree */
 	a_sle32 temp;
 	__le32 pdev_id;
+	__le32 mbssid_flags;
+	__le32 mbssid_tx_vdev_id;
+} __packed;
+
+#define WMI_AC_BE				0
+#define WMI_AC_BK				1
+#define WMI_AC_VI				2
+#define WMI_AC_VO				3
+#define WMI_AC_MAX				4
+
+struct wmi_pdev_update_muedca_event {
+	u32 pdev_id;
+	u32 aifsn[WMI_AC_MAX];
+	u32 ecwmin[WMI_AC_MAX];
+	u32 ecwmax[WMI_AC_MAX];
+	u32 muedca_expiration_time[WMI_AC_MAX];
 } __packed;
 
 #define WMI_RX_STATUS_OK			0x00
@@ -4015,6 +5411,1000 @@
 
 #define WLAN_MGMT_TXRX_HOST_MAX_ANTENNA 4
 
+/**
+ * ath12k_host_mlo_glb_per_chip_crash_info - per chip crash
+ * information in MLO global shared memory
+ * @chip_id: MLO chip id
+ * @crash_reason: Address of the crash_reason corresponding to chip_id
+ * @recovery_mode: Address of recovery mode corresponding to chip_id
+ */
+struct ath12k_host_mlo_glb_per_chip_crash_info {
+	u8 chip_id;
+	void *crash_reason;
+	void *recovery_mode;
+};
+
+/**
+ * ath12k_host_mlo_glb_chip_crash_info - chip crash information in MLO
+ * global shared memory
+ * @no_of_chips: No of partner chip to which crash information is shared
+ * @valid_chip_bmap: Valid chip bitmap
+ * @per_chip_crash_info: pointer to per chip crash information.
+ */
+struct ath12k_host_mlo_glb_chip_crash_info {
+	u8 no_of_chips;
+	u8 valid_chip_bmap;
+	struct ath12k_host_mlo_glb_per_chip_crash_info *per_chip_crash_info;
+};
+
+/**
+ * ath12k_host_mlo_glb_rx_reo_snapshot_info - MGMT Rx REO information in MLO
+ * global shared memory
+ * @num_links: Number of valid links
+ * @valid_link_bmap: Valid link bitmap
+ * @link_info: pointer to an array of Rx REO per-link information
+ * @hw_forwarded_snapshot_ver: HW forwarded snapshot version
+ * @fw_forwarded_snapshot_ver: FW forwarded snapshot version
+ * @fw_consumed_snapshot_ver: FW consumed snapshot version
+ */
+struct ath12k_host_mlo_glb_rx_reo_snapshot_info {
+	u8 num_links;
+	u16 valid_link_bmap;
+	struct ath12k_host_mlo_glb_rx_reo_per_link_info *link_info;
+	u8 hw_forwarded_snapshot_ver;
+	u8 fw_forwarded_snapshot_ver;
+	u8 fw_consumed_snapshot_ver;
+};
+
+/**
+ * ath12k_host_ath12k_mlo_glb_shmem_params - MLO global shared memory parameters
+ * @major_version: Major version
+ * @minor_version: Minor version
+ */
+struct ath12k_host_ath12k_mlo_glb_shmem_params {
+	u16 major_version;
+	u16 minor_version;
+};
+
+/**
+ * ath12k_host_mlo_mem_arena - MLO Global shared memory arena context
+ * @shmem_params: shared memory parameters
+ * @rx_reo_snapshot_info: MGMT Rx REO snapshot information
+ * @init_done: Initialized snapshot info
+ */
+struct ath12k_host_mlo_mem_arena {
+	struct ath12k_host_ath12k_mlo_glb_shmem_params shmem_params;
+	struct ath12k_host_mlo_glb_rx_reo_snapshot_info rx_reo_snapshot_info;
+	struct ath12k_host_mlo_glb_chip_crash_info global_chip_crash_info;
+	bool init_done;
+	/* Protect the parallel initialization */
+	struct mutex mutex_lock;
+};
+
+/** Helper Macros for tlv header of the given tlv buffer */
+/* Size of the TLV Header which is the Tag and Length fields */
+#define MLO_SHMEM_TLV_HDR_SIZE (1 * sizeof(u32))
+
+/* TLV Helper macro to get the TLV Header given the pointer to the TLV buffer. */
+#define MLO_SHMEMTLV_GET_HDR(tlv_buf) (((u32 *)(tlv_buf))[0])
+
+/* TLV Helper macro to set the TLV Header given the pointer to the TLV buffer. */
+#define MLO_SHMEMTLV_SET_HDR(tlv_buf, tag, len) \
+	((((u32 *)(tlv_buf))[0]) = ((tag << 16) | (len & 0x0000FFFF)))
+
+/* TLV Helper macro to get the TLV Tag given the TLV header. */
+#define MLO_SHMEMTLV_GET_TLVTAG(tlv_header)  ((u32)((tlv_header) >> 16))
+
+/* TLV Helper macro to get the TLV Buffer Length (minus TLV header size)
+ * given the TLV header.
+ */
+#define MLO_SHMEMTLV_GET_TLVLEN(tlv_header) \
+	((u32)((tlv_header) & 0x0000FFFF))
+/* TLV Helper macro to get the TLV length from TLV structure size
+ * by removing TLV header size.
+ */
+#define MLO_SHMEMTLV_GET_STRUCT_TLVLEN(tlv_struct) \
+	((u32)(sizeof(tlv_struct) - MLO_SHMEM_TLV_HDR_SIZE))
+
+/* Definition of Global H SHMEM Arena */
+struct ath12k_mlo_glb_shmem {
+	/* TLV tag and len; tag equals ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_H_SHMEM */
+	u32 tlv_header;
+	/**
+	 * major_minor_version
+	 *
+	 * [15:0]:   minor version
+	 * [31:16]:  major version
+	 */
+	u32 major_minor_version;
+	/*  This TLV is followed by TLVs
+	 *  mlo_glb_rx_reo_snapshot_info reo_snapshot;
+	 *  struct mlo_glb_link_info glb_info;
+	 */
+};
+
+#define MLO_SHMEM_GLB_H_SHMEM_PARAM_MAJOR_VERSION_MASK	GENMASK(31, 16)
+#define MLO_SHMEM_GLB_H_SHMEM_PARAM_MINOR_VERSION_MASK	GENMASK(15, 0)
+#define MLO_SHMEM_GLB_LINK_INFO_PARAM_VALID_LINK_BMAP_MASK	GENMASK(19, 4)
+#define MLO_SHMEM_GLB_LINK_INFO_PARAM_NO_OF_LINKS_MASK	GENMASK(3, 0)
+#define MLO_SHMEM_GLB_RX_REO_SNAPSHOT_PARAM_HW_FWD_SNAPSHOT_VER_MASK	GENMASK(2, 0)
+#define MLO_SHMEM_GLB_RX_REO_SNAPSHOT_PARAM_FW_FWD_SNAPSHOT_VER_MASK	GENMASK(5, 3)
+#define	MLO_SHMEM_GLB_RX_REO_SNAPSHOT_PARAM_FW_CONSUMED_SNAPSHOT_VER_MASK	GENMASK(8, 6)
+#define MLO_SHMEM_CHIP_CRASH_INFO_PARAM_NO_OF_CHIPS_MASK	GENMASK(1, 0)
+#define	MLO_SHMEM_CHIP_CRASH_INFO_PARAM_VALID_CHIP_BMAP_MASK	GENMASK(4, 2)
+
+#define MLO_SHMEM_GLB_H_SHMEM_PARAM_MAJOR_VERSION_GET(major_minor_version) \
+	FIELD_GET(MLO_SHMEM_GLB_H_SHMEM_PARAM_MAJOR_VERSION_MASK, major_minor_version)
+#define MLO_SHMEM_GLB_H_SHMEM_PARAM_MINOR_VERSION_GET(major_minor_version) \
+	FIELD_GET(MLO_SHMEM_GLB_H_SHMEM_PARAM_MINOR_VERSION_MASK, major_minor_version)
+
+#define MLO_SHMEM_GLB_LINK_INFO_PARAM_VALID_LINK_BMAP_GET(link_info) \
+	FIELD_GET(MLO_SHMEM_GLB_LINK_INFO_PARAM_VALID_LINK_BMAP_MASK, link_info)
+#define MLO_SHMEM_GLB_LINK_INFO_PARAM_NO_OF_LINKS_GET(link_info) \
+	FIELD_GET(MLO_SHMEM_GLB_LINK_INFO_PARAM_NO_OF_LINKS_MASK, link_info)
+#define MLO_SHMEM_GLB_RX_REO_SNAPSHOT_PARAM_HW_FWD_SNAPSHOT_VER_GET(snapshot_ver_info) \
+	FIELD_GET(MLO_SHMEM_GLB_RX_REO_SNAPSHOT_PARAM_HW_FWD_SNAPSHOT_VER_MASK, snapshot_ver_info)
+#define MLO_SHMEM_GLB_RX_REO_SNAPSHOT_PARAM_FW_FWD_SNAPSHOT_VER_GET(snapshot_ver_info) \
+	FIELD_GET(MLO_SHMEM_GLB_RX_REO_SNAPSHOT_PARAM_FW_FWD_SNAPSHOT_VER_MASK, snapshot_ver_info)
+#define MLO_SHMEM_GLB_RX_REO_SNAPSHOT_PARAM_FW_CONSUMED_SNAPSHOT_VER_GET(snapshot_ver_info) \
+	FIELD_GET(MLO_SHMEM_GLB_RX_REO_SNAPSHOT_PARAM_FW_CONSUMED_SNAPSHOT_VER_MASK, snapshot_ver_info)
+
+#define MLO_SHMEM_CHIP_CRASH_INFO_PARAM_NO_OF_CHIPS_GET(chip_info) \
+	FIELD_GET(MLO_SHMEM_CHIP_CRASH_INFO_PARAM_NO_OF_CHIPS_MASK, chip_info)
+#define MLO_SHMEM_CHIP_CRASH_INFO_PARAM_VALID_CHIP_BMAP_GET(chip_info) \
+	FIELD_GET(MLO_SHMEM_CHIP_CRASH_INFO_PARAM_VALID_CHIP_BMAP_MASK, chip_info)
+
+/** Definition of the GLB_H_SHMEM arena tlv structures */
+enum {
+	ATH12K_MLO_SHMEM_TLV_STRUCT_MGMT_RX_REO_SNAPSHOT,
+	ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_RX_REO_PER_LINK_SNAPSHOT_INFO,
+	ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_RX_REO_SNAPSHOT_INFO,
+	ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_LINK,
+	ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_LINK_INFO,
+	ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_H_SHMEM,
+	ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_CHIP_CRASH_INFO,
+	ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_PER_CHIP_CRASH_INFO,
+};
+
+struct mlo_glb_link_info {
+	/* TLV tag and len; tag equals ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_LINK_INFO */
+	u32 tlv_header;
+
+	/**
+	 * link_info
+	 *
+	 * [3:0]:   no_of_links
+	 * [19:4]:  valid_link_bmap
+	 * [31:20]: reserved
+	 */
+	u32 link_info;
+	/*  This TLV is followed by array of mlo_glb_link:
+	 *  mlo_glb_link will have multiple instances equal to num of hw links
+	 *  received by no_of_link
+	 *      mlo_glb_link glb_link_info[];
+	 */
+};
+
+enum ath12k_mlo_chip_crash_reason {
+	ATH12K_MLO_SHMEM_CRASH_PARTNER_CHIPS = 1,
+};
+
+struct mlo_glb_chip_crash_info {
+	/* TLV tag and len; tag equals
+	 * MLO_SHMEM_TLV_STRUCT_MLO_GLB_CHIP_CRASH_INFO */
+	u32 tlv_header;
+
+	/**
+	 * [1:0]:  no_of_chips
+	 * [4:2]:  valid_chip_bmap
+	 * For number of chips beyond 3, extension fields are added.
+	 * [9:5]:  valid_chip_bmap_ext
+	 * [15:12]: no_of_chips_ext
+	 * [31:16]: reserved
+	 */
+	u32 chip_info;
+	/*
+	 * This TLV is followed by array of mlo_glb_per_chip_crash_info:
+	 * mlo_glb_per_chip_crash_info will have multiple instances equal to
+	 * num of partner chips received by no_of_chips
+	 * mlo_glb_per_chip_crash_info per_chip_crash_info[];
+	 */
+};
+
+struct mlo_glb_per_chip_crash_info {
+	/* TLV tag and len; tag equals MLO_SHMEM_TLV_STRUCT_MLO_GLB_PER_CHIP_CRASH_INFO */
+	u32 tlv_header;
+
+	/*
+	 * crash reason, takes value in enum ath12k_mlo_chip_crash_reason
+	 */
+	u32 crash_reason;
+
+	/*
+	 * recovery mode, takes value in enum ath12k_mlo_recovery_mode
+	 */
+	u32 recovery_mode;
+};
+
+
+/* Definition of the complete REO snapshot info */
+struct mlo_glb_rx_reo_snapshot_info {
+	/* TLV tag and len; tag equals ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_RX_REO_SNAPSHOT_INFO */
+	u32 tlv_header;
+
+	/**
+	 * link_info
+	 *
+	 * [3:0]:   no_of_links
+	 * [19:4]:  valid_link_bmap
+	 * [31:20]: reserved
+	 */
+	u32 link_info;
+
+	/**
+	 * snapshot_ver_info
+	 *
+	 * [2:0]:  hw_forwarded snapshot version
+	 * [5:3]:  fw_forwarded snapshot version
+	 * [8:6]:  fw_consumed snapshot version
+	 * [31:9]: reserved
+	 */
+	u32 snapshot_ver_info;
+	u32 reserved_alignment_padding;
+
+	/*  This TLV is followed by array of mlo_glb_rx_reo_per_link_snapshot_info:
+	 *  mlo_glb_rx_reo_per_link_snapshot_info will have multiple instances
+	 *  equal to num of hw links received by no_of_link
+	 *      mlo_glb_rx_reo_per_link_snapshot_info per_link_info[];
+	 */
+};
+
+struct ath12k_host_mlo_glb_rx_reo_per_link_info {
+	u8 link_id;
+	void *fw_consumed;
+	void *fw_forwarded;
+	void *hw_forwarded;
+};
+
+/* REO snapshot structure */
+struct mgmt_rx_reo_snapshot {
+	/* TLV tag and len; tag equals ATH12K_MLO_SHMEM_TLV_STRUCT_MGMT_RX_REO_SNAPSHOT */
+	u32 tlv_header;
+	u32 reserved_alignment_padding;
+	/**
+	 * mgmt_rx_reo_snapshot_low
+	 *
+	 * [0]:     valid
+	 * [16:1]:  mgmt_pkt_ctr
+	 * [31:17]: global_timestamp_low
+	 */
+	u32 mgmt_rx_reo_snapshot_low;
+
+	/**
+	 * mgmt_rx_reo_snapshot_high
+	 *
+	 * [16:0]:  global_timestamp_high
+	 * [31:17]: mgmt_pkt_ctr_redundant
+	 */
+	u32 mgmt_rx_reo_snapshot_high;
+
+};
+
+struct mlo_glb_rx_reo_per_link_snapshot_info {
+	/* TLV tag and len; tag equals ATH12K_MLO_SHMEM_TLV_STRUCT_MLO_GLB_RX_REO_PER_LINK_SNAPSHOT_INFO */
+	u32 tlv_header;
+	u32 reserved_alignment_padding;
+	struct mgmt_rx_reo_snapshot fw_consumed;
+	struct mgmt_rx_reo_snapshot fw_forwarded;
+	struct mgmt_rx_reo_snapshot hw_forwarded;
+};
+
+/**
+ * Enum which defines different versions of management Rx reorder snapshots.
+ */
+enum {
+	/**
+	 * DWORD Lower:
+	 * [15:0]  : Management packet counter
+	 * [30:16] : Redundant global time stamp = Global time stamp[14:0]
+	 * [31]    : Valid
+	 *
+	 * DWORD Upper:
+	 * [31:0]  : Global time stamp
+	 *
+	 */
+	ATH12K_MGMT_RX_REO_SNAPSHOT_VERSION_TIMESTAMP_REDUNDANCY = 0,
+
+	/**
+	 * DWORD Lower:
+	 * [14:0]  : Global time stamp[14:0]
+	 * [30:15] : Management packet counter
+	 * [31]    : Valid
+	 *
+	 * DWORD Upper:
+	 * [14:0]  : Redundant management packet counter = Management packet
+	 *           counter[14:0]
+	 * [31:15] : Global time stamp[31:15]
+	 */
+	ATH12K_MGMT_RX_REO_SNAPSHOT_VERSION_PKT_CTR_REDUNDANCY = 1,
+};
+
+/**
+ * Macros for getting and setting the required number of bits
+ * from the TLV params.
+ */
+#define ATH12K_MLO_SHMEM_GET_BITS(_val, _index, _num_bits) \
+	(((_val) >> (_index)) & ((1 << (_num_bits)) - 1))
+
+/* WMI CMD to receive the management filter criteria from the host */
+struct ath12k_wmi_mgmt_rx_reo_filter_config_cmd_fixed_param {
+	u32 tlv_header;
+	u32 pdev_id; /* pdev_id for identifying the MAC */
+	/* filter:
+	 * Each bit represents the possible combination of frame type (2 bits)
+	 * and subtype (4 bits)
+	 * There would be 64 such combinations as per the 802.11 standard
+	 * For Exp : We have beacon frame, we will take the type and subtype
+	 *           of this frame and concatenate the bits, it will give 6 bits
+	 *           number. We need to go to that bit position in the below
+	 *           2 filter_low and filter_high bitmap and set the bit.
+	 */
+	u32 filter_low;
+	u32 filter_high;
+};
+
+#define WMI_MGMT_RX_FW_CONSUMED_PARAM_MGMT_PKT_CTR_VALID_GET GENMASK(15, 15)
+#define WMI_MGMT_RX_FW_CONSUMED_PARAM_MGMT_PKT_CTR_GET	GENMASK(31, 16)
+
+struct ath12k_wmi_mgmt_rx_fw_consumed_hdr {
+	u32 rx_tsf_l32; /* h/w assigned timestamp of the rx frame in micro sec */
+	u32 rx_tsf_u32; /* h/w assigned timestamp of the rx frame in micro sec */
+	u32 pdev_id; /* pdev_id for identifying the MAC the rx mgmt frame was received by */
+	/**
+	 * peer_info_subtype
+	 *
+	 * [15:0]:  ml_peer_id, ML peer_id unique across chips
+	 * [18:16]: ieee_link_id, protocol link id on which the rx frame is received
+	 * [27:19]: reserved
+	 * [31:28]: subtype, subtype of the received MGMT frame
+	 */
+	u32 peer_info_subtype;
+	u32 chan_freq; /* frequency in MHz of the channel on which this frame was received */
+	/* Timestamp (in micro sec) of the last fw consumed/dropped mgmt. frame, same across chips */
+	u32 global_timestamp;
+	/**
+	 * mgmt_pkt_ctr_info
+	 *
+	 * [14:0]:  reserved
+	 * [15]:    mgmt_pkt_ctr_valid
+	 * [31:16]: mgmt_pkt_ctr, Sequence number of the last fw consumed mgmt frame
+	 */
+	u32 mgmt_pkt_ctr_info;
+	u32 rx_ppdu_duration_us; /* receive duration in us */
+	u32 mpdu_end_timestamp; /* mpdu end timestamp in us (based on HWMLO timer) */
+};
+
+#define WMI_MGMT_RX_REO_PARAM_IEEE_LINK_ID_GET	GENMASK(14, 12)
+#define WMI_MGMT_RX_REO_PARAM_MGMT_PKT_CTR_VALID_GET	GENMASK(15, 15)
+#define WMI_MGMT_RX_REO_PARAM_MGMT_PKT_CTR_GET	GENMASK(31, 16)
+
+/** Data structure of the TLV to add in RX EVENTID for providing REO params
+ *  like global_timestamp and mgmt_pkt_ctr
+ */
+struct ath12k_wmi_mgmt_rx_reo_params {
+	/* Timestamp (in micro sec) of the last fw forwarded mgmt. frame, same across chips */
+	u32 global_timestamp;
+	/**
+	 * mgmt_pkt_ctr_link_info
+	 *
+	 * [11:0]:  reserved
+	 * [14:12]: ieee_link_id, protocol link id on which the rx frame is received
+	 * [15]:    mgmt_pkt_ctr_valid
+	 * [31:16]: mgmt_pkt_ctr, Sequence number of the last fw forwarded mgmt frame
+	 */
+
+	u32 mgmt_pkt_ctr_link_info;
+	u32 rx_ppdu_duration_us; /* receive duration in us */
+	u32 mpdu_end_timestamp; /* mpdu end timestamp in us (based on HWMLO timer) */
+};
+
+/* struct ath12k_mgmt_rx_reo_params - MGMT Rx REO parameters
+ * @valid: Whether these params are valid
+ * @pdev_id: pdev ID for which FW consumed event is received
+ * @link_id: link ID for which FW consumed event is received
+ * @mgmt_pkt_ctr: MGMT packet counter of the frame that is consumed
+ * @global_timestamp: Global timestamp of the frame that is consumed
+ * @duration_us: duration in us
+ * @start_timestamp: start time stamp
+ * @end_timestamp: end time stamp
+ */
+struct ath12k_mgmt_rx_reo_params {
+	bool valid;
+	u8 pdev_id;
+	u8 link_id;
+	u8 mlo_grp_id;
+	u16 mgmt_pkt_ctr;
+	u32 global_timestamp;
+	u16 duration_us;
+	u32 start_timestamp;
+	u32 end_timestamp;
+};
+
+/* struct ath12k_mgmt_rx_reo_filter - MGMT Rx REO filter
+ * @filter_low: Least significant 32-bits of the filter
+ * @filter_high: Most significant 32-bits of the filter
+ */
+struct ath12k_mgmt_rx_reo_filter {
+	u32 low;
+	u32 high;
+};
+
+/* struct ath12k_mgmt_rx_reo_wait_count - Wait count for a mgmt frame
+ * @per_link_count: Array of wait counts for all MLO links. Each array entry
+ * holds the number of frames this mgmt frame should wait for on that
+ * particular link.
+ * @total_count: Sum of entries in @per_link_count
+ */
+struct ath12k_mgmt_rx_reo_wait_count {
+	unsigned int per_link_count[ATH12K_WMI_MLO_MAX_LINKS];
+	unsigned long long total_count;
+};
+
+/* struct ath12k_mgmt_rx_reo_snapshot_params - Represents the simplified version of
+ * Management Rx Frame snapshot for Host use. Note that this is different from
+ * the structure shared between the Host and FW/HW
+ * @valid: Whether this snapshot is valid
+ * @retry_count: snapshot read retry count
+ * @mgmt_pkt_ctr: MGMT packet counter. This will be local to a particular
+ * HW link
+ * @global_timestamp: Global timestamp.This is taken from a clock which is
+ * common across all the HW links
+ */
+struct ath12k_mgmt_rx_reo_snapshot_params {
+	bool valid;
+	u8 retry_count;
+	u16 mgmt_pkt_ctr;
+	u32 global_timestamp;
+};
+
+/* struct ath12k_mgmt_rx_reo_shared_snapshot - Represents the management rx-reorder
+ * shared snapshots
+ * @ath12k_mgmt_rx_reo_snapshot_low: Lower 32 bits of the reo snapshot
+ * @ath12k_mgmt_rx_reo_snapshot_high: Higher 32 bits of the reo snapshot
+ */
+struct ath12k_mgmt_rx_reo_shared_snapshot {
+	union {
+		u32 ath12k_mgmt_rx_reo_snapshot_low;
+		u32 mgmt_pkt_ctr_ver_a:16,
+			global_timestamp_redundant_ver_a:15,
+			valid_ver_a:1;
+		u32 global_timestamp_low_ver_b:15,
+			mgmt_pkt_ctr_ver_b:16,
+			valid_ver_b:1;
+	} u_low;
+
+	union {
+		u32 ath12k_mgmt_rx_reo_snapshot_high;
+		u32 global_timestamp_ver_a;
+		u32 mgmt_pkt_ctr_redundant_ver_b:15,
+			global_timestamp_high_ver_b:17;
+	} u_high;
+};
+
+/* struct ath12k_mgmt_rx_reo_snapshot_info - Information related to management Rx
+ * reorder snapshot
+ * @address: Snapshot address
+ * @version: Snapshot version
+ */
+struct ath12k_mgmt_rx_reo_snapshot_info {
+	struct ath12k_mgmt_rx_reo_shared_snapshot *address;
+	u8 version;
+};
+
+#define ATH12K_MGMT_RX_REO_SNAPSHOT_B2B_READ_SWAR_RETRY_LIMIT     (11)
+#define ATH12K_MGMT_RX_REO_SNAPSHOT_READ_RETRY_LIMIT              (25)
+
+/* enum ath12k_mgmt_rx_reo_shared_snapshot_id - Represents the management
+ * rx-reorder snapshots shared between host and target in the host DDR.
+ * These snapshots are written by FW/HW and read by Host.
+ * @ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED: FW consumed snapshot
+ * @ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED: FW forwarded snapshot
+ * @ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW: MAC HW snapshot
+ * @ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAX: Max number of snapshots
+ * @ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_INVALID: Invalid snapshot
+ */
+enum ath12k_mgmt_rx_reo_shared_snapshot_id {
+	ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED = 0,
+	ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED = 1,
+	ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW = 2,
+	ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAX = 3,
+	ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_INVALID,
+};
+
+/* struct ath12k_mgmt_rx_reo_pdev_info - Pdev information required by the Management
+ * Rx REO module
+ * @host_snapshot: Latest snapshot seen at the Host.
+ * It considers both MGMT Rx and MGMT FW consumed.
+ * @last_valid_shared_snapshot: Array of last valid snapshots(for snapshots
+ * shared between host and target)
+ * @host_target_shared_snapshot_info: Array of meta information related to
+ * snapshots(for snapshots shared between host and target)
+ * @filter: MGMT Rx REO filter
+ * @init_complete: Flag to indicate initialization completion of the
+ * ath12k_mgmt_rx_reo_pdev_info object
+ */
+struct ath12k_mgmt_rx_reo_pdev_info {
+	struct ath12k_mgmt_rx_reo_snapshot_params host_snapshot;
+	struct ath12k_mgmt_rx_reo_snapshot_params last_valid_shared_snapshot
+		[ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
+	struct ath12k_mgmt_rx_reo_snapshot_info host_target_shared_snapshot_info
+		[ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
+	struct ath12k_mgmt_rx_reo_filter filter;
+	struct ath12k_mgmt_rx_reo_shared_snapshot raw_snapshots[ATH12K_WMI_MLO_MAX_LINKS]
+		[ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAX]
+		[ATH12K_MGMT_RX_REO_SNAPSHOT_READ_RETRY_LIMIT]
+			[ATH12K_MGMT_RX_REO_SNAPSHOT_B2B_READ_SWAR_RETRY_LIMIT];
+	bool init_complete;
+};
+
+/* enum ath12k_mgmt_rx_reo_frame_descriptor_type - Enumeration for management frame
+ * descriptor type.
+ * @ATH12K_MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME: Management frame to be consumed
+ * by host.
+ * @ATH12K_MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME: Management frame consumed by FW
+ * @ATH12K_MGMT_RX_REO_FRAME_DESC_ERROR_FRAME: Management frame which got dropped
+ * at host due to any error
+ * @ATH12K_MGMT_RX_REO_FRAME_DESC_TYPE_MAX: Maximum number of frame types
+ */
+enum ath12k_mgmt_rx_reo_frame_descriptor_type {
+	ATH12K_MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME = 0,
+	ATH12K_MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME,
+	ATH12K_MGMT_RX_REO_FRAME_DESC_ERROR_FRAME,
+	ATH12K_MGMT_RX_REO_FRAME_DESC_TYPE_MAX,
+};
+
+/* struct ath12k_mgmt_rx_reo_global_ts_info - This structure holds the global time
+ * stamp information of a frame.
+ * @valid: Indicates whether global time stamp is valid
+ * @global_ts: Global time stamp value
+ * @start_ts: Start time stamp value
+ * @end_ts: End time stamp value
+ * @expiry_time: information in the structure is valid until expiry_time
+ */
+struct ath12k_mgmt_rx_reo_global_ts_info {
+	bool valid;
+	u32 global_ts;
+	u32 start_ts;
+	u32 end_ts;
+	unsigned long expiry_time;
+};
+
+/* struct ath12k_reo_ingress_debug_frame_info - Debug information about a frame
+ * entering reorder process
+ * @link_id: link id
+ * @mgmt_pkt_ctr: management packet counter
+ * @global_timestamp: MLO global time stamp
+ * @start_timestamp: start time stamp of the frame
+ * @end_timestamp: end time stamp of the frame
+ * @duration_us: duration of the frame in us
+ * @desc_type: Type of the frame descriptor
+ * @frame_type: frame type
+ * @frame_subtype: frame sub type
+ * @ingress_timestamp: Host time stamp when the frames enters the reorder
+ * process
+ * @ingress_duration: Duration in us for processing the incoming frame.
+ * ingress_duration = Time stamp at which reorder list update is done -
+ * Time stamp at which frame has entered the reorder module
+ * @wait_count: Wait count calculated for the current frame
+ * @is_queued: Indicates whether this frame is queued to reorder list
+ * @is_stale: Indicates whether this frame is stale.
+ * @is_parallel_rx: Indicates that this frame is received in parallel to the
+ * last frame which is delivered to the upper layer.
+ * @zero_wait_count_rx: Indicates whether this frame's wait count was
+ * zero when received by host
+ * @immediate_delivery: Indicates whether this frame can be delivered
+ * immediately to the upper layers
+ * @is_error: Indicates whether any error occurred during processing this frame
+ * @ts_last_released_frame: Stores the global time stamp for the last frame
+ * removed from the reorder list
+ * @list_size_rx: Size of the reorder list when this frame is received (before
+ * updating the list based on this frame).
+ * @list_insertion_pos: Position in the reorder list where this frame is going
+ * to get inserted (Applicable for only host consumed frames)
+ * @shared_snapshots: snapshots shared b/w host and target
+ * @host_snapshot: host snapshot
+ * @cpu_id: CPU index
+ * @reo_required: Indicates whether reorder is required for the current frame.
+ * If reorder is not required, current frame will just be used for updating the
+ * wait count of frames already part of the reorder list.
+ */
+struct ath12k_reo_ingress_debug_frame_info {
+	u8 link_id;
+	u16 mgmt_pkt_ctr;
+	u32 global_timestamp;
+	u32 start_timestamp;
+	u32 end_timestamp;
+	u32 duration_us;
+	enum ath12k_mgmt_rx_reo_frame_descriptor_type desc_type;
+	u8 frame_type;
+	u8 frame_subtype;
+	u64 ingress_timestamp;
+	u64 ingress_duration;
+	struct ath12k_mgmt_rx_reo_wait_count wait_count;
+	bool is_queued;
+	bool is_stale;
+	bool is_parallel_rx;
+	bool zero_wait_count_rx;
+	bool immediate_delivery;
+	bool is_error;
+	struct ath12k_mgmt_rx_reo_global_ts_info ts_last_released_frame;
+	s16 list_size_rx;
+	s16 list_insertion_pos;
+	struct ath12k_mgmt_rx_reo_snapshot_params shared_snapshots
+		[ATH12K_WMI_MLO_MAX_LINKS][ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
+	struct ath12k_mgmt_rx_reo_snapshot_params host_snapshot[ATH12K_WMI_MLO_MAX_LINKS];
+	int cpu_id;
+	bool reo_required;
+};
+
+/* struct ath12k_reo_ingress_frame_stats - Structure to store statistics related to
+ * incoming frames
+ * @ingress_count: Number of frames entering reo module
+ * @queued_count: Number of frames queued to reorder list
+ * @zero_wait_count_rx_count: Number of frames for which wait count is
+ * zero when received at host
+ * @immediate_delivery_count: Number of frames which can be delivered
+ * immediately to the upper layers without reordering. A frame can be
+ * immediately delivered if it has wait count of zero on reception at host
+ * and the global time stamp is less than or equal to the global time
+ * stamp of all the frames in the reorder list. Such frames would get
+ * inserted to the head of the reorder list and gets delivered immediately
+ * to the upper layers.
+ * @stale_count: Number of stale frames. Any frame older than the
+ * last frame delivered to upper layer is a stale frame.
+ * @error_count: Number of frames dropped due to error occurred
+ * within the reorder module
+ */
+struct ath12k_reo_ingress_frame_stats {
+	u64 ingress_count
+		[ATH12K_WMI_MLO_MAX_LINKS][ATH12K_MGMT_RX_REO_FRAME_DESC_TYPE_MAX];
+	u64 queued_count[ATH12K_WMI_MLO_MAX_LINKS];
+	u64 zero_wait_count_rx_count[ATH12K_WMI_MLO_MAX_LINKS];
+	u64 immediate_delivery_count[ATH12K_WMI_MLO_MAX_LINKS];
+	u64 stale_count[ATH12K_WMI_MLO_MAX_LINKS]
+		[ATH12K_MGMT_RX_REO_FRAME_DESC_TYPE_MAX];
+	u64 error_count[ATH12K_WMI_MLO_MAX_LINKS]
+		[ATH12K_MGMT_RX_REO_FRAME_DESC_TYPE_MAX];
+};
+
+#define ATH12K_MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE	(785)
+
+/* struct reo_ingress_debug_info - Circular array to store the
+ * debug information about the frames entering the reorder process.
+ * @frame_list: Circular array to store the debug info about frames
+ * @frame_list_size: Size of circular array @frame_list
+ * @next_index: The index at which information about next frame will be logged
+ * @wrap_aroud: Flag to indicate whether wrap around occurred when logging
+ * debug information to @frame_list
+ * @stats: Stats related to incoming frames
+ * @boarder: boarder string
+ */
+struct reo_ingress_debug_info {
+	struct ath12k_reo_ingress_debug_frame_info *frame_list;
+	u16 frame_list_size;
+	int next_index;
+	bool wrap_aroud;
+	struct ath12k_reo_ingress_frame_stats stats;
+	char boarder[ATH12K_MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE + 1];
+};
+
+/* struct ath12k_reo_egress_debug_frame_info - Debug information about a frame
+ * leaving the reorder module
+ * @is_delivered: Indicates whether the frame is delivered to upper layers
+ * @is_premature_delivery: Indicates whether the frame is delivered
+ * prematurely
+ * @link_id: link id
+ * @mgmt_pkt_ctr: management packet counter
+ * @global_timestamp: MLO global time stamp
+ * @ingress_timestamp: Host time stamp when the frame enters the reorder module
+ * @insertion_ts: Host time stamp when the frame is inserted into the reorder
+ * list
+ * @egress_timestamp: Host time stamp just before delivery of the frame to upper
+ * layer
+ * @egress_duration: Duration in us taken by the upper layer to process
+ * the frame.
+ * @removal_ts: Host time stamp when this entry is removed from the list
+ * @initial_wait_count: Wait count when the frame is queued
+ * @final_wait_count: Wait count when frame is released to upper layer
+ * @release_reason: Reason for delivering the frame to upper layers
+ * @shared_snapshots: snapshots shared b/w host and target
+ * @host_snapshot: host snapshot
+ * @cpu_id: CPU index
+ */
+struct ath12k_reo_egress_debug_frame_info {
+	bool is_delivered;
+	bool is_premature_delivery;
+	u8 link_id;
+	u16 mgmt_pkt_ctr;
+	u32 global_timestamp;
+	u64 ingress_timestamp;
+	u64 insertion_ts;
+	u64 egress_timestamp;
+	u64 egress_duration;
+	u64 removal_ts;
+	struct ath12k_mgmt_rx_reo_wait_count initial_wait_count;
+	struct ath12k_mgmt_rx_reo_wait_count final_wait_count;
+	u8 release_reason;
+	struct ath12k_mgmt_rx_reo_snapshot_params shared_snapshots
+		[ATH12K_WMI_MLO_MAX_LINKS][ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
+	struct ath12k_mgmt_rx_reo_snapshot_params host_snapshot[ATH12K_WMI_MLO_MAX_LINKS];
+	int cpu_id;
+};
+
+/* Reason to release an entry from the reorder list */
+#define MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_ZERO_WAIT_COUNT           (BIT(0))
+#define MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_AGED_OUT                  (BIT(1))
+#define MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME (BIT(2))
+#define MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_LIST_MAX_SIZE_EXCEEDED    (BIT(3))
+#define MGMT_RX_REO_RELEASE_REASON_MAX      \
+    (MGMT_RX_REO_LIST_ENTRY_RELEASE_REASON_LIST_MAX_SIZE_EXCEEDED << 1)
+
+/**
+ * struct reo_egress_frame_stats - Structure to store statistics related to
+ * outgoing frames
+ * @delivery_attempts_count: Number of attempts to deliver management
+ * frames to upper layers
+ * @delivery_success_count: Number of successful management frame
+ * deliveries to upper layer
+ * @premature_delivery_count:  Number of frames delivered
+ * prematurely. Premature delivery is the delivery of a management frame
+ * to the upper layers even before its wait count is reaching zero.
+ * @delivery_count: Number of frames delivered successfully for
+ * each link and release reason.
+ */
+struct reo_egress_frame_stats {
+	u64 delivery_attempts_count[ATH12K_WMI_MLO_MAX_LINKS];
+	u64 delivery_success_count[ATH12K_WMI_MLO_MAX_LINKS];
+	u64 premature_delivery_count[ATH12K_WMI_MLO_MAX_LINKS];
+	u64 delivery_count[ATH12K_WMI_MLO_MAX_LINKS]
+		[MGMT_RX_REO_RELEASE_REASON_MAX];
+};
+
+#define ATH12K_MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE   (816)
+
+/**
+ * struct ath12k_reo_egress_debug_info - Circular array to store the
+ * debug information about the frames leaving the reorder module.
+ * @frame_list: Circular array to store the debug info
+ * @frame_list_size: Size of circular array @frame_list
+ * @next_index: The index at which information about next frame will be logged
+ * @wrap_aroud: Flag to indicate whether wrap around occurred when logging
+ * debug information to @frame_list
+ * @stats: Stats related to outgoing frames
+ * @boarder: boarder string
+ */
+struct ath12k_reo_egress_debug_info {
+	struct ath12k_reo_egress_debug_frame_info *frame_list;
+	u16 frame_list_size;
+	int next_index;
+	bool wrap_aroud;
+	struct reo_egress_frame_stats stats;
+	char boarder[ATH12K_MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE + 1];
+};
+
+/**
+ * struct mgmt_rx_reo_list - Linked list used to reorder the management frames
+ * received. Each list entry would correspond to a management frame. List
+ * entries would be sorted in the same order in which they are received by MAC
+ * HW.
+ * @list: List used for reordering
+ * @list_lock: Lock to protect the list
+ * @max_list_size: Maximum size of the reorder list
+ * @list_entry_timeout_us: Time out value (microseconds) for the reorder list
+ * entries
+ * @ageout_timer: Periodic timer to age-out the list entries
+ * @global_mgmt_rx_inactivity_timer: Global management Rx inactivity timer
+ * @ts_last_released_frame: Stores the global time stamp for the last frame
+ * removed from the reorder list
+ */
+struct mgmt_rx_reo_list {
+	struct ath12k_base *ab;
+	struct list_head list;
+	int count;
+	/* protects the list used for reordering */
+	spinlock_t list_lock;
+	u32 max_list_size;
+	u32 list_entry_timeout_us;
+	struct timer_list ageout_timer;
+	struct timer_list global_mgmt_rx_inactivity_timer;
+	struct ath12k_mgmt_rx_reo_global_ts_info ts_last_released_frame;
+};
+
+/**
+ * struct ath12k_mgmt_rx_reo_context - This structure holds the info required for
+ * management rx-reordering. Reordering is done across all the psocs.
+ * So there should be only one instance of this structure defined.
+ * @reo_list: Linked list used for reordering
+ * @rx_reorder_entry_lock: Spin lock to protect rx reorder process entry critical
+ * section execution
+ * @frame_release_lock: Spin lock to serialize the frame delivery to the
+ * upper layers. This could prevent race conditions like the one given in
+ * the following example.
+ * Lets take an example of 2 links (Link A & B) and each has received
+ * a management frame A1(deauth) and B1(auth) such that MLO global time
+ * stamp of A1 < MLO global time stamp of B1. Host is concurrently
+ * executing "mgmt_rx_reo_list_release_entries" for A1 and B1 in
+ * 2 different CPUs. It is possible that frame B1 gets processed by
+ * upper layers before frame A1 and this could result in unwanted
+ * disconnection. Hence it is required to serialize the delivery
+ * of management frames to upper layers in the strict order of MLO
+ * global time stamp.
+ * @sim_context: Management rx-reorder simulation context
+ * @ingress_debug_info_init_count: Initialization count of
+ * object @ingress_frame_debug_info
+ * @ingress_frame_debug_info: Debug object to log incoming frames
+ * @egress_frame_debug_info: Debug object to log outgoing frames
+ * @egress_debug_info_init_count: Initialization count of
+ * object @egress_frame_debug_info
+ * @simulation_in_progress: Flag to indicate whether simulation is
+ * in progress
+ * @init_done: This will indicate initialization of management rx-reorder list.
+ * @timer_init_done: This will indicate firing of management rx-reorder timer.
+ */
+struct ath12k_mgmt_rx_reo_context {
+	struct mgmt_rx_reo_list reo_list;
+	/* Protects the rx reorder process entry critical section exec */
+	spinlock_t rx_reorder_entry_lock;
+	/* Lock to Serialize the frame delivery */
+	spinlock_t frame_release_lock;
+	atomic_t ingress_debug_info_init_count;
+	struct  reo_ingress_debug_info ingress_frame_debug_info;
+	atomic_t egress_debug_info_init_count;
+	struct  ath12k_reo_egress_debug_info egress_frame_debug_info;
+	bool init_done;
+	bool timer_init_done;
+};
+
+/** MGMT RX REO Changes */
+/* Macros for having versioning info for compatibility check between host and firmware */
+#define MLO_SHMEM_MAJOR_VERSION 2
+#define MLO_SHMEM_MINOR_VERSION 1
+
+/**
+ * Enum which defines different versions of management Rx reorder snapshots.
+ */
+enum {
+	/**
+	 * DWORD Lower:
+	 * [15:0]  : Management packet counter
+	 * [30:16] : Redundant global time stamp = Global time stamp[14:0]
+	 * [31]    : Valid
+	 *
+	 * DWORD Upper:
+	 * [31:0]  : Global time stamp
+	 *
+	 */
+	MGMT_RX_REO_SNAPSHOT_VERSION_TIMESTAMP_REDUNDANCY = 0,
+
+	/**
+	 * DWORD Lower:
+	 * [14:0]  : Global time stamp[14:0]
+	 * [30:15] : Management packet counter
+	 * [31]    : Valid
+	 *
+	 * DWORD Upper:
+	 * [14:0]  : Redundant management packet counter = Management packet
+	 *           counter[14:0]
+	 * [31:15] : Global time stamp[31:15]
+	 */
+	MGMT_RX_REO_SNAPSHOT_VERSION_PKT_CTR_REDUNDANCY = 1,
+};
+
+#define ATH12K_MGMT_RX_REO_INVALID_SNAPSHOT_VERSION      (-1)
+
+/**
+ * struct mgmt_rx_reo_list_entry - Entry in the Management reorder list
+ * @node: List node
+ * @nbuf: nbuf corresponding to this frame
+ * @rx_params: Management rx event parameters
+ * @wait_count: Wait counts for the frame
+ * @initial_wait_count: Wait count when the frame is queued
+ * @insertion_ts: Host time stamp when this entry is inserted to the list.
+ * @removal_ts: Host time stamp when this entry is removed from the list
+ * @ingress_timestamp: Host time stamp when this frame has arrived at the
+ * reorder module
+ * @egress_timestamp: Host time stamp when this frame has exited reorder
+ * module
+ * @status: Status for this entry
+ * @pdev: Pointer to pdev object corresponding to this frame
+ * @release_reason: Release reason
+ * @is_delivered: Indicates whether the frame is delivered successfully
+ * @is_premature_delivery: Indicates whether the frame is delivered
+ * prematurely
+ * @is_parallel_rx: Indicates that this frame is received in parallel to the
+ * last frame which is delivered to the upper layer.
+ * @shared_snapshots: snapshots shared b/w host and target
+ * @host_snapshot: host snapshot
+ */
+struct mgmt_rx_reo_list_entry {
+	struct list_head node;
+	struct sk_buff *nbuf;
+	struct ath12k_wmi_mgmt_rx_arg *rx_params;
+	struct ath12k_mgmt_rx_reo_wait_count wait_count;
+	struct ath12k_mgmt_rx_reo_wait_count initial_wait_count;
+	u64 insertion_ts;
+	u64 removal_ts;
+	u64 ingress_timestamp;
+	u64 egress_timestamp;
+	u32 status;
+	struct ath12k *ar;
+	u8 release_reason;
+	bool is_delivered;
+	bool is_premature_delivery;
+	bool is_parallel_rx;
+	struct ath12k_mgmt_rx_reo_snapshot_params shared_snapshots
+		[ATH12K_WMI_MLO_MAX_LINKS][ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
+	struct ath12k_mgmt_rx_reo_snapshot_params host_snapshot[ATH12K_WMI_MLO_MAX_LINKS];
+};
+
+#define ATH12K_MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS         (BIT(0))
+#define ATH12K_MGMT_RX_REO_STATUS_AGED_OUT                              (BIT(1))
+#define ATH12K_MGMT_RX_REO_STATUS_OLDER_THAN_LATEST_AGED_OUT_FRAME      (BIT(2))
+#define ATH12K_MGMT_RX_REO_STATUS_LIST_MAX_SIZE_EXCEEDED                (BIT(3))
+
+#define ATH12K_MGMT_RX_REO_LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(entry)   \
+	((entry)->status & ATH12K_MGMT_RX_REO_STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS)
+#define ATH12K_MGMT_RX_REO_LIST_ENTRY_IS_AGED_OUT(entry)   \
+	((entry)->status & ATH12K_MGMT_RX_REO_STATUS_AGED_OUT)
+#define ATH12K_MGMT_RX_REO_LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME(entry)  \
+	((entry)->status & ATH12K_MGMT_RX_REO_STATUS_OLDER_THAN_LATEST_AGED_OUT_FRAME)
+#define ATH12K_MGMT_RX_REO_LIST_ENTRY_IS_MAX_SIZE_EXCEEDED(entry)  \
+	((entry)->status & ATH12K_MGMT_RX_REO_STATUS_LIST_MAX_SIZE_EXCEEDED)
+
+#define ATH12K_MGMT_RX_REO_GLOBAL_MGMT_RX_INACTIVITY_TIMEOUT	(600 * HZ)
+#define ATH12K_MGMT_RX_REO_LIST_MAX_SIZE             (100)
+#define ATH12K_MGMT_RX_REO_LIST_TIMEOUT_US           (500000)
+#define ATH12K_MGMT_RX_REO_AGEOUT_TIMER_PERIOD_MS    (250)
+
+#define ATH12K_MGMT_RX_REO_PKT_CTR_HALF_RANGE	(0x8000)
+#define ATH12K_MGMT_RX_REO_PKT_CTR_FULL_RANGE	(ATH12K_MGMT_RX_REO_PKT_CTR_HALF_RANGE << 1)
+
+#define CU_VDEV_MAP_LB GENMASK(15, 0)
+#define CU_VDEV_MAP_HB GENMASK(31, 16)
+/* Maximum number of CU LINKS across the system.
+ * this is not the CU links within an AP MLD.
+ */
+#define CU_MAX_MLO_LINKS 8
+#define MAX_AP_MLDS_PER_LINK 16
+
+struct ath12k_wmi_mgmt_rx_cu_params {
+	/* CU vdev map to intimate about the on-going Critical update
+	 * per-link contains 16 VAPs at max.
+	 */
+
+	/* bits    : 0-15 | 16-31
+	 * link-id :  0   |   1
+	 */
+	__le32 cu_vdev_map_1;
+	/* bits    : 0-15 | 16-31
+	 * link-id :  2   |   3
+	 */
+	__le32 cu_vdev_map_2;
+	/* bits    : 0-15 | 16-31
+	 * link-id :  4   |   5
+	 */
+	__le32 cu_vdev_map_3;
+	/* bits    : 0-15 | 16-31
+	 * link-id :  6   |   7
+	 */
+	__le32 cu_vdev_map_4; /* bits 63:32 */
+};
+
+struct ath12k_mgmt_rx_cu_arg {
+	u16 cu_vdev_map[CU_MAX_MLO_LINKS];
+	u8 *bpcc_bufp;
+};
+
 struct ath12k_wmi_mgmt_rx_arg {
 	u32 chan_freq;
 	u32 channel;
@@ -4028,6 +6418,61 @@
 	int rssi;
 	u32 tsf_delta;
 	u8 pdev_id;
+	struct ath12k_mgmt_rx_reo_params reo_params;
+};
+
+/**
+ * struct ath12k_mgmt_rx_reo_frame_descriptor - Frame Descriptor used to describe
+ * a management frame in mgmt rx reo module.
+ * @type: Frame descriptor type
+ * @frame_type: frame type
+ * @frame_subtype: frame subtype
+ * @nbuf: nbuf corresponding to this frame
+ * @rx_params: Management rx event parameters
+ * @wait_count: Wait counts for the frame
+ * @ingress_timestamp: Host time stamp when the frame enters the reorder
+ * process
+ * @is_stale: Indicates whether this frame is stale. Any frame older than the
+ * last frame delivered to upper layer is a stale frame. Stale frames should not
+ * be delivered to the upper layers. These frames can be discarded after
+ * updating the host snapshot and wait counts of entries currently residing in
+ * the reorder list.
+ * @zero_wait_count_rx: Indicates whether this frame's wait count was
+ * zero when received by host
+ * @immediate_delivery: Indicates whether this frame can be delivered
+ * immediately to the upper layers
+ * @list_size_rx: Size of the reorder list when this frame is received (before
+ * updating the list based on this frame).
+ * @list_insertion_pos: Position in the reorder list where this frame is going
+ * to get inserted (Applicable for only host consumed frames)
+ * @shared_snapshots: snapshots shared b/w host and target
+ * @host_snapshot: host snapshot
+ * @is_parallel_rx: Indicates that this frame is received in parallel to the
+ * last frame which is delivered to the upper layer.
+ * @pkt_ctr_delta: Packet counter delta of the current and last frame
+ * @reo_required: Indicates whether reorder is required for the current frame.
+ * If reorder is not required, current frame will just be used for updating the
+ * wait count of frames already part of the reorder list.
+ */
+struct ath12k_mgmt_rx_reo_frame_descriptor {
+	enum ath12k_mgmt_rx_reo_frame_descriptor_type type;
+	u8 frame_type;
+	u8 frame_subtype;
+	struct sk_buff *nbuf;
+	struct ath12k_wmi_mgmt_rx_arg *rx_params;
+	struct ath12k_mgmt_rx_reo_wait_count wait_count;
+	u64 ingress_timestamp;
+	bool is_stale;
+	bool zero_wait_count_rx;
+	bool immediate_delivery;
+	s16 list_size_rx;
+	s16 list_insertion_pos;
+	struct ath12k_mgmt_rx_reo_snapshot_params shared_snapshots
+		[ATH12K_WMI_MLO_MAX_LINKS][ATH12K_MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
+	struct ath12k_mgmt_rx_reo_snapshot_params host_snapshot[ATH12K_WMI_MLO_MAX_LINKS];
+	bool is_parallel_rx;
+	int pkt_ctr_delta;
+	bool reo_required;
 };
 
 #define ATH_MAX_ANTENNA 4
@@ -4055,6 +6500,8 @@
 	__le32 desc_id;
 	__le32 status;
 	__le32 pdev_id;
+	__le32 ppdu_id;
+	__le32 ack_rssi;
 } __packed;
 
 struct wmi_scan_event {
@@ -4072,12 +6519,33 @@
 	__le32 tsf_timestamp;
 } __packed;
 
+struct wmi_offchan_data_tx_compl_event {
+	__le32 desc_id;
+	__le32 status;
+	__le32 pdev_id;
+	__le32 ppdu_id;
+} __packed;
+
+enum wmi_peer_sta_kickout_reason {
+	WMI_PEER_STA_KICKOUT_REASON_UNSPECIFIED = 0,
+	WMI_PEER_STA_KICKOUT_REASON_XRETRY = 1,
+	WMI_PEER_STA_KICKOUT_REASON_INACTIVITY = 2,
+	WMI_PEER_STA_KICKOUT_REASON_IBSS_DISCONNECT = 3,
+	WMI_PEER_STA_KICKOUT_REASON_TDLS_DISCONNECT = 4,
+	WMI_PEER_STA_KICKOUT_REASON_SA_QUERY_TIMEOUT = 5,
+	WMI_PEER_STA_KICKOUT_REASON_ROAMING_EVENT = 6,
+};
+
 struct wmi_peer_sta_kickout_arg {
-	const u8 *mac_addr;
+	u8 mac_addr[ETH_ALEN];
+	__le32 reason;
+	__le32 rssi;
 };
 
 struct wmi_peer_sta_kickout_event {
 	struct ath12k_wmi_mac_addr_params peer_macaddr;
+	__le32 reason;
+	__le32 rssi;
 } __packed;
 
 enum wmi_roam_reason {
@@ -4138,6 +6606,7 @@
 };
 
 enum wmi_vdev_type {
+	WMI_VDEV_TYPE_UNSPEC  = 0,
 	WMI_VDEV_TYPE_AP      = 1,
 	WMI_VDEV_TYPE_STA     = 2,
 	WMI_VDEV_TYPE_IBSS    = 3,
@@ -4262,7 +6731,7 @@
 
 #define DISABLE_SIFS_RESPONSE_TRIGGER 0
 
-#define WMI_MAX_KEY_INDEX   3
+#define WMI_MAX_KEY_INDEX   7
 #define WMI_MAX_KEY_LEN     32
 
 enum wmi_key_type {
@@ -4304,6 +6773,7 @@
 	WMI_RATE_PREAMBLE_HT,
 	WMI_RATE_PREAMBLE_VHT,
 	WMI_RATE_PREAMBLE_HE,
+	WMI_RATE_PREAMBLE_EHT,
 };
 
 /**
@@ -4353,6 +6823,119 @@
 	ATH12K_HW_TXRX_ETHERNET = 2,
 };
 
+enum wmi_coex_config_type {
+        WMI_COEX_CONFIG_PAGE_P2P_TDM            = 1,
+        WMI_COEX_CONFIG_PAGE_STA_TDM            = 2,
+        WMI_COEX_CONFIG_PAGE_SAP_TDM            = 3,
+        WMI_COEX_CONFIG_DURING_WLAN_CONN        = 4,
+        WMI_COEX_CONFIG_BTC_ENABLE              = 5,
+        WMI_COEX_CONFIG_COEX_DBG                = 6,
+        WMI_COEX_CONFIG_PAGE_P2P_STA_TDM        = 7,
+        WMI_COEX_CONFIG_INQUIRY_P2P_TDM         = 8,
+        WMI_COEX_CONFIG_INQUIRY_STA_TDM         = 9,
+        WMI_COEX_CONFIG_INQUIRY_SAP_TDM         = 10,
+        WMI_COEX_CONFIG_INQUIRY_P2P_STA_TDM     = 11,
+        WMI_COEX_CONFIG_TX_POWER                = 12,
+        WMI_COEX_CONFIG_PTA_CONFIG              = 13,
+        WMI_COEX_CONFIG_AP_TDM                  = 14,
+        WMI_COEX_CONFIG_WLAN_SCAN_PRIORITY      = 15,
+        WMI_COEX_CONFIG_WLAN_PKT_PRIORITY       = 16,
+        WMI_COEX_CONFIG_PTA_INTERFACE           = 17,
+        WMI_COEX_CONFIG_THREE_WAY_COEX_RESET    = 32,
+        WMI_COEX_CONFIG_THREE_WAY_COEX_START    = 34,
+        /* WMI_COEX_CONFIG_FORCED_ALGO
+         * config to select coex algorithm
+         * coex_algo: select fixed coex algorithm
+         */
+        WMI_COEX_CONFIG_FORCED_ALGO             = 47,
+};
+
+struct coex_config_arg {
+        u32 vdev_id;
+        u32 config_type;
+        union {
+                struct {
+                        u32 coex_enable;
+                };
+
+                struct {
+                        u32 pta_num;
+                        u32 coex_mode;
+                        u32 bt_txrx_time;
+                        u32 bt_priority_time;
+                        u32 pta_algorithm;
+                        u32 pta_priority;
+                };
+
+                struct {
+                        u32 wlan_pkt_type;
+                        u32 wlan_pkt_type_continued;
+                        u32 wlan_pkt_weight;
+                        u32 bt_pkt_weight;
+                };
+                struct {
+                        u32 duty_cycle;
+                        u32 wlan_duration;
+                };
+                struct {
+                        u32 coex_algo;
+                };
+                struct {
+                        u32 priority0;
+                        u32 priority1;
+                        u32 priority2;
+                        u32 config_arg4;
+                        u32 config_arg5;
+                        u32 config_arg6;
+                };
+        };
+};
+
+struct wmi_coex_config_cmd {
+        u32 tlv_header;
+        u32 vdev_id;
+        u32 config_type;
+        union {
+                struct {
+                        u32 coex_enable;
+                } __packed;
+
+                struct {
+                        u32 pta_num;
+                        u32 coex_mode;
+                        u32 bt_txrx_time;
+                        u32 bt_priority_time;
+                        u32 pta_algorithm;
+                        u32 pta_priority;
+                } __packed;
+
+                struct {
+                        u32 wlan_pkt_type;
+                        u32 wlan_pkt_type_continued;
+                        u32 wlan_pkt_weight;
+                        u32 bt_pkt_weight;
+                } __packed;
+
+                struct {
+                        u32 duty_cycle;
+                        u32 wlan_duration;
+                } __packed;
+
+                struct {
+                        u32 coex_algo;
+                } __packed;
+
+                struct {
+                        u32 priority0;
+                        u32 priority1;
+                        u32 priority2;
+                        u32 config_arg4;
+                        u32 config_arg5;
+                        u32 config_arg6;
+                } __packed;
+        } __packed;
+} __packed;
+
 struct wmi_wmm_params {
 	__le32 tlv_header;
 	__le32 cwmin;
@@ -4428,6 +7011,465 @@
 	__le32 pdev_id;
 } __packed;
 
+enum WMI_HOST_TWT_COMMAND {
+	WMI_HOST_TWT_COMMAND_REQUEST_TWT = 0,
+	WMI_HOST_TWT_COMMAND_SUGGEST_TWT,
+	WMI_HOST_TWT_COMMAND_DEMAND_TWT,
+	WMI_HOST_TWT_COMMAND_TWT_GROUPING,
+	WMI_HOST_TWT_COMMAND_ACCEPT_TWT,
+	WMI_HOST_TWT_COMMAND_ALTERNATE_TWT,
+	WMI_HOST_TWT_COMMAND_DICTATE_TWT,
+	WMI_HOST_TWT_COMMAND_REJECT_TWT,
+};
+
+#define WMI_TWT_ADD_DIALOG_FLAG_BCAST           BIT(8)
+#define WMI_TWT_ADD_DIALOG_FLAG_TRIGGER         BIT(9)
+#define WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE       BIT(10)
+#define WMI_TWT_ADD_DIALOG_FLAG_PROTECTION      BIT(11)
+
+struct wmi_twt_add_dialog_params_cmd {
+	u32 tlv_header;
+	u32 vdev_id;
+	struct ath12k_wmi_mac_addr_params peer_macaddr;
+	u32 dialog_id;
+	u32 wake_intvl_us;
+	u32 wake_intvl_mantis;
+	u32 wake_dura_us;
+	u32 sp_offset_us;
+	u32 flags;
+} __packed;
+
+struct wmi_twt_add_dialog_params {
+	u32 vdev_id;
+	u8 peer_macaddr[ETH_ALEN];
+	u32 dialog_id;
+	u32 wake_intvl_us;
+	u32 wake_intvl_mantis;
+	u32 wake_dura_us;
+	u32 sp_offset_us;
+	u8 twt_cmd;
+	u8 flag_bcast;
+	u8 flag_trigger;
+	u8 flag_flow_type;
+	u8 flag_protection;
+} __packed;
+
+enum  wmi_twt_add_dialog_status {
+	WMI_ADD_TWT_STATUS_OK,
+	WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED,
+	WMI_ADD_TWT_STATUS_USED_DIALOG_ID,
+	WMI_ADD_TWT_STATUS_INVALID_PARAM,
+	WMI_ADD_TWT_STATUS_NOT_READY,
+	WMI_ADD_TWT_STATUS_NO_RESOURCE,
+	WMI_ADD_TWT_STATUS_NO_ACK,
+	WMI_ADD_TWT_STATUS_NO_RESPONSE,
+	WMI_ADD_TWT_STATUS_DENIED,
+	WMI_ADD_TWT_STATUS_UNKNOWN_ERROR,
+};
+
+struct wmi_twt_add_dialog_event {
+	u32 vdev_id;
+	struct ath12k_wmi_mac_addr_params peer_macaddr;
+	u32 dialog_id;
+	u32 status;
+} __packed;
+
+struct wmi_twt_del_dialog_params {
+	u32 vdev_id;
+	u8 peer_macaddr[ETH_ALEN];
+	u32 dialog_id;
+} __packed;
+
+struct wmi_twt_del_dialog_params_cmd {
+	u32 tlv_header;
+	u32 vdev_id;
+	struct ath12k_wmi_mac_addr_params peer_macaddr;
+	u32 dialog_id;
+} __packed;
+
+struct wmi_twt_pause_dialog_params {
+	u32 vdev_id;
+	u8 peer_macaddr[ETH_ALEN];
+	u32 dialog_id;
+} __packed;
+
+struct wmi_twt_pause_dialog_params_cmd {
+	u32 tlv_header;
+	u32 vdev_id;
+	struct ath12k_wmi_mac_addr_params peer_macaddr;
+	u32 dialog_id;
+} __packed;
+
+struct wmi_twt_resume_dialog_params {
+	u32 vdev_id;
+	u8 peer_macaddr[ETH_ALEN];
+	u32 dialog_id;
+	u32 sp_offset_us;
+	u32 next_twt_size;
+} __packed;
+
+struct wmi_twt_resume_dialog_params_cmd {
+	u32 tlv_header;
+	u32 vdev_id;
+	struct ath12k_wmi_mac_addr_params peer_macaddr;
+	u32 dialog_id;
+	u32 sp_offset_us;
+	u32 next_twt_size;
+} __packed;
+
+struct wmi_sawf_svc_cfg_cmd_fixed_param {
+	u32 tlv_header; /* TLV tag and len */
+			/* Tag equals WMI_TAG_SAWF_SERVICE_CLASS_CFG_CMD_FIXED_PARAM */
+	u32 svc_class_id; /* which service class is being configured */
+	/*-----
+	 * The below fields specify the values for the parameters of the
+	 * service class being configured.
+	 * Each such service class parameter has a default value specified in the
+	 * above WMI_SAWF_SVC_CLASS_PARAM_DEFAULTS enum.
+	 * This default value shall be specified for service classes where
+	 * the parameter in question is not applicable.
+	 * For example, for service classes that have no minimum throughput
+	 * requirement, the min_thruput_kbps field should be set to
+	 * WMI_SAWF_SVC_CLASS_PARAM_DEFAULT_MIN_THRUPUT, i.e. 0.
+	 *-----
+	 */
+	/* min_thruput_kbps:
+	 * How much throughput should be "guaranteed" for each MSDU queue
+	 * belonging to this service class.
+	 * Units are kilobits per second.
+	 */
+	u32 min_thruput_kbps;
+	/* max_thruput_kbps:
+	 * What upper limit on throughput shall be applied to MSDU queues belonging
+	 * to this service class, if other peer-TIDs are not meeting their QoS
+	 * service goals.
+	 * Units are kilobits per second.
+	 */
+	u32 max_thruput_kbps;
+	/* burst_size_bytes:
+	 * How much data (i.e. how many MSDUs) should be pulled from a
+	 * MSDU queue belonging to this service class to be formed into MPDUs
+	 * and enqueued for transmission.
+	 * Similarly, how long should a tx op be for MPDUs containing MSDUs from
+	 * this service class, to ensure that the necessary amount of data gets
+	 * delivered to the peer.
+	 * Units are bytes.
+	 */
+	u32 burst_size_bytes;
+	/* svc_interval_ms:
+	 * How frequently MSDUs belonging to this service class should be
+	 * formed into MPDUs and enqueued for transmission.
+	 * The svc_interval_ms parameter is expected to be <= the delay_bound_ms
+	 * parameter.
+	 * Units are milliseconds.
+	 */
+	u32 svc_interval_ms;
+	/* delay_bound_ms:
+	 * How promptly the MSDUs belonging to this service class need to be
+	 * delivered to the recipient peer.
+	 * Units are milliseconds.
+	 */
+	u32 delay_bound_ms;
+	/* time_to_live_ms:
+	 * How long MSDUs belonging to this service class remain valid.
+	 * If the MSDU has not been successfully transmitted before this
+	 * time-to-live time has elapsed, the MSDU should be discarded.
+	 * The time_to_live_ms parameter is expected to be >= the delay_bound_ms
+	 * parameter.
+	 * Units are milliseconds.
+	 */
+	u32 time_to_live_ms;
+	/* priority:
+	 * What degree of precedence shall the WLAN FW's tx scheduler use
+	 * when considering whether to transmit MPDUs generated from MSDUs
+	 * belonging to this service class.
+	 */
+	u32 priority;
+	/* tid:
+	 * Which WLAN TID shall be used for delivering traffic of this
+	 * service class.
+	 */
+	u32 tid;
+	/* msdu_loss_rate_ppm:
+	 * This parameter indicates the acceptable rate of MSDU loss.
+	 * Units are parts per million.
+	 * E.g. if it is acceptable for 1 MSDU of every 10000 to be lost,
+	 * the msdu_loss_rate_ppm value would be 100,
+	 * since 100 / 1000000 = 1 / 10000.
+	 */
+	u32 msdu_loss_rate_ppm;
+} __packed;
+
+struct wmi_sawf_svc_disable_cmd_fixed_param {
+	u32 tlv_header; /* TLV tag and len*/
+			/* Tag equals WMI_TAG_SAWF_SERVICE_CLASS_DISABLE_CMD_FIXED_PARAM*/
+	u32 svc_class_id; /* which service class is being disabled */
+} __packed;
+
+/**
+ * WMI arrays of length WMI_MGMT_FRAME_SUBTYPE_MAX use the
+ * IEEE802.11 standard's enumeration of mgmt frame subtypes:
+ *  0 -> IEEE80211_STYPE_FC0_SUBTYPE_ASSOC_REQ
+ *  1 -> IEEE80211_STYPE_FC0_SUBTYPE_ASSOC_RESP
+ *  2 -> IEEE80211_STYPE_FC0_SUBTYPE_REASSOC_REQ
+ *  3 -> IEEE80211_STYPE_FC0_SUBTYPE_REASSOC_RESP
+ *  4 -> IEEE80211_STYPE_FC0_SUBTYPE_PROBE_REQ
+ *  5 -> IEEE80211_STYPE_FC0_SUBTYPE_PROBE_RESP
+ *  6 -> Reserved
+ *  7 -> Reserved
+ *  8 -> IEEE80211_STYPE_FC0_SUBTYPE_BEACON
+ *  9 -> IEEE80211_STYPE_FC0_SUBTYPE_ATIM
+ * 10 -> IEEE80211_STYPE_FC0_SUBTYPE_DISASSOC
+ * 11 -> IEEE80211_STYPE_FC0_SUBTYPE_AUTH
+ * 12 -> IEEE80211_STYPE_FC0_SUBTYPE_DEAUTH
+ * 13 -> IEEE80211_STYPE_FC0_SUBTYPE_ACTION
+ * 14 -> IEEE80211_STYPE_FC0_SUBTYPE_ACTION_NOACK
+ * 15 -> IEEE80211_STYPE_FC0_SUBTYPE_RESERVED
+ */
+#define WMI_MGMT_FRAME_SUBTYPE_MAX 16
+#define WMI_MAX_STRING_LEN 256
+
+enum wmi_ctrl_path_cal_prof_id {
+	WMI_CTRL_PATH_STATS_CAL_PROF_COLD_BOOT_CAL = 0,
+	WMI_CTRL_PATH_STATS_CAL_PROF_FULL_CHAN_SWITCH,
+	WMI_CTRL_PATH_STATS_CAL_PROF_SCAN_CHAN_SWITCH,
+	WMI_CTRL_PATH_STATS_CAL_PROF_DPD_SPLIT_CAL,
+	WMI_CTRL_PATH_STATS_CAL_PROF_TEMP_TRIGEER_CAL,
+	WMI_CTRL_PATH_STATS_CAL_PROF_POWER_SAVE_WAKE_UP,
+	WMI_CTRL_PATH_STATS_CAL_PROF_TIMER_TRIGGER_CAL,
+	WMI_CTRL_PATH_STATS_CAL_PROF_FTM_TRIGGER_CAL,
+	WMI_CTRL_PATH_STATS_CAL_PROF_AGILE_OR_POWER_DOWN_DTIM,
+	WMI_CTRL_PATH_STATS_CAL_PROF_NOISY_ENV_RXDO,
+	/* IDs from 10 to 30 for future use*/
+	WMI_CTRL_PATH_STATS_CAL_PROFILE_INVALID = 0x1F,
+};
+
+static inline const char *
+wmi_ctrl_path_cal_prof_id_to_name(u8 prof_id) {
+	switch (prof_id) {
+	case WMI_CTRL_PATH_STATS_CAL_PROF_COLD_BOOT_CAL:
+		return "COLD_BOOT_CAL";
+	case WMI_CTRL_PATH_STATS_CAL_PROF_FULL_CHAN_SWITCH:
+		return "FULL_CHAN_SWITCH";
+	case WMI_CTRL_PATH_STATS_CAL_PROF_SCAN_CHAN_SWITCH:
+		return "SCAN_CHAN_SWITCH";
+	case WMI_CTRL_PATH_STATS_CAL_PROF_DPD_SPLIT_CAL:
+		return "DPD_SPLIT_CAL";
+	case WMI_CTRL_PATH_STATS_CAL_PROF_TEMP_TRIGEER_CAL:
+		return "TEMP_TRIGEER_CAL";
+	case WMI_CTRL_PATH_STATS_CAL_PROF_POWER_SAVE_WAKE_UP:
+		return "POWER_SAVE_WAKE_UP";
+	case WMI_CTRL_PATH_STATS_CAL_PROF_TIMER_TRIGGER_CAL:
+		return "TIMER_TRIGGER_CAL";
+	case WMI_CTRL_PATH_STATS_CAL_PROF_FTM_TRIGGER_CAL:
+		return "FTM_TRIGGER_CAL";
+	case WMI_CTRL_PATH_STATS_CAL_PROF_AGILE_OR_POWER_DOWN_DTIM:
+		return "AGILE_OR_POWER_DOWN_DTIM";
+	case WMI_CTRL_PATH_STATS_CAL_PROF_NOISY_ENV_RXDO:
+		return "NOISY_ENV_RXDO";
+	case WMI_CTRL_PATH_STATS_CAL_PROFILE_INVALID:
+		break;
+	}
+	return "UNKOWN_CAL_PROFILE";
+}
+
+enum wmi_ctrl_path_cal_type_id {
+	WMI_CTRL_PATH_STATS_CAL_TYPE_ADC = 0,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_DAC,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_PROCESS,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_NOISE_FLOOR,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_RXDCO,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_COMB_TXLO_TXIQ_RXIQ,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_TXLO,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_TXIQ,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_RXIQ,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_IM2,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_LNA,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_DPD_LP_RXDCO,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_DPD_LP_RXIQ,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_DPD_MEMORYLESS,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_DPD_MEMORY,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_IBF,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_PDET_AND_PAL,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_RXDCO_IQ,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_RXDCO_DTIM,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_TPC_CAL,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_DPD_TIMEREQ,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_BWFILTER,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_PEF,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_PADROOP,
+	WMI_CTRL_PATH_STATS_CAL_TYPE_SELFCALTPC,
+	/* IDs 25 to 254 for future use*/
+	WMI_CTRL_PATH_STATS_CAL_TYPE_INVALID = 0xff,
+};
+
+static inline const char *
+wmi_ctrl_path_cal_type_id_to_name(u8 type_id) {
+	switch (type_id) {
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_ADC:
+		return "ADC";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_DAC:
+		return "DAC";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_PROCESS:
+		return "PROCESS";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_NOISE_FLOOR:
+		return "NOISE_FLOOR";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_RXDCO:
+		return "RXDCO";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_COMB_TXLO_TXIQ_RXIQ:
+		return "COMB_TXLO_TXIQ_RXIQ";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_TXLO:
+		return "TXLO";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_TXIQ:
+		return "TXIQ";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_RXIQ:
+		return "RXIQ";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_IM2:
+		return "IM2";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_LNA:
+		return "LNA";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_DPD_LP_RXDCO:
+		return "DPD_LP_RXDCO";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_DPD_LP_RXIQ:
+		return "DPD_LP_RXIQ";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_DPD_MEMORYLESS:
+		return "DPD_MEMORYLESS";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_DPD_MEMORY:
+		return "DPD_MEMORY";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_IBF:
+		return "IBF";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_PDET_AND_PAL:
+		return "PDET_AND_PAL";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_RXDCO_IQ:
+		return "RXDCO_IQ";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_RXDCO_DTIM:
+		return "RXDCO_DTIM";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_TPC_CAL:
+		return "TPC_CAL";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_DPD_TIMEREQ:
+		return "DPD_TIMEREQ";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_BWFILTER:
+		return "BWFILTER";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_PEF:
+		return "PEF";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_PADROOP:
+		return "PADROOP";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_SELFCALTPC:
+		return "SELFCALTPC";
+	case WMI_CTRL_PATH_STATS_CAL_TYPE_INVALID:
+		break;
+	}
+	return "UNKNOWN_CAL_TYPE";
+}
+
+enum wmi_ctrl_path_periodic_cal_type_id {
+	WMI_CTRL_PATH_STATS_PERIODIC_CAL_TYPE_NOISE_FLOOR,
+	WMI_CTRL_PATH_STATS_PERIODIC_CAL_TYPE_DPD_MEMORYLESS,
+	WMI_CTRL_PATH_STATS_PERIODIC_CAL_TYPE_DPD_MEMORY,
+	/* IDs 3 to 254 for future use */
+	WMI_CTRL_PATH_STATS_PERIODIC_CAL_TYPE_INVALID = 0xFF,
+};
+
+static inline const char *
+wmi_ctrl_path_periodic_cal_type_id_to_name(u8 type_id) {
+	switch (type_id) {
+	case WMI_CTRL_PATH_STATS_PERIODIC_CAL_TYPE_NOISE_FLOOR:
+		return "NOISE_FLOOR";
+	case WMI_CTRL_PATH_STATS_PERIODIC_CAL_TYPE_DPD_MEMORYLESS:
+		return "DPD_MEMORYLESS";
+	case WMI_CTRL_PATH_STATS_PERIODIC_CAL_TYPE_DPD_MEMORY:
+		return "DPD_MEMORY";
+	case WMI_CTRL_PATH_STATS_PERIODIC_CAL_TYPE_INVALID:
+		break;
+	}
+	return "UNKNOWN_PERIODIC_CAL_TYPE";
+}
+
+#define WMI_CTRL_PATH_CAL_PROF_MASK	GENMASK(12, 8)
+#define WMI_CTRL_PATH_CAL_TYPE_MASK	GENMASK(7, 0)
+#define WMI_CTRL_PATH_IS_PERIODIC_CAL	GENMASK(13, 13)
+#define WMI_AWGN_MAX_BW 6
+
+struct wmi_ctrl_path_pdev_stats {
+	u32 pdev_id;
+	u32 tx_mgmt_subtype[WMI_MGMT_FRAME_SUBTYPE_MAX];
+	u32 rx_mgmt_subtype[WMI_MGMT_FRAME_SUBTYPE_MAX];
+	u32 scan_fail_dfs_violation_time_ms;
+	u32 nol_chk_fail_last_chan_freq;
+	u32 nol_chk_fail_time_stamp_ms;
+	u32 tot_peer_create_cnt;
+	u32 tot_peer_del_cnt;
+	u32 tot_peer_del_resp_cnt;
+	u32 vdev_pause_fail_rt_to_sched_algo_fifo_full_cnt;
+} __packed;
+
+struct wmi_ctrl_path_cal_stats {
+	u32 pdev_id;
+	u32 cal_info;
+	u32 cal_triggered_cnt;
+	u32 cal_fail_cnt;
+	u32 cal_fcs_cnt;
+	u32 cal_fcs_fail_cnt;
+} __packed;
+
+struct  wmi_ctrl_path_stats_cmd_param {
+	u32 tlv_header;
+	u32 stats_id;
+	u32 req_id;
+	/* get/reset/start/stop based on stats id is defined as
+	 * a part of wmi_ctrl_path_stats_action
+	 */
+	u32 action;
+} __packed;
+
+struct wmi_ctrl_path_stats_ev_param {
+	u32 req_id;
+	/* more flag
+	 * 1 - More events sent after this event.
+	 * 0 - no more events after this event.
+	 */
+	u32 more;
+} __packed;
+
+struct wmi_ctrl_path_stats_list {
+	struct list_head list;
+	void *stats_ptr;
+} __packed;
+
+struct wmi_ctrl_path_btcoex_stats {
+	u32 pdev_id;
+	u32 bt_tx_req_cntr;
+	u32 bt_rx_req_cntr;
+	u32 bt_req_nack_cntr;
+	u32 wl_tx_req_nack_schd_bt_reason_cntr;
+	u32 wl_tx_req_nack_current_bt_reason_cntr;
+	u32 wl_tx_req_nack_other_wlan_tx_reason_cntr;
+	u32 wl_in_tx_abort_cntr;
+	u32 wl_tx_auto_resp_req_cntr;
+	u32 wl_tx_req_ack_cntr;
+	u32 wl_tx_req_cntr;
+} __packed;
+
+struct wmi_ctrl_path_awgn_stats {
+	u32 awgn_send_evt_cnt;
+	u32 awgn_pri_int_cnt;
+	u32 awgn_sec_int_cnt;
+	u32 awgn_pkt_drop_trigger_cnt;
+	u32 awgn_pkt_drop_trigger_reset_cnt;
+	u32 awgn_bw_drop_cnt;
+	u32 awgn_bw_drop_reset_cnt;
+	u32 awgn_cca_int_cnt;
+	u32 awgn_cca_int_reset_cnt;
+	u32 awgn_cca_ack_blk_cnt;
+	u32 awgn_cca_ack_reset_cnt;
+	u32 awgn_int_bw_cnt[WMI_AWGN_MAX_BW];
+} __packed;
+
+struct wmi_ctrl_path_stats_ev_parse_param {
+	struct list_head list;
+	struct ath12k *ar;
+} __packed;
+
 struct wmi_obss_spatial_reuse_params_cmd {
 	__le32 tlv_header;
 	__le32 pdev_id;
@@ -4437,6 +7479,12 @@
 	__le32 vdev_id;
 } __packed;
 
+struct wmi_pdev_obss_pd_bitmap_cmd {
+	u32 tlv_header;
+	u32 pdev_id;
+	u32 bitmap[2];
+} __packed;
+
 #define ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS		200
 #define ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE		0
 #define ATH12K_OBSS_COLOR_COLLISION_DETECTION			1
@@ -4444,6 +7492,13 @@
 #define ATH12K_BSS_COLOR_STA_PERIODS				10000
 #define ATH12K_BSS_COLOR_AP_PERIODS				5000
 
+enum wmi_bss_color_collision {
+	WMI_BSS_COLOR_COLLISION_DISABLE = 0,
+	WMI_BSS_COLOR_COLLISION_DETECTION,
+	WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY,
+	WMI_BSS_COLOR_FREE_SLOT_AVAILABLE,
+};
+
 struct wmi_obss_color_collision_cfg_params_cmd {
 	__le32 tlv_header;
 	__le32 vdev_id;
@@ -4461,6 +7516,12 @@
 	__le32 enable;
 } __packed;
 
+struct wmi_obss_color_collision_event {
+	u32 vdev_id;
+	u32 evt_type;
+	u64 obss_color_bitmap;
+} __packed;
+
 #define ATH12K_IPV4_TH_SEED_SIZE 5
 #define ATH12K_IPV6_TH_SEED_SIZE 11
 
@@ -4604,6 +7665,78 @@
 	__le32 paddr_hi;
 } __packed;
 
+struct ath12k_wmi_pdev_sscan_fw_cmd_fixed_param {
+	u32 pdev_id;
+	u32 spectral_scan_mode;
+} __packed;
+
+
+struct ath12k_wmi_pdev_sscan_fft_bin_index {
+	u32 pri80_bins;
+	u32 sec80_bins;
+	u32 mid_5mhz_bins;
+} __packed;
+
+struct ath12k_wmi_pdev_sscan_chan_info {
+	u32 operating_pri20_freq;
+	u32 operating_cfreq1;
+	u32 operating_cfreq2;
+	u32 operating_bw;
+	u32 operating_puncture_20mhz_bitmap;
+	u32 sscan_cfreq1;
+	u32 sscan_cfreq2;
+	u32 sscan_bw;
+	u32 sscan_puncture_20mhz_bitmap;
+} __packed;
+
+struct ath12k_wmi_pdev_sscan_per_detector_info {
+	__le32 tlv_header;
+	u32 detector_id;
+	u32 start_freq;
+	u32 end_freq;
+} __packed;
+
+struct ath12k_wmi_spectral_scan_bw_capabilities {
+	__le32 tlv_header;
+	u32 pdev_id;
+	u32 sscan_mode;
+	u32 operating_bw;
+	union {
+		struct {
+			u32 supports_sscan_bw_20:1,
+			    supports_sscan_bw_40:1,
+			    supports_sscan_bw_80:1,
+			    supports_sscan_bw_160:1,
+			    supports_sscan_bw_80p80:1,
+			    supports_sscan_bw_320:1,
+			    reserved:21;
+		};
+		u32 supported_flags;
+	};
+} __packed;
+
+struct ath12k_wmi_spectral_fft_size_capabilities {
+	__le32 tlv_header;
+	u32 pdev_id;
+	u32 sscan_bw;
+	union {
+		struct {
+			u32 supports_fft_size_1:1,
+			    supports_fft_size_2:1,
+			    supports_fft_size_3:1,
+			    supports_fft_size_4:1,
+			    supports_fft_size_5:1,
+			    supports_fft_size_6:1,
+			    supports_fft_size_7:1,
+			    supports_fft_size_8:1,
+			    supports_fft_size_9:1,
+			    supports_fft_size_10:1,
+			    supports_fft_size_11:1,
+			    reserved:21;
+		};
+		u32 supported_flags;
+	};
+} __packed;
 #define WMI_SPECTRAL_META_INFO1_FREQ1		GENMASK(15, 0)
 #define WMI_SPECTRAL_META_INFO1_FREQ2		GENMASK(31, 16)
 
@@ -4642,18 +7775,265 @@
 	__le32 buf_len;
 } __packed;
 
+enum wmi_tpc_pream_bw {
+	WMI_TPC_PREAM_CCK,
+	WMI_TPC_PREAM_OFDM,
+	WMI_TPC_PREAM_HT20,
+	WMI_TPC_PREAM_HT40,
+	WMI_TPC_PREAM_VHT20,
+	WMI_TPC_PREAM_VHT40,
+	WMI_TPC_PREAM_VHT80,
+	WMI_TPC_PREAM_VHT160,
+	WMI_TPC_PREAM_HE20,
+	WMI_TPC_PREAM_HE40,
+	WMI_TPC_PREAM_HE80,
+	WMI_TPC_PREAM_HE160,
+	WMI_TPC_PREAM_EHT20,
+	WMI_TPC_PREAM_EHT40,
+	WMI_TPC_PREAM_EHT60,
+	WMI_TPC_PREAM_EHT80,
+	WMI_TPC_PREAM_EHT120,
+	WMI_TPC_PREAM_EHT140,
+	WMI_TPC_PREAM_EHT160,
+	WMI_TPC_PREAM_EHT200,
+	WMI_TPC_PREAM_EHT240,
+	WMI_TPC_PREAM_EHT280,
+	WMI_TPC_PREAM_EHT320,
+	WMI_TPC_PREAM_MAX
+};
+
+#define ATH12K_2G_MAX_FREQUENCY		2495
+#define ATH12K_5G_MAX_FREQUENCY		5920
+#define WMI_TPC_CONFIG			BIT(1)
+#define WMI_TPC_REG_PWR_ALLOWED		BIT(2)
+#define WMI_TPC_RATES_ARRAY1		BIT(3)
+#define WMI_TPC_RATES_ARRAY2		BIT(4)
+#define WMI_TPC_RATES_DL_OFDMA_ARRAY	BIT(5)
+#define WMI_TPC_CTL_PWR_ARRAY		BIT(6)
+#define WMI_TPC_CONFIG_PARAM		0x1
+#define ATH12K_TPC_RATE_ARRAY_MU	GENMASK(15, 8)
+#define ATH12K_TPC_RATE_ARRAY_SU	GENMASK(7, 0)
+#define ATH12K_HW_PREAMBLE(_rcode) (((_rcode) >> 8) & 0x7)
+#define ATH12K_HW_NSS(_rcode) (((_rcode) >> 5) & 0x7)
+#define MODULATION_LIMIT		126
+#define TPC_STATS_REG_PWR_ALLOWED_TYPE	0
+#define HE_EXTRA_MCS_SUPPORT		GENMASK(31, 16)
+
+enum wmi_ctrl_path_stats_action {
+	WMI_REQUEST_CTRL_PATH_STAT_GET   = 1,
+	WMI_REQUEST_CTRL_PATH_STAT_RESET = 2,
+	WMI_REQUEST_CTRL_PATH_STAT_START = 3,
+	WMI_REQUEST_CTRL_PATH_STAT_STOP  = 4,
+};
+
+struct wmi_request_ctrl_path_stats_cmd_fixed_param {
+	u32 tlv_header;
+	/** Bitmask showing which of stats IDs 0-31 have been requested
+	 * These stats ids are defined in enum wmi_ctrl_path_stats_id.
+	 */
+	u32 stats_id_mask;
+	/** request ID to store the cookies in wifistats */
+	u32 request_id;
+	/** action
+	 * get/reset/start/stop based on stats id
+	 * defined as a part of wmi_ctrl_path_stats_action
+	 */
+	u32 action;
+	/** Request Halphy subid stats
+	 * According to the requested stats_id this halphy_subid varies
+	 * For stats_id = 1, the possible values could be enum wmi_halphy_ctrl_path_statsid
+	 */
+	u32 subid;
+} __packed;
+
+struct wmi_cp_stats_event_fixed_param {
+	/** Request ID */
+	u32 request_id;
+	/* end_of_event - single event or multiple events */
+	u32 end_of_event;
+	/* event_count - if multiple events are sent, this identifies a particular
+	 * event out of the multiple events that are sent to the host
+	 */
+	u32 event_count;
+	u32 pdev_id;
+} __packed;
+
+struct wmi_tpc_configs {
+	u32 reg_domain;
+	/* current channel in MHz */
+	u32 chan_freq;
+	/* current phy mode */
+	u32 phy_mode;
+	/* Max antenna gain for current regulatory in 0.25 dBm steps */
+	u32 twice_antenna_reduction;
+	/* Max transmit power allowed in regulatory domain in 0.25 dBm steps */
+	u32 twice_max_reg_power;
+	/* User specified antenna gain in 0.25 dBm steps */
+	s32 twice_antenna_gain;
+	/* The overall power limit in 0.25 dBm steps */
+	u32 power_limit;
+	/* The total number of rates supported */
+	u32 rate_max;
+	/* The total number of active chains */
+	u32 num_tx_chain;
+	/* not used for now */
+	u32 ctl;
+	u32 flags;
+	/* tells info about BE, HE, HE_EXTRA_MCS, 160, 320, 11BE PUNC */
+	u32 caps;
+} __packed;
+
+struct wmi_max_reg_power_fixed_param {
+	/* 0: maxRegAllowedPower[TX_NUM_CHAIN];
+	 * 1:maxRegPowerAGCDD[TX_NUM_CHAIN - 1][TX_NUM_CHAIN - 1],
+	 * 2:maxRegPowerAGSTBC[TX_NUM_CHAIN - 1][TX_NUM_CHAIN - 1],
+	 * 3:maxRegPowerAGTXBF([TX_NUM_CHAIN - 1][TX_NUM_CHAIN - 1]
+	 * type 1-3 not used, for future use
+	 */
+	u32 reg_power_type;
+	/* Length of the regulatory power array being sent in bytes */
+	u32 reg_array_len;
+	/* dimensions below
+	 * d1 - [TX_NUM_CHAIN - 1]
+	 * d2- [TX_NUM_CHAIN - 1] for cdd, stbc, txbf
+	 * d2 = 1 for maxRegAllowedPower
+	 * d3 = 1  d4 = 1
+	 */
+	u32 d1;
+	u32 d2;
+	u32 d3;
+	u32 d4;
+} __packed;
+
+struct wmi_max_reg_power_allowed {
+	struct wmi_max_reg_power_fixed_param tpc_reg_pwr;
+	s16 *reg_pwr_array;
+};
+
+struct wmi_tpc_rates_array_fixed_param {
+	/* 0: ratesArray[TPC_RATE_MAX], (for CCK, OFDM, HT, VHT and HE Rates info)
+	 * 1: ratesArray2[TPC_RATE_MAX] (for EHT Rates info),
+	 * 2: dl_ofdma rate array. Type 2 unused, for future use
+	 */
+	u32 rate_array_type;
+	u32 rate_array_len;
+} __packed;
+
+struct wmi_tpc_rates_array {
+	struct wmi_tpc_rates_array_fixed_param tpc_rates_array;
+	s16 *rate_array;
+};
+
+struct wmi_tpc_ctl_pwr_fixed_param {
+	/* 0: ctl_array; 1: ctl_160 array; 2: ctl_dlOfdma array
+	 * Type 2 unused, for future use
+	 */
+	u32 ctl_array_type;
+	/* Length of the CTL array being sent in bytes */
+	u32 ctl_array_len;
+	/* Message MAY be split into smaller chunks to fit in the WMI svc msg
+	 * not used for now
+	 */
+	u32 end_of_ctl_pwr;
+	/* Incremented for every event chunk for Host to know the sequence
+	 * not used for now
+	 */
+	u32 ctl_pwr_count;
+	/* Dimensions below
+	 * For ctl_array
+	 * d4 = No of chains
+	 * d3 = BF on/off = 2
+	 * d2 = 28 which the number of different tx modes-legacy, HT20, HT40 etc
+	 * d1 = NSS  number of spatial streams
+	 * s8 ctl_160array[WHAL_MAX_NUM_CHAINS][pri_or_sec][bf][nss].
+	 * For ctl_160 array
+	 * d4 = No of chains	d3 = primary/secondary channel
+	 * d2 = BF on/off	d1 = NSS
+	 */
+	u32 d1;
+	u32 d2;
+	u32 d3;
+	u32 d4;
+} __packed;
+
+struct wmi_tpc_ctl_pwr_table {
+	struct wmi_tpc_ctl_pwr_fixed_param tpc_ctl_pwr;
+	s8 *ctl_pwr_table;
+};
+
+struct wmi_tpc_stats_event {
+	u32 pdev_id;
+	u32 event_count;
+	u32 end_of_event;
+	/* Bitmap to set and use the received tlvs alone. Not all tlvs are
+	 * supported for all chipset. For eg ctl_160 is not present for chipset
+	 * which does not have 160Mhz support
+	 */
+	u32 tlvs_rcvd;
+	struct wmi_max_reg_power_allowed max_reg_allowed_power;
+	struct wmi_tpc_rates_array rates_array1;
+	struct wmi_tpc_rates_array rates_array2;
+	struct wmi_tpc_configs tpc_config;
+	struct wmi_tpc_ctl_pwr_table ctl_array;
+};
+
+enum ath12k_wmi_tpc_stats_events {
+	ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT,
+	ATH12K_TPC_STATS_RATES_EVENT1,
+	ATH12K_TPC_STATS_RATES_EVENT2,
+	ATH12K_TPC_STATS_CTL_TABLE_EVENT
+};
+
+enum ath12k_wmi_tpc_stats_rates_array {
+	ATH12K_TPC_STATS_RATES_ARRAY1,
+	ATH12K_TPC_STATS_RATES_ARRAY2,
+};
+
+enum ath12k_wmi_tpc_stats_ctl_array {
+	ATH12K_TPC_STATS_CTL_ARRAY,
+	ATH12K_TPC_STATS_CTL_160ARRAY,
+};
+
+enum  wmi_ctrl_path_stats_id {
+	/* bit 0 is currently unused / reserved */
+	WMI_REQ_CTRL_PATH_PDEV_TX_STAT   = 1,
+	WMI_REQ_CTRL_PATH_VDEV_EXTD_STAT = 2,
+	WMI_REQ_CTRL_PATH_MEM_STAT       = 3,
+	WMI_REQ_CTRL_PATH_CAL_STAT       = 5,
+	WMI_REQ_CTRL_PATH_AWGN_STAT	 = 7,
+	WMI_REQ_CTRL_PATH_BTCOEX_STAT    = 8,
+};
+
+enum wmi_dblog_param {
+	WMI_DEBUG_LOG_PARAM_LOG_LEVEL = 0x1,
+	WMI_DEBUG_LOG_PARAM_VDEV_ENABLE,
+	WMI_DEBUG_LOG_PARAM_VDEV_DISABLE,
+	WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP,
+	WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP,
+	WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP,
+};
+
+struct wmi_dbglog_config_cmd_fixed_param {
+	u32 tlv_header;
+	u32 dbg_log_param;
+	u32 value;
+} __packed;
+
 #define WMI_MAX_MEM_REQS 32
 
 #define MAX_RADIOS 3
 
+#define WMI_MLO_CMD_TIMEOUT_HZ		(5 * HZ)
 #define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
 #define WMI_SEND_TIMEOUT_HZ (3 * HZ)
+#define WMI_CTRL_STATS_READY_TIMEOUT_HZ (1 * HZ)
 
 struct ath12k_wmi_pdev {
 	struct ath12k_wmi_base *wmi_ab;
 	enum ath12k_htc_ep_id eid;
 	const struct wmi_peer_flags_map *peer_flags;
 	u32 rx_decap_mode;
+	wait_queue_head_t tx_ce_desc_wq;
 };
 
 struct ath12k_wmi_base {
@@ -4664,7 +8044,7 @@
 
 	struct completion service_ready;
 	struct completion unified_ready;
-	DECLARE_BITMAP(svc_map, WMI_MAX_EXT_SERVICE);
+	DECLARE_BITMAP(svc_map, WMI_MAX_EXT2_SERVICE);
 	wait_queue_head_t tx_credits_wq;
 	const struct wmi_peer_flags_map *peer_flags;
 	u32 num_mem_chunks;
@@ -4676,30 +8056,312 @@
 	struct ath12k_wmi_target_cap_arg *targ_cap;
 };
 
+struct wmi_mlo_setup_cmd_fixed_param {
+	u32 tlv_header;
+	u32 mld_group_id;
+	u32 pdev_id;
+} __packed;
+
+struct wmi_mlo_setup_params {
+	u32 group_id;
+	u8 num_partner_links;
+	u8 *partner_link_id;
+};
+
+struct wmi_mlo_ready_cmd_fixed_param {
+	u32 tlv_header;
+	u32 pdev_id;
+} __packed;
+
+enum wmi_mlo_tear_down_reason_code_type {
+	WMI_MLO_TEARDOWN_SSR_REASON,
+};
+
+struct wmi_mlo_teardown_fixed_param {
+	u32 tlv_header;
+	u32 pdev_id;
+	u32 reason_code;
+	u32 umac_reset;
+} __packed;
+
+struct wmi_mlo_setup_complete_event_fixed_param {
+	u32 pdev_id;
+	u32 status;
+} __packed;
+
+struct wmi_mlo_teardown_complete_fixed_param {
+	u32 pdev_id;
+	u32 status;
+} __packed;
+
+/* Inform FW that host expects response for multi-vdev
+ * restart command */
+#define WMI_MVR_RESPONSE_SUPPORT_EXPECTED     0x1
+#define WMI_MVR_CMD_TIMEOUT_HZ		      (2 * HZ)
+#define WMI_MVR_RESP_VDEV_BM_MAX_LEN	      2
+#define WMI_MVR_RESP_VDEV_BM_MAX_LEN_BYTES    (WMI_MVR_RESP_VDEV_BM_MAX_LEN * 4)
+
+struct wmi_vdev_ids_arg {
+	u32 id_len;
+	u32 id[17]; /* TARGET_NUM_VDEVS */
+};
+
+struct wmi_pdev_multiple_vdev_restart_req_arg {
+	struct wmi_vdev_ids_arg vdev_ids;
+	struct wmi_channel_arg channel;
+	u16 ru_punct_bitmap;
+	u32 width_device;
+	u32 center_freq_device;
+};
+
+struct wmi_pdev_multiple_vdev_restart_request_cmd {
+	__le32 tlv_header;
+	__le32 pdev_id;
+	__le32 requestor_id;
+	__le32 disable_hw_ack;
+	__le32 cac_duration_ms;
+	__le32 num_vdevs;
+	__le32 flags;
+	__le32 puncture_20mhz_bitmap;
+} __packed;
+
+struct wmi_pdev_mvr_resp_event_fixed_param {
+	u32 pdev_id;
+	u32 requestor_id;
+	u32 status;
+} __packed;
+
+struct wmi_pdev_mvr_resp_event_parse {
+	struct wmi_pdev_mvr_resp_event_fixed_param fixed_param;
+	u32 num_vdevs_bm;
+	u32 vdev_id_bm[WMI_MVR_RESP_VDEV_BM_MAX_LEN];
+} __packed;
+
+struct wmi_bcn_tmpl_ml_params {
+	__le32 tlv_header;
+	__le32 vdev_id;
+	__le32 hw_link_id;
+	__le32 beacon_interval;
+	__le32 csa_switch_count_offset;
+	__le32 ext_csa_switch_count_offset;
+	__le32 per_sta_profile_offset;
+	__le32 quiet_ie_offset;
+	__le32 is_other_ie_present;
+} __packed;
+
+struct wmi_bcn_tmpl_ml_info {
+	__le32 tlv_header;
+	__le32 hw_link_id;
+	__le32 cu_vdev_map_cat1_lo;
+	__le32 cu_vdev_map_cat1_hi;
+	__le32 cu_vdev_map_cat2_lo;
+	__le32 cu_vdev_map_cat2_hi;
+} __packed;
+
+struct wmi_critical_update_arg {
+	u16 num_ml_params;
+	struct wmi_bcn_tmpl_ml_params *ml_params;
+	u16 num_ml_info;
+	struct wmi_bcn_tmpl_ml_info *ml_info;
+};
+
+struct wmi_prb_resp_tmpl_ml_info_params {
+	__le32 tlv_header;
+	__le32 hw_link_id;
+	__le32 cu_vdev_map_cat1_lo;
+	__le32 cu_vdev_map_cat1_hi;
+	__le32 cu_vdev_map_cat2_lo;
+	__le32 cu_vdev_map_cat2_hi;
+} __packed;
+
+#define ATH12K_LOWER_32_MASK			GENMASK_ULL(31, 0)
+#define ATH12K_UPPER_32_MASK			GENMASK_ULL(63, 32)
+#define ATH12K_GET_LOWER_32_BITS(val)		(val & ATH12K_LOWER_32_MASK)
+#define ATH12K_GET_UPPER_32_BITS(val)		((val & ATH12K_UPPER_32_MASK) >> 32)
+
+
+#define WMI_ADFS_MODE_QUICK_OCAC                0 /* Agile preCAC */
+#define WMI_ADFS_MODE_QUICK_RCAC                2 /* Agile Rolling CAC */
+#define WMI_SUPPORT_CHAIN_MASK_ADFS             BIT(31)
+
+#define MIN_PRECAC_TIMEOUT                      (6 * 60 * 1000) /* 6 minutes */
+#define MIN_WEATHER_RADAR_CHAN_PRECAC_TIMEOUT   (6 * 10 * 60 * 1000) /* 1 hour */
+#define MAX_PRECAC_TIMEOUT                      (4 * 60 * 60 * 1000) /* 4 hours */
+#define MAX_WEATHER_RADAR_CHAN_PRECAC_TIMEOUT   (24 * 60 * 60 * 1000) /* 24 hours */
+#define MIN_RCAC_TIMEOUT                        (62 * 1000) /* 62 seconds */
+#define MAX_RCAC_TIMEOUT                        0xffffffff
+
+struct wmi_vdev_adfs_ch_cfg_cmd {
+	u32  tlv_header;
+	u32  vdev_id;
+	u32  ocac_mode;
+	u32  min_duration_ms;
+	u32  max_duration_ms;
+	u32  chan_freq;
+	u32  chan_width;
+	/*
+	 * Two center frequencies are required since agile channel switch
+	 * has to support 160/165 MHz for products like Pine.
+	 * For agile which supports only up to 80MHz (HK),
+	 * freq2 will be 0 and ignored.
+	 */
+	union {
+		u32  center_freq;
+		u32  center_freq1;
+	};
+	u32  center_freq2;
+} __packed;
+
+struct wmi_vdev_adfs_ocac_abort_cmd {
+	u32 tlv_header;
+	u32 vdev_id;
+} __packed;
+
+#define WMI_DFS_RADAR_DETECTED_IN_SERVICE_CHAN  0
+#define WMI_DFS_RADAR_DETECTED_IN_OCAC_CHAN     1
+
+struct wmi_vdev_adfs_ocac_complete_event_fixed_param {
+	u32 vdev_id;
+	u32 chan_freq;
+	u32 chan_width;
+	union {
+		u32 center_freq;
+		u32 center_freq1;
+	};
+	u32 status;
+	u32 center_freq2;
+} __packed;
+
+#define ATH12K_PEER_VALID_VDEV_ID		(1 << 31)
+#define ATH12K_PEER_PUNCT_BITMAP_VALID		(1 << 30)
+#define ATH12K_PEER_CH_WIDTH_SWITCH_TIMEOUT_HZ	(5 * HZ)
+
+struct wmi_chan_width_peer_arg {
+	struct ath12k_wmi_mac_addr_params mac_addr;
+	u32 chan_width;
+	u32 puncture_20mhz_bitmap;
+	enum wmi_phy_mode peer_phymode;
+	bool is_upgrade;
+};
+
+struct wmi_peer_chan_width_switch_arg {
+	u32 num_peers;
+	u32 vdev_var;
+	u32 start_idx;
+	struct wmi_chan_width_peer_arg *peer_arg;
+};
+
+struct wmi_peer_chan_width_switch_req_cmd {
+	__le32 tlv_header;
+	__le32 num_peers;
+	__le32 vdev_var;
+} __packed;
+
+struct wmi_chan_width_peer_list {
+	__le32 tlv_header;
+	struct ath12k_wmi_mac_addr_params mac_addr;
+	__le32 chan_width;
+	__le32 puncture_20mhz_bitmap;
+} __packed;
+
+
+#define MAX_20MHZ_SEGS 16
+#define MAX_NUM_ANTENNA 8
+
+struct wmi_rssi_dbm_conv_event_fixed_param {
+	u32 pdev_id;
+} __packed;
+
+struct wmi_rssi_dbm_conv_param_info {
+	u32 curr_bw;
+	u32 curr_rx_chainmask;
+	u32 xbar_config;
+	u32 xlna_bypass_offset;
+	u32 xlna_bypass_threshold;
+	s8 nf_hw_dbm[MAX_NUM_ANTENNA][MAX_20MHZ_SEGS];
+} __packed;
+
+struct wmi_rssi_dbm_conv_temp_offset {
+	s32 rssi_temp_offset;
+} __packed;
+
+struct wmi_rssi_dbm_conv_offsets {
+	s32 rssi_temp_offset;
+	s8 min_nf_dbm;
+	/* rssi_offset is the sum of min_nf_dbm & rssi_temp_offset */
+	s32 rssi_offset;
+};
+
+#define WMI_LATENCY_TID_INFO_SCS_TID_NUM		GENMASK(7, 0)
+#define WMI_LATENCY_TID_INFO_SCS_AC			GENMASK(9, 8)
+#define WMI_LATENCY_TID_INFO_SCS_DL_EN			BIT(10)
+#define WMI_LATENCY_TID_INFO_SCS_UL_EN			BIT(11)
+#define WMI_LATENCY_TID_INFO_SCS_BURST_SZ_SUM		GENMASK(13, 12)
+#define WMI_LATENCY_TID_INFO_SCS_MSDUQ_ID		GENMASK(17, 14)
+#define WMI_LATENCY_TID_INFO_SCS_UL_OFDMA_DISABLE	BIT(18)
+#define WMI_LATENCY_TID_INFO_SCS_UL_MU_MIMO_DISABLE	BIT(19)
+#define WMI_LATENCY_TID_INFO_SCS_SAWF_UL_PARAM		BIT(20)
+
+#define WMI_LATENCY_DIR_UL	0
+#define WMI_LATENCY_DIR_DL	1
+#define WMI_LATENCY_DIR_DIRECT	2
+
+struct wmi_tid_latency_params {
+	__le32 tlv_header;
+	struct wmi_mac_addr destmac;
+	__le32 service_interval;
+	__le32 burst_size_diff;
+	__le32 max_latency;
+	__le32 max_per;
+	__le32 min_tput;
+	__le32 latency_tid_info;
+} __packed;
+
+struct wmi_peer_tid_latency_cmd {
+	__le32 tlv_header;
+	__le32 pdev_id;
+} __packed;
+
+#define WMI_ANI_EVENT_PDEV_ID_VALID	BIT(31)
+#define WMI_ANI_EVENT_PDEV_ID		GENMASK(7, 0)
+
+struct wmi_pdev_ani_event {
+	__le32 tlv_header;
+	__s32 ani_level;
+	/* Bits 7:0  - pdev_id
+	 * Bits 30:8 - reserved
+	 * Bits 31   - pdev_id_valid - Flag to check pdev_id is valid or not
+	 */
+	__le32 pdev_id_bitmap;
+} __packed;
+
 #define ATH12K_FW_STATS_BUF_SIZE (1024 * 1024)
 
 void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
 			     struct ath12k_wmi_resource_config_arg *config);
 void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
 			     struct ath12k_wmi_resource_config_arg *config);
+void ath12k_wmi_init_ipq5332(struct ath12k_base *ab,
+			     struct ath12k_wmi_resource_config_arg *config);
 int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
 			u32 cmd_id);
 struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len);
 int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
+			 struct sk_buff *frame, bool link_agnostic);
+int ath12k_wmi_offchan_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
 			 struct sk_buff *frame);
 int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
 			struct ieee80211_mutable_offsets *offs,
-			struct sk_buff *bcn);
+			struct sk_buff *bcn, u32 ema_param);
 int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id);
-int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid,
-		       const u8 *bssid);
+int ath12k_wmi_vdev_up(struct ath12k *ar, struct vdev_up_params *params);
 int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id);
 int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
 			  bool restart);
 int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
 			      u32 vdev_id, u32 param_id, u32 param_val);
 int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
-			      u32 param_value, u8 pdev_id);
+			      s32 param_value, u8 pdev_id);
 int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable);
 int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab);
 int ath12k_wmi_cmd_init(struct ath12k_base *ab);
@@ -4718,7 +8380,8 @@
 
 int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
 				u32 param, u32 param_value);
-int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms);
+int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms,
+				 bool nowait);
 int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
 				    const u8 *peer_addr, u8 vdev_id);
 int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id);
@@ -4764,6 +8427,12 @@
 					    u32 vdev_id, u32 bcn_ctrl_op);
 int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
 				     struct ath12k_wmi_init_country_arg *arg);
+int
+ath12k_wmi_send_thermal_mitigation_cmd(struct ath12k *ar,
+				       struct ath12k_wmi_thermal_mitigation_arg *arg);
+int ath12k_wmi_pdev_pktlog_enable(struct ath12k *ar, u32 pktlog_filter);
+int ath12k_wmi_pdev_pktlog_disable(struct ath12k *ar);
+int ath12k_wmi_pdev_peer_pktlog_filter(struct ath12k *ar, u8 *addr, u8 enable);
 int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
 					   int vdev_id, const u8 *addr,
 					   dma_addr_t paddr, u8 tid,
@@ -4772,13 +8441,47 @@
 int
 ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
 				 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg);
+int ath12k_wmi_send_pdev_pkt_route(struct ath12k *ar,
+				   struct ath12k_wmi_pkt_route_param *param);
 int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
 				       struct ath12k_wmi_pdev_set_regdomain_arg *arg);
-int ath12k_wmi_simulate_radar(struct ath12k *ar);
+int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb,
+			     struct ath12k_fw_stats *stats);
+size_t ath12k_wmi_fw_stats_num_peers(struct list_head *head);
+size_t ath12k_wmi_fw_stats_num_peers_extd(struct list_head *head);
+size_t ath12k_wmi_fw_stats_num_vdevs(struct list_head *head);
+void ath12k_wmi_fw_stats_fill(struct ath12k *ar,
+			      struct ath12k_fw_stats *fw_stats, u32 stats_id,
+			      char *buf);
+int ath12k_wmi_simulate_radar(struct ath12k *ar, u32 radar_params);
+int ath12k_wmi_simulate_awgn(struct ath12k *ar, u32 chan_bw_interference_bitmap);
 int ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id);
 int ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id);
+int ath12k_wmi_send_twt_add_dialog_cmd(struct ath12k *ar,
+				       struct wmi_twt_add_dialog_params *params);
+int ath12k_wmi_send_twt_del_dialog_cmd(struct ath12k *ar,
+				       struct wmi_twt_del_dialog_params *params);
+int ath12k_wmi_send_twt_pause_dialog_cmd(struct ath12k *ar,
+					 struct wmi_twt_pause_dialog_params *params);
+int ath12k_wmi_send_twt_resume_dialog_cmd(struct ath12k *ar,
+					  struct wmi_twt_resume_dialog_params *params);
 int ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
 				 struct ieee80211_he_obss_pd *he_obss_pd);
+int ath12k_wmi_pdev_set_srg_bss_color_bitmap(struct ath12k *ar, u32 *bitmap);
+int ath12k_wmi_pdev_set_srg_patial_bssid_bitmap(struct ath12k *ar, u32 *bitmap);
+int ath12k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath12k *ar,
+						 u32 *bitmap);
+int ath12k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath12k *ar,
+						 u32 *bitmap);
+int ath12k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath12k *ar,
+						     u32 *bitmap);
+int ath12k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath12k *ar,
+						     u32 *bitmap);
+int ath12k_send_coex_config_cmd(struct ath12k *ar,
+                                struct coex_config_arg *coex_config);
+int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
+                                  u8 bss_color, u32 period,
+                                  bool enable);
 int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
 				  u8 bss_color, u32 period,
 				  bool enable);
@@ -4799,5 +8502,49 @@
 			       struct sk_buff *tmpl);
 int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
 			   enum wmi_host_hw_mode_config_type mode);
-
+int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
+                                        u32 vdev_id,
+                                        struct ath12k_reg_tpc_power_info *param);
+int ath12k_wmi_dbglog_cfg(struct ath12k *ar, u32 param, u64 value);
+int ath12k_wmi_pdev_ap_ps_cmd_send(struct ath12k *ar, u8 pdev_id, u32 value);
+int ath12k_wmi_pdev_m3_dump_enable(struct ath12k *ar, u32 enable);
+int ath12k_wmi_pdev_get_tpc_table_cmdid(struct ath12k *ar);
+void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar);
+int ath12k_wmi_send_wmi_ctrl_stats_cmd(struct ath12k *ar,
+				       struct wmi_ctrl_path_stats_cmd_param *param);
+int ath12k_wmi_mlo_setup(struct ath12k *ar,
+			 struct wmi_mlo_setup_params *mlo_params);
+int ath12k_wmi_mlo_ready(struct ath12k *ar);
+int ath12k_wmi_mlo_teardown(struct ath12k *ar, bool umac_reset);
+bool ath12k_wmi_is_mvr_supported(struct ath12k_base *ab);
+int ath12k_wmi_pdev_multiple_vdev_restart(struct ath12k *ar,
+					  struct wmi_pdev_multiple_vdev_restart_req_arg *arg);
+int ath12k_wmi_vdev_adfs_ch_cfg_cmd_send(struct ath12k *ar,u32 vdev_id,
+                                        struct cfg80211_chan_def *chandef);
+int ath12k_wmi_vdev_adfs_ocac_abort_cmd_send(struct ath12k *ar,u32 vdev_id);
+int ath12k_wmi_svc_config_send(struct ath12k *ar,
+			       struct ath12k_sawf_svc_params *param);
+int ath12k_wmi_svc_send_disable(struct ath12k *ar, u32 svc_id);
+int ath12k_wmi_mgmt_rx_reo_filter_config(struct ath12k *ar,
+					 struct ath12k_mgmt_rx_reo_filter *filter);
+int
+ath12k_mgmt_rx_reo_init_context(struct ath12k_base *ab);
+int
+ath12k_mgmt_rx_reo_deinit_context(struct ath12k_base *ab);
+#ifdef CONFIG_ATH12K_BONDED_DS_SUPPORT
+int
+ath12k_wmi_send_vdev_set_intra_bss_cmd(struct ath12k *ar,
+				       u32 vdev_id, u32 enable);
+int ath12k_wmi_set_peer_intra_bss_cmd(struct ath12k *ar,  u32 vdev_id, const u8 *peer_addr,
+				      u32 enable);
+#endif
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+int ath12k_wmi_config_peer_ppeds_routing(struct ath12k *ar,
+					 const u8 *peer_addr, u8 vdev_id,
+					 u32 service_code, u32 priority_valid,
+					 u32 src_info, bool ppe_routing_enable);
+#endif
+void ath12k_wmi_peer_chan_width_switch_work(struct work_struct *work);
+int ath12k_wmi_set_latency(struct ath12k *ar, struct ath12k_latency_params *params);
+int ath12k_wmi_pdev_get_ani_level(struct ath12k *ar, u32 param_id, u8 pdev_id);
 #endif
diff -ruw linux-6.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-6.4-fbx/drivers/net/wireless/ath/ath9k/ar9003_calib.c
--- linux-6.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath9k/ar9003_calib.c	2023-05-22 20:06:42.251800816 +0200
@@ -165,6 +165,8 @@
 		if (ret < 0)
 			return ret;
 
+		ath9k_hw_update_cca_threshold(ah);
+
 		/* start NF calibration, without updating BB NF register */
 		ath9k_hw_start_nfcal(ah, false);
 	}
diff -ruw linux-6.4/drivers/net/wireless/ath/ath9k/ar9003_phy.c linux-6.4-fbx/drivers/net/wireless/ath/ath9k/ar9003_phy.c
--- linux-6.4/drivers/net/wireless/ath/ath9k/ar9003_phy.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath9k/ar9003_phy.c	2023-05-22 20:06:42.251800816 +0200
@@ -1918,6 +1918,116 @@
 	}
 }
 
+/*
+ * Adaptive CCA threshold - Calculate and update CCA threshold periodically
+ * after NF calibration and at the end of initialization sequence during every
+ * chip reset.
+ *
+ * Step 1: Compute NF_max_primary and NF_max_extension
+ * If noise floor completes,
+ *   NF_max_primary = max of noise floor read across all chains in primary channel
+ *   NF_max_extension = max of noise floor read across all chains in extension channel
+ * else
+ *   NF_max_primary = NF_max_extension = the value that is forced into HW as noise floor
+ *
+ * Step 2: Compute CCA_threshold_primary and CCA_threshold_extension
+ *   CCA_threshold_primary = CCA_detection_level - CCA_detection_margin - NF_max_primary
+ *   CCA_threshold_extension = CCA_detection_level - CCA_detection_margin - NF_max_extension
+ *
+ * Step 3: Program CCA thresholds
+ *
+ */
+void ar9003_update_cca_threshold(struct ath_hw *ah)
+{
+	struct ath9k_hw_cal_data *cal = ah->caldata;
+	struct ath9k_nfcal_hist *h;
+	u_int16_t cca_detection_margin_pri, cca_detection_margin_ext;
+	int16_t nf, nf_max_primary, nf_max_extension, nf_nominal,
+		derived_max_cca, max_cca_cap, cca_threshold_primary,
+		cca_threshold_extension;
+	u_int8_t chainmask;
+	int chan, chain, i, init_nf = 0;
+
+	if (!ah->adaptive_cca_threshold_enabled)
+		return;
+
+	if (!cal)
+		return;
+
+	h = cal->nfCalHist;
+
+	if (IS_CHAN_2GHZ(ah->curchan))
+		nf = ah->nf_2g.max;
+	else
+		nf = ah->nf_5g.max;
+
+	nf_max_primary = nf_max_extension = nf;
+
+	chainmask = ah->rxchainmask & ah->caps.rx_chainmask;
+
+	/* Compute max of noise floor read across all chains, per channel (ctl/ext) */
+	for (chan = 0; chan < 2 /*ctl,ext*/; chan++) {
+		ath_dbg(ath9k_hw_common(ah), CALIBRATE, "chan: %s\n",
+			!chan ? "ctrl" : "extn");
+
+		for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+			if (!((chainmask >> chain) & 0x1))
+				continue;
+
+			i = chan * AR9300_MAX_CHAINS + chain;
+			if (!init_nf) {
+				nf = h[i].privNF;
+				init_nf = 1;
+			}
+
+			ath_dbg(ath9k_hw_common(ah), CALIBRATE, "privNF[%d]: %d\n",
+				i, h[i].privNF);
+			nf = (nf > h[i].privNF) ? nf : h[i].privNF;
+		}
+
+		if (!chan)
+			nf_max_primary = nf;
+		else
+			nf_max_extension = nf;
+	}
+
+	if (IS_CHAN_HT40(ah->curchan))
+		nf_nominal = NF_NOM_40MHZ;
+	else
+		nf_nominal = NF_NOM_20MHZ;
+
+	cca_detection_margin_pri = ah->cca_detection_margin;
+	if (nf_max_primary < nf_nominal)
+		cca_detection_margin_pri += (nf_nominal - nf_max_primary);
+
+	cca_detection_margin_ext = ah->cca_detection_margin;
+	if (nf_max_extension < nf_nominal)
+		cca_detection_margin_ext += (nf_nominal - nf_max_extension);
+
+	derived_max_cca = ah->cca_detection_level - ah->cca_detection_margin - BEST_CASE_NOISE_FLOOR;
+	max_cca_cap = derived_max_cca < MAX_CCA_THRESHOLD ? derived_max_cca : MAX_CCA_THRESHOLD;
+
+	ath_dbg(ath9k_hw_common(ah), CALIBRATE, "derived_max_cca: %d, max_cca_cap: %d\n",
+		derived_max_cca, max_cca_cap);
+
+	cca_threshold_primary = ah->cca_detection_level - cca_detection_margin_pri - nf_max_primary;
+	cca_threshold_primary = cca_threshold_primary < max_cca_cap ?
+				(cca_threshold_primary > MIN_CCA_THRESHOLD ?
+					cca_threshold_primary : MIN_CCA_THRESHOLD) : max_cca_cap;
+	cca_threshold_extension = ah->cca_detection_level - cca_detection_margin_ext - nf_max_extension;
+	cca_threshold_extension = cca_threshold_extension < max_cca_cap ?
+				  (cca_threshold_extension > MIN_CCA_THRESHOLD ?
+					cca_threshold_extension : MIN_CCA_THRESHOLD) : max_cca_cap;
+
+	ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+		"nf_max_primary: %d, nf_max_extension: %d, cca_pri: %d, cca_ext: %d\n",
+		nf_max_primary, nf_max_extension, cca_threshold_primary, cca_threshold_extension);
+
+	REG_RMW_FIELD(ah, AR_PHY_CCA_0, AR_PHY_CCA_THRESH62, cca_threshold_primary);
+	REG_RMW_FIELD(ah, AR_PHY_EXTCHN_PWRTHR1, AR_PHY_EXT_CCA0_THRESH62, cca_threshold_extension);
+	REG_RMW_FIELD(ah, AR_PHY_CCA_CTRL_0, AR_PHY_EXT_CCA0_THRESH62_MODE, 0x0);
+}
+
 void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
 {
 	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -1953,6 +2063,7 @@
 	priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs;
 	priv_ops->set_radar_params = ar9003_hw_set_radar_params;
 	priv_ops->fast_chan_change = ar9003_hw_fast_chan_change;
+	priv_ops->update_cca_threshold = ar9003_update_cca_threshold;
 
 	ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
 	ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
diff -ruw linux-6.4/drivers/net/wireless/ath/ath9k/ar9003_phy.h linux-6.4-fbx/drivers/net/wireless/ath/ath9k/ar9003_phy.h
--- linux-6.4/drivers/net/wireless/ath/ath9k/ar9003_phy.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath9k/ar9003_phy.h	2023-05-22 20:06:42.251800816 +0200
@@ -399,6 +399,8 @@
 #define AR_PHY_EXT_CCA0_THRESH62_S  0
 #define AR_PHY_EXT_CCA0_THRESH62_1    0x000001FF
 #define AR_PHY_EXT_CCA0_THRESH62_1_S  0
+#define AR_PHY_EXT_CCA0_THRESH62_MODE    0x00040000
+#define AR_PHY_EXT_CCA0_THRESH62_MODE_S  18
 #define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK          0x0000003F
 #define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S        0
 #define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME           0x00001FC0
@@ -1326,4 +1328,10 @@
 
 #define AR9300_DFS_FIRPWR -28
 
+#define BEST_CASE_NOISE_FLOOR         -130
+#define MAX_CCA_THRESHOLD              90
+#define MIN_CCA_THRESHOLD              0
+#define NF_NOM_20MHZ                  -101
+#define NF_NOM_40MHZ                  -98
+
 #endif  /* AR9003_PHY_H */
diff -ruw linux-6.4/drivers/net/wireless/ath/ath9k/ath9k.h linux-6.4-fbx/drivers/net/wireless/ath/ath9k/ath9k.h
--- linux-6.4/drivers/net/wireless/ath/ath9k/ath9k.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath9k/ath9k.h	2023-05-22 20:06:42.255800922 +0200
@@ -1152,4 +1152,18 @@
 static inline void ath_ahb_exit(void) {};
 #endif
 
+#ifdef CONFIG_ATH9K_TX99
+extern int ath9k_enable_tx99;
+
+static inline bool ath9k_tx99_enabled(void)
+{
+	return ath9k_enable_tx99 > 0;
+}
+#else
+static inline bool ath9k_tx99_enabled(void)
+{
+	return false;
+}
+#endif
+
 #endif /* ATH9K_H */
diff -ruw linux-6.4/drivers/net/wireless/ath/ath9k/calib.c linux-6.4-fbx/drivers/net/wireless/ath/ath9k/calib.c
--- linux-6.4/drivers/net/wireless/ath/ath9k/calib.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath9k/calib.c	2023-05-22 20:06:42.255800922 +0200
@@ -234,12 +234,17 @@
 	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
 		    AR_PHY_AGC_CONTROL_ENABLE_NF);
 
+	if (ah->adaptive_cca_threshold_enabled) {
+		REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
+		    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+	} else {
 	if (update)
 		REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
 		    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
 	else
 		REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
 		    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+	}
 
 	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NF);
 }
@@ -476,9 +481,13 @@
 	 * the baseband update the internal NF value itself, similar to
 	 * what is being done after a full reset.
 	 */
-	if (!test_bit(NFCAL_PENDING, &caldata->cal_flags))
-		ath9k_hw_start_nfcal(ah, true);
-	else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) & AR_PHY_AGC_CONTROL_NF))
+	if (!test_bit(NFCAL_PENDING, &caldata->cal_flags)) {
+		bool do_fast_recalib;
+
+		ath9k_hw_update_cca_threshold(ah);
+		do_fast_recalib = !ah->adaptive_cca_threshold_enabled;
+		ath9k_hw_start_nfcal(ah, do_fast_recalib);
+	} else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) & AR_PHY_AGC_CONTROL_NF))
 		ath9k_hw_getnf(ah, ah->curchan);
 
 	set_bit(NFCAL_INTF, &caldata->cal_flags);
diff -ruw linux-6.4/drivers/net/wireless/ath/ath9k/debug.c linux-6.4-fbx/drivers/net/wireless/ath/ath9k/debug.c
--- linux-6.4/drivers/net/wireless/ath/ath9k/debug.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath9k/debug.c	2023-05-22 20:06:42.255800922 +0200
@@ -1289,6 +1289,101 @@
 	.llseek = default_llseek,
 };
 
+static ssize_t read_file_cca_detection_level(struct file *file,
+					     char __user *user_buf,
+					     size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	struct ath_hw *ah = sc->sc_ah;
+	char buf[32];
+	unsigned int len;
+
+	len = sprintf(buf, "%d\n", ah->cca_detection_level);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_cca_detection_level(struct file *file,
+					      const char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	struct ath_hw *ah = sc->sc_ah;
+	long val;
+	char buf[32];
+	ssize_t len;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+
+	if (kstrtol(buf, 0, &val))
+		return -EINVAL;
+
+	if (val > 0)
+		return -EINVAL;
+
+	ah->cca_detection_level = val;
+
+	return count;
+}
+
+static const struct file_operations fops_cca_detection_level = {
+	.read = read_file_cca_detection_level,
+	.write = write_file_cca_detection_level,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t read_file_cca_detection_margin(struct file *file,
+					      char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	struct ath_hw *ah = sc->sc_ah;
+	char buf[32];
+	unsigned int len;
+
+	len = sprintf(buf, "%d\n", ah->cca_detection_margin);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_cca_detection_margin(struct file *file,
+					       const char __user *user_buf,
+					       size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	struct ath_hw *ah = sc->sc_ah;
+	unsigned long val;
+	char buf[32];
+	ssize_t len;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	ah->cca_detection_margin = val;
+
+	return count;
+}
+
+static const struct file_operations fops_cca_detection_margin = {
+	.read = read_file_cca_detection_margin,
+	.write = write_file_cca_detection_margin,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
 /* Ethtool support for get-stats */
 
 #define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
@@ -1497,6 +1592,11 @@
 
 	debugfs_create_file("nf_override", 0600,
 			    sc->debug.debugfs_phy, sc, &fops_nf_override);
-
+	debugfs_create_file("cca_detection_level", 0600,
+			    sc->debug.debugfs_phy, sc, &fops_cca_detection_level);
+	debugfs_create_file("cca_detection_margin", 0600,
+			    sc->debug.debugfs_phy, sc, &fops_cca_detection_margin);
+	debugfs_create_bool("adaptive_cca_threshold_enabled", 0600, sc->debug.debugfs_phy,
+			    &sc->sc_ah->adaptive_cca_threshold_enabled);
 	return 0;
 }
diff -ruw linux-6.4/drivers/net/wireless/ath/ath9k/hw-ops.h linux-6.4-fbx/drivers/net/wireless/ath/ath9k/hw-ops.h
--- linux-6.4/drivers/net/wireless/ath/ath9k/hw-ops.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath9k/hw-ops.h	2023-03-10 13:04:57.476924804 +0100
@@ -260,6 +260,14 @@
 	ath9k_hw_private_ops(ah)->set_radar_params(ah, &ah->radar_conf);
 }
 
+static inline void ath9k_hw_update_cca_threshold(struct ath_hw *ah)
+{
+	if (!ath9k_hw_private_ops(ah)->update_cca_threshold)
+		return;
+
+	ath9k_hw_private_ops(ah)->update_cca_threshold(ah);
+}
+
 static inline void ath9k_hw_init_cal_settings(struct ath_hw *ah)
 {
 	ath9k_hw_private_ops(ah)->init_cal_settings(ah);
diff -ruw linux-6.4/drivers/net/wireless/ath/ath9k/hw.c linux-6.4-fbx/drivers/net/wireless/ath/ath9k/hw.c
--- linux-6.4/drivers/net/wireless/ath/ath9k/hw.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath9k/hw.c	2023-05-22 20:06:42.259801029 +0200
@@ -395,6 +395,7 @@
 {
 	struct ath_common *common = ath9k_hw_common(ah);
 
+
 	ah->config.dma_beacon_response_time = 1;
 	ah->config.sw_beacon_response_time = 6;
 	ah->config.cwm_ignore_extcca = false;
@@ -1837,6 +1838,7 @@
 		ar9003_mci_2g5g_switch(ah, false);
 
 	ath9k_hw_loadnf(ah, ah->curchan);
+	ath9k_hw_update_cca_threshold(ah);
 	ath9k_hw_start_nfcal(ah, true);
 
 	if (AR_SREV_9271(ah))
@@ -2062,6 +2064,7 @@
 
 	if (AR_SREV_9300_20_OR_LATER(ah)) {
 		ath9k_hw_loadnf(ah, chan);
+		ath9k_hw_update_cca_threshold(ah);
 		ath9k_hw_start_nfcal(ah, true);
 	}
 
diff -ruw linux-6.4/drivers/net/wireless/ath/ath9k/hw.h linux-6.4-fbx/drivers/net/wireless/ath/ath9k/hw.h
--- linux-6.4/drivers/net/wireless/ath/ath9k/hw.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath9k/hw.h	2023-05-22 20:06:42.259801029 +0200
@@ -667,7 +667,7 @@
 				 struct ath_hw_radar_conf *conf);
 	int (*fast_chan_change)(struct ath_hw *ah, struct ath9k_channel *chan,
 				u8 *ini_reloaded);
-
+	void (*update_cca_threshold)(struct ath_hw *ah);
 	/* ANI */
 	void (*ani_cache_ini_regs)(struct ath_hw *ah);
 
@@ -988,6 +988,10 @@
 	bool msi_enabled;
 	u32 msi_mask;
 	u32 msi_reg;
+
+	bool adaptive_cca_threshold_enabled;
+	s16 cca_detection_level;
+	u16 cca_detection_margin;
 };
 
 struct ath_bus_ops {
diff -ruw linux-6.4/drivers/net/wireless/ath/ath9k/init.c linux-6.4-fbx/drivers/net/wireless/ath/ath9k/init.c
--- linux-6.4/drivers/net/wireless/ath/ath9k/init.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath9k/init.c	2023-05-22 20:06:42.259801029 +0200
@@ -80,6 +80,16 @@
 module_param_named(use_msi, ath9k_use_msi, int, 0444);
 MODULE_PARM_DESC(use_msi, "Use MSI instead of INTx if possible");
 
+int ath9k_use_adaptive_cca;
+module_param_named(use_adaptive_cca, ath9k_use_adaptive_cca, int, 0444);
+MODULE_PARM_DESC(use_adaptive_cca, "enable adaptive cca by default");
+
+#ifdef CONFIG_ATH9K_TX99
+int ath9k_enable_tx99;
+module_param_named(enable_tx99, ath9k_enable_tx99, int, 0444);
+MODULE_PARM_DESC(enable_tx99, "Enable TX99, which will disable STA/AP mode support");
+#endif
+
 bool is_ath9k_unloaded;
 
 #ifdef CONFIG_MAC80211_LEDS
@@ -714,6 +724,9 @@
 	ah->hw_version.devid = devid;
 	ah->ah_flags |= AH_USE_EEPROM;
 	ah->led_pin = -1;
+	ah->cca_detection_level = -70;
+	ah->cca_detection_margin = 3;
+	ah->adaptive_cca_threshold_enabled = ath9k_use_adaptive_cca;
 	ah->reg_ops.read = ath9k_ioread32;
 	ah->reg_ops.multi_read = ath9k_multi_ioread32;
 	ah->reg_ops.write = ath9k_iowrite32;
@@ -742,6 +755,7 @@
 	common->debug_mask = ath9k_debug;
 	common->btcoex_enabled = ath9k_btcoex_enable == 1;
 	common->disable_ani = false;
+	common->dfs_pulse_valid_diff_ts = 0;
 
 	/*
 	 * Platform quirks.
@@ -981,7 +995,7 @@
 			       NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
 			       NL80211_FEATURE_P2P_GO_CTWIN;
 
-	if (!IS_ENABLED(CONFIG_ATH9K_TX99)) {
+	if (!ath9k_tx99_enabled()) {
 		hw->wiphy->interface_modes =
 			BIT(NL80211_IFTYPE_P2P_GO) |
 			BIT(NL80211_IFTYPE_P2P_CLIENT) |
diff -ruw linux-6.4/drivers/net/wireless/ath/ath9k/link.c linux-6.4-fbx/drivers/net/wireless/ath/ath9k/link.c
--- linux-6.4/drivers/net/wireless/ath/ath9k/link.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath9k/link.c	2023-03-15 13:36:54.470058565 +0100
@@ -523,11 +523,14 @@
 		survey->filled |= SURVEY_INFO_TIME |
 			SURVEY_INFO_TIME_BUSY |
 			SURVEY_INFO_TIME_RX |
+			SURVEY_INFO_TIME_BSS_RX |
 			SURVEY_INFO_TIME_TX;
 		survey->time += cc->cycles / div;
 		survey->time_busy += cc->rx_busy / div;
 		survey->time_rx += cc->rx_frame / div;
 		survey->time_tx += cc->tx_frame / div;
+		/* convert rx airtime from usec to msec */
+		survey->time_bss_rx += cc->rx_bss_frame / 1000;
 	}
 
 	if (cc->cycles < div)
diff -ruw linux-6.4/drivers/net/wireless/ath/ath9k/main.c linux-6.4-fbx/drivers/net/wireless/ath/ath9k/main.c
--- linux-6.4/drivers/net/wireless/ath/ath9k/main.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath9k/main.c	2024-01-19 17:01:19.881847467 +0100
@@ -850,7 +850,7 @@
 static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix)
 {
 	struct ath_hw *ah = sc->sc_ah;
-	int i;
+	int i, j;
 	struct ath_txq *txq;
 	bool key_in_use = false;
 
@@ -868,8 +868,9 @@
 		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
 			int idx = txq->txq_tailidx;
 
-			while (!key_in_use &&
-			       !list_empty(&txq->txq_fifo[idx])) {
+			for (j = 0; !key_in_use &&
+			     !list_empty(&txq->txq_fifo[idx]) &&
+			     j < ATH_TXFIFO_DEPTH; j++) {
 				key_in_use = ath9k_txq_list_has_key(
 					&txq->txq_fifo[idx], keyix);
 				INCR(idx, ATH_TXFIFO_DEPTH);
@@ -1336,7 +1337,7 @@
 	struct ath_node *an = &avp->mcast_node;
 
 	mutex_lock(&sc->mutex);
-	if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
+	if (ath9k_tx99_enabled()) {
 		if (sc->cur_chan->nvifs >= 1) {
 			mutex_unlock(&sc->mutex);
 			return -EOPNOTSUPP;
@@ -1386,7 +1387,7 @@
 
 	mutex_lock(&sc->mutex);
 
-	if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
+	if (ath9k_tx99_enabled()) {
 		mutex_unlock(&sc->mutex);
 		return -EOPNOTSUPP;
 	}
@@ -1446,7 +1447,7 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 
-	if (IS_ENABLED(CONFIG_ATH9K_TX99))
+	if (ath9k_tx99_enabled())
 		return;
 
 	sc->ps_enabled = true;
@@ -1465,7 +1466,7 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 
-	if (IS_ENABLED(CONFIG_ATH9K_TX99))
+	if (ath9k_tx99_enabled())
 		return;
 
 	sc->ps_enabled = false;
@@ -1541,7 +1542,7 @@
 		ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef);
 	}
 
-	if (changed & IEEE80211_CONF_CHANGE_POWER)
+	if ((changed & IEEE80211_CONF_CHANGE_POWER) && !(ah->tpc_enabled))
 		ath9k_set_txpower(sc, NULL);
 
 	mutex_unlock(&sc->mutex);
@@ -2070,7 +2071,7 @@
 	unsigned long flags;
 	int pos;
 
-	if (IS_ENABLED(CONFIG_ATH9K_TX99))
+	if (ath9k_tx99_enabled())
 		return -EOPNOTSUPP;
 
 	spin_lock_irqsave(&common->cc_lock, flags);
@@ -2120,7 +2121,7 @@
 	struct ath_softc *sc = hw->priv;
 	struct ath_hw *ah = sc->sc_ah;
 
-	if (IS_ENABLED(CONFIG_ATH9K_TX99))
+	if (ath9k_tx99_enabled())
 		return;
 
 	mutex_lock(&sc->mutex);
@@ -2383,7 +2384,22 @@
 {
 	struct ath_softc *sc = hw->priv;
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct cfg80211_chan_def *chandef = &sc->cur_chan->chandef;
+	struct ieee80211_channel *chan = chandef->chan;
+	int pos = chan->hw_value;
 	set_bit(ATH_OP_SCANNING, &common->op_flags);
+
+	/* Reset current survey */
+	if (!sc->cur_chan->offchannel) {
+		if (sc->cur_survey != &sc->survey[pos]) {
+			if (sc->cur_survey)
+				sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
+			sc->cur_survey = &sc->survey[pos];
+		}
+
+		memset(sc->cur_survey, 0, sizeof(struct survey_info));
+		sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
+	}
 }
 
 static void ath9k_sw_scan_complete(struct ieee80211_hw *hw,
diff -ruw linux-6.4/drivers/net/wireless/ath/ath9k/recv.c linux-6.4-fbx/drivers/net/wireless/ath/ath9k/recv.c
--- linux-6.4/drivers/net/wireless/ath/ath9k/recv.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath9k/recv.c	2023-03-15 13:36:54.470058565 +0100
@@ -377,7 +377,7 @@
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	u32 rfilt;
 
-	if (IS_ENABLED(CONFIG_ATH9K_TX99))
+	if (ath9k_tx99_enabled())
 		return 0;
 
 	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
@@ -1018,6 +1018,7 @@
 	struct ieee80211_rx_status *rxs;
 	const struct ieee80211_rate *rate;
 	bool is_sgi, is_40, is_sp;
+	unsigned long flags;
 	int phy;
 	u16 len = rs->rs_datalen;
 	u32 airtime = 0;
@@ -1052,6 +1053,10 @@
 						len, rxs->rate_idx, is_sp);
 	}
 
+	spin_lock_irqsave(&common->cc_lock, flags);
+	common->cc_survey.rx_bss_frame += airtime;
+	spin_unlock_irqrestore(&common->cc_lock, flags);
+
 	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
 exit:
 	rcu_read_unlock();
diff -ruw linux-6.4/drivers/net/wireless/ath/dfs_pattern_detector.c linux-6.4-fbx/drivers/net/wireless/ath/dfs_pattern_detector.c
--- linux-6.4/drivers/net/wireless/ath/dfs_pattern_detector.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/dfs_pattern_detector.c	2024-01-25 13:40:06.020812288 +0100
@@ -275,6 +275,7 @@
 {
 	u32 i;
 	struct channel_detector *cd;
+	int diff_ts;
 
 	/*
 	 * pulses received for a non-supported or un-initialized
@@ -287,8 +288,9 @@
 	if (cd == NULL)
 		return false;
 
+	diff_ts = event->ts - dpd->last_pulse_ts;
 	/* reset detector on time stamp wraparound, caused by TSF reset */
-	if (event->ts < dpd->last_pulse_ts)
+	if (diff_ts < dpd->common->dfs_pulse_valid_diff_ts)
 		dpd_reset(dpd);
 	dpd->last_pulse_ts = event->ts;
 
diff -ruw linux-6.4/drivers/net/wireless/ath/dfs_pattern_detector.h linux-6.4-fbx/drivers/net/wireless/ath/dfs_pattern_detector.h
--- linux-6.4/drivers/net/wireless/ath/dfs_pattern_detector.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/dfs_pattern_detector.h	2023-02-27 20:51:59.590934925 +0100
@@ -24,7 +24,7 @@
 /* tolerated deviation of radar time stamp in usecs on both sides
  * TODO: this might need to be HW-dependent
  */
-#define PRI_TOLERANCE	16
+#define PRI_TOLERANCE	6
 
 /**
  * struct ath_dfs_pool_stats - DFS Statistics for global pools
diff -ruw linux-6.4/drivers/net/wireless/ath/key.c linux-6.4-fbx/drivers/net/wireless/ath/key.c
--- linux-6.4/drivers/net/wireless/ath/key.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/key.c	2023-05-22 20:06:42.267801242 +0200
@@ -524,7 +524,7 @@
 			idx = ath_reserve_key_cache_slot(common, key->cipher);
 			break;
 		default:
-			idx = key->keyidx;
+			idx = ath_reserve_key_cache_slot(common, key->cipher);
 			break;
 		}
 	} else if (key->keyidx) {
diff -ruw linux-6.4/drivers/net/wireless/ath/regd.c linux-6.4-fbx/drivers/net/wireless/ath/regd.c
--- linux-6.4/drivers/net/wireless/ath/regd.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/ath/regd.c	2023-02-27 20:46:50.166676215 +0100
@@ -345,6 +345,10 @@
 	struct ieee80211_channel *ch;
 	unsigned int i;
 
+#ifdef CONFIG_ATH_REG_IGNORE
+	return;
+#endif
+
 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		if (!wiphy->bands[band])
 			continue;
@@ -379,6 +383,10 @@
 {
 	struct ieee80211_supported_band *sband;
 
+#ifdef CONFIG_ATH_REG_IGNORE
+	return;
+#endif
+
 	sband = wiphy->bands[NL80211_BAND_2GHZ];
 	if (!sband)
 		return;
@@ -408,6 +416,9 @@
 	struct ieee80211_channel *ch;
 	unsigned int i;
 
+#ifdef CONFIG_ATH_REG_IGNORE
+	return;
+#endif
 	if (!wiphy->bands[NL80211_BAND_5GHZ])
 		return;
 
@@ -640,6 +651,11 @@
 	const struct ieee80211_regdomain *regd;
 
 	wiphy->reg_notifier = reg_notifier;
+
+#ifdef CONFIG_ATH_REG_IGNORE
+	return 0;
+#endif
+
 	wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
 				   REGULATORY_CUSTOM_REG;
 
@@ -704,7 +720,7 @@
 	    regdmn == CTRY_DEFAULT) {
 		printk(KERN_DEBUG "ath: EEPROM indicates default "
 		       "country code should be used\n");
-		reg->country_code = CTRY_UNITED_STATES;
+		reg->country_code = CTRY_FRANCE;
 	}
 
 	if (reg->country_code == CTRY_DEFAULT) {
diff -ruw linux-6.4/drivers/net/wireless/marvell/Kconfig linux-6.4-fbx/drivers/net/wireless/marvell/Kconfig
--- linux-6.4/drivers/net/wireless/marvell/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/marvell/Kconfig	2023-03-10 17:18:24.474042758 +0100
@@ -25,4 +25,8 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called mwl8k.  If unsure, say N.
 
+config MWL8K_NEW
+	tristate "Marvell 88W8xxx PCI/PCIe NEW"
+	depends on MAC80211 && PCI
+
 endif # WLAN_VENDOR_MARVELL
diff -ruw linux-6.4/drivers/net/wireless/marvell/Makefile linux-6.4-fbx/drivers/net/wireless/marvell/Makefile
--- linux-6.4/drivers/net/wireless/marvell/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/net/wireless/marvell/Makefile	2023-03-10 17:18:24.474042758 +0100
@@ -5,3 +5,4 @@
 obj-$(CONFIG_MWIFIEX)	+= mwifiex/
 
 obj-$(CONFIG_MWL8K)	+= mwl8k.o
+obj-$(CONFIG_MWL8K_NEW)	+= mwl8k_new/
diff -ruw linux-6.4/drivers/nvmem/Kconfig linux-6.4-fbx/drivers/nvmem/Kconfig
--- linux-6.4/drivers/nvmem/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/nvmem/Kconfig	2023-05-22 20:06:42.603810179 +0200
@@ -59,6 +59,9 @@
 	  This driver provides support for Broadcom's NVRAM that can be accessed
 	  using I/O mapping.
 
+config NVMEM_IGNORE_RO
+	bool "ignore read-only flags"
+
 config NVMEM_IMX_IIM
 	tristate "i.MX IC Identification Module support"
 	depends on ARCH_MXC || COMPILE_TEST
diff -ruw linux-6.4/drivers/nvmem/core.c linux-6.4-fbx/drivers/nvmem/core.c
--- linux-6.4/drivers/nvmem/core.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/nvmem/core.c	2023-05-22 20:06:42.607810285 +0200
@@ -930,8 +930,10 @@
 	if (rval)
 		goto err_put_device;
 
+#ifndef CONFIG_NVMEM_IGNORE_RO
 	nvmem->read_only = device_property_present(config->dev, "read-only") ||
 			   config->read_only || !nvmem->reg_write;
+#endif
 
 #ifdef CONFIG_NVMEM_SYSFS
 	nvmem->dev.groups = nvmem_dev_groups;
diff -ruw linux-6.4/drivers/of/Kconfig linux-6.4-fbx/drivers/of/Kconfig
--- linux-6.4/drivers/of/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/of/Kconfig	2023-05-22 20:06:42.607810285 +0200
@@ -47,6 +47,11 @@
 
 	  If unsure, say N here, but this option is safe to enable.
 
+config OF_DTB_BUILTIN_LIST
+	string "Link given list of DTB files into kernel"
+	help
+	  Specify filename without .dtb extension
+
 config OF_FLATTREE
 	bool
 	select DTC
@@ -102,4 +107,11 @@
 config OF_NUMA
 	bool
 
+config OF_CONFIGFS
+	bool "Device Tree Overlay ConfigFS interface"
+	select CONFIGFS_FS
+	select OF_OVERLAY
+	help
+	  Enable a simple user-space driven DT overlay interface.
+
 endif # OF
diff -ruw linux-6.4/drivers/of/Makefile linux-6.4-fbx/drivers/of/Makefile
--- linux-6.4/drivers/of/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/of/Makefile	2023-05-22 20:06:42.607810285 +0200
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-y = base.o cpu.o device.o module.o platform.o property.o
 obj-$(CONFIG_OF_KOBJ) += kobj.o
+obj-$(CONFIG_OF_CONFIGFS) += configfs.o
 obj-$(CONFIG_OF_DYNAMIC) += dynamic.o
 obj-$(CONFIG_OF_FLATTREE) += fdt.o
 obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o
diff -ruw linux-6.4/drivers/of/fdt.c linux-6.4-fbx/drivers/of/fdt.c
--- linux-6.4/drivers/of/fdt.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/of/fdt.c	2023-05-22 20:06:42.611810392 +0200
@@ -29,6 +29,7 @@
 
 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
 #include <asm/page.h>
+#include <asm-generic/vmlinux.lds.h>
 
 #include "of_private.h"
 
@@ -787,6 +788,39 @@
 	return 0;
 }
 
+/*
+ * iterate list of built-in dtb to find a compatible match
+ */
+const void __init *of_fdt_find_compatible_dtb(const char *name)
+{
+	struct fdt_header {
+		__be32 magic;
+		__be32 totalsize;
+	};
+	const struct fdt_header *blob, *best;
+	unsigned int best_score = ~0;
+
+	best = NULL;
+	blob = (const struct fdt_header *)__dtb_start;
+	while ((void *)blob < (void *)__dtb_end &&
+	       (be32_to_cpu(blob->magic) == OF_DT_HEADER)) {
+		unsigned int score;
+		u32 size;
+
+		score = of_fdt_is_compatible(blob, 0, name);
+		if (score > 0 && score < best_score) {
+			best = blob;
+			best_score = score;
+		}
+
+		size = be32_to_cpu(blob->totalsize);
+		blob = (const struct fdt_header *)
+			PTR_ALIGN((void *)blob + size, STRUCT_ALIGNMENT);
+	}
+
+	return best;
+}
+
 /**
  * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
  * @node: node to test
diff -ruw linux-6.4/drivers/of/overlay.c linux-6.4-fbx/drivers/of/overlay.c
--- linux-6.4/drivers/of/overlay.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/of/overlay.c	2023-06-27 11:47:15.875861725 +0200
@@ -358,7 +358,7 @@
 	}
 
 	if (!of_node_check_flag(target->np, OF_OVERLAY))
-		pr_err("WARNING: memory leak will occur if overlay removed, property: %pOF/%s\n",
+		pr_debug("WARNING: memory leak will occur if overlay removed, property: %pOF/%s\n",
 		       target->np, new_prop->name);
 
 	if (ret) {
diff -ruw linux-6.4/drivers/pci/controller/Kconfig linux-6.4-fbx/drivers/pci/controller/Kconfig
--- linux-6.4/drivers/pci/controller/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/pci/controller/Kconfig	2023-05-22 20:06:42.623810711 +0200
@@ -179,7 +179,6 @@
 	depends on MVEBU_MBUS
 	depends on ARM
 	depends on OF
-	depends on BROKEN
 	select PCI_BRIDGE_EMUL
 	help
 	 Add support for Marvell EBU PCIe controller. This PCIe controller
@@ -208,6 +207,12 @@
 	  Say Y here if you want to enable Gen3 PCIe controller support on
 	  MediaTek SoCs.
 
+config PCIE_BCM63XX
+	tristate "BCM63XX SoCs PCIe endpoint driver."
+	depends on ARCH_BCMBCA || COMPILE_TEST
+	depends on OF
+	depends on PCI_MSI
+
 config PCIE_MT7621
 	tristate "MediaTek MT7621 PCIe controller"
 	depends on SOC_MT7621 || COMPILE_TEST
diff -ruw linux-6.4/drivers/pci/controller/Makefile linux-6.4-fbx/drivers/pci/controller/Makefile
--- linux-6.4/drivers/pci/controller/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/pci/controller/Makefile	2023-05-22 20:06:42.623810711 +0200
@@ -33,6 +33,7 @@
 obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
 obj-$(CONFIG_PCIE_MEDIATEK_GEN3) += pcie-mediatek-gen3.o
 obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o
+obj-$(CONFIG_PCIE_BCM63XX) += pcie-bcm63xx.o
 obj-$(CONFIG_VMD) += vmd.o
 obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
 obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o
diff -ruw linux-6.4/drivers/pci/controller/dwc/pcie-designware-host.c linux-6.4-fbx/drivers/pci/controller/dwc/pcie-designware-host.c
--- linux-6.4/drivers/pci/controller/dwc/pcie-designware-host.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/pci/controller/dwc/pcie-designware-host.c	2024-01-29 14:45:04.485460473 +0100
@@ -492,7 +492,9 @@
 	}
 
 	/* Ignore errors, the link may come up later */
-	dw_pcie_wait_for_link(pci);
+	ret = dw_pcie_wait_for_link(pci);
+	if (ret == -ETIMEDOUT && pci->ops && pci->ops->link_failed)
+		pci->ops->link_failed(pci);
 
 	bridge->sysdata = pp;
 
diff -ruw linux-6.4/drivers/pci/controller/dwc/pcie-designware.h linux-6.4-fbx/drivers/pci/controller/dwc/pcie-designware.h
--- linux-6.4/drivers/pci/controller/dwc/pcie-designware.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/pci/controller/dwc/pcie-designware.h	2023-06-07 16:07:24.940750559 +0200
@@ -364,6 +364,7 @@
 	void    (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
 			      size_t size, u32 val);
 	int	(*link_up)(struct dw_pcie *pcie);
+	void	(*link_failed)(struct dw_pcie *pcie);
 	int	(*start_link)(struct dw_pcie *pcie);
 	void	(*stop_link)(struct dw_pcie *pcie);
 };
diff -ruw linux-6.4/drivers/pci/controller/dwc/pcie-qcom.c linux-6.4-fbx/drivers/pci/controller/dwc/pcie-qcom.c
--- linux-6.4/drivers/pci/controller/dwc/pcie-qcom.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/pci/controller/dwc/pcie-qcom.c	2024-01-29 18:45:58.808367055 +0100
@@ -107,6 +107,7 @@
 
 /* PARF_SLV_ADDR_SPACE_SIZE register value */
 #define SLV_ADDR_SPACE_SZ			0x10000000
+#define SLV_ADDR_SPACE_SZ_1_27_0		0x08000000
 
 /* PARF_MHI_CLOCK_RESET_CTRL register fields */
 #define AHB_CLK_EN				BIT(0)
@@ -202,10 +203,11 @@
 	struct reset_control *rst;
 };
 
-#define QCOM_PCIE_2_9_0_MAX_CLOCKS		5
 struct qcom_pcie_resources_2_9_0 {
-	struct clk_bulk_data clks[QCOM_PCIE_2_9_0_MAX_CLOCKS];
+	struct clk_bulk_data *clks;
+	struct regulator *vddpe;
 	struct reset_control *rst;
+	int num_clks;
 };
 
 union qcom_pcie_resources {
@@ -245,6 +247,9 @@
 	const struct qcom_pcie_cfg *cfg;
 	struct dentry *debugfs;
 	bool suspended;
+	bool ep_reset_asserted;
+
+	struct mutex reinit_link_mutex;
 };
 
 #define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
@@ -252,6 +257,7 @@
 static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
 {
 	gpiod_set_value_cansleep(pcie->reset, 1);
+	pcie->ep_reset_asserted = true;
 	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
 }
 
@@ -260,6 +266,7 @@
 	/* Ensure that PERST has been asserted for at least 100 ms */
 	msleep(100);
 	gpiod_set_value_cansleep(pcie->reset, 0);
+	pcie->ep_reset_asserted = false;
 	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
 }
 
@@ -1050,17 +1057,18 @@
 	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
 	struct dw_pcie *pci = pcie->pci;
 	struct device *dev = pci->dev;
-	int ret;
 
-	res->clks[0].id = "iface";
-	res->clks[1].id = "axi_m";
-	res->clks[2].id = "axi_s";
-	res->clks[3].id = "axi_bridge";
-	res->clks[4].id = "rchng";
+	res->vddpe = devm_regulator_get_optional(dev, "vddpe-3v3");
+	if (IS_ERR(res->vddpe)) {
+		int ret = PTR_ERR(res->vddpe);
+		if (ret != -ENODEV)
+			return PTR_ERR(res->vddpe);
+		res->vddpe = NULL;
+	}
 
-	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
-	if (ret < 0)
-		return ret;
+	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+	if (res->num_clks < 0)
+		return res->num_clks;
 
 	res->rst = devm_reset_control_array_get_exclusive(dev);
 	if (IS_ERR(res->rst))
@@ -1073,7 +1081,7 @@
 {
 	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
 
-	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+	clk_bulk_disable_unprepare(res->num_clks, res->clks);
 }
 
 static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
@@ -1082,6 +1090,14 @@
 	struct device *dev = pcie->pci->dev;
 	int ret;
 
+	if (res->vddpe) {
+		ret = regulator_enable(res->vddpe);
+		if (ret) {
+			dev_err(dev, "cannot enable vddpe-3v3 regulator\n");
+			return ret;
+		}
+	}
+
 	ret = reset_control_assert(res->rst);
 	if (ret) {
 		dev_err(dev, "reset assert failed (%d)\n", ret);
@@ -1102,19 +1118,16 @@
 
 	usleep_range(2000, 2500);
 
-	return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+	return clk_bulk_prepare_enable(res->num_clks, res->clks);
 }
 
-static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
+static int qcom_pcie_post_init(struct qcom_pcie *pcie)
 {
 	struct dw_pcie *pci = pcie->pci;
 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
 	u32 val;
 	int i;
 
-	writel(SLV_ADDR_SPACE_SZ,
-		pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
-
 	val = readl(pcie->parf + PARF_PHY_CTRL);
 	val &= ~PHY_TEST_PWR_DOWN;
 	writel(val, pcie->parf + PARF_PHY_CTRL);
@@ -1151,6 +1164,26 @@
 	return 0;
 }
 
+static int qcom_pcie_post_init_1_27_0(struct qcom_pcie *pcie)
+{
+	writel(SLV_ADDR_SPACE_SZ_1_27_0,
+	       pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
+
+	qcom_pcie_post_init(pcie);
+
+	return 0;
+}
+
+static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
+{
+	writel(SLV_ADDR_SPACE_SZ,
+	       pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
+
+	qcom_pcie_post_init(pcie);
+
+	return 0;
+}
+
 static int qcom_pcie_link_up(struct dw_pcie *pci)
 {
 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
@@ -1291,6 +1324,15 @@
 	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
 };
 
+/* Qcom IP rev.: 1.27.0  Synopsys IP rev.: 5.80a */
+static const struct qcom_pcie_ops ops_1_27_0 = {
+	.get_resources = qcom_pcie_get_resources_2_9_0,
+	.init = qcom_pcie_init_2_9_0,
+	.post_init = qcom_pcie_post_init_1_27_0,
+	.deinit = qcom_pcie_deinit_2_9_0,
+	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
 static const struct qcom_pcie_cfg cfg_1_0_0 = {
 	.ops = &ops_1_0_0,
 };
@@ -1323,6 +1365,10 @@
 	.ops = &ops_2_9_0,
 };
 
+static const struct qcom_pcie_cfg cfg_1_27_0 = {
+	.ops = &ops_1_27_0,
+};
+
 static const struct dw_pcie_ops dw_pcie_ops = {
 	.link_up = qcom_pcie_link_up,
 	.start_link = qcom_pcie_start_link,
@@ -1433,6 +1479,126 @@
 				    qcom_pcie_link_transition_count);
 }
 
+/*
+ * if link is down, try to bring it back up again by calling host
+ * deinit / reinit functions, and kicking the LTSSM again.
+ */
+static ssize_t qcom_pcie_store_reinit_link(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count)
+{
+	struct qcom_pcie *pcie = dev_get_drvdata(dev);
+
+	if (!count)
+		goto done;
+
+	if (buf[0] != '1')
+		goto done;
+
+	if (!dw_pcie_link_up(pcie->pci)) {
+		int ret;
+
+		dev_info(dev, "link is down: doing host deinit -> init.\n");
+
+		mutex_lock(&pcie->reinit_link_mutex);
+		qcom_pcie_host_deinit(&pcie->pci->pp);
+		qcom_pcie_host_init(&pcie->pci->pp);
+		qcom_pcie_start_link(pcie->pci);
+		mutex_unlock(&pcie->reinit_link_mutex);
+
+		ret = dw_pcie_setup_rc(&pcie->pci->pp);
+		if (ret)
+			dev_warn(dev, "dw_pcie_setup_rc() failed: %d\n", ret);
+	} else
+		dev_dbg(dev, "link not down: doing nothing.\n");
+
+done:
+	return count;
+}
+
+static DEVICE_ATTR(reinit_link, 0200, NULL, qcom_pcie_store_reinit_link);
+
+/*
+ * report current link state seen by the root complex.
+ */
+static ssize_t qcom_pcie_show_link(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct qcom_pcie *pcie = dev_get_drvdata(dev);
+
+	mutex_lock(&pcie->reinit_link_mutex);
+	buf[0] = qcom_pcie_link_up(pcie->pci) ? '1' : '0';
+	buf[1] = '\n';
+	mutex_unlock(&pcie->reinit_link_mutex);
+
+	return 2;
+}
+
+static DEVICE_ATTR(link, 0400, qcom_pcie_show_link, NULL);
+
+/*
+ * assert the endpoint reset GPIO. preferably, all PCI device should
+ * be removed from the bus via the sysfs remove attribute, and drivers
+ * unbound beforehand.
+ */
+static ssize_t qcom_pcie_store_ep_reset_assert(struct device *dev,
+					       struct device_attribute *attr,
+					       const char *buf, size_t count)
+{
+	struct qcom_pcie *pcie = dev_get_drvdata(dev);
+
+	if (!count)
+		goto done;
+
+	if (buf[0] != '1')
+		goto done;
+
+	qcom_ep_reset_assert(pcie);
+
+done:
+	return count;
+}
+
+static DEVICE_ATTR(ep_reset_assert, 0200, NULL,
+		   qcom_pcie_store_ep_reset_assert);
+
+/*
+ * reset assertion status
+ */
+static ssize_t qcom_pcie_show_ep_reset_asserted(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct qcom_pcie *pcie = dev_get_drvdata(dev);
+
+	buf[0] = pcie->ep_reset_asserted ? '1' : '0';
+	buf[1] = '\n';
+
+	return 2;
+}
+
+static DEVICE_ATTR(ep_reset_asserted, 0400, qcom_pcie_show_ep_reset_asserted,
+		   NULL);
+
+
+static const struct attribute *qcom_pcie_sysfs_attributes[] = {
+	&dev_attr_reinit_link.attr,
+	&dev_attr_link.attr,
+	&dev_attr_ep_reset_assert.attr,
+	&dev_attr_ep_reset_asserted.attr,
+	NULL,
+};
+
+/*
+ * create attributes for PCIe link & EP reset GPIO assert control.
+ */
+static int qcom_pcie_init_sysfs(struct qcom_pcie *pcie)
+{
+	return sysfs_create_files(&pcie->pci->dev->kobj,
+				  qcom_pcie_sysfs_attributes);
+}
+
 static int qcom_pcie_probe(struct platform_device *pdev)
 {
 	const struct qcom_pcie_cfg *pcie_cfg;
@@ -1467,6 +1633,7 @@
 	pp = &pci->pp;
 
 	pcie->pci = pci;
+	mutex_init(&pcie->reinit_link_mutex);
 
 	pcie->cfg = pcie_cfg;
 
@@ -1531,6 +1698,9 @@
 	if (pcie->mhi)
 		qcom_pcie_init_debugfs(pcie);
 
+	if (qcom_pcie_init_sysfs(pcie) < 0)
+		goto err_phy_exit;
+
 	return 0;
 
 err_phy_exit:
@@ -1607,6 +1777,7 @@
 	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
 	{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
 	{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
+	{ .compatible = "qcom,pcie-ipq9574", .data = &cfg_1_27_0 },
 	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
 	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
 	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
@@ -1635,6 +1806,7 @@
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1108, qcom_fixup_class);
 
 static const struct dev_pm_ops qcom_pcie_pm_ops = {
 	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
diff -ruw linux-6.4/drivers/pci/pci-sysfs.c linux-6.4-fbx/drivers/pci/pci-sysfs.c
--- linux-6.4/drivers/pci/pci-sysfs.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/pci/pci-sysfs.c	2024-01-26 20:19:34.752196606 +0100
@@ -1149,6 +1149,7 @@
 {
 	int i;
 
+	mutex_lock(&pdev->sysfs_init_lock);
 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 		struct bin_attribute *res_attr;
 
@@ -1164,6 +1165,9 @@
 			kfree(res_attr);
 		}
 	}
+
+	pdev->sysfs_init_done = 0;
+	mutex_unlock(&pdev->sysfs_init_lock);
 }
 
 static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
@@ -1226,6 +1230,12 @@
 	int i;
 	int retval;
 
+	mutex_lock(&pdev->sysfs_init_lock);
+	if (pdev->sysfs_init_done) {
+		mutex_unlock(&pdev->sysfs_init_lock);
+		return 0;
+	}
+
 	/* Expose the PCI resources from this device as files */
 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 
@@ -1239,10 +1249,14 @@
 		    pdev->resource[i].flags & IORESOURCE_PREFETCH)
 			retval = pci_create_attr(pdev, i, 1);
 		if (retval) {
+			mutex_unlock(&pdev->sysfs_init_lock);
 			pci_remove_resource_files(pdev);
 			return retval;
 		}
 	}
+
+	pdev->sysfs_init_done = 1;
+	mutex_unlock(&pdev->sysfs_init_lock);
 	return 0;
 }
 #else /* !(defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) */
diff -ruw linux-6.4/drivers/pci/pcie/portdrv.c linux-6.4-fbx/drivers/pci/pcie/portdrv.c
--- linux-6.4/drivers/pci/pcie/portdrv.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/pci/pcie/portdrv.c	2023-11-22 18:41:18.370269029 +0100
@@ -21,6 +21,8 @@
 #include "../pci.h"
 #include "portdrv.h"
 
+static DEFINE_MUTEX(irq_alloc_mutex);
+
 /*
  * The PCIe Capability Interrupt Message Number (PCIe r3.1, sec 7.8.2) must
  * be one of the first 32 MSI-X entries.  Per PCI r3.0, sec 6.8.3.1, MSI
@@ -114,16 +116,21 @@
 	int nr_entries, nvec, pcie_irq;
 	u32 pme = 0, aer = 0, dpc = 0;
 
+	mutex_lock(&irq_alloc_mutex);
+
 	/* Allocate the maximum possible number of MSI/MSI-X vectors */
 	nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES,
 			PCI_IRQ_MSIX | PCI_IRQ_MSI);
-	if (nr_entries < 0)
+	if (nr_entries < 0) {
+		mutex_unlock(&irq_alloc_mutex);
 		return nr_entries;
+	}
 
 	/* See how many and which Interrupt Message Numbers we actually use */
 	nvec = pcie_message_numbers(dev, mask, &pme, &aer, &dpc);
 	if (nvec > nr_entries) {
 		pci_free_irq_vectors(dev);
+		mutex_unlock(&irq_alloc_mutex);
 		return -EIO;
 	}
 
@@ -143,9 +150,13 @@
 
 		nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
 				PCI_IRQ_MSIX | PCI_IRQ_MSI);
-		if (nr_entries < 0)
+		if (nr_entries < 0) {
+			mutex_unlock(&irq_alloc_mutex);
 			return nr_entries;
 	}
+	}
+
+	mutex_unlock(&irq_alloc_mutex);
 
 	/* PME, hotplug and bandwidth notification share an MSI/MSI-X vector */
 	if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
diff -ruw linux-6.4/drivers/pci/probe.c linux-6.4-fbx/drivers/pci/probe.c
--- linux-6.4/drivers/pci/probe.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/pci/probe.c	2024-01-29 18:35:21.690960369 +0100
@@ -2310,6 +2310,7 @@
 		return NULL;
 
 	INIT_LIST_HEAD(&dev->bus_list);
+	mutex_init(&dev->sysfs_init_lock);
 	dev->dev.type = &pci_dev_type;
 	dev->bus = pci_bus_get(bus);
 	dev->driver_exclusive_resource = (struct resource) {
@@ -3270,6 +3271,34 @@
 	return max;
 }
 
+/*
+ * Walks the PCI/PCIe tree to find the first instance of a PCIe device and
+ * hands off the PCIe bus to pcie_bus_configure_settings to walk the rest.
+ */
+static int pcie_rescan_bus_configure_settings(struct pci_dev *dev, void *data)
+{
+	if (pci_is_pcie(dev)) {
+		struct pci_bus *child, *bus = dev->bus;
+
+		list_for_each_entry(child, &bus->children, node)
+			pcie_bus_configure_settings(child);
+
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * pci_bus_configure_settings - Configure bus settings
+ * @bus: PCI/PCIE bus to configure
+ *
+ * Currently only configures PCIe bus settings related to MPS and MRRS.
+ */
+static void pci_bus_configure_settings(struct pci_bus *bus)
+{
+	pci_walk_bus(bus, pcie_rescan_bus_configure_settings, NULL);
+}
+
 /**
  * pci_rescan_bus - Scan a PCI bus for devices
  * @bus: PCI bus to scan
@@ -3285,6 +3314,7 @@
 
 	max = pci_scan_child_bus(bus);
 	pci_assign_unassigned_bus_resources(bus);
+	pci_bus_configure_settings(bus);
 	pci_bus_add_devices(bus);
 
 	return max;
diff -ruw linux-6.4/drivers/pci/quirks.c linux-6.4-fbx/drivers/pci/quirks.c
--- linux-6.4/drivers/pci/quirks.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/pci/quirks.c	2023-06-27 11:47:15.875861725 +0200
@@ -3150,6 +3150,8 @@
 	dev->is_hotplug_bridge = 1;
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PI7C9X20303SL,
+			 quirk_hotplug_bridge);
 
 /*
  * This is a quirk for the Ricoh MMC controller found as a part of some
diff -ruw linux-6.4/drivers/phy/Kconfig linux-6.4-fbx/drivers/phy/Kconfig
--- linux-6.4/drivers/phy/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/phy/Kconfig	2023-05-22 20:06:42.691812520 +0200
@@ -71,6 +71,10 @@
 	  functional modes using gpios and sets the attribute max link
 	  rate, for CAN drivers.
 
+config XDSL_PHY_API
+	tristate "xDSL PHY API"
+	select GENERIC_PHY
+
 source "drivers/phy/allwinner/Kconfig"
 source "drivers/phy/amlogic/Kconfig"
 source "drivers/phy/broadcom/Kconfig"
diff -ruw linux-6.4/drivers/phy/Makefile linux-6.4-fbx/drivers/phy/Makefile
--- linux-6.4/drivers/phy/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/phy/Makefile	2023-05-22 20:06:42.691812520 +0200
@@ -10,6 +10,8 @@
 obj-$(CONFIG_PHY_XGENE)			+= phy-xgene.o
 obj-$(CONFIG_PHY_PISTACHIO_USB)		+= phy-pistachio-usb.o
 obj-$(CONFIG_USB_LGM_PHY)		+= phy-lgm-usb.o
+obj-$(CONFIG_XDSL_PHY_API)		+= xdsl_phy_api.o
+
 obj-y					+= allwinner/	\
 					   amlogic/	\
 					   broadcom/	\
diff -ruw linux-6.4/drivers/phy/broadcom/Kconfig linux-6.4-fbx/drivers/phy/broadcom/Kconfig
--- linux-6.4/drivers/phy/broadcom/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/phy/broadcom/Kconfig	2023-05-22 20:06:42.691812520 +0200
@@ -39,6 +39,11 @@
 	help
 	  Enable this to support the Broadcom Kona USB 2.0 PHY.
 
+config PHY_BRCM_USB_63138
+	tristate "Broadcom 63138 USB 2.0/3.0 PHY Driver"
+	depends on ARCH_BCMBCA || COMPILE_TEST
+	select GENERIC_PHY
+
 config PHY_BCM_NS_USB2
 	tristate "Broadcom Northstar USB 2.0 PHY Driver"
 	depends on ARCH_BCM_IPROC || COMPILE_TEST
diff -ruw linux-6.4/drivers/phy/broadcom/Makefile linux-6.4-fbx/drivers/phy/broadcom/Makefile
--- linux-6.4/drivers/phy/broadcom/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/phy/broadcom/Makefile	2023-03-09 15:06:11.376234546 +0100
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_PHY_BCM63XX_USBH)		+= phy-bcm63xx-usbh.o
 obj-$(CONFIG_PHY_CYGNUS_PCIE)		+= phy-bcm-cygnus-pcie.o
+obj-$(CONFIG_PHY_BRCM_USB_63138)	+= phy-brcm-usb-63138.o
 obj-$(CONFIG_BCM_KONA_USB2_PHY)		+= phy-bcm-kona-usb2.o
 obj-$(CONFIG_PHY_BCM_NS_USB2)		+= phy-bcm-ns-usb2.o
 obj-$(CONFIG_PHY_BCM_NS_USB3)		+= phy-bcm-ns-usb3.o
diff -ruw linux-6.4/drivers/phy/marvell/Kconfig linux-6.4-fbx/drivers/phy/marvell/Kconfig
--- linux-6.4/drivers/phy/marvell/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/phy/marvell/Kconfig	2023-03-09 15:22:14.035699334 +0100
@@ -136,3 +136,11 @@
 	  components on MMP3-based boards.
 
 	  To compile this driver as a module, choose M here.
+
+config PHY_UTMI_CP110
+	bool "Marvell CP110 UTMI PHY Driver"
+	depends on ARCH_MVEBU
+	depends on OF
+	help
+	  Enable this to support Marvell USB2.0 PHY driver for Marvell
+	  CP110-based SoCs (A7K and A8K).
diff -ruw linux-6.4/drivers/phy/marvell/Makefile linux-6.4-fbx/drivers/phy/marvell/Makefile
--- linux-6.4/drivers/phy/marvell/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/phy/marvell/Makefile	2023-03-09 15:22:14.035699334 +0100
@@ -13,3 +13,4 @@
 obj-$(CONFIG_PHY_PXA_28NM_HSIC)		+= phy-pxa-28nm-hsic.o
 obj-$(CONFIG_PHY_PXA_28NM_USB2)		+= phy-pxa-28nm-usb2.o
 obj-$(CONFIG_PHY_PXA_USB)		+= phy-pxa-usb.o
+obj-$(CONFIG_PHY_UTMI_CP110)		+= phy-utmi-cp110.o
diff -ruw linux-6.4/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c linux-6.4-fbx/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
--- linux-6.4/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c	2023-05-22 20:30:14.545854148 +0200
@@ -515,6 +515,250 @@
 	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
 };
 
+static const struct qmp_phy_init_tbl ipq9574_gen3x1_pcie_serdes_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CLKBUFLR_EN, 0x18),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CTRL_BY_PSM, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x31),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_IVCO, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TRIM, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CMN_CONFIG, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP_EN, 0x42),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_RESETSM_CNTRL, 0x20),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_MAP, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER1, 0xff),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER2, 0x3f),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x30),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x21),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE0, 0x68),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE0, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE0, 0xaa),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE0, 0xab),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE0, 0x14),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE0, 0xd4),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE0, 0x09),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE0, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE0, 0x28),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE0, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE0, 0xa0),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE0, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE0, 0x24),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x20),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x32),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SYS_CLK_CTRL, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_BUF_ENABLE, 0x07),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_EN_SEL, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TIMER, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE1, 0x53),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE1, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE1, 0x55),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE1, 0x55),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE1, 0x29),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE1, 0xaa),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE1, 0x09),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE1, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE1, 0x28),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE1, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE1, 0xa0),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE1, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE1, 0xb4),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV_MODE1, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_EN_CENTER, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER1, 0x7d),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER2, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER1, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER2, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE0, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE0, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE1, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE1, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE0, 0x19),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE1, 0x28),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x90),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x89),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x10),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_gen3x2_pcie_serdes_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CLKBUFLR_EN, 0x18),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CTRL_BY_PSM, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x31),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_IVCO, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TRIM, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CMN_CONFIG, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP_EN, 0x42),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_RESETSM_CNTRL, 0x20),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_MAP, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER1, 0xff),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER2, 0x3f),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x30),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x21),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE0, 0x68),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE0, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE0, 0xaa),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE0, 0xab),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE0, 0x14),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE0, 0xd4),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE0, 0x09),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE0, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE0, 0x28),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE0, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE0, 0xa0),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE0, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE0, 0x24),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x32),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SYS_CLK_CTRL, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_BUF_ENABLE, 0x07),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_EN_SEL, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TIMER, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE1, 0x53),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE1, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE1, 0x55),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE1, 0x55),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE1, 0x29),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE1, 0xaa),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE1, 0x09),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE1, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE1, 0x28),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE1, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE1, 0xa0),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE1, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE1, 0xb4),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV_MODE1, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_EN_CENTER, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER1, 0x7d),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER2, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER1, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER2, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE0, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE0, 0x05),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE1, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE1, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE0, 0x19),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE1, 0x28),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x90),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x89),
+	QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x10),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_pcie_tx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_HIGHZ_DRVR_EN, 0x10),
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x06),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_pcie_rx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x61),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1e),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x73),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x80),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x09),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xc8),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x09),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb1),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xf0),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x2f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xd3),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x40),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_gen3x1_pcie_pcs_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_gen3x1_pcie_pcs_misc_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG2, 0x0d),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG1, 0x14),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG1, 0x10),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG2, 0x0b),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_PRE, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_POST, 0x58),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG2, 0x52),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2, 0x50),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4, 0x1a),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x06),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG6, 0x03),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_gen3x2_pcie_pcs_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_gen3x2_pcie_pcs_misc_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG2, 0x1d),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG1, 0x14),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG1, 0x10),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG2, 0x0b),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_PRESET_P10_PRE, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_PRESET_P10_POST, 0x58),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG1, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG2, 0x52),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG4, 0x19),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2, 0x49),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4, 0x2a),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x02),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG6, 0x03),
+	QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+};
+
 static const struct qmp_phy_init_tbl sdm845_qmp_pcie_serdes_tbl[] = {
 	QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
 	QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
@@ -2042,6 +2286,10 @@
 	"aux", "cfg_ahb",
 };
 
+static const char * const ipq9574_pciephy_clk_l[] = {
+	"aux", "cfg_ahb", "anoc_lane", "snoc_lane",
+};
+
 static const char * const msm8996_phy_clk_l[] = {
 	"aux", "cfg_ahb", "ref",
 };
@@ -2072,6 +2320,24 @@
 	"phy",
 };
 
+static const struct qmp_pcie_offsets qmp_pcie_offsets_3x1_ipq9574 = {
+	.serdes         = 0,
+	.tx             = 0x0200,
+	.rx             = 0x0400,
+	.pcs            = 0x0800,
+	.pcs_misc       = 0x0c00,
+};
+
+static const struct qmp_pcie_offsets qmp_pcie_offsets_3x2_ipq9574 = {
+	.serdes		= 0,
+	.tx		= 0x0200,
+	.rx		= 0x0400,
+	.tx2		= 0x0600,
+	.rx2		= 0x0800,
+	.pcs		= 0x1000,
+	.pcs_misc	= 0x1400,
+};
+
 static const struct qmp_pcie_offsets qmp_pcie_offsets_v5 = {
 	.serdes		= 0,
 	.pcs		= 0x0200,
@@ -2174,6 +2440,66 @@
 	.phy_status		= PHYSTATUS,
 };
 
+static const struct qmp_phy_cfg ipq9574_gen3x1_pciephy_cfg = {
+	.lanes			= 1,
+
+	.offsets		= &qmp_pcie_offsets_3x1_ipq9574,
+
+	.tbls = {
+		.serdes		= ipq9574_gen3x1_pcie_serdes_tbl,
+		.serdes_num	= ARRAY_SIZE(ipq9574_gen3x1_pcie_serdes_tbl),
+		.tx		= ipq9574_pcie_tx_tbl,
+		.tx_num		= ARRAY_SIZE(ipq9574_pcie_tx_tbl),
+		.rx		= ipq9574_pcie_rx_tbl,
+		.rx_num		= ARRAY_SIZE(ipq9574_pcie_rx_tbl),
+		.pcs		= ipq9574_gen3x1_pcie_pcs_tbl,
+		.pcs_num	= ARRAY_SIZE(ipq9574_gen3x1_pcie_pcs_tbl),
+		.pcs_misc	= ipq9574_gen3x1_pcie_pcs_misc_tbl,
+		.pcs_misc_num	= ARRAY_SIZE(ipq9574_gen3x1_pcie_pcs_misc_tbl),
+	},
+	.clk_list		= ipq9574_pciephy_clk_l,
+	.num_clks		= ARRAY_SIZE(ipq9574_pciephy_clk_l),
+	.reset_list		= ipq8074_pciephy_reset_l,
+	.num_resets		= ARRAY_SIZE(ipq8074_pciephy_reset_l),
+	.vreg_list		= NULL,
+	.num_vregs		= 0,
+	.regs			= pciephy_v4_regs_layout,
+
+	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
+	.phy_status		= PHYSTATUS,
+	.pipe_clock_rate	= 250000000,
+};
+
+static const struct qmp_phy_cfg ipq9574_gen3x2_pciephy_cfg = {
+	.lanes			= 2,
+
+	.offsets		= &qmp_pcie_offsets_3x2_ipq9574,
+
+	.tbls = {
+		.serdes		= ipq9574_gen3x2_pcie_serdes_tbl,
+		.serdes_num	= ARRAY_SIZE(ipq9574_gen3x2_pcie_serdes_tbl),
+		.tx		= ipq9574_pcie_tx_tbl,
+		.tx_num		= ARRAY_SIZE(ipq9574_pcie_tx_tbl),
+		.rx		= ipq9574_pcie_rx_tbl,
+		.rx_num		= ARRAY_SIZE(ipq9574_pcie_rx_tbl),
+		.pcs		= ipq9574_gen3x2_pcie_pcs_tbl,
+		.pcs_num	= ARRAY_SIZE(ipq9574_gen3x2_pcie_pcs_tbl),
+		.pcs_misc	= ipq9574_gen3x2_pcie_pcs_misc_tbl,
+		.pcs_misc_num	= ARRAY_SIZE(ipq9574_gen3x2_pcie_pcs_misc_tbl),
+	},
+	.clk_list		= ipq9574_pciephy_clk_l,
+	.num_clks		= ARRAY_SIZE(ipq9574_pciephy_clk_l),
+	.reset_list		= ipq8074_pciephy_reset_l,
+	.num_resets		= ARRAY_SIZE(ipq8074_pciephy_reset_l),
+	.vreg_list		= NULL,
+	.num_vregs		= 0,
+	.regs			= pciephy_v4_regs_layout,
+
+	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
+	.phy_status		= PHYSTATUS,
+	.pipe_clock_rate	= 250000000,
+};
+
 static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
 	.lanes			= 1,
 
@@ -3375,6 +3701,12 @@
 		.compatible = "qcom,ipq8074-qmp-pcie-phy",
 		.data = &ipq8074_pciephy_cfg,
 	}, {
+		.compatible = "qcom,ipq9574-qmp-gen3x1-pcie-phy",
+		.data = &ipq9574_gen3x1_pciephy_cfg,
+	}, {
+		.compatible = "qcom,ipq9574-qmp-gen3x2-pcie-phy",
+		.data = &ipq9574_gen3x2_pciephy_cfg,
+	}, {
 		.compatible = "qcom,msm8998-qmp-pcie-phy",
 		.data = &msm8998_pciephy_cfg,
 	}, {
diff -ruw linux-6.4/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h linux-6.4-fbx/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h
--- linux-6.4/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h	2023-05-22 20:30:14.545854148 +0200
@@ -11,8 +11,22 @@
 #define QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG2		0x0c
 #define QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG4		0x14
 #define QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE		0x20
+#define QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L		0x44
+#define QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H		0x48
+#define QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L		0x4c
+#define QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H		0x50
 #define QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1		0x54
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG1			0x5c
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG2			0x60
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG4			0x68
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2			0x7c
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4			0x84
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5			0x88
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG6			0x8c
 #define QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS		0x94
+#define QPHY_V5_PCS_PCIE_EQ_CONFIG1				0xa4
 #define QPHY_V5_PCS_PCIE_EQ_CONFIG2			0xa8
+#define QPHY_V5_PCS_PCIE_PRESET_P10_PRE				0xc0
+#define QPHY_V5_PCS_PCIE_PRESET_P10_POST			0xe4
 
 #endif
diff -ruw linux-6.4/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h linux-6.4-fbx/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h
--- linux-6.4/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h	2023-05-22 20:30:14.545854148 +0200
@@ -8,6 +8,9 @@
 
 /* QMP V2 PHY for PCIE gen3 ports - QSERDES PLL registers */
 #define QSERDES_PLL_BG_TIMER				0x00c
+#define QSERDES_PLL_SSC_EN_CENTER			0x010
+#define QSERDES_PLL_SSC_ADJ_PER1			0x014
+#define QSERDES_PLL_SSC_ADJ_PER2			0x018
 #define QSERDES_PLL_SSC_PER1				0x01c
 #define QSERDES_PLL_SSC_PER2				0x020
 #define QSERDES_PLL_SSC_STEP_SIZE1_MODE0		0x024
diff -ruw linux-6.4/drivers/phy/qualcomm/phy-qcom-qmp-usb.c linux-6.4-fbx/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
--- linux-6.4/drivers/phy/qualcomm/phy-qcom-qmp-usb.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/phy/qualcomm/phy-qcom-qmp-usb.c	2023-05-22 20:30:14.545854148 +0200
@@ -139,6 +139,88 @@
 	[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
 };
 
+static const struct qmp_phy_init_tbl ipq9574_usb3_serdes_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x1a),
+	QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+	QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+	QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06),
+	/* PLL and Loop filter settings */
+	QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x68),
+	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0xab),
+	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0xaa),
+	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x09),
+	QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+	QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0xa0),
+	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0xaa),
+	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x29),
+	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+	/* SSC settings */
+	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x7d),
+	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x05),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_usb3_tx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+	QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+	QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_usb3_rx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x6c),
+	QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4c),
+	QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xb8),
+	QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+	QMP_PHY_INIT_CFG(QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+	QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl ipq9574_usb3_pcs_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0e),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x85),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x17),
+	QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0f),
+};
+
 static const struct qmp_phy_init_tbl ipq8074_usb3_serdes_tbl[] = {
 	QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x1a),
 	QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
@@ -1558,6 +1640,14 @@
 	"vdda-phy", "vdda-pll",
 };
 
+static const struct qmp_usb_offsets qmp_usb_offsets_ipq9574 = {
+	.serdes		= 0,
+	.pcs		= 0x800,
+	.pcs_usb	= 0x800,
+	.tx		= 0x200,
+	.rx		= 0x400,
+};
+
 static const struct qmp_usb_offsets qmp_usb_offsets_v5 = {
 	.serdes		= 0,
 	.pcs		= 0x0200,
@@ -1586,6 +1676,28 @@
 	.regs			= qmp_v3_usb3phy_regs_layout,
 };
 
+static const struct qmp_phy_cfg ipq9574_usb3phy_cfg = {
+	.lanes			= 1,
+
+	.offsets		= &qmp_usb_offsets_ipq9574,
+
+	.serdes_tbl		= ipq9574_usb3_serdes_tbl,
+	.serdes_tbl_num		= ARRAY_SIZE(ipq9574_usb3_serdes_tbl),
+	.tx_tbl			= ipq9574_usb3_tx_tbl,
+	.tx_tbl_num		= ARRAY_SIZE(ipq9574_usb3_tx_tbl),
+	.rx_tbl			= ipq9574_usb3_rx_tbl,
+	.rx_tbl_num		= ARRAY_SIZE(ipq9574_usb3_rx_tbl),
+	.pcs_tbl		= ipq9574_usb3_pcs_tbl,
+	.pcs_tbl_num		= ARRAY_SIZE(ipq9574_usb3_pcs_tbl),
+	.clk_list		= msm8996_phy_clk_l,
+	.num_clks		= ARRAY_SIZE(msm8996_phy_clk_l),
+	.reset_list		= qcm2290_usb3phy_reset_l,
+	.num_resets		= ARRAY_SIZE(qcm2290_usb3phy_reset_l),
+	.vreg_list		= qmp_phy_vreg_l,
+	.num_vregs		= ARRAY_SIZE(qmp_phy_vreg_l),
+	.regs			= qmp_v3_usb3phy_regs_layout,
+};
+
 static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
 	.lanes			= 1,
 
@@ -2589,6 +2701,9 @@
 		.compatible = "qcom,ipq8074-qmp-usb3-phy",
 		.data = &ipq8074_usb3phy_cfg,
 	}, {
+		.compatible = "qcom,ipq9574-qmp-usb3-phy",
+		.data = &ipq9574_usb3phy_cfg,
+	}, {
 		.compatible = "qcom,msm8996-qmp-usb3-phy",
 		.data = &msm8996_usb3phy_cfg,
 	}, {
diff -ruw linux-6.4/drivers/phy/qualcomm/phy-qcom-qusb2.c linux-6.4-fbx/drivers/phy/qualcomm/phy-qcom-qusb2.c
--- linux-6.4/drivers/phy/qualcomm/phy-qcom-qusb2.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/phy/qualcomm/phy-qcom-qusb2.c	2023-05-22 20:30:14.545854148 +0200
@@ -912,6 +912,9 @@
 		.compatible	= "qcom,ipq8074-qusb2-phy",
 		.data		= &msm8996_phy_cfg,
 	}, {
+		.compatible	= "qcom,ipq9574-qusb2-phy",
+		.data		= &ipq6018_phy_cfg,
+	}, {
 		.compatible	= "qcom,msm8953-qusb2-phy",
 		.data		= &msm8996_phy_cfg,
 	}, {
diff -ruw linux-6.4/drivers/pinctrl/bcm/Kconfig linux-6.4-fbx/drivers/pinctrl/bcm/Kconfig
--- linux-6.4/drivers/pinctrl/bcm/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/pinctrl/bcm/Kconfig	2023-09-01 15:04:29.529632893 +0200
@@ -52,6 +52,16 @@
 	select REGMAP
 	select GPIO_REGMAP
 
+config PINCTRL_BCM63138
+	bool "Broadcom 63138 pinmux driver"
+	depends on OF && (ARCH_BCMBCA || COMPILE_TEST)
+	default ARCH_BCMBCA
+	select PINMUX
+	select PINCONF
+	select GENERIC_PINCONF
+	select GPIOLIB
+	select GPIOLIB_IRQCHIP
+
 config PINCTRL_BCM6318
 	bool "Broadcom BCM6318 GPIO driver"
 	depends on (BMIPS_GENERIC || COMPILE_TEST)
diff -ruw linux-6.4/drivers/pinctrl/bcm/Makefile linux-6.4-fbx/drivers/pinctrl/bcm/Makefile
--- linux-6.4/drivers/pinctrl/bcm/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/pinctrl/bcm/Makefile	2023-05-22 20:06:42.723813371 +0200
@@ -11,6 +11,7 @@
 obj-$(CONFIG_PINCTRL_BCM6362)		+= pinctrl-bcm6362.o
 obj-$(CONFIG_PINCTRL_BCM6368)		+= pinctrl-bcm6368.o
 obj-$(CONFIG_PINCTRL_BCM63268)		+= pinctrl-bcm63268.o
+obj-$(CONFIG_PINCTRL_BCM63138)		+= pinctrl-bcm63138.o
 obj-$(CONFIG_PINCTRL_IPROC_GPIO)	+= pinctrl-iproc-gpio.o
 obj-$(CONFIG_PINCTRL_CYGNUS_MUX)	+= pinctrl-cygnus-mux.o
 obj-$(CONFIG_PINCTRL_NS)		+= pinctrl-ns.o
diff -ruw linux-6.4/drivers/pinctrl/qcom/pinctrl-msm.c linux-6.4-fbx/drivers/pinctrl/qcom/pinctrl-msm.c
--- linux-6.4/drivers/pinctrl/qcom/pinctrl-msm.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/pinctrl/qcom/pinctrl-msm.c	2023-12-05 17:14:42.315715453 +0100
@@ -664,6 +664,7 @@
 {
 	const struct msm_pingroup *g;
 	struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
+	const char *label = gpiochip_is_requested(chip, offset);
 	unsigned func;
 	int is_out;
 	int drive;
@@ -717,6 +718,8 @@
 		seq_printf(s, " %s", pulls_no_keeper[pull]);
 	else
 		seq_printf(s, " %s", pulls_keeper[pull]);
+
+	seq_printf(s, " (%s)", label ? label : "");
 	seq_puts(s, "\n");
 }
 
diff -ruw linux-6.4/drivers/platform/Kconfig linux-6.4-fbx/drivers/platform/Kconfig
--- linux-6.4/drivers/platform/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/platform/Kconfig	2023-07-20 17:19:14.610365797 +0200
@@ -14,3 +14,11 @@
 source "drivers/platform/surface/Kconfig"
 
 source "drivers/platform/x86/Kconfig"
+
+if X86_INTEL_CE
+source "drivers/platform/intelce/Kconfig"
+endif
+
+source "drivers/platform/fbxgw7r/Kconfig"
+
+source "drivers/platform/ipq/Kconfig"
diff -ruw linux-6.4/drivers/platform/Makefile linux-6.4-fbx/drivers/platform/Makefile
--- linux-6.4/drivers/platform/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/platform/Makefile	2023-07-20 17:19:14.610365797 +0200
@@ -11,3 +11,6 @@
 obj-$(CONFIG_GOLDFISH)		+= goldfish/
 obj-$(CONFIG_CHROME_PLATFORMS)	+= chrome/
 obj-$(CONFIG_SURFACE_PLATFORMS)	+= surface/
+obj-$(CONFIG_X86_INTEL_CE)	+= intelce/
+obj-$(CONFIG_FBXGW7R_PLATFORM)	+= fbxgw7r/
+obj-$(CONFIG_QCOM_IPQ_PLATFORM)	+= ipq/
diff -ruw linux-6.4/drivers/regulator/Kconfig linux-6.4-fbx/drivers/regulator/Kconfig
--- linux-6.4/drivers/regulator/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/regulator/Kconfig	2024-04-19 15:59:31.193600561 +0200
@@ -30,6 +30,12 @@
 	help
 	  Say yes here to enable debugging support.
 
+config REGULATOR_FAULT_SENSING
+	bool "Regulator fault-sensing detection"
+	help
+	  Add support for fault-sensing gpio which will cause
+	  regulator to be forced-disabled.
+
 config REGULATOR_FIXED_VOLTAGE
 	tristate "Fixed voltage regulator support"
 	help
diff -ruw linux-6.4/drivers/regulator/core.c linux-6.4-fbx/drivers/regulator/core.c
--- linux-6.4/drivers/regulator/core.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/regulator/core.c	2024-04-04 13:46:08.950952914 +0200
@@ -2877,6 +2877,13 @@
 
 	lockdep_assert_held_once(&rdev->mutex.base);
 
+	if (IS_ENABLED(CONFIG_REGULATOR_FAULT_SENSING) &&
+	    rdev->fault_sense_gpiod &&
+	    gpiod_get_value_cansleep(rdev->fault_sense_gpiod)) {
+		rdev_err(rdev, "can't enable regulator, active fault\n");
+		return -EIO;
+	}
+
 	if (rdev->use_count == 0 && rdev->supply) {
 		ret = _regulator_enable(rdev->supply);
 		if (ret < 0)
@@ -5476,6 +5483,59 @@
 	.attach_regulator = generic_coupler_attach,
 };
 
+static int read_stable_gpio(struct gpio_desc *d, int count, int wait_ms)
+{
+	int val;
+	int i;
+
+	val = gpiod_get_value_cansleep(d);
+	if (val < 0)
+		return val;
+
+	for (i = 1; i < count; i++) {
+		int nval;
+
+		nval = gpiod_get_value_cansleep(d);
+		if (nval < 0)
+			return nval;
+
+		if (val != nval)
+			return -EAGAIN;
+
+		if (wait_ms)
+			msleep(wait_ms);
+	}
+
+	return val;
+}
+
+static irqreturn_t fault_sense_irq_handler(int irq, void *dev_id)
+{
+	struct regulator_dev *rdev = dev_id;
+	int val;
+
+	if (!_regulator_is_enabled(rdev))
+		return IRQ_HANDLED;
+
+	val = read_stable_gpio(rdev->fault_sense_gpiod, 5, 5);
+	if (val < 0) {
+		if (val == -EAGAIN)
+			rdev_warn(rdev, "unstable fault-gpio value\n");
+		else
+			rdev_warn(rdev, "fault-gpio read failure:%d\n", val);
+
+		return IRQ_HANDLED;
+	}
+
+	if (val) {
+		_regulator_force_disable(rdev);
+		disable_irq_nosync(irq);
+		rdev_err(rdev, "fault detected, force disable regulator\n");
+	}
+
+	return IRQ_HANDLED;
+}
+
 /**
  * regulator_register - register regulator
  * @dev: the device that drive the regulator
@@ -5496,7 +5556,9 @@
 	static atomic_t regulator_no = ATOMIC_INIT(-1);
 	struct regulator_dev *rdev;
 	bool dangling_cfg_gpiod = false;
+	bool dangling_cfg_fs_gpiod = false;
 	bool dangling_of_gpiod = false;
+	bool dangling_of_fs_gpiod = false;
 	int ret, i;
 	bool resolved_early = false;
 
@@ -5504,6 +5566,8 @@
 		return ERR_PTR(-EINVAL);
 	if (cfg->ena_gpiod)
 		dangling_cfg_gpiod = true;
+	if (cfg->fault_sense_gpiod)
+		dangling_cfg_fs_gpiod = true;
 	if (regulator_desc == NULL) {
 		ret = -EINVAL;
 		goto rinse;
@@ -5581,6 +5645,8 @@
 	 */
 	if (!cfg->ena_gpiod && config->ena_gpiod)
 		dangling_of_gpiod = true;
+	if (!cfg->fault_sense_gpiod && config->fault_sense_gpiod)
+		dangling_of_fs_gpiod = true;
 	if (!init_data) {
 		init_data = config->init_data;
 		rdev->dev.of_node = of_node_get(config->of_node);
@@ -5656,6 +5722,31 @@
 		dangling_of_gpiod = false;
 	}
 
+	if (IS_ENABLED(CONFIG_REGULATOR_FAULT_SENSING) &&
+	    config->fault_sense_gpiod) {
+		int irq, ret;
+
+		irq = gpiod_to_irq(config->fault_sense_gpiod);
+		if (irq < 0) {
+			rdev_err(rdev, "failed to get irq: %d\n", irq);
+			goto wash;
+		}
+
+		ret = devm_request_threaded_irq(dev, irq, NULL,
+						fault_sense_irq_handler,
+						IRQF_ONESHOT |
+						IRQF_TRIGGER_RISING |
+						IRQF_TRIGGER_FALLING,
+						"reg-fault-sense", rdev);
+		if (ret < 0) {
+			rdev_err(rdev, "failed to request irq: %d\n", ret);
+			goto wash;
+		}
+	}
+	dangling_cfg_fs_gpiod = false;
+	dangling_of_fs_gpiod = false;
+	rdev->fault_sense_gpiod = config->fault_sense_gpiod;
+
 	ret = set_machine_constraints(rdev);
 	if (ret == -EPROBE_DEFER && !resolved_early) {
 		/* Regulator might be in bypass mode and so needs its supply
@@ -5733,6 +5824,8 @@
 clean:
 	if (dangling_of_gpiod)
 		gpiod_put(config->ena_gpiod);
+	if (dangling_of_fs_gpiod)
+		gpiod_put(config->fault_sense_gpiod);
 	if (rdev && rdev->dev.of_node)
 		of_node_put(rdev->dev.of_node);
 	kfree(rdev);
@@ -5740,6 +5833,8 @@
 rinse:
 	if (dangling_cfg_gpiod)
 		gpiod_put(cfg->ena_gpiod);
+	if (dangling_cfg_fs_gpiod)
+		gpiod_put(cfg->fault_sense_gpiod);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(regulator_register);
@@ -5770,6 +5865,7 @@
 	unset_regulator_supplies(rdev);
 	list_del(&rdev->list);
 	regulator_ena_gpio_free(rdev);
+	gpiod_put(rdev->fault_sense_gpiod);
 	device_unregister(&rdev->dev);
 
 	mutex_unlock(&regulator_list_mutex);
diff -ruw linux-6.4/drivers/regulator/fixed.c linux-6.4-fbx/drivers/regulator/fixed.c
--- linux-6.4/drivers/regulator/fixed.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/regulator/fixed.c	2023-10-05 12:33:41.387635388 +0200
@@ -34,6 +34,7 @@
 struct fixed_voltage_data {
 	struct regulator_desc desc;
 	struct regulator_dev *dev;
+	struct gpio_desc *fault_sense;
 
 	struct clk *enable_clock;
 	unsigned int enable_counter;
@@ -276,6 +277,12 @@
 		return dev_err_probe(&pdev->dev, PTR_ERR(cfg.ena_gpiod),
 				     "can't get GPIO\n");
 
+	cfg.fault_sense_gpiod = gpiod_get_optional(&pdev->dev, "fault-sense",
+						   GPIOD_IN);
+	if (IS_ERR(cfg.fault_sense_gpiod))
+		return dev_err_probe(dev, PTR_ERR(cfg.fault_sense_gpiod),
+				     "can't get  fault-sense gpio");
+
 	cfg.dev = &pdev->dev;
 	cfg.init_data = config->init_data;
 	cfg.driver_data = drvdata;
diff -ruw linux-6.4/drivers/rtc/Kconfig linux-6.4-fbx/drivers/rtc/Kconfig
--- linux-6.4/drivers/rtc/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/rtc/Kconfig	2024-01-08 17:49:53.079558710 +0100
@@ -1391,6 +1391,15 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-meson-vrtc.
 
+config RTC_DRV_MESON_AXG
+	tristate "Amlogic Meson AXG RTC + wakeup"
+	depends on RTC_HCTOSYS
+	depends on ARCH_MESON || COMPILE_TEST
+	default m if ARCH_MESON
+	help
+	  If you say yes here you will get support for the RTC of Amlogic SoCs
+	  as implemented in M3's firmware for AXG platform by Freebox.
+
 config RTC_DRV_OMAP
 	tristate "TI OMAP Real Time Clock"
 	depends on ARCH_OMAP || ARCH_DAVINCI || COMPILE_TEST
diff -ruw linux-6.4/drivers/rtc/Makefile linux-6.4-fbx/drivers/rtc/Makefile
--- linux-6.4/drivers/rtc/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/rtc/Makefile	2024-01-08 17:49:53.079558710 +0100
@@ -97,6 +97,7 @@
 obj-$(CONFIG_RTC_DRV_MAX8997)	+= rtc-max8997.o
 obj-$(CONFIG_RTC_DRV_MAX8998)	+= rtc-max8998.o
 obj-$(CONFIG_RTC_DRV_MESON_VRTC)+= rtc-meson-vrtc.o
+obj-$(CONFIG_RTC_DRV_MESON_AXG)	+= rtc-meson-axg.o
 obj-$(CONFIG_RTC_DRV_MC13XXX)	+= rtc-mc13xxx.o
 obj-$(CONFIG_RTC_DRV_MCP795)	+= rtc-mcp795.o
 obj-$(CONFIG_RTC_DRV_MESON)	+= rtc-meson.o
diff -ruw linux-6.4/drivers/soc/bcm/Kconfig linux-6.4-fbx/drivers/soc/bcm/Kconfig
--- linux-6.4/drivers/soc/bcm/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/soc/bcm/Kconfig	2023-05-22 20:06:43.275828054 +0200
@@ -24,7 +24,8 @@
 
 config SOC_BCM63XX
 	bool "Broadcom 63xx SoC drivers"
-	depends on BMIPS_GENERIC || COMPILE_TEST
+	depends on BMIPS_GENERIC || ARCH_BCMBCA || COMPILE_TEST
+	select RESET_CONTROLLER
 	help
 	  Enables drivers for the Broadcom 63xx series of chips.
 	  Drivers can be enabled individually within this menu.
diff -ruw linux-6.4/drivers/soc/bcm/Makefile linux-6.4-fbx/drivers/soc/bcm/Makefile
--- linux-6.4/drivers/soc/bcm/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/soc/bcm/Makefile	2023-05-22 20:06:43.275828054 +0200
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_BCM2835_POWER)	+= bcm2835-power.o
 obj-$(CONFIG_RASPBERRYPI_POWER)	+= raspberrypi-power.o
-obj-y				+= bcm63xx/
 obj-$(CONFIG_SOC_BRCMSTB)	+= brcmstb/
+obj-$(CONFIG_SOC_BCM63XX)	+= bcm63xx/
diff -ruw linux-6.4/drivers/soc/bcm/bcm63xx/Kconfig linux-6.4-fbx/drivers/soc/bcm/bcm63xx/Kconfig
--- linux-6.4/drivers/soc/bcm/bcm63xx/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/soc/bcm/bcm63xx/Kconfig	2023-05-22 20:06:43.275828054 +0200
@@ -9,6 +9,31 @@
 	  This enables support for the BCM63xx power domains controller on
 	  BCM6318, BCM6328, BCM6362 and BCM63268 SoCs.
 
+config SOC_BCM63XX_RDP
+	bool "rdp subsystem"
+	depends on ARCH_BCMBCA || COMPILE_TEST
+
+config SOC_BCM63XX_XRDP
+	tristate "xrdp subsystem"
+	depends on ARCH_BCMBCA || COMPILE_TEST
+	select UBUS4_BCM63158
+
+config SOC_BCM63XX_XRDP_IOCTL
+	bool "ioctl interface"
+	depends on SOC_BCM63XX_XRDP
+
+config UBUS4_BCM63158
+	bool "Broadcom 63158 UBUS4 driver"
+	depends on ARCH_BCMBCA || COMPILE_TEST
+
+config SOC_MEMC_BCM63158
+	tristate "Broadcom 63158 MEMC driver"
+	depends on ARCH_BCMBCA || COMPILE_TEST
+
+config PROCMON_BCM63158
+	bool "Broadcom 63158 PROCMON driver"
+	depends on ARCH_BCMBCA || COMPILE_TEST
+
 endif # SOC_BCM63XX
 
 config BCM_PMB
@@ -19,3 +44,4 @@
 	help
 	  This enables support for the Broadcom's PMB (Power Management Bus) that
 	  is used for disabling and enabling SoC devices.
+
diff -ruw linux-6.4/drivers/soc/bcm/bcm63xx/Makefile linux-6.4-fbx/drivers/soc/bcm/bcm63xx/Makefile
--- linux-6.4/drivers/soc/bcm/bcm63xx/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/soc/bcm/bcm63xx/Makefile	2023-05-22 20:06:43.275828054 +0200
@@ -1,3 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_BCM63XX_POWER) += bcm63xx-power.o
 obj-$(CONFIG_BCM_PMB)		+= bcm-pmb.o
+
+obj-$(CONFIG_ARCH_BCMBCA) += pmc.o
+obj-$(CONFIG_SOC_BCM63XX_RDP) += rdp/
+obj-$(CONFIG_SOC_BCM63XX_XRDP) += xrdp/
+obj-$(CONFIG_UBUS4_BCM63158)	+= ubus4-bcm63158.o
+obj-$(CONFIG_PROCMON_BCM63158)	+= procmon-bcm63158.o
+obj-$(CONFIG_SOC_MEMC_BCM63158)	+= memc-bcm63158.o
diff -ruw linux-6.4/drivers/soc/qcom/Kconfig linux-6.4-fbx/drivers/soc/qcom/Kconfig
--- linux-6.4/drivers/soc/qcom/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/soc/qcom/Kconfig	2024-04-19 15:59:31.193600561 +0200
@@ -279,4 +279,8 @@
 	tristate
 	select QCOM_SCM
 
+config QCOM_IMEM_RESET_REASON
+	tristate "QCOM IMEM based reset reason"
+	depends on ARCH_QCOM || COMPILE_TEST
+
 endmenu
diff -ruw linux-6.4/drivers/soc/qcom/Makefile linux-6.4-fbx/drivers/soc/qcom/Makefile
--- linux-6.4/drivers/soc/qcom/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/soc/qcom/Makefile	2024-04-19 15:59:31.193600561 +0200
@@ -34,3 +34,5 @@
 obj-$(CONFIG_QCOM_ICC_BWMON)	+= icc-bwmon.o
 qcom_ice-objs			+= ice.o
 obj-$(CONFIG_QCOM_INLINE_CRYPTO_ENGINE)	+= qcom_ice.o
+
+obj-$(CONFIG_QCOM_IMEM_RESET_REASON) += qcom-imem-reset-reason.o
diff -ruw linux-6.4/drivers/spi/Kconfig linux-6.4-fbx/drivers/spi/Kconfig
--- linux-6.4/drivers/spi/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/spi/Kconfig	2023-05-22 20:06:43.315829118 +0200
@@ -771,6 +771,12 @@
 	help
 	  This selects a driver for the PPC4xx SPI Controller.
 
+config SPI_TDM_ORION
+	tristate "Orion TDM SPI master"
+	depends on PLAT_ORION
+	help
+	  This enables using the TDM SPI master controller on the Orion chips.
+
 config SPI_PXA2XX
 	tristate "PXA2xx SSP SPI master"
 	depends on ARCH_PXA || ARCH_MMP || PCI || ACPI || COMPILE_TEST
diff -ruw linux-6.4/drivers/spi/Makefile linux-6.4-fbx/drivers/spi/Makefile
--- linux-6.4/drivers/spi/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/spi/Makefile	2023-05-22 20:06:43.315829118 +0200
@@ -99,6 +99,7 @@
 obj-$(CONFIG_SPI_PCI1XXXX)		+= spi-pci1xxxx.o
 obj-$(CONFIG_SPI_PIC32)			+= spi-pic32.o
 obj-$(CONFIG_SPI_PIC32_SQI)		+= spi-pic32-sqi.o
+obj-$(CONFIG_SPI_TDM_ORION)		+= orion_tdm_spi.o
 obj-$(CONFIG_SPI_PL022)			+= spi-pl022.o
 obj-$(CONFIG_SPI_PPC4xx)		+= spi-ppc4xx.o
 spi-pxa2xx-platform-objs		:= spi-pxa2xx.o spi-pxa2xx-dma.o
diff -ruw linux-6.4/drivers/spi/spi-qup.c linux-6.4-fbx/drivers/spi/spi-qup.c
--- linux-6.4/drivers/spi/spi-qup.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/spi/spi-qup.c	2023-11-08 17:40:48.567615299 +0100
@@ -987,6 +987,9 @@
 	else
 		spi_ioc &= ~SPI_IO_C_FORCE_CS;
 
+	spi_ioc &= ~SPI_IO_C_CS_SELECT_MASK;
+	spi_ioc |= SPI_IO_C_CS_SELECT(spi_get_chipselect(spi, 0));
+
 	if (spi_ioc != spi_ioc_orig)
 		writel_relaxed(spi_ioc, controller->base + SPI_IO_CONTROL);
 }
diff -ruw linux-6.4/drivers/tee/optee/smc_abi.c linux-6.4-fbx/drivers/tee/optee/smc_abi.c
--- linux-6.4/drivers/tee/optee/smc_abi.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/tee/optee/smc_abi.c	2023-06-27 11:47:15.907862595 +0200
@@ -1782,9 +1782,14 @@
 	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
 		pr_info("dynamic shared memory is enabled\n");
 
+	if (device_property_present(&pdev->dev, "skip-enumeration"))
+		optee->scan_bus_done = true;
+
+	if (!optee->scan_bus_done) {
 	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
 	if (rc)
 		goto err_disable_shm_cache;
+	}
 
 	pr_info("initialized driver\n");
 	return 0;
diff -ruw linux-6.4/drivers/thermal/qcom/tsens.c linux-6.4-fbx/drivers/thermal/qcom/tsens.c
--- linux-6.4/drivers/thermal/qcom/tsens.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/thermal/qcom/tsens.c	2023-05-22 20:30:14.545854148 +0200
@@ -1093,6 +1093,9 @@
 		.compatible = "qcom,ipq8074-tsens",
 		.data = &data_ipq8074,
 	}, {
+		.compatible = "qcom,ipq9574-tsens",
+		.data = &data_ipq8074,
+	}, {
 		.compatible = "qcom,mdm9607-tsens",
 		.data = &data_9607,
 	}, {
diff -ruw linux-6.4/drivers/thermal/thermal_core.c linux-6.4-fbx/drivers/thermal/thermal_core.c
--- linux-6.4/drivers/thermal/thermal_core.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/thermal/thermal_core.c	2023-05-22 20:06:43.535834969 +0200
@@ -810,6 +810,7 @@
 
 /**
  * __thermal_cooling_device_register() - register a new thermal cooling device
+ * @dev:	parent device
  * @np:		a pointer to a device tree node.
  * @type:	the thermal cooling device type.
  * @devdata:	device private data.
@@ -825,7 +826,7 @@
  * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
  */
 static struct thermal_cooling_device *
-__thermal_cooling_device_register(struct device_node *np,
+__thermal_cooling_device_register(struct device *pdev, struct device_node *np,
 				  const char *type, void *devdata,
 				  const struct thermal_cooling_device_ops *ops)
 {
@@ -873,6 +874,7 @@
 	ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
 	if (ret)
 		goto out_cooling_dev;
+	cdev->device.parent = pdev;
 
 	ret = device_register(&cdev->device);
 	if (ret) {
@@ -926,11 +928,30 @@
 thermal_cooling_device_register(const char *type, void *devdata,
 				const struct thermal_cooling_device_ops *ops)
 {
-	return __thermal_cooling_device_register(NULL, type, devdata, ops);
+	return __thermal_cooling_device_register(NULL, NULL, type, devdata, ops);
 }
 EXPORT_SYMBOL_GPL(thermal_cooling_device_register);
 
 /**
+ * thermal_cooling_device_register_with_parent() - register a new thermal cooling device
+ * @pdev:	parent device
+ * @type:	the thermal cooling device type.
+ * @devdata:	device private data.
+ * @ops:		standard thermal cooling devices callbacks.
+ *
+ * Same as thermal_cooling_device_register but take also the parent device.
+ * Then, hwpath will include the parent device to uniquely identify this device
+ */
+struct thermal_cooling_device *
+thermal_cooling_device_register_with_parent(struct device *pdev,
+				const char *type, void *devdata,
+				const struct thermal_cooling_device_ops *ops)
+{
+	return __thermal_cooling_device_register(pdev, NULL, type, devdata, ops);
+}
+EXPORT_SYMBOL_GPL(thermal_cooling_device_register_with_parent);
+
+/**
  * thermal_of_cooling_device_register() - register an OF thermal cooling device
  * @np:		a pointer to a device tree node.
  * @type:	the thermal cooling device type.
@@ -950,7 +971,7 @@
 				   const char *type, void *devdata,
 				   const struct thermal_cooling_device_ops *ops)
 {
-	return __thermal_cooling_device_register(np, type, devdata, ops);
+	return __thermal_cooling_device_register(NULL, np, type, devdata, ops);
 }
 EXPORT_SYMBOL_GPL(thermal_of_cooling_device_register);
 
@@ -990,7 +1011,7 @@
 	if (!ptr)
 		return ERR_PTR(-ENOMEM);
 
-	tcd = __thermal_cooling_device_register(np, type, devdata, ops);
+	tcd = __thermal_cooling_device_register(NULL, np, type, devdata, ops);
 	if (IS_ERR(tcd)) {
 		devres_free(ptr);
 		return tcd;
diff -ruw linux-6.4/drivers/tty/serial/Kconfig linux-6.4-fbx/drivers/tty/serial/Kconfig
--- linux-6.4/drivers/tty/serial/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/tty/serial/Kconfig	2023-06-27 11:47:15.911862703 +0200
@@ -1075,6 +1075,11 @@
 	    BCM68xx (PON)
 	    BCM7xxx (STB) - DOCSIS console
 
+config SERIAL_BCM63XX_HS
+	tristate "Broadcom BCM63xx HS UART support"
+	select SERIAL_CORE
+	depends on ARCH_BCMBCA || COMPILE_TEST
+
 config SERIAL_BCM63XX_CONSOLE
 	bool "Console on BCM63xx serial port"
 	depends on SERIAL_BCM63XX=y
diff -ruw linux-6.4/drivers/tty/serial/Makefile linux-6.4-fbx/drivers/tty/serial/Makefile
--- linux-6.4/drivers/tty/serial/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/tty/serial/Makefile	2023-05-22 20:06:43.571835927 +0200
@@ -30,6 +30,7 @@
 obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
 obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
 obj-$(CONFIG_SERIAL_SAMSUNG) += samsung_tty.o
+obj-$(CONFIG_SERIAL_BCM63XX_HS) += bcm63xx-hs-uart.o
 obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
 obj-$(CONFIG_SERIAL_MAX310X) += max310x.o
 obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
diff -ruw linux-6.4/drivers/usb/dwc3/host.c linux-6.4-fbx/drivers/usb/dwc3/host.c
--- linux-6.4/drivers/usb/dwc3/host.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/usb/dwc3/host.c	2023-08-18 13:36:38.221267991 +0200
@@ -65,12 +65,17 @@
 	struct platform_device	*xhci;
 	int			ret, irq;
 	int			prop_idx = 0;
+	u32 devid;
 
 	irq = dwc3_host_get_irq(dwc);
 	if (irq < 0)
 		return irq;
 
-	xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
+	if (!dwc->dev->of_node ||
+	    of_property_read_u32(dwc->dev->of_node, "dev_id", &devid) < 0)
+		devid = PLATFORM_DEVID_AUTO;
+
+	xhci = platform_device_alloc("xhci-hcd", devid);
 	if (!xhci) {
 		dev_err(dwc->dev, "couldn't allocate xHCI device\n");
 		return -ENOMEM;
diff -ruw linux-6.4/drivers/usb/host/Kconfig linux-6.4-fbx/drivers/usb/host/Kconfig
--- linux-6.4/drivers/usb/host/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/usb/host/Kconfig	2023-05-22 20:06:43.695839225 +0200
@@ -78,13 +78,13 @@
 	  If unsure, say N.
 
 config USB_XHCI_MVEBU
-	tristate "xHCI support for Marvell Armada 375/38x/37xx"
+	tristate "xHCI support for Marvell Armada 375/38x/37xx/70x0/80x0"
 	select USB_XHCI_PLATFORM
 	depends on HAS_IOMEM
 	depends on ARCH_MVEBU || COMPILE_TEST
 	help
 	  Say 'Y' to enable the support for the xHCI host controller
-	  found in Marvell Armada 375/38x/37xx ARM SOCs.
+	  found in Marvell Armada 375/38x/37xx/70x0/80x0 ARM SOCs.
 
 config USB_XHCI_RCAR
 	tristate "xHCI support for Renesas R-Car SoCs"
@@ -690,6 +690,10 @@
 
 	  If unsure, say N.
 
+config USB_BCM63158
+	tristate "Broadcom BCM63158 SoC USB host driver"
+	depends on ARCH_BCMBCA || COMPILE_TEST
+
 config USB_HCD_SSB
 	tristate "SSB usb host driver"
 	depends on SSB
diff -ruw linux-6.4/drivers/usb/host/Makefile linux-6.4-fbx/drivers/usb/host/Makefile
--- linux-6.4/drivers/usb/host/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/usb/host/Makefile	2023-05-22 20:06:43.695839225 +0200
@@ -85,3 +85,4 @@
 obj-$(CONFIG_USB_HCD_SSB)	+= ssb-hcd.o
 obj-$(CONFIG_USB_MAX3421_HCD)	+= max3421-hcd.o
 obj-$(CONFIG_USB_XEN_HCD)	+= xen-hcd.o
+obj-$(CONFIG_USB_BCM63158)	+= usb-bcm63158.o
diff -ruw linux-6.4/drivers/usb/host/xhci-plat.c linux-6.4-fbx/drivers/usb/host/xhci-plat.c
--- linux-6.4/drivers/usb/host/xhci-plat.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/usb/host/xhci-plat.c	2023-11-16 16:11:31.633535041 +0100
@@ -133,6 +133,8 @@
 		.compatible = "marvell,armada3700-xhci",
 		.data = &xhci_plat_marvell_armada3700,
 	}, {
+		.compatible = "marvell,armada-8k-xhci",
+	}, {
 		.compatible = "brcm,xhci-brcm-v2",
 		.data = &xhci_plat_brcm,
 	}, {
@@ -294,10 +296,6 @@
 		xhci->shared_hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev,
 			    "usb-phy", 1);
 		if (IS_ERR(xhci->shared_hcd->usb_phy)) {
-			if (PTR_ERR(xhci->shared_hcd->usb_phy) != -ENODEV)
-				dev_err(sysdev, "%s get usb3phy fail (ret=%d)\n",
-					     __func__,
-					    (int)PTR_ERR(xhci->shared_hcd->usb_phy));
 			xhci->shared_hcd->usb_phy = NULL;
 		} else {
 			ret = usb_phy_init(xhci->shared_hcd->usb_phy);
diff -ruw linux-6.4/drivers/usb/host/xhci-ring.c linux-6.4-fbx/drivers/usb/host/xhci-ring.c
--- linux-6.4/drivers/usb/host/xhci-ring.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/usb/host/xhci-ring.c	2023-09-01 15:04:29.533633001 +0200
@@ -1890,7 +1890,8 @@
 	}
 
 	/* We might get interrupts after shared_hcd is removed */
-	if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
+	if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL &&
+	    !xhci_has_one_roothub(xhci)) {
 		xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
 		bogus_port_status = true;
 		goto cleanup;
diff -ruw linux-6.4/drivers/usb/storage/usb.c linux-6.4-fbx/drivers/usb/storage/usb.c
--- linux-6.4/drivers/usb/storage/usb.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/usb/storage/usb.c	2024-02-13 16:56:46.401367196 +0100
@@ -67,7 +67,7 @@
 MODULE_DESCRIPTION("USB Mass Storage driver for Linux");
 MODULE_LICENSE("GPL");
 
-static unsigned int delay_use = 1;
+static unsigned int delay_use = 5;
 module_param(delay_use, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device");
 
diff -ruw linux-6.4/drivers/video/Kconfig linux-6.4-fbx/drivers/video/Kconfig
--- linux-6.4/drivers/video/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/video/Kconfig	2023-05-22 20:06:43.791841779 +0200
@@ -59,5 +59,4 @@
 
 endif
 
-
 endmenu
diff -ruw linux-6.4/drivers/video/fbdev/Kconfig linux-6.4-fbx/drivers/video/fbdev/Kconfig
--- linux-6.4/drivers/video/fbdev/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/video/fbdev/Kconfig	2023-05-22 20:06:43.799841992 +0200
@@ -2208,6 +2208,24 @@
 	  called sm712fb. If you want to compile it as a module, say M
 	  here and read <file:Documentation/kbuild/modules.rst>.
 
+config FB_SSD1320
+	tristate "SSD1320 OLED driver"
+	depends on FB && SPI
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_SYS_FOPS
+	select FB_BACKLIGHT
+
+config FB_SSD1327
+	tristate "SSD1327 OLED driver"
+	depends on FB && SPI
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_SYS_FOPS
+	select FB_BACKLIGHT
+
 source "drivers/video/fbdev/omap/Kconfig"
 source "drivers/video/fbdev/omap2/Kconfig"
 source "drivers/video/fbdev/mmp/Kconfig"
diff -ruw linux-6.4/drivers/video/fbdev/Makefile linux-6.4-fbx/drivers/video/fbdev/Makefile
--- linux-6.4/drivers/video/fbdev/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/drivers/video/fbdev/Makefile	2023-05-22 20:06:43.799841992 +0200
@@ -126,6 +126,8 @@
 obj-$(CONFIG_FB_DA8XX)		  += da8xx-fb.o
 obj-$(CONFIG_FB_SSD1307)	  += ssd1307fb.o
 obj-$(CONFIG_FB_SIMPLE)           += simplefb.o
+obj-$(CONFIG_FB_SSD1327)          += ssd1327.o
+obj-$(CONFIG_FB_SSD1320)          += ssd1320.o
 
 # the test framebuffer is last
 obj-$(CONFIG_FB_VIRTUAL)          += vfb.o
diff -ruw linux-6.4/fs/Kconfig linux-6.4-fbx/fs/Kconfig
--- linux-6.4/fs/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/Kconfig	2023-06-27 11:47:15.923863029 +0200
@@ -156,6 +156,7 @@
 source "fs/exfat/Kconfig"
 source "fs/ntfs/Kconfig"
 source "fs/ntfs3/Kconfig"
+source "fs/exfat-fbx/Kconfig"
 
 endmenu
 endif # BLOCK
diff -ruw linux-6.4/fs/Makefile linux-6.4-fbx/fs/Makefile
--- linux-6.4/fs/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/Makefile	2023-06-27 11:47:15.923863029 +0200
@@ -135,3 +135,4 @@
 obj-$(CONFIG_EROFS_FS)		+= erofs/
 obj-$(CONFIG_VBOXSF_FS)		+= vboxsf/
 obj-$(CONFIG_ZONEFS_FS)		+= zonefs/
+obj-$(CONFIG_EXFAT_FS_FBX)		+= exfat-fbx/
diff -ruw linux-6.4/fs/exec.c linux-6.4-fbx/fs/exec.c
--- linux-6.4/fs/exec.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/exec.c	2023-05-22 20:06:44.067849120 +0200
@@ -1895,6 +1895,23 @@
 		return PTR_ERR(filename);
 
 	/*
+	 * handle current->exec_mode:
+	 * - if unlimited, then nothing to do.
+	 * - if once, then set it to denied and continue (next execve
+	 *   after this one will fail).
+	 * - if denied, then effectively fail the execve call with EPERM.
+	 */
+	switch (current->exec_mode) {
+	case EXEC_MODE_UNLIMITED:
+		break;
+	case EXEC_MODE_ONCE:
+		current->exec_mode = EXEC_MODE_DENIED;
+		break;
+	case EXEC_MODE_DENIED:
+		return -EPERM;
+	}
+
+	/*
 	 * We move the actual failure in case of RLIMIT_NPROC excess from
 	 * set*uid() to execve() because too many poorly written programs
 	 * don't check setuid() return code.  Here we additionally recheck
diff -ruw linux-6.4/fs/proc/array.c linux-6.4-fbx/fs/proc/array.c
--- linux-6.4/fs/proc/array.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/proc/array.c	2023-05-22 20:06:44.315855717 +0200
@@ -145,6 +145,21 @@
 	return task_state_array[task_state_index(tsk)];
 }
 
+static const char *const task_exec_mode_array[] = {
+	"0 (Denied)",
+	"1 (Once)",
+	"2 (Unlimited)",
+};
+
+static inline const char *get_task_exec_mode(struct task_struct *tsk)
+{
+	unsigned int exec_mode = tsk->exec_mode;
+
+	if (exec_mode > EXEC_MODE_UNLIMITED)
+		return "? (Invalid)";
+	return task_exec_mode_array[exec_mode];
+}
+
 static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
 				struct pid *pid, struct task_struct *p)
 {
@@ -403,6 +418,12 @@
 	seq_putc(m, '\n');
 }
 
+static inline void task_exec_mode(struct seq_file *m,
+				  struct task_struct *p)
+{
+	seq_printf(m, "Exec mode: %s\n", get_task_exec_mode(p));
+}
+
 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
 {
 	seq_printf(m, "Cpus_allowed:\t%*pb\n",
@@ -455,6 +476,7 @@
 	task_cpus_allowed(m, task);
 	cpuset_task_status_allowed(m, task);
 	task_context_switch_counts(m, task);
+	task_exec_mode(m, task);
 	return 0;
 }
 
diff -ruw linux-6.4/fs/pstore/inode.c linux-6.4-fbx/fs/pstore/inode.c
--- linux-6.4/fs/pstore/inode.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/pstore/inode.c	2023-05-22 20:06:44.323855930 +0200
@@ -371,9 +371,10 @@
 		goto fail;
 	inode->i_mode = S_IFREG | 0444;
 	inode->i_fop = &pstore_file_operations;
-	scnprintf(name, sizeof(name), "%s-%s-%llu%s",
+	scnprintf(name, sizeof(name), "%s-%s-%s%llu%s",
 			pstore_type_to_name(record->type),
-			record->psi->name, record->id,
+		        record->psi->name, record->old ? "old-" : "",
+		        record->id,
 			record->compressed ? ".enc.z" : "");
 
 	private = kzalloc(sizeof(*private), GFP_KERNEL);
diff -ruw linux-6.4/fs/pstore/ram.c linux-6.4-fbx/fs/pstore/ram.c
--- linux-6.4/fs/pstore/ram.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/pstore/ram.c	2023-05-22 20:06:44.323855930 +0200
@@ -137,6 +137,7 @@
 
 	record->type = prz->type;
 	record->id = id;
+	record->old = prz->old_zone;
 
 	return prz;
 }
@@ -481,7 +482,7 @@
 static int ramoops_init_przs(const char *name,
 			     struct device *dev, struct ramoops_context *cxt,
 			     struct persistent_ram_zone ***przs,
-			     phys_addr_t *paddr, size_t mem_sz,
+			     phys_addr_t *paddr, void *vaddr, size_t mem_sz,
 			     ssize_t record_size,
 			     unsigned int *cnt, u32 sig, u32 flags)
 {
@@ -545,7 +546,7 @@
 		else
 			label = kasprintf(GFP_KERNEL, "ramoops:%s(%d/%d)",
 					  name, i, *cnt - 1);
-		prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
+		prz_ar[i] = persistent_ram_new(*paddr, vaddr, zone_sz, sig,
 					       &cxt->ecc_info,
 					       cxt->memtype, flags, label);
 		kfree(label);
@@ -578,7 +579,7 @@
 static int ramoops_init_prz(const char *name,
 			    struct device *dev, struct ramoops_context *cxt,
 			    struct persistent_ram_zone **prz,
-			    phys_addr_t *paddr, size_t sz, u32 sig)
+			    phys_addr_t *paddr, void *vaddr, size_t sz, u32 sig)
 {
 	char *label;
 
@@ -593,7 +594,7 @@
 	}
 
 	label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
-	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
+	*prz = persistent_ram_new(*paddr, vaddr, sz, sig, &cxt->ecc_info,
 				  cxt->memtype, PRZ_FLAG_ZAP_OLD, label);
 	kfree(label);
 	if (IS_ERR(*prz)) {
@@ -781,17 +782,20 @@
 	dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size
 			- cxt->pmsg_size;
 	err = ramoops_init_przs("dmesg", dev, cxt, &cxt->dprzs, &paddr,
+				pdata->mem_ptr,
 				dump_mem_sz, cxt->record_size,
 				&cxt->max_dump_cnt, 0, 0);
 	if (err)
 		goto fail_init;
 
 	err = ramoops_init_prz("console", dev, cxt, &cxt->cprz, &paddr,
+			       pdata->mem_ptr,
 			       cxt->console_size, 0);
 	if (err)
 		goto fail_init;
 
 	err = ramoops_init_prz("pmsg", dev, cxt, &cxt->mprz, &paddr,
+			       pdata->mem_ptr,
 				cxt->pmsg_size, 0);
 	if (err)
 		goto fail_init;
@@ -800,6 +804,7 @@
 				? nr_cpu_ids
 				: 1;
 	err = ramoops_init_przs("ftrace", dev, cxt, &cxt->fprzs, &paddr,
+				pdata->mem_ptr,
 				cxt->ftrace_size, -1,
 				&cxt->max_ftrace_cnt, LINUX_VERSION_CODE,
 				(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
diff -ruw linux-6.4/fs/pstore/ram_core.c linux-6.4-fbx/fs/pstore/ram_core.c
--- linux-6.4/fs/pstore/ram_core.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/pstore/ram_core.c	2023-05-22 20:06:44.323855930 +0200
@@ -35,6 +35,7 @@
 	uint32_t    sig;
 	atomic_t    start;
 	atomic_t    size;
+	atomic_t    flags;
 	uint8_t     data[];
 };
 
@@ -394,6 +395,7 @@
 {
 	atomic_set(&prz->buffer->start, 0);
 	atomic_set(&prz->buffer->size, 0);
+	atomic_set(&prz->buffer->flags, 0);
 	persistent_ram_update_header_ecc(prz);
 }
 
@@ -480,13 +482,16 @@
 	return va;
 }
 
-static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
+static int persistent_ram_buffer_map(phys_addr_t start, void *vaddr,
+				     phys_addr_t size,
 		struct persistent_ram_zone *prz, int memtype)
 {
 	prz->paddr = start;
 	prz->size = size;
 
-	if (pfn_valid(start >> PAGE_SHIFT))
+	if (vaddr)
+		prz->vaddr = vaddr;
+	else if (pfn_valid(start >> PAGE_SHIFT))
 		prz->vaddr = persistent_ram_vmap(start, size, memtype);
 	else
 		prz->vaddr = persistent_ram_iomap(start, size, memtype,
@@ -533,6 +538,15 @@
 			pr_debug("found existing buffer, size %zu, start %zu\n",
 				 buffer_size(prz), buffer_start(prz));
 			persistent_ram_save_old(prz);
+
+			if (atomic_read(&prz->buffer->flags) > 0) {
+				pr_info("old ramoops!\n");
+				prz->old_zone = true;
+			} else {
+				pr_info("fresh ramoops!\n");
+				atomic_set(&prz->buffer->flags, 1);
+			}
+			persistent_ram_update_header_ecc(prz);
 		}
 	} else {
 		pr_debug("no valid data in buffer (sig = 0x%08x)\n",
@@ -582,7 +596,8 @@
 	*_prz = NULL;
 }
 
-struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+struct persistent_ram_zone *persistent_ram_new(phys_addr_t start,
+					       void *vaddr, size_t size,
 			u32 sig, struct persistent_ram_ecc_info *ecc_info,
 			unsigned int memtype, u32 flags, char *label)
 {
@@ -600,7 +615,7 @@
 	prz->flags = flags;
 	prz->label = kstrdup(label, GFP_KERNEL);
 
-	ret = persistent_ram_buffer_map(start, size, prz, memtype);
+	ret = persistent_ram_buffer_map(start, vaddr, size, prz, memtype);
 	if (ret)
 		goto err;
 
diff -ruw linux-6.4/fs/pstore/ram_internal.h linux-6.4-fbx/fs/pstore/ram_internal.h
--- linux-6.4/fs/pstore/ram_internal.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/pstore/ram_internal.h	2023-05-22 20:06:44.323855930 +0200
@@ -55,6 +55,10 @@
  * @old_log_size:
  *	bytes contained in @old_log
  *
+ * @old_zone:
+ *      true when this zone's contents were already read out by a
+ *      previous boot (its persistent header flag was found set);
+ *      false when the zone's data is seen for the first time this boot.
  */
 struct persistent_ram_zone {
 	phys_addr_t paddr;
@@ -77,9 +81,12 @@
 
 	char *old_log;
 	size_t old_log_size;
+
+	bool old_zone;
 };
 
-struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+struct persistent_ram_zone *persistent_ram_new(phys_addr_t start,
+					       void *addr, size_t size,
 			u32 sig, struct persistent_ram_ecc_info *ecc_info,
 			unsigned int memtype, u32 flags, char *label);
 void persistent_ram_free(struct persistent_ram_zone **_prz);
diff -ruw linux-6.4/fs/smb/common/smb2pdu.h linux-6.4-fbx/fs/smb/common/smb2pdu.h
--- linux-6.4/fs/smb/common/smb2pdu.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/common/smb2pdu.h	2024-01-25 13:36:32.714984559 +0100
@@ -1206,6 +1206,7 @@
 #define SMB2_LEASE_WRITE_CACHING_LE		cpu_to_le32(0x04)
 
 #define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE	cpu_to_le32(0x02)
+#define SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE	cpu_to_le32(0x04)
 
 #define SMB2_LEASE_KEY_SIZE			16
 
diff -ruw linux-6.4/fs/smb/server/Kconfig linux-6.4-fbx/fs/smb/server/Kconfig
--- linux-6.4/fs/smb/server/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/Kconfig	2023-11-07 13:38:44.038256036 +0100
@@ -56,6 +56,16 @@
 	  SMB Direct allows transferring SMB packets over RDMA. If unsure,
 	  say N.
 
+config SMB_INSECURE_SERVER
+	bool "Support for insecure SMB1/CIFS and SMB2.0 protocols"
+	depends on SMB_SERVER
+	select CRYPTO_MD4
+	default n
+
+	help
+	  This enables deprecated insecure protocols dialects: SMB1/CIFS
+	  and SMB2.0
+
 endif
 
 config SMB_SERVER_CHECK_CAP_NET_ADMIN
diff -ruw linux-6.4/fs/smb/server/Makefile linux-6.4-fbx/fs/smb/server/Makefile
--- linux-6.4/fs/smb/server/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/Makefile	2023-11-07 13:38:44.038256036 +0100
@@ -18,3 +18,4 @@
 $(obj)/ksmbd_spnego_negtokentarg.asn1.o: $(obj)/ksmbd_spnego_negtokentarg.asn1.c $(obj)/ksmbd_spnego_negtokentarg.asn1.h
 
 ksmbd-$(CONFIG_SMB_SERVER_SMBDIRECT) += transport_rdma.o
+ksmbd-$(CONFIG_SMB_INSECURE_SERVER) += smb1pdu.o smb1ops.o smb1misc.o netmisc.o
diff -ruw linux-6.4/fs/smb/server/asn1.c linux-6.4-fbx/fs/smb/server/asn1.c
--- linux-6.4/fs/smb/server/asn1.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/asn1.c	2024-01-25 13:36:32.714984559 +0100
@@ -214,12 +214,15 @@
 {
 	struct ksmbd_conn *conn = context;
 
-	conn->mechToken = kmalloc(vlen + 1, GFP_KERNEL);
+	if (!vlen)
+		return -EINVAL;
+
+	conn->mechToken = kmemdup_nul(value, vlen, GFP_KERNEL);
 	if (!conn->mechToken)
 		return -ENOMEM;
 
-	memcpy(conn->mechToken, value, vlen);
-	conn->mechToken[vlen] = '\0';
+	conn->mechTokenLen = (unsigned int)vlen;
+
 	return 0;
 }
 
diff -ruw linux-6.4/fs/smb/server/auth.c linux-6.4-fbx/fs/smb/server/auth.c
--- linux-6.4/fs/smb/server/auth.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/auth.c	2023-11-07 13:38:44.038256036 +0100
@@ -68,6 +68,126 @@
 {
 	memcpy(buf, NEGOTIATE_GSS_HEADER, AUTH_GSS_LENGTH);
 }
+#ifdef CONFIG_SMB_INSECURE_SERVER
+static void
+str_to_key(unsigned char *str, unsigned char *key)
+{
+	int i;
+
+	key[0] = str[0] >> 1;
+	key[1] = ((str[0] & 0x01) << 6) | (str[1] >> 2);
+	key[2] = ((str[1] & 0x03) << 5) | (str[2] >> 3);
+	key[3] = ((str[2] & 0x07) << 4) | (str[3] >> 4);
+	key[4] = ((str[3] & 0x0F) << 3) | (str[4] >> 5);
+	key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6);
+	key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7);
+	key[7] = str[6] & 0x7F;
+	for (i = 0; i < 8; i++)
+		key[i] = (key[i] << 1);
+}
+
+static int
+smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
+{
+	unsigned char key2[8];
+	struct des_ctx ctx;
+
+	if (fips_enabled) {
+		ksmbd_debug(AUTH, "FIPS compliance enabled: DES not permitted\n");
+		return -ENOENT;
+	}
+
+	str_to_key(key, key2);
+	des_expand_key(&ctx, key2, DES_KEY_SIZE);
+	des_encrypt(&ctx, out, in);
+	memzero_explicit(&ctx, sizeof(ctx));
+	return 0;
+}
+
+static int ksmbd_enc_p24(unsigned char *p21, const unsigned char *c8, unsigned char *p24)
+{
+	int rc;
+
+	rc = smbhash(p24, c8, p21);
+	if (rc)
+		return rc;
+	rc = smbhash(p24 + 8, c8, p21 + 7);
+	if (rc)
+		return rc;
+	return smbhash(p24 + 16, c8, p21 + 14);
+}
+
+/* produce a md4 message digest from data of length n bytes */
+static int ksmbd_enc_md4(unsigned char *md4_hash, unsigned char *link_str,
+			 int link_len)
+{
+	int rc;
+	struct ksmbd_crypto_ctx *ctx;
+
+	ctx = ksmbd_crypto_ctx_find_md4();
+	if (!ctx) {
+		ksmbd_debug(AUTH, "Crypto md4 allocation error\n");
+		return -ENOMEM;
+	}
+
+	rc = crypto_shash_init(CRYPTO_MD4(ctx));
+	if (rc) {
+		ksmbd_debug(AUTH, "Could not init md4 shash\n");
+		goto out;
+	}
+
+	rc = crypto_shash_update(CRYPTO_MD4(ctx), link_str, link_len);
+	if (rc) {
+		ksmbd_debug(AUTH, "Could not update with link_str\n");
+		goto out;
+	}
+
+	rc = crypto_shash_final(CRYPTO_MD4(ctx), md4_hash);
+	if (rc)
+		ksmbd_debug(AUTH, "Could not generate md4 hash\n");
+out:
+	ksmbd_release_crypto_ctx(ctx);
+	return rc;
+}
+
+static int ksmbd_enc_update_sess_key(unsigned char *md5_hash, char *nonce,
+				     char *server_challenge, int len)
+{
+	int rc;
+	struct ksmbd_crypto_ctx *ctx;
+
+	ctx = ksmbd_crypto_ctx_find_md5();
+	if (!ctx) {
+		ksmbd_debug(AUTH, "Crypto md5 allocation error\n");
+		return -ENOMEM;
+	}
+
+	rc = crypto_shash_init(CRYPTO_MD5(ctx));
+	if (rc) {
+		ksmbd_debug(AUTH, "Could not init md5 shash\n");
+		goto out;
+	}
+
+	rc = crypto_shash_update(CRYPTO_MD5(ctx), server_challenge, len);
+	if (rc) {
+		ksmbd_debug(AUTH, "Could not update with challenge\n");
+		goto out;
+	}
+
+	rc = crypto_shash_update(CRYPTO_MD5(ctx), nonce, len);
+	if (rc) {
+		ksmbd_debug(AUTH, "Could not update with nonce\n");
+		goto out;
+	}
+
+	rc = crypto_shash_final(CRYPTO_MD5(ctx), md5_hash);
+	if (rc)
+		ksmbd_debug(AUTH, "Could not generate md5 hash\n");
+out:
+	ksmbd_release_crypto_ctx(ctx);
+	return rc;
+}
+#endif
 
 /**
  * ksmbd_gen_sess_key() - function to generate session key
@@ -206,6 +326,46 @@
 	return ret;
 }
 
+// XXX missing config_smb_insecure_server ?
+/**
+ * ksmbd_auth_ntlm() - NTLM authentication handler
+ * @sess:	session of connection
+ * @pw_buf:	NTLM challenge response
+ * @cryptkey:	server challenge used to compute the expected response
+ *
+ * Return:	0 on success, error number on error
+ */
+#ifdef CONFIG_SMB_INSECURE_SERVER
+int ksmbd_auth_ntlm(struct ksmbd_session *sess, char *pw_buf, char *cryptkey)
+{
+	int rc;
+	unsigned char p21[21];
+	char key[CIFS_AUTH_RESP_SIZE];
+
+	memset(p21, '\0', 21);
+	memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);
+	rc = ksmbd_enc_p24(p21, cryptkey, key);
+	if (rc) {
+		pr_err("password processing failed\n");
+		return rc;
+	}
+
+	ksmbd_enc_md4(sess->sess_key, user_passkey(sess->user),
+		      CIFS_SMB1_SESSKEY_SIZE);
+	memcpy(sess->sess_key + CIFS_SMB1_SESSKEY_SIZE, key,
+	       CIFS_AUTH_RESP_SIZE);
+	sess->sequence_number = 1;
+
+	if (strncmp(pw_buf, key, CIFS_AUTH_RESP_SIZE) != 0) {
+		ksmbd_debug(AUTH, "ntlmv1 authentication failed\n");
+		return -EINVAL;
+	}
+
+	ksmbd_debug(AUTH, "ntlmv1 authentication pass\n");
+	return 0;
+}
+#endif
+
 /**
  * ksmbd_auth_ntlmv2() - NTLMv2 authentication handler
  * @sess:	session of connection
@@ -290,6 +450,46 @@
 	return rc;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+/**
+ * __ksmbd_auth_ntlmv2() - NTLM2(extended security) authentication handler
+ * @sess:	session of connection
+ * @client_nonce:	client nonce from LM response.
+ * @ntlm_resp:		ntlm response data from client.
+ *
+ * Return:	0 on success, error number on error
+ */
+static int __ksmbd_auth_ntlmv2(struct ksmbd_session *sess,
+			       char *client_nonce,
+			       char *ntlm_resp,
+			       char *cryptkey)
+{
+	char sess_key[CIFS_SMB1_SESSKEY_SIZE] = {0};
+	int rc;
+	unsigned char p21[21];
+	char key[CIFS_AUTH_RESP_SIZE];
+
+	rc = ksmbd_enc_update_sess_key(sess_key, client_nonce, cryptkey, 8);
+	if (rc) {
+		pr_err("password processing failed\n");
+		goto out;
+	}
+
+	memset(p21, '\0', 21);
+	memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);
+	rc = ksmbd_enc_p24(p21, sess_key, key);
+	if (rc) {
+		pr_err("password processing failed\n");
+		goto out;
+	}
+
+	if (memcmp(ntlm_resp, key, CIFS_AUTH_RESP_SIZE) != 0)
+		rc = -EINVAL;
+out:
+	return rc;
+}
+#endif
+
 /**
  * ksmbd_decode_ntlmssp_auth_blob() - helper function to construct
  * authenticate blob
@@ -306,6 +506,10 @@
 	char *domain_name;
 	unsigned int nt_off, dn_off;
 	unsigned short nt_len, dn_len;
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	unsigned int lm_off;
+	unsigned short lm_len;
+#endif
 	int ret;
 
 	if (blob_len < sizeof(struct authenticate_message)) {
@@ -329,6 +533,26 @@
 	    nt_len < CIFS_ENCPWD_SIZE)
 		return -EINVAL;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	lm_off = le32_to_cpu(authblob->LmChallengeResponse.BufferOffset);
+	lm_len = le16_to_cpu(authblob->LmChallengeResponse.Length);
+	if (blob_len < (u64)lm_off + lm_len)
+		return -EINVAL;
+
+	/* process NTLM authentication */
+	if (nt_len == CIFS_AUTH_RESP_SIZE) {
+		if (le32_to_cpu(authblob->NegotiateFlags) &
+		    NTLMSSP_NEGOTIATE_EXTENDED_SEC)
+			return __ksmbd_auth_ntlmv2(sess,
+						   (char *)authblob + lm_off,
+						   (char *)authblob + nt_off,
+						   conn->ntlmssp.cryptkey);
+		else
+			return ksmbd_auth_ntlm(sess, (char *)authblob +
+				nt_off, conn->ntlmssp.cryptkey);
+	}
+#endif
+
 	/* TODO : use domain name that imported from configuration file */
 	domain_name = smb_strndup_from_utf16((const char *)authblob + dn_off,
 					     dn_len, true, conn->local_nls);
@@ -355,6 +579,9 @@
 		if (blob_len < (u64)sess_key_off + sess_key_len)
 			return -EINVAL;
 
+		if (sess_key_len > CIFS_KEY_SIZE)
+			return -EINVAL;
+
 		ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL);
 		if (!ctx_arc4)
 			return -ENOMEM;
@@ -560,6 +787,60 @@
 }
 #endif
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+/**
+ * ksmbd_sign_smb1_pdu() - function to generate SMB1 packet signing
+ * @sess:	session of connection
+ * @iov:        buffer iov array
+ * @n_vec:	number of iovecs
+ * @sig:        signature value generated for client request packet
+ *
+ */
+int ksmbd_sign_smb1_pdu(struct ksmbd_session *sess, struct kvec *iov, int n_vec,
+			char *sig)
+{
+	struct ksmbd_crypto_ctx *ctx;
+	int rc, i;
+
+	ctx = ksmbd_crypto_ctx_find_md5();
+	if (!ctx) {
+		ksmbd_debug(AUTH, "could not crypto alloc md5\n");
+		return -ENOMEM;
+	}
+
+	rc = crypto_shash_init(CRYPTO_MD5(ctx));
+	if (rc) {
+		ksmbd_debug(AUTH, "md5 init error %d\n", rc);
+		goto out;
+	}
+
+	rc = crypto_shash_update(CRYPTO_MD5(ctx), sess->sess_key, 40);
+	if (rc) {
+		ksmbd_debug(AUTH, "md5 update error %d\n", rc);
+		goto out;
+	}
+
+	for (i = 0; i < n_vec; i++) {
+		rc = crypto_shash_update(CRYPTO_MD5(ctx),
+					 iov[i].iov_base,
+					 iov[i].iov_len);
+		if (rc) {
+			ksmbd_debug(AUTH, "md5 update error %d\n", rc);
+			goto out;
+		}
+	}
+
+	rc = crypto_shash_final(CRYPTO_MD5(ctx), sig);
+	if (rc)
+		ksmbd_debug(AUTH, "md5 generation error %d\n", rc);
+
+out:
+	ksmbd_release_crypto_ctx(ctx);
+	return rc;
+}
+#endif
+
+
 /**
  * ksmbd_sign_smb2_pdu() - function to generate packet signing
  * @conn:	connection
@@ -1029,11 +1310,15 @@
 {
 	struct scatterlist *sg;
 	unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
-	int i, nr_entries[3] = {0}, total_entries = 0, sg_idx = 0;
+	int i, *nr_entries, total_entries = 0, sg_idx = 0;
 
 	if (!nvec)
 		return NULL;
 
+	nr_entries = kcalloc(nvec, sizeof(int), GFP_KERNEL);
+	if (!nr_entries)
+		return NULL;
+
 	for (i = 0; i < nvec - 1; i++) {
 		unsigned long kaddr = (unsigned long)iov[i + 1].iov_base;
 
@@ -1051,8 +1336,10 @@
 	total_entries += 2;
 
 	sg = kmalloc_array(total_entries, sizeof(struct scatterlist), GFP_KERNEL);
-	if (!sg)
+	if (!sg) {
+		kfree(nr_entries);
 		return NULL;
+	}
 
 	sg_init_table(sg, total_entries);
 	smb2_sg_set_buf(&sg[sg_idx++], iov[0].iov_base + 24, assoc_data_len);
@@ -1086,6 +1373,7 @@
 		}
 	}
 	smb2_sg_set_buf(&sg[sg_idx], sign, SMB2_SIGNATURE_SIZE);
+	kfree(nr_entries);
 	return sg;
 }
 
diff -ruw linux-6.4/fs/smb/server/auth.h linux-6.4-fbx/fs/smb/server/auth.h
--- linux-6.4/fs/smb/server/auth.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/auth.h	2023-11-07 13:38:44.038256036 +0100
@@ -52,6 +52,11 @@
 				   struct ksmbd_conn *conn);
 int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob,
 			    int in_len,	char *out_blob, int *out_len);
+#ifdef CONFIG_SMB_INSECURE_SERVER
+int ksmbd_auth_ntlm(struct ksmbd_session *sess, char *pw_buf, char *cryptkey);
+int ksmbd_sign_smb1_pdu(struct ksmbd_session *sess, struct kvec *iov, int n_vec,
+			char *sig);
+#endif
 int ksmbd_sign_smb2_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov,
 			int n_vec, char *sig);
 int ksmbd_sign_smb3_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov,
diff -ruw linux-6.4/fs/smb/server/connection.c linux-6.4-fbx/fs/smb/server/connection.c
--- linux-6.4/fs/smb/server/connection.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/connection.c	2024-01-25 13:36:32.714984559 +0100
@@ -10,6 +10,9 @@
 
 #include "server.h"
 #include "smb_common.h"
+#ifdef CONFIG_SMB_INSECURE_SERVER
+#include "smb1pdu.h"
+#endif
 #include "mgmt/ksmbd_ida.h"
 #include "connection.h"
 #include "transport_tcp.h"
@@ -84,6 +87,8 @@
 	spin_lock_init(&conn->llist_lock);
 	INIT_LIST_HEAD(&conn->lock_list);
 
+	init_rwsem(&conn->session_lock);
+
 	down_write(&conn_list_lock);
 	list_add(&conn->conns_list, &conn_list);
 	up_write(&conn_list_lock);
@@ -111,9 +116,20 @@
 {
 	struct ksmbd_conn *conn = work->conn;
 	struct list_head *requests_queue = NULL;
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	struct smb2_hdr *hdr = work->request_buf;
 
+	if (hdr->ProtocolId == SMB2_PROTO_NUMBER) {
 	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
 		requests_queue = &conn->requests;
+	} else {
+		if (conn->ops->get_cmd_val(work) != SMB_COM_NT_CANCEL)
+			requests_queue = &conn->requests;
+	}
+#else
+	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
+		requests_queue = &conn->requests;
+#endif
 
 	if (requests_queue) {
 		atomic_inc(&conn->req_running);
@@ -123,28 +139,22 @@
 	}
 }
 
-int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
+void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
 {
 	struct ksmbd_conn *conn = work->conn;
-	int ret = 1;
 
 	if (list_empty(&work->request_entry) &&
 	    list_empty(&work->async_request_entry))
-		return 0;
+		return;
 
-	if (!work->multiRsp)
 		atomic_dec(&conn->req_running);
-	if (!work->multiRsp) {
 		spin_lock(&conn->request_lock);
 		list_del_init(&work->request_entry);
 		spin_unlock(&conn->request_lock);
 		if (work->asynchronous)
 			release_async_work(work);
-		ret = 0;
-	}
 
 	wake_up_all(&conn->req_running_q);
-	return ret;
 }
 
 void ksmbd_conn_lock(struct ksmbd_conn *conn)
@@ -171,61 +181,35 @@
 
 void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id)
 {
-	struct ksmbd_conn *bind_conn;
-
 	wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
-
-	down_read(&conn_list_lock);
-	list_for_each_entry(bind_conn, &conn_list, conns_list) {
-		if (bind_conn == conn)
-			continue;
-
-		if ((bind_conn->binding || xa_load(&bind_conn->sessions, sess_id)) &&
-		    !ksmbd_conn_releasing(bind_conn) &&
-		    atomic_read(&bind_conn->req_running)) {
-			wait_event(bind_conn->req_running_q,
-				atomic_read(&bind_conn->req_running) == 0);
-		}
-	}
-	up_read(&conn_list_lock);
 }
 
 int ksmbd_conn_write(struct ksmbd_work *work)
 {
 	struct ksmbd_conn *conn = work->conn;
-	size_t len = 0;
 	int sent;
-	struct kvec iov[3];
-	int iov_idx = 0;
 
 	if (!work->response_buf) {
 		pr_err("NULL response header\n");
 		return -EINVAL;
 	}
 
-	if (work->tr_buf) {
-		iov[iov_idx] = (struct kvec) { work->tr_buf,
-				sizeof(struct smb2_transform_hdr) + 4 };
-		len += iov[iov_idx++].iov_len;
-	}
+	if (work->send_no_response)
+		return 0;
 
-	if (work->aux_payload_sz) {
-		iov[iov_idx] = (struct kvec) { work->response_buf, work->resp_hdr_sz };
-		len += iov[iov_idx++].iov_len;
-		iov[iov_idx] = (struct kvec) { work->aux_payload_buf, work->aux_payload_sz };
-		len += iov[iov_idx++].iov_len;
-	} else {
-		if (work->tr_buf)
-			iov[iov_idx].iov_len = work->resp_hdr_sz;
-		else
-			iov[iov_idx].iov_len = get_rfc1002_len(work->response_buf) + 4;
-		iov[iov_idx].iov_base = work->response_buf;
-		len += iov[iov_idx++].iov_len;
-	}
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (!work->iov_idx)
+		ksmbd_iov_pin_rsp(work, (char *)work->response_buf + 4,
+				  work->response_offset);
+#endif
+
+	if (!work->iov_idx)
+		return -EINVAL;
 
 	ksmbd_conn_lock(conn);
-	sent = conn->transport->ops->writev(conn->transport, &iov[0],
-					iov_idx, len,
+	sent = conn->transport->ops->writev(conn->transport, work->iov,
+			work->iov_cnt,
+			get_rfc1002_len(work->iov[0].iov_base) + 4,
 					work->need_invalidate_rkey,
 					work->remote_key);
 	ksmbd_conn_unlock(conn);
@@ -294,7 +278,11 @@
 	return true;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+#define SMB1_MIN_SUPPORTED_HEADER_SIZE SMB_HEADER_SIZE
+#else
 #define SMB1_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb_hdr))
+#endif
 #define SMB2_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb2_hdr) + 4)
 
 /**
@@ -320,6 +308,7 @@
 		goto out;
 
 	conn->last_active = jiffies;
+	set_freezable();
 	while (ksmbd_conn_alive(conn)) {
 		if (try_to_freeze())
 			continue;
@@ -341,8 +330,9 @@
 			max_allowed_pdu_size = SMB3_MAX_MSGSIZE;
 
 		if (pdu_size > max_allowed_pdu_size) {
-			pr_err_ratelimited("PDU length(%u) exceeded maximum allowed pdu size(%u) on connection(%d)\n",
+			pr_err_ratelimited("PDU length(%u) exceeded maximum allowed pdu size(%u) from %pISc (status=%d)\n",
 					pdu_size, max_allowed_pdu_size,
+					KSMBD_TCP_PEER_SOCKADDR(conn),
 					READ_ONCE(conn->status));
 			break;
 		}
@@ -451,13 +441,7 @@
 again:
 	down_read(&conn_list_lock);
 	list_for_each_entry(conn, &conn_list, conns_list) {
-		struct task_struct *task;
-
 		t = conn->transport;
-		task = t->handler;
-		if (task)
-			ksmbd_debug(CONN, "Stop session handler %s/%d\n",
-				    task->comm, task_pid_nr(task));
 		ksmbd_conn_set_exiting(conn);
 		if (t->ops->shutdown) {
 			up_read(&conn_list_lock);
diff -ruw linux-6.4/fs/smb/server/connection.h linux-6.4-fbx/fs/smb/server/connection.h
--- linux-6.4/fs/smb/server/connection.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/connection.h	2024-01-25 13:36:32.714984559 +0100
@@ -50,6 +50,7 @@
 	struct nls_table		*local_nls;
 	struct unicode_map		*um;
 	struct list_head		conns_list;
+	struct rw_semaphore		session_lock;
 	/* smb session 1 per user */
 	struct xarray			sessions;
 	unsigned long			last_active;
@@ -87,6 +88,7 @@
 	__u16				dialect;
 
 	char				*mechToken;
+	unsigned int			mechTokenLen;
 
 	struct ksmbd_conn_ops	*conn_ops;
 
@@ -133,7 +135,6 @@
 struct ksmbd_transport {
 	struct ksmbd_conn		*conn;
 	struct ksmbd_transport_ops	*ops;
-	struct task_struct		*handler;
 };
 
 #define KSMBD_TCP_RECV_TIMEOUT	(7 * HZ)
@@ -158,7 +159,7 @@
 			  struct smb2_buffer_desc_v1 *desc,
 			  unsigned int desc_len);
 void ksmbd_conn_enqueue_request(struct ksmbd_work *work);
-int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work);
+void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work);
 void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops);
 int ksmbd_conn_handler_loop(void *p);
 int ksmbd_conn_transport_init(void);
diff -ruw linux-6.4/fs/smb/server/crypto_ctx.c linux-6.4-fbx/fs/smb/server/crypto_ctx.c
--- linux-6.4/fs/smb/server/crypto_ctx.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/crypto_ctx.c	2023-11-07 13:38:44.038256036 +0100
@@ -81,6 +81,14 @@
 	case CRYPTO_SHASH_SHA512:
 		tfm = crypto_alloc_shash("sha512", 0, 0);
 		break;
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	case CRYPTO_SHASH_MD4:
+		tfm = crypto_alloc_shash("md4", 0, 0);
+		break;
+	case CRYPTO_SHASH_MD5:
+		tfm = crypto_alloc_shash("md5", 0, 0);
+		break;
+#endif
 	default:
 		return NULL;
 	}
@@ -207,6 +215,17 @@
 {
 	return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA512);
 }
+#ifdef CONFIG_SMB_INSECURE_SERVER
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md4(void)
+{
+	return ____crypto_shash_ctx_find(CRYPTO_SHASH_MD4);
+}
+
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md5(void)
+{
+	return ____crypto_shash_ctx_find(CRYPTO_SHASH_MD5);
+}
+#endif
 
 static struct ksmbd_crypto_ctx *____crypto_aead_ctx_find(int id)
 {
diff -ruw linux-6.4/fs/smb/server/crypto_ctx.h linux-6.4-fbx/fs/smb/server/crypto_ctx.h
--- linux-6.4/fs/smb/server/crypto_ctx.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/crypto_ctx.h	2023-11-07 13:38:44.038256036 +0100
@@ -15,6 +15,10 @@
 	CRYPTO_SHASH_CMACAES,
 	CRYPTO_SHASH_SHA256,
 	CRYPTO_SHASH_SHA512,
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	CRYPTO_SHASH_MD4,
+	CRYPTO_SHASH_MD5,
+#endif
 	CRYPTO_SHASH_MAX,
 };
 
@@ -41,6 +45,10 @@
 #define CRYPTO_CMACAES(c)	((c)->desc[CRYPTO_SHASH_CMACAES])
 #define CRYPTO_SHA256(c)	((c)->desc[CRYPTO_SHASH_SHA256])
 #define CRYPTO_SHA512(c)	((c)->desc[CRYPTO_SHASH_SHA512])
+#ifdef CONFIG_SMB_INSECURE_SERVER
+#define CRYPTO_MD4(c)		((c)->desc[CRYPTO_SHASH_MD4])
+#define CRYPTO_MD5(c)		((c)->desc[CRYPTO_SHASH_MD5])
+#endif
 
 #define CRYPTO_HMACMD5_TFM(c)	((c)->desc[CRYPTO_SHASH_HMACMD5]->tfm)
 #define CRYPTO_HMACSHA256_TFM(c)\
@@ -48,6 +56,10 @@
 #define CRYPTO_CMACAES_TFM(c)	((c)->desc[CRYPTO_SHASH_CMACAES]->tfm)
 #define CRYPTO_SHA256_TFM(c)	((c)->desc[CRYPTO_SHASH_SHA256]->tfm)
 #define CRYPTO_SHA512_TFM(c)	((c)->desc[CRYPTO_SHASH_SHA512]->tfm)
+#ifdef CONFIG_SMB_INSECURE_SERVER
+#define CRYPTO_MD4_TFM(c)	((c)->desc[CRYPTO_SHASH_MD4]->tfm)
+#define CRYPTO_MD5_TFM(c)	((c)->desc[CRYPTO_SHASH_MD5]->tfm)
+#endif
 
 #define CRYPTO_GCM(c)		((c)->ccmaes[CRYPTO_AEAD_AES_GCM])
 #define CRYPTO_CCM(c)		((c)->ccmaes[CRYPTO_AEAD_AES_CCM])
@@ -58,6 +70,10 @@
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_cmacaes(void);
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void);
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha256(void);
+#ifdef CONFIG_SMB_INSECURE_SERVER
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md4(void);
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md5(void);
+#endif
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_gcm(void);
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_ccm(void);
 void ksmbd_crypto_destroy(void);
diff -ruw linux-6.4/fs/smb/server/glob.h linux-6.4-fbx/fs/smb/server/glob.h
--- linux-6.4/fs/smb/server/glob.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/glob.h	2023-11-07 13:38:44.038256036 +0100
@@ -46,4 +46,8 @@
 
 #define UNICODE_LEN(x)		((x) * 2)
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+extern void ntstatus_to_dos(__le32 ntstatus, __u8 *eclass, __le16 *ecode);
+#endif
+
 #endif /* __KSMBD_GLOB_H */
diff -ruw linux-6.4/fs/smb/server/ksmbd_netlink.h linux-6.4-fbx/fs/smb/server/ksmbd_netlink.h
--- linux-6.4/fs/smb/server/ksmbd_netlink.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/ksmbd_netlink.h	2024-04-19 15:59:31.193600561 +0200
@@ -166,7 +166,8 @@
 	__u16	force_uid;
 	__u16	force_gid;
 	__s8	share_name[KSMBD_REQ_MAX_SHARE_NAME];
-	__u32	reserved[112];		/* Reserved room */
+	__u32	reserved[111];		/* Reserved room */
+	__u32	payload_sz;
 	__u32	veto_list_sz;
 	__s8	____payload[];
 };
@@ -304,7 +305,8 @@
 	KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST,
 	KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE	= 15,
 
-	KSMBD_EVENT_MAX
+	__KSMBD_EVENT_MAX,
+	KSMBD_EVENT_MAX = __KSMBD_EVENT_MAX - 1
 };
 
 /*
@@ -353,6 +355,7 @@
 #define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS	BIT(12)
 #define KSMBD_SHARE_FLAG_ACL_XATTR		BIT(13)
 #define KSMBD_SHARE_FLAG_UPDATE		BIT(14)
+#define KSMBD_SHARE_FLAG_CROSSMNT		BIT(15)
 
 /*
  * Tree connect request flags.
diff -ruw linux-6.4/fs/smb/server/ksmbd_work.c linux-6.4-fbx/fs/smb/server/ksmbd_work.c
--- linux-6.4/fs/smb/server/ksmbd_work.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/ksmbd_work.c	2024-01-25 13:36:32.714984559 +0100
@@ -27,18 +27,38 @@
 		INIT_LIST_HEAD(&work->async_request_entry);
 		INIT_LIST_HEAD(&work->fp_entry);
 		INIT_LIST_HEAD(&work->interim_entry);
+		INIT_LIST_HEAD(&work->aux_read_list);
+		work->iov_alloc_cnt = 4;
+		work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec),
+				    GFP_KERNEL);
+		if (!work->iov) {
+			kmem_cache_free(work_cache, work);
+			work = NULL;
+		}
 	}
 	return work;
 }
 
 void ksmbd_free_work_struct(struct ksmbd_work *work)
 {
+	struct aux_read *ar, *tmp;
+
 	WARN_ON(work->saved_cred != NULL);
 
 	kvfree(work->response_buf);
-	kvfree(work->aux_payload_buf);
+
+	list_for_each_entry_safe(ar, tmp, &work->aux_read_list, entry) {
+		kvfree(ar->buf);
+		list_del(&ar->entry);
+		kfree(ar);
+	}
+
 	kfree(work->tr_buf);
 	kvfree(work->request_buf);
+	kfree(work->iov);
+	if (!list_empty(&work->interim_entry))
+		list_del(&work->interim_entry);
+
 	if (work->async_id)
 		ksmbd_release_id(&work->conn->async_ida, work->async_id);
 	kmem_cache_free(work_cache, work);
@@ -77,3 +97,81 @@
 {
 	return queue_work(ksmbd_wq, &work->work);
 }
+
+static inline void __ksmbd_iov_pin(struct ksmbd_work *work, void *ib,
+				   unsigned int ib_len)
+{
+	work->iov[++work->iov_idx].iov_base = ib;
+	work->iov[work->iov_idx].iov_len = ib_len;
+	work->iov_cnt++;
+}
+
+static int __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len,
+			       void *aux_buf, unsigned int aux_size)
+{
+	struct aux_read *ar = NULL;
+	int need_iov_cnt = 1;
+
+	if (aux_size) {
+		need_iov_cnt++;
+		ar = kmalloc(sizeof(struct aux_read), GFP_KERNEL);
+		if (!ar)
+			return -ENOMEM;
+	}
+
+	if (work->iov_alloc_cnt < work->iov_cnt + need_iov_cnt) {
+		struct kvec *new;
+
+		work->iov_alloc_cnt += 4;
+		new = krealloc(work->iov,
+			       sizeof(struct kvec) * work->iov_alloc_cnt,
+			       GFP_KERNEL | __GFP_ZERO);
+		if (!new) {
+			kfree(ar);
+			work->iov_alloc_cnt -= 4;
+			return -ENOMEM;
+		}
+		work->iov = new;
+	}
+
+	/* Plus rfc_length size on first iov */
+	if (!work->iov_idx) {
+		work->iov[work->iov_idx].iov_base = work->response_buf;
+		*(__be32 *)work->iov[0].iov_base = 0;
+		work->iov[work->iov_idx].iov_len = 4;
+		work->iov_cnt++;
+	}
+
+	__ksmbd_iov_pin(work, ib, len);
+	inc_rfc1001_len(work->iov[0].iov_base, len);
+
+	if (aux_size) {
+		__ksmbd_iov_pin(work, aux_buf, aux_size);
+		inc_rfc1001_len(work->iov[0].iov_base, aux_size);
+
+		ar->buf = aux_buf;
+		list_add(&ar->entry, &work->aux_read_list);
+	}
+
+	return 0;
+}
+
+int ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len)
+{
+	return __ksmbd_iov_pin_rsp(work, ib, len, NULL, 0);
+}
+
+int ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len,
+			   void *aux_buf, unsigned int aux_size)
+{
+	return __ksmbd_iov_pin_rsp(work, ib, len, aux_buf, aux_size);
+}
+
+int allocate_interim_rsp_buf(struct ksmbd_work *work)
+{
+	work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, GFP_KERNEL);
+	if (!work->response_buf)
+		return -ENOMEM;
+	work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+	return 0;
+}
diff -ruw linux-6.4/fs/smb/server/ksmbd_work.h linux-6.4-fbx/fs/smb/server/ksmbd_work.h
--- linux-6.4/fs/smb/server/ksmbd_work.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/ksmbd_work.h	2023-11-07 13:38:44.038256036 +0100
@@ -19,6 +19,11 @@
 	KSMBD_WORK_CLOSED,
 };
 
+struct aux_read {
+	void *buf;
+	struct list_head entry;
+};
+
 /* one of these for every pending CIFS request at the connection */
 struct ksmbd_work {
 	/* Server corresponding to this mid */
@@ -30,14 +35,23 @@
 	void                            *request_buf;
 	/* Response buffer */
 	void                            *response_buf;
-
-	/* Read data buffer */
-	void                            *aux_payload_buf;
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	size_t				response_offset;
+#endif
+
+	struct list_head		aux_read_list;
+
+	struct kvec			*iov;
+	int				iov_alloc_cnt;
+	int				iov_cnt;
+	int				iov_idx;
 
 	/* Next cmd hdr in compound req buf*/
 	int                             next_smb2_rcv_hdr_off;
 	/* Next cmd hdr in compound rsp buf*/
 	int                             next_smb2_rsp_hdr_off;
+	/* Current cmd hdr in compound rsp buf*/
+	int                             curr_smb2_rsp_hdr_off;
 
 	/*
 	 * Current Local FID assigned compound response if SMB2 CREATE
@@ -53,16 +67,11 @@
 	unsigned int			credits_granted;
 
 	/* response smb header size */
-	unsigned int                    resp_hdr_sz;
 	unsigned int                    response_sz;
-	/* Read data count */
-	unsigned int                    aux_payload_sz;
 
 	void				*tr_buf;
 
 	unsigned char			state;
-	/* Multiple responses for one request e.g. SMB ECHO */
-	bool                            multiRsp:1;
 	/* No response for cancelled request */
 	bool                            send_no_response:1;
 	/* Request is encrypted */
@@ -96,6 +105,15 @@
 }
 
 /**
+ * ksmbd_resp_buf_curr - Get current buffer on compound response.
+ * @work: smb work containing response buffer
+ */
+static inline void *ksmbd_resp_buf_curr(struct ksmbd_work *work)
+{
+	return work->response_buf + work->curr_smb2_rsp_hdr_off + 4;
+}
+
+/**
  * ksmbd_req_buf_next - Get next buffer on compound request.
  * @work: smb work containing response buffer
  */
@@ -113,5 +131,8 @@
 int ksmbd_workqueue_init(void);
 void ksmbd_workqueue_destroy(void);
 bool ksmbd_queue_work(struct ksmbd_work *work);
-
+int ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len,
+			   void *aux_buf, unsigned int aux_size);
+int ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len);
+int allocate_interim_rsp_buf(struct ksmbd_work *work);
 #endif /* __KSMBD_WORK_H__ */
diff -ruw linux-6.4/fs/smb/server/mgmt/ksmbd_ida.c linux-6.4-fbx/fs/smb/server/mgmt/ksmbd_ida.c
--- linux-6.4/fs/smb/server/mgmt/ksmbd_ida.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/mgmt/ksmbd_ida.c	2023-11-07 13:38:44.038256036 +0100
@@ -10,6 +10,13 @@
 	return ida_simple_get(ida, from, to, GFP_KERNEL);
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+int ksmbd_acquire_smb1_tid(struct ida *ida)
+{
+	return __acquire_id(ida, 1, 0xFFFF);
+}
+#endif
+
 int ksmbd_acquire_smb2_tid(struct ida *ida)
 {
 	int id;
@@ -19,6 +26,13 @@
 	return id;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+int ksmbd_acquire_smb1_uid(struct ida *ida)
+{
+	return __acquire_id(ida, 1, 0xFFFE);
+}
+#endif
+
 int ksmbd_acquire_smb2_uid(struct ida *ida)
 {
 	int id;
diff -ruw linux-6.4/fs/smb/server/mgmt/ksmbd_ida.h linux-6.4-fbx/fs/smb/server/mgmt/ksmbd_ida.h
--- linux-6.4/fs/smb/server/mgmt/ksmbd_ida.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/mgmt/ksmbd_ida.h	2023-11-07 13:38:44.038256036 +0100
@@ -16,6 +16,9 @@
  *    The value 0xFFFF is used to specify all TIDs or no TID,
  *    depending upon the context in which it is used.
  */
+#ifdef CONFIG_SMB_INSECURE_SERVER
+int ksmbd_acquire_smb1_tid(struct ida *ida);
+#endif
 int ksmbd_acquire_smb2_tid(struct ida *ida);
 
 /*
@@ -25,6 +28,9 @@
  *    valid UID.<21> All other possible values for a UID, excluding
  *    zero (0x0000), are valid.
  */
+#ifdef CONFIG_SMB_INSECURE_SERVER
+int ksmbd_acquire_smb1_uid(struct ida *ida);
+#endif
 int ksmbd_acquire_smb2_uid(struct ida *ida);
 int ksmbd_acquire_async_msg_id(struct ida *ida);
 
diff -ruw linux-6.4/fs/smb/server/mgmt/share_config.c linux-6.4-fbx/fs/smb/server/mgmt/share_config.c
--- linux-6.4/fs/smb/server/mgmt/share_config.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/mgmt/share_config.c	2024-04-19 15:59:31.193600561 +0200
@@ -158,7 +158,12 @@
 	share->name = kstrdup(name, GFP_KERNEL);
 
 	if (!test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
-		share->path = kstrdup(ksmbd_share_config_path(resp),
+		int path_len = PATH_MAX;
+
+		if (resp->payload_sz)
+			path_len = resp->payload_sz - resp->veto_list_sz;
+
+		share->path = kstrndup(ksmbd_share_config_path(resp), path_len,
 				      GFP_KERNEL);
 		if (share->path)
 			share->path_sz = strlen(share->path);
diff -ruw linux-6.4/fs/smb/server/mgmt/share_config.h linux-6.4-fbx/fs/smb/server/mgmt/share_config.h
--- linux-6.4/fs/smb/server/mgmt/share_config.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/mgmt/share_config.h	2023-11-07 13:38:44.038256036 +0100
@@ -34,29 +34,22 @@
 #define KSMBD_SHARE_INVALID_UID	((__u16)-1)
 #define KSMBD_SHARE_INVALID_GID	((__u16)-1)
 
-static inline int share_config_create_mode(struct ksmbd_share_config *share,
+static inline umode_t
+share_config_create_mode(struct ksmbd_share_config *share,
 					   umode_t posix_mode)
 {
-	if (!share->force_create_mode) {
-		if (!posix_mode)
-			return share->create_mask;
-		else
-			return posix_mode & share->create_mask;
-	}
-	return share->force_create_mode & share->create_mask;
+	umode_t mode = (posix_mode ?: (umode_t)-1) & share->create_mask;
+
+	return mode | share->force_create_mode;
 }
 
-static inline int share_config_directory_mode(struct ksmbd_share_config *share,
+static inline umode_t
+share_config_directory_mode(struct ksmbd_share_config *share,
 					      umode_t posix_mode)
 {
-	if (!share->force_directory_mode) {
-		if (!posix_mode)
-			return share->directory_mask;
-		else
-			return posix_mode & share->directory_mask;
-	}
+	umode_t mode = (posix_mode ?: (umode_t)-1) & share->directory_mask;
 
-	return share->force_directory_mode & share->directory_mask;
+	return mode | share->force_directory_mode;
 }
 
 static inline int test_share_config_flag(struct ksmbd_share_config *share,
diff -ruw linux-6.4/fs/smb/server/mgmt/tree_connect.c linux-6.4-fbx/fs/smb/server/mgmt/tree_connect.c
--- linux-6.4/fs/smb/server/mgmt/tree_connect.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/mgmt/tree_connect.c	2023-11-07 13:38:44.038256036 +0100
@@ -73,7 +73,10 @@
 
 	tree_conn->user = sess->user;
 	tree_conn->share_conf = sc;
+	tree_conn->t_state = TREE_NEW;
 	status.tree_conn = tree_conn;
+	atomic_set(&tree_conn->refcount, 1);
+	init_waitqueue_head(&tree_conn->refcount_q);
 
 	ret = xa_err(xa_store(&sess->tree_conns, tree_conn->id, tree_conn,
 			      GFP_KERNEL));
@@ -93,14 +96,33 @@
 	return status;
 }
 
+void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon)
+{
+	/*
+	 * Wake up any waiter blocked in ksmbd_tree_conn_disconnect()
+	 * once the refcount drops to zero. waitqueue_active() is safe
+	 * here because the wait condition uses an atomic operation.
+	 */
+	if (!atomic_dec_return(&tcon->refcount) &&
+	    waitqueue_active(&tcon->refcount_q))
+		wake_up(&tcon->refcount_q);
+}
+
 int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
 			       struct ksmbd_tree_connect *tree_conn)
 {
 	int ret;
 
+	write_lock(&sess->tree_conns_lock);
+	xa_erase(&sess->tree_conns, tree_conn->id);
+	write_unlock(&sess->tree_conns_lock);
+
+	if (!atomic_dec_and_test(&tree_conn->refcount))
+		wait_event(tree_conn->refcount_q,
+			   atomic_read(&tree_conn->refcount) == 0);
+
 	ret = ksmbd_ipc_tree_disconnect_request(sess->id, tree_conn->id);
 	ksmbd_release_tree_conn_id(sess, tree_conn->id);
-	xa_erase(&sess->tree_conns, tree_conn->id);
 	ksmbd_share_config_put(tree_conn->share_conf);
 	kfree(tree_conn);
 	return ret;
@@ -111,26 +133,19 @@
 {
 	struct ksmbd_tree_connect *tcon;
 
+	read_lock(&sess->tree_conns_lock);
 	tcon = xa_load(&sess->tree_conns, id);
 	if (tcon) {
-		if (test_bit(TREE_CONN_EXPIRE, &tcon->status))
+		if (tcon->t_state != TREE_CONNECTED)
+			tcon = NULL;
+		else if (!atomic_inc_not_zero(&tcon->refcount))
 			tcon = NULL;
 	}
+	read_unlock(&sess->tree_conns_lock);
 
 	return tcon;
 }
 
-struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
-						 unsigned int id)
-{
-	struct ksmbd_tree_connect *tc;
-
-	tc = ksmbd_tree_conn_lookup(sess, id);
-	if (tc)
-		return tc->share_conf;
-	return NULL;
-}
-
 int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess)
 {
 	int ret = 0;
@@ -140,8 +155,18 @@
 	if (!sess)
 		return -EINVAL;
 
-	xa_for_each(&sess->tree_conns, id, tc)
+	xa_for_each(&sess->tree_conns, id, tc) {
+		write_lock(&sess->tree_conns_lock);
+		if (tc->t_state == TREE_DISCONNECTED) {
+			write_unlock(&sess->tree_conns_lock);
+			ret = -ENOENT;
+			continue;
+		}
+		tc->t_state = TREE_DISCONNECTED;
+		write_unlock(&sess->tree_conns_lock);
+
 		ret |= ksmbd_tree_conn_disconnect(sess, tc);
+	}
 	xa_destroy(&sess->tree_conns);
 	return ret;
 }
diff -ruw linux-6.4/fs/smb/server/mgmt/tree_connect.h linux-6.4-fbx/fs/smb/server/mgmt/tree_connect.h
--- linux-6.4/fs/smb/server/mgmt/tree_connect.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/mgmt/tree_connect.h	2023-11-07 13:38:44.038256036 +0100
@@ -14,7 +14,11 @@
 struct ksmbd_user;
 struct ksmbd_conn;
 
-#define TREE_CONN_EXPIRE		1
+enum {
+	TREE_NEW = 0,
+	TREE_CONNECTED,
+	TREE_DISCONNECTED
+};
 
 struct ksmbd_tree_connect {
 	int				id;
@@ -27,7 +31,9 @@
 
 	int				maximal_access;
 	bool				posix_extensions;
-	unsigned long			status;
+	atomic_t			refcount;
+	wait_queue_head_t		refcount_q;
+	unsigned int			t_state;
 };
 
 struct ksmbd_tree_conn_status {
@@ -46,6 +52,7 @@
 struct ksmbd_tree_conn_status
 ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
 			const char *share_name);
+void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon);
 
 int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
 			       struct ksmbd_tree_connect *tree_conn);
@@ -53,9 +60,6 @@
 struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
 						  unsigned int id);
 
-struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
-						 unsigned int id);
-
 int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess);
 
 #endif /* __TREE_CONNECT_MANAGEMENT_H__ */
diff -ruw linux-6.4/fs/smb/server/mgmt/user_session.c linux-6.4-fbx/fs/smb/server/mgmt/user_session.c
--- linux-6.4/fs/smb/server/mgmt/user_session.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/mgmt/user_session.c	2023-11-07 13:38:44.038256036 +0100
@@ -174,18 +174,23 @@
 	unsigned long id;
 	struct ksmbd_session *sess;
 
-	down_write(&sessions_table_lock);
+	down_write(&conn->session_lock);
 	xa_for_each(&conn->sessions, id, sess) {
 		if (sess->state != SMB2_SESSION_VALID ||
 		    time_after(jiffies,
 			       sess->last_active + SMB2_SESSION_TIMEOUT)) {
 			xa_erase(&conn->sessions, sess->id);
+#ifdef CONFIG_SMB_INSECURE_SERVER
+			if (hash_hashed(&sess->hlist))
+				hash_del(&sess->hlist);
+#else
 			hash_del(&sess->hlist);
+#endif
 			ksmbd_session_destroy(sess);
 			continue;
 		}
 	}
-	up_write(&sessions_table_lock);
+	up_write(&conn->session_lock);
 }
 
 int ksmbd_session_register(struct ksmbd_conn *conn,
@@ -222,12 +227,19 @@
 		hash_for_each_safe(sessions_table, bkt, tmp, sess, hlist) {
 			if (!ksmbd_chann_del(conn, sess) &&
 			    xa_empty(&sess->ksmbd_chann_list)) {
+#ifdef CONFIG_SMB_INSECURE_SERVER
+			if (hash_hashed(&sess->hlist))
 				hash_del(&sess->hlist);
+#else
+				hash_del(&sess->hlist);
+#endif
 				ksmbd_session_destroy(sess);
 			}
 		}
 	}
+	up_write(&sessions_table_lock);
 
+	down_write(&conn->session_lock);
 	xa_for_each(&conn->sessions, id, sess) {
 		unsigned long chann_id;
 		struct channel *chann;
@@ -240,11 +252,16 @@
 		ksmbd_chann_del(conn, sess);
 		if (xa_empty(&sess->ksmbd_chann_list)) {
 			xa_erase(&conn->sessions, sess->id);
+#ifdef CONFIG_SMB_INSECURE_SERVER
+			if (hash_hashed(&sess->hlist))
 			hash_del(&sess->hlist);
+#else
+			hash_del(&sess->hlist);
+#endif
 			ksmbd_session_destroy(sess);
 		}
 	}
-	up_write(&sessions_table_lock);
+	up_write(&conn->session_lock);
 }
 
 struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
@@ -252,9 +269,11 @@
 {
 	struct ksmbd_session *sess;
 
+	down_read(&conn->session_lock);
 	sess = xa_load(&conn->sessions, id);
 	if (sess)
 		sess->last_active = jiffies;
+	up_read(&conn->session_lock);
 	return sess;
 }
 
@@ -319,6 +338,18 @@
 	return NULL;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+static int __init_smb1_session(struct ksmbd_session *sess)
+{
+	int id = ksmbd_acquire_smb1_uid(&session_ida);
+
+	if (id < 0)
+		return -EINVAL;
+	sess->id = id;
+	return 0;
+}
+#endif
+
 static int __init_smb2_session(struct ksmbd_session *sess)
 {
 	int id = ksmbd_acquire_smb2_uid(&session_ida);
@@ -334,8 +365,10 @@
 	struct ksmbd_session *sess;
 	int ret;
 
+#ifndef CONFIG_SMB_INSECURE_SERVER
 	if (protocol != CIFDS_SESSION_FLAG_SMB2)
 		return NULL;
+#endif
 
 	sess = kzalloc(sizeof(struct ksmbd_session), GFP_KERNEL);
 	if (!sess)
@@ -351,8 +384,22 @@
 	xa_init(&sess->ksmbd_chann_list);
 	xa_init(&sess->rpc_handle_list);
 	sess->sequence_number = 1;
+	rwlock_init(&sess->tree_conns_lock);
 
+	switch (protocol) {
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	case CIFDS_SESSION_FLAG_SMB1:
+		ret = __init_smb1_session(sess);
+		break;
+#endif
+	case CIFDS_SESSION_FLAG_SMB2:
 	ret = __init_smb2_session(sess);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
 	if (ret)
 		goto error;
 
@@ -369,6 +416,13 @@
 	return NULL;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+struct ksmbd_session *ksmbd_smb1_session_create(void)
+{
+	return __session_create(CIFDS_SESSION_FLAG_SMB1);
+}
+#endif
+
 struct ksmbd_session *ksmbd_smb2_session_create(void)
 {
 	return __session_create(CIFDS_SESSION_FLAG_SMB2);
@@ -378,6 +432,10 @@
 {
 	int id = -EINVAL;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (test_session_flag(sess, CIFDS_SESSION_FLAG_SMB1))
+		id = ksmbd_acquire_smb1_tid(&sess->tree_conn_ida);
+#endif
 	if (test_session_flag(sess, CIFDS_SESSION_FLAG_SMB2))
 		id = ksmbd_acquire_smb2_tid(&sess->tree_conn_ida);
 
diff -ruw linux-6.4/fs/smb/server/mgmt/user_session.h linux-6.4-fbx/fs/smb/server/mgmt/user_session.h
--- linux-6.4/fs/smb/server/mgmt/user_session.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/mgmt/user_session.h	2023-11-07 13:38:44.042256145 +0100
@@ -12,6 +12,9 @@
 #include "../smb_common.h"
 #include "../ntlmssp.h"
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+#define CIFDS_SESSION_FLAG_SMB1		BIT(0)
+#endif
 #define CIFDS_SESSION_FLAG_SMB2		BIT(1)
 
 #define PREAUTH_HASHVALUE_SIZE		64
@@ -60,6 +63,7 @@
 
 	struct ksmbd_file_table		file_table;
 	unsigned long			last_active;
+	rwlock_t			tree_conns_lock;
 };
 
 static inline int test_session_flag(struct ksmbd_session *sess, int bit)
@@ -77,6 +81,9 @@
 	sess->flags &= ~bit;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+struct ksmbd_session *ksmbd_smb1_session_create(void);
+#endif
 struct ksmbd_session *ksmbd_smb2_session_create(void);
 
 void ksmbd_session_destroy(struct ksmbd_session *sess);
diff -ruw linux-6.4/fs/smb/server/oplock.c linux-6.4-fbx/fs/smb/server/oplock.c
--- linux-6.4/fs/smb/server/oplock.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/oplock.c	2024-03-18 14:40:14.867741770 +0100
@@ -10,6 +10,9 @@
 #include "oplock.h"
 
 #include "smb_common.h"
+#ifdef CONFIG_SMB_INSECURE_SERVER
+#include "smb1pdu.h"
+#endif
 #include "smbstatus.h"
 #include "connection.h"
 #include "mgmt/user_session.h"
@@ -45,6 +48,9 @@
 	opinfo->pending_break = 0;
 	opinfo->fid = id;
 	opinfo->Tid = Tid;
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	opinfo->is_smb2 = IS_SMB2(conn);
+#endif
 	INIT_LIST_HEAD(&opinfo->op_entry);
 	INIT_LIST_HEAD(&opinfo->interim_list);
 	init_waitqueue_head(&opinfo->oplock_q);
@@ -102,9 +108,10 @@
 	lease->new_state = 0;
 	lease->flags = lctx->flags;
 	lease->duration = lctx->duration;
+	lease->is_dir = lctx->is_dir;
 	memcpy(lease->parent_lease_key, lctx->parent_lease_key, SMB2_LEASE_KEY_SIZE);
 	lease->version = lctx->version;
-	lease->epoch = 0;
+	lease->epoch = le16_to_cpu(lctx->epoch) + 1;
 	INIT_LIST_HEAD(&opinfo->lease_entry);
 	opinfo->o_lease = lease;
 
@@ -258,6 +265,8 @@
 {
 	struct lease *lease = opinfo->o_lease;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (opinfo->is_smb2) {
 	if (!(opinfo->level == SMB2_OPLOCK_LEVEL_BATCH ||
 	      opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) {
 		pr_err("bad oplock(0x%x)\n", opinfo->level);
@@ -269,6 +278,27 @@
 
 	if (opinfo->is_lease)
 		lease->state = lease->new_state;
+	} else {
+		if (!(opinfo->level == OPLOCK_EXCLUSIVE ||
+		      opinfo->level == OPLOCK_BATCH)) {
+			pr_err("bad oplock(0x%x)\n", opinfo->level);
+			return -EINVAL;
+		}
+		opinfo->level = OPLOCK_READ;
+	}
+#else
+	if (!(opinfo->level == SMB2_OPLOCK_LEVEL_BATCH ||
+	      opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) {
+		pr_err("bad oplock(0x%x)\n", opinfo->level);
+		if (opinfo->is_lease)
+			pr_err("lease state(0x%x)\n", lease->state);
+		return -EINVAL;
+	}
+	opinfo->level = SMB2_OPLOCK_LEVEL_II;
+
+	if (opinfo->is_lease)
+		lease->state = lease->new_state;
+#endif
 	return 0;
 }
 
@@ -297,6 +327,27 @@
 {
 	struct lease *lease = opinfo->o_lease;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (opinfo->is_smb2) {
+		if (!(opinfo->level == SMB2_OPLOCK_LEVEL_BATCH ||
+		      opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) {
+			pr_err("bad oplock(0x%x)\n", opinfo->level);
+			if (opinfo->is_lease)
+				pr_err("lease state(0x%x)\n", lease->state);
+			return -EINVAL;
+		}
+		opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
+		if (opinfo->is_lease)
+			lease->state = lease->new_state;
+	} else {
+		if (!(opinfo->level == OPLOCK_EXCLUSIVE ||
+		      opinfo->level == OPLOCK_BATCH)) {
+			pr_err("bad oplock(0x%x)\n", opinfo->level);
+			return -EINVAL;
+		}
+		opinfo->level = OPLOCK_NONE;
+	}
+#else
 	if (!(opinfo->level == SMB2_OPLOCK_LEVEL_BATCH ||
 	      opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) {
 		pr_err("bad oplock(0x%x)\n", opinfo->level);
@@ -307,6 +358,7 @@
 	opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
 	if (opinfo->is_lease)
 		lease->state = lease->new_state;
+#endif
 	return 0;
 }
 
@@ -320,6 +372,8 @@
 {
 	struct lease *lease = opinfo->o_lease;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (opinfo->is_smb2) {
 	if (opinfo->level != SMB2_OPLOCK_LEVEL_II) {
 		pr_err("bad oplock(0x%x)\n", opinfo->level);
 		if (opinfo->is_lease)
@@ -329,6 +383,24 @@
 	opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
 	if (opinfo->is_lease)
 		lease->state = lease->new_state;
+	} else {
+		if (opinfo->level != OPLOCK_READ) {
+			pr_err("bad oplock(0x%x)\n", opinfo->level);
+			return -EINVAL;
+		}
+		opinfo->level = OPLOCK_NONE;
+	}
+#else
+	if (opinfo->level != SMB2_OPLOCK_LEVEL_II) {
+		pr_err("bad oplock(0x%x)\n", opinfo->level);
+		if (opinfo->is_lease)
+			pr_err("lease state(0x%x)\n", lease->state);
+		return -EINVAL;
+	}
+	opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
+	if (opinfo->is_lease)
+		lease->state = lease->new_state;
+#endif
 	return 0;
 }
 
@@ -395,8 +467,8 @@
 {
 	struct oplock_info *opinfo;
 
-	if (S_ISDIR(file_inode(fp->filp)->i_mode))
-		return;
+	if (fp->reserve_lease_break)
+		smb_lazy_parent_lease_break_close(fp);
 
 	opinfo = opinfo_get(fp);
 	if (!opinfo)
@@ -432,10 +504,24 @@
 {
 	struct lease *lease = opinfo_new->o_lease;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (opinfo_new->is_smb2) {
+		if (req_oplock == SMB2_OPLOCK_LEVEL_BATCH)
+			opinfo_new->level = SMB2_OPLOCK_LEVEL_BATCH;
+		else
+			opinfo_new->level = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
+	} else {
+		if (req_oplock == REQ_BATCHOPLOCK)
+			opinfo_new->level = OPLOCK_BATCH;
+		else
+			opinfo_new->level = OPLOCK_EXCLUSIVE;
+	}
+#else
 	if (req_oplock == SMB2_OPLOCK_LEVEL_BATCH)
 		opinfo_new->level = SMB2_OPLOCK_LEVEL_BATCH;
 	else
 		opinfo_new->level = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
+#endif
 
 	if (lctx) {
 		lease->state = lctx->req_state;
@@ -455,7 +541,14 @@
 {
 	struct lease *lease = opinfo_new->o_lease;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (opinfo_new->is_smb2)
 	opinfo_new->level = SMB2_OPLOCK_LEVEL_II;
+	else
+		opinfo_new->level = OPLOCK_READ;
+#else
+	opinfo_new->level = SMB2_OPLOCK_LEVEL_II;
+#endif
 
 	if (lctx) {
 		lease->state = SMB2_LEASE_READ_CACHING_LE;
@@ -477,7 +570,14 @@
 {
 	struct lease *lease = opinfo_new->o_lease;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (opinfo_new->is_smb2)
+		opinfo_new->level = SMB2_OPLOCK_LEVEL_NONE;
+	else
+		opinfo_new->level = OPLOCK_NONE;
+#else
 	opinfo_new->level = SMB2_OPLOCK_LEVEL_NONE;
+#endif
 
 	if (lctx) {
 		lease->state = 0;
@@ -543,25 +643,31 @@
 			/* upgrading lease */
 			if ((atomic_read(&ci->op_count) +
 			     atomic_read(&ci->sop_count)) == 1) {
-				if (lease->state ==
-				    (lctx->req_state & lease->state)) {
+				if (lease->state != SMB2_LEASE_NONE_LE &&
+				    lease->state == (lctx->req_state & lease->state)) {
+					lease->epoch++;
 					lease->state |= lctx->req_state;
 					if (lctx->req_state &
 						SMB2_LEASE_WRITE_CACHING_LE)
 						lease_read_to_write(opinfo);
+
 				}
 			} else if ((atomic_read(&ci->op_count) +
 				    atomic_read(&ci->sop_count)) > 1) {
 				if (lctx->req_state ==
 				    (SMB2_LEASE_READ_CACHING_LE |
-				     SMB2_LEASE_HANDLE_CACHING_LE))
+				     SMB2_LEASE_HANDLE_CACHING_LE)) {
+					lease->epoch++;
 					lease->state = lctx->req_state;
 			}
+			}
 
 			if (lctx->req_state && lease->state ==
-			    SMB2_LEASE_NONE_LE)
+			    SMB2_LEASE_NONE_LE) {
+				lease->epoch++;
 				lease_none_upgrade(opinfo, lctx->req_state);
 		}
+		}
 		read_lock(&ci->m_lock);
 	}
 	read_unlock(&ci->m_lock);
@@ -616,6 +722,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
 static inline int allocate_oplock_break_buf(struct ksmbd_work *work)
 {
 	work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, GFP_KERNEL);
@@ -626,6 +733,108 @@
 }
 
 /**
+ * smb1_oplock_break_noti() - send smb1 oplock break cmd from conn
+ * to client
+ * @work:     smb work object
+ *
+ * There are two ways this function can be called. 1- while file open we break
+ * from exclusive/batch lock to levelII oplock and 2- while file write/truncate
+ * we break from levelII oplock no oplock.
+ * work->request_buf contains oplock_info.
+ */
+static void __smb1_oplock_break_noti(struct work_struct *wk)
+{
+	struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
+	struct ksmbd_conn *conn = work->conn;
+	struct smb_hdr *rsp_hdr;
+	struct smb_com_lock_req *req;
+	struct oplock_info *opinfo = work->request_buf;
+
+	if (allocate_oplock_break_buf(work)) {
+		pr_err("smb_allocate_rsp_buf failed! ");
+		ksmbd_free_work_struct(work);
+		return;
+	}
+
+	/* Init response header */
+	rsp_hdr = work->response_buf;
+	/* wct is 8 for locking andx(18) */
+	memset(rsp_hdr, 0, sizeof(struct smb_hdr) + 18);
+	rsp_hdr->smb_buf_length =
+		cpu_to_be32(conn->vals->header_size - 4 + 18);
+	rsp_hdr->Protocol[0] = 0xFF;
+	rsp_hdr->Protocol[1] = 'S';
+	rsp_hdr->Protocol[2] = 'M';
+	rsp_hdr->Protocol[3] = 'B';
+
+	rsp_hdr->Command = SMB_COM_LOCKING_ANDX;
+	/* we know unicode, long file name and use nt error codes */
+	rsp_hdr->Flags2 = SMBFLG2_UNICODE | SMBFLG2_KNOWS_LONG_NAMES |
+		SMBFLG2_ERR_STATUS;
+	rsp_hdr->Uid = cpu_to_le16(work->sess->id);
+	rsp_hdr->Pid = cpu_to_le16(0xFFFF);
+	rsp_hdr->Mid = cpu_to_le16(0xFFFF);
+	rsp_hdr->Tid = cpu_to_le16(opinfo->Tid);
+	rsp_hdr->WordCount = 8;
+
+	/* Init locking request */
+	req = work->response_buf;
+
+	req->AndXCommand = 0xFF;
+	req->AndXReserved = 0;
+	req->AndXOffset = 0;
+	req->Fid = opinfo->fid;
+	req->LockType = LOCKING_ANDX_OPLOCK_RELEASE;
+	if (!opinfo->open_trunc &&
+	    (opinfo->level == OPLOCK_BATCH ||
+	     opinfo->level == OPLOCK_EXCLUSIVE))
+		req->OplockLevel = 1;
+	else
+		req->OplockLevel = 0;
+	req->Timeout = 0;
+	req->NumberOfUnlocks = 0;
+	req->ByteCount = 0;
+	ksmbd_debug(OPLOCK, "sending oplock break for fid %d lock level = %d\n",
+		    req->Fid, req->OplockLevel);
+
+	ksmbd_conn_write(work);
+	ksmbd_free_work_struct(work);
+}
+
+/**
+ * smb1_oplock_break_noti() - send smb1 exclusive/batch to level2 oplock
+ *		break command from server to client
+ * @opinfo:	oplock info object; if op_state is OPLOCK_ACK_WAIT the
+ *		break is queued asynchronously and the client ack awaited
+ *
+ * Return:      0 on success, otherwise error
+ */
+static int smb1_oplock_break_noti(struct oplock_info *opinfo)
+{
+	struct ksmbd_conn *conn = opinfo->conn;
+	struct ksmbd_work *work = ksmbd_alloc_work_struct();
+
+	if (!work)
+		return -ENOMEM;
+
+	work->request_buf = (char *)opinfo;
+	work->conn = conn;
+
+	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
+		INIT_WORK(&work->work, __smb1_oplock_break_noti);
+		ksmbd_queue_work(work);
+
+		wait_for_break_ack(opinfo);
+	} else {
+		__smb1_oplock_break_noti(&work->work);
+		if (opinfo->level == OPLOCK_READ)
+			opinfo->level = OPLOCK_NONE;
+	}
+	return 0;
+}
+#endif
+
+/**
  * __smb2_oplock_break_noti() - send smb2 oplock break cmd from conn
  * to client
  * @wk:     smb work object
@@ -639,7 +848,6 @@
 {
 	struct smb2_oplock_break *rsp = NULL;
 	struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
-	struct ksmbd_conn *conn = work->conn;
 	struct oplock_break_info *br_info = work->request_buf;
 	struct smb2_hdr *rsp_hdr;
 	struct ksmbd_file *fp;
@@ -648,7 +856,7 @@
 	if (!fp)
 		goto out;
 
-	if (allocate_oplock_break_buf(work)) {
+	if (allocate_interim_rsp_buf(work)) {
 		pr_err("smb2_allocate_rsp_buf failed! ");
 		ksmbd_fd_put(work, fp);
 		goto out;
@@ -656,8 +864,6 @@
 
 	rsp_hdr = smb2_get_msg(work->response_buf);
 	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
-	*(__be32 *)work->response_buf =
-		cpu_to_be32(conn->vals->header_size);
 	rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
 	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
 	rsp_hdr->CreditRequest = cpu_to_le16(0);
@@ -684,13 +890,15 @@
 	rsp->PersistentFid = fp->persistent_id;
 	rsp->VolatileFid = fp->volatile_id;
 
-	inc_rfc1001_len(work->response_buf, 24);
+	ksmbd_fd_put(work, fp);
+	if (ksmbd_iov_pin_rsp(work, (void *)rsp,
+			      sizeof(struct smb2_oplock_break)))
+		goto out;
 
 	ksmbd_debug(OPLOCK,
 		    "sending oplock break v_id %llu p_id = %llu lock level = %d\n",
 		    rsp->VolatileFid, rsp->PersistentFid, rsp->OplockLevel);
 
-	ksmbd_fd_put(work, fp);
 	ksmbd_conn_write(work);
 
 out:
@@ -751,18 +959,15 @@
 	struct smb2_lease_break *rsp = NULL;
 	struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
 	struct lease_break_info *br_info = work->request_buf;
-	struct ksmbd_conn *conn = work->conn;
 	struct smb2_hdr *rsp_hdr;
 
-	if (allocate_oplock_break_buf(work)) {
+	if (allocate_interim_rsp_buf(work)) {
 		ksmbd_debug(OPLOCK, "smb2_allocate_rsp_buf failed! ");
 		goto out;
 	}
 
 	rsp_hdr = smb2_get_msg(work->response_buf);
 	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
-	*(__be32 *)work->response_buf =
-		cpu_to_be32(conn->vals->header_size);
 	rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
 	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
 	rsp_hdr->CreditRequest = cpu_to_le16(0);
@@ -791,7 +996,9 @@
 	rsp->AccessMaskHint = 0;
 	rsp->ShareMaskHint = 0;
 
-	inc_rfc1001_len(work->response_buf, 44);
+	if (ksmbd_iov_pin_rsp(work, (void *)rsp,
+			      sizeof(struct smb2_lease_break)))
+		goto out;
 
 	ksmbd_conn_write(work);
 
@@ -844,7 +1051,8 @@
 					     interim_entry);
 			setup_async_work(in_work, NULL, NULL);
 			smb2_send_interim_resp(in_work, STATUS_PENDING);
-			list_del(&in_work->interim_entry);
+			list_del_init(&in_work->interim_entry);
+			release_async_work(in_work);
 		}
 		INIT_WORK(&work->work, __smb2_lease_break_noti);
 		ksmbd_queue_work(work);
@@ -910,7 +1118,8 @@
 					lease->new_state =
 						SMB2_LEASE_READ_CACHING_LE;
 			} else {
-				if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
+				if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE &&
+						!lease->is_dir)
 					lease->new_state =
 						SMB2_LEASE_READ_CACHING_LE;
 				else
@@ -933,10 +1142,20 @@
 			brk_opinfo->op_state = OPLOCK_ACK_WAIT;
 	}
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (brk_opinfo->is_smb2)
+		if (brk_opinfo->is_lease)
+			err = smb2_lease_break_noti(brk_opinfo);
+		else
+			err = smb2_oplock_break_noti(brk_opinfo);
+	else
+		err = smb1_oplock_break_noti(brk_opinfo);
+#else
 	if (brk_opinfo->is_lease)
 		err = smb2_lease_break_noti(brk_opinfo);
 	else
 		err = smb2_oplock_break_noti(brk_opinfo);
+#endif
 
 	ksmbd_debug(OPLOCK, "oplock granted = %d\n", brk_opinfo->level);
 	if (brk_opinfo->op_state == OPLOCK_CLOSING)
@@ -1042,6 +1261,8 @@
 	       SMB2_LEASE_KEY_SIZE);
 	lease2->duration = lease1->duration;
 	lease2->flags = lease1->flags;
+	lease2->epoch = lease1->epoch;
+	lease2->version = lease1->version;
 }
 
 static int add_lease_global_list(struct oplock_info *opinfo)
@@ -1078,6 +1299,10 @@
 			     struct lease_ctx_info *lctx)
 {
 	switch (level) {
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	case REQ_OPLOCK:
+	case REQ_BATCHOPLOCK:
+#endif
 	case SMB2_OPLOCK_LEVEL_BATCH:
 	case SMB2_OPLOCK_LEVEL_EXCLUSIVE:
 		grant_write_oplock(opinfo, level, lctx);
@@ -1091,6 +1316,89 @@
 	}
 }
 
+void smb_send_parent_lease_break_noti(struct ksmbd_file *fp,
+				      struct lease_ctx_info *lctx)
+{
+	struct oplock_info *opinfo;
+	struct ksmbd_inode *p_ci = NULL;
+
+	if (lctx->version != 2)
+		return;
+
+	p_ci = ksmbd_inode_lookup_lock(fp->filp->f_path.dentry->d_parent);
+	if (!p_ci)
+		return;
+
+	read_lock(&p_ci->m_lock);
+	list_for_each_entry(opinfo, &p_ci->m_op_list, op_entry) {
+		if (!opinfo->is_lease)
+			continue;
+
+		if (opinfo->o_lease->state != SMB2_OPLOCK_LEVEL_NONE &&
+		    (!(lctx->flags & SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE) ||
+		     !compare_guid_key(opinfo, fp->conn->ClientGUID,
+				      lctx->parent_lease_key))) {
+			if (!atomic_inc_not_zero(&opinfo->refcount))
+				continue;
+
+			atomic_inc(&opinfo->conn->r_count);
+			if (ksmbd_conn_releasing(opinfo->conn)) {
+				atomic_dec(&opinfo->conn->r_count);
+				continue;
+			}
+
+			read_unlock(&p_ci->m_lock);
+			oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
+			opinfo_conn_put(opinfo);
+			read_lock(&p_ci->m_lock);
+		}
+	}
+	read_unlock(&p_ci->m_lock);
+
+	ksmbd_inode_put(p_ci);
+}
+
+void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp)
+{
+	struct oplock_info *opinfo;
+	struct ksmbd_inode *p_ci = NULL;
+
+	rcu_read_lock();
+	opinfo = rcu_dereference(fp->f_opinfo);
+	rcu_read_unlock();
+
+	if (!opinfo || !opinfo->is_lease || opinfo->o_lease->version != 2)
+		return;
+
+	p_ci = ksmbd_inode_lookup_lock(fp->filp->f_path.dentry->d_parent);
+	if (!p_ci)
+		return;
+
+	read_lock(&p_ci->m_lock);
+	list_for_each_entry(opinfo, &p_ci->m_op_list, op_entry) {
+		if (!opinfo->is_lease)
+			continue;
+
+		if (opinfo->o_lease->state != SMB2_OPLOCK_LEVEL_NONE) {
+			if (!atomic_inc_not_zero(&opinfo->refcount))
+				continue;
+
+			atomic_inc(&opinfo->conn->r_count);
+			if (ksmbd_conn_releasing(opinfo->conn)) {
+				atomic_dec(&opinfo->conn->r_count);
+				continue;
+			}
+			read_unlock(&p_ci->m_lock);
+			oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
+			opinfo_conn_put(opinfo);
+			read_lock(&p_ci->m_lock);
+		}
+	}
+	read_unlock(&p_ci->m_lock);
+
+	ksmbd_inode_put(p_ci);
+}
+
 /**
  * smb_grant_oplock() - handle oplock/lease request on file open
  * @work:		smb work
@@ -1114,9 +1422,11 @@
 	bool prev_op_has_lease;
 	__le32 prev_op_state = 0;
 
-	/* not support directory lease */
-	if (S_ISDIR(file_inode(fp->filp)->i_mode))
+	/* Directory leases are supported only with v2 lease contexts */
+	if (S_ISDIR(file_inode(fp->filp)->i_mode)) {
+		if (!lctx || lctx->version != 2)
 		return 0;
+	}
 
 	opinfo = alloc_opinfo(work, pid, tid);
 	if (!opinfo)
@@ -1286,6 +1596,36 @@
 		}
 
 		rcu_read_unlock();
+
+#ifdef CONFIG_SMB_INSECURE_SERVER
+		if (brk_op->is_smb2) {
+			if (brk_op->is_lease && (brk_op->o_lease->state &
+					(~(SMB2_LEASE_READ_CACHING_LE |
+					   SMB2_LEASE_HANDLE_CACHING_LE)))) {
+				ksmbd_debug(OPLOCK,
+					    "unexpected lease state(0x%x)\n",
+					    brk_op->o_lease->state);
+				goto next;
+			} else if (brk_op->level !=
+					SMB2_OPLOCK_LEVEL_II) {
+				ksmbd_debug(OPLOCK, "unexpected oplock(0x%x)\n",
+					    brk_op->level);
+				goto next;
+			}
+
+			/* Skip oplock being break to none */
+			if (brk_op->is_lease &&
+			    brk_op->o_lease->new_state == SMB2_LEASE_NONE_LE &&
+			    atomic_read(&brk_op->breaking_cnt))
+				goto next;
+		} else {
+			if (brk_op->level != OPLOCK_READ) {
+				ksmbd_debug(OPLOCK, "unexpected oplock(0x%x)\n",
+					    brk_op->level);
+				goto next;
+			}
+		}
+#else
 		if (brk_op->is_lease && (brk_op->o_lease->state &
 		    (~(SMB2_LEASE_READ_CACHING_LE |
 				SMB2_LEASE_HANDLE_CACHING_LE)))) {
@@ -1304,6 +1644,7 @@
 		    brk_op->o_lease->new_state == SMB2_LEASE_NONE_LE &&
 		    atomic_read(&brk_op->breaking_cnt))
 			goto next;
+#endif
 
 		if (op && op->is_lease && brk_op->is_lease &&
 		    !memcmp(conn->ClientGUID, brk_op->conn->ClientGUID,
@@ -1374,6 +1715,7 @@
 		memcpy(buf->lcontext.LeaseKey, lease->lease_key,
 		       SMB2_LEASE_KEY_SIZE);
 		buf->lcontext.LeaseFlags = lease->flags;
+		buf->lcontext.Epoch = cpu_to_le16(lease->epoch);
 		buf->lcontext.LeaseState = lease->state;
 		memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key,
 		       SMB2_LEASE_KEY_SIZE);
@@ -1410,10 +1752,11 @@
 /**
  * parse_lease_state() - parse lease context containted in file open request
  * @open_req:	buffer containing smb2 file open(create) request
+ * @is_dir:	whether leasing file is directory
  *
  * Return:  oplock state, -ENOENT if create lease context not found
  */
-struct lease_ctx_info *parse_lease_state(void *open_req)
+struct lease_ctx_info *parse_lease_state(void *open_req, bool is_dir)
 {
 	struct create_context *cc;
 	struct smb2_create_req *req = (struct smb2_create_req *)open_req;
@@ -1431,8 +1774,14 @@
 		struct create_lease_v2 *lc = (struct create_lease_v2 *)cc;
 
 		memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
+		if (is_dir) {
+			lreq->req_state = lc->lcontext.LeaseState &
+				~SMB2_LEASE_WRITE_CACHING_LE;
+			lreq->is_dir = true;
+		} else
 		lreq->req_state = lc->lcontext.LeaseState;
 		lreq->flags = lc->lcontext.LeaseFlags;
+		lreq->epoch = lc->lcontext.Epoch;
 		lreq->duration = lc->lcontext.LeaseDuration;
 		memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey,
 				SMB2_LEASE_KEY_SIZE);
@@ -1492,7 +1841,7 @@
 		    name_len < 4 ||
 		    name_off + name_len > cc_len ||
 		    (value_off & 0x7) != 0 ||
-		    (value_off && (value_off < name_off + name_len)) ||
+		    (value_len && value_off < name_off + (name_len < 8 ? 8 : name_len)) ||
 		    ((u64)value_off + value_len > cc_len))
 			return ERR_PTR(-EINVAL);
 
diff -ruw linux-6.4/fs/smb/server/oplock.h linux-6.4-fbx/fs/smb/server/oplock.h
--- linux-6.4/fs/smb/server/oplock.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/oplock.h	2024-01-25 13:36:32.714984559 +0100
@@ -11,6 +11,14 @@
 
 #define OPLOCK_WAIT_TIME	(35 * HZ)
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+/* SMB Oplock levels */
+#define OPLOCK_NONE      0
+#define OPLOCK_EXCLUSIVE 1
+#define OPLOCK_BATCH     2
+#define OPLOCK_READ      3  /* level 2 oplock */
+#endif
+
 /* SMB2 Oplock levels */
 #define SMB2_OPLOCK_LEVEL_NONE          0x00
 #define SMB2_OPLOCK_LEVEL_II            0x01
@@ -34,7 +42,9 @@
 	__le32			flags;
 	__le64			duration;
 	__u8			parent_lease_key[SMB2_LEASE_KEY_SIZE];
+	__le16			epoch;
 	int			version;
+	bool			is_dir;
 };
 
 struct lease_table {
@@ -53,6 +63,7 @@
 	__u8			parent_lease_key[SMB2_LEASE_KEY_SIZE];
 	int			version;
 	unsigned short		epoch;
+	bool			is_dir;
 	struct lease_table	*l_lb;
 };
 
@@ -69,6 +80,9 @@
 	atomic_t		refcount;
 	__u16                   Tid;
 	bool			is_lease;
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	bool			is_smb2;
+#endif
 	bool			open_trunc;	/* truncate on open */
 	struct lease		*o_lease;
 	struct list_head        interim_list;
@@ -108,7 +122,7 @@
 
 /* Lease related functions */
 void create_lease_buf(u8 *rbuf, struct lease *lease);
-struct lease_ctx_info *parse_lease_state(void *open_req);
+struct lease_ctx_info *parse_lease_state(void *open_req, bool is_dir);
 __u8 smb2_map_lease_to_oplock(__le32 lease_state);
 int lease_read_to_write(struct oplock_info *opinfo);
 
@@ -124,4 +138,7 @@
 int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci,
 			struct lease_ctx_info *lctx);
 void destroy_lease_table(struct ksmbd_conn *conn);
+void smb_send_parent_lease_break_noti(struct ksmbd_file *fp,
+				      struct lease_ctx_info *lctx);
+void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp);
 #endif /* __KSMBD_OPLOCK_H */
diff -ruw linux-6.4/fs/smb/server/server.c linux-6.4-fbx/fs/smb/server/server.c
--- linux-6.4/fs/smb/server/server.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/server.c	2023-11-07 13:38:44.042256145 +0100
@@ -115,8 +115,10 @@
 	if (check_conn_state(work))
 		return SERVER_HANDLER_CONTINUE;
 
-	if (ksmbd_verify_smb_message(work))
+	if (ksmbd_verify_smb_message(work)) {
+		conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
 		return SERVER_HANDLER_ABORT;
+	}
 
 	command = conn->ops->get_cmd_val(work);
 	*cmd = command;
@@ -163,6 +165,7 @@
 {
 	u16 command = 0;
 	int rc;
+	bool is_chained = false;
 
 	if (conn->ops->allocate_rsp_buf(work))
 		return;
@@ -229,16 +232,17 @@
 			}
 		}
 
+		is_chained = is_chained_smb2_message(work);
+
 		if (work->sess &&
 		    (work->sess->sign || smb3_11_final_sess_setup_resp(work) ||
 		     conn->ops->is_sign_req(work, command)))
 			conn->ops->set_sign_rsp(work);
-	} while (is_chained_smb2_message(work));
-
-	if (work->send_no_response)
-		return;
+	} while (is_chained == true);
 
 send:
+	if (work->tcon)
+		ksmbd_tree_connect_put(work->tcon);
 	smb3_preauth_hash_rsp(work);
 	if (work->sess && work->sess->enc && work->encrypted &&
 	    conn->ops->encrypt_resp) {
@@ -286,6 +290,7 @@
 static int queue_ksmbd_work(struct ksmbd_conn *conn)
 {
 	struct ksmbd_work *work;
+	int err;
 
 	work = ksmbd_alloc_work_struct();
 	if (!work) {
@@ -297,7 +302,11 @@
 	work->request_buf = conn->request_buf;
 	conn->request_buf = NULL;
 
-	ksmbd_init_smb_server(work);
+	err = ksmbd_init_smb_server(work);
+	if (err) {
+		ksmbd_free_work_struct(work);
+		return 0;
+	}
 
 	ksmbd_conn_enqueue_request(work);
 	atomic_inc(&conn->r_count);
diff -ruw linux-6.4/fs/smb/server/smb2misc.c linux-6.4-fbx/fs/smb/server/smb2misc.c
--- linux-6.4/fs/smb/server/smb2misc.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/smb2misc.c	2024-03-22 17:24:19.542846394 +0100
@@ -101,29 +101,46 @@
 		*len = le16_to_cpu(((struct smb2_sess_setup_req *)hdr)->SecurityBufferLength);
 		break;
 	case SMB2_TREE_CONNECT:
-		*off = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathOffset);
+		*off = max_t(unsigned short int,
+			     le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathOffset),
+			     offsetof(struct smb2_tree_connect_req, Buffer));
 		*len = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathLength);
 		break;
 	case SMB2_CREATE:
 	{
+		unsigned short int name_off =
+			max_t(unsigned short int,
+			      le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset),
+			      offsetof(struct smb2_create_req, Buffer));
+		unsigned short int name_len =
+			le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength);
+
 		if (((struct smb2_create_req *)hdr)->CreateContextsLength) {
 			*off = le32_to_cpu(((struct smb2_create_req *)
 				hdr)->CreateContextsOffset);
 			*len = le32_to_cpu(((struct smb2_create_req *)
 				hdr)->CreateContextsLength);
+			if (!name_len)
+				break;
+
+			if (name_off + name_len < (u64)*off + *len)
 			break;
 		}
 
-		*off = le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset);
-		*len = le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength);
+		*off = name_off;
+		*len = name_len;
 		break;
 	}
 	case SMB2_QUERY_INFO:
-		*off = le16_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferOffset);
+		*off = max_t(unsigned int,
+			     le16_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferOffset),
+			     offsetof(struct smb2_query_info_req, Buffer));
 		*len = le32_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferLength);
 		break;
 	case SMB2_SET_INFO:
-		*off = le16_to_cpu(((struct smb2_set_info_req *)hdr)->BufferOffset);
+		*off = max_t(unsigned int,
+			     le16_to_cpu(((struct smb2_set_info_req *)hdr)->BufferOffset),
+			     offsetof(struct smb2_set_info_req, Buffer));
 		*len = le32_to_cpu(((struct smb2_set_info_req *)hdr)->BufferLength);
 		break;
 	case SMB2_READ:
@@ -133,7 +150,7 @@
 	case SMB2_WRITE:
 		if (((struct smb2_write_req *)hdr)->DataOffset ||
 		    ((struct smb2_write_req *)hdr)->Length) {
-			*off = max_t(unsigned int,
+			*off = max_t(unsigned short int,
 				     le16_to_cpu(((struct smb2_write_req *)hdr)->DataOffset),
 				     offsetof(struct smb2_write_req, Buffer));
 			*len = le32_to_cpu(((struct smb2_write_req *)hdr)->Length);
@@ -144,7 +161,9 @@
 		*len = le16_to_cpu(((struct smb2_write_req *)hdr)->WriteChannelInfoLength);
 		break;
 	case SMB2_QUERY_DIRECTORY:
-		*off = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameOffset);
+		*off = max_t(unsigned short int,
+			     le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameOffset),
+			     offsetof(struct smb2_query_directory_req, Buffer));
 		*len = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameLength);
 		break;
 	case SMB2_LOCK:
@@ -159,7 +178,9 @@
 		break;
 	}
 	case SMB2_IOCTL:
-		*off = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset);
+		*off = max_t(unsigned int,
+			     le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset),
+			     offsetof(struct smb2_ioctl_req, Buffer));
 		*len = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputCount);
 		break;
 	default:
@@ -380,13 +401,13 @@
 	}
 
 	if (smb2_req_struct_sizes[command] != pdu->StructureSize2) {
-		if (command == SMB2_OPLOCK_BREAK_HE &&
-		    le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_20 &&
-		    le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_21) {
+		if (!(command == SMB2_OPLOCK_BREAK_HE &&
+		    (le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_20 ||
+		    le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_21))) {
 			/* special case for SMB2.1 lease break message */
 			ksmbd_debug(SMB,
-				    "Illegal request size %d for oplock break\n",
-				    le16_to_cpu(pdu->StructureSize2));
+				"Illegal request size %u for command %d\n",
+				le16_to_cpu(pdu->StructureSize2), command);
 			return 1;
 		}
 	}
@@ -440,10 +461,8 @@
 
 validate_credit:
 	if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) &&
-	    smb2_validate_credit_charge(work->conn, hdr)) {
-		work->conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
+	    smb2_validate_credit_charge(work->conn, hdr))
 		return 1;
-	}
 
 	return 0;
 }
diff -ruw linux-6.4/fs/smb/server/smb2ops.c linux-6.4-fbx/fs/smb/server/smb2ops.c
--- linux-6.4/fs/smb/server/smb2ops.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/smb2ops.c	2024-04-19 15:59:31.193600561 +0200
@@ -12,6 +12,34 @@
 #include "smb_common.h"
 #include "server.h"
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+static struct smb_version_values smb20_server_values = {
+	.version_string = SMB20_VERSION_STRING,
+	.protocol_id = SMB20_PROT_ID,
+	.capabilities = 0,
+	.max_read_size = CIFS_DEFAULT_IOSIZE,
+	.max_write_size = CIFS_DEFAULT_IOSIZE,
+	.max_trans_size = CIFS_DEFAULT_IOSIZE,
+	.max_credits = SMB2_MAX_CREDITS,
+	.large_lock_type = 0,
+	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
+	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+	.header_size = sizeof(struct smb2_hdr),
+	.max_header_size = MAX_SMB2_HDR_SIZE,
+	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+	.lock_cmd = SMB2_LOCK,
+	.cap_unix = 0,
+	.cap_nt_find = SMB2_NT_FIND,
+	.cap_large_files = SMB2_LARGE_FILES,
+	.create_lease_size = sizeof(struct create_lease),
+	.create_durable_size = sizeof(struct create_durable_rsp),
+	.create_mxac_size = sizeof(struct create_mxac_rsp),
+	.create_disk_id_size = sizeof(struct create_disk_id_rsp),
+	.create_posix_size = sizeof(struct create_posix_rsp),
+};
+#endif
+
 static struct smb_version_values smb21_server_values = {
 	.version_string = SMB21_VERSION_STRING,
 	.protocol_id = SMB21_PROT_ID,
@@ -190,6 +218,22 @@
 	[SMB2_CHANGE_NOTIFY_HE]	=	{ .proc = smb2_notify},
 };
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+/**
+ * init_smb2_0_server() - initialize a smb server connection with smb2.0
+ *			command dispatcher
+ * @conn:	connection instance
+ */
+void init_smb2_0_server(struct ksmbd_conn *conn)
+{
+	conn->vals = &smb20_server_values;
+	conn->ops = &smb2_0_server_ops;
+	conn->cmds = smb2_0_server_cmds;
+	conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
+	conn->signing_algorithm = SIGNING_ALG_HMAC_SHA256;
+}
+#endif
+
 /**
  * init_smb2_1_server() - initialize a smb server connection with smb2.1
  *			command dispatcher
@@ -221,12 +265,18 @@
 	conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
 
 	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
-		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
+		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING |
+			SMB2_GLOBAL_CAP_DIRECTORY_LEASING;
 
 	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION &&
 	    conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION)
 		conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
 
+	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION ||
+	    (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) &&
+	     conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION))
+		conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
+
 	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
 		conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
 }
@@ -245,7 +295,8 @@
 	conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
 
 	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
-		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
+		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING |
+			SMB2_GLOBAL_CAP_DIRECTORY_LEASING;
 
 	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION ||
 	    (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) &&
@@ -270,12 +321,8 @@
 	conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
 
 	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
-		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
-
-	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION ||
-	    (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) &&
-	     conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION))
-		conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
+		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING |
+			SMB2_GLOBAL_CAP_DIRECTORY_LEASING;
 
 	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
 		conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
diff -ruw linux-6.4/fs/smb/server/smb2pdu.c linux-6.4-fbx/fs/smb/server/smb2pdu.c
--- linux-6.4/fs/smb/server/smb2pdu.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/smb2pdu.c	2024-04-19 15:59:31.197600671 +0200
@@ -87,9 +87,9 @@
  */
 int smb2_get_ksmbd_tcon(struct ksmbd_work *work)
 {
-	struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf);
+	struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
 	unsigned int cmd = le16_to_cpu(req_hdr->Command);
-	int tree_id;
+	unsigned int tree_id;
 
 	if (cmd == SMB2_TREE_CONNECT_HE ||
 	    cmd ==  SMB2_CANCEL_HE ||
@@ -114,7 +114,7 @@
 			pr_err("The first operation in the compound does not have tcon\n");
 			return -EINVAL;
 		}
-		if (work->tcon->id != tree_id) {
+		if (tree_id != UINT_MAX && work->tcon->id != tree_id) {
 			pr_err("tree id(%u) is different with id(%u) in first operation\n",
 					tree_id, work->tcon->id);
 			return -EINVAL;
@@ -145,12 +145,18 @@
 		err_rsp = smb2_get_msg(work->response_buf);
 
 	if (err_rsp->hdr.Status != STATUS_STOPPED_ON_SYMLINK) {
+		int err;
+
 		err_rsp->StructureSize = SMB2_ERROR_STRUCTURE_SIZE2_LE;
 		err_rsp->ErrorContextCount = 0;
 		err_rsp->Reserved = 0;
 		err_rsp->ByteCount = 0;
 		err_rsp->ErrorData[0] = 0;
-		inc_rfc1001_len(work->response_buf, SMB2_ERROR_STRUCTURE_SIZE2);
+		err = ksmbd_iov_pin_rsp(work, (void *)err_rsp,
+					__SMB2_HEADER_STRUCTURE_SIZE +
+						SMB2_ERROR_STRUCTURE_SIZE2);
+		if (err)
+			work->send_no_response = 1;
 	}
 }
 
@@ -225,11 +231,12 @@
 {
 	struct smb2_hdr *rsp_hdr;
 
-	if (work->next_smb2_rcv_hdr_off)
-		rsp_hdr = ksmbd_resp_buf_next(work);
-	else
 		rsp_hdr = smb2_get_msg(work->response_buf);
 	rsp_hdr->Status = err;
+
+	work->iov_idx = 0;
+	work->iov_cnt = 0;
+	work->next_smb2_rcv_hdr_off = 0;
 	smb2_set_err_rsp(work);
 }
 
@@ -245,9 +252,7 @@
 	struct smb2_hdr *rsp_hdr;
 	struct smb2_negotiate_rsp *rsp;
 	struct ksmbd_conn *conn = work->conn;
-
-	*(__be32 *)work->response_buf =
-		cpu_to_be32(conn->vals->header_size);
+	int err;
 
 	rsp_hdr = smb2_get_msg(work->response_buf);
 	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
@@ -286,12 +291,13 @@
 	rsp->SecurityBufferLength = cpu_to_le16(AUTH_GSS_LENGTH);
 	ksmbd_copy_gss_neg_header((char *)(&rsp->hdr) +
 		le16_to_cpu(rsp->SecurityBufferOffset));
-	inc_rfc1001_len(work->response_buf,
-			sizeof(struct smb2_negotiate_rsp) -
-			sizeof(struct smb2_hdr) + AUTH_GSS_LENGTH);
 	rsp->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED_LE;
 	if (server_conf.signing == KSMBD_CONFIG_OPT_MANDATORY)
 		rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE;
+	err = ksmbd_iov_pin_rsp(work, rsp,
+				sizeof(struct smb2_negotiate_rsp) + AUTH_GSS_LENGTH);
+	if (err)
+		return err;
 	conn->use_spnego = true;
 
 	ksmbd_conn_set_need_negotiate(conn);
@@ -390,11 +396,12 @@
 	next_hdr_offset = le32_to_cpu(req->NextCommand);
 
 	new_len = ALIGN(len, 8);
-	inc_rfc1001_len(work->response_buf,
-			sizeof(struct smb2_hdr) + new_len - len);
+	work->iov[work->iov_idx].iov_len += (new_len - len);
+	inc_rfc1001_len(work->response_buf, new_len - len);
 	rsp->NextCommand = cpu_to_le32(new_len);
 
 	work->next_smb2_rcv_hdr_off += next_hdr_offset;
+	work->curr_smb2_rsp_hdr_off = work->next_smb2_rsp_hdr_off;
 	work->next_smb2_rsp_hdr_off += new_len;
 	ksmbd_debug(SMB,
 		    "Compound req new_len = %d rcv off = %d rsp off = %d\n",
@@ -470,10 +477,10 @@
 		len = len - get_rfc1002_len(work->response_buf);
 		if (len) {
 			ksmbd_debug(SMB, "padding len %u\n", len);
+			work->iov[work->iov_idx].iov_len += len;
 			inc_rfc1001_len(work->response_buf, len);
-			if (work->aux_payload_sz)
-				work->aux_payload_sz += len;
 		}
+		work->curr_smb2_rsp_hdr_off = work->next_smb2_rsp_hdr_off;
 	}
 	return false;
 }
@@ -488,11 +495,8 @@
 {
 	struct smb2_hdr *rsp_hdr = smb2_get_msg(work->response_buf);
 	struct smb2_hdr *rcv_hdr = smb2_get_msg(work->request_buf);
-	struct ksmbd_conn *conn = work->conn;
 
 	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
-	*(__be32 *)work->response_buf =
-		cpu_to_be32(conn->vals->header_size);
 	rsp_hdr->ProtocolId = rcv_hdr->ProtocolId;
 	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
 	rsp_hdr->Command = rcv_hdr->Command;
@@ -543,7 +547,7 @@
 	if (le32_to_cpu(hdr->NextCommand) > 0)
 		sz = large_sz;
 
-	work->response_buf = kvmalloc(sz, GFP_KERNEL | __GFP_ZERO);
+	work->response_buf = kvzalloc(sz, GFP_KERNEL);
 	if (!work->response_buf)
 		return -ENOMEM;
 
@@ -559,9 +563,9 @@
  */
 int smb2_check_user_session(struct ksmbd_work *work)
 {
-	struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf);
+	struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
 	struct ksmbd_conn *conn = work->conn;
-	unsigned int cmd = conn->ops->get_cmd_val(work);
+	unsigned int cmd = le16_to_cpu(req_hdr->Command);
 	unsigned long long sess_id;
 
 	/*
@@ -587,7 +591,7 @@
 			pr_err("The first operation in the compound does not have sess\n");
 			return -EINVAL;
 		}
-		if (work->sess->id != sess_id) {
+		if (sess_id != ULLONG_MAX && work->sess->id != sess_id) {
 			pr_err("session id(%llu) is different with the first operation(%lld)\n",
 					sess_id, work->sess->id);
 			return -EINVAL;
@@ -653,13 +657,9 @@
 
 int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
 {
-	struct smb2_hdr *rsp_hdr;
 	struct ksmbd_conn *conn = work->conn;
 	int id;
 
-	rsp_hdr = smb2_get_msg(work->response_buf);
-	rsp_hdr->Flags |= SMB2_FLAGS_ASYNC_COMMAND;
-
 	id = ksmbd_acquire_async_msg_id(&conn->async_ida);
 	if (id < 0) {
 		pr_err("Failed to alloc async message id\n");
@@ -667,7 +667,6 @@
 	}
 	work->asynchronous = true;
 	work->async_id = id;
-	rsp_hdr->Id.AsyncId = cpu_to_le64(id);
 
 	ksmbd_debug(SMB,
 		    "Send interim Response to inform async request id : %d\n",
@@ -706,15 +705,26 @@
 void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
 {
 	struct smb2_hdr *rsp_hdr;
+	struct ksmbd_work *in_work = ksmbd_alloc_work_struct();
 
-	rsp_hdr = smb2_get_msg(work->response_buf);
-	smb2_set_err_rsp(work);
+	if (allocate_interim_rsp_buf(in_work)) {
+		pr_err("smb_allocate_rsp_buf failed!\n");
+		ksmbd_free_work_struct(in_work);
+		return;
+	}
+
+	in_work->conn = work->conn;
+	memcpy(smb2_get_msg(in_work->response_buf), ksmbd_resp_buf_next(work),
+	       __SMB2_HEADER_STRUCTURE_SIZE);
+
+	rsp_hdr = smb2_get_msg(in_work->response_buf);
+	rsp_hdr->Flags |= SMB2_FLAGS_ASYNC_COMMAND;
+	rsp_hdr->Id.AsyncId = cpu_to_le64(work->async_id);
+	smb2_set_err_rsp(in_work);
 	rsp_hdr->Status = status;
 
-	work->multiRsp = 1;
-	ksmbd_conn_write(work);
-	rsp_hdr->Status = 0;
-	work->multiRsp = 0;
+	ksmbd_conn_write(in_work);
+	ksmbd_free_work_struct(in_work);
 }
 
 static __le32 smb2_get_reparse_tag_special_file(umode_t mode)
@@ -821,9 +831,8 @@
 	pneg_ctxt->Name[15] = 0x7C;
 }
 
-static void assemble_neg_contexts(struct ksmbd_conn *conn,
-				  struct smb2_negotiate_rsp *rsp,
-				  void *smb2_buf_len)
+static unsigned int assemble_neg_contexts(struct ksmbd_conn *conn,
+				  struct smb2_negotiate_rsp *rsp)
 {
 	char * const pneg_ctxt = (char *)rsp +
 			le32_to_cpu(rsp->NegotiateContextOffset);
@@ -834,7 +843,6 @@
 		    "assemble SMB2_PREAUTH_INTEGRITY_CAPABILITIES context\n");
 	build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt,
 			   conn->preauth_info->Preauth_HashId);
-	inc_rfc1001_len(smb2_buf_len, AUTH_GSS_PADDING);
 	ctxt_size = sizeof(struct smb2_preauth_neg_context);
 
 	if (conn->cipher_type) {
@@ -874,7 +882,7 @@
 	}
 
 	rsp->NegotiateContextCount = cpu_to_le16(neg_ctxt_cnt);
-	inc_rfc1001_len(smb2_buf_len, ctxt_size);
+	return ctxt_size + AUTH_GSS_PADDING;
 }
 
 static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
@@ -1090,7 +1098,7 @@
 	struct smb2_negotiate_req *req = smb2_get_msg(work->request_buf);
 	struct smb2_negotiate_rsp *rsp = smb2_get_msg(work->response_buf);
 	int rc = 0;
-	unsigned int smb2_buf_len, smb2_neg_size;
+	unsigned int smb2_buf_len, smb2_neg_size, neg_ctxt_len = 0;
 	__le32 status;
 
 	ksmbd_debug(SMB, "Received negotiate request\n");
@@ -1183,7 +1191,7 @@
 						 conn->preauth_info->Preauth_HashValue);
 		rsp->NegotiateContextOffset =
 				cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
-		assemble_neg_contexts(conn, rsp, work->response_buf);
+		neg_ctxt_len = assemble_neg_contexts(conn, rsp);
 		break;
 	case SMB302_PROT_ID:
 		init_smb3_02_server(conn);
@@ -1194,6 +1202,11 @@
 	case SMB21_PROT_ID:
 		init_smb2_1_server(conn);
 		break;
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	case SMB20_PROT_ID:
+		init_smb2_0_server(conn);
+		break;
+#endif
 	case SMB2X_PROT_ID:
 	case BAD_PROT_ID:
 	default:
@@ -1233,8 +1246,7 @@
 	rsp->SecurityBufferLength = cpu_to_le16(AUTH_GSS_LENGTH);
 	ksmbd_copy_gss_neg_header((char *)(&rsp->hdr) +
 				  le16_to_cpu(rsp->SecurityBufferOffset));
-	inc_rfc1001_len(work->response_buf, sizeof(struct smb2_negotiate_rsp) -
-			sizeof(struct smb2_hdr) + AUTH_GSS_LENGTH);
+
 	rsp->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED_LE;
 	conn->use_spnego = true;
 
@@ -1252,9 +1264,15 @@
 	ksmbd_conn_set_need_negotiate(conn);
 
 err_out:
+	if (rc)
+		rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
+
+	if (!rc)
+		rc = ksmbd_iov_pin_rsp(work, rsp,
+				       sizeof(struct smb2_negotiate_rsp) +
+					AUTH_GSS_LENGTH + neg_ctxt_len);
 	if (rc < 0)
 		smb2_set_err_rsp(work);
-
 	return rc;
 }
 
@@ -1322,9 +1340,8 @@
 
 static int ntlm_negotiate(struct ksmbd_work *work,
 			  struct negotiate_message *negblob,
-			  size_t negblob_len)
+			  size_t negblob_len, struct smb2_sess_setup_rsp *rsp)
 {
-	struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
 	struct challenge_message *chgblob;
 	unsigned char *spnego_blob = NULL;
 	u16 spnego_blob_len;
@@ -1402,6 +1419,9 @@
 	char *name;
 	unsigned int name_off, name_len, secbuf_len;
 
+	if (conn->use_spnego && conn->mechToken)
+		secbuf_len = conn->mechTokenLen;
+	else
 	secbuf_len = le16_to_cpu(req->SecurityBufferLength);
 	if (secbuf_len < sizeof(struct authenticate_message)) {
 		ksmbd_debug(SMB, "blob len %d too small\n", secbuf_len);
@@ -1429,10 +1449,10 @@
 	return user;
 }
 
-static int ntlm_authenticate(struct ksmbd_work *work)
+static int ntlm_authenticate(struct ksmbd_work *work,
+			     struct smb2_sess_setup_req *req,
+			     struct smb2_sess_setup_rsp *rsp)
 {
-	struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
-	struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
 	struct ksmbd_conn *conn = work->conn;
 	struct ksmbd_session *sess = work->sess;
 	struct channel *chann = NULL;
@@ -1455,7 +1475,6 @@
 		memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len);
 		rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len);
 		kfree(spnego_blob);
-		inc_rfc1001_len(work->response_buf, spnego_blob_len - 1);
 	}
 
 	user = session_user(conn, req);
@@ -1494,6 +1513,9 @@
 		struct authenticate_message *authblob;
 
 		authblob = user_authblob(conn, req);
+		if (conn->use_spnego && conn->mechToken)
+			sz = conn->mechTokenLen;
+		else
 		sz = le16_to_cpu(req->SecurityBufferLength);
 		rc = ksmbd_decode_ntlmssp_auth_blob(authblob, sz, conn, sess);
 		if (rc) {
@@ -1566,10 +1588,10 @@
 }
 
 #ifdef CONFIG_SMB_SERVER_KERBEROS5
-static int krb5_authenticate(struct ksmbd_work *work)
+static int krb5_authenticate(struct ksmbd_work *work,
+			     struct smb2_sess_setup_req *req,
+			     struct smb2_sess_setup_rsp *rsp)
 {
-	struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
-	struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
 	struct ksmbd_conn *conn = work->conn;
 	struct ksmbd_session *sess = work->sess;
 	char *in_blob, *out_blob;
@@ -1601,7 +1623,6 @@
 		return -EINVAL;
 	}
 	rsp->SecurityBufferLength = cpu_to_le16(out_len);
-	inc_rfc1001_len(work->response_buf, out_len - 1);
 
 	if ((conn->sign || server_conf.enforced_signing) ||
 	    (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
@@ -1647,7 +1668,9 @@
 	return 0;
 }
 #else
-static int krb5_authenticate(struct ksmbd_work *work)
+static int krb5_authenticate(struct ksmbd_work *work,
+			     struct smb2_sess_setup_req *req,
+			     struct smb2_sess_setup_rsp *rsp)
 {
 	return -EOPNOTSUPP;
 }
@@ -1656,8 +1679,8 @@
 int smb2_sess_setup(struct ksmbd_work *work)
 {
 	struct ksmbd_conn *conn = work->conn;
-	struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
-	struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
+	struct smb2_sess_setup_req *req;
+	struct smb2_sess_setup_rsp *rsp;
 	struct ksmbd_session *sess;
 	struct negotiate_message *negblob;
 	unsigned int negblob_len, negblob_off;
@@ -1665,11 +1688,12 @@
 
 	ksmbd_debug(SMB, "Received request for session setup\n");
 
+	WORK_BUFFERS(work, req, rsp);
+
 	rsp->StructureSize = cpu_to_le16(9);
 	rsp->SessionFlags = 0;
 	rsp->SecurityBufferOffset = cpu_to_le16(72);
 	rsp->SecurityBufferLength = 0;
-	inc_rfc1001_len(work->response_buf, 9);
 
 	ksmbd_conn_lock(conn);
 	if (!req->hdr.SessionId) {
@@ -1765,8 +1789,7 @@
 
 	negblob_off = le16_to_cpu(req->SecurityBufferOffset);
 	negblob_len = le16_to_cpu(req->SecurityBufferLength);
-	if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
-	    negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
+	if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer)) {
 		rc = -EINVAL;
 		goto out_err;
 	}
@@ -1775,8 +1798,15 @@
 			negblob_off);
 
 	if (decode_negotiation_token(conn, negblob, negblob_len) == 0) {
-		if (conn->mechToken)
+		if (conn->mechToken) {
 			negblob = (struct negotiate_message *)conn->mechToken;
+			negblob_len = conn->mechTokenLen;
+		}
+	}
+
+	if (negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
+		rc = -EINVAL;
+		goto out_err;
 	}
 
 	if (server_conf.auth_mechs & conn->auth_mechs) {
@@ -1786,7 +1816,7 @@
 
 		if (conn->preferred_auth_mech &
 				(KSMBD_AUTH_KRB5 | KSMBD_AUTH_MSKRB5)) {
-			rc = krb5_authenticate(work);
+			rc = krb5_authenticate(work, req, rsp);
 			if (rc) {
 				rc = -EINVAL;
 				goto out_err;
@@ -1800,20 +1830,13 @@
 			sess->Preauth_HashValue = NULL;
 		} else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
 			if (negblob->MessageType == NtLmNegotiate) {
-				rc = ntlm_negotiate(work, negblob, negblob_len);
+				rc = ntlm_negotiate(work, negblob, negblob_len, rsp);
 				if (rc)
 					goto out_err;
 				rsp->hdr.Status =
 					STATUS_MORE_PROCESSING_REQUIRED;
-				/*
-				 * Note: here total size -1 is done as an
-				 * adjustment for 0 size blob
-				 */
-				inc_rfc1001_len(work->response_buf,
-						le16_to_cpu(rsp->SecurityBufferLength) - 1);
-
 			} else if (negblob->MessageType == NtLmAuthenticate) {
-				rc = ntlm_authenticate(work);
+				rc = ntlm_authenticate(work, req, rsp);
 				if (rc)
 					goto out_err;
 
@@ -1896,6 +1919,18 @@
 				ksmbd_conn_set_need_negotiate(conn);
 			}
 		}
+		smb2_set_err_rsp(work);
+	} else {
+		unsigned int iov_len;
+
+		if (rsp->SecurityBufferLength)
+			iov_len = offsetof(struct smb2_sess_setup_rsp, Buffer) +
+				le16_to_cpu(rsp->SecurityBufferLength);
+		else
+			iov_len = sizeof(struct smb2_sess_setup_rsp);
+		rc = ksmbd_iov_pin_rsp(work, rsp, iov_len);
+		if (rc)
+			rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
 	}
 
 	ksmbd_conn_unlock(conn);
@@ -1911,15 +1946,17 @@
 int smb2_tree_connect(struct ksmbd_work *work)
 {
 	struct ksmbd_conn *conn = work->conn;
-	struct smb2_tree_connect_req *req = smb2_get_msg(work->request_buf);
-	struct smb2_tree_connect_rsp *rsp = smb2_get_msg(work->response_buf);
+	struct smb2_tree_connect_req *req;
+	struct smb2_tree_connect_rsp *rsp;
 	struct ksmbd_session *sess = work->sess;
 	char *treename = NULL, *name = NULL;
 	struct ksmbd_tree_conn_status status;
 	struct ksmbd_share_config *share;
 	int rc = -EINVAL;
 
-	treename = smb_strndup_from_utf16(req->Buffer,
+	WORK_BUFFERS(work, req, rsp);
+
+	treename = smb_strndup_from_utf16((char *)req + le16_to_cpu(req->PathOffset),
 					  le16_to_cpu(req->PathLength), true,
 					  conn->local_nls);
 	if (IS_ERR(treename)) {
@@ -1971,14 +2008,20 @@
 	if (conn->posix_ext_supported)
 		status.tree_conn->posix_extensions = true;
 
+	write_lock(&sess->tree_conns_lock);
+	status.tree_conn->t_state = TREE_CONNECTED;
+	write_unlock(&sess->tree_conns_lock);
 	rsp->StructureSize = cpu_to_le16(16);
-	inc_rfc1001_len(work->response_buf, 16);
 out_err1:
 	rsp->Capabilities = 0;
 	rsp->Reserved = 0;
 	/* default manual caching */
 	rsp->ShareFlags = SMB2_SHAREFLAG_MANUAL_CACHING;
 
+	rc = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_tree_connect_rsp));
+	if (rc)
+		status.ret = KSMBD_TREE_CONN_STATUS_NOMEM;
+
 	if (!IS_ERR(treename))
 		kfree(treename);
 	if (!IS_ERR(name))
@@ -2087,30 +2130,60 @@
  */
 int smb2_tree_disconnect(struct ksmbd_work *work)
 {
-	struct smb2_tree_disconnect_rsp *rsp = smb2_get_msg(work->response_buf);
+	struct smb2_tree_disconnect_rsp *rsp;
+	struct smb2_tree_disconnect_req *req;
 	struct ksmbd_session *sess = work->sess;
 	struct ksmbd_tree_connect *tcon = work->tcon;
+	int err;
 
-	rsp->StructureSize = cpu_to_le16(4);
-	inc_rfc1001_len(work->response_buf, 4);
+	WORK_BUFFERS(work, req, rsp);
 
 	ksmbd_debug(SMB, "request\n");
 
-	if (!tcon || test_and_set_bit(TREE_CONN_EXPIRE, &tcon->status)) {
-		struct smb2_tree_disconnect_req *req =
-			smb2_get_msg(work->request_buf);
-
+	if (!tcon) {
 		ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
 
 		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
-		smb2_set_err_rsp(work);
-		return 0;
+		err = -ENOENT;
+		goto err_out;
 	}
 
 	ksmbd_close_tree_conn_fds(work);
-	ksmbd_tree_conn_disconnect(sess, tcon);
+
+	write_lock(&sess->tree_conns_lock);
+	if (tcon->t_state == TREE_DISCONNECTED) {
+		write_unlock(&sess->tree_conns_lock);
+		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+		err = -ENOENT;
+		goto err_out;
+	}
+
+	WARN_ON_ONCE(atomic_dec_and_test(&tcon->refcount));
+	tcon->t_state = TREE_DISCONNECTED;
+	write_unlock(&sess->tree_conns_lock);
+
+	err = ksmbd_tree_conn_disconnect(sess, tcon);
+	if (err) {
+		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+		goto err_out;
+	}
+
 	work->tcon = NULL;
+
+	rsp->StructureSize = cpu_to_le16(4);
+	err = ksmbd_iov_pin_rsp(work, rsp,
+				sizeof(struct smb2_tree_disconnect_rsp));
+	if (err) {
+		rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
+		goto err_out;
+	}
+
 	return 0;
+
+err_out:
+	smb2_set_err_rsp(work);
+	return err;
+
 }
 
 /**
@@ -2122,17 +2195,27 @@
 int smb2_session_logoff(struct ksmbd_work *work)
 {
 	struct ksmbd_conn *conn = work->conn;
-	struct smb2_logoff_rsp *rsp = smb2_get_msg(work->response_buf);
+	struct smb2_logoff_req *req;
+	struct smb2_logoff_rsp *rsp;
 	struct ksmbd_session *sess;
-	struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
-	u64 sess_id = le64_to_cpu(req->hdr.SessionId);
+	u64 sess_id;
+	int err;
 
-	rsp->StructureSize = cpu_to_le16(4);
-	inc_rfc1001_len(work->response_buf, 4);
+	WORK_BUFFERS(work, req, rsp);
 
 	ksmbd_debug(SMB, "request\n");
 
+	ksmbd_conn_lock(conn);
+	if (!ksmbd_conn_good(conn)) {
+		ksmbd_conn_unlock(conn);
+		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+		smb2_set_err_rsp(work);
+		return -ENOENT;
+	}
+	sess_id = le64_to_cpu(req->hdr.SessionId);
 	ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_RECONNECT);
+	ksmbd_conn_unlock(conn);
+
 	ksmbd_close_session_fds(work);
 	ksmbd_conn_wait_idle(conn, sess_id);
 
@@ -2145,7 +2228,7 @@
 		ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
 		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
 		smb2_set_err_rsp(work);
-		return 0;
+		return -ENOENT;
 	}
 
 	ksmbd_destroy_file_table(&sess->file_table);
@@ -2154,6 +2237,14 @@
 	ksmbd_free_user(sess->user);
 	sess->user = NULL;
 	ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE);
+
+	rsp->StructureSize = cpu_to_le16(4);
+	err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_logoff_rsp));
+	if (err) {
+		rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
+		smb2_set_err_rsp(work);
+		return err;
+	}
 	return 0;
 }
 
@@ -2165,12 +2256,14 @@
  */
 static noinline int create_smb2_pipe(struct ksmbd_work *work)
 {
-	struct smb2_create_rsp *rsp = smb2_get_msg(work->response_buf);
-	struct smb2_create_req *req = smb2_get_msg(work->request_buf);
+	struct smb2_create_rsp *rsp;
+	struct smb2_create_req *req;
 	int id;
 	int err;
 	char *name;
 
+	WORK_BUFFERS(work, req, rsp);
+
 	name = smb_strndup_from_utf16(req->Buffer, le16_to_cpu(req->NameLength),
 				      1, work->conn->local_nls);
 	if (IS_ERR(name)) {
@@ -2204,7 +2297,10 @@
 	rsp->CreateContextsOffset = 0;
 	rsp->CreateContextsLength = 0;
 
-	inc_rfc1001_len(work->response_buf, 88); /* StructureSize - 1*/
+	err = ksmbd_iov_pin_rsp(work, rsp, offsetof(struct smb2_create_rsp, Buffer));
+	if (err)
+		goto out;
+
 	kfree(name);
 	return 0;
 
@@ -2232,11 +2328,12 @@
  * @eabuf:	set info command buffer
  * @buf_len:	set info command buffer length
  * @path:	dentry path for get ea
+ * @get_write:	get write access to a mount
  *
  * Return:	0 on success, otherwise error
  */
 static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
-		       const struct path *path)
+		       const struct path *path, bool get_write)
 {
 	struct mnt_idmap *idmap = mnt_idmap(path->mnt);
 	char *attr_name = NULL, *value;
@@ -2298,7 +2395,8 @@
 			rc = 0;
 		} else {
 			rc = ksmbd_vfs_setxattr(idmap, path, attr_name, value,
-						le16_to_cpu(eabuf->EaValueLength), 0);
+						le16_to_cpu(eabuf->EaValueLength),
+						0, true);
 			if (rc < 0) {
 				ksmbd_debug(SMB,
 					    "ksmbd_vfs_setxattr is failed(%d)\n",
@@ -2313,9 +2411,16 @@
 			break;
 		buf_len -= next;
 		eabuf = (struct smb2_ea_info *)((char *)eabuf + next);
-		if (next < (u32)eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength))
+		if (buf_len < sizeof(struct smb2_ea_info)) {
+			rc = -EINVAL;
 			break;
+		}
 
+		if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength +
+				le16_to_cpu(eabuf->EaValueLength)) {
+			rc = -EINVAL;
+			break;
+		}
 	} while (next != 0);
 
 	kfree(attr_name);
@@ -2354,7 +2459,7 @@
 		return -EBADF;
 	}
 
-	rc = ksmbd_vfs_setxattr(idmap, path, xattr_stream_name, NULL, 0, 0);
+	rc = ksmbd_vfs_setxattr(idmap, path, xattr_stream_name, NULL, 0, 0, false);
 	if (rc < 0)
 		pr_err("Failed to store XATTR stream name :%d\n", rc);
 	return 0;
@@ -2429,7 +2534,7 @@
 	da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
 		XATTR_DOSINFO_ITIME;
 
-	rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da);
+	rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da, true);
 	if (rc)
 		ksmbd_debug(SMB, "failed to store file attribute into xattr\n");
 }
@@ -2456,8 +2561,9 @@
 	}
 }
 
-static int smb2_creat(struct ksmbd_work *work, struct path *path, char *name,
-		      int open_flags, umode_t posix_mode, bool is_dir)
+static int smb2_creat(struct ksmbd_work *work, struct path *parent_path,
+		      struct path *path, char *name, int open_flags,
+		      umode_t posix_mode, bool is_dir)
 {
 	struct ksmbd_tree_connect *tcon = work->tcon;
 	struct ksmbd_share_config *share = tcon->share_conf;
@@ -2484,7 +2590,7 @@
 			return rc;
 	}
 
-	rc = ksmbd_vfs_kern_path_locked(work, name, 0, path, 0);
+	rc = ksmbd_vfs_kern_path_locked(work, name, 0, parent_path, path, 0);
 	if (rc) {
 		pr_err("cannot get linux path (%s), err = %d\n",
 		       name, rc);
@@ -2518,7 +2624,7 @@
 	    sizeof(struct create_sd_buf_req))
 		return -EINVAL;
 	return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd,
-			    le32_to_cpu(sd_buf->ccontext.DataLength), true);
+			    le32_to_cpu(sd_buf->ccontext.DataLength), true, false);
 }
 
 static void ksmbd_acls_fattr(struct smb_fattr *fattr,
@@ -2554,7 +2660,7 @@
 	struct ksmbd_tree_connect *tcon = work->tcon;
 	struct smb2_create_req *req;
 	struct smb2_create_rsp *rsp;
-	struct path path;
+	struct path path, parent_path;
 	struct ksmbd_share_config *share = tcon->share_conf;
 	struct ksmbd_file *fp = NULL;
 	struct file *filp = NULL;
@@ -2578,6 +2684,7 @@
 	u64 time;
 	umode_t posix_mode = 0;
 	__le32 daccess, maximal_access = 0;
+	int iov_len = 0;
 
 	WORK_BUFFERS(work, req, rsp);
 
@@ -2599,10 +2706,10 @@
 		    *(char *)req->Buffer == '\\') {
 			pr_err("not allow directory name included leading slash\n");
 			rc = -EINVAL;
-			goto err_out1;
+			goto err_out2;
 		}
 
-		name = smb2_get_name(req->Buffer,
+		name = smb2_get_name((char *)req + le16_to_cpu(req->NameOffset),
 				     le16_to_cpu(req->NameLength),
 				     work->conn->local_nls);
 		if (IS_ERR(name)) {
@@ -2610,7 +2717,7 @@
 			if (rc != -ENOMEM)
 				rc = -ENOENT;
 			name = NULL;
-			goto err_out1;
+			goto err_out2;
 		}
 
 		ksmbd_debug(SMB, "converted name = %s\n", name);
@@ -2618,48 +2725,44 @@
 			if (!test_share_config_flag(work->tcon->share_conf,
 						    KSMBD_SHARE_FLAG_STREAMS)) {
 				rc = -EBADF;
-				goto err_out1;
+				goto err_out2;
 			}
 			rc = parse_stream_name(name, &stream_name, &s_type);
 			if (rc < 0)
-				goto err_out1;
+				goto err_out2;
 		}
 
 		rc = ksmbd_validate_filename(name);
 		if (rc < 0)
-			goto err_out1;
+			goto err_out2;
 
 		if (ksmbd_share_veto_filename(share, name)) {
 			rc = -ENOENT;
 			ksmbd_debug(SMB, "Reject open(), vetoed file: %s\n",
 				    name);
-			goto err_out1;
+			goto err_out2;
 		}
 	} else {
 		name = kstrdup("", GFP_KERNEL);
 		if (!name) {
 			rc = -ENOMEM;
-			goto err_out1;
+			goto err_out2;
 		}
 	}
 
-	req_op_level = req->RequestedOplockLevel;
-	if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE)
-		lc = parse_lease_state(req);
-
 	if (le32_to_cpu(req->ImpersonationLevel) > le32_to_cpu(IL_DELEGATE)) {
 		pr_err("Invalid impersonationlevel : 0x%x\n",
 		       le32_to_cpu(req->ImpersonationLevel));
 		rc = -EIO;
 		rsp->hdr.Status = STATUS_BAD_IMPERSONATION_LEVEL;
-		goto err_out1;
+		goto err_out2;
 	}
 
 	if (req->CreateOptions && !(req->CreateOptions & CREATE_OPTIONS_MASK_LE)) {
 		pr_err("Invalid create options : 0x%x\n",
 		       le32_to_cpu(req->CreateOptions));
 		rc = -EINVAL;
-		goto err_out1;
+		goto err_out2;
 	} else {
 		if (req->CreateOptions & FILE_SEQUENTIAL_ONLY_LE &&
 		    req->CreateOptions & FILE_RANDOM_ACCESS_LE)
@@ -2669,13 +2772,13 @@
 		    (FILE_OPEN_BY_FILE_ID_LE | CREATE_TREE_CONNECTION |
 		     FILE_RESERVE_OPFILTER_LE)) {
 			rc = -EOPNOTSUPP;
-			goto err_out1;
+			goto err_out2;
 		}
 
 		if (req->CreateOptions & FILE_DIRECTORY_FILE_LE) {
 			if (req->CreateOptions & FILE_NON_DIRECTORY_FILE_LE) {
 				rc = -EINVAL;
-				goto err_out1;
+				goto err_out2;
 			} else if (req->CreateOptions & FILE_NO_COMPRESSION_LE) {
 				req->CreateOptions = ~(FILE_NO_COMPRESSION_LE);
 			}
@@ -2687,21 +2790,21 @@
 		pr_err("Invalid create disposition : 0x%x\n",
 		       le32_to_cpu(req->CreateDisposition));
 		rc = -EINVAL;
-		goto err_out1;
+		goto err_out2;
 	}
 
 	if (!(req->DesiredAccess & DESIRED_ACCESS_MASK)) {
 		pr_err("Invalid desired access : 0x%x\n",
 		       le32_to_cpu(req->DesiredAccess));
 		rc = -EACCES;
-		goto err_out1;
+		goto err_out2;
 	}
 
 	if (req->FileAttributes && !(req->FileAttributes & FILE_ATTRIBUTE_MASK_LE)) {
 		pr_err("Invalid file attribute : 0x%x\n",
 		       le32_to_cpu(req->FileAttributes));
 		rc = -EINVAL;
-		goto err_out1;
+		goto err_out2;
 	}
 
 	if (req->CreateContextsOffset) {
@@ -2709,19 +2812,19 @@
 		context = smb2_find_context_vals(req, SMB2_CREATE_EA_BUFFER, 4);
 		if (IS_ERR(context)) {
 			rc = PTR_ERR(context);
-			goto err_out1;
+			goto err_out2;
 		} else if (context) {
 			ea_buf = (struct create_ea_buf_req *)context;
 			if (le16_to_cpu(context->DataOffset) +
 			    le32_to_cpu(context->DataLength) <
 			    sizeof(struct create_ea_buf_req)) {
 				rc = -EINVAL;
-				goto err_out1;
+				goto err_out2;
 			}
 			if (req->CreateOptions & FILE_NO_EA_KNOWLEDGE_LE) {
 				rsp->hdr.Status = STATUS_ACCESS_DENIED;
 				rc = -EACCES;
-				goto err_out1;
+				goto err_out2;
 			}
 		}
 
@@ -2729,7 +2832,7 @@
 						 SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST, 4);
 		if (IS_ERR(context)) {
 			rc = PTR_ERR(context);
-			goto err_out1;
+			goto err_out2;
 		} else if (context) {
 			ksmbd_debug(SMB,
 				    "get query maximal access context\n");
@@ -2740,11 +2843,11 @@
 						 SMB2_CREATE_TIMEWARP_REQUEST, 4);
 		if (IS_ERR(context)) {
 			rc = PTR_ERR(context);
-			goto err_out1;
+			goto err_out2;
 		} else if (context) {
 			ksmbd_debug(SMB, "get timewarp context\n");
 			rc = -EBADF;
-			goto err_out1;
+			goto err_out2;
 		}
 
 		if (tcon->posix_extensions) {
@@ -2752,7 +2855,7 @@
 							 SMB2_CREATE_TAG_POSIX, 16);
 			if (IS_ERR(context)) {
 				rc = PTR_ERR(context);
-				goto err_out1;
+				goto err_out2;
 			} else if (context) {
 				struct create_posix *posix =
 					(struct create_posix *)context;
@@ -2760,7 +2863,7 @@
 				    le32_to_cpu(context->DataLength) <
 				    sizeof(struct create_posix) - 4) {
 					rc = -EINVAL;
-					goto err_out1;
+					goto err_out2;
 				}
 				ksmbd_debug(SMB, "get posix context\n");
 
@@ -2772,10 +2875,11 @@
 
 	if (ksmbd_override_fsids(work)) {
 		rc = -ENOMEM;
-		goto err_out1;
+		goto err_out2;
 	}
 
-	rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS, &path, 1);
+	rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					&parent_path, &path, 1);
 	if (!rc) {
 		file_present = true;
 
@@ -2872,11 +2976,9 @@
 		if (!file_present) {
 			daccess = cpu_to_le32(GENERIC_ALL_FLAGS);
 		} else {
-			rc = ksmbd_vfs_query_maximal_access(idmap,
+			ksmbd_vfs_query_maximal_access(idmap,
 							    path.dentry,
 							    &daccess);
-			if (rc)
-				goto err_out;
 			already_permitted = true;
 		}
 		maximal_access = daccess;
@@ -2887,7 +2989,7 @@
 					    &may_flags);
 
 	if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
-		if (open_flags & O_CREAT) {
+		if (open_flags & (O_CREAT | O_TRUNC)) {
 			ksmbd_debug(SMB,
 				    "User does not have write permission\n");
 			rc = -EACCES;
@@ -2897,7 +2999,8 @@
 
 	/*create file if not present */
 	if (!file_present) {
-		rc = smb2_creat(work, &path, name, open_flags, posix_mode,
+		rc = smb2_creat(work, &parent_path, &path, name, open_flags,
+				posix_mode,
 				req->CreateOptions & FILE_DIRECTORY_FILE_LE);
 		if (rc) {
 			if (rc == -ENOENT) {
@@ -2918,7 +3021,7 @@
 
 			rc = smb2_set_ea(&ea_buf->ea,
 					 le32_to_cpu(ea_buf->ccontext.DataLength),
-					 &path);
+					 &path, false);
 			if (rc == -EOPNOTSUPP)
 				rc = 0;
 			else if (rc)
@@ -2947,7 +3050,7 @@
 		}
 	}
 
-	rc = ksmbd_query_inode_status(d_inode(path.dentry->d_parent));
+	rc = ksmbd_query_inode_status(path.dentry->d_parent);
 	if (rc == KSMBD_INODE_STATUS_PENDING_DELETE) {
 		rc = -EBUSY;
 		goto err_out;
@@ -3061,7 +3164,8 @@
 								    idmap,
 								    &path,
 								    pntsd,
-								    pntsd_size);
+								    pntsd_size,
+								    false);
 					kfree(pntsd);
 					if (rc)
 						pr_err("failed to store ntacl in xattr : %d\n",
@@ -3084,11 +3188,6 @@
 
 	fp->attrib_only = !(req->DesiredAccess & ~(FILE_READ_ATTRIBUTES_LE |
 			FILE_WRITE_ATTRIBUTES_LE | FILE_SYNCHRONIZE_LE));
-	if (!S_ISDIR(file_inode(filp)->i_mode) && open_flags & O_TRUNC &&
-	    !fp->attrib_only && !stream_name) {
-		smb_break_all_oplock(work, fp);
-		need_truncate = 1;
-	}
 
 	/* fp should be searchable through ksmbd_inode.m_fp_list
 	 * after daccess, saccess, attrib_only, and stream are
@@ -3104,23 +3203,43 @@
 		goto err_out;
 	}
 
+	if (file_present || created)
+		ksmbd_vfs_kern_path_unlock(&parent_path, &path);
+
+	if (!S_ISDIR(file_inode(filp)->i_mode) && open_flags & O_TRUNC &&
+	    !fp->attrib_only && !stream_name) {
+		smb_break_all_oplock(work, fp);
+		need_truncate = 1;
+	}
+
+	req_op_level = req->RequestedOplockLevel;
+	if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE)
+		lc = parse_lease_state(req, S_ISDIR(file_inode(filp)->i_mode));
+
 	share_ret = ksmbd_smb_check_shared_mode(fp->filp, fp);
 	if (!test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_OPLOCKS) ||
 	    (req_op_level == SMB2_OPLOCK_LEVEL_LEASE &&
 	     !(conn->vals->capabilities & SMB2_GLOBAL_CAP_LEASING))) {
 		if (share_ret < 0 && !S_ISDIR(file_inode(fp->filp)->i_mode)) {
 			rc = share_ret;
-			goto err_out;
+			goto err_out1;
 		}
 	} else {
 		if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) {
+			/*
+			 * Compare parent lease using parent key. If there is no
+			 * a lease that has same parent key, Send lease break
+			 * notification.
+			 */
+			smb_send_parent_lease_break_noti(fp, lc);
+
 			req_op_level = smb2_map_lease_to_oplock(lc->req_state);
 			ksmbd_debug(SMB,
 				    "lease req for(%s) req oplock state 0x%x, lease state 0x%x\n",
 				    name, req_op_level, lc->req_state);
 			rc = find_same_lease_key(sess, fp->f_ci, lc);
 			if (rc)
-				goto err_out;
+				goto err_out1;
 		} else if (open_flags == O_RDONLY &&
 			   (req_op_level == SMB2_OPLOCK_LEVEL_BATCH ||
 			    req_op_level == SMB2_OPLOCK_LEVEL_EXCLUSIVE))
@@ -3131,16 +3250,16 @@
 				      le32_to_cpu(req->hdr.Id.SyncId.TreeId),
 				      lc, share_ret);
 		if (rc < 0)
-			goto err_out;
+			goto err_out1;
 	}
 
 	if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)
 		ksmbd_fd_set_delete_on_close(fp, file_info);
 
 	if (need_truncate) {
-		rc = smb2_create_truncate(&path);
+		rc = smb2_create_truncate(&fp->filp->f_path);
 		if (rc)
-			goto err_out;
+			goto err_out1;
 	}
 
 	if (req->CreateContextsOffset) {
@@ -3150,7 +3269,7 @@
 					SMB2_CREATE_ALLOCATION_SIZE, 4);
 		if (IS_ERR(az_req)) {
 			rc = PTR_ERR(az_req);
-			goto err_out;
+			goto err_out1;
 		} else if (az_req) {
 			loff_t alloc_size;
 			int err;
@@ -3159,7 +3278,7 @@
 			    le32_to_cpu(az_req->ccontext.DataLength) <
 			    sizeof(struct create_alloc_size_req)) {
 				rc = -EINVAL;
-				goto err_out;
+				goto err_out1;
 			}
 			alloc_size = le64_to_cpu(az_req->AllocationSize);
 			ksmbd_debug(SMB,
@@ -3177,7 +3296,7 @@
 		context = smb2_find_context_vals(req, SMB2_CREATE_QUERY_ON_DISK_ID, 4);
 		if (IS_ERR(context)) {
 			rc = PTR_ERR(context);
-			goto err_out;
+			goto err_out1;
 		} else if (context) {
 			ksmbd_debug(SMB, "get query on disk id context\n");
 			query_disk_id = 1;
@@ -3186,7 +3305,7 @@
 
 	rc = ksmbd_vfs_getattr(&path, &stat);
 	if (rc)
-		goto err_out;
+		goto err_out1;
 
 	if (stat.result_mask & STATX_BTIME)
 		fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
@@ -3229,7 +3348,7 @@
 
 	rsp->CreateContextsOffset = 0;
 	rsp->CreateContextsLength = 0;
-	inc_rfc1001_len(work->response_buf, 88); /* StructureSize - 1*/
+	iov_len = offsetof(struct smb2_create_rsp, Buffer);
 
 	/* If lease is request send lease context response */
 	if (opinfo && opinfo->is_lease) {
@@ -3244,8 +3363,7 @@
 		create_lease_buf(rsp->Buffer, opinfo->o_lease);
 		le32_add_cpu(&rsp->CreateContextsLength,
 			     conn->vals->create_lease_size);
-		inc_rfc1001_len(work->response_buf,
-				conn->vals->create_lease_size);
+		iov_len += conn->vals->create_lease_size;
 		next_ptr = &lease_ccontext->Next;
 		next_off = conn->vals->create_lease_size;
 	}
@@ -3265,8 +3383,7 @@
 				le32_to_cpu(maximal_access));
 		le32_add_cpu(&rsp->CreateContextsLength,
 			     conn->vals->create_mxac_size);
-		inc_rfc1001_len(work->response_buf,
-				conn->vals->create_mxac_size);
+		iov_len += conn->vals->create_mxac_size;
 		if (next_ptr)
 			*next_ptr = cpu_to_le32(next_off);
 		next_ptr = &mxac_ccontext->Next;
@@ -3284,8 +3401,7 @@
 				stat.ino, tcon->id);
 		le32_add_cpu(&rsp->CreateContextsLength,
 			     conn->vals->create_disk_id_size);
-		inc_rfc1001_len(work->response_buf,
-				conn->vals->create_disk_id_size);
+		iov_len += conn->vals->create_disk_id_size;
 		if (next_ptr)
 			*next_ptr = cpu_to_le32(next_off);
 		next_ptr = &disk_id_ccontext->Next;
@@ -3299,8 +3415,7 @@
 				fp);
 		le32_add_cpu(&rsp->CreateContextsLength,
 			     conn->vals->create_posix_size);
-		inc_rfc1001_len(work->response_buf,
-				conn->vals->create_posix_size);
+		iov_len += conn->vals->create_posix_size;
 		if (next_ptr)
 			*next_ptr = cpu_to_le32(next_off);
 	}
@@ -3311,13 +3426,17 @@
 	}
 
 err_out:
-	if (file_present || created) {
-		inode_unlock(d_inode(path.dentry->d_parent));
-		dput(path.dentry);
-	}
-	ksmbd_revert_fsids(work);
+	if (rc && (file_present || created))
+		ksmbd_vfs_kern_path_unlock(&parent_path, &path);
+
 err_out1:
+	ksmbd_revert_fsids(work);
 
+err_out2:
+	if (!rc) {
+		ksmbd_update_fstate(&work->sess->file_table, fp, FP_INITED);
+		rc = ksmbd_iov_pin_rsp(work, (void *)rsp, iov_len);
+	}
 	if (rc) {
 		if (rc == -EINVAL)
 			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
@@ -3714,11 +3833,16 @@
 		}
 
 		ksmbd_kstat.kstat = &kstat;
-		if (priv->info_level != FILE_NAMES_INFORMATION)
-			ksmbd_vfs_fill_dentry_attrs(priv->work,
+		if (priv->info_level != FILE_NAMES_INFORMATION) {
+			rc = ksmbd_vfs_fill_dentry_attrs(priv->work,
 						    idmap,
 						    dent,
 						    &ksmbd_kstat);
+			if (rc) {
+				dput(dent);
+				continue;
+			}
+		}
 
 		rc = smb2_populate_readdir_entry(priv->work->conn,
 						 priv->info_level,
@@ -3961,7 +4085,7 @@
 	}
 
 	srch_flag = req->Flags;
-	srch_ptr = smb_strndup_from_utf16(req->Buffer,
+	srch_ptr = smb_strndup_from_utf16((char *)req + le16_to_cpu(req->FileNameOffset),
 					  le16_to_cpu(req->FileNameLength), 1,
 					  conn->local_nls);
 	if (IS_ERR(srch_ptr)) {
@@ -4043,7 +4167,10 @@
 		rsp->OutputBufferOffset = cpu_to_le16(0);
 		rsp->OutputBufferLength = cpu_to_le32(0);
 		rsp->Buffer[0] = 0;
-		inc_rfc1001_len(work->response_buf, 9);
+		rc = ksmbd_iov_pin_rsp(work, (void *)rsp,
+				       sizeof(struct smb2_query_directory_rsp));
+		if (rc)
+			goto err_out;
 	} else {
 no_buf_len:
 		((struct file_directory_info *)
@@ -4055,7 +4182,11 @@
 		rsp->StructureSize = cpu_to_le16(9);
 		rsp->OutputBufferOffset = cpu_to_le16(72);
 		rsp->OutputBufferLength = cpu_to_le32(d_info.data_count);
-		inc_rfc1001_len(work->response_buf, 8 + d_info.data_count);
+		rc = ksmbd_iov_pin_rsp(work, (void *)rsp,
+				       offsetof(struct smb2_query_directory_rsp, Buffer) +
+				       d_info.data_count);
+		if (rc)
+			goto err_out;
 	}
 
 	kfree(srch_ptr);
@@ -4102,22 +4233,14 @@
  */
 static int buffer_check_err(int reqOutputBufferLength,
 			    struct smb2_query_info_rsp *rsp,
-			    void *rsp_org, int infoclass_size)
+			    void *rsp_org)
 {
 	if (reqOutputBufferLength < le32_to_cpu(rsp->OutputBufferLength)) {
-		if (reqOutputBufferLength < infoclass_size) {
 			pr_err("Invalid Buffer Size Requested\n");
 			rsp->hdr.Status = STATUS_INFO_LENGTH_MISMATCH;
 			*(__be32 *)rsp_org = cpu_to_be32(sizeof(struct smb2_hdr));
 			return -EINVAL;
 		}
-
-		ksmbd_debug(SMB, "Buffer Overflow\n");
-		rsp->hdr.Status = STATUS_BUFFER_OVERFLOW;
-		*(__be32 *)rsp_org = cpu_to_be32(sizeof(struct smb2_hdr) +
-				reqOutputBufferLength);
-		rsp->OutputBufferLength = cpu_to_le32(reqOutputBufferLength);
-	}
 	return 0;
 }
 
@@ -4135,7 +4258,6 @@
 	sinfo->Directory = 0;
 	rsp->OutputBufferLength =
 		cpu_to_le32(sizeof(struct smb2_file_standard_info));
-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_standard_info));
 }
 
 static void get_internal_info_pipe(struct smb2_query_info_rsp *rsp, u64 num,
@@ -4149,7 +4271,6 @@
 	file_info->IndexNumber = cpu_to_le64(num | (1ULL << 63));
 	rsp->OutputBufferLength =
 		cpu_to_le32(sizeof(struct smb2_file_internal_info));
-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_internal_info));
 }
 
 static int smb2_get_info_file_pipe(struct ksmbd_session *sess,
@@ -4175,14 +4296,12 @@
 	case FILE_STANDARD_INFORMATION:
 		get_standard_info_pipe(rsp, rsp_org);
 		rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
-				      rsp, rsp_org,
-				      FILE_STANDARD_INFORMATION_SIZE);
+				      rsp, rsp_org);
 		break;
 	case FILE_INTERNAL_INFORMATION:
 		get_internal_info_pipe(rsp, id, rsp_org);
 		rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
-				      rsp, rsp_org,
-				      FILE_INTERNAL_INFORMATION_SIZE);
+				      rsp, rsp_org);
 		break;
 	default:
 		ksmbd_debug(SMB, "smb2_info_file_pipe for %u not supported\n",
@@ -4227,7 +4346,8 @@
 		    sizeof(struct smb2_ea_info_req))
 			return -EINVAL;
 
-		ea_req = (struct smb2_ea_info_req *)req->Buffer;
+		ea_req = (struct smb2_ea_info_req *)((char *)req +
+						     le16_to_cpu(req->InputBufferOffset));
 	} else {
 		/* need to send all EAs, if no specific EA is requested*/
 		if (le32_to_cpu(req->Flags) & SL_RETURN_SINGLE_ENTRY)
@@ -4288,7 +4408,7 @@
 		if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
 			name_len -= XATTR_USER_PREFIX_LEN;
 
-		ptr = (char *)(&eainfo->name + name_len + 1);
+		ptr = eainfo->name + name_len + 1;
 		buf_free_len -= (offsetof(struct smb2_ea_info, name) +
 				name_len + 1);
 		/* bailout if xattr can't fit in buf_free_len */
@@ -4350,7 +4470,6 @@
 	if (rsp_data_cnt == 0)
 		rsp->hdr.Status = STATUS_NO_EAS_ON_FILE;
 	rsp->OutputBufferLength = cpu_to_le32(rsp_data_cnt);
-	inc_rfc1001_len(rsp_org, rsp_data_cnt);
 out:
 	kvfree(xattr_list);
 	return rc;
@@ -4365,7 +4484,6 @@
 	file_info->AccessFlags = fp->daccess;
 	rsp->OutputBufferLength =
 		cpu_to_le32(sizeof(struct smb2_file_access_info));
-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_access_info));
 }
 
 static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
@@ -4374,6 +4492,7 @@
 	struct smb2_file_basic_info *basic_info;
 	struct kstat stat;
 	u64 time;
+	int ret;
 
 	if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
 		pr_err("no right to read the attributes : 0x%x\n",
@@ -4381,9 +4500,12 @@
 		return -EACCES;
 	}
 
+	ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+	if (ret)
+		return ret;
+
 	basic_info = (struct smb2_file_basic_info *)rsp->Buffer;
-	generic_fillattr(file_mnt_idmap(fp->filp), file_inode(fp->filp),
-			 &stat);
 	basic_info->CreationTime = cpu_to_le64(fp->create_time);
 	time = ksmbd_UnixTimeToNT(stat.atime);
 	basic_info->LastAccessTime = cpu_to_le64(time);
@@ -4395,33 +4517,34 @@
 	basic_info->Pad1 = 0;
 	rsp->OutputBufferLength =
 		cpu_to_le32(sizeof(struct smb2_file_basic_info));
-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_basic_info));
 	return 0;
 }
 
-static void get_file_standard_info(struct smb2_query_info_rsp *rsp,
+static int get_file_standard_info(struct smb2_query_info_rsp *rsp,
 				   struct ksmbd_file *fp, void *rsp_org)
 {
 	struct smb2_file_standard_info *sinfo;
 	unsigned int delete_pending;
-	struct inode *inode;
 	struct kstat stat;
+	int ret;
 
-	inode = file_inode(fp->filp);
-	generic_fillattr(file_mnt_idmap(fp->filp), inode, &stat);
+	ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+	if (ret)
+		return ret;
 
 	sinfo = (struct smb2_file_standard_info *)rsp->Buffer;
 	delete_pending = ksmbd_inode_pending_delete(fp);
 
-	sinfo->AllocationSize = cpu_to_le64(inode->i_blocks << 9);
+	sinfo->AllocationSize = cpu_to_le64(stat.blocks << 9);
 	sinfo->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
 	sinfo->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending);
 	sinfo->DeletePending = delete_pending;
 	sinfo->Directory = S_ISDIR(stat.mode) ? 1 : 0;
 	rsp->OutputBufferLength =
 		cpu_to_le32(sizeof(struct smb2_file_standard_info));
-	inc_rfc1001_len(rsp_org,
-			sizeof(struct smb2_file_standard_info));
+
+	return 0;
 }
 
 static void get_file_alignment_info(struct smb2_query_info_rsp *rsp,
@@ -4433,8 +4556,6 @@
 	file_info->AlignmentRequirement = 0;
 	rsp->OutputBufferLength =
 		cpu_to_le32(sizeof(struct smb2_file_alignment_info));
-	inc_rfc1001_len(rsp_org,
-			sizeof(struct smb2_file_alignment_info));
 }
 
 static int get_file_all_info(struct ksmbd_work *work,
@@ -4445,11 +4566,11 @@
 	struct ksmbd_conn *conn = work->conn;
 	struct smb2_file_all_info *file_info;
 	unsigned int delete_pending;
-	struct inode *inode;
 	struct kstat stat;
 	int conv_len;
 	char *filename;
 	u64 time;
+	int ret;
 
 	if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
 		ksmbd_debug(SMB, "no right to read the attributes : 0x%x\n",
@@ -4461,8 +4582,10 @@
 	if (IS_ERR(filename))
 		return PTR_ERR(filename);
 
-	inode = file_inode(fp->filp);
-	generic_fillattr(file_mnt_idmap(fp->filp), inode, &stat);
+	ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+	if (ret)
+		return ret;
 
 	ksmbd_debug(SMB, "filename = %s\n", filename);
 	delete_pending = ksmbd_inode_pending_delete(fp);
@@ -4477,8 +4600,7 @@
 	file_info->ChangeTime = cpu_to_le64(time);
 	file_info->Attributes = fp->f_ci->m_fattr;
 	file_info->Pad1 = 0;
-	file_info->AllocationSize =
-		cpu_to_le64(inode->i_blocks << 9);
+	file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
 	file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
 	file_info->NumberOfLinks =
 			cpu_to_le32(get_nlink(&stat) - delete_pending);
@@ -4498,7 +4620,6 @@
 	rsp->OutputBufferLength =
 		cpu_to_le32(sizeof(struct smb2_file_all_info) + conv_len - 1);
 	kfree(filename);
-	inc_rfc1001_len(rsp_org, le32_to_cpu(rsp->OutputBufferLength));
 	return 0;
 }
 
@@ -4521,10 +4642,9 @@
 	file_info->FileNameLength = cpu_to_le32(conv_len);
 	rsp->OutputBufferLength =
 		cpu_to_le32(sizeof(struct smb2_file_alt_name_info) + conv_len);
-	inc_rfc1001_len(rsp_org, le32_to_cpu(rsp->OutputBufferLength));
 }
 
-static void get_file_stream_info(struct ksmbd_work *work,
+static int get_file_stream_info(struct ksmbd_work *work,
 				 struct smb2_query_info_rsp *rsp,
 				 struct ksmbd_file *fp,
 				 void *rsp_org)
@@ -4538,9 +4658,13 @@
 	int nbytes = 0, streamlen, stream_name_len, next, idx = 0;
 	int buf_free_len;
 	struct smb2_query_info_req *req = ksmbd_req_buf_next(work);
+	int ret;
+
+	ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+	if (ret)
+		return ret;
 
-	generic_fillattr(file_mnt_idmap(fp->filp), file_inode(fp->filp),
-			 &stat);
 	file_info = (struct smb2_file_stream_info *)rsp->Buffer;
 
 	buf_free_len =
@@ -4621,31 +4745,37 @@
 	kvfree(xattr_list);
 
 	rsp->OutputBufferLength = cpu_to_le32(nbytes);
-	inc_rfc1001_len(rsp_org, nbytes);
+
+	return 0;
 }
 
-static void get_file_internal_info(struct smb2_query_info_rsp *rsp,
+static int get_file_internal_info(struct smb2_query_info_rsp *rsp,
 				   struct ksmbd_file *fp, void *rsp_org)
 {
 	struct smb2_file_internal_info *file_info;
 	struct kstat stat;
+	int ret;
+
+	ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+	if (ret)
+		return ret;
 
-	generic_fillattr(file_mnt_idmap(fp->filp), file_inode(fp->filp),
-			 &stat);
 	file_info = (struct smb2_file_internal_info *)rsp->Buffer;
 	file_info->IndexNumber = cpu_to_le64(stat.ino);
 	rsp->OutputBufferLength =
 		cpu_to_le32(sizeof(struct smb2_file_internal_info));
-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_internal_info));
+
+	return 0;
 }
 
 static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
 				      struct ksmbd_file *fp, void *rsp_org)
 {
 	struct smb2_file_ntwrk_info *file_info;
-	struct inode *inode;
 	struct kstat stat;
 	u64 time;
+	int ret;
 
 	if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
 		pr_err("no right to read the attributes : 0x%x\n",
@@ -4653,10 +4783,12 @@
 		return -EACCES;
 	}
 
-	file_info = (struct smb2_file_ntwrk_info *)rsp->Buffer;
+	ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+	if (ret)
+		return ret;
 
-	inode = file_inode(fp->filp);
-	generic_fillattr(file_mnt_idmap(fp->filp), inode, &stat);
+	file_info = (struct smb2_file_ntwrk_info *)rsp->Buffer;
 
 	file_info->CreationTime = cpu_to_le64(fp->create_time);
 	time = ksmbd_UnixTimeToNT(stat.atime);
@@ -4666,13 +4798,11 @@
 	time = ksmbd_UnixTimeToNT(stat.ctime);
 	file_info->ChangeTime = cpu_to_le64(time);
 	file_info->Attributes = fp->f_ci->m_fattr;
-	file_info->AllocationSize =
-		cpu_to_le64(inode->i_blocks << 9);
+	file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
 	file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
 	file_info->Reserved = cpu_to_le32(0);
 	rsp->OutputBufferLength =
 		cpu_to_le32(sizeof(struct smb2_file_ntwrk_info));
-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_ntwrk_info));
 	return 0;
 }
 
@@ -4684,7 +4814,6 @@
 	file_info->EASize = 0;
 	rsp->OutputBufferLength =
 		cpu_to_le32(sizeof(struct smb2_file_ea_info));
-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_ea_info));
 }
 
 static void get_file_position_info(struct smb2_query_info_rsp *rsp,
@@ -4696,7 +4825,6 @@
 	file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos);
 	rsp->OutputBufferLength =
 		cpu_to_le32(sizeof(struct smb2_file_pos_info));
-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_pos_info));
 }
 
 static void get_file_mode_info(struct smb2_query_info_rsp *rsp,
@@ -4708,17 +4836,19 @@
 	file_info->Mode = fp->coption & FILE_MODE_INFO_MASK;
 	rsp->OutputBufferLength =
 		cpu_to_le32(sizeof(struct smb2_file_mode_info));
-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_mode_info));
 }
 
-static void get_file_compression_info(struct smb2_query_info_rsp *rsp,
+static int get_file_compression_info(struct smb2_query_info_rsp *rsp,
 				      struct ksmbd_file *fp, void *rsp_org)
 {
 	struct smb2_file_comp_info *file_info;
 	struct kstat stat;
+	int ret;
 
-	generic_fillattr(file_mnt_idmap(fp->filp), file_inode(fp->filp),
-			 &stat);
+	ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+	if (ret)
+		return ret;
 
 	file_info = (struct smb2_file_comp_info *)rsp->Buffer;
 	file_info->CompressedFileSize = cpu_to_le64(stat.blocks << 9);
@@ -4730,7 +4860,8 @@
 
 	rsp->OutputBufferLength =
 		cpu_to_le32(sizeof(struct smb2_file_comp_info));
-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_comp_info));
+
+	return 0;
 }
 
 static int get_file_attribute_tag_info(struct smb2_query_info_rsp *rsp,
@@ -4749,7 +4880,6 @@
 	file_info->ReparseTag = 0;
 	rsp->OutputBufferLength =
 		cpu_to_le32(sizeof(struct smb2_file_attr_tag_info));
-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_attr_tag_info));
 	return 0;
 }
 
@@ -4761,24 +4891,31 @@
 	struct mnt_idmap *idmap = file_mnt_idmap(fp->filp);
 	vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
 	vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);
+	struct kstat stat;
 	u64 time;
 	int out_buf_len = sizeof(struct smb311_posix_qinfo) + 32;
+	int ret;
+
+	ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+	if (ret)
+		return ret;
 
 	file_info = (struct smb311_posix_qinfo *)rsp->Buffer;
 	file_info->CreationTime = cpu_to_le64(fp->create_time);
-	time = ksmbd_UnixTimeToNT(inode->i_atime);
+	time = ksmbd_UnixTimeToNT(stat.atime);
 	file_info->LastAccessTime = cpu_to_le64(time);
-	time = ksmbd_UnixTimeToNT(inode->i_mtime);
+	time = ksmbd_UnixTimeToNT(stat.mtime);
 	file_info->LastWriteTime = cpu_to_le64(time);
-	time = ksmbd_UnixTimeToNT(inode->i_ctime);
+	time = ksmbd_UnixTimeToNT(stat.ctime);
 	file_info->ChangeTime = cpu_to_le64(time);
 	file_info->DosAttributes = fp->f_ci->m_fattr;
-	file_info->Inode = cpu_to_le64(inode->i_ino);
-	file_info->EndOfFile = cpu_to_le64(inode->i_size);
-	file_info->AllocationSize = cpu_to_le64(inode->i_blocks << 9);
-	file_info->HardLinks = cpu_to_le32(inode->i_nlink);
-	file_info->Mode = cpu_to_le32(inode->i_mode & 0777);
-	file_info->DeviceId = cpu_to_le32(inode->i_rdev);
+	file_info->Inode = cpu_to_le64(stat.ino);
+	file_info->EndOfFile = cpu_to_le64(stat.size);
+	file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
+	file_info->HardLinks = cpu_to_le32(stat.nlink);
+	file_info->Mode = cpu_to_le32(stat.mode & 0777);
+	file_info->DeviceId = cpu_to_le32(stat.rdev);
 
 	/*
 	 * Sids(32) contain two sids(Domain sid(16), UNIX group sid(16)).
@@ -4791,8 +4928,8 @@
 		  SIDUNIX_GROUP, (struct smb_sid *)&file_info->Sids[16]);
 
 	rsp->OutputBufferLength = cpu_to_le32(out_buf_len);
-	inc_rfc1001_len(rsp_org, out_buf_len);
-	return out_buf_len;
+
+	return 0;
 }
 
 static int smb2_get_info_file(struct ksmbd_work *work,
@@ -4802,7 +4939,6 @@
 	struct ksmbd_file *fp;
 	int fileinfoclass = 0;
 	int rc = 0;
-	int file_infoclass_size;
 	unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
 
 	if (test_share_config_flag(work->tcon->share_conf,
@@ -4835,85 +4971,69 @@
 	switch (fileinfoclass) {
 	case FILE_ACCESS_INFORMATION:
 		get_file_access_info(rsp, fp, work->response_buf);
-		file_infoclass_size = FILE_ACCESS_INFORMATION_SIZE;
 		break;
 
 	case FILE_BASIC_INFORMATION:
 		rc = get_file_basic_info(rsp, fp, work->response_buf);
-		file_infoclass_size = FILE_BASIC_INFORMATION_SIZE;
 		break;
 
 	case FILE_STANDARD_INFORMATION:
-		get_file_standard_info(rsp, fp, work->response_buf);
-		file_infoclass_size = FILE_STANDARD_INFORMATION_SIZE;
+		rc = get_file_standard_info(rsp, fp, work->response_buf);
 		break;
 
 	case FILE_ALIGNMENT_INFORMATION:
 		get_file_alignment_info(rsp, work->response_buf);
-		file_infoclass_size = FILE_ALIGNMENT_INFORMATION_SIZE;
 		break;
 
 	case FILE_ALL_INFORMATION:
 		rc = get_file_all_info(work, rsp, fp, work->response_buf);
-		file_infoclass_size = FILE_ALL_INFORMATION_SIZE;
 		break;
 
 	case FILE_ALTERNATE_NAME_INFORMATION:
 		get_file_alternate_info(work, rsp, fp, work->response_buf);
-		file_infoclass_size = FILE_ALTERNATE_NAME_INFORMATION_SIZE;
 		break;
 
 	case FILE_STREAM_INFORMATION:
-		get_file_stream_info(work, rsp, fp, work->response_buf);
-		file_infoclass_size = FILE_STREAM_INFORMATION_SIZE;
+		rc = get_file_stream_info(work, rsp, fp, work->response_buf);
 		break;
 
 	case FILE_INTERNAL_INFORMATION:
-		get_file_internal_info(rsp, fp, work->response_buf);
-		file_infoclass_size = FILE_INTERNAL_INFORMATION_SIZE;
+		rc = get_file_internal_info(rsp, fp, work->response_buf);
 		break;
 
 	case FILE_NETWORK_OPEN_INFORMATION:
 		rc = get_file_network_open_info(rsp, fp, work->response_buf);
-		file_infoclass_size = FILE_NETWORK_OPEN_INFORMATION_SIZE;
 		break;
 
 	case FILE_EA_INFORMATION:
 		get_file_ea_info(rsp, work->response_buf);
-		file_infoclass_size = FILE_EA_INFORMATION_SIZE;
 		break;
 
 	case FILE_FULL_EA_INFORMATION:
 		rc = smb2_get_ea(work, fp, req, rsp, work->response_buf);
-		file_infoclass_size = FILE_FULL_EA_INFORMATION_SIZE;
 		break;
 
 	case FILE_POSITION_INFORMATION:
 		get_file_position_info(rsp, fp, work->response_buf);
-		file_infoclass_size = FILE_POSITION_INFORMATION_SIZE;
 		break;
 
 	case FILE_MODE_INFORMATION:
 		get_file_mode_info(rsp, fp, work->response_buf);
-		file_infoclass_size = FILE_MODE_INFORMATION_SIZE;
 		break;
 
 	case FILE_COMPRESSION_INFORMATION:
-		get_file_compression_info(rsp, fp, work->response_buf);
-		file_infoclass_size = FILE_COMPRESSION_INFORMATION_SIZE;
+		rc = get_file_compression_info(rsp, fp, work->response_buf);
 		break;
 
 	case FILE_ATTRIBUTE_TAG_INFORMATION:
 		rc = get_file_attribute_tag_info(rsp, fp, work->response_buf);
-		file_infoclass_size = FILE_ATTRIBUTE_TAG_INFORMATION_SIZE;
 		break;
 	case SMB_FIND_FILE_POSIX_INFO:
 		if (!work->tcon->posix_extensions) {
 			pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n");
 			rc = -EOPNOTSUPP;
 		} else {
-			file_infoclass_size = find_file_posix_info(rsp, fp,
-					work->response_buf);
+			rc = find_file_posix_info(rsp, fp, work->response_buf);
 		}
 		break;
 	default:
@@ -4923,8 +5043,7 @@
 	}
 	if (!rc)
 		rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
-				      rsp, work->response_buf,
-				      file_infoclass_size);
+				      rsp, work->response_buf);
 	ksmbd_fd_put(work, fp);
 	return rc;
 }
@@ -4940,7 +5059,6 @@
 	struct kstatfs stfs;
 	struct path path;
 	int rc = 0, len;
-	int fs_infoclass_size = 0;
 
 	if (!share->path)
 		return -EIO;
@@ -4970,8 +5088,6 @@
 		info->DeviceType = cpu_to_le32(stfs.f_type);
 		info->DeviceCharacteristics = cpu_to_le32(0x00000020);
 		rsp->OutputBufferLength = cpu_to_le32(8);
-		inc_rfc1001_len(work->response_buf, 8);
-		fs_infoclass_size = FS_DEVICE_INFORMATION_SIZE;
 		break;
 	}
 	case FS_ATTRIBUTE_INFORMATION:
@@ -5000,8 +5116,6 @@
 		info->FileSystemNameLen = cpu_to_le32(len);
 		sz = sizeof(struct filesystem_attribute_info) - 2 + len;
 		rsp->OutputBufferLength = cpu_to_le32(sz);
-		inc_rfc1001_len(work->response_buf, sz);
-		fs_infoclass_size = FS_ATTRIBUTE_INFORMATION_SIZE;
 		break;
 	}
 	case FS_VOLUME_INFORMATION:
@@ -5028,8 +5142,6 @@
 		info->Reserved = 0;
 		sz = sizeof(struct filesystem_vol_info) - 2 + len;
 		rsp->OutputBufferLength = cpu_to_le32(sz);
-		inc_rfc1001_len(work->response_buf, sz);
-		fs_infoclass_size = FS_VOLUME_INFORMATION_SIZE;
 		break;
 	}
 	case FS_SIZE_INFORMATION:
@@ -5042,8 +5154,6 @@
 		info->SectorsPerAllocationUnit = cpu_to_le32(1);
 		info->BytesPerSector = cpu_to_le32(stfs.f_bsize);
 		rsp->OutputBufferLength = cpu_to_le32(24);
-		inc_rfc1001_len(work->response_buf, 24);
-		fs_infoclass_size = FS_SIZE_INFORMATION_SIZE;
 		break;
 	}
 	case FS_FULL_SIZE_INFORMATION:
@@ -5059,8 +5169,6 @@
 		info->SectorsPerAllocationUnit = cpu_to_le32(1);
 		info->BytesPerSector = cpu_to_le32(stfs.f_bsize);
 		rsp->OutputBufferLength = cpu_to_le32(32);
-		inc_rfc1001_len(work->response_buf, 32);
-		fs_infoclass_size = FS_FULL_SIZE_INFORMATION_SIZE;
 		break;
 	}
 	case FS_OBJECT_ID_INFORMATION:
@@ -5080,8 +5188,6 @@
 		info->extended_info.rel_date = 0;
 		memcpy(info->extended_info.version_string, "1.1.0", strlen("1.1.0"));
 		rsp->OutputBufferLength = cpu_to_le32(64);
-		inc_rfc1001_len(work->response_buf, 64);
-		fs_infoclass_size = FS_OBJECT_ID_INFORMATION_SIZE;
 		break;
 	}
 	case FS_SECTOR_SIZE_INFORMATION:
@@ -5103,8 +5209,6 @@
 		info->ByteOffsetForSectorAlignment = 0;
 		info->ByteOffsetForPartitionAlignment = 0;
 		rsp->OutputBufferLength = cpu_to_le32(28);
-		inc_rfc1001_len(work->response_buf, 28);
-		fs_infoclass_size = FS_SECTOR_SIZE_INFORMATION_SIZE;
 		break;
 	}
 	case FS_CONTROL_INFORMATION:
@@ -5125,8 +5229,6 @@
 		info->DefaultQuotaLimit = cpu_to_le64(SMB2_NO_FID);
 		info->Padding = 0;
 		rsp->OutputBufferLength = cpu_to_le32(48);
-		inc_rfc1001_len(work->response_buf, 48);
-		fs_infoclass_size = FS_CONTROL_INFORMATION_SIZE;
 		break;
 	}
 	case FS_POSIX_INFORMATION:
@@ -5146,8 +5248,6 @@
 			info->TotalFileNodes = cpu_to_le64(stfs.f_files);
 			info->FreeFileNodes = cpu_to_le64(stfs.f_ffree);
 			rsp->OutputBufferLength = cpu_to_le32(56);
-			inc_rfc1001_len(work->response_buf, 56);
-			fs_infoclass_size = FS_POSIX_INFORMATION_SIZE;
 		}
 		break;
 	}
@@ -5156,8 +5256,7 @@
 		return -EOPNOTSUPP;
 	}
 	rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
-			      rsp, work->response_buf,
-			      fs_infoclass_size);
+			      rsp, work->response_buf);
 	path_put(&path);
 	return rc;
 }
@@ -5191,7 +5290,6 @@
 
 		secdesclen = sizeof(struct smb_ntsd);
 		rsp->OutputBufferLength = cpu_to_le32(secdesclen);
-		inc_rfc1001_len(work->response_buf, secdesclen);
 
 		return 0;
 	}
@@ -5236,7 +5334,6 @@
 		return rc;
 
 	rsp->OutputBufferLength = cpu_to_le32(secdesclen);
-	inc_rfc1001_len(work->response_buf, secdesclen);
 	return 0;
 }
 
@@ -5275,6 +5372,14 @@
 		rc = -EOPNOTSUPP;
 	}
 
+	if (!rc) {
+		rsp->StructureSize = cpu_to_le16(9);
+		rsp->OutputBufferOffset = cpu_to_le16(72);
+		rc = ksmbd_iov_pin_rsp(work, (void *)rsp,
+				       offsetof(struct smb2_query_info_rsp, Buffer) +
+					le32_to_cpu(rsp->OutputBufferLength));
+	}
+
 	if (rc < 0) {
 		if (rc == -EACCES)
 			rsp->hdr.Status = STATUS_ACCESS_DENIED;
@@ -5282,6 +5387,8 @@
 			rsp->hdr.Status = STATUS_FILE_CLOSED;
 		else if (rc == -EIO)
 			rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
+		else if (rc == -ENOMEM)
+			rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
 		else if (rc == -EOPNOTSUPP || rsp->hdr.Status == 0)
 			rsp->hdr.Status = STATUS_INVALID_INFO_CLASS;
 		smb2_set_err_rsp(work);
@@ -5290,9 +5397,6 @@
 			    rc);
 		return rc;
 	}
-	rsp->StructureSize = cpu_to_le16(9);
-	rsp->OutputBufferOffset = cpu_to_le16(72);
-	inc_rfc1001_len(work->response_buf, 8);
 	return 0;
 }
 
@@ -5305,8 +5409,10 @@
 static noinline int smb2_close_pipe(struct ksmbd_work *work)
 {
 	u64 id;
-	struct smb2_close_req *req = smb2_get_msg(work->request_buf);
-	struct smb2_close_rsp *rsp = smb2_get_msg(work->response_buf);
+	struct smb2_close_req *req;
+	struct smb2_close_rsp *rsp;
+
+	WORK_BUFFERS(work, req, rsp);
 
 	id = req->VolatileFileId;
 	ksmbd_session_rpc_close(work->sess, id);
@@ -5321,8 +5427,9 @@
 	rsp->AllocationSize = 0;
 	rsp->EndOfFile = 0;
 	rsp->Attributes = 0;
-	inc_rfc1001_len(work->response_buf, 60);
-	return 0;
+
+	return ksmbd_iov_pin_rsp(work, (void *)rsp,
+				 sizeof(struct smb2_close_rsp));
 }
 
 /**
@@ -5339,7 +5446,6 @@
 	struct smb2_close_rsp *rsp;
 	struct ksmbd_conn *conn = work->conn;
 	struct ksmbd_file *fp;
-	struct inode *inode;
 	u64 time;
 	int err = 0;
 
@@ -5394,24 +5500,33 @@
 	rsp->Reserved = 0;
 
 	if (req->Flags == SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB) {
+		struct kstat stat;
+		int ret;
+
 		fp = ksmbd_lookup_fd_fast(work, volatile_id);
 		if (!fp) {
 			err = -ENOENT;
 			goto out;
 		}
 
-		inode = file_inode(fp->filp);
+		ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+				  AT_STATX_SYNC_AS_STAT);
+		if (ret) {
+			ksmbd_fd_put(work, fp);
+			goto out;
+		}
+
 		rsp->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
-		rsp->AllocationSize = S_ISDIR(inode->i_mode) ? 0 :
-			cpu_to_le64(inode->i_blocks << 9);
-		rsp->EndOfFile = cpu_to_le64(inode->i_size);
+		rsp->AllocationSize = S_ISDIR(stat.mode) ? 0 :
+			cpu_to_le64(stat.blocks << 9);
+		rsp->EndOfFile = cpu_to_le64(stat.size);
 		rsp->Attributes = fp->f_ci->m_fattr;
 		rsp->CreationTime = cpu_to_le64(fp->create_time);
-		time = ksmbd_UnixTimeToNT(inode->i_atime);
+		time = ksmbd_UnixTimeToNT(stat.atime);
 		rsp->LastAccessTime = cpu_to_le64(time);
-		time = ksmbd_UnixTimeToNT(inode->i_mtime);
+		time = ksmbd_UnixTimeToNT(stat.mtime);
 		rsp->LastWriteTime = cpu_to_le64(time);
-		time = ksmbd_UnixTimeToNT(inode->i_ctime);
+		time = ksmbd_UnixTimeToNT(stat.ctime);
 		rsp->ChangeTime = cpu_to_le64(time);
 		ksmbd_fd_put(work, fp);
 	} else {
@@ -5427,15 +5542,17 @@
 
 	err = ksmbd_close_fd(work, volatile_id);
 out:
+	if (!err)
+		err = ksmbd_iov_pin_rsp(work, (void *)rsp,
+					sizeof(struct smb2_close_rsp));
+
 	if (err) {
 		if (rsp->hdr.Status == 0)
 			rsp->hdr.Status = STATUS_FILE_CLOSED;
 		smb2_set_err_rsp(work);
-	} else {
-		inc_rfc1001_len(work->response_buf, 60);
 	}
 
-	return 0;
+	return err;
 }
 
 /**
@@ -5448,10 +5565,12 @@
 {
 	struct smb2_echo_rsp *rsp = smb2_get_msg(work->response_buf);
 
+	if (work->next_smb2_rcv_hdr_off)
+		rsp = ksmbd_resp_buf_next(work);
+
 	rsp->StructureSize = cpu_to_le16(4);
 	rsp->Reserved = 0;
-	inc_rfc1001_len(work->response_buf, 4);
-	return 0;
+	return ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_echo_rsp));
 }
 
 static int smb2_rename(struct ksmbd_work *work,
@@ -5497,7 +5616,7 @@
 		rc = ksmbd_vfs_setxattr(file_mnt_idmap(fp->filp),
 					&fp->filp->f_path,
 					xattr_stream_name,
-					NULL, 0, 0);
+					NULL, 0, 0, true);
 		if (rc < 0) {
 			pr_err("failed to store stream name in xattr: %d\n",
 			       rc);
@@ -5519,6 +5638,8 @@
 		flags = RENAME_NOREPLACE;
 
 	rc = ksmbd_vfs_rename(work, &fp->filp->f_path, new_name, flags);
+	if (!rc)
+		smb_break_all_levII_oplock(work, fp, 0);
 out:
 	kfree(new_name);
 	return rc;
@@ -5531,7 +5652,7 @@
 			    struct nls_table *local_nls)
 {
 	char *link_name = NULL, *target_name = NULL, *pathname = NULL;
-	struct path path;
+	struct path path, parent_path;
 	bool file_present = false;
 	int rc;
 
@@ -5561,7 +5682,7 @@
 
 	ksmbd_debug(SMB, "target name is %s\n", target_name);
 	rc = ksmbd_vfs_kern_path_locked(work, link_name, LOOKUP_NO_SYMLINKS,
-					&path, 0);
+					&parent_path, &path, 0);
 	if (rc) {
 		if (rc != -ENOENT)
 			goto out;
@@ -5590,10 +5711,9 @@
 	if (rc)
 		rc = -EINVAL;
 out:
-	if (file_present) {
-		inode_unlock(d_inode(path.dentry->d_parent));
-		path_put(&path);
-	}
+	if (file_present)
+		ksmbd_vfs_kern_path_unlock(&parent_path, &path);
+
 	if (!IS_ERR(link_name))
 		kfree(link_name);
 	kfree(pathname);
@@ -5660,7 +5780,8 @@
 		da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
 			XATTR_DOSINFO_ITIME;
 
-		rc = ksmbd_vfs_set_dos_attrib_xattr(idmap, &filp->f_path, &da);
+		rc = ksmbd_vfs_set_dos_attrib_xattr(idmap, &filp->f_path, &da,
+				true);
 		if (rc)
 			ksmbd_debug(SMB,
 				    "failed to restore file attribute in EA\n");
@@ -5695,15 +5816,21 @@
 
 	loff_t alloc_blks;
 	struct inode *inode;
+	struct kstat stat;
 	int rc;
 
 	if (!(fp->daccess & FILE_WRITE_DATA_LE))
 		return -EACCES;
 
+	rc = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+			 AT_STATX_SYNC_AS_STAT);
+	if (rc)
+		return rc;
+
 	alloc_blks = (le64_to_cpu(file_alloc_info->AllocationSize) + 511) >> 9;
 	inode = file_inode(fp->filp);
 
-	if (alloc_blks > inode->i_blocks) {
+	if (alloc_blks > stat.blocks) {
 		smb_break_all_levII_oplock(work, fp, 1);
 		rc = vfs_fallocate(fp->filp, FALLOC_FL_KEEP_SIZE, 0,
 				   alloc_blks * 512);
@@ -5711,7 +5838,7 @@
 			pr_err("vfs_fallocate is failed : %d\n", rc);
 			return rc;
 		}
-	} else if (alloc_blks < inode->i_blocks) {
+	} else if (alloc_blks < stat.blocks) {
 		loff_t size;
 
 		/*
@@ -5866,6 +5993,7 @@
 			      struct ksmbd_share_config *share)
 {
 	unsigned int buf_len = le32_to_cpu(req->BufferLength);
+	char *buffer = (char *)req + le16_to_cpu(req->BufferOffset);
 
 	switch (req->FileInfoClass) {
 	case FILE_BASIC_INFORMATION:
@@ -5873,7 +6001,7 @@
 		if (buf_len < sizeof(struct smb2_file_basic_info))
 			return -EINVAL;
 
-		return set_file_basic_info(fp, (struct smb2_file_basic_info *)req->Buffer, share);
+		return set_file_basic_info(fp, (struct smb2_file_basic_info *)buffer, share);
 	}
 	case FILE_ALLOCATION_INFORMATION:
 	{
@@ -5881,7 +6009,7 @@
 			return -EINVAL;
 
 		return set_file_allocation_info(work, fp,
-						(struct smb2_file_alloc_info *)req->Buffer);
+						(struct smb2_file_alloc_info *)buffer);
 	}
 	case FILE_END_OF_FILE_INFORMATION:
 	{
@@ -5889,21 +6017,15 @@
 			return -EINVAL;
 
 		return set_end_of_file_info(work, fp,
-					    (struct smb2_file_eof_info *)req->Buffer);
+					    (struct smb2_file_eof_info *)buffer);
 	}
 	case FILE_RENAME_INFORMATION:
 	{
-		if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
-			ksmbd_debug(SMB,
-				    "User does not have write permission\n");
-			return -EACCES;
-		}
-
 		if (buf_len < sizeof(struct smb2_file_rename_info))
 			return -EINVAL;
 
 		return set_rename_info(work, fp,
-				       (struct smb2_file_rename_info *)req->Buffer,
+				       (struct smb2_file_rename_info *)buffer,
 				       buf_len);
 	}
 	case FILE_LINK_INFORMATION:
@@ -5912,23 +6034,17 @@
 			return -EINVAL;
 
 		return smb2_create_link(work, work->tcon->share_conf,
-					(struct smb2_file_link_info *)req->Buffer,
+					(struct smb2_file_link_info *)buffer,
 					buf_len, fp->filp,
 					work->conn->local_nls);
 	}
 	case FILE_DISPOSITION_INFORMATION:
 	{
-		if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
-			ksmbd_debug(SMB,
-				    "User does not have write permission\n");
-			return -EACCES;
-		}
-
 		if (buf_len < sizeof(struct smb2_file_disposition_info))
 			return -EINVAL;
 
 		return set_file_disposition_info(fp,
-						 (struct smb2_file_disposition_info *)req->Buffer);
+						 (struct smb2_file_disposition_info *)buffer);
 	}
 	case FILE_FULL_EA_INFORMATION:
 	{
@@ -5941,22 +6057,22 @@
 		if (buf_len < sizeof(struct smb2_ea_info))
 			return -EINVAL;
 
-		return smb2_set_ea((struct smb2_ea_info *)req->Buffer,
-				   buf_len, &fp->filp->f_path);
+		return smb2_set_ea((struct smb2_ea_info *)buffer,
+				   buf_len, &fp->filp->f_path, true);
 	}
 	case FILE_POSITION_INFORMATION:
 	{
 		if (buf_len < sizeof(struct smb2_file_pos_info))
 			return -EINVAL;
 
-		return set_file_position_info(fp, (struct smb2_file_pos_info *)req->Buffer);
+		return set_file_position_info(fp, (struct smb2_file_pos_info *)buffer);
 	}
 	case FILE_MODE_INFORMATION:
 	{
 		if (buf_len < sizeof(struct smb2_file_mode_info))
 			return -EINVAL;
 
-		return set_file_mode_info(fp, (struct smb2_file_mode_info *)req->Buffer);
+		return set_file_mode_info(fp, (struct smb2_file_mode_info *)buffer);
 	}
 	}
 
@@ -5972,7 +6088,7 @@
 	fp->saccess |= FILE_SHARE_DELETE_LE;
 
 	return set_info_sec(fp->conn, fp->tcon, &fp->filp->f_path, pntsd,
-			buf_len, false);
+			buf_len, false, true);
 }
 
 /**
@@ -5985,7 +6101,7 @@
 {
 	struct smb2_set_info_req *req;
 	struct smb2_set_info_rsp *rsp;
-	struct ksmbd_file *fp;
+	struct ksmbd_file *fp = NULL;
 	int rc = 0;
 	unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
 
@@ -6005,6 +6121,13 @@
 		rsp = smb2_get_msg(work->response_buf);
 	}
 
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		ksmbd_debug(SMB, "User does not have write permission\n");
+		pr_err("User does not have write permission\n");
+		rc = -EACCES;
+		goto err_out;
+	}
+
 	if (!has_file_id(id)) {
 		id = req->VolatileFileId;
 		pid = req->PersistentFileId;
@@ -6030,7 +6153,7 @@
 		}
 		rc = smb2_set_info_sec(fp,
 				       le32_to_cpu(req->AdditionalInformation),
-				       req->Buffer,
+				       (char *)req + le16_to_cpu(req->BufferOffset),
 				       le32_to_cpu(req->BufferLength));
 		ksmbd_revert_fsids(work);
 		break;
@@ -6042,7 +6165,10 @@
 		goto err_out;
 
 	rsp->StructureSize = cpu_to_le16(2);
-	inc_rfc1001_len(work->response_buf, 2);
+	rc = ksmbd_iov_pin_rsp(work, (void *)rsp,
+			       sizeof(struct smb2_set_info_rsp));
+	if (rc)
+		goto err_out;
 	ksmbd_fd_put(work, fp);
 	return 0;
 
@@ -6082,33 +6208,45 @@
 	int nbytes = 0, err;
 	u64 id;
 	struct ksmbd_rpc_command *rpc_resp;
-	struct smb2_read_req *req = smb2_get_msg(work->request_buf);
-	struct smb2_read_rsp *rsp = smb2_get_msg(work->response_buf);
+	struct smb2_read_req *req;
+	struct smb2_read_rsp *rsp;
+
+	WORK_BUFFERS(work, req, rsp);
 
 	id = req->VolatileFileId;
 
-	inc_rfc1001_len(work->response_buf, 16);
 	rpc_resp = ksmbd_rpc_read(work->sess, id);
 	if (rpc_resp) {
+		void *aux_payload_buf;
+
 		if (rpc_resp->flags != KSMBD_RPC_OK) {
 			err = -EINVAL;
 			goto out;
 		}
 
-		work->aux_payload_buf =
-			kvmalloc(rpc_resp->payload_sz, GFP_KERNEL | __GFP_ZERO);
-		if (!work->aux_payload_buf) {
+		aux_payload_buf =
+			kvmalloc(rpc_resp->payload_sz, GFP_KERNEL);
+		if (!aux_payload_buf) {
 			err = -ENOMEM;
 			goto out;
 		}
 
-		memcpy(work->aux_payload_buf, rpc_resp->payload,
-		       rpc_resp->payload_sz);
+		memcpy(aux_payload_buf, rpc_resp->payload, rpc_resp->payload_sz);
 
 		nbytes = rpc_resp->payload_sz;
-		work->resp_hdr_sz = get_rfc1002_len(work->response_buf) + 4;
-		work->aux_payload_sz = nbytes;
+		err = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
+					     offsetof(struct smb2_read_rsp, Buffer),
+					     aux_payload_buf, nbytes);
+		if (err) {
+			kvfree(aux_payload_buf);
+			goto out;
+		}
 		kvfree(rpc_resp);
+	} else {
+		err = ksmbd_iov_pin_rsp(work, (void *)rsp,
+					offsetof(struct smb2_read_rsp, Buffer));
+		if (err)
+			goto out;
 	}
 
 	rsp->StructureSize = cpu_to_le16(17);
@@ -6117,7 +6255,6 @@
 	rsp->DataLength = cpu_to_le32(nbytes);
 	rsp->DataRemaining = 0;
 	rsp->Flags = 0;
-	inc_rfc1001_len(work->response_buf, nbytes);
 	return 0;
 
 out:
@@ -6191,8 +6328,8 @@
 	int err = 0;
 	bool is_rdma_channel = false;
 	unsigned int max_read_size = conn->vals->max_read_size;
-
-	WORK_BUFFERS(work, req, rsp);
+	unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
+	void *aux_payload_buf;
 
 	if (test_share_config_flag(work->tcon->share_conf,
 				   KSMBD_SHARE_FLAG_PIPE)) {
@@ -6200,6 +6337,25 @@
 		return smb2_read_pipe(work);
 	}
 
+	if (work->next_smb2_rcv_hdr_off) {
+		req = ksmbd_req_buf_next(work);
+		rsp = ksmbd_resp_buf_next(work);
+		if (!has_file_id(req->VolatileFileId)) {
+			ksmbd_debug(SMB, "Compound request set FID = %llu\n",
+					work->compound_fid);
+			id = work->compound_fid;
+			pid = work->compound_pfid;
+		}
+	} else {
+		req = smb2_get_msg(work->request_buf);
+		rsp = smb2_get_msg(work->response_buf);
+	}
+
+	if (!has_file_id(id)) {
+		id = req->VolatileFileId;
+		pid = req->PersistentFileId;
+	}
+
 	if (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE ||
 	    req->Channel == SMB2_CHANNEL_RDMA_V1) {
 		is_rdma_channel = true;
@@ -6222,7 +6378,7 @@
 			goto out;
 	}
 
-	fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
+	fp = ksmbd_lookup_fd_slow(work, id, pid);
 	if (!fp) {
 		err = -ENOENT;
 		goto out;
@@ -6248,21 +6404,20 @@
 	ksmbd_debug(SMB, "filename %pD, offset %lld, len %zu\n",
 		    fp->filp, offset, length);
 
-	work->aux_payload_buf = kvmalloc(length, GFP_KERNEL | __GFP_ZERO);
-	if (!work->aux_payload_buf) {
+	aux_payload_buf = kvzalloc(length, GFP_KERNEL);
+	if (!aux_payload_buf) {
 		err = -ENOMEM;
 		goto out;
 	}
 
-	nbytes = ksmbd_vfs_read(work, fp, length, &offset);
+	nbytes = ksmbd_vfs_read(work, fp, length, &offset, aux_payload_buf);
 	if (nbytes < 0) {
 		err = nbytes;
 		goto out;
 	}
 
 	if ((nbytes == 0 && length != 0) || nbytes < mincount) {
-		kvfree(work->aux_payload_buf);
-		work->aux_payload_buf = NULL;
+		kvfree(aux_payload_buf);
 		rsp->hdr.Status = STATUS_END_OF_FILE;
 		smb2_set_err_rsp(work);
 		ksmbd_fd_put(work, fp);
@@ -6275,11 +6430,10 @@
 	if (is_rdma_channel == true) {
 		/* write data to the client using rdma channel */
 		remain_bytes = smb2_read_rdma_channel(work, req,
-						      work->aux_payload_buf,
+						      aux_payload_buf,
 						      nbytes);
-		kvfree(work->aux_payload_buf);
-		work->aux_payload_buf = NULL;
-
+		kvfree(aux_payload_buf);
+		aux_payload_buf = NULL;
 		nbytes = 0;
 		if (remain_bytes < 0) {
 			err = (int)remain_bytes;
@@ -6293,10 +6447,13 @@
 	rsp->DataLength = cpu_to_le32(nbytes);
 	rsp->DataRemaining = cpu_to_le32(remain_bytes);
 	rsp->Flags = 0;
-	inc_rfc1001_len(work->response_buf, 16);
-	work->resp_hdr_sz = get_rfc1002_len(work->response_buf) + 4;
-	work->aux_payload_sz = nbytes;
-	inc_rfc1001_len(work->response_buf, nbytes);
+	err = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
+				     offsetof(struct smb2_read_rsp, Buffer),
+				     aux_payload_buf, nbytes);
+	if (err) {
+		kvfree(aux_payload_buf);
+		goto out;
+	}
 	ksmbd_fd_put(work, fp);
 	return 0;
 
@@ -6331,14 +6488,16 @@
  */
 static noinline int smb2_write_pipe(struct ksmbd_work *work)
 {
-	struct smb2_write_req *req = smb2_get_msg(work->request_buf);
-	struct smb2_write_rsp *rsp = smb2_get_msg(work->response_buf);
+	struct smb2_write_req *req;
+	struct smb2_write_rsp *rsp;
 	struct ksmbd_rpc_command *rpc_resp;
 	u64 id = 0;
 	int err = 0, ret = 0;
 	char *data_buf;
 	size_t length;
 
+	WORK_BUFFERS(work, req, rsp);
+
 	length = le32_to_cpu(req->Length);
 	id = req->VolatileFileId;
 
@@ -6377,8 +6536,8 @@
 	rsp->DataLength = cpu_to_le32(length);
 	rsp->DataRemaining = 0;
 	rsp->Reserved2 = 0;
-	inc_rfc1001_len(work->response_buf, 16);
-	return 0;
+	err = ksmbd_iov_pin_rsp(work, (void *)rsp,
+				offsetof(struct smb2_write_rsp, Buffer));
 out:
 	if (err) {
 		rsp->hdr.Status = STATUS_INVALID_HANDLE;
@@ -6397,7 +6556,7 @@
 	int ret;
 	ssize_t nbytes;
 
-	data_buf = kvmalloc(length, GFP_KERNEL | __GFP_ZERO);
+	data_buf = kvzalloc(length, GFP_KERNEL);
 	if (!data_buf)
 		return -ENOMEM;
 
@@ -6534,7 +6693,9 @@
 	rsp->DataLength = cpu_to_le32(nbytes);
 	rsp->DataRemaining = 0;
 	rsp->Reserved2 = 0;
-	inc_rfc1001_len(work->response_buf, 16);
+	err = ksmbd_iov_pin_rsp(work, rsp, offsetof(struct smb2_write_rsp, Buffer));
+	if (err)
+		goto out;
 	ksmbd_fd_put(work, fp);
 	return 0;
 
@@ -6581,15 +6742,11 @@
 
 	rsp->StructureSize = cpu_to_le16(4);
 	rsp->Reserved = 0;
-	inc_rfc1001_len(work->response_buf, 4);
-	return 0;
+	return ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_flush_rsp));
 
 out:
-	if (err) {
 		rsp->hdr.Status = STATUS_INVALID_HANDLE;
 		smb2_set_err_rsp(work);
-	}
-
 	return err;
 }
 
@@ -6607,6 +6764,9 @@
 	struct ksmbd_work *iter;
 	struct list_head *command_list;
 
+	if (work->next_smb2_rcv_hdr_off)
+		hdr = ksmbd_resp_buf_next(work);
+
 	ksmbd_debug(SMB, "smb2 cancel called on mid %llu, async flags 0x%x\n",
 		    hdr->MessageId, hdr->Flags);
 
@@ -6766,8 +6926,8 @@
  */
 int smb2_lock(struct ksmbd_work *work)
 {
-	struct smb2_lock_req *req = smb2_get_msg(work->request_buf);
-	struct smb2_lock_rsp *rsp = smb2_get_msg(work->response_buf);
+	struct smb2_lock_req *req;
+	struct smb2_lock_rsp *rsp;
 	struct smb2_lock_element *lock_ele;
 	struct ksmbd_file *fp = NULL;
 	struct file_lock *flock = NULL;
@@ -6784,6 +6944,8 @@
 	LIST_HEAD(rollback_list);
 	int prior_lock = 0;
 
+	WORK_BUFFERS(work, req, rsp);
+
 	ksmbd_debug(SMB, "Received lock request\n");
 	fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
 	if (!fp) {
@@ -6989,10 +7151,6 @@
 
 				ksmbd_debug(SMB,
 					    "would have to wait for getting lock\n");
-				spin_lock(&work->conn->llist_lock);
-				list_add_tail(&smb_lock->clist,
-					      &work->conn->lock_list);
-				spin_unlock(&work->conn->llist_lock);
 				list_add(&smb_lock->llist, &rollback_list);
 
 				argv = kmalloc(sizeof(void *), GFP_KERNEL);
@@ -7006,6 +7164,7 @@
 						      smb2_remove_blocked_lock,
 						      argv);
 				if (rc) {
+					kfree(argv);
 					err = -ENOMEM;
 					goto out;
 				}
@@ -7023,9 +7182,6 @@
 
 				if (work->state != KSMBD_WORK_ACTIVE) {
 					list_del(&smb_lock->llist);
-					spin_lock(&work->conn->llist_lock);
-					list_del(&smb_lock->clist);
-					spin_unlock(&work->conn->llist_lock);
 					locks_free_lock(flock);
 
 					if (work->state == KSMBD_WORK_CANCELLED) {
@@ -7038,8 +7194,6 @@
 						goto out;
 					}
 
-					init_smb2_rsp_hdr(work);
-					smb2_set_err_rsp(work);
 					rsp->hdr.Status =
 						STATUS_RANGE_NOT_LOCKED;
 					kfree(smb_lock);
@@ -7047,19 +7201,16 @@
 				}
 
 				list_del(&smb_lock->llist);
-				spin_lock(&work->conn->llist_lock);
-				list_del(&smb_lock->clist);
-				spin_unlock(&work->conn->llist_lock);
 				release_async_work(work);
 				goto retry;
 			} else if (!rc) {
+				list_add(&smb_lock->llist, &rollback_list);
 				spin_lock(&work->conn->llist_lock);
 				list_add_tail(&smb_lock->clist,
 					      &work->conn->lock_list);
 				list_add_tail(&smb_lock->flist,
 					      &fp->lock_list);
 				spin_unlock(&work->conn->llist_lock);
-				list_add(&smb_lock->llist, &rollback_list);
 				ksmbd_debug(SMB, "successful in taking lock\n");
 			} else {
 				goto out;
@@ -7074,7 +7225,10 @@
 	ksmbd_debug(SMB, "successful in taking lock\n");
 	rsp->hdr.Status = STATUS_SUCCESS;
 	rsp->Reserved = 0;
-	inc_rfc1001_len(work->response_buf, 4);
+	err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_lock_rsp));
+	if (err)
+		goto out;
+
 	ksmbd_fd_put(work, fp);
 	return 0;
 
@@ -7445,7 +7599,7 @@
 				 struct smb2_ioctl_rsp *rsp)
 {
 	struct ksmbd_rpc_command *rpc_resp;
-	char *data_buf = (char *)&req->Buffer[0];
+	char *data_buf = (char *)req + le32_to_cpu(req->InputOffset);
 	int nbytes = 0;
 
 	rpc_resp = ksmbd_rpc_ioctl(work->sess, id, data_buf,
@@ -7515,7 +7669,8 @@
 
 		da.attr = le32_to_cpu(fp->f_ci->m_fattr);
 		ret = ksmbd_vfs_set_dos_attrib_xattr(idmap,
-						     &fp->filp->f_path, &da);
+						     &fp->filp->f_path,
+						     &da, true);
 		if (ret)
 			fp->f_ci->m_fattr = old_fattr;
 	}
@@ -7557,6 +7712,7 @@
 	u64 id = KSMBD_NO_FID;
 	struct ksmbd_conn *conn = work->conn;
 	int ret = 0;
+	char *buffer;
 
 	if (work->next_smb2_rcv_hdr_off) {
 		req = ksmbd_req_buf_next(work);
@@ -7579,6 +7735,8 @@
 		goto out;
 	}
 
+	buffer = (char *)req + le32_to_cpu(req->InputOffset);
+
 	cnt_code = le32_to_cpu(req->CtlCode);
 	ret = smb2_calc_max_out_buf_len(work, 48,
 					le32_to_cpu(req->MaxOutputResponse));
@@ -7636,7 +7794,7 @@
 		}
 
 		ret = fsctl_validate_negotiate_info(conn,
-			(struct validate_negotiate_info_req *)&req->Buffer[0],
+			(struct validate_negotiate_info_req *)buffer,
 			(struct validate_negotiate_info_rsp *)&rsp->Buffer[0],
 			in_buf_len);
 		if (ret < 0)
@@ -7689,7 +7847,7 @@
 		rsp->VolatileFileId = req->VolatileFileId;
 		rsp->PersistentFileId = req->PersistentFileId;
 		fsctl_copychunk(work,
-				(struct copychunk_ioctl_req *)&req->Buffer[0],
+				(struct copychunk_ioctl_req *)buffer,
 				le32_to_cpu(req->CtlCode),
 				le32_to_cpu(req->InputCount),
 				req->VolatileFileId,
@@ -7702,8 +7860,7 @@
 			goto out;
 		}
 
-		ret = fsctl_set_sparse(work, id,
-				       (struct file_sparse *)&req->Buffer[0]);
+		ret = fsctl_set_sparse(work, id, (struct file_sparse *)buffer);
 		if (ret < 0)
 			goto out;
 		break;
@@ -7726,7 +7883,7 @@
 		}
 
 		zero_data =
-			(struct file_zero_data_information *)&req->Buffer[0];
+			(struct file_zero_data_information *)buffer;
 
 		off = le64_to_cpu(zero_data->FileOffset);
 		bfz = le64_to_cpu(zero_data->BeyondFinalZero);
@@ -7757,7 +7914,7 @@
 		}
 
 		ret = fsctl_query_allocated_ranges(work, id,
-			(struct file_allocated_range_buffer *)&req->Buffer[0],
+			(struct file_allocated_range_buffer *)buffer,
 			(struct file_allocated_range_buffer *)&rsp->Buffer[0],
 			out_buf_len /
 			sizeof(struct file_allocated_range_buffer), &nbytes);
@@ -7801,7 +7958,7 @@
 			goto out;
 		}
 
-		dup_ext = (struct duplicate_extents_to_file *)&req->Buffer[0];
+		dup_ext = (struct duplicate_extents_to_file *)buffer;
 
 		fp_in = ksmbd_lookup_fd_slow(work, dup_ext->VolatileFileHandle,
 					     dup_ext->PersistentFileHandle);
@@ -7870,9 +8027,9 @@
 	rsp->Reserved = cpu_to_le16(0);
 	rsp->Flags = cpu_to_le32(0);
 	rsp->Reserved2 = cpu_to_le32(0);
-	inc_rfc1001_len(work->response_buf, 48 + nbytes);
-
-	return 0;
+	ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_ioctl_rsp) + nbytes);
+	if (!ret)
+		return ret;
 
 out:
 	if (ret == -EACCES)
@@ -7897,8 +8054,8 @@
  */
 static void smb20_oplock_break_ack(struct ksmbd_work *work)
 {
-	struct smb2_oplock_break *req = smb2_get_msg(work->request_buf);
-	struct smb2_oplock_break *rsp = smb2_get_msg(work->response_buf);
+	struct smb2_oplock_break *req;
+	struct smb2_oplock_break *rsp;
 	struct ksmbd_file *fp;
 	struct oplock_info *opinfo = NULL;
 	__le32 err = 0;
@@ -7907,6 +8064,8 @@
 	char req_oplevel = 0, rsp_oplevel = 0;
 	unsigned int oplock_change_type;
 
+	WORK_BUFFERS(work, req, rsp);
+
 	volatile_id = req->VolatileFid;
 	persistent_id = req->PersistentFid;
 	req_oplevel = req->OplockLevel;
@@ -7994,10 +8153,10 @@
 		goto err_out;
 	}
 
-	opinfo_put(opinfo);
-	ksmbd_fd_put(work, fp);
 	opinfo->op_state = OPLOCK_STATE_NONE;
 	wake_up_interruptible_all(&opinfo->oplock_q);
+	opinfo_put(opinfo);
+	ksmbd_fd_put(work, fp);
 
 	rsp->StructureSize = cpu_to_le16(24);
 	rsp->OplockLevel = rsp_oplevel;
@@ -8005,7 +8164,8 @@
 	rsp->Reserved2 = 0;
 	rsp->VolatileFid = volatile_id;
 	rsp->PersistentFid = persistent_id;
-	inc_rfc1001_len(work->response_buf, 24);
+	ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_oplock_break));
+	if (!ret)
 	return;
 
 err_out:
@@ -8041,8 +8201,8 @@
 static void smb21_lease_break_ack(struct ksmbd_work *work)
 {
 	struct ksmbd_conn *conn = work->conn;
-	struct smb2_lease_ack *req = smb2_get_msg(work->request_buf);
-	struct smb2_lease_ack *rsp = smb2_get_msg(work->response_buf);
+	struct smb2_lease_ack *req;
+	struct smb2_lease_ack *rsp;
 	struct oplock_info *opinfo;
 	__le32 err = 0;
 	int ret = 0;
@@ -8050,6 +8210,8 @@
 	__le32 lease_state;
 	struct lease *lease;
 
+	WORK_BUFFERS(work, req, rsp);
+
 	ksmbd_debug(OPLOCK, "smb21 lease break, lease state(0x%x)\n",
 		    le32_to_cpu(req->LeaseState));
 	opinfo = lookup_lease_in_table(conn, req->LeaseKey);
@@ -8136,6 +8298,11 @@
 			    le32_to_cpu(req->LeaseState));
 	}
 
+	if (ret < 0) {
+		rsp->hdr.Status = err;
+		goto err_out;
+	}
+
 	lease_state = lease->state;
 	opinfo->op_state = OPLOCK_STATE_NONE;
 	wake_up_interruptible_all(&opinfo->oplock_q);
@@ -8143,22 +8310,17 @@
 	wake_up_interruptible_all(&opinfo->oplock_brk);
 	opinfo_put(opinfo);
 
-	if (ret < 0) {
-		rsp->hdr.Status = err;
-		goto err_out;
-	}
-
 	rsp->StructureSize = cpu_to_le16(36);
 	rsp->Reserved = 0;
 	rsp->Flags = 0;
 	memcpy(rsp->LeaseKey, req->LeaseKey, 16);
 	rsp->LeaseState = lease_state;
 	rsp->LeaseDuration = 0;
-	inc_rfc1001_len(work->response_buf, 36);
+	ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_lease_ack));
+	if (!ret)
 	return;
 
 err_out:
-	opinfo->op_state = OPLOCK_STATE_NONE;
 	wake_up_interruptible_all(&opinfo->oplock_q);
 	atomic_dec(&opinfo->breaking_cnt);
 	wake_up_interruptible_all(&opinfo->oplock_brk);
@@ -8175,8 +8337,10 @@
  */
 int smb2_oplock_break(struct ksmbd_work *work)
 {
-	struct smb2_oplock_break *req = smb2_get_msg(work->request_buf);
-	struct smb2_oplock_break *rsp = smb2_get_msg(work->response_buf);
+	struct smb2_oplock_break *req;
+	struct smb2_oplock_break *rsp;
+
+	WORK_BUFFERS(work, req, rsp);
 
 	switch (le16_to_cpu(req->StructureSize)) {
 	case OP_BREAK_STRUCT_SIZE_20:
@@ -8291,43 +8455,19 @@
 void smb2_set_sign_rsp(struct ksmbd_work *work)
 {
 	struct smb2_hdr *hdr;
-	struct smb2_hdr *req_hdr;
 	char signature[SMB2_HMACSHA256_SIZE];
-	struct kvec iov[2];
-	size_t len;
+	struct kvec *iov;
 	int n_vec = 1;
 
-	hdr = smb2_get_msg(work->response_buf);
-	if (work->next_smb2_rsp_hdr_off)
-		hdr = ksmbd_resp_buf_next(work);
-
-	req_hdr = ksmbd_req_buf_next(work);
-
-	if (!work->next_smb2_rsp_hdr_off) {
-		len = get_rfc1002_len(work->response_buf);
-		if (req_hdr->NextCommand)
-			len = ALIGN(len, 8);
-	} else {
-		len = get_rfc1002_len(work->response_buf) -
-			work->next_smb2_rsp_hdr_off;
-		len = ALIGN(len, 8);
-	}
-
-	if (req_hdr->NextCommand)
-		hdr->NextCommand = cpu_to_le32(len);
-
+	hdr = ksmbd_resp_buf_curr(work);
 	hdr->Flags |= SMB2_FLAGS_SIGNED;
 	memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE);
 
-	iov[0].iov_base = (char *)&hdr->ProtocolId;
-	iov[0].iov_len = len;
-
-	if (work->aux_payload_sz) {
-		iov[0].iov_len -= work->aux_payload_sz;
-
-		iov[1].iov_base = work->aux_payload_buf;
-		iov[1].iov_len = work->aux_payload_sz;
+	if (hdr->Command == SMB2_READ) {
+		iov = &work->iov[work->iov_idx - 1];
 		n_vec++;
+	} else {
+		iov = &work->iov[work->iov_idx];
 	}
 
 	if (!ksmbd_sign_smb2_pdu(work->conn, work->sess->sess_key, iov, n_vec,
@@ -8403,29 +8543,14 @@
 void smb3_set_sign_rsp(struct ksmbd_work *work)
 {
 	struct ksmbd_conn *conn = work->conn;
-	struct smb2_hdr *req_hdr, *hdr;
+	struct smb2_hdr *hdr;
 	struct channel *chann;
 	char signature[SMB2_CMACAES_SIZE];
-	struct kvec iov[2];
+	struct kvec *iov;
 	int n_vec = 1;
-	size_t len;
 	char *signing_key;
 
-	hdr = smb2_get_msg(work->response_buf);
-	if (work->next_smb2_rsp_hdr_off)
-		hdr = ksmbd_resp_buf_next(work);
-
-	req_hdr = ksmbd_req_buf_next(work);
-
-	if (!work->next_smb2_rsp_hdr_off) {
-		len = get_rfc1002_len(work->response_buf);
-		if (req_hdr->NextCommand)
-			len = ALIGN(len, 8);
-	} else {
-		len = get_rfc1002_len(work->response_buf) -
-			work->next_smb2_rsp_hdr_off;
-		len = ALIGN(len, 8);
-	}
+	hdr = ksmbd_resp_buf_curr(work);
 
 	if (conn->binding == false &&
 	    le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
@@ -8441,21 +8566,18 @@
 	if (!signing_key)
 		return;
 
-	if (req_hdr->NextCommand)
-		hdr->NextCommand = cpu_to_le32(len);
-
 	hdr->Flags |= SMB2_FLAGS_SIGNED;
 	memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE);
-	iov[0].iov_base = (char *)&hdr->ProtocolId;
-	iov[0].iov_len = len;
-	if (work->aux_payload_sz) {
-		iov[0].iov_len -= work->aux_payload_sz;
-		iov[1].iov_base = work->aux_payload_buf;
-		iov[1].iov_len = work->aux_payload_sz;
+
+	if (hdr->Command == SMB2_READ) {
+		iov = &work->iov[work->iov_idx - 1];
 		n_vec++;
+	} else {
+		iov = &work->iov[work->iov_idx];
 	}
 
-	if (!ksmbd_sign_smb3_pdu(conn, signing_key, iov, n_vec, signature))
+	if (!ksmbd_sign_smb3_pdu(conn, signing_key, iov, n_vec,
+				 signature))
 		memcpy(hdr->Signature, signature, SMB2_SIGNATURE_SIZE);
 }
 
@@ -8522,45 +8644,22 @@
 
 int smb3_encrypt_resp(struct ksmbd_work *work)
 {
-	char *buf = work->response_buf;
-	struct kvec iov[3];
+	struct kvec *iov = work->iov;
 	int rc = -ENOMEM;
-	int buf_size = 0, rq_nvec = 2 + (work->aux_payload_sz ? 1 : 0);
-
-	if (ARRAY_SIZE(iov) < rq_nvec)
-		return -ENOMEM;
+	void *tr_buf;
 
-	work->tr_buf = kzalloc(sizeof(struct smb2_transform_hdr) + 4, GFP_KERNEL);
-	if (!work->tr_buf)
+	tr_buf = kzalloc(sizeof(struct smb2_transform_hdr) + 4, GFP_KERNEL);
+	if (!tr_buf)
 		return rc;
 
 	/* fill transform header */
-	fill_transform_hdr(work->tr_buf, buf, work->conn->cipher_type);
+	fill_transform_hdr(tr_buf, work->response_buf, work->conn->cipher_type);
 
-	iov[0].iov_base = work->tr_buf;
+	iov[0].iov_base = tr_buf;
 	iov[0].iov_len = sizeof(struct smb2_transform_hdr) + 4;
-	buf_size += iov[0].iov_len - 4;
-
-	iov[1].iov_base = buf + 4;
-	iov[1].iov_len = get_rfc1002_len(buf);
-	if (work->aux_payload_sz) {
-		iov[1].iov_len = work->resp_hdr_sz - 4;
+	work->tr_buf = tr_buf;
 
-		iov[2].iov_base = work->aux_payload_buf;
-		iov[2].iov_len = work->aux_payload_sz;
-		buf_size += iov[2].iov_len;
-	}
-	buf_size += iov[1].iov_len;
-	work->resp_hdr_sz = iov[1].iov_len;
-
-	rc = ksmbd_crypt_message(work, iov, rq_nvec, 1);
-	if (rc)
-		return rc;
-
-	memmove(buf, iov[1].iov_base, iov[1].iov_len);
-	*(__be32 *)work->tr_buf = cpu_to_be32(buf_size);
-
-	return rc;
+	return ksmbd_crypt_message(work, iov, work->iov_idx + 1, 1);
 }
 
 bool smb3_is_transform_hdr(void *buf)
@@ -8580,7 +8679,8 @@
 	struct smb2_transform_hdr *tr_hdr = smb2_get_msg(buf);
 	int rc = 0;
 
-	if (buf_data_size < sizeof(struct smb2_hdr)) {
+	if (pdu_length < sizeof(struct smb2_transform_hdr) ||
+	    buf_data_size < sizeof(struct smb2_hdr)) {
 		pr_err("Transform message is too small (%u)\n",
 		       pdu_length);
 		return -ECONNABORTED;
diff -ruw linux-6.4/fs/smb/server/smb2pdu.h linux-6.4-fbx/fs/smb/server/smb2pdu.h
--- linux-6.4/fs/smb/server/smb2pdu.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/smb2pdu.h	2023-11-07 13:38:44.046256254 +0100
@@ -361,7 +361,7 @@
 	__u8   Flags;
 	__u8   EaNameLength;
 	__le16 EaValueLength;
-	char name[1];
+	char name[];
 	/* optionally followed by value */
 } __packed; /* level 15 Query */
 
@@ -407,6 +407,9 @@
 } __packed;
 
 /* functions */
+#ifdef CONFIG_SMB_INSECURE_SERVER
+void init_smb2_0_server(struct ksmbd_conn *conn);
+#endif
 void init_smb2_1_server(struct ksmbd_conn *conn);
 void init_smb3_0_server(struct ksmbd_conn *conn);
 void init_smb3_02_server(struct ksmbd_conn *conn);
diff -ruw linux-6.4/fs/smb/server/smb_common.c linux-6.4-fbx/fs/smb/server/smb_common.c
--- linux-6.4/fs/smb/server/smb_common.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/smb_common.c	2024-01-25 13:36:32.718984668 +0100
@@ -17,6 +17,10 @@
 #include "mgmt/tree_connect.h"
 #include "mgmt/share_config.h"
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+#include "smb1pdu.h"
+#endif
+
 /*for shortname implementation */
 static const char basechars[43] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%";
 #define MANGLE_BASE (sizeof(basechars) / sizeof(char) - 1)
@@ -32,6 +36,20 @@
 };
 
 static struct smb_protocol smb1_protos[] = {
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	{
+		SMB1_PROT,
+		"\2NT LM 0.12",
+		"NT1",
+		SMB10_PROT_ID
+	},
+	{
+		SMB2_PROT,
+		"\2SMB 2.002",
+		"SMB2_02",
+		SMB20_PROT_ID
+	},
+#endif
 	{
 		SMB21_PROT,
 		"\2SMB 2.1",
@@ -90,7 +108,11 @@
 
 inline int ksmbd_min_protocol(void)
 {
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	return SMB1_PROT;
+#else
 	return SMB21_PROT;
+#endif
 }
 
 inline int ksmbd_max_protocol(void)
@@ -135,6 +157,16 @@
 int ksmbd_verify_smb_message(struct ksmbd_work *work)
 {
 	struct smb2_hdr *smb2_hdr = ksmbd_req_buf_next(work);
+
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (smb2_hdr->ProtocolId == SMB2_PROTO_NUMBER) {
+		ksmbd_debug(SMB, "got SMB2 command\n");
+		return ksmbd_smb2_check_message(work);
+	}
+
+	work->conn->outstanding_credits++;
+	return ksmbd_smb1_check_message(work);
+#else
 	struct smb_hdr *hdr;
 
 	if (smb2_hdr->ProtocolId == SMB2_PROTO_NUMBER)
@@ -148,6 +180,7 @@
 	}
 
 	return -EINVAL;
+#endif
 }
 
 /**
@@ -158,8 +191,12 @@
  */
 bool ksmbd_smb_request(struct ksmbd_conn *conn)
 {
-	__le32 *proto = (__le32 *)smb2_get_msg(conn->request_buf);
+	__le32 *proto;
 
+	if (conn->request_buf[0] != 0)
+		return false;
+
+	proto = (__le32 *)smb2_get_msg(conn->request_buf);
 	if (*proto == SMB2_COMPRESSION_TRANSFORM_ID) {
 		pr_err_ratelimited("smb2 compression not support yet");
 		return false;
@@ -295,6 +332,7 @@
 	return BAD_PROT_ID;
 }
 
+#ifndef CONFIG_SMB_INSECURE_SERVER
 #define SMB_COM_NEGOTIATE_EX	0x0
 
 /**
@@ -319,12 +357,6 @@
 	struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
 	struct smb_hdr *rcv_hdr = (struct smb_hdr *)work->request_buf;
 
-	/*
-	 * Remove 4 byte direct TCP header.
-	 */
-	*(__be32 *)work->response_buf =
-		cpu_to_be32(sizeof(struct smb_hdr) - 4);
-
 	rsp_hdr->Command = SMB_COM_NEGOTIATE;
 	*(__le32 *)rsp_hdr->Protocol = SMB1_PROTO_NUMBER;
 	rsp_hdr->Flags = SMBFLG_RESPONSE;
@@ -359,8 +391,8 @@
  */
 static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
 {
-	work->response_buf = kmalloc(MAX_CIFS_SMALL_BUFFER_SIZE,
-			GFP_KERNEL | __GFP_ZERO);
+	work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE,
+			GFP_KERNEL);
 	work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
 
 	if (!work->response_buf) {
@@ -388,26 +420,30 @@
 	[SMB_COM_NEGOTIATE_EX]	= { .proc = smb1_negotiate, },
 };
 
-static void init_smb1_server(struct ksmbd_conn *conn)
+static int init_smb1_server(struct ksmbd_conn *conn)
 {
 	conn->ops = &smb1_server_ops;
 	conn->cmds = smb1_server_cmds;
 	conn->max_cmds = ARRAY_SIZE(smb1_server_cmds);
+	return 0;
 }
+#endif
 
-void ksmbd_init_smb_server(struct ksmbd_work *work)
+int ksmbd_init_smb_server(struct ksmbd_work *work)
 {
 	struct ksmbd_conn *conn = work->conn;
 	__le32 proto;
 
-	if (conn->need_neg == false)
-		return;
-
 	proto = *(__le32 *)((struct smb_hdr *)work->request_buf)->Protocol;
+	if (conn->need_neg == false) {
 	if (proto == SMB1_PROTO_NUMBER)
-		init_smb1_server(conn);
-	else
-		init_smb3_11_server(conn);
+			return -EINVAL;
+		return 0;
+	}
+
+	if (proto == SMB1_PROTO_NUMBER)
+		return init_smb1_server(conn);
+	return init_smb3_11_server(conn);
 }
 
 int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
@@ -551,21 +587,24 @@
 		conn->dialect <= SMB311_PROT_ID);
 }
 
+#ifndef CONFIG_SMB_INSECURE_SERVER
 static int smb_handle_negotiate(struct ksmbd_work *work)
 {
 	struct smb_negotiate_rsp *neg_rsp = work->response_buf;
 
 	ksmbd_debug(SMB, "Unsupported SMB1 protocol\n");
 
-	/* Add 2 byte bcc and 2 byte DialectIndex. */
-	inc_rfc1001_len(work->response_buf, 4);
-	neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	if (ksmbd_iov_pin_rsp(work, (void *)neg_rsp,
+			      sizeof(struct smb_negotiate_rsp) - 4))
+		return -ENOMEM;
 
+	neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
 	neg_rsp->hdr.WordCount = 1;
 	neg_rsp->DialectIndex = cpu_to_le16(work->conn->dialect);
 	neg_rsp->ByteCount = 0;
 	return 0;
 }
+#endif
 
 int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
 {
diff -ruw linux-6.4/fs/smb/server/smb_common.h linux-6.4-fbx/fs/smb/server/smb_common.h
--- linux-6.4/fs/smb/server/smb_common.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/smb_common.h	2023-11-07 13:38:44.046256254 +0100
@@ -49,6 +49,42 @@
 /*
  * File Attribute flags
  */
+#ifdef CONFIG_SMB_INSECURE_SERVER
+#define ATTR_READONLY			0x0001
+#define ATTR_HIDDEN			0x0002
+#define ATTR_SYSTEM			0x0004
+#define ATTR_VOLUME			0x0008
+#define ATTR_DIRECTORY			0x0010
+#define ATTR_ARCHIVE			0x0020
+#define ATTR_DEVICE			0x0040
+#define ATTR_NORMAL			0x0080
+#define ATTR_TEMPORARY			0x0100
+#define ATTR_SPARSE			0x0200
+#define ATTR_REPARSE			0x0400
+#define ATTR_COMPRESSED			0x0800
+#define ATTR_OFFLINE			0x1000
+#define ATTR_NOT_CONTENT_INDEXED	0x2000
+#define ATTR_ENCRYPTED			0x4000
+
+#define ATTR_READONLY_LE		cpu_to_le32(ATTR_READONLY)
+#define ATTR_HIDDEN_LE			cpu_to_le32(ATTR_HIDDEN)
+#define ATTR_SYSTEM_LE			cpu_to_le32(ATTR_SYSTEM)
+#define ATTR_DIRECTORY_LE		cpu_to_le32(ATTR_DIRECTORY)
+#define ATTR_ARCHIVE_LE			cpu_to_le32(ATTR_ARCHIVE)
+#define ATTR_NORMAL_LE			cpu_to_le32(ATTR_NORMAL)
+#define ATTR_TEMPORARY_LE		cpu_to_le32(ATTR_TEMPORARY)
+#define ATTR_SPARSE_FILE_LE		cpu_to_le32(ATTR_SPARSE)
+#define ATTR_REPARSE_POINT_LE		cpu_to_le32(ATTR_REPARSE)
+#define ATTR_COMPRESSED_LE		cpu_to_le32(ATTR_COMPRESSED)
+#define ATTR_OFFLINE_LE			cpu_to_le32(ATTR_OFFLINE)
+#define ATTR_NOT_CONTENT_INDEXED_LE	cpu_to_le32(ATTR_NOT_CONTENT_INDEXED)
+#define ATTR_ENCRYPTED_LE		cpu_to_le32(ATTR_ENCRYPTED)
+#define ATTR_INTEGRITY_STREAML_LE	cpu_to_le32(0x00008000)
+#define ATTR_NO_SCRUB_DATA_LE		cpu_to_le32(0x00020000)
+#define ATTR_MASK_LE			cpu_to_le32(0x00007FB7)
+
+#define IS_SMB2(x)			((x)->vals->protocol_id != SMB10_PROT_ID)
+#endif
 #define ATTR_POSIX_SEMANTICS		0x01000000
 #define ATTR_BACKUP_SEMANTICS		0x02000000
 #define ATTR_DELETE_ON_CLOSE		0x04000000
@@ -203,11 +239,13 @@
 	unsigned char DialectsArray[1];
 } __packed;
 
+#ifndef CONFIG_SMB_INSECURE_SERVER
 struct smb_negotiate_rsp {
 	struct smb_hdr hdr;     /* wct = 17 */
 	__le16 DialectIndex; /* 0xFFFF = no dialect acceptable */
 	__le16 ByteCount;
 } __packed;
+#endif
 
 struct filesystem_attribute_info {
 	__le32 Attributes;
@@ -236,6 +274,14 @@
 	__le32 BytesPerSector;
 } __packed;     /* size info, level 0x103 */
 
+struct filesystem_full_info {
+	__le64 TotalAllocationUnits;
+	__le64 FreeAllocationUnits;
+	__le64 ActualAvailableUnits;
+	__le32 SectorsPerAllocationUnit;
+	__le32 BytesPerSector;
+} __packed;     /* size info, level 0x3ef */
+
 #define EXTENDED_INFO_MAGIC 0x43667364	/* Cfsd */
 #define STRING_LENGTH 28
 
@@ -427,7 +473,7 @@
 
 int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);
 
-void ksmbd_init_smb_server(struct ksmbd_work *work);
+int ksmbd_init_smb_server(struct ksmbd_work *work);
 
 struct ksmbd_kstat;
 int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
diff -ruw linux-6.4/fs/smb/server/smbacl.c linux-6.4-fbx/fs/smb/server/smbacl.c
--- linux-6.4/fs/smb/server/smbacl.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/smbacl.c	2024-01-25 13:36:32.718984668 +0100
@@ -401,10 +401,6 @@
 	if (num_aces > ULONG_MAX / sizeof(struct smb_ace *))
 		return;
 
-	ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *), GFP_KERNEL);
-	if (!ppace)
-		return;
-
 	ret = init_acl_state(&acl_state, num_aces);
 	if (ret)
 		return;
@@ -414,6 +410,13 @@
 		return;
 	}
 
+	ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *), GFP_KERNEL);
+	if (!ppace) {
+		free_acl_state(&default_acl_state);
+		free_acl_state(&acl_state);
+		return;
+	}
+
 	/*
 	 * reset rwx permissions for user/group/other.
 	 * Also, if num_aces is 0 i.e. DACL has no ACEs,
@@ -1107,6 +1110,7 @@
 		struct smb_acl *pdacl;
 		struct smb_sid *powner_sid = NULL, *pgroup_sid = NULL;
 		int powner_sid_size = 0, pgroup_sid_size = 0, pntsd_size;
+		int pntsd_alloc_size;
 
 		if (parent_pntsd->osidoffset) {
 			powner_sid = (struct smb_sid *)((char *)parent_pntsd +
@@ -1119,9 +1123,10 @@
 			pgroup_sid_size = 1 + 1 + 6 + (pgroup_sid->num_subauth * 4);
 		}
 
-		pntsd = kzalloc(sizeof(struct smb_ntsd) + powner_sid_size +
-				pgroup_sid_size + sizeof(struct smb_acl) +
-				nt_size, GFP_KERNEL);
+		pntsd_alloc_size = sizeof(struct smb_ntsd) + powner_sid_size +
+			pgroup_sid_size + sizeof(struct smb_acl) + nt_size;
+
+		pntsd = kzalloc(pntsd_alloc_size, GFP_KERNEL);
 		if (!pntsd) {
 			rc = -ENOMEM;
 			goto free_aces_base;
@@ -1136,6 +1141,27 @@
 		pntsd->gsidoffset = parent_pntsd->gsidoffset;
 		pntsd->dacloffset = parent_pntsd->dacloffset;
 
+		if ((u64)le32_to_cpu(pntsd->osidoffset) + powner_sid_size >
+		    pntsd_alloc_size) {
+			rc = -EINVAL;
+			kfree(pntsd);
+			goto free_aces_base;
+		}
+
+		if ((u64)le32_to_cpu(pntsd->gsidoffset) + pgroup_sid_size >
+		    pntsd_alloc_size) {
+			rc = -EINVAL;
+			kfree(pntsd);
+			goto free_aces_base;
+		}
+
+		if ((u64)le32_to_cpu(pntsd->dacloffset) + sizeof(struct smb_acl) + nt_size >
+		    pntsd_alloc_size) {
+			rc = -EINVAL;
+			kfree(pntsd);
+			goto free_aces_base;
+		}
+
 		if (pntsd->osidoffset) {
 			struct smb_sid *owner_sid = (struct smb_sid *)((char *)pntsd +
 					le32_to_cpu(pntsd->osidoffset));
@@ -1162,7 +1188,7 @@
 			pntsd_size += sizeof(struct smb_acl) + nt_size;
 		}
 
-		ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, pntsd_size);
+		ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, pntsd_size, false);
 		kfree(pntsd);
 	}
 
@@ -1354,7 +1380,7 @@
 
 int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
 		 const struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
-		 bool type_check)
+		 bool type_check, bool get_write)
 {
 	int rc;
 	struct smb_fattr fattr = {{0}};
@@ -1414,13 +1440,13 @@
 	if (test_share_config_flag(tcon->share_conf, KSMBD_SHARE_FLAG_ACL_XATTR)) {
 		/* Update WinACL in xattr */
 		ksmbd_vfs_remove_sd_xattrs(idmap, path);
-		ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, ntsd_len);
+		ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, ntsd_len,
+				get_write);
 	}
 
 out:
 	posix_acl_release(fattr.cf_acls);
 	posix_acl_release(fattr.cf_dacls);
-	mark_inode_dirty(inode);
 	return rc;
 }
 
diff -ruw linux-6.4/fs/smb/server/smbacl.h linux-6.4-fbx/fs/smb/server/smbacl.h
--- linux-6.4/fs/smb/server/smbacl.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/smbacl.h	2024-01-25 13:36:32.722984777 +0100
@@ -207,7 +207,7 @@
 			__le32 *pdaccess, int uid);
 int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
 		 const struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
-		 bool type_check);
+		 bool type_check, bool get_write);
 void id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid);
 void ksmbd_init_domain(u32 *sub_auth);
 
diff -ruw linux-6.4/fs/smb/server/transport_ipc.c linux-6.4-fbx/fs/smb/server/transport_ipc.c
--- linux-6.4/fs/smb/server/transport_ipc.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/transport_ipc.c	2024-04-19 15:59:31.197600671 +0200
@@ -65,6 +65,7 @@
 	struct hlist_node	ipc_table_hlist;
 
 	void			*response;
+	unsigned int		msg_sz;
 };
 
 static struct delayed_work ipc_timer_work;
@@ -74,7 +75,7 @@
 static int handle_generic_event(struct sk_buff *skb, struct genl_info *info);
 static int ksmbd_ipc_heartbeat_request(void);
 
-static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX] = {
+static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX + 1] = {
 	[KSMBD_EVENT_UNSPEC] = {
 		.len = 0,
 	},
@@ -229,7 +230,7 @@
 	struct ksmbd_ipc_msg *msg;
 	size_t msg_sz = sz + sizeof(struct ksmbd_ipc_msg);
 
-	msg = kvmalloc(msg_sz, GFP_KERNEL | __GFP_ZERO);
+	msg = kvzalloc(msg_sz, GFP_KERNEL);
 	if (msg)
 		msg->sz = sz;
 	return msg;
@@ -268,13 +269,14 @@
 			       entry->type + 1, type);
 		}
 
-		entry->response = kvmalloc(sz, GFP_KERNEL | __GFP_ZERO);
+		entry->response = kvzalloc(sz, GFP_KERNEL);
 		if (!entry->response) {
 			ret = -ENOMEM;
 			break;
 		}
 
 		memcpy(entry->response, payload, sz);
+		entry->msg_sz = sz;
 		wake_up_interruptible(&entry->wait);
 		ret = 0;
 		break;
@@ -403,7 +405,7 @@
 		return -EPERM;
 #endif
 
-	if (type >= KSMBD_EVENT_MAX) {
+	if (type > KSMBD_EVENT_MAX) {
 		WARN_ON(1);
 		return -EINVAL;
 	}
@@ -453,6 +455,34 @@
 	return ret;
 }
 
+static int ipc_validate_msg(struct ipc_msg_table_entry *entry)
+{
+	unsigned int msg_sz = entry->msg_sz;
+
+	if (entry->type == KSMBD_EVENT_RPC_REQUEST) {
+		struct ksmbd_rpc_command *resp = entry->response;
+
+		msg_sz = sizeof(struct ksmbd_rpc_command) + resp->payload_sz;
+	} else if (entry->type == KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST) {
+		struct ksmbd_spnego_authen_response *resp = entry->response;
+
+		msg_sz = sizeof(struct ksmbd_spnego_authen_response) +
+				resp->session_key_len + resp->spnego_blob_len;
+	} else if (entry->type == KSMBD_EVENT_SHARE_CONFIG_REQUEST) {
+		struct ksmbd_share_config_response *resp = entry->response;
+
+		if (resp->payload_sz) {
+			if (resp->payload_sz < resp->veto_list_sz)
+				return -EINVAL;
+
+			msg_sz = sizeof(struct ksmbd_share_config_response) +
+					resp->payload_sz;
+		}
+	}
+
+	return entry->msg_sz != msg_sz ? -EINVAL : 0;
+}
+
 static void *ipc_msg_send_request(struct ksmbd_ipc_msg *msg, unsigned int handle)
 {
 	struct ipc_msg_table_entry entry;
@@ -477,6 +507,13 @@
 	ret = wait_event_interruptible_timeout(entry.wait,
 					       entry.response != NULL,
 					       IPC_WAIT_TIMEOUT);
+	if (entry.response) {
+		ret = ipc_validate_msg(&entry);
+		if (ret) {
+			kvfree(entry.response);
+			entry.response = NULL;
+		}
+	}
 out:
 	down_write(&ipc_msg_table_lock);
 	hash_del(&entry.ipc_table_hlist);
diff -ruw linux-6.4/fs/smb/server/transport_tcp.c linux-6.4-fbx/fs/smb/server/transport_tcp.c
--- linux-6.4/fs/smb/server/transport_tcp.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/transport_tcp.c	2024-01-25 13:36:32.722984777 +0100
@@ -185,6 +185,7 @@
 	struct sockaddr *csin;
 	int rc = 0;
 	struct tcp_transport *t;
+	struct task_struct *handler;
 
 	t = alloc_transport(client_sk);
 	if (!t) {
@@ -199,13 +200,13 @@
 		goto out_error;
 	}
 
-	KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
+	handler = kthread_run(ksmbd_conn_handler_loop,
 					      KSMBD_TRANS(t)->conn,
 					      "ksmbd:%u",
 					      ksmbd_tcp_get_port(csin));
-	if (IS_ERR(KSMBD_TRANS(t)->handler)) {
+	if (IS_ERR(handler)) {
 		pr_err("cannot start conn thread\n");
-		rc = PTR_ERR(KSMBD_TRANS(t)->handler);
+		rc = PTR_ERR(handler);
 		free_transport(t);
 	}
 	return rc;
diff -ruw linux-6.4/fs/smb/server/unicode.c linux-6.4-fbx/fs/smb/server/unicode.c
--- linux-6.4/fs/smb/server/unicode.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/unicode.c	2023-11-07 13:38:44.046256254 +0100
@@ -14,46 +14,28 @@
 #include "uniupr.h"
 #include "smb_common.h"
 
-/*
- * smb_utf16_bytes() - how long will a string be after conversion?
- * @from:	pointer to input string
- * @maxbytes:	don't go past this many bytes of input string
- * @codepage:	destination codepage
- *
- * Walk a utf16le string and return the number of bytes that the string will
- * be after being converted to the given charset, not including any null
- * termination required. Don't walk past maxbytes in the source buffer.
- *
- * Return:	string length after conversion
- */
-static int smb_utf16_bytes(const __le16 *from, int maxbytes,
-			   const struct nls_table *codepage)
+#ifdef CONFIG_SMB_INSECURE_SERVER
+int smb1_utf16_name_length(const __le16 *from, int maxbytes)
 {
-	int i;
-	int charlen, outlen = 0;
+	int i, len = 0;
 	int maxwords = maxbytes / 2;
-	char tmp[NLS_MAX_CHARSET_SIZE];
 	__u16 ftmp;
 
 	for (i = 0; i < maxwords; i++) {
 		ftmp = get_unaligned_le16(&from[i]);
+		len += 2;
 		if (ftmp == 0)
 			break;
-
-		charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE);
-		if (charlen > 0)
-			outlen += charlen;
-		else
-			outlen++;
 	}
 
-	return outlen;
+	return len;
 }
+#endif
 
 /*
  * cifs_mapchar() - convert a host-endian char to proper char in codepage
  * @target:	where converted character should be copied
- * @src_char:	2 byte host-endian source character
+ * @from:	host-endian source string
  * @cp:		codepage to which character should be converted
  * @mapchar:	should character be mapped according to mapchars mount option?
  *
@@ -64,10 +46,13 @@
  * Return:	string length after conversion
  */
 static int
-cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
+cifs_mapchar(char *target, const __u16 *from, const struct nls_table *cp,
 	     bool mapchar)
 {
 	int len = 1;
+	__u16 src_char;
+
+	src_char = *from;
 
 	if (!mapchar)
 		goto cp_convert;
@@ -105,12 +90,66 @@
 
 cp_convert:
 	len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE);
-	if (len <= 0) {
+	if (len <= 0)
+		goto surrogate_pair;
+
+	goto out;
+
+surrogate_pair:
+	/* convert SURROGATE_PAIR and IVS */
+	if (strcmp(cp->charset, "utf8"))
+		goto unknown;
+	len = utf16s_to_utf8s(from, 3, UTF16_LITTLE_ENDIAN, target, 6);
+	if (len <= 0)
+		goto unknown;
+	return len;
+
+unknown:
 		*target = '?';
 		len = 1;
+	goto out;
 	}
 
-	goto out;
+/*
+ * smb_utf16_bytes() - how long will a string be after conversion?
+ * @from:	pointer to input string
+ * @maxbytes:	don't go past this many bytes of input string
+ * @codepage:	destination codepage
+ *
+ * Walk a utf16le string and return the number of bytes that the string will
+ * be after being converted to the given charset, not including any null
+ * termination required. Don't walk past maxbytes in the source buffer.
+ *
+ * Return:	string length after conversion
+ */
+static int smb_utf16_bytes(const __le16 *from, int maxbytes,
+			   const struct nls_table *codepage)
+{
+	int i, j;
+	int charlen, outlen = 0;
+	int maxwords = maxbytes / 2;
+	char tmp[NLS_MAX_CHARSET_SIZE];
+	__u16 ftmp[3];
+
+	for (i = 0; i < maxwords; i++) {
+		ftmp[0] = get_unaligned_le16(&from[i]);
+		if (ftmp[0] == 0)
+			break;
+		for (j = 1; j <= 2; j++) {
+			if (i + j < maxwords)
+				ftmp[j] = get_unaligned_le16(&from[i + j]);
+			else
+				ftmp[j] = 0;
+		}
+
+		charlen = cifs_mapchar(tmp, ftmp, codepage, 0);
+		if (charlen > 0)
+			outlen += charlen;
+		else
+			outlen++;
+	}
+
+	return outlen;
 }
 
 /*
@@ -140,12 +179,12 @@
 static int smb_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
 			  const struct nls_table *codepage, bool mapchar)
 {
-	int i, charlen, safelen;
+	int i, j, charlen, safelen;
 	int outlen = 0;
 	int nullsize = nls_nullsize(codepage);
 	int fromwords = fromlen / 2;
 	char tmp[NLS_MAX_CHARSET_SIZE];
-	__u16 ftmp;
+	__u16 ftmp[3];	/* ftmp[3] = 3array x 2bytes = 6bytes UTF-16 */
 
 	/*
 	 * because the chars can be of varying widths, we need to take care
@@ -156,9 +195,15 @@
 	safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize);
 
 	for (i = 0; i < fromwords; i++) {
-		ftmp = get_unaligned_le16(&from[i]);
-		if (ftmp == 0)
+		ftmp[0] = get_unaligned_le16(&from[i]);
+		if (ftmp[0] == 0)
 			break;
+		for (j = 1; j <= 2; j++) {
+			if (i + j < fromwords)
+				ftmp[j] = get_unaligned_le16(&from[i + j]);
+			else
+				ftmp[j] = 0;
+		}
 
 		/*
 		 * check to see if converting this character might make the
@@ -173,6 +218,19 @@
 		/* put converted char into 'to' buffer */
 		charlen = cifs_mapchar(&to[outlen], ftmp, codepage, mapchar);
 		outlen += charlen;
+
+		/*
+		 * charlen (=bytes of UTF-8 for 1 character)
+		 * 4bytes UTF-8(surrogate pair) is charlen=4
+		 * (4bytes UTF-16 code)
+		 * 7-8bytes UTF-8(IVS) is charlen=3+4 or 4+4
+		 * (2 UTF-8 pairs divided to 2 UTF-16 pairs)
+		 */
+		if (charlen == 4)
+			i++;
+		else if (charlen >= 5)
+			/* 5-6bytes UTF-8 */
+			i += 2;
 	}
 
 	/* properly null-terminate string */
@@ -307,10 +365,15 @@
 	char src_char;
 	__le16 dst_char;
 	wchar_t tmp;
+	wchar_t *wchar_to;	/* UTF-16 */
+	int ret;
+	unicode_t u;
 
 	if (!mapchars)
 		return smb_strtoUTF16(target, source, srclen, cp);
 
+	wchar_to = kzalloc(6, GFP_KERNEL);
+
 	for (i = 0, j = 0; i < srclen; j++) {
 		src_char = source[i];
 		charlen = 1;
@@ -349,11 +412,55 @@
 			 * if no match, use question mark, which at least in
 			 * some cases serves as wild card
 			 */
-			if (charlen < 1) {
+			if (charlen > 0)
+				goto ctoUTF16;
+
+			/* convert SURROGATE_PAIR */
+			if (strcmp(cp->charset, "utf8") || !wchar_to)
+				goto unknown;
+			if (*(source + i) & 0x80) {
+				charlen = utf8_to_utf32(source + i, 6, &u);
+				if (charlen < 0)
+					goto unknown;
+			} else
+				goto unknown;
+			ret  = utf8s_to_utf16s(source + i, charlen,
+					UTF16_LITTLE_ENDIAN,
+					wchar_to, 6);
+			if (ret < 0)
+				goto unknown;
+
+			i += charlen;
+			dst_char = cpu_to_le16(*wchar_to);
+			if (charlen <= 3)
+				/* 1-3bytes UTF-8 to 2bytes UTF-16 */
+				put_unaligned(dst_char, &target[j]);
+			else if (charlen == 4) {
+				/* 4bytes UTF-8(surrogate pair) to 4bytes UTF-16
+				 * 7-8bytes UTF-8(IVS) divided to 2 UTF-16
+				 *   (charlen=3+4 or 4+4) */
+				put_unaligned(dst_char, &target[j]);
+				dst_char = cpu_to_le16(*(wchar_to + 1));
+				j++;
+				put_unaligned(dst_char, &target[j]);
+			} else if (charlen >= 5) {
+				/* 5-6bytes UTF-8 to 6bytes UTF-16 */
+				put_unaligned(dst_char, &target[j]);
+				dst_char = cpu_to_le16(*(wchar_to + 1));
+				j++;
+				put_unaligned(dst_char, &target[j]);
+				dst_char = cpu_to_le16(*(wchar_to + 2));
+				j++;
+				put_unaligned(dst_char, &target[j]);
+			}
+			continue;
+
+unknown:
 				dst_char = cpu_to_le16(0x003f);
 				charlen = 1;
 			}
-		}
+
+ctoUTF16:
 		/*
 		 * character may take more than one byte in the source string,
 		 * but will take exactly two bytes in the target string
diff -ruw linux-6.4/fs/smb/server/unicode.h linux-6.4-fbx/fs/smb/server/unicode.h
--- linux-6.4/fs/smb/server/unicode.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/unicode.h	2023-11-07 13:38:44.046256254 +0100
@@ -63,6 +63,9 @@
 #endif				/* UNIUPR_NOLOWER */
 
 #ifdef __KERNEL__
+#ifdef CONFIG_SMB_INSECURE_SERVER
+int smb1_utf16_name_length(const __le16 *from, int maxbytes);
+#endif
 int smb_strtoUTF16(__le16 *to, const char *from, int len,
 		   const struct nls_table *codepage);
 char *smb_strndup_from_utf16(const char *src, const int maxlen,
diff -ruw linux-6.4/fs/smb/server/vfs.c linux-6.4-fbx/fs/smb/server/vfs.c
--- linux-6.4/fs/smb/server/vfs.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/vfs.c	2024-03-18 14:40:14.867741770 +0100
@@ -63,13 +63,13 @@
 
 static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
 					char *pathname, unsigned int flags,
+					struct path *parent_path,
 					struct path *path)
 {
 	struct qstr last;
 	struct filename *filename;
 	struct path *root_share_path = &share_conf->vfs_path;
 	int err, type;
-	struct path parent_path;
 	struct dentry *d;
 
 	if (pathname[0] == '\0') {
@@ -84,7 +84,7 @@
 		return PTR_ERR(filename);
 
 	err = vfs_path_parent_lookup(filename, flags,
-				     &parent_path, &last, &type,
+				     parent_path, &last, &type,
 				     root_share_path);
 	if (err) {
 		putname(filename);
@@ -92,13 +92,20 @@
 	}
 
 	if (unlikely(type != LAST_NORM)) {
-		path_put(&parent_path);
+		path_put(parent_path);
+		putname(filename);
+		return -ENOENT;
+	}
+
+	err = mnt_want_write(parent_path->mnt);
+	if (err) {
+		path_put(parent_path);
 		putname(filename);
 		return -ENOENT;
 	}
 
-	inode_lock_nested(parent_path.dentry->d_inode, I_MUTEX_PARENT);
-	d = lookup_one_qstr_excl(&last, parent_path.dentry, 0);
+	inode_lock_nested(parent_path->dentry->d_inode, I_MUTEX_PARENT);
+	d = lookup_one_qstr_excl(&last, parent_path->dentry, 0);
 	if (IS_ERR(d))
 		goto err_out;
 
@@ -108,24 +115,30 @@
 	}
 
 	path->dentry = d;
-	path->mnt = share_conf->vfs_path.mnt;
-	path_put(&parent_path);
-	putname(filename);
+	path->mnt = mntget(parent_path->mnt);
 
+	if (test_share_config_flag(share_conf, KSMBD_SHARE_FLAG_CROSSMNT)) {
+		err = follow_down(path, 0);
+		if (err < 0) {
+			path_put(path);
+			goto err_out;
+		}
+	}
+
+	putname(filename);
 	return 0;
 
 err_out:
-	inode_unlock(parent_path.dentry->d_inode);
-	path_put(&parent_path);
+	inode_unlock(d_inode(parent_path->dentry));
+	mnt_drop_write(parent_path->mnt);
+	path_put(parent_path);
 	putname(filename);
 	return -ENOENT;
 }
 
-int ksmbd_vfs_query_maximal_access(struct mnt_idmap *idmap,
+void ksmbd_vfs_query_maximal_access(struct mnt_idmap *idmap,
 				   struct dentry *dentry, __le32 *daccess)
 {
-	int ret = 0;
-
 	*daccess = cpu_to_le32(FILE_READ_ATTRIBUTES | READ_CONTROL);
 
 	if (!inode_permission(idmap, d_inode(dentry), MAY_OPEN | MAY_WRITE))
@@ -142,8 +155,6 @@
 
 	if (!inode_permission(idmap, d_inode(dentry->d_parent), MAY_EXEC | MAY_WRITE))
 		*daccess |= FILE_DELETE_LE;
-
-	return ret;
 }
 
 /**
@@ -170,10 +181,6 @@
 		return err;
 	}
 
-	err = mnt_want_write(path.mnt);
-	if (err)
-		goto out_err;
-
 	mode |= S_IFREG;
 	err = vfs_create(mnt_idmap(path.mnt), d_inode(path.dentry),
 			 dentry, mode, true);
@@ -183,9 +190,7 @@
 	} else {
 		pr_err("File(%s): creation failed (err:%d)\n", name, err);
 	}
-	mnt_drop_write(path.mnt);
 
-out_err:
 	done_path_create(&path, dentry);
 	return err;
 }
@@ -216,10 +221,6 @@
 		return err;
 	}
 
-	err = mnt_want_write(path.mnt);
-	if (err)
-		goto out_err2;
-
 	idmap = mnt_idmap(path.mnt);
 	mode |= S_IFDIR;
 	err = vfs_mkdir(idmap, d_inode(path.dentry), dentry, mode);
@@ -230,21 +231,19 @@
 			       dentry->d_name.len);
 		if (IS_ERR(d)) {
 			err = PTR_ERR(d);
-			goto out_err1;
+			goto out_err;
 		}
 		if (unlikely(d_is_negative(d))) {
 			dput(d);
 			err = -ENOENT;
-			goto out_err1;
+			goto out_err;
 		}
 
 		ksmbd_vfs_inherit_owner(work, d_inode(path.dentry), d_inode(d));
 		dput(d);
 	}
 
-out_err1:
-	mnt_drop_write(path.mnt);
-out_err2:
+out_err:
 	done_path_create(&path, dentry);
 	if (err)
 		pr_err("mkdir(%s): creation failed (err:%d)\n", name, err);
@@ -364,15 +363,15 @@
  * @fid:	file id of open file
  * @count:	read byte count
  * @pos:	file pos
+ * @rbuf:	read data buffer
  *
  * Return:	number of read bytes on success, otherwise error
  */
 int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp, size_t count,
-		   loff_t *pos)
+		   loff_t *pos, char *rbuf)
 {
 	struct file *filp = fp->filp;
 	ssize_t nbytes = 0;
-	char *rbuf = work->aux_payload_buf;
 	struct inode *inode = file_inode(filp);
 
 	if (S_ISDIR(inode->i_mode))
@@ -416,7 +415,8 @@
 {
 	char *stream_buf = NULL, *wbuf;
 	struct mnt_idmap *idmap = file_mnt_idmap(fp->filp);
-	size_t size, v_len;
+	size_t size;
+	ssize_t v_len;
 	int err = 0;
 
 	ksmbd_debug(VFS, "write stream data pos : %llu, count : %zd\n",
@@ -433,14 +433,14 @@
 				       fp->stream.name,
 				       fp->stream.size,
 				       &stream_buf);
-	if ((int)v_len < 0) {
+	if (v_len < 0) {
 		pr_err("not found stream in xattr : %zd\n", v_len);
-		err = (int)v_len;
+		err = v_len;
 		goto out;
 	}
 
 	if (v_len < size) {
-		wbuf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
+		wbuf = kvzalloc(size, GFP_KERNEL);
 		if (!wbuf) {
 			err = -ENOMEM;
 			goto out;
@@ -459,7 +459,8 @@
 				 fp->stream.name,
 				 (void *)stream_buf,
 				 size,
-				 0);
+				 0,
+				 true);
 	if (err < 0)
 		goto out;
 
@@ -516,6 +517,9 @@
 		}
 	}
 
+	/* Reserve lease break for parent dir at closing time */
+	fp->reserve_lease_break = true;
+
 	/* Do we need to break any of a levelII oplock? */
 	smb_break_all_levII_oplock(work, fp, 1);
 
@@ -557,6 +561,245 @@
 	return err;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+/**
+ * smb_check_attrs() - sanitize inode attributes
+ * @inode:	inode
+ * @attrs:	inode attributes
+ */
+static void smb_check_attrs(struct inode *inode, struct iattr *attrs)
+{
+	/* sanitize the mode change */
+	if (attrs->ia_valid & ATTR_MODE) {
+		attrs->ia_mode &= S_IALLUGO;
+		attrs->ia_mode |= (inode->i_mode & ~S_IALLUGO);
+	}
+
+	/* Revoke setuid/setgid on chown */
+	if (!S_ISDIR(inode->i_mode) &&
+	    (((attrs->ia_valid & ATTR_UID) &&
+	      !uid_eq(attrs->ia_uid, inode->i_uid)) ||
+	     ((attrs->ia_valid & ATTR_GID) &&
+	      !gid_eq(attrs->ia_gid, inode->i_gid)))) {
+		attrs->ia_valid |= ATTR_KILL_PRIV;
+		if (attrs->ia_valid & ATTR_MODE) {
+			/* we're setting mode too, just clear the s*id bits */
+			attrs->ia_mode &= ~S_ISUID;
+			if (attrs->ia_mode & 0010)
+				attrs->ia_mode &= ~S_ISGID;
+		} else {
+			/* set ATTR_KILL_* bits and let VFS handle it */
+			attrs->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID);
+		}
+	}
+}
+
+/**
+ * ksmbd_vfs_setattr() - vfs helper for smb setattr
+ * @work:	work
+ * @name:	file name
+ * @fid:	file id of open file
+ * @attrs:	inode attributes
+ *
+ * Return:	0 on success, otherwise error
+ */
+int ksmbd_vfs_setattr(struct ksmbd_work *work, const char *name, u64 fid,
+		      struct iattr *attrs)
+{
+	struct file *filp;
+	struct dentry *dentry;
+	struct inode *inode;
+	struct path path;
+	bool update_size = false;
+	int err = 0;
+	struct ksmbd_file *fp = NULL;
+	struct mnt_idmap *idmap;
+
+	if (ksmbd_override_fsids(work))
+		return -ENOMEM;
+
+	if (name) {
+		err = kern_path(name, 0, &path);
+		if (err) {
+			ksmbd_revert_fsids(work);
+			ksmbd_debug(VFS, "lookup failed for %s, err = %d\n",
+				    name, err);
+			return -ENOENT;
+		}
+		dentry = path.dentry;
+		inode = d_inode(dentry);
+		idmap = mnt_idmap(path.mnt);
+	} else {
+		fp = ksmbd_lookup_fd_fast(work, fid);
+		if (!fp) {
+			ksmbd_revert_fsids(work);
+			pr_err("failed to get filp for fid %llu\n", fid);
+			return -ENOENT;
+		}
+
+		filp = fp->filp;
+		dentry = filp->f_path.dentry;
+		inode = d_inode(dentry);
+		idmap = file_mnt_idmap(filp);
+	}
+
+	err = inode_permission(idmap, d_inode(dentry), MAY_WRITE);
+	if (err)
+		goto out;
+
+	/* no need to update mode of symlink */
+	if (S_ISLNK(inode->i_mode))
+		attrs->ia_valid &= ~ATTR_MODE;
+
+	/* skip setattr, if nothing to update */
+	if (!attrs->ia_valid) {
+		err = 0;
+		goto out;
+	}
+
+	smb_check_attrs(inode, attrs);
+	if (attrs->ia_valid & ATTR_SIZE) {
+		err = get_write_access(inode);
+		if (err)
+			goto out;
+		update_size = true;
+	}
+
+	attrs->ia_valid |= ATTR_CTIME;
+
+	inode_lock(inode);
+	err = notify_change(idmap, dentry, attrs, NULL);
+	inode_unlock(inode);
+
+	if (update_size)
+		put_write_access(inode);
+
+	if (!err) {
+		sync_inode_metadata(inode, 1);
+		ksmbd_debug(VFS, "fid %llu, setattr done\n", fid);
+	}
+
+out:
+	if (name)
+		path_put(&path);
+	ksmbd_fd_put(work, fp);
+	ksmbd_revert_fsids(work);
+	return err;
+}
+
+/**
+ * ksmbd_vfs_symlink() - vfs helper for creating smb symlink
+ * @name:	source file name
+ * @symname:	symlink name
+ *
+ * Return:	0 on success, otherwise error
+ */
+int ksmbd_vfs_symlink(struct ksmbd_work *work, const char *name,
+		      const char *symname)
+{
+	struct path path;
+	struct dentry *dentry;
+	int err;
+
+	if (ksmbd_override_fsids(work))
+		return -ENOMEM;
+
+	dentry = kern_path_create(AT_FDCWD, symname, &path, 0);
+	if (IS_ERR(dentry)) {
+		ksmbd_revert_fsids(work);
+		err = PTR_ERR(dentry);
+		pr_err("path create failed for %s, err %d\n", name, err);
+		return err;
+	}
+
+	err = vfs_symlink(mnt_idmap(path.mnt), d_inode(dentry->d_parent), dentry, name);
+	if (err && err != -EEXIST && err != -ENOSPC)
+		ksmbd_debug(VFS, "failed to create symlink, err %d\n", err);
+
+	done_path_create(&path, dentry);
+	ksmbd_revert_fsids(work);
+	return err;
+}
+
+/**
+ * ksmbd_vfs_readlink() - vfs helper for reading value of symlink
+ * @path:	path of symlink
+ * @buf:	destination buffer for symlink value
+ * @lenp:	destination buffer length
+ *
+ * Return:	symlink value length on success, otherwise error
+ */
+int ksmbd_vfs_readlink(struct path *path, char *buf, int lenp)
+{
+	struct inode *inode;
+	int err;
+	const char *link;
+	DEFINE_DELAYED_CALL(done);
+	int len;
+
+	if (!path)
+		return -ENOENT;
+
+	inode = d_inode(path->dentry);
+	if (!S_ISLNK(inode->i_mode))
+		return -EINVAL;
+
+	link = vfs_get_link(path->dentry, &done);
+	if (IS_ERR(link)) {
+		err = PTR_ERR(link);
+		pr_err("readlink failed, err = %d\n", err);
+		return err;
+	}
+
+	len = strlen(link);
+	if (len > lenp)
+		len = lenp;
+
+	memcpy(buf, link, len);
+	do_delayed_call(&done);
+
+	return len;
+}
+
+int ksmbd_vfs_readdir_name(struct ksmbd_work *work,
+			   struct mnt_idmap *idmap,
+			   struct ksmbd_kstat *ksmbd_kstat,
+			   const char *de_name, int de_name_len,
+			   const char *dir_path)
+{
+	struct path parent_path, path;
+	int rc, file_pathlen, dir_pathlen;
+	char *name;
+
+	dir_pathlen = strlen(dir_path);
+	/* 1 for '/'*/
+	file_pathlen = dir_pathlen +  de_name_len + 1;
+	name = kmalloc(file_pathlen + 1, GFP_KERNEL);
+	if (!name)
+		return -ENOMEM;
+
+	memcpy(name, dir_path, dir_pathlen);
+	memset(name + dir_pathlen, '/', 1);
+	memcpy(name + dir_pathlen + 1, de_name, de_name_len);
+	name[file_pathlen] = '\0';
+
+	rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					&parent_path, &path, true);
+	if (rc) {
+		pr_err("lookup failed: %s [%d]\n", name, rc);
+		kfree(name);
+		return rc;
+	}
+
+	ksmbd_vfs_fill_dentry_attrs(work, idmap, path.dentry, ksmbd_kstat);
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+	kfree(name);
+	return 0;
+}
+#endif
+
 /**
  * ksmbd_vfs_fsync() - vfs helper for smb fsync
  * @work:	work
@@ -601,10 +844,6 @@
 		goto out_err;
 	}
 
-	err = mnt_want_write(path->mnt);
-	if (err)
-		goto out_err;
-
 	idmap = mnt_idmap(path->mnt);
 	if (S_ISDIR(d_inode(path->dentry)->i_mode)) {
 		err = vfs_rmdir(idmap, d_inode(parent), path->dentry);
@@ -615,7 +854,6 @@
 		if (err)
 			ksmbd_debug(VFS, "unlink failed, err %d\n", err);
 	}
-	mnt_drop_write(path->mnt);
 
 out_err:
 	ksmbd_revert_fsids(work);
@@ -661,16 +899,11 @@
 		goto out3;
 	}
 
-	err = mnt_want_write(newpath.mnt);
-	if (err)
-		goto out3;
-
 	err = vfs_link(oldpath.dentry, mnt_idmap(newpath.mnt),
 		       d_inode(newpath.dentry),
 		       dentry, NULL);
 	if (err)
 		ksmbd_debug(VFS, "vfs_link failed err %d\n", err);
-	mnt_drop_write(newpath.mnt);
 
 out3:
 	done_path_create(&newpath, dentry);
@@ -728,7 +961,7 @@
 		goto out3;
 	}
 
-	parent_fp = ksmbd_lookup_fd_inode(d_inode(old_child->d_parent));
+	parent_fp = ksmbd_lookup_fd_inode(old_child->d_parent);
 	if (parent_fp) {
 		if (parent_fp->daccess & FILE_DELETE_LE) {
 			pr_err("parent dir is opened with delete access\n");
@@ -751,10 +984,13 @@
 		goto out4;
 	}
 
+	/* explicitly handle file overwrite case, for compatibility with
+	 * filesystems that may not support rename flags (e.g: fuse) */
 	if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry)) {
 		err = -EEXIST;
 		goto out4;
 	}
+	flags &= ~(RENAME_NOREPLACE);
 
 	if (old_child == trap) {
 		err = -EINVAL;
@@ -857,7 +1093,7 @@
 	if (size <= 0)
 		return size;
 
-	vlist = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
+	vlist = kvzalloc(size, GFP_KERNEL);
 	if (!vlist)
 		return -ENOMEM;
 
@@ -920,18 +1156,22 @@
  * @attr_value:	xattr value to set
  * @attr_size:	size of xattr value
  * @flags:	destination buffer length
+ * @get_write:	get write access to a mount
  *
  * Return:	0 on success, otherwise error
  */
 int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
 		       const struct path *path, const char *attr_name,
-		       void *attr_value, size_t attr_size, int flags)
+		       void *attr_value, size_t attr_size, int flags,
+		       bool get_write)
 {
 	int err;
 
+	if (get_write == true) {
 	err = mnt_want_write(path->mnt);
 	if (err)
 		return err;
+	}
 
 	err = vfs_setxattr(idmap,
 			   path->dentry,
@@ -941,10 +1181,43 @@
 			   flags);
 	if (err)
 		ksmbd_debug(VFS, "setxattr failed, err %d\n", err);
+	if (get_write == true)
 	mnt_drop_write(path->mnt);
 	return err;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+int ksmbd_vfs_fsetxattr(struct ksmbd_work *work, const char *filename,
+			const char *attr_name, const void *attr_value,
+			size_t attr_size, int flags)
+{
+	struct path path;
+	int err;
+
+	if (ksmbd_override_fsids(work))
+		return -ENOMEM;
+
+	err = kern_path(filename, 0, &path);
+	if (err) {
+		ksmbd_revert_fsids(work);
+		ksmbd_debug(VFS, "cannot get linux path %s, err %d\n",
+			    filename, err);
+		return err;
+	}
+
+	err = vfs_setxattr(mnt_idmap(path.mnt), path.dentry,
+			   attr_name,
+			   attr_value,
+			   attr_size,
+			   flags);
+	if (err)
+		ksmbd_debug(VFS, "setxattr failed, err %d\n", err);
+	path_put(&path);
+	ksmbd_revert_fsids(work);
+	return err;
+}
+#endif
+
 /**
  * ksmbd_vfs_set_fadvise() - convert smb IO caching options to linux options
  * @filp:	file pointer for IO
@@ -1091,6 +1364,63 @@
 	return err;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+/**
+ * ksmbd_vfs_dentry_open() - open a dentry and provide fid for it
+ * @work:	smb work ptr
+ * @path:	path of dentry to be opened
+ * @flags:	open flags
+ * @ret_id:	fid returned on this
+ * @option:	file access pattern options for fadvise
+ * @fexist:	file already present or not
+ *
+ * Return:	allocated struct ksmbd_file on success, otherwise error pointer
+ */
+struct ksmbd_file *ksmbd_vfs_dentry_open(struct ksmbd_work *work,
+					 const struct path *path, int flags,
+					 __le32 option, int fexist)
+{
+	struct file *filp;
+	int err = 0;
+	struct ksmbd_file *fp = NULL;
+
+	filp = dentry_open(path, flags | O_LARGEFILE, current_cred());
+	if (IS_ERR(filp)) {
+		err = PTR_ERR(filp);
+		pr_err("dentry open failed, err %d\n", err);
+		return ERR_PTR(err);
+	}
+
+	ksmbd_vfs_set_fadvise(filp, option);
+
+	fp = ksmbd_open_fd(work, filp);
+	if (IS_ERR(fp)) {
+		fput(filp);
+		err = PTR_ERR(fp);
+		pr_err("id insert failed\n");
+		goto err_out;
+	}
+
+	if (flags & O_TRUNC) {
+		if (fexist)
+			smb_break_all_oplock(work, fp);
+		err = vfs_truncate((struct path *)path, 0);
+		if (err)
+			goto err_out;
+	}
+	return fp;
+
+err_out:
+	if (!IS_ERR(fp))
+		ksmbd_close_fd(work, fp->volatile_id);
+	if (err) {
+		fp = ERR_PTR(err);
+		pr_err("err : %d\n", err);
+	}
+	return fp;
+}
+#endif
+
 static bool __dir_empty(struct dir_context *ctx, const char *name, int namlen,
 		       loff_t offset, u64 ino, unsigned int d_type)
 {
@@ -1198,16 +1528,16 @@
  * Return:	0 on success, otherwise error
  */
 int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
-			       unsigned int flags, struct path *path,
-			       bool caseless)
+			       unsigned int flags, struct path *parent_path,
+			       struct path *path, bool caseless)
 {
 	struct ksmbd_share_config *share_conf = work->tcon->share_conf;
 	int err;
-	struct path parent_path;
 
-	err = ksmbd_vfs_path_lookup_locked(share_conf, name, flags, path);
+	err = ksmbd_vfs_path_lookup_locked(share_conf, name, flags, parent_path,
+					   path);
 	if (!err)
-		return err;
+		return 0;
 
 	if (caseless) {
 		char *filepath;
@@ -1220,10 +1550,10 @@
 		path_len = strlen(filepath);
 		remain_len = path_len;
 
-		parent_path = share_conf->vfs_path;
-		path_get(&parent_path);
+		*parent_path = share_conf->vfs_path;
+		path_get(parent_path);
 
-		while (d_can_lookup(parent_path.dentry)) {
+		while (d_can_lookup(parent_path->dentry)) {
 			char *filename = filepath + path_len - remain_len;
 			char *next = strchrnul(filename, '/');
 			size_t filename_len = next - filename;
@@ -1232,7 +1562,7 @@
 			if (filename_len == 0)
 				break;
 
-			err = ksmbd_vfs_lookup_in_dir(&parent_path, filename,
+			err = ksmbd_vfs_lookup_in_dir(parent_path, filename,
 						      filename_len,
 						      work->conn->um);
 			if (err)
@@ -1249,8 +1579,8 @@
 				goto out2;
 			else if (is_last)
 				goto out1;
-			path_put(&parent_path);
-			parent_path = *path;
+			path_put(parent_path);
+			*parent_path = *path;
 
 			next[0] = '/';
 			remain_len -= filename_len + 1;
@@ -1258,20 +1588,36 @@
 
 		err = -EINVAL;
 out2:
-		path_put(&parent_path);
+		path_put(parent_path);
 out1:
 		kfree(filepath);
 	}
 
 	if (!err) {
-		err = ksmbd_vfs_lock_parent(parent_path.dentry, path->dentry);
-		if (err)
-			dput(path->dentry);
-		path_put(&parent_path);
+		err = mnt_want_write(parent_path->mnt);
+		if (err) {
+			path_put(path);
+			path_put(parent_path);
+			return err;
+		}
+
+		err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry);
+		if (err) {
+			path_put(path);
+			path_put(parent_path);
+		}
 	}
 	return err;
 }
 
+void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path)
+{
+	inode_unlock(d_inode(parent_path->dentry));
+	mnt_drop_write(parent_path->mnt);
+	path_put(path);
+	path_put(parent_path);
+}
+
 struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
 					  const char *name,
 					  unsigned int flags,
@@ -1426,7 +1772,8 @@
 int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
 			   struct mnt_idmap *idmap,
 			   const struct path *path,
-			   struct smb_ntsd *pntsd, int len)
+			   struct smb_ntsd *pntsd, int len,
+			   bool get_write)
 {
 	int rc;
 	struct ndr sd_ndr = {0}, acl_ndr = {0};
@@ -1486,7 +1833,7 @@
 
 	rc = ksmbd_vfs_setxattr(idmap, path,
 				XATTR_NAME_SD, sd_ndr.data,
-				sd_ndr.offset, 0);
+				sd_ndr.offset, 0, get_write);
 	if (rc < 0)
 		pr_err("Failed to store XATTR ntacl :%d\n", rc);
 
@@ -1575,7 +1922,8 @@
 
 int ksmbd_vfs_set_dos_attrib_xattr(struct mnt_idmap *idmap,
 				   const struct path *path,
-				   struct xattr_dos_attrib *da)
+				   struct xattr_dos_attrib *da,
+				   bool get_write)
 {
 	struct ndr n;
 	int err;
@@ -1585,7 +1933,7 @@
 		return err;
 
 	err = ksmbd_vfs_setxattr(idmap, path, XATTR_NAME_DOS_ATTRIBUTE,
-				 (void *)n.data, n.offset, 0);
+				 (void *)n.data, n.offset, 0, get_write);
 	if (err)
 		ksmbd_debug(SMB, "failed to store dos attribute in xattr\n");
 	kfree(n.data);
@@ -1651,10 +1999,19 @@
 				struct dentry *dentry,
 				struct ksmbd_kstat *ksmbd_kstat)
 {
+	struct ksmbd_share_config *share_conf = work->tcon->share_conf;
 	u64 time;
 	int rc;
+	struct path path = {
+		.mnt = share_conf->vfs_path.mnt,
+		.dentry = dentry,
+	};
 
-	generic_fillattr(idmap, d_inode(dentry), ksmbd_kstat->kstat);
+	rc = vfs_getattr(&path, ksmbd_kstat->kstat,
+			 STATX_BASIC_STATS | STATX_BTIME,
+			 AT_STATX_SYNC_AS_STAT);
+	if (rc)
+		return rc;
 
 	time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->ctime);
 	ksmbd_kstat->create_time = time;
@@ -1856,10 +2213,6 @@
 	}
 	posix_state_to_acl(&acl_state, acls->a_entries);
 
-	rc = mnt_want_write(path->mnt);
-	if (rc)
-		goto out_err;
-
 	rc = set_posix_acl(idmap, dentry, ACL_TYPE_ACCESS, acls);
 	if (rc < 0)
 		ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
@@ -1871,9 +2224,7 @@
 			ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
 				    rc);
 	}
-	mnt_drop_write(path->mnt);
 
-out_err:
 	free_acl_state(&acl_state);
 	posix_acl_release(acls);
 	return rc;
@@ -1903,10 +2254,6 @@
 		}
 	}
 
-	rc = mnt_want_write(path->mnt);
-	if (rc)
-		goto out_err;
-
 	rc = set_posix_acl(idmap, dentry, ACL_TYPE_ACCESS, acls);
 	if (rc < 0)
 		ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
@@ -1918,9 +2265,7 @@
 			ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
 				    rc);
 	}
-	mnt_drop_write(path->mnt);
 
-out_err:
 	posix_acl_release(acls);
 	return rc;
 }
diff -ruw linux-6.4/fs/smb/server/vfs.h linux-6.4-fbx/fs/smb/server/vfs.h
--- linux-6.4/fs/smb/server/vfs.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/vfs.h	2024-01-25 13:36:32.722984777 +0100
@@ -39,6 +39,9 @@
 
 struct ksmbd_dir_info {
 	const char	*name;
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	char		*smb1_name;
+#endif
 	char		*wptr;
 	char		*rptr;
 	int		name_len;
@@ -72,12 +75,12 @@
 };
 
 int ksmbd_vfs_lock_parent(struct dentry *parent, struct dentry *child);
-int ksmbd_vfs_query_maximal_access(struct mnt_idmap *idmap,
+void ksmbd_vfs_query_maximal_access(struct mnt_idmap *idmap,
 				   struct dentry *dentry, __le32 *daccess);
 int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode);
 int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode);
-int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp,
-		   size_t count, loff_t *pos);
+int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp, size_t count,
+		   loff_t *pos, char *rbuf);
 int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
 		    char *buf, size_t count, loff_t *pos, bool sync,
 		    ssize_t *written);
@@ -86,6 +89,24 @@
 int ksmbd_vfs_link(struct ksmbd_work *work,
 		   const char *oldname, const char *newname);
 int ksmbd_vfs_getattr(const struct path *path, struct kstat *stat);
+#ifdef CONFIG_SMB_INSECURE_SERVER
+struct ksmbd_file *ksmbd_vfs_dentry_open(struct ksmbd_work *work,
+					 const struct path *path, int flags,
+					 __le32 option, int fexist);
+int ksmbd_vfs_setattr(struct ksmbd_work *work, const char *name,
+		      u64 fid, struct iattr *attrs);
+int ksmbd_vfs_fsetxattr(struct ksmbd_work *work, const char *filename,
+			const char *attr_name, const void *attr_value,
+			size_t attr_size, int flags);
+int ksmbd_vfs_symlink(struct ksmbd_work *work,
+		      const char *name, const char *symname);
+int ksmbd_vfs_readlink(struct path *path, char *buf, int lenp);
+int ksmbd_vfs_readdir_name(struct ksmbd_work *work,
+			   struct mnt_idmap *idmap,
+			   struct ksmbd_kstat *ksmbd_kstat,
+			   const char *de_name, int de_name_len,
+			   const char *dir_path);
+#endif
 int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path,
 		     char *newname, int flags);
 int ksmbd_vfs_truncate(struct ksmbd_work *work,
@@ -109,14 +130,16 @@
 				int attr_name_len);
 int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
 		       const struct path *path, const char *attr_name,
-		       void *attr_value, size_t attr_size, int flags);
+		       void *attr_value, size_t attr_size, int flags,
+		       bool get_write);
 int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
 				size_t *xattr_stream_name_size, int s_type);
 int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
 			   const struct path *path, char *attr_name);
 int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
-			       unsigned int flags, struct path *path,
-			       bool caseless);
+			       unsigned int flags, struct path *parent_path,
+			       struct path *path, bool caseless);
+void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path);
 struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
 					  const char *name,
 					  unsigned int flags,
@@ -144,14 +167,16 @@
 int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
 			   struct mnt_idmap *idmap,
 			   const struct path *path,
-			   struct smb_ntsd *pntsd, int len);
+			   struct smb_ntsd *pntsd, int len,
+			   bool get_write);
 int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn,
 			   struct mnt_idmap *idmap,
 			   struct dentry *dentry,
 			   struct smb_ntsd **pntsd);
 int ksmbd_vfs_set_dos_attrib_xattr(struct mnt_idmap *idmap,
 				   const struct path *path,
-				   struct xattr_dos_attrib *da);
+				   struct xattr_dos_attrib *da,
+				   bool get_write);
 int ksmbd_vfs_get_dos_attrib_xattr(struct mnt_idmap *idmap,
 				   struct dentry *dentry,
 				   struct xattr_dos_attrib *da);
diff -ruw linux-6.4/fs/smb/server/vfs_cache.c linux-6.4-fbx/fs/smb/server/vfs_cache.c
--- linux-6.4/fs/smb/server/vfs_cache.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/vfs_cache.c	2024-01-25 13:36:32.722984777 +0100
@@ -66,14 +66,14 @@
 	return tmp & inode_hash_mask;
 }
 
-static struct ksmbd_inode *__ksmbd_inode_lookup(struct inode *inode)
+static struct ksmbd_inode *__ksmbd_inode_lookup(struct dentry *de)
 {
 	struct hlist_head *head = inode_hashtable +
-		inode_hash(inode->i_sb, inode->i_ino);
+		inode_hash(d_inode(de)->i_sb, (unsigned long)de);
 	struct ksmbd_inode *ci = NULL, *ret_ci = NULL;
 
 	hlist_for_each_entry(ci, head, m_hash) {
-		if (ci->m_inode == inode) {
+		if (ci->m_de == de) {
 			if (atomic_inc_not_zero(&ci->m_count))
 				ret_ci = ci;
 			break;
@@ -84,29 +84,30 @@
 
 static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
 {
-	return __ksmbd_inode_lookup(file_inode(fp->filp));
+	return __ksmbd_inode_lookup(fp->filp->f_path.dentry);
 }
 
-static struct ksmbd_inode *ksmbd_inode_lookup_by_vfsinode(struct inode *inode)
+struct ksmbd_inode *ksmbd_inode_lookup_lock(struct dentry *d)
 {
 	struct ksmbd_inode *ci;
 
 	read_lock(&inode_hash_lock);
-	ci = __ksmbd_inode_lookup(inode);
+	ci = __ksmbd_inode_lookup(d);
 	read_unlock(&inode_hash_lock);
+
 	return ci;
 }
 
-int ksmbd_query_inode_status(struct inode *inode)
+int ksmbd_query_inode_status(struct dentry *dentry)
 {
 	struct ksmbd_inode *ci;
 	int ret = KSMBD_INODE_STATUS_UNKNOWN;
 
 	read_lock(&inode_hash_lock);
-	ci = __ksmbd_inode_lookup(inode);
+	ci = __ksmbd_inode_lookup(dentry);
 	if (ci) {
 		ret = KSMBD_INODE_STATUS_OK;
-		if (ci->m_flags & S_DEL_PENDING)
+		if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS))
 			ret = KSMBD_INODE_STATUS_PENDING_DELETE;
 		atomic_dec(&ci->m_count);
 	}
@@ -116,7 +117,7 @@
 
 bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
 {
-	return (fp->f_ci->m_flags & S_DEL_PENDING);
+	return (fp->f_ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS));
 }
 
 void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
@@ -143,7 +144,7 @@
 static void ksmbd_inode_hash(struct ksmbd_inode *ci)
 {
 	struct hlist_head *b = inode_hashtable +
-		inode_hash(ci->m_inode->i_sb, ci->m_inode->i_ino);
+		inode_hash(d_inode(ci->m_de)->i_sb, (unsigned long)ci->m_de);
 
 	hlist_add_head(&ci->m_hash, b);
 }
@@ -157,7 +158,6 @@
 
 static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
 {
-	ci->m_inode = file_inode(fp->filp);
 	atomic_set(&ci->m_count, 1);
 	atomic_set(&ci->op_count, 0);
 	atomic_set(&ci->sop_count, 0);
@@ -166,6 +166,7 @@
 	INIT_LIST_HEAD(&ci->m_fp_list);
 	INIT_LIST_HEAD(&ci->m_op_list);
 	rwlock_init(&ci->m_lock);
+	ci->m_de = fp->filp->f_path.dentry;
 	return 0;
 }
 
@@ -209,7 +210,7 @@
 	kfree(ci);
 }
 
-static void ksmbd_inode_put(struct ksmbd_inode *ci)
+void ksmbd_inode_put(struct ksmbd_inode *ci)
 {
 	if (atomic_dec_and_test(&ci->m_count))
 		ksmbd_inode_free(ci);
@@ -325,6 +326,9 @@
 		locks_free_lock(smb_lock->fl);
 		kfree(smb_lock);
 	}
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	kfree(fp->filename);
+#endif
 
 	if (ksmbd_stream_fd(fp))
 		kfree(fp->stream.name);
@@ -333,6 +337,9 @@
 
 static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp)
 {
+	if (fp->f_state != FP_INITED)
+		return NULL;
+
 	if (!atomic_inc_not_zero(&fp->refcount))
 		return NULL;
 	return fp;
@@ -382,15 +389,20 @@
 		return 0;
 
 	ft = &work->sess->file_table;
-	read_lock(&ft->lock);
+	write_lock(&ft->lock);
 	fp = idr_find(ft->idr, id);
 	if (fp) {
 		set_close_state_blocked_works(fp);
 
+		if (fp->f_state != FP_INITED)
+			fp = NULL;
+		else {
+			fp->f_state = FP_CLOSED;
 		if (!atomic_dec_and_test(&fp->refcount))
 			fp = NULL;
 	}
-	read_unlock(&ft->lock);
+	}
+	write_unlock(&ft->lock);
 
 	if (!fp)
 		return -EINVAL;
@@ -480,12 +492,61 @@
 	return fp;
 }
 
-struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode)
+#ifdef CONFIG_SMB_INSECURE_SERVER
+struct ksmbd_file *ksmbd_lookup_fd_filename(struct ksmbd_work *work, char *filename)
+{
+	struct ksmbd_file	*fp = NULL;
+	unsigned int		id;
+	char			*pathname;
+
+	pathname = kmalloc(PATH_MAX, GFP_KERNEL);
+	if (!pathname)
+		return NULL;
+
+	read_lock(&work->sess->file_table.lock);
+	idr_for_each_entry(work->sess->file_table.idr, fp, id) {
+		char *path = d_path(&fp->filp->f_path, pathname, PATH_MAX);
+
+		if (IS_ERR(path))
+			break;
+
+		if (!strcmp(path, filename)) {
+			fp = ksmbd_fp_get(fp);
+			break;
+		}
+	}
+	read_unlock(&work->sess->file_table.lock);
+
+	kfree(pathname);
+	return fp;
+}
+
+int ksmbd_file_table_flush(struct ksmbd_work *work)
+{
+	struct ksmbd_file	*fp = NULL;
+	unsigned int		id;
+	int			ret;
+
+	read_lock(&work->sess->file_table.lock);
+	idr_for_each_entry(work->sess->file_table.idr, fp,id) {
+		ret = ksmbd_vfs_fsync(work, fp->volatile_id, KSMBD_NO_FID);
+		if (ret)
+			break;
+	}
+	read_unlock(&work->sess->file_table.lock);
+	return ret;
+}
+#endif
+
+struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry)
 {
 	struct ksmbd_file	*lfp;
 	struct ksmbd_inode	*ci;
+	struct inode		*inode = d_inode(dentry);
 
-	ci = ksmbd_inode_lookup_by_vfsinode(inode);
+	read_lock(&inode_hash_lock);
+	ci = __ksmbd_inode_lookup(dentry);
+	read_unlock(&inode_hash_lock);
 	if (!ci)
 		return NULL;
 
@@ -527,7 +588,13 @@
 
 	idr_preload(GFP_KERNEL);
 	write_lock(&ft->lock);
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	ret = idr_alloc_cyclic(ft->idr, fp, 0,
+			       IS_SMB2(fp->conn) ? INT_MAX - 1 : 0xFFFF,
+			       GFP_NOWAIT);
+#else
 	ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT);
+#endif
 	if (ret >= 0) {
 		id = ret;
 		ret = 0;
@@ -570,6 +637,7 @@
 	fp->tcon		= work->tcon;
 	fp->volatile_id		= KSMBD_NO_FID;
 	fp->persistent_id	= KSMBD_NO_FID;
+	fp->f_state		= FP_NEW;
 	fp->f_ci		= ksmbd_inode_get(fp);
 
 	if (!fp->f_ci) {
@@ -591,6 +659,17 @@
 	return ERR_PTR(ret);
 }
 
+void ksmbd_update_fstate(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
+			 unsigned int state)
+{
+	if (!fp)
+		return;
+
+	write_lock(&ft->lock);
+	fp->f_state = state;
+	write_unlock(&ft->lock);
+}
+
 static int
 __close_file_table_ids(struct ksmbd_file_table *ft,
 		       struct ksmbd_tree_connect *tcon,
diff -ruw linux-6.4/fs/smb/server/vfs_cache.h linux-6.4-fbx/fs/smb/server/vfs_cache.h
--- linux-6.4/fs/smb/server/vfs_cache.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/fs/smb/server/vfs_cache.h	2024-01-25 13:36:32.722984777 +0100
@@ -51,7 +51,7 @@
 	atomic_t			op_count;
 	/* opinfo count for streams */
 	atomic_t			sop_count;
-	struct inode			*m_inode;
+	struct dentry			*m_de;
 	unsigned int			m_flags;
 	struct hlist_node		m_hash;
 	struct list_head		m_fp_list;
@@ -60,6 +60,12 @@
 	__le32				m_fattr;
 };
 
+enum {
+	FP_NEW = 0,
+	FP_INITED,
+	FP_CLOSED
+};
+
 struct ksmbd_file {
 	struct file			*filp;
 	u64				persistent_id;
@@ -95,9 +101,25 @@
 
 	int				durable_timeout;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	/* for SMB1 */
+	int				pid;
+
+	/* conflict lock fail count for SMB1 */
+	unsigned int			cflock_cnt;
+	/* last lock failure start offset for SMB1 */
+	unsigned long long		llock_fstart;
+
+	int				dirent_offset;
+
+	/* for find_first/find_next */
+	char				*filename;
+#endif
 	/* if ls is happening on directory, below is valid*/
 	struct ksmbd_readdir_data	readdir_data;
 	int				dot_dotdot[2];
+	unsigned int			f_state;
+	bool				reserve_lease_break;
 };
 
 static inline void set_ctx_actor(struct dir_context *ctx,
@@ -131,9 +153,15 @@
 struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
 					u64 pid);
 void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp);
+struct ksmbd_inode *ksmbd_inode_lookup_lock(struct dentry *d);
+void ksmbd_inode_put(struct ksmbd_inode *ci);
 struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id);
 struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid);
-struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode);
+#ifdef CONFIG_SMB_INSECURE_SERVER
+struct ksmbd_file *ksmbd_lookup_fd_filename(struct ksmbd_work *work, char *filename);
+int ksmbd_file_table_flush(struct ksmbd_work *work);
+#endif
+struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry);
 unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp);
 struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp);
 void ksmbd_close_tree_conn_fds(struct ksmbd_work *work);
@@ -142,6 +170,8 @@
 int ksmbd_init_global_file_table(void);
 void ksmbd_free_global_file_table(void);
 void ksmbd_set_fd_limit(unsigned long limit);
+void ksmbd_update_fstate(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
+			 unsigned int state);
 
 /*
  * INODE hash
@@ -155,7 +185,7 @@
 	KSMBD_INODE_STATUS_PENDING_DELETE,
 };
 
-int ksmbd_query_inode_status(struct inode *inode);
+int ksmbd_query_inode_status(struct dentry *dentry);
 bool ksmbd_inode_pending_delete(struct ksmbd_file *fp);
 void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp);
 void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp);
diff -ruw linux-6.4/include/asm-generic/vmlinux.lds.h linux-6.4-fbx/include/asm-generic/vmlinux.lds.h
--- linux-6.4/include/asm-generic/vmlinux.lds.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/asm-generic/vmlinux.lds.h	2023-06-27 11:47:16.059866727 +0200
@@ -350,7 +350,7 @@
 #define KERNEL_DTB()							\
 	STRUCT_ALIGN();							\
 	__dtb_start = .;						\
-	KEEP(*(.dtb.init.rodata))					\
+	KEEP(*(.dtb.rodata))						\
 	__dtb_end = .;
 
 /*
@@ -462,6 +462,7 @@
 	. = ALIGN((align));						\
 	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
 		__start_rodata = .;					\
+		KERNEL_DTB()						\
 		*(.rodata) *(.rodata.*)					\
 		SCHED_DATA						\
 		RO_AFTER_INIT_DATA	/* Read only after init */	\
@@ -703,7 +704,6 @@
 	TIMER_OF_TABLES()						\
 	CPU_METHOD_OF_TABLES()						\
 	CPUIDLE_METHOD_OF_TABLES()					\
-	KERNEL_DTB()							\
 	IRQCHIP_OF_MATCH_TABLE()					\
 	ACPI_PROBE_TABLE(irqchip)					\
 	ACPI_PROBE_TABLE(timer)						\
diff -ruw linux-6.4/include/dt-bindings/clock/qcom,ipq9574-gcc.h linux-6.4-fbx/include/dt-bindings/clock/qcom,ipq9574-gcc.h
--- linux-6.4/include/dt-bindings/clock/qcom,ipq9574-gcc.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/dt-bindings/clock/qcom,ipq9574-gcc.h	2023-06-09 19:10:10.096815520 +0200
@@ -210,4 +210,15 @@
 #define GCC_SNOC_PCIE1_1LANE_S_CLK			201
 #define GCC_SNOC_PCIE2_2LANE_S_CLK			202
 #define GCC_SNOC_PCIE3_2LANE_S_CLK			203
+#define GCC_PCIE0_PIPE_CLK				204
+#define GCC_PCIE1_PIPE_CLK				205
+#define GCC_PCIE2_PIPE_CLK				206
+#define GCC_PCIE3_PIPE_CLK				207
+#define GCC_USB0_PIPE_CLK				208
+#define GCC_USB0_SLEEP_CLK				209
+#define GCC_LPASS_CORE_AXIM_CLK				210
+#define GCC_PCNOC_LPASS_CLK				211
+#define GCC_LPASS_SWAY_CLK				212
+#define GCC_SNOC_LPASS_CFG_CLK				213
 #endif
+
diff -ruw linux-6.4/include/dt-bindings/input/linux-event-codes.h linux-6.4-fbx/include/dt-bindings/input/linux-event-codes.h
--- linux-6.4/include/dt-bindings/input/linux-event-codes.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/dt-bindings/input/linux-event-codes.h	2023-05-22 20:06:44.879870719 +0200
@@ -803,6 +803,18 @@
 #define BTN_TRIGGER_HAPPY39		0x2e6
 #define BTN_TRIGGER_HAPPY40		0x2e7
 
+#define KEY_APP_TV			0x2f1
+#define KEY_APP_REPLAY			0x2f2
+#define KEY_APP_VIDEOCLUB		0x2f3
+#define KEY_APP_WHATSON			0x2f4
+#define KEY_APP_RECORDS			0x2f5
+#define KEY_APP_MEDIA			0x2f6
+#define KEY_APP_YOUTUBE			0x2f7
+#define KEY_APP_RADIOS			0x2f8
+#define KEY_APP_CANALVOD		0x2f9
+#define KEY_APP_PIP			0x2fa
+#define KEY_APP_NETFLIX			0x2fb
+
 /* We avoid low common keys in module aliases so they don't get huge. */
 #define KEY_MIN_INTERESTING	KEY_MUTE
 #define KEY_MAX			0x2ff
diff -ruw linux-6.4/include/linux/brcmphy.h linux-6.4-fbx/include/linux/brcmphy.h
--- linux-6.4/include/linux/brcmphy.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/brcmphy.h	2023-05-22 20:06:44.511860930 +0200
@@ -34,6 +34,10 @@
 #define PHY_ID_BCM72113			0x35905310
 #define PHY_ID_BCM72116			0x35905350
 #define PHY_ID_BCM72165			0x35905340
+
+#define PHY_ID_BCM63138			0x600d85c0
+#define PHY_ID_BCM63138S		0x0143bff0
+
 #define PHY_ID_BCM7250			0xae025280
 #define PHY_ID_BCM7255			0xae025120
 #define PHY_ID_BCM7260			0xae025190
@@ -56,6 +60,8 @@
 #define PHY_ID_BCM_CYGNUS		0xae025200
 #define PHY_ID_BCM_OMEGA		0xae025100
 
+#define PHY_ID_BCM63158			0xae0251c1
+
 #define PHY_BCM_OUI_MASK		0xfffffc00
 #define PHY_BCM_OUI_1			0x00206000
 #define PHY_BCM_OUI_2			0x0143bc00
diff -ruw linux-6.4/include/linux/compiler_attributes.h linux-6.4-fbx/include/linux/compiler_attributes.h
--- linux-6.4/include/linux/compiler_attributes.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/compiler_attributes.h	2023-11-07 13:38:44.050256364 +0100
@@ -89,6 +89,19 @@
 #endif
 
 /*
+ * Optional: only supported since gcc >= 14
+ * Optional: only supported since clang >= 18
+ *
+ *   gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896
+ * clang: https://reviews.llvm.org/D148381
+ */
+#if __has_attribute(__counted_by__)
+# define __counted_by(member)		__attribute__((__counted_by__(member)))
+#else
+# define __counted_by(member)
+#endif
+
+/*
  * Optional: not supported by gcc
  * Optional: only supported since clang >= 14.0
  *
diff -ruw linux-6.4/include/linux/ethtool.h linux-6.4-fbx/include/linux/ethtool.h
--- linux-6.4/include/linux/ethtool.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/ethtool.h	2023-05-31 17:11:03.421680714 +0200
@@ -912,6 +912,15 @@
 	int	(*set_mm)(struct net_device *dev, struct ethtool_mm_cfg *cfg,
 			  struct netlink_ext_ack *extack);
 	void	(*get_mm_stats)(struct net_device *dev, struct ethtool_mm_stats *stats);
+	int	(*set_shaper_param)(struct net_device *,
+				    const struct ethtool_shaper_params *);
+	int	(*get_shaper_param)(struct net_device *,
+				    struct ethtool_shaper_params *);
+	int	(*get_epon_param)(struct net_device *,
+				  struct ethtool_epon_param *);
+	int	(*set_epon_param)(struct net_device *,
+				  const struct ethtool_epon_param *);
+	struct phylink *(*get_phylink)(struct net_device *);
 };
 
 int ethtool_check_ops(const struct ethtool_ops *ops);
diff -ruw linux-6.4/include/linux/firmware/qcom/qcom_scm.h linux-6.4-fbx/include/linux/firmware/qcom/qcom_scm.h
--- linux-6.4/include/linux/firmware/qcom/qcom_scm.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/firmware/qcom/qcom_scm.h	2023-07-20 17:19:14.406360259 +0200
@@ -26,6 +26,12 @@
 	int perm;
 };
 
+struct fuse_blow {
+	dma_addr_t address;
+	size_t size;
+	unsigned long *status;
+};
+
 enum qcom_scm_ocmem_client {
 	QCOM_SCM_OCMEM_UNUSED_ID = 0x0,
 	QCOM_SCM_OCMEM_GRAPHICS_ID,
@@ -59,6 +65,14 @@
 #define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE)
 #define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
 
+#define QTI_SCM_SVC_FUSE		0x8
+#define QTI_KERNEL_AUTH_CMD		0x15
+#define TZ_BLOW_FUSE_SECDAT             0x20
+#define QTI_KERNEL_META_AUTH_CMD	0x23
+#define FUSEPROV_SUCCESS                0x0
+#define FUSEPROV_INVALID_HASH           0x09
+#define FUSEPROV_SECDAT_LOCK_BLOWN      0xB
+
 extern bool qcom_scm_is_available(void);
 
 extern int qcom_scm_set_cold_boot_addr(void *entry);
@@ -122,4 +136,6 @@
 extern int qcom_scm_lmh_profile_change(u32 profile_id);
 extern bool qcom_scm_lmh_dcvsh_available(void);
 
+int qti_fuseipq_scm_call(struct device *dev, const struct fuse_blow *fuse_blow);
+
 #endif
diff -ruw linux-6.4/include/linux/ieee80211.h linux-6.4-fbx/include/linux/ieee80211.h
--- linux-6.4/include/linux/ieee80211.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/ieee80211.h	2024-03-18 14:40:14.867741770 +0100
@@ -307,6 +307,13 @@
 #define IEEE80211_TRIGGER_TYPE_BQRP		0x6
 #define IEEE80211_TRIGGER_TYPE_NFRP		0x7
 
+/* UL-bandwidth within common_info of trigger frame */
+#define IEEE80211_TRIGGER_ULBW_MASK		0xc0000
+#define IEEE80211_TRIGGER_ULBW_20MHZ		0x0
+#define IEEE80211_TRIGGER_ULBW_40MHZ		0x1
+#define IEEE80211_TRIGGER_ULBW_80MHZ		0x2
+#define IEEE80211_TRIGGER_ULBW_160_80P80MHZ	0x3
+
 struct ieee80211_hdr {
 	__le16 frame_control;
 	__le16 duration_id;
@@ -836,9 +843,14 @@
 };
 
 /**
- * struct ieee80211_quiet_ie
+ * struct ieee80211_quiet_ie - Quiet element
+ * @count: Quiet Count
+ * @period: Quiet Period
+ * @duration: Quiet Duration
+ * @offset: Quiet Offset
  *
- * This structure refers to "Quiet information element"
+ * This structure represents the payload of the "Quiet element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.22.
  */
 struct ieee80211_quiet_ie {
 	u8 count;
@@ -848,9 +860,15 @@
 } __packed;
 
 /**
- * struct ieee80211_msrment_ie
- *
- * This structure refers to "Measurement Request/Report information element"
+ * struct ieee80211_msrment_ie - Measurement element
+ * @token: Measurement Token
+ * @mode: Measurement Report Mode
+ * @type: Measurement Type
+ * @request: Measurement Request or Measurement Report
+ *
+ * This structure represents the payload of both the "Measurement
+ * Request element" and the "Measurement Report element" as described
+ * in IEEE Std 802.11-2020 sections 9.4.2.20 and 9.4.2.21.
  */
 struct ieee80211_msrment_ie {
 	u8 token;
@@ -860,9 +878,14 @@
 } __packed;
 
 /**
- * struct ieee80211_channel_sw_ie
- *
- * This structure refers to "Channel Switch Announcement information element"
+ * struct ieee80211_channel_sw_ie - Channel Switch Announcement element
+ * @mode: Channel Switch Mode
+ * @new_ch_num: New Channel Number
+ * @count: Channel Switch Count
+ *
+ * This structure represents the payload of the "Channel Switch
+ * Announcement element" as described in IEEE Std 802.11-2020 section
+ * 9.4.2.18.
  */
 struct ieee80211_channel_sw_ie {
 	u8 mode;
@@ -871,9 +894,14 @@
 } __packed;
 
 /**
- * struct ieee80211_ext_chansw_ie
+ * struct ieee80211_ext_chansw_ie - Extended Channel Switch Announcement element
+ * @mode: Channel Switch Mode
+ * @new_operating_class: New Operating Class
+ * @new_ch_num: New Channel Number
+ * @count: Channel Switch Count
  *
- * This structure represents the "Extended Channel Switch Announcement element"
+ * This structure represents the "Extended Channel Switch Announcement
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.52.
  */
 struct ieee80211_ext_chansw_ie {
 	u8 mode;
@@ -894,8 +922,14 @@
 
 /**
  * struct ieee80211_mesh_chansw_params_ie - mesh channel switch parameters IE
- *
- * This structure represents the "Mesh Channel Switch Paramters element"
+ * @mesh_ttl: Time To Live
+ * @mesh_flags: Flags
+ * @mesh_reason: Reason Code
+ * @mesh_pre_value: Precedence Value
+ *
+ * This structure represents the payload of the "Mesh Channel Switch
+ * Parameters element" as described in IEEE Std 802.11-2020 section
+ * 9.4.2.102.
  */
 struct ieee80211_mesh_chansw_params_ie {
 	u8 mesh_ttl;
@@ -906,6 +940,13 @@
 
 /**
  * struct ieee80211_wide_bw_chansw_ie - wide bandwidth channel switch IE
+ * @new_channel_width: New Channel Width
+ * @new_center_freq_seg0: New Channel Center Frequency Segment 0
+ * @new_center_freq_seg1: New Channel Center Frequency Segment 1
+ *
+ * This structure represents the payload of the "Wide Bandwidth
+ * Channel Switch element" as described in IEEE Std 802.11-2020
+ * section 9.4.2.160.
  */
 struct ieee80211_wide_bw_chansw_ie {
 	u8 new_channel_width;
@@ -913,22 +954,42 @@
 } __packed;
 
 /**
- * struct ieee80211_tim
- *
- * This structure refers to "Traffic Indication Map information element"
+ * struct ieee80211_tim_ie - Traffic Indication Map information element
+ * @dtim_count: DTIM Count
+ * @dtim_period: DTIM Period
+ * @bitmap_ctrl: Bitmap Control
+ * @required_octet: "Syntatic sugar" to force the struct size to the
+ *                  minimum valid size when carried in a non-S1G PPDU
+ * @virtual_map: Partial Virtual Bitmap
+ *
+ * This structure represents the payload of the "TIM element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.5. Note that this
+ * definition is only applicable when the element is carried in a
+ * non-S1G PPDU. When the TIM is carried in an S1G PPDU, the Bitmap
+ * Control and Partial Virtual Bitmap may not be present.
  */
 struct ieee80211_tim_ie {
 	u8 dtim_count;
 	u8 dtim_period;
 	u8 bitmap_ctrl;
-	/* variable size: 1 - 251 bytes */
-	u8 virtual_map[1];
+	union {
+		u8 required_octet;
+		DECLARE_FLEX_ARRAY(u8, virtual_map);
+	};
 } __packed;
 
 /**
- * struct ieee80211_meshconf_ie
+ * struct ieee80211_meshconf_ie - Mesh Configuration element
+ * @meshconf_psel: Active Path Selection Protocol Identifier
+ * @meshconf_pmetric: Active Path Selection Metric Identifier
+ * @meshconf_congest: Congestion Control Mode Identifier
+ * @meshconf_synch: Synchronization Method Identifier
+ * @meshconf_auth: Authentication Protocol Identifier
+ * @meshconf_form: Mesh Formation Info
+ * @meshconf_cap: Mesh Capability (see &enum mesh_config_capab_flags)
  *
- * This structure refers to "Mesh Configuration information element"
+ * This structure represents the payload of the "Mesh Configuration
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.97.
  */
 struct ieee80211_meshconf_ie {
 	u8 meshconf_psel;
@@ -950,6 +1011,9 @@
  *	is ongoing
  * @IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL: STA is in deep sleep mode or has
  *	neighbors in deep sleep mode
+ *
+ * Enumerates the "Mesh Capability" as described in IEEE Std
+ * 802.11-2020 section 9.4.2.97.7.
  */
 enum mesh_config_capab_flags {
 	IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS		= 0x01,
@@ -960,7 +1024,7 @@
 
 #define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1
 
-/**
+/*
  * mesh channel switch parameters element's flag indicator
  *
  */
@@ -969,9 +1033,17 @@
 #define WLAN_EID_CHAN_SWITCH_PARAM_REASON BIT(2)
 
 /**
- * struct ieee80211_rann_ie
+ * struct ieee80211_rann_ie - RANN (root announcement) element
+ * @rann_flags: Flags
+ * @rann_hopcount: Hop Count
+ * @rann_ttl: Element TTL
+ * @rann_addr: Root Mesh STA Address
+ * @rann_seq: HWMP Sequence Number
+ * @rann_interval: Interval
+ * @rann_metric: Metric
  *
- * This structure refers to "Root Announcement information element"
+ * This structure represents the payload of the "RANN element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.111.
  */
 struct ieee80211_rann_ie {
 	u8 rann_flags;
@@ -993,7 +1065,7 @@
 };
 
 /**
- * enum ieee80211_opmode_bits - VHT operating mode field bits
+ * enum ieee80211_vht_opmode_bits - VHT operating mode field bits
  * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK: channel width mask
  * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: 20 MHz channel width
  * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width
@@ -1042,9 +1114,12 @@
 #define WLAN_USER_POSITION_LEN 16
 
 /**
- * struct ieee80211_tpc_report_ie
+ * struct ieee80211_tpc_report_ie - TPC Report element
+ * @tx_power: Transmit Power
+ * @link_margin: Link Margin
  *
- * This structure refers to "TPC Report element"
+ * This structure represents the payload of the "TPC Report element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.16.
  */
 struct ieee80211_tpc_report_ie {
 	u8 tx_power;
@@ -1062,9 +1137,14 @@
 } __packed;
 
 /**
- * struct ieee80211_s1g_bcn_compat_ie
- *
- * S1G Beacon Compatibility element
+ * struct ieee80211_s1g_bcn_compat_ie - S1G Beacon Compatibility element
+ * @compat_info: Compatibility Information
+ * @beacon_int: Beacon Interval
+ * @tsf_completion: TSF Completion
+ *
+ * This structure represents the payload of the "S1G Beacon
+ * Compatibility element" as described in IEEE Std 802.11-2020 section
+ * 9.4.2.196.
  */
 struct ieee80211_s1g_bcn_compat_ie {
 	__le16 compat_info;
@@ -1073,9 +1153,15 @@
 } __packed;
 
 /**
- * struct ieee80211_s1g_oper_ie
+ * struct ieee80211_s1g_oper_ie - S1G Operation element
+ * @ch_width: S1G Operation Information Channel Width
+ * @oper_class: S1G Operation Information Operating Class
+ * @primary_ch: S1G Operation Information Primary Channel Number
+ * @oper_ch: S1G Operation Information  Channel Center Frequency
+ * @basic_mcs_nss: Basic S1G-MCS and NSS Set
  *
- * S1G Operation element
+ * This structure represents the payload of the "S1G Operation
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.212.
  */
 struct ieee80211_s1g_oper_ie {
 	u8 ch_width;
@@ -1086,9 +1172,13 @@
 } __packed;
 
 /**
- * struct ieee80211_aid_response_ie
+ * struct ieee80211_aid_response_ie - AID Response element
+ * @aid: AID/Group AID
+ * @switch_count: AID Switch Count
+ * @response_int: AID Response Interval
  *
- * AID Response element
+ * This structure represents the payload of the "AID Response element"
+ * as described in IEEE Std 802.11-2020 section 9.4.2.194.
  */
 struct ieee80211_aid_response_ie {
 	__le16 aid;
@@ -1163,6 +1253,30 @@
 	u8 params[];
 } __packed;
 
+#define IEEE80211_TTLM_MAX_CNT				2
+#define IEEE80211_TTLM_CONTROL_DIRECTION		0x03
+#define IEEE80211_TTLM_CONTROL_DEF_LINK_MAP		0x04
+#define IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT	0x08
+#define IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT	0x10
+#define IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE		0x20
+
+#define IEEE80211_TTLM_DIRECTION_DOWN		0
+#define IEEE80211_TTLM_DIRECTION_UP		1
+#define IEEE80211_TTLM_DIRECTION_BOTH		2
+
+/**
+ * struct ieee80211_ttlm_elem - TID-To-Link Mapping element
+ *
+ * Defined in section 9.4.2.314 in P802.11be_D4
+ *
+ * @control: the first part of control field
+ * @optional: the second part of control field
+ */
+struct ieee80211_ttlm_elem {
+	u8 control;
+	u8 optional[];
+} __packed;
+
 struct ieee80211_mgmt {
 	__le16 frame_control;
 	__le16 duration;
@@ -1349,8 +1463,11 @@
 /* Supported rates membership selectors */
 #define BSS_MEMBERSHIP_SELECTOR_HT_PHY	127
 #define BSS_MEMBERSHIP_SELECTOR_VHT_PHY	126
-#define BSS_MEMBERSHIP_SELECTOR_HE_PHY	122
+#define BSS_MEMBERSHIP_SELECTOR_GLK	125
+#define BSS_MEMBERSHIP_SELECTOR_EPS	124
 #define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123
+#define BSS_MEMBERSHIP_SELECTOR_HE_PHY	122
+#define BSS_MEMBERSHIP_SELECTOR_EHT_PHY	121
 
 /* mgmt header + 1 byte category code */
 #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
@@ -1486,7 +1603,7 @@
 /*
  * Peer-to-Peer IE attribute related definitions.
  */
-/**
+/*
  * enum ieee80211_p2p_attr_id - identifies type of peer-to-peer attribute.
  */
 enum ieee80211_p2p_attr_id {
@@ -1536,10 +1653,16 @@
 #define IEEE80211_P2P_OPPPS_CTWINDOW_MASK	0x7F
 
 /**
- * struct ieee80211_bar - HT Block Ack Request
+ * struct ieee80211_bar - Block Ack Request frame format
+ * @frame_control: Frame Control
+ * @duration: Duration
+ * @ra: RA
+ * @ta: TA
+ * @control: BAR Control
+ * @start_seq_num: Starting Sequence Number (see Figure 9-37)
  *
- * This structure refers to "HT BlockAckReq" as
- * described in 802.11n draft section 7.2.1.7.1
+ * This structure represents the "BlockAckReq frame format"
+ * as described in IEEE Std 802.11-2020 section 9.3.1.7.
  */
 struct ieee80211_bar {
 	__le16 frame_control;
@@ -1560,13 +1683,17 @@
 #define IEEE80211_HT_MCS_MASK_LEN		10
 
 /**
- * struct ieee80211_mcs_info - MCS information
+ * struct ieee80211_mcs_info - Supported MCS Set field
  * @rx_mask: RX mask
  * @rx_highest: highest supported RX rate. If set represents
  *	the highest supported RX data rate in units of 1 Mbps.
  *	If this field is 0 this value should not be used to
  *	consider the highest RX data rate supported.
  * @tx_params: TX parameters
+ * @reserved: Reserved bits
+ *
+ * This structure represents the "Supported MCS Set field" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.55.4.
  */
 struct ieee80211_mcs_info {
 	u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN];
@@ -1585,6 +1712,8 @@
 #define		IEEE80211_HT_MCS_TX_MAX_STREAMS	4
 #define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION	0x10
 
+#define IEEE80211_HT_MCS_CHAINS(mcs) ((mcs) == 32 ? 1 : (1 + ((mcs) >> 3)))
+
 /*
  * 802.11n D5.0 20.3.5 / 20.6 says:
  * - indices 0 to 7 and 32 are single spatial stream
@@ -1597,10 +1726,16 @@
 	(IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8)
 
 /**
- * struct ieee80211_ht_cap - HT capabilities
+ * struct ieee80211_ht_cap - HT capabilities element
+ * @cap_info: HT Capability Information
+ * @ampdu_params_info: A-MPDU Parameters
+ * @mcs: Supported MCS Set
+ * @extended_ht_cap_info: HT Extended Capabilities
+ * @tx_BF_cap_info: Transmit Beamforming Capabilities
+ * @antenna_selection_info: ASEL Capability
  *
- * This structure is the "HT capabilities element" as
- * described in 802.11n D5.0 7.3.2.57
+ * This structure represents the payload of the "HT Capabilities
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.55.
  */
 struct ieee80211_ht_cap {
 	__le16 cap_info;
@@ -1688,9 +1823,14 @@
 
 /**
  * struct ieee80211_ht_operation - HT operation IE
+ * @primary_chan: Primary Channel
+ * @ht_param: HT Operation Information parameters
+ * @operation_mode: HT Operation Information operation mode
+ * @stbc_param: HT Operation Information STBC params
+ * @basic_set: Basic HT-MCS Set
  *
- * This structure is the "HT operation element" as
- * described in 802.11n-2009 7.3.2.57
+ * This structure represents the payload of the "HT Operation
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.56.
  */
 struct ieee80211_ht_operation {
 	u8 primary_chan;
@@ -1859,9 +1999,12 @@
 
 /**
  * struct ieee80211_he_cap_elem - HE capabilities element
+ * @mac_cap_info: HE MAC Capabilities Information
+ * @phy_cap_info: HE PHY Capabilities Information
  *
- * This structure is the "HE capabilities element" fixed fields as
- * described in P802.11ax_D4.0 section 9.4.2.242.2 and 9.4.2.242.3
+ * This structure represents the fixed fields of the payload of the
+ * "HE capabilities element" as described in IEEE Std 802.11ax-2021
+ * sections 9.4.2.248.2 and 9.4.2.248.3.
  */
 struct ieee80211_he_cap_elem {
 	u8 mac_cap_info[6];
@@ -1920,35 +2063,45 @@
 } __packed;
 
 /**
- * struct ieee80211_he_operation - HE capabilities element
+ * struct ieee80211_he_operation - HE Operation element
+ * @he_oper_params: HE Operation Parameters + BSS Color Information
+ * @he_mcs_nss_set: Basic HE-MCS And NSS Set
+ * @optional: Optional fields VHT Operation Information, Max Co-Hosted
+ *            BSSID Indicator, and 6 GHz Operation Information
  *
- * This structure is the "HE operation element" fields as
- * described in P802.11ax_D4.0 section 9.4.2.243
+ * This structure represents the payload of the "HE Operation
+ * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.249.
  */
 struct ieee80211_he_operation {
 	__le32 he_oper_params;
 	__le16 he_mcs_nss_set;
-	/* Optional 0,1,3,4,5,7 or 8 bytes: depends on @he_oper_params */
 	u8 optional[];
 } __packed;
 
 /**
- * struct ieee80211_he_spr - HE spatial reuse element
- *
- * This structure is the "HE spatial reuse element" element as
- * described in P802.11ax_D4.0 section 9.4.2.241
+ * struct ieee80211_he_spr - Spatial Reuse Parameter Set element
+ * @he_sr_control: SR Control
+ * @optional: Optional fields Non-SRG OBSS PD Max Offset, SRG OBSS PD
+ *            Min Offset, SRG OBSS PD Max Offset, SRG BSS Color
+ *            Bitmap, and SRG Partial BSSID Bitmap
+ *
+ * This structure represents the payload of the "Spatial Reuse
+ * Parameter Set element" as described in IEEE Std 802.11ax-2021
+ * section 9.4.2.252.
  */
 struct ieee80211_he_spr {
 	u8 he_sr_control;
-	/* Optional 0 to 19 bytes: depends on @he_sr_control */
 	u8 optional[];
 } __packed;
 
 /**
  * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field
+ * @aifsn: ACI/AIFSN
+ * @ecw_min_max: ECWmin/ECWmax
+ * @mu_edca_timer: MU EDCA Timer
  *
- * This structure is the "MU AC Parameter Record" fields as
- * described in P802.11ax_D4.0 section 9.4.2.245
+ * This structure represents the "MU AC Parameter Record" as described
+ * in IEEE Std 802.11ax-2021 section 9.4.2.251, Figure 9-788p.
  */
 struct ieee80211_he_mu_edca_param_ac_rec {
 	u8 aifsn;
@@ -1958,9 +2111,14 @@
 
 /**
  * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element
+ * @mu_qos_info: QoS Info
+ * @ac_be: MU AC_BE Parameter Record
+ * @ac_bk: MU AC_BK Parameter Record
+ * @ac_vi: MU AC_VI Parameter Record
+ * @ac_vo: MU AC_VO Parameter Record
  *
- * This structure is the "MU EDCA Parameter Set element" fields as
- * described in P802.11ax_D4.0 section 9.4.2.245
+ * This structure represents the payload of the "MU EDCA Parameter Set
+ * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.251.
  */
 struct ieee80211_mu_edca_param_set {
 	u8 mu_qos_info;
@@ -1993,13 +2151,19 @@
  * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
  *     supported for reception and the maximum number of spatial streams
  *     supported for transmission for MCS 12 - 13.
+ * @rx_tx_max_nss: array of the previous fields for easier loop access
  */
 struct ieee80211_eht_mcs_nss_supp_20mhz_only {
+	union {
+		struct {
 	u8 rx_tx_mcs7_max_nss;
 	u8 rx_tx_mcs9_max_nss;
 	u8 rx_tx_mcs11_max_nss;
 	u8 rx_tx_mcs13_max_nss;
 };
+		u8 rx_tx_max_nss[4];
+	};
+};
 
 /**
  * struct ieee80211_eht_mcs_nss_supp_bw - EHT max supported NSS per MCS (except
@@ -2018,12 +2182,18 @@
  * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
  *     supported for reception and the maximum number of spatial streams
  *     supported for transmission for MCS 12 - 13.
+ * @rx_tx_max_nss: array of the previous fields for easier loop access
  */
 struct ieee80211_eht_mcs_nss_supp_bw {
+	union {
+		struct {
 	u8 rx_tx_mcs9_max_nss;
 	u8 rx_tx_mcs11_max_nss;
 	u8 rx_tx_mcs13_max_nss;
 };
+		u8 rx_tx_max_nss[3];
+	};
+};
 
 /**
  * struct ieee80211_eht_cap_elem_fixed - EHT capabilities fixed data
@@ -2075,7 +2245,7 @@
  */
 struct ieee80211_eht_operation {
 	u8 params;
-	__le32 basic_mcs_nss;
+	struct ieee80211_eht_mcs_nss_supp_20mhz_only basic_mcs_nss;
 	u8 optional[];
 } __packed;
 
@@ -2162,9 +2332,9 @@
  * enum ieee80211_ap_reg_power - regulatory power for a Access Point
  *
  * @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode
- * @IEEE80211_REG_LPI: Indoor Access Point
- * @IEEE80211_REG_SP: Standard power Access Point
- * @IEEE80211_REG_VLP: Very low power Access Point
+ * @IEEE80211_REG_LPI_AP: Indoor Access Point
+ * @IEEE80211_REG_SP_AP: Standard power Access Point
+ * @IEEE80211_REG_VLP_AP: Very low power Access Point
  * @IEEE80211_REG_AP_POWER_AFTER_LAST: internal
  * @IEEE80211_REG_AP_POWER_MAX: maximum value
  */
@@ -2552,7 +2722,7 @@
 #define IEEE80211_6GHZ_CTRL_REG_SP_AP	1
 
 /**
- * ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
+ * struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
  * @primary: primary channel
  * @control: control flags
  * @ccfs0: channel center frequency segment 0
@@ -2599,9 +2769,13 @@
 };
 
 /**
- * struct ieee80211_tx_pwr_env
- *
- * This structure represents the "Transmit Power Envelope element"
+ * struct ieee80211_tx_pwr_env - Transmit Power Envelope
+ * @tx_power_info: Transmit Power Information field
+ * @tx_power: Maximum Transmit Power field
+ *
+ * This structure represents the payload of the "Transmit Power
+ * Envelope element" as described in IEEE Std 802.11ax-2021 section
+ * 9.4.2.161
  */
 struct ieee80211_tx_pwr_env {
 	u8 tx_power_info;
@@ -2808,6 +2982,10 @@
 #define	IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454	        2
 
 #define IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK		0x01
+#define IEEE80211_EHT_MAC_CAP1_EHT_TRS                0x02
+#define IEEE80211_EHT_MAC_CAP1_TXOP_SHARE_MODE2       0x04
+#define IEEE80211_EHT_MAC_CAP1_TWO_BQRS_SUPP          0x08
+#define IEEE80211_EHT_MAC_CAP1_EHT_LINK_ADAPTATION_SUPP         0x30
 
 /* EHT PHY capabilities as defined in P802.11be_D2.0 section 9.4.2.313.3 */
 #define IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ			0x02
@@ -2856,6 +3034,7 @@
 
 /* Maximum number of supported EHT LTF is split */
 #define IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK	0xc0
+#define IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF		0x40
 #define IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK	0x07
 
 #define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK			0x78
@@ -2872,6 +3051,9 @@
 
 #define IEEE80211_EHT_PHY_CAP8_RX_1024QAM_WIDER_BW_DL_OFDMA	0x01
 #define IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA	0x02
+#define IEEE80211_EHT_PHY_CAP8_20MHZ_ONLY_CAPS                  0x04
+#define IEEE80211_EHT_PHY_CAP8_20MHZ_ONLY_TRIGGER_MUBF_FL_BW_FB_DLMUMIMO   0x08
+#define IEEE80211_EHT_PHY_CAP8_20MHZ_ONLY_MRU_SUPP                         0x10
 
 /*
  * EHT operation channel width as defined in P802.11be_D2.0 section 9.4.2.311
@@ -2918,6 +3100,7 @@
 #define IEEE80211_EHT_PPE_THRES_NSS_POS			0
 #define IEEE80211_EHT_PPE_THRES_NSS_MASK		0xf
 #define IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK	0x1f0
+#define IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_POS   4
 #define IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE		3
 #define IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE	9
 
@@ -2997,6 +3180,28 @@
 	return len >= needed;
 }
 
+#define IEEE80211_BW_IND_DIS_SUBCH_PRESENT	BIT(1)
+
+struct ieee80211_bandwidth_indication {
+	u8 params;
+	struct ieee80211_eht_operation_info info;
+} __packed;
+
+static inline bool
+ieee80211_bandwidth_indication_size_ok(const u8 *data, u8 len)
+{
+	const struct ieee80211_bandwidth_indication *bwi = (const void *)data;
+
+	if (len < sizeof(*bwi))
+		return false;
+
+	if (bwi->params & IEEE80211_BW_IND_DIS_SUBCH_PRESENT &&
+	    len < sizeof(*bwi) + 2)
+		return false;
+
+	return true;
+}
+
 #define LISTEN_INT_USF	GENMASK(15, 14)
 #define LISTEN_INT_UI	GENMASK(13, 0)
 
@@ -3454,6 +3659,8 @@
 	WLAN_EID_EXT_EHT_OPERATION = 106,
 	WLAN_EID_EXT_EHT_MULTI_LINK = 107,
 	WLAN_EID_EXT_EHT_CAPABILITY = 108,
+	WLAN_EID_EXT_TID_TO_LINK_MAPPING = 109,
+	WLAN_EID_EXT_BANDWIDTH_INDICATION = 135,
 };
 
 /* Action category code */
@@ -3493,6 +3700,16 @@
 	WLAN_ACTION_SPCT_CHL_SWITCH = 4,
 };
 
+/* RADIO_MEASUREMENT action code */
+enum ieee80211_radio_measurement_actioncode {
+	WLAN_ACTION_RADIO_MSR_RM_REQ = 0,
+	WLAN_ACTION_RADIO_MSR_RM_REP = 1,
+	WLAN_ACTION_RADIO_MSR_LINK_MSR_REQ = 2,
+	WLAN_ACTION_RADIO_MSR_LINK_MSR_REP = 3,
+	WLAN_ACTION_RADIO_MSR_NBOR_REP_REQ = 4,
+	WLAN_ACTION_RADIO_MSR_NBOR_REP_REP = 5,
+};
+
 /* HT action codes */
 enum ieee80211_ht_actioncode {
 	WLAN_HT_ACTION_NOTIFY_CHANWIDTH = 0,
@@ -4221,6 +4438,35 @@
 }
 
 /**
+ * ieee80211_is_protected_dual_of_public_action - check if skb contains a
+ * protected dual of public action management frame
+ * @skb: the skb containing the frame, length will be checked
+ *
+ * Return: true if the skb contains a protected dual of public action
+ * management frame, false otherwise.
+ */
+static inline bool
+ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
+{
+	u8 action;
+
+	if (!ieee80211_is_public_action((void *)skb->data, skb->len) ||
+	    skb->len < IEEE80211_MIN_ACTION_SIZE + 1)
+		return false;
+
+	action = *(u8 *)(skb->data + IEEE80211_MIN_ACTION_SIZE);
+
+	return action != WLAN_PUB_ACTION_20_40_BSS_COEX &&
+		action != WLAN_PUB_ACTION_DSE_REG_LOC_ANN &&
+		action != WLAN_PUB_ACTION_MSMT_PILOT &&
+		action != WLAN_PUB_ACTION_TDLS_DISCOVER_RES &&
+		action != WLAN_PUB_ACTION_LOC_TRACK_NOTI &&
+		action != WLAN_PUB_ACTION_FTM_REQUEST &&
+		action != WLAN_PUB_ACTION_FTM_RESPONSE &&
+		action != WLAN_PUB_ACTION_FILS_DISCOVERY;
+}
+
+/**
  * _ieee80211_is_group_privacy_action - check if frame is a group addressed
  * privacy action frame
  * @hdr: the frame
@@ -4291,12 +4537,11 @@
 /**
  * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet)
  * @skb: the skb containing the frame, length will not be checked
- * @hdr_size: the size of the ieee80211_hdr that starts at skb->data
  *
  * This function assumes the frame is a data frame, and that the network header
  * is in the correct place.
  */
-static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size)
+static inline int ieee80211_get_tdls_action(struct sk_buff *skb)
 {
 	if (!skb_is_nonlinear(skb) &&
 	    skb->len > (skb_network_offset(skb) + 2)) {
@@ -4462,7 +4707,7 @@
 	return (const u8 *)element == (const u8 *)data + datalen;
 }
 
-/**
+/*
  * RSNX Capabilities:
  * bits 0-3: Field length (n-1)
  */
@@ -4477,8 +4722,8 @@
 #define IEEE80211_AP_INFO_TBTT_HDR_FILTERED			0x04
 #define IEEE80211_AP_INFO_TBTT_HDR_COLOC			0x08
 #define IEEE80211_AP_INFO_TBTT_HDR_COUNT			0xF0
-#define IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM		9
-#define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM	13
+#define IEEE80211_TBTT_INFO_TYPE_TBTT				0
+#define IEEE80211_TBTT_INFO_TYPE_MLD				1
 
 #define IEEE80211_RNR_TBTT_PARAMS_OCT_RECOMMENDED		0x01
 #define IEEE80211_RNR_TBTT_PARAMS_SAME_SSID			0x02
@@ -4488,6 +4733,9 @@
 #define IEEE80211_RNR_TBTT_PARAMS_PROBE_ACTIVE			0x20
 #define IEEE80211_RNR_TBTT_PARAMS_COLOC_AP			0x40
 
+#define IEEE80211_RNR_TBTT_PARAMS_PSD_NO_LIMIT			127
+#define IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED			-128
+
 struct ieee80211_neighbor_ap_info {
 	u8 tbtt_info_hdr;
 	u8 tbtt_info_len;
@@ -4502,6 +4750,42 @@
 	IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_UNSPECIFIED,
 };
 
+/*
+ * reduced neighbor report, based on Draft P802.11be_D3.0,
+ * section 9.4.2.170.2.
+ */
+struct ieee80211_rnr_mld_params {
+	u8 mld_id;
+	__le16 params;
+} __packed;
+
+#define IEEE80211_RNR_MLD_PARAMS_LINK_ID			0x000F
+#define IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT		0x0FF0
+#define IEEE80211_RNR_MLD_PARAMS_UPDATES_INCLUDED		0x1000
+#define IEEE80211_RNR_MLD_PARAMS_DISABLED_LINK			0x2000
+
+/* Format of the TBTT information element if it has 7, 8 or 9 bytes */
+struct ieee80211_tbtt_info_7_8_9 {
+	u8 tbtt_offset;
+	u8 bssid[ETH_ALEN];
+
+	/* The following element is optional, structure may not grow */
+	u8 bss_params;
+	s8 psd_20;
+} __packed;
+
+/* Format of the TBTT information element if it has >= 11 bytes */
+struct ieee80211_tbtt_info_ge_11 {
+	u8 tbtt_offset;
+	u8 bssid[ETH_ALEN];
+	__le32 short_ssid;
+
+	/* The following elements are optional, structure may grow */
+	u8 bss_params;
+	s8 psd_20;
+	struct ieee80211_rnr_mld_params mld_params;
+} __packed;
+
 /* multi-link device */
 #define IEEE80211_MLD_MAX_NUM_LINKS	15
 
@@ -4529,6 +4813,14 @@
 #define IEEE80211_MED_SYNC_DELAY_SYNC_OFDM_ED_THRESH	0x0f00
 #define IEEE80211_MED_SYNC_DELAY_SYNC_MAX_NUM_TXOPS	0xf000
 
+/*
+ * Described in P802.11be_D3.0
+ * dot11MSDTimerDuration should default to 5484 (i.e. 171.375)
+ * dot11MSDOFDMEDthreshold defaults to -72 (i.e. 0)
+ * dot11MSDTXOPMAX defaults to 1
+ */
+#define IEEE80211_MED_SYNC_DELAY_DEFAULT		0x10ac
+
 #define IEEE80211_EML_CAP_EMLSR_SUPP			0x0001
 #define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY		0x000e
 #define  IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_0US		0
@@ -4611,15 +4903,12 @@
 	case IEEE80211_ML_CONTROL_TYPE_BASIC:
 	case IEEE80211_ML_CONTROL_TYPE_PREQ:
 	case IEEE80211_ML_CONTROL_TYPE_TDLS:
+	case IEEE80211_ML_CONTROL_TYPE_RECONF:
 		/*
 		 * The length is the first octet pointed by mle->variable so no
 		 * need to add anything
 		 */
 		break;
-	case IEEE80211_ML_CONTROL_TYPE_RECONF:
-		if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR)
-			common += ETH_ALEN;
-		return common;
 	case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
 		if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
 			common += ETH_ALEN;
@@ -4633,6 +4922,95 @@
 }
 
 /**
+ * ieee80211_mle_get_bss_param_ch_cnt - returns the BSS parameter change count
+ * @mle: the basic multi link element
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the BSS parameter change count value can't be found (the presence bit
+ * for it is clear), 0 will be returned.
+ */
+static inline u8
+ieee80211_mle_get_bss_param_ch_cnt(const struct ieee80211_multi_link_elem *mle)
+{
+	u16 control = le16_to_cpu(mle->control);
+	const u8 *common = mle->variable;
+
+	/* common points now at the beginning of ieee80211_mle_basic_common_info */
+	common += sizeof(struct ieee80211_mle_basic_common_info);
+
+	if (!(control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT))
+		return 0;
+
+	if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+		common += 1;
+
+	return *common;
+}
+
+/**
+ * ieee80211_mle_get_eml_med_sync_delay - returns the medium sync delay
+ * @data: pointer to the multi link EHT IE
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the medium synchronization is not present, then the default value is
+ * returned.
+ */
+static inline u16 ieee80211_mle_get_eml_med_sync_delay(const u8 *data)
+{
+	const struct ieee80211_multi_link_elem *mle = (const void *)data;
+	u16 control = le16_to_cpu(mle->control);
+	const u8 *common = mle->variable;
+
+	/* common points now at the beginning of ieee80211_mle_basic_common_info */
+	common += sizeof(struct ieee80211_mle_basic_common_info);
+
+	if (!(control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY))
+		return IEEE80211_MED_SYNC_DELAY_DEFAULT;
+
+	if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+		common += 1;
+	if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+		common += 1;
+
+	return get_unaligned_le16(common);
+}
+
+/**
+ * ieee80211_mle_get_eml_cap - returns the EML capability
+ * @data: pointer to the multi link EHT IE
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the EML capability is not present, 0 will be returned.
+ */
+static inline u16 ieee80211_mle_get_eml_cap(const u8 *data)
+{
+	const struct ieee80211_multi_link_elem *mle = (const void *)data;
+	u16 control = le16_to_cpu(mle->control);
+	const u8 *common = mle->variable;
+
+	/* common points now at the beginning of ieee80211_mle_basic_common_info */
+	common += sizeof(struct ieee80211_mle_basic_common_info);
+
+	if (!(control & IEEE80211_MLC_BASIC_PRES_EML_CAPA))
+		return 0;
+
+	if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+		common += 1;
+	if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+		common += 1;
+	if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+		common += 2;
+
+	return get_unaligned_le16(common);
+}
+
+/**
  * ieee80211_mle_size_ok - validate multi-link element size
  * @data: pointer to the element data
  * @len: length of the containing element
@@ -4700,6 +5078,28 @@
 	return mle->variable[0] >= common;
 }
 
+/**
+ * ieee80211_mle_type_ok - validate multi-link element type and size
+ * @data: pointer to the element data
+ * @type: expected type of the element
+ * @len: length of the containing element
+ */
+static inline bool ieee80211_mle_type_ok(const u8 *data, u8 type, size_t len)
+{
+	const struct ieee80211_multi_link_elem *mle = (const void *)data;
+	u16 control;
+
+	if (!ieee80211_mle_size_ok(data, len))
+		return false;
+
+	control = le16_to_cpu(mle->control);
+
+	if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) == type)
+		return true;
+
+	return false;
+}
+
 enum ieee80211_mle_subelems {
 	IEEE80211_MLE_SUBELEM_PER_STA_PROFILE		= 0,
 	IEEE80211_MLE_SUBELEM_FRAGMENT		        = 254,
@@ -4722,11 +5122,13 @@
 } __packed;
 
 /**
- * ieee80211_mle_sta_prof_size_ok - validate multi-link element sta profile size
+ * ieee80211_mle_basic_sta_prof_size_ok - validate basic multi-link element sta
+ *	profile size
  * @data: pointer to the sub element data
  * @len: length of the containing sub element
  */
-static inline bool ieee80211_mle_sta_prof_size_ok(const u8 *data, size_t len)
+static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data,
+							size_t len)
 {
 	const struct ieee80211_mle_per_sta_profile *prof = (const void *)data;
 	u16 control;
@@ -4746,25 +5148,196 @@
 		info_len += 8;
 	if (control & IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT)
 		info_len += 2;
-	if (control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT)
-		info_len += 1;
-
 	if (control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE &&
-	    control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE) {
+	    control & IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT) {
 		if (control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE)
 			info_len += 2;
 		else
 			info_len += 1;
 	}
+	if (control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT)
+		info_len += 1;
 
 	return prof->sta_info_len >= info_len &&
 	       fixed + prof->sta_info_len <= len;
 }
 
+/**
+ * ieee80211_mle_basic_sta_prof_bss_param_ch_cnt - get per-STA profile BSS
+ *	parameter change count
+ * @prof: the per-STA profile, having been checked with
+ *	ieee80211_mle_basic_sta_prof_size_ok() for the correct length
+ *
+ * Return: The BSS parameter change count value if present, 0 otherwise.
+ */
+static inline u8
+ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(const struct ieee80211_mle_per_sta_profile *prof)
+{
+	u16 control = le16_to_cpu(prof->control);
+	const u8 *pos = prof->variable;
+
+	if (!(control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT))
+		return 0;
+
+	if (control & IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT)
+		pos += 6;
+	if (control & IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT)
+		pos += 2;
+	if (control & IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT)
+		pos += 8;
+	if (control & IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT)
+		pos += 2;
+	if (control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE &&
+	    control & IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT) {
+		if (control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE)
+			pos += 2;
+		else
+			pos += 1;
+	}
+
+	return *pos;
+}
+
+#define IEEE80211_MLE_STA_RECONF_CONTROL_LINK_ID			0x000f
+#define IEEE80211_MLE_STA_RECONF_CONTROL_COMPLETE_PROFILE		0x0010
+#define IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT		0x0020
+#define IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT		0x0040
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_UPDATE_TYPE		0x0780
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT	0x0800
+
+/**
+ * ieee80211_mle_reconf_sta_prof_size_ok - validate reconfiguration multi-link
+ *	element sta profile size.
+ * @data: pointer to the sub element data
+ * @len: length of the containing sub element
+ */
+static inline bool ieee80211_mle_reconf_sta_prof_size_ok(const u8 *data,
+							 size_t len)
+{
+	const struct ieee80211_mle_per_sta_profile *prof = (const void *)data;
+	u16 control;
+	u8 fixed = sizeof(*prof);
+	u8 info_len = 1;
+
+	if (len < fixed)
+		return false;
+
+	control = le16_to_cpu(prof->control);
+
+	if (control & IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT)
+		info_len += ETH_ALEN;
+	if (control & IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT)
+		info_len += 2;
+	if (control & IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT)
+		info_len += 2;
+
+	return prof->sta_info_len >= info_len &&
+	       fixed + prof->sta_info_len - 1 <= len;
+}
+
+static inline bool ieee80211_tid_to_link_map_size_ok(const u8 *data, size_t len)
+{
+	const struct ieee80211_ttlm_elem *t2l = (const void *)data;
+	u8 control, fixed = sizeof(*t2l), elem_len = 0;
+
+	if (len < fixed)
+		return false;
+
+	control = t2l->control;
+
+	if (control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT)
+		elem_len += 2;
+	if (control & IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT)
+		elem_len += 3;
+
+	if (!(control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP)) {
+		u8 bm_size;
+
+		elem_len += 1;
+		if (len < fixed + elem_len)
+			return false;
+
+		if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE)
+			bm_size = 1;
+		else
+			bm_size = 2;
+
+		elem_len += hweight8(t2l->optional[0]) * bm_size;
+	}
+
+	return len >= fixed + elem_len;
+}
+
 #define for_each_mle_subelement(_elem, _data, _len)			\
 	if (ieee80211_mle_size_ok(_data, _len))				\
 		for_each_element(_elem,					\
 				 _data + ieee80211_mle_common_size(_data),\
 				 _len - ieee80211_mle_common_size(_data))
 
+/**
+ * enum ieee80211_critical_updates - Critical Update (CU) flags
+ *
+ * These flags are used to indicate the type of critical update happening
+ * on a link in an interface.
+ *
+ * @IEEE80211_CU_INCLUDE_CSA_ELEM: critical update due to inclusion of a Channel
+ *	Switch Announcement element.
+ * @IEEE80211_CU_INCLUDE_ECSA_ELEM: critical update due to inclusion of an
+ *	Extended Channel Switch Announcement element.
+ * @IEEE80211_CU_MODIFY_EDCA_PARAM_ELEM: critical update due to modification of
+ *	the EDCA parameters element.
+ * @IEEE80211_CU_INCLUDE_QUIET_ELEM: critical update due to inclusion of a Quiet
+ *	element.
+ * @IEEE80211_CU_MODIFY_DSSS_PARAM_ELEM: critical update due to modification of
+ *	the DSSS Parameter Set.
+ * @IEEE80211_CU_MODIFY_HT_OPER_ELEM: critical update due to modification of the
+ *	HT Operation element
+ * @IEEE80211_CU_INCLUDE_WBCS_ELEM: critical update due to inclusion of a Wide
+ *	Bandwidth Channel Switch element.
+ * @IEEE80211_CU_INCLUDE_CSW_ELEM: critical update due to inclusion of a Channel
+ *	Switch Wrapper element.
+ * @IEEE80211_CU_INCLUDE_OMN_ELEM: critical update due to inclusion of an
+ *	Operating Mode Notification element.
+ * @IEEE80211_CU_INCLUDE_Q_CHAN_ELEM: critical update due to inclusion of a
+ *	Quiet Channel element.
+ * @IEEE80211_CU_MODIFY_VHT_OPER_ELEM: critical update due to modification of the
+ *	VHT Operation element.
+ * @IEEE80211_CU_MODIFY_HE_OPER_ELEM: critical update due to modification of the
+ *	HE Operation element.
+ * @IEEE80211_CU_INCLUDE_B_TWT_ELEM: critical update due to inclusion a
+ *	Broadcast TWT element.
+ * @IEEE80211_CU_INCLUDE_B_TWT_EXIST_ELEM: critical update due to inclusion of a
+ *	Broadcast TWT Parameter Set field in an existing Broadcast TWT element.
+ * @IEEE80211_CU_INCLUDE_BCCA_ELEM: critical update due to inclusion of the BSS
+ *	Color Change Announcement element.
+ * @IEEE80211_CU_MODIFY_MU_EDCA_PARAM_ELEM: critical update due to modification
+ *	of the MU EDCA Parameter Set element.
+ * @IEEE80211_CU_MODIFY_SR_PARAM_ELEM: critical update due to modification of the
+ *	Spatial Reuse Parameter Set element.
+ * @IEEE80211_CU_MODIFY_UORA_PARAM_ELEM: critical update due to modification of
+ *	the UORA Parameter Set element.
+ * @IEEE80211_CU_MODIFY_EHT_OPER_ELEM: critical update due to modification of the
+ *	EHT Operation element.
+ */
+enum ieee80211_critical_updates {
+	IEEE80211_CU_INCLUDE_CSA_ELEM			= 1 << 0,
+	IEEE80211_CU_INCLUDE_ECSA_ELEM			= 1 << 1,
+	IEEE80211_CU_MODIFY_EDCA_PARAM_ELEM		= 1 << 2,
+	IEEE80211_CU_INCLUDE_QUIET_ELEM			= 1 << 3,
+	IEEE80211_CU_MODIFY_DSSS_PARAM_ELEM		= 1 << 4,
+	IEEE80211_CU_MODIFY_HT_OPER_ELEM		= 1 << 5,
+	IEEE80211_CU_INCLUDE_WBCS_ELEM			= 1 << 6,
+	IEEE80211_CU_INCLUDE_CSW_ELEM			= 1 << 7,
+	IEEE80211_CU_INCLUDE_OMN_ELEM			= 1 << 8,
+	IEEE80211_CU_INCLUDE_Q_CHAN_ELEM		= 1 << 9,
+	IEEE80211_CU_MODIFY_VHT_OPER_ELEM		= 1 << 10,
+	IEEE80211_CU_MODIFY_HE_OPER_ELEM		= 1 << 11,
+	IEEE80211_CU_INCLUDE_B_TWT_ELEM			= 1 << 12,
+	IEEE80211_CU_INCLUDE_B_TWT_EXIST_ELEM		= 1 << 13,
+	IEEE80211_CU_INCLUDE_BCCA_ELEM			= 1 << 14,
+	IEEE80211_CU_MODIFY_MU_EDCA_PARAM_ELEM		= 1 << 15,
+	IEEE80211_CU_MODIFY_SR_PARAM_ELEM		= 1 << 16,
+	IEEE80211_CU_MODIFY_UORA_PARAM_ELEM		= 1 << 17,
+	IEEE80211_CU_MODIFY_EHT_OPER_ELEM		= 1 << 18,
+};
 #endif /* LINUX_IEEE80211_H */
diff -ruw linux-6.4/include/linux/if_vlan.h linux-6.4-fbx/include/linux/if_vlan.h
--- linux-6.4/include/linux/if_vlan.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/if_vlan.h	2024-02-08 19:19:27.692597465 +0100
@@ -12,6 +12,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/bug.h>
 #include <uapi/linux/if_vlan.h>
+#include <uapi/linux/pkt_sched.h>
 
 #define VLAN_HLEN	4		/* The additional bytes required by VLAN
 					 * (in addition to the Ethernet header)
@@ -144,6 +145,7 @@
 			 int (*action)(struct net_device *dev, int vid,
 				       void *arg), void *arg);
 extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
+extern struct net_device *vlan_dev_upper_dev(const struct net_device *dev);
 extern u16 vlan_dev_vlan_id(const struct net_device *dev);
 extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
 
@@ -213,7 +215,7 @@
 
 	mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
 	while (mp) {
-		if (mp->priority == skprio) {
+		if (mp->priority == (skprio & TC_H_MIN_MASK)) {
 			return mp->vlan_qos; /* This should already be shifted
 					      * to mask correctly with the
 					      * VLAN's TCI */
@@ -257,6 +259,12 @@
 	return NULL;
 }
 
+static inline struct net_device *vlan_dev_upper_dev(const struct net_device *dev)
+{
+	BUG();
+	return NULL;
+}
+
 static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
 {
 	BUG();
@@ -317,6 +325,9 @@
 	switch (ethertype) {
 	case htons(ETH_P_8021Q):
 	case htons(ETH_P_8021AD):
+#ifdef CONFIG_VLAN_FBX
+	case htons(ETH_P_FBXVLAN):
+#endif
 		return true;
 	default:
 		return false;
diff -ruw linux-6.4/include/linux/in.h linux-6.4-fbx/include/linux/in.h
--- linux-6.4/include/linux/in.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/in.h	2023-02-27 13:36:18.996765333 +0100
@@ -30,6 +30,9 @@
 		return 0;
 	case IPPROTO_AH:	/* SPI */
 		return 4;
+	case IPPROTO_IPV6:
+		/* third byte of ipv6 destination address */
+		return 36;
 	default:
 		return -EINVAL;
 	}
diff -ruw linux-6.4/include/linux/miscdevice.h linux-6.4-fbx/include/linux/miscdevice.h
--- linux-6.4/include/linux/miscdevice.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/miscdevice.h	2023-05-22 20:06:44.603863377 +0200
@@ -21,6 +21,7 @@
 #define APOLLO_MOUSE_MINOR	7	/* unused */
 #define PC110PAD_MINOR		9	/* unused */
 /*#define ADB_MOUSE_MINOR	10	FIXME OBSOLETE */
+#define TALDEV_MINOR		74	/* Marvell TAL device */
 #define WATCHDOG_MINOR		130	/* Watchdog timer     */
 #define TEMP_MINOR		131	/* Temperature Sensor */
 #define APM_MINOR_DEV		134
diff -ruw linux-6.4/include/linux/mm_types.h linux-6.4-fbx/include/linux/mm_types.h
--- linux-6.4/include/linux/mm_types.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/mm_types.h	2024-01-19 17:01:19.897847904 +0100
@@ -123,7 +123,7 @@
 			 */
 			unsigned long pp_magic;
 			struct page_pool *pp;
-			unsigned long _pp_mapping_pad;
+			unsigned long pp_recycle_flag;
 			unsigned long dma_addr;
 			union {
 				/**
@@ -398,8 +398,8 @@
  */
 #define STRUCT_PAGE_MAX_SHIFT	(order_base_2(sizeof(struct page)))
 
-#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
-#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+#define PAGE_FRAG_CACHE_MAX_ORDER	CONFIG_PAGE_FRAG_CACHE_ORDER
+#define PAGE_FRAG_CACHE_MAX_SIZE	(4096 << PAGE_FRAG_CACHE_MAX_ORDER)
 
 /*
  * page_private can be used on tail pages.  However, PagePrivate is only
@@ -432,6 +432,7 @@
 	 */
 	unsigned int		pagecnt_bias;
 	bool pfmemalloc;
+	atomic_t pages_allocated;
 };
 
 typedef unsigned long vm_flags_t;
diff -ruw linux-6.4/include/linux/mtd/mtd.h linux-6.4-fbx/include/linux/mtd/mtd.h
--- linux-6.4/include/linux/mtd/mtd.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/mtd/mtd.h	2023-05-22 20:06:44.623863909 +0200
@@ -287,6 +287,13 @@
 	 */
 	unsigned int bitflip_threshold;
 
+	/* NAND related attributes */
+	const char *nand_type;
+	const char *nand_manufacturer;
+	const char *onfi_model;
+	uint8_t onfi_ecc_bits;
+	uint8_t nand_ids[8];
+
 	/* Kernel-only stuff starts here. */
 	const char *name;
 	int index;
diff -ruw linux-6.4/include/linux/netdevice.h linux-6.4-fbx/include/linux/netdevice.h
--- linux-6.4/include/linux/netdevice.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/netdevice.h	2023-11-07 13:38:44.050256364 +0100
@@ -78,6 +78,20 @@
 struct xdp_buff;
 struct xdp_md;
 
+#ifdef CONFIG_NETRXTHREAD
+
+#define RXTHREAD_MAX_PKTS       512
+struct krxd {
+	struct sk_buff_head	pkt_queue;
+	unsigned int		stats_pkts;
+	unsigned int		stats_dropped;
+	wait_queue_head_t	wq;
+	struct task_struct	*task;
+};
+
+extern struct krxd gkrxd[CONFIG_NETRXTHREAD_RX_QUEUE];
+#endif
+
 void synchronize_net(void);
 void netdev_set_default_ethtool_ops(struct net_device *dev,
 				    const struct ethtool_ops *ops);
@@ -1743,6 +1757,8 @@
 	IFF_NO_ADDRCONF			= BIT_ULL(30),
 	IFF_TX_SKB_NO_LINEAR		= BIT_ULL(31),
 	IFF_CHANGE_PROTO_DOWN		= BIT_ULL(32),
+	IFF_FBXBRIDGE			= BIT_ULL(33),
+	IFF_FBXBRIDGE_PORT		= BIT_ULL(34),
 };
 
 #define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
@@ -1776,6 +1792,8 @@
 #define IFF_FAILOVER_SLAVE		IFF_FAILOVER_SLAVE
 #define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER
 #define IFF_TX_SKB_NO_LINEAR		IFF_TX_SKB_NO_LINEAR
+#define IFF_FBXBRIDGE			IFF_FBXBRIDGE
+#define IFF_FBXBRIDGE_PORT		IFF_FBXBRIDGE_PORT
 
 /* Specifies the type of the struct net_device::ml_priv pointer */
 enum netdev_ml_priv_type {
@@ -4827,13 +4845,6 @@
 int skb_csum_hwoffload_help(struct sk_buff *skb,
 			    const netdev_features_t features);
 
-struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
-				  netdev_features_t features, bool tx_path);
-struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
-				    netdev_features_t features, __be16 type);
-struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
-				    netdev_features_t features);
-
 struct netdev_bonding_info {
 	ifslave	slave;
 	ifbond	master;
@@ -4856,11 +4867,6 @@
 }
 #endif
 
-static inline
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
-{
-	return __skb_gso_segment(skb, features, true);
-}
 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
 
 static inline bool can_checksum_protocol(netdev_features_t features,
@@ -4987,6 +4993,7 @@
 					  struct net_device *dev,
 					  netdev_features_t features);
 netdev_features_t netif_skb_features(struct sk_buff *skb);
+void skb_warn_bad_offload(const struct sk_buff *skb);
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
@@ -5035,19 +5042,6 @@
 void netif_inherit_tso_max(struct net_device *to,
 			   const struct net_device *from);
 
-static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
-					int pulled_hlen, u16 mac_offset,
-					int mac_len)
-{
-	skb->protocol = protocol;
-	skb->encapsulation = 1;
-	skb_push(skb, pulled_hlen);
-	skb_reset_transport_header(skb);
-	skb->mac_header = mac_offset;
-	skb->network_header = skb->mac_header + mac_len;
-	skb->mac_len = mac_len;
-}
-
 static inline bool netif_is_macsec(const struct net_device *dev)
 {
 	return dev->priv_flags & IFF_MACSEC;
@@ -5103,6 +5097,16 @@
 	return dev->priv_flags & IFF_BRIDGE_PORT;
 }
 
+static inline bool netif_is_fbxbridge_master(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_FBXBRIDGE;
+}
+
+static inline bool netif_is_fbxbridge_port(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_FBXBRIDGE_PORT;
+}
+
 static inline bool netif_is_ovs_master(const struct net_device *dev)
 {
 	return dev->priv_flags & IFF_OPENVSWITCH;
diff -ruw linux-6.4/include/linux/netfilter/nf_conntrack_ftp.h linux-6.4-fbx/include/linux/netfilter/nf_conntrack_ftp.h
--- linux-6.4/include/linux/netfilter/nf_conntrack_ftp.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/netfilter/nf_conntrack_ftp.h	2023-02-27 19:50:22.648261408 +0100
@@ -22,6 +22,11 @@
 	u_int16_t seq_aft_nl_num[IP_CT_DIR_MAX];
 	/* pickup sequence tracking, useful for conntrackd */
 	u_int16_t flags[IP_CT_DIR_MAX];
+#if defined(CONFIG_FREEBOX_BRIDGE) || defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+	unsigned int is_fbxbridge;
+	unsigned long fbxbridge_remote;
+	unsigned long fbxbridge_wan;
+#endif
 };
 
 /* For NAT to hook in when we find a packet which describes what other
diff -ruw linux-6.4/include/linux/netfilter/nf_conntrack_sip.h linux-6.4-fbx/include/linux/netfilter/nf_conntrack_sip.h
--- linux-6.4/include/linux/netfilter/nf_conntrack_sip.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/netfilter/nf_conntrack_sip.h	2023-05-22 20:06:44.631864122 +0200
@@ -5,6 +5,7 @@
 #include <linux/skbuff.h>
 #include <linux/types.h>
 #include <net/netfilter/nf_conntrack_expect.h>
+#include <crypto/sha2.h>
 
 #define SIP_PORT	5060
 #define SIP_TIMEOUT	3600
@@ -12,7 +13,7 @@
 struct nf_ct_sip_master {
 	unsigned int	register_cseq;
 	unsigned int	invite_cseq;
-	__be16		forced_dport;
+	__be16		forced_dport[IP_CT_DIR_MAX];
 };
 
 enum sip_expectation_classes {
@@ -30,6 +31,10 @@
 	enum sip_expectation_classes	class;
 };
 
+struct nf_ct_sip_expect {
+	u8				cid_hash[SHA256_DIGEST_SIZE];
+};
+
 #define SDP_MEDIA_TYPE(__name, __class)					\
 {									\
 	.name	= (__name),						\
diff -ruw linux-6.4/include/linux/netfilter/nf_conntrack_tcp.h linux-6.4-fbx/include/linux/netfilter/nf_conntrack_tcp.h
--- linux-6.4/include/linux/netfilter/nf_conntrack_tcp.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/netfilter/nf_conntrack_tcp.h	2023-02-27 12:58:11.897584734 +0100
@@ -28,6 +28,7 @@
 	/* For SYN packets while we may be out-of-sync */
 	u_int8_t	last_wscale;	/* Last window scaling factor seen */
 	u_int8_t	last_flags;	/* Last flags set */
+	u_int32_t	no_window_track;
 };
 
 #endif /* _NF_CONNTRACK_TCP_H */
diff -ruw linux-6.4/include/linux/of_fdt.h linux-6.4-fbx/include/linux/of_fdt.h
--- linux-6.4/include/linux/of_fdt.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/of_fdt.h	2023-05-22 20:06:44.643864441 +0200
@@ -83,6 +83,7 @@
 extern void unflatten_and_copy_device_tree(void);
 extern void early_init_devtree(void *);
 extern void early_get_first_memblock_info(void *, phys_addr_t *);
+const void *of_fdt_find_compatible_dtb(const char *name);
 #else /* CONFIG_OF_EARLY_FLATTREE */
 static inline void early_init_dt_check_for_usable_mem_range(void) {}
 static inline int early_init_dt_scan_chosen_stdout(void) { return -ENODEV; }
diff -ruw linux-6.4/include/linux/page_owner.h linux-6.4-fbx/include/linux/page_owner.h
--- linux-6.4/include/linux/page_owner.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/page_owner.h	2023-05-22 20:06:44.643864441 +0200
@@ -12,6 +12,9 @@
 extern void __set_page_owner(struct page *page,
 			unsigned short order, gfp_t gfp_mask);
 extern void __split_page_owner(struct page *page, unsigned int nr);
+extern void __set_page_owner_frag_cache(struct page *page,
+					unsigned int order,
+					struct page_frag_cache *nc);
 extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
 extern void __dump_page_owner(const struct page *page);
@@ -36,6 +39,13 @@
 	if (static_branch_unlikely(&page_owner_inited))
 		__split_page_owner(page, nr);
 }
+static inline void set_page_owner_frag_cache(struct page *page,
+					     unsigned int order,
+					     struct page_frag_cache *nc)
+{
+	if (static_branch_unlikely(&page_owner_inited))
+		__set_page_owner_frag_cache(page, order, nc);
+}
 static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
 {
 	if (static_branch_unlikely(&page_owner_inited))
@@ -59,6 +69,11 @@
 			unsigned int order, gfp_t gfp_mask)
 {
 }
+static inline void set_page_owner_frag_cache(struct page *page,
+					     unsigned int order,
+					     struct page_frag_cache *nc)
+{
+}
 static inline void split_page_owner(struct page *page,
 			unsigned short order)
 {
diff -ruw linux-6.4/include/linux/part_stat.h linux-6.4-fbx/include/linux/part_stat.h
--- linux-6.4/include/linux/part_stat.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/part_stat.h	2023-05-22 20:06:44.647864548 +0200
@@ -12,6 +12,7 @@
 	unsigned long merges[NR_STAT_GROUPS];
 	unsigned long io_ticks;
 	local_t in_flight[2];
+	unsigned long io_errors[2];
 };
 
 /*
diff -ruw linux-6.4/include/linux/pci.h linux-6.4-fbx/include/linux/pci.h
--- linux-6.4/include/linux/pci.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/pci.h	2023-06-01 18:16:20.069748352 +0200
@@ -464,12 +464,14 @@
 	unsigned int	no_vf_scan:1;		/* Don't scan for VFs after IOV enablement */
 	unsigned int	no_command_memory:1;	/* No PCI_COMMAND_MEMORY */
 	unsigned int	rom_bar_overlap:1;	/* ROM BAR disable broken */
+	unsigned int	sysfs_init_done:1;	/* res_attr has been created */
 	pci_dev_flags_t dev_flags;
 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
 
 	u32		saved_config_space[16]; /* Config space saved at suspend time */
 	struct hlist_head saved_cap_space;
 	int		rom_attr_enabled;	/* Display of ROM attribute enabled? */
+	struct mutex	sysfs_init_lock;	/* res_attr has been created */
 	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
 	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
 
diff -ruw linux-6.4/include/linux/pci_ids.h linux-6.4-fbx/include/linux/pci_ids.h
--- linux-6.4/include/linux/pci_ids.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/pci_ids.h	2023-05-22 20:06:44.651864654 +0200
@@ -1828,6 +1828,7 @@
 #define PCI_DEVICE_ID_PERICOM_PI7C9X7952	0x7952
 #define PCI_DEVICE_ID_PERICOM_PI7C9X7954	0x7954
 #define PCI_DEVICE_ID_PERICOM_PI7C9X7958	0x7958
+#define PCI_DEVICE_ID_PI7C9X20303SL		0xa303
 
 #define PCI_SUBVENDOR_ID_CHASE_PCIFAST		0x12E0
 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST4		0x0031
diff -ruw linux-6.4/include/linux/phy.h linux-6.4-fbx/include/linux/phy.h
--- linux-6.4/include/linux/phy.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/phy.h	2023-05-22 20:30:14.549854255 +0200
@@ -110,6 +110,7 @@
  * @PHY_INTERFACE_MODE_XLGMII:40 gigabit media-independent interface
  * @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax
  * @PHY_INTERFACE_MODE_QSGMII: Quad SGMII
+ * @PHY_INTERFACE_MODE_PSGMII: Penta SGMII
  * @PHY_INTERFACE_MODE_TRGMII: Turbo RGMII
  * @PHY_INTERFACE_MODE_100BASEX: 100 BaseX
  * @PHY_INTERFACE_MODE_1000BASEX: 1000 BaseX
@@ -123,6 +124,7 @@
  * @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN
  * @PHY_INTERFACE_MODE_QUSGMII: Quad Universal SGMII
  * @PHY_INTERFACE_MODE_1000BASEKX: 1000Base-KX - with Clause 73 AN
+ * @PHY_INTERFACE_MODE_10G_QXGMII: USXGMII-M 10G / 4 ports
  * @PHY_INTERFACE_MODE_MAX: Book keeping
  *
  * Describes the interface between the MAC and PHY.
@@ -147,6 +149,7 @@
 	PHY_INTERFACE_MODE_XLGMII,
 	PHY_INTERFACE_MODE_MOCA,
 	PHY_INTERFACE_MODE_QSGMII,
+	PHY_INTERFACE_MODE_PSGMII,
 	PHY_INTERFACE_MODE_TRGMII,
 	PHY_INTERFACE_MODE_100BASEX,
 	PHY_INTERFACE_MODE_1000BASEX,
@@ -162,6 +165,15 @@
 	PHY_INTERFACE_MODE_10GKR,
 	PHY_INTERFACE_MODE_QUSGMII,
 	PHY_INTERFACE_MODE_1000BASEKX,
+	PHY_INTERFACE_MODE_10G_QXGMII,
+
+	PHY_INTERFACE_MODE_1000BASEPX_D,
+	PHY_INTERFACE_MODE_1000BASEPX_U,
+	PHY_INTERFACE_MODE_10000BASEPR_D,
+	PHY_INTERFACE_MODE_10000BASEPR_U,
+	PHY_INTERFACE_MODE_10000_1000_BASEPRX_D,
+	PHY_INTERFACE_MODE_10000_1000_BASEPRX_U,
+
 	PHY_INTERFACE_MODE_MAX,
 } phy_interface_t;
 
@@ -255,6 +267,8 @@
 		return "moca";
 	case PHY_INTERFACE_MODE_QSGMII:
 		return "qsgmii";
+	case PHY_INTERFACE_MODE_PSGMII:
+		return "psgmii";
 	case PHY_INTERFACE_MODE_TRGMII:
 		return "trgmii";
 	case PHY_INTERFACE_MODE_1000BASEX:
@@ -281,6 +295,20 @@
 		return "100base-x";
 	case PHY_INTERFACE_MODE_QUSGMII:
 		return "qusgmii";
+	case PHY_INTERFACE_MODE_10G_QXGMII:
+		return "10g-qxgmii";
+	case PHY_INTERFACE_MODE_1000BASEPX_D:
+		return "1000base-px-d";
+	case PHY_INTERFACE_MODE_1000BASEPX_U:
+		return "1000base-px-u";
+	case PHY_INTERFACE_MODE_10000BASEPR_D:
+		return "10000base-pr-d";
+	case PHY_INTERFACE_MODE_10000BASEPR_U:
+		return "10000base-pr-u";
+	case PHY_INTERFACE_MODE_10000_1000_BASEPRX_D:
+		return "10000_1000base-prx-d";
+	case PHY_INTERFACE_MODE_10000_1000_BASEPRX_U:
+		return "10000_1000base-prx-u";
 	default:
 		return "unknown";
 	}
@@ -420,7 +448,11 @@
 	/** @reset_post_delay_us: GPIO reset deassert delay in microseconds */
 	int reset_post_delay_us;
 	/** @reset_gpiod: Reset GPIO descriptor pointer */
-	struct gpio_desc *reset_gpiod;
+	struct gpio_descs *reset_gpiod;
+
+	/* mark non-present phy as present but broken during
+	 * probing */
+	bool keep_broken_phy;
 
 	/** @shared_lock: protect access to the shared element */
 	struct mutex shared_lock;
@@ -1814,6 +1846,7 @@
 
 /* Generic C45 PHY driver */
 extern struct phy_driver genphy_c45_driver;
+extern struct phy_driver genphy_broken_c45_driver;
 
 /* The gen10g_* functions are the old Clause 45 stub */
 int gen10g_config_aneg(struct phy_device *phydev);
diff -ruw linux-6.4/include/linux/phylink.h linux-6.4-fbx/include/linux/phylink.h
--- linux-6.4/include/linux/phylink.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/phylink.h	2023-05-31 17:11:03.421680714 +0200
@@ -584,6 +584,7 @@
 void phylink_mac_change(struct phylink *, bool up);
 
 void phylink_start(struct phylink *);
+void phylink_start_silent(struct phylink *);
 void phylink_stop(struct phylink *);
 
 void phylink_suspend(struct phylink *pl, bool mac_wol);
@@ -660,4 +661,12 @@
 
 void phylink_decode_usxgmii_word(struct phylink_link_state *state,
 				 uint16_t lpa);
+int phylink_set_interface(struct phylink *pl,
+			  phy_interface_t interface,
+			  bool an_enabled);
+void phylink_get_interface(struct phylink *pl,
+			   phy_interface_t *interface,
+			   int *an_en,
+			   int *mode);
+
 #endif
diff -ruw linux-6.4/include/linux/ppp_channel.h linux-6.4-fbx/include/linux/ppp_channel.h
--- linux-6.4/include/linux/ppp_channel.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/ppp_channel.h	2023-05-22 20:06:44.671865186 +0200
@@ -50,6 +50,9 @@
 /* Called by the channel when it can send some more data. */
 extern void ppp_output_wakeup(struct ppp_channel *);
 
+/* Called by the channel when it want to prevent further transmit on it */
+extern void ppp_output_stop(struct ppp_channel *);
+
 /* Called by the channel to process a received PPP packet.
    The packet should have just the 2-byte PPP protocol header. */
 extern void ppp_input(struct ppp_channel *, struct sk_buff *);
diff -ruw linux-6.4/include/linux/pstore.h linux-6.4-fbx/include/linux/pstore.h
--- linux-6.4/include/linux/pstore.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/pstore.h	2023-05-22 20:06:44.671865186 +0200
@@ -67,6 +67,7 @@
  * @reason:	kdump reason for notification
  * @part:	position in a multipart record
  * @compressed:	whether the buffer is compressed
+ * @old:        reflects underlying prz old_zone.
  *
  */
 struct pstore_record {
@@ -83,6 +84,7 @@
 	enum kmsg_dump_reason	reason;
 	unsigned int		part;
 	bool			compressed;
+	bool			old;
 };
 
 /**
diff -ruw linux-6.4/include/linux/pstore_ram.h linux-6.4-fbx/include/linux/pstore_ram.h
--- linux-6.4/include/linux/pstore_ram.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/pstore_ram.h	2023-05-22 20:06:44.671865186 +0200
@@ -29,6 +29,7 @@
 struct ramoops_platform_data {
 	unsigned long	mem_size;
 	phys_addr_t	mem_address;
+	void		*mem_ptr;
 	unsigned int	mem_type;
 	unsigned long	record_size;
 	unsigned long	console_size;
diff -ruw linux-6.4/include/linux/regmap.h linux-6.4-fbx/include/linux/regmap.h
--- linux-6.4/include/linux/regmap.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/regmap.h	2023-05-22 20:06:44.679865399 +0200
@@ -1704,6 +1704,7 @@
 int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq);
 struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data);
 
+void __iomem *regmap_get_mmio_base_address(struct regmap *map);
 #else
 
 /*
diff -ruw linux-6.4/include/linux/regulator/driver.h linux-6.4-fbx/include/linux/regulator/driver.h
--- linux-6.4/include/linux/regulator/driver.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/regulator/driver.h	2023-10-05 12:33:41.387635388 +0200
@@ -457,6 +457,7 @@
 	struct regmap *regmap;
 
 	struct gpio_desc *ena_gpiod;
+	struct gpio_desc *fault_sense_gpiod;
 };
 
 /**
@@ -642,6 +643,7 @@
 
 	struct regulator_enable_gpio *ena_pin;
 	unsigned int ena_gpio_state:1;
+	struct gpio_desc *fault_sense_gpiod;
 
 	unsigned int is_switch:1;
 
diff -ruw linux-6.4/include/linux/sched.h linux-6.4-fbx/include/linux/sched.h
--- linux-6.4/include/linux/sched.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/sched.h	2023-05-22 20:06:44.687865612 +0200
@@ -736,6 +736,12 @@
 #endif
 };
 
+enum task_exec_mode {
+	EXEC_MODE_DENIED,
+	EXEC_MODE_ONCE,
+	EXEC_MODE_UNLIMITED,
+};
+
 struct task_struct {
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 	/*
@@ -762,6 +768,7 @@
 	/* Per task flags (PF_*), defined further below: */
 	unsigned int			flags;
 	unsigned int			ptrace;
+	enum task_exec_mode		exec_mode;
 
 #ifdef CONFIG_SMP
 	int				on_cpu;
diff -ruw linux-6.4/include/linux/sfp.h linux-6.4-fbx/include/linux/sfp.h
--- linux-6.4/include/linux/sfp.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/sfp.h	2023-05-22 20:06:44.695865825 +0200
@@ -554,6 +554,7 @@
 int sfp_get_module_eeprom_by_page(struct sfp_bus *bus,
 				  const struct ethtool_module_eeprom *page,
 				  struct netlink_ext_ack *extack);
+int sfp_get_sfp_state(struct sfp_bus *bus, struct ethtool_sfp_state *st);
 void sfp_upstream_start(struct sfp_bus *bus);
 void sfp_upstream_stop(struct sfp_bus *bus);
 void sfp_bus_put(struct sfp_bus *bus);
@@ -606,6 +607,12 @@
 {
 	return -EOPNOTSUPP;
 }
+
+static inline int sfp_get_sfp_state(struct sfp_bus *the_bus,
+				    struct ethtool_sfp_state *st)
+{
+	return -EOPNOTSUPP;
+}
 
 static inline void sfp_upstream_start(struct sfp_bus *bus)
 {
diff -ruw linux-6.4/include/linux/skbuff.h linux-6.4-fbx/include/linux/skbuff.h
--- linux-6.4/include/linux/skbuff.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/skbuff.h	2024-01-19 17:01:19.897847904 +0100
@@ -687,6 +687,13 @@
 typedef unsigned char *sk_buff_data_t;
 #endif
 
+enum {
+	FFN_STATE_INIT = 0,
+	FFN_STATE_FORWARDABLE,
+	FFN_STATE_FAST_FORWARDED,
+	FFN_STATE_INCOMPATIBLE,
+};
+
 /**
  * DOC: Basic sk_buff geometry
  *
@@ -893,11 +900,22 @@
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	unsigned long		 _nfct;
 #endif
+
+#if defined(CONFIG_IP_FFN) || defined(CONFIG_IPV6_FFN)
+	int			ffn_state;
+	int			ffn_orig_tos;
+	__u16			ffn_ff_done,
+				ffn_ff_dirty_len;
+#endif
 	unsigned int		len,
 				data_len;
 	__u16			mac_len,
 				hdr_len;
 
+#ifdef CONFIG_NETRXTHREAD
+	int			rxthread_prio;
+#endif
+
 	/* Following fields are _not_ copied in __copy_skb_header()
 	 * Note that queue_mapping is here mostly to fill a hole.
 	 */
@@ -3035,6 +3053,10 @@
  * get_rps_cpu() for example only access one 64 bytes aligned block :
  * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
  */
+#ifdef CONFIG_NETSKBPAD
+#define NET_SKB_PAD	CONFIG_NETSKBPAD
+#endif
+
 #ifndef NET_SKB_PAD
 #define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
 #endif
@@ -3163,6 +3185,10 @@
 
 void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
 
+struct page_frag_cache *netdev_frag_cache_get(unsigned int cpu_id);
+struct page_frag_cache *napi_frag_cache_get(unsigned int cpu_id);
+
+
 /**
  * netdev_alloc_frag - allocate a page fragment
  * @fragsz: fragment size
@@ -3992,8 +4018,6 @@
 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
-bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
-bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
 struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
 				 unsigned int offset);
@@ -4859,75 +4883,6 @@
 #endif
 }
 
-/* Keeps track of mac header offset relative to skb->head.
- * It is useful for TSO of Tunneling protocol. e.g. GRE.
- * For non-tunnel skb it points to skb_mac_header() and for
- * tunnel skb it points to outer mac header.
- * Keeps track of level of encapsulation of network headers.
- */
-struct skb_gso_cb {
-	union {
-		int	mac_offset;
-		int	data_offset;
-	};
-	int	encap_level;
-	__wsum	csum;
-	__u16	csum_start;
-};
-#define SKB_GSO_CB_OFFSET	32
-#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
-
-static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
-{
-	return (skb_mac_header(inner_skb) - inner_skb->head) -
-		SKB_GSO_CB(inner_skb)->mac_offset;
-}
-
-static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
-{
-	int new_headroom, headroom;
-	int ret;
-
-	headroom = skb_headroom(skb);
-	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
-	if (ret)
-		return ret;
-
-	new_headroom = skb_headroom(skb);
-	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
-	return 0;
-}
-
-static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
-{
-	/* Do not update partial checksums if remote checksum is enabled. */
-	if (skb->remcsum_offload)
-		return;
-
-	SKB_GSO_CB(skb)->csum = res;
-	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
-}
-
-/* Compute the checksum for a gso segment. First compute the checksum value
- * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
- * then add in skb->csum (checksum from csum_start to end of packet).
- * skb->csum and csum_start are then updated to reflect the checksum of the
- * resultant packet starting from the transport header-- the resultant checksum
- * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo
- * header.
- */
-static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
-{
-	unsigned char *csum_start = skb_transport_header(skb);
-	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
-	__wsum partial = SKB_GSO_CB(skb)->csum;
-
-	SKB_GSO_CB(skb)->csum = res;
-	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
-
-	return csum_fold(csum_partial(csum_start, plen, partial));
-}
-
 static inline bool skb_is_gso(const struct sk_buff *skb)
 {
 	return skb_shinfo(skb)->gso_size;
diff -ruw linux-6.4/include/linux/tcp.h linux-6.4-fbx/include/linux/tcp.h
--- linux-6.4/include/linux/tcp.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/tcp.h	2023-05-22 20:06:44.719866463 +0200
@@ -255,7 +255,8 @@
 	u8	compressed_ack;
 	u8	dup_ack_counter:2,
 		tlp_retrans:1,	/* TLP is a retransmission */
-		unused:5;
+		linear_rto:1,
+		unused:4;
 	u32	chrono_start;	/* Start time in jiffies of a TCP chrono */
 	u32	chrono_stat[3];	/* Time in jiffies for chrono_stat stats */
 	u8	chrono_type:2,	/* current chronograph type */
diff -ruw linux-6.4/include/linux/thermal.h linux-6.4-fbx/include/linux/thermal.h
--- linux-6.4/include/linux/thermal.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/thermal.h	2023-05-22 20:06:44.719866463 +0200
@@ -326,6 +326,9 @@
 
 struct thermal_cooling_device *thermal_cooling_device_register(const char *,
 		void *, const struct thermal_cooling_device_ops *);
+struct thermal_cooling_device *thermal_cooling_device_register_with_parent(
+		struct device *pdev, const char *, void *,
+		const struct thermal_cooling_device_ops *);
 struct thermal_cooling_device *
 thermal_of_cooling_device_register(struct device_node *np, const char *, void *,
 				   const struct thermal_cooling_device_ops *);
diff -ruw linux-6.4/include/linux/vmalloc.h linux-6.4-fbx/include/linux/vmalloc.h
--- linux-6.4/include/linux/vmalloc.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/linux/vmalloc.h	2023-05-22 20:06:44.739866995 +0200
@@ -145,6 +145,8 @@
 extern void *vmalloc_32(unsigned long size) __alloc_size(1);
 extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
+extern void *__vmalloc_pgprot(unsigned long size, gfp_t gfp_mask,
+			      pgprot_t prot) __alloc_size(1);
 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
 			pgprot_t prot, unsigned long vm_flags, int node,
diff -ruw linux-6.4/include/media/dvb-usb-ids.h linux-6.4-fbx/include/media/dvb-usb-ids.h
--- linux-6.4/include/media/dvb-usb-ids.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/media/dvb-usb-ids.h	2023-05-22 20:06:44.743867101 +0200
@@ -167,6 +167,7 @@
 #define USB_PID_DIBCOM_ANCHOR_2135_COLD 		0x2131
 #define USB_PID_DIBCOM_HOOK_DEFAULT			0x0064
 #define USB_PID_DIBCOM_HOOK_DEFAULT_REENUM		0x0065
+#define USB_PID_DIBCOM_HOOK_DEFAULT_STK7770P		0x0066
 #define USB_PID_DIBCOM_MOD3000_COLD			0x0bb8
 #define USB_PID_DIBCOM_MOD3000_WARM			0x0bb9
 #define USB_PID_DIBCOM_MOD3001_COLD			0x0bc6
diff -ruw linux-6.4/include/net/cfg80211.h linux-6.4-fbx/include/net/cfg80211.h
--- linux-6.4/include/net/cfg80211.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/net/cfg80211.h	2024-04-19 16:04:28.965735994 +0200
@@ -7,7 +7,7 @@
  * Copyright 2006-2010	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014 Intel Mobile Communications GmbH
  * Copyright 2015-2017	Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2021, 2023 Intel Corporation
  */
 
 #include <linux/ethtool.h>
@@ -76,6 +76,8 @@
  * @IEEE80211_CHAN_DISABLED: This channel is disabled.
  * @IEEE80211_CHAN_NO_IR: do not initiate radiation, this includes
  *	sending probe requests or beaconing.
+ * @IEEE80211_CHAN_PSD: Power spectral density (in dBm) is set for this
+ *	channel.
  * @IEEE80211_CHAN_RADAR: Radar detection is required on this channel.
  * @IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel
  *	is not permitted.
@@ -119,7 +121,7 @@
 enum ieee80211_channel_flags {
 	IEEE80211_CHAN_DISABLED		= 1<<0,
 	IEEE80211_CHAN_NO_IR		= 1<<1,
-	/* hole at 1<<2 */
+	IEEE80211_CHAN_PSD		= 1<<2,
 	IEEE80211_CHAN_RADAR		= 1<<3,
 	IEEE80211_CHAN_NO_HT40PLUS	= 1<<4,
 	IEEE80211_CHAN_NO_HT40MINUS	= 1<<5,
@@ -144,6 +146,7 @@
 	(IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS)
 
 #define IEEE80211_DFS_MIN_CAC_TIME_MS		60000
+#define IEEE80211_DFS_WEATHER_MIN_CAC_TIME_MS	600000
 #define IEEE80211_DFS_MIN_NOP_TIME_MS		(30 * 60 * 1000)
 
 /**
@@ -171,6 +174,7 @@
  *	on this channel.
  * @dfs_state_entered: timestamp (jiffies) when the dfs state was entered.
  * @dfs_cac_ms: DFS CAC time in milliseconds, this is valid for DFS channels.
+ * @psd: power spectral density (in dBm)
  */
 struct ieee80211_channel {
 	enum nl80211_band band;
@@ -187,6 +191,7 @@
 	enum nl80211_dfs_state dfs_state;
 	unsigned long dfs_state_entered;
 	unsigned int dfs_cac_ms;
+	s8 psd;
 };
 
 /**
@@ -263,7 +268,7 @@
  * are only for driver use when pointers to this structure are
  * passed around.
  *
- * @flags: rate-specific flags
+ * @flags: rate-specific flags from &enum ieee80211_rate_flags
  * @bitrate: bitrate in units of 100 Kbps
  * @hw_value: driver/hardware value for this rate
  * @hw_value_short: driver/hardware value for this rate when
@@ -309,6 +314,9 @@
 	u8 color;
 	bool enabled;
 	bool partial;
+
+	/* help compiling QCA code, no API to set it, value is always true */
+	bool collision_detection_enabled;
 };
 
 /**
@@ -562,6 +570,9 @@
 	if (WARN_ON(iftype >= NL80211_IFTYPE_MAX))
 		return NULL;
 
+	if (iftype == NL80211_IFTYPE_AP_VLAN)
+		iftype = NL80211_IFTYPE_AP;
+
 	for (i = 0; i < sband->n_iftype_data; i++)  {
 		const struct ieee80211_sband_iftype_data *data =
 			&sband->iftype_data[i];
@@ -752,6 +763,10 @@
 	u32 center_freq2;
 	struct ieee80211_edmg edmg;
 	u16 freq1_offset;
+
+	/*  help compiling QCA code, no API to set it, initialized to 0 */
+	u16 ru_punct_bitmap;
+	bool ru_punct_bitmap_supp_he;
 };
 
 /*
@@ -766,6 +781,13 @@
 		enum nl80211_txrate_gi gi;
 		enum nl80211_he_gi he_gi;
 		enum nl80211_he_ltf he_ltf;
+		/* help compiling QCA code, no API to set it, value is
+		 * always 0 */
+		u16 he_ul_mcs[NL80211_HE_NSS_MAX];
+
+		/* help compiling QCA code, no API to set it, initialized
+		 * according to station capabilities */
+		u16 eht_mcs[NL80211_EHT_NSS_MAX];
 	} control[NUM_NL80211_BANDS];
 };
 
@@ -808,7 +830,7 @@
 struct cfg80211_tid_config {
 	const u8 *peer;
 	u32 n_tid_conf;
-	struct cfg80211_tid_cfg tid_conf[];
+	struct cfg80211_tid_cfg tid_conf[] __counted_by(n_tid_conf);
 };
 
 /**
@@ -951,6 +973,30 @@
 				  enum nl80211_iftype iftype);
 
 /**
+ * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable and we
+ *				 can/need start CAC on such channel
+ * @wiphy: the wiphy to validate against
+ * @chandef: the channel definition to check
+ *
+ * Return: true if all channels available and at least
+ *	   one channel requires CAC (NL80211_DFS_USABLE)
+ */
+bool cfg80211_chandef_dfs_usable(struct wiphy *wiphy,
+				 const struct cfg80211_chan_def *chandef);
+
+/**
+ * cfg80211_chandef_dfs_cac_time - get the DFS CAC time (in ms) for given
+ *				   channel definition
+ * @wiphy: the wiphy to validate against
+ * @chandef: the channel definition to check
+ *
+ * Returns: DFS CAC time (in ms) which applies for this channel definition
+ */
+unsigned int
+cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
+			      const struct cfg80211_chan_def *chandef);
+
+/**
  * nl80211_send_chandef - sends the channel definition.
  * @msg: the msg to send channel definition
  * @chandef: the channel definition to check
@@ -1184,7 +1230,7 @@
 	struct {
 		const u8 *data;
 		size_t len;
-	} elem[];
+	} elem[] __counted_by(cnt);
 };
 
 /**
@@ -1201,7 +1247,7 @@
 	struct {
 		const u8 *data;
 		size_t len;
-	} elem[];
+	} elem[] __counted_by(cnt);
 };
 
 /**
@@ -1279,13 +1325,14 @@
 	int n_acl_entries;
 
 	/* Keep it last */
-	struct mac_address mac_addrs[];
+	struct mac_address mac_addrs[] __counted_by(n_acl_entries);
 };
 
 /**
  * struct cfg80211_fils_discovery - FILS discovery parameters from
  * IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
  *
+ * @update: Set to true if the feature configuration should be updated.
  * @min_interval: Minimum packet interval in TUs (0 - 10000)
  * @max_interval: Maximum packet interval in TUs (0 - 10000)
  * @tmpl_len: Template length
@@ -1293,6 +1340,7 @@
  *	frame headers.
  */
 struct cfg80211_fils_discovery {
+	bool update;
 	u32 min_interval;
 	u32 max_interval;
 	size_t tmpl_len;
@@ -1303,6 +1351,7 @@
  * struct cfg80211_unsol_bcast_probe_resp - Unsolicited broadcast probe
  *	response parameters in 6GHz.
  *
+ * @update: Set to true if the feature configuration should be updated.
  * @interval: Packet interval in TUs. Maximum allowed is 20 TU, as mentioned
  *	in IEEE P802.11ax/D6.0 26.17.2.3.2 - AP behavior for fast passive
  *	scanning
@@ -1310,6 +1359,7 @@
  * @tmpl: Template data for probe response
  */
 struct cfg80211_unsol_bcast_probe_resp {
+	bool update;
 	u32 interval;
 	size_t tmpl_len;
 	const u8 *tmpl;
@@ -1350,7 +1400,7 @@
  * @twt_responder: Enable Target Wait Time
  * @he_required: stations must support HE
  * @sae_h2e_required: stations must support direct H2E technique in SAE
- * @flags: flags, as defined in enum cfg80211_ap_settings_flags
+ * @flags: flags, as defined in &enum nl80211_ap_settings_flags
  * @he_obss_pd: OBSS Packet Detection settings
  * @he_oper: HE operation IE (or %NULL if HE isn't enabled)
  * @fils_discovery: FILS discovery transmission parameters
@@ -1396,6 +1446,22 @@
 	u16 punct_bitmap;
 };
 
+
+/**
+ * struct cfg80211_ap_update - AP configuration update
+ *
+ * Subset of &struct cfg80211_ap_settings, for updating a running AP.
+ *
+ * @beacon: beacon data
+ * @fils_discovery: FILS discovery transmission parameters
+ * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
+ */
+struct cfg80211_ap_update {
+	struct cfg80211_beacon_data beacon;
+	struct cfg80211_fils_discovery fils_discovery;
+	struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
+};
+
 /**
  * struct cfg80211_csa_settings - channel switch settings
  *
@@ -1479,7 +1545,6 @@
  * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp)
  * @STATION_PARAM_APPLY_CAPABILITY: apply new capability
  * @STATION_PARAM_APPLY_PLINK_STATE: apply new plink state
- * @STATION_PARAM_APPLY_STA_TXPOWER: apply tx power for STA
  *
  * Not all station parameters have in-band "no change" signalling,
  * for those that don't these flags will are used.
@@ -1532,6 +1597,8 @@
  * @he_6ghz_capa: HE 6 GHz Band capabilities of station
  * @eht_capa: EHT capabilities of station
  * @eht_capa_len: the length of the EHT capabilities
+ * @tp_override: Throughput override value
+ * @tp_overridden: Throughput has been overridden
  */
 struct link_station_parameters {
 	const u8 *mld_mac;
@@ -1550,6 +1617,8 @@
 	const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
 	const struct ieee80211_eht_cap_elem *eht_capa;
 	u8 eht_capa_len;
+	u32 tp_override;
+	bool tp_overridden;
 };
 
 /**
@@ -1702,6 +1771,7 @@
  * @RATE_INFO_FLAGS_EDMG: 60GHz MCS in EDMG mode
  * @RATE_INFO_FLAGS_EXTENDED_SC_DMG: 60GHz extended SC MCS
  * @RATE_INFO_FLAGS_EHT_MCS: EHT MCS information
+ * @RATE_INFO_FLAGS_S1G_MCS: MCS field filled with S1G MCS
  */
 enum rate_info_flags {
 	RATE_INFO_FLAGS_MCS			= BIT(0),
@@ -1712,6 +1782,7 @@
 	RATE_INFO_FLAGS_EDMG			= BIT(5),
 	RATE_INFO_FLAGS_EXTENDED_SC_DMG		= BIT(6),
 	RATE_INFO_FLAGS_EHT_MCS			= BIT(7),
+	RATE_INFO_FLAGS_S1G_MCS			= BIT(8),
 };
 
 /**
@@ -1728,6 +1799,11 @@
  * @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation
  * @RATE_INFO_BW_320: 320 MHz bandwidth
  * @RATE_INFO_BW_EHT_RU: bandwidth determined by EHT RU allocation
+ * @RATE_INFO_BW_1: 1 MHz bandwidth
+ * @RATE_INFO_BW_2: 2 MHz bandwidth
+ * @RATE_INFO_BW_4: 4 MHz bandwidth
+ * @RATE_INFO_BW_8: 8 MHz bandwidth
+ * @RATE_INFO_BW_16: 16 MHz bandwidth
  */
 enum rate_info_bw {
 	RATE_INFO_BW_20 = 0,
@@ -1739,6 +1815,11 @@
 	RATE_INFO_BW_HE_RU,
 	RATE_INFO_BW_320,
 	RATE_INFO_BW_EHT_RU,
+	RATE_INFO_BW_1,
+	RATE_INFO_BW_2,
+	RATE_INFO_BW_4,
+	RATE_INFO_BW_8,
+	RATE_INFO_BW_16,
 };
 
 /**
@@ -1747,8 +1828,8 @@
  * Information about a receiving or transmitting bitrate
  *
  * @flags: bitflag of flags from &enum rate_info_flags
- * @mcs: mcs index if struct describes an HT/VHT/HE rate
  * @legacy: bitrate in 100kbit/s for 802.11abg
+ * @mcs: mcs index if struct describes an HT/VHT/HE/EHT/S1G rate
  * @nss: number of streams (VHT & HE only)
  * @bw: bandwidth (from &enum rate_info_bw)
  * @he_gi: HE guard interval (from &enum nl80211_he_gi)
@@ -1761,9 +1842,9 @@
  *	only valid if bw is %RATE_INFO_BW_EHT_RU)
  */
 struct rate_info {
-	u8 flags;
-	u8 mcs;
+	u16 flags;
 	u16 legacy;
+	u8 mcs;
 	u8 nss;
 	u8 bw;
 	u8 he_gi;
@@ -2141,7 +2222,7 @@
  * @sn: target sequence number
  * @metric: metric (cost) of this mesh path
  * @exptime: expiration time for the mesh path from now, in msecs
- * @flags: mesh path flags
+ * @flags: mesh path flags from &enum mesh_path_flags
  * @discovery_timeout: total mesh path discovery timeout, in msecs
  * @discovery_retries: mesh path discovery retries
  * @generation: generation number for nl80211 dumps.
@@ -2166,6 +2247,15 @@
 	int generation;
 };
 
+#define MPLINK_DUMP_MAX_BLOCKED_ENTRIES	216 /* 216/6 = 36 entries */
+/**
+ * struct mplink_blocked_info - mesh peer link blocked info
+ */
+struct mplink_blocked_info {
+	u8 count;
+	u8 info[MPLINK_DUMP_MAX_BLOCKED_ENTRIES];
+};
+
 /**
  * struct bss_parameters - BSS parameters
  *
@@ -2332,7 +2422,7 @@
  * @user_mpm: userspace handles all MPM functions
  * @dtim_period: DTIM period to use
  * @beacon_interval: beacon interval to use
- * @mcast_rate: multicat rate for Mesh Node [6Mbps is the default for 802.11a]
+ * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
  * @basic_rates: basic rates to use when creating the mesh
  * @beacon_rate: bitrate to be used for beacons
  * @userspace_handles_dfs: whether user space controls DFS operation, i.e.
@@ -2366,6 +2456,17 @@
 };
 
 /**
+ * struct mesh_vendor_ie - 802.11s mesh vendor information elements
+ * @ie: vendor information elements
+ * @ie_len: length of vendor information elements
+ * These parameters are updated periodically after mesh creation.
+ */
+struct mesh_vendor_ie {
+	const u8 *ie;
+	u8 ie_len;
+};
+
+/**
  * struct ocb_setup - 802.11p OCB mode setup configuration
  * @chandef: defines the channel to use
  *
@@ -2454,6 +2555,7 @@
  * @short_ssid_valid: @short_ssid is valid and can be used
  * @psc_no_listen: when set, and the channel is a PSC channel, no need to wait
  *       20 TUs before starting to send probe requests.
+ * @psd_20: The AP's 20 MHz PSD value.
  */
 struct cfg80211_scan_6ghz_params {
 	u32 short_ssid;
@@ -2462,6 +2564,7 @@
 	bool unsolicited_probe;
 	bool short_ssid_valid;
 	bool psc_no_listen;
+	s8 psd_20;
 };
 
 /**
@@ -2471,7 +2574,6 @@
  * @n_ssids: number of SSIDs
  * @channels: channels to scan on.
  * @n_channels: total number of channels to scan
- * @scan_width: channel width for scanning
  * @ie: optional information element(s) to add into Probe Request or %NULL
  * @ie_len: length of ie in octets
  * @duration: how long to listen on each channel, in TUs. If
@@ -2479,7 +2581,7 @@
  *	the actual dwell time may be shorter.
  * @duration_mandatory: if set, the scan duration must be as specified by the
  *	%duration field.
- * @flags: bit field of flags controlling operation
+ * @flags: control flags from &enum nl80211_scan_flags
  * @rates: bitmap of rates to advertise for each band
  * @wiphy: the wiphy this was for
  * @scan_start: time (in jiffies) when the scan started
@@ -2501,7 +2603,6 @@
 	struct cfg80211_ssid *ssids;
 	int n_ssids;
 	u32 n_channels;
-	enum nl80211_bss_scan_width scan_width;
 	const u8 *ie;
 	size_t ie_len;
 	u16 duration;
@@ -2527,7 +2628,7 @@
 	struct cfg80211_scan_6ghz_params *scan_6ghz_params;
 
 	/* keep last */
-	struct ieee80211_channel *channels[];
+	struct ieee80211_channel *channels[] __counted_by(n_channels);
 };
 
 static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
@@ -2550,7 +2651,7 @@
  *	or no match (RSSI only)
  * @rssi_thold: don't report scan results below this threshold (in s32 dBm)
  * @per_band_rssi_thold: Minimum rssi threshold for each band to be applied
- *	for filtering out scan results received. Drivers advertize this support
+ *	for filtering out scan results received. Drivers advertise this support
  *	of band specific rssi based filtering through the feature capability
  *	%NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD. These band
  *	specific rssi thresholds take precedence over rssi_thold, if specified.
@@ -2596,14 +2697,13 @@
  * @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
  * @n_ssids: number of SSIDs
  * @n_channels: total number of channels to scan
- * @scan_width: channel width for scanning
  * @ie: optional information element(s) to add into Probe Request or %NULL
  * @ie_len: length of ie in octets
- * @flags: bit field of flags controlling operation
+ * @flags: control flags from &enum nl80211_scan_flags
  * @match_sets: sets of parameters to be matched for a scan result
  *	entry to be considered valid and to be passed to the host
  *	(others are filtered out).
- *	If ommited, all results are passed.
+ *	If omitted, all results are passed.
  * @n_match_sets: number of match sets
  * @report_results: indicates that results were reported for this request
  * @wiphy: the wiphy this was for
@@ -2637,14 +2737,13 @@
  *	to the specified band while deciding whether a better BSS is reported
  *	using @relative_rssi. If delta is a negative number, the BSSs that
  *	belong to the specified band will be penalized by delta dB in relative
- *	comparisions.
+ *	comparisons.
  */
 struct cfg80211_sched_scan_request {
 	u64 reqid;
 	struct cfg80211_ssid *ssids;
 	int n_ssids;
 	u32 n_channels;
-	enum nl80211_bss_scan_width scan_width;
 	const u8 *ie;
 	size_t ie_len;
 	u32 flags;
@@ -2692,7 +2791,6 @@
 /**
  * struct cfg80211_inform_bss - BSS inform data
  * @chan: channel the frame was received on
- * @scan_width: scan width that was used
  * @signal: signal strength value, according to the wiphy's
  *	signal type
  * @boottime_ns: timestamp (CLOCK_BOOTTIME) when the information was
@@ -2708,16 +2806,18 @@
  *	the BSS that requested the scan in which the beacon/probe was received.
  * @chains: bitmask for filled values in @chain_signal.
  * @chain_signal: per-chain signal strength of last received BSS in dBm.
+ * @drv_data: Data to be passed through to @inform_bss
  */
 struct cfg80211_inform_bss {
 	struct ieee80211_channel *chan;
-	enum nl80211_bss_scan_width scan_width;
 	s32 signal;
 	u64 boottime_ns;
 	u64 parent_tsf;
 	u8 parent_bssid[ETH_ALEN] __aligned(2);
 	u8 chains;
 	s8 chain_signal[IEEE80211_MAX_CHAINS];
+
+	void *drv_data;
 };
 
 /**
@@ -2743,7 +2843,6 @@
  * for use in scan results and similar.
  *
  * @channel: channel this BSS is on
- * @scan_width: width of the control channel
  * @bssid: BSSID of the BSS
  * @beacon_interval: the beacon interval as from the frame
  * @capability: the capability field in host byte order
@@ -2773,7 +2872,6 @@
  */
 struct cfg80211_bss {
 	struct ieee80211_channel *channel;
-	enum nl80211_bss_scan_width scan_width;
 
 	const struct cfg80211_bss_ies __rcu *ies;
 	const struct cfg80211_bss_ies __rcu *beacon_ies;
@@ -2870,11 +2968,17 @@
  *	if this is %NULL for a link, that link is not requested
  * @elems: extra elements for the per-STA profile for this link
  * @elems_len: length of the elements
+ * @disabled: If set this link should be included during association etc. but it
+ *	should not be used until enabled by the AP MLD.
+ * @error: per-link error code, must be <= 0. If there is an error, then the
+ *	operation as a whole must fail.
  */
 struct cfg80211_assoc_link {
 	struct cfg80211_bss *bss;
 	const u8 *elems;
 	size_t elems_len;
+	bool disabled;
+	int error;
 };
 
 /**
@@ -3473,7 +3577,7 @@
  * This structure provides information needed to transmit a mgmt frame
  *
  * @chan: channel to use
- * @offchan: indicates wether off channel operation is required
+ * @offchan: indicates whether off channel operation is required
  * @wait: duration for ROC
  * @buf: buffer to transmit
  * @len: buffer length
@@ -3591,7 +3695,7 @@
  * @publish_bcast: if true, the solicited publish should be broadcasted
  * @subscribe_active: if true, the subscribe is active
  * @followup_id: the instance ID for follow up
- * @followup_reqid: the requestor instance ID for follow up
+ * @followup_reqid: the requester instance ID for follow up
  * @followup_dest: MAC address of the recipient of the follow up
  * @ttl: time to live counter in DW.
  * @serv_spec_info: Service Specific Info
@@ -3925,7 +4029,7 @@
 
 	struct list_head list;
 
-	struct cfg80211_pmsr_request_peer peers[];
+	struct cfg80211_pmsr_request_peer peers[] __counted_by(n_peers);
 };
 
 /**
@@ -4086,6 +4190,13 @@
  *
  * @change_bss: Modify parameters for a given BSS.
  *
+ * @inform_bss: Called by cfg80211 while being informed about new BSS data
+ *	for every BSS found within the reported data or frame. This is called
+ *	from within the cfg80211 inform_bss handlers while holding the bss_lock.
+ *	The data parameter is passed through from drv_data inside
+ *	struct cfg80211_inform_bss.
+ *	The new IE data for the BSS is explicitly passed.
+ *
  * @set_txq_params: Set TX queue parameters
  *
  * @libertas_set_mesh_channel: Only for backward compatibility for libertas,
@@ -4421,7 +4532,7 @@
 	int	(*start_ap)(struct wiphy *wiphy, struct net_device *dev,
 			    struct cfg80211_ap_settings *settings);
 	int	(*change_beacon)(struct wiphy *wiphy, struct net_device *dev,
-				 struct cfg80211_beacon_data *info);
+				 struct cfg80211_ap_update *info);
 	int	(*stop_ap)(struct wiphy *wiphy, struct net_device *dev,
 			   unsigned int link_id);
 
@@ -4473,6 +4584,9 @@
 	int	(*change_bss)(struct wiphy *wiphy, struct net_device *dev,
 			      struct bss_parameters *params);
 
+	void	(*inform_bss)(struct wiphy *wiphy, struct cfg80211_bss *bss,
+			      const struct cfg80211_bss_ies *ies, void *data);
+
 	int	(*set_txq_params)(struct wiphy *wiphy, struct net_device *dev,
 				  struct ieee80211_txq_params *params);
 
@@ -4592,9 +4706,10 @@
 				  struct cfg80211_gtk_rekey_data *data);
 
 	int	(*tdls_mgmt)(struct wiphy *wiphy, struct net_device *dev,
-			     const u8 *peer, u8 action_code,  u8 dialog_token,
-			     u16 status_code, u32 peer_capability,
-			     bool initiator, const u8 *buf, size_t len);
+			     const u8 *peer, int link_id,
+			     u8 action_code, u8 dialog_token, u16 status_code,
+			     u32 peer_capability, bool initiator,
+			     const u8 *buf, size_t len);
 	int	(*tdls_oper)(struct wiphy *wiphy, struct net_device *dev,
 			     const u8 *peer, enum nl80211_tdls_operation oper);
 
@@ -4783,6 +4898,8 @@
  * @WIPHY_FLAG_SUPPORTS_EXT_KCK_32: The device supports 32-byte KCK keys.
  * @WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER: The device could handle reg notify for
  *	NL80211_REGDOM_SET_BY_DRIVER.
+ * @WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON: reg_call_notifier() is called if driver
+ *	set this flag to update channels on beacon hints.
  */
 enum wiphy_flags {
 	WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK		= BIT(0),
@@ -4809,6 +4926,7 @@
 	WIPHY_FLAG_SUPPORTS_5_10_MHZ		= BIT(22),
 	WIPHY_FLAG_HAS_CHANNEL_SWITCH		= BIT(23),
 	WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER	= BIT(24),
+	WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON     = BIT(25),
 };
 
 /**
@@ -5410,6 +5528,8 @@
 	u8 perm_addr[ETH_ALEN];
 	u8 addr_mask[ETH_ALEN];
 
+	int dev_port;
+
 	struct mac_address *addresses;
 
 	const struct ieee80211_txrx_stypes *mgmt_stypes;
@@ -5724,12 +5844,17 @@
  * wiphy_lock - lock the wiphy
  * @wiphy: the wiphy to lock
  *
- * This is mostly exposed so it can be done around registering and
- * unregistering netdevs that aren't created through cfg80211 calls,
- * since that requires locking in cfg80211 when the notifiers is
- * called, but that cannot differentiate which way it's called.
+ * This is needed around registering and unregistering netdevs that
+ * aren't created through cfg80211 calls, since that requires locking
+ * in cfg80211 when the notifiers is called, but that cannot
+ * differentiate which way it's called.
+ *
+ * It can also be used by drivers for their own purposes.
  *
  * When cfg80211 ops are called, the wiphy is already locked.
+ *
+ * Note that this makes sure that no workers that have been queued
+ * with wiphy_work_queue() are running.
  */
 static inline void wiphy_lock(struct wiphy *wiphy)
 	__acquires(&wiphy->mtx)
@@ -5749,6 +5874,109 @@
 	mutex_unlock(&wiphy->mtx);
 }
 
+struct wiphy_work;
+typedef void (*wiphy_work_func_t)(struct wiphy *, struct wiphy_work *);
+
+struct wiphy_work {
+	struct list_head entry;
+	wiphy_work_func_t func;
+};
+
+static inline void wiphy_work_init(struct wiphy_work *work,
+				   wiphy_work_func_t func)
+{
+	INIT_LIST_HEAD(&work->entry);
+	work->func = func;
+}
+
+/**
+ * wiphy_work_queue - queue work for the wiphy
+ * @wiphy: the wiphy to queue for
+ * @work: the work item
+ *
+ * This is useful for work that must be done asynchronously, and work
+ * queued here has the special property that the wiphy mutex will be
+ * held as if wiphy_lock() was called, and that it cannot be running
+ * after wiphy_lock() was called. Therefore, wiphy_work_cancel() can
+ * use just cancel_work() instead of cancel_work_sync(), it requires
+ * being in a section protected by wiphy_lock().
+ */
+void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work);
+
+/**
+ * wiphy_work_cancel - cancel previously queued work
+ * @wiphy: the wiphy, for debug purposes
+ * @work: the work to cancel
+ *
+ * Cancel the work *without* waiting for it, this assumes being
+ * called under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work);
+
+/**
+ * wiphy_work_flush - flush previously queued work
+ * @wiphy: the wiphy, for debug purposes
+ * @work: the work to flush, this can be %NULL to flush all work
+ *
+ * Flush the work (i.e. run it if pending). This must be called
+ * under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work);
+
+struct wiphy_delayed_work {
+	struct wiphy_work work;
+	struct wiphy *wiphy;
+	struct timer_list timer;
+};
+
+void wiphy_delayed_work_timer(struct timer_list *t);
+
+static inline void wiphy_delayed_work_init(struct wiphy_delayed_work *dwork,
+					   wiphy_work_func_t func)
+{
+	timer_setup(&dwork->timer, wiphy_delayed_work_timer, 0);
+	wiphy_work_init(&dwork->work, func);
+}
+
+/**
+ * wiphy_delayed_work_queue - queue delayed work for the wiphy
+ * @wiphy: the wiphy to queue for
+ * @dwork: the delayable worker
+ * @delay: number of jiffies to wait before queueing
+ *
+ * This is useful for work that must be done asynchronously, and work
+ * queued here has the special property that the wiphy mutex will be
+ * held as if wiphy_lock() was called, and that it cannot be running
+ * after wiphy_lock() was called. Therefore, wiphy_work_cancel() can
+ * use just cancel_work() instead of cancel_work_sync(), it requires
+ * being in a section protected by wiphy_lock().
+ */
+void wiphy_delayed_work_queue(struct wiphy *wiphy,
+			      struct wiphy_delayed_work *dwork,
+			      unsigned long delay);
+
+/**
+ * wiphy_delayed_work_cancel - cancel previously queued delayed work
+ * @wiphy: the wiphy, for debug purposes
+ * @dwork: the delayed work to cancel
+ *
+ * Cancel the work *without* waiting for it, this assumes being
+ * called under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_delayed_work_cancel(struct wiphy *wiphy,
+			       struct wiphy_delayed_work *dwork);
+
+/**
+ * wiphy_delayed_work_flush - flush previously queued delayed work
+ * @wiphy: the wiphy, for debug purposes
+ * @dwork: the delayed work to flush
+ *
+ * Flush the work (i.e. run it if pending). This must be called
+ * under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_delayed_work_flush(struct wiphy *wiphy,
+			      struct wiphy_delayed_work *dwork);
+
 /**
  * struct wireless_dev - wireless device state
  *
@@ -5797,8 +6025,6 @@
  * @mgmt_registrations: list of registrations for management frames
  * @mgmt_registrations_need_update: mgmt registrations were updated,
  *	need to propagate the update to the driver
- * @mtx: mutex used to lock data in this struct, may be used by drivers
- *	and some API functions require it held
  * @beacon_interval: beacon interval used on this device for transmitting
  *	beacons, 0 when not valid
  * @address: The address for this device, valid only if @netdev is %NULL
@@ -5821,6 +6047,7 @@
  * @event_lock: (private) lock for event list
  * @owner_nlportid: (private) owner socket port ID
  * @nl_owner_dead: (private) owner socket went away
+ * @cqm_rssi_work: (private) CQM RSSI reporting work
  * @cqm_config: (private) nl80211 RSSI monitor state
  * @pmsr_list: (private) peer measurement requests
  * @pmsr_lock: (private) peer measurements requests/results lock
@@ -5844,8 +6071,6 @@
 	struct list_head mgmt_registrations;
 	u8 mgmt_registrations_need_update:1;
 
-	struct mutex mtx;
-
 	bool use_4addr, is_running, registered, registering;
 
 	u8 address[ETH_ALEN] __aligned(sizeof(u16));
@@ -5893,7 +6118,8 @@
 	} wext;
 #endif
 
-	struct cfg80211_cqm_config *cqm_config;
+	struct wiphy_work cqm_rssi_work;
+	struct cfg80211_cqm_config __rcu *cqm_config;
 
 	struct list_head pmsr_list;
 	spinlock_t pmsr_lock;
@@ -6135,13 +6361,11 @@
 /**
  * ieee80211_mandatory_rates - get mandatory rates for a given band
  * @sband: the band to look for rates in
- * @scan_width: width of the control channel
  *
  * This function returns a bitmap of the mandatory rates for the given
  * band, bits are set according to the rate position in the bitrates array.
  */
-u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband,
-			      enum nl80211_bss_scan_width scan_width);
+u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband);
 
 /*
  * Radiotap parsing functions -- for controlled injection support
@@ -6482,7 +6706,7 @@
  * @ies: data consisting of IEs
  * @len: length of data
  *
- * Return: %NULL if the etended element could not be found or if
+ * Return: %NULL if the extended element could not be found or if
  * the element is invalid (claims to be longer than the given
  * data) or if the byte array doesn't match; otherwise return the
  * requested element struct.
@@ -6561,6 +6785,28 @@
 }
 
 /**
+ * cfg80211_defragment_element - Defrag the given element data into a buffer
+ *
+ * @elem: the element to defragment
+ * @ies: elements where @elem is contained
+ * @ieslen: length of @ies
+ * @data: buffer to store element data
+ * @data_len: length of @data
+ * @frag_id: the element ID of fragments
+ *
+ * Return: length of @data, or -EINVAL on error
+ *
+ * Copy out all data from an element that may be fragmented into @data, while
+ * skipping all headers.
+ *
+ * The function uses memmove() internally. It is acceptable to defragment an
+ * element in-place.
+ */
+ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
+				    size_t ieslen, u8 *data, size_t data_len,
+				    u8 frag_id);
+
+/**
  * cfg80211_send_layer2_update - send layer 2 update frame
  *
  * @dev: network device
@@ -6607,7 +6853,7 @@
 /**
  * regulatory_set_wiphy_regd - set regdom info for self managed drivers
  * @wiphy: the wireless device we want to process the regulatory domain on
- * @rd: the regulatory domain informatoin to use for this wiphy
+ * @rd: the regulatory domain information to use for this wiphy
  *
  * Set the regulatory domain information for self-managed wiphys, only they
  * may use this function. See %REGULATORY_WIPHY_SELF_MANAGED for more
@@ -6698,7 +6944,7 @@
  * Regulatory self-managed driver can use it to proactively
  *
  * @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried.
- * @freq: the freqency(in MHz) to be queried.
+ * @freq: the frequency (in MHz) to be queried.
  * @rule: pointer to store the wmm rule from the regulatory db.
  *
  * Self-managed wireless drivers can use this function to  query
@@ -6781,22 +7027,6 @@
 			       gfp_t gfp);
 
 static inline struct cfg80211_bss * __must_check
-cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
-				struct ieee80211_channel *rx_channel,
-				enum nl80211_bss_scan_width scan_width,
-				struct ieee80211_mgmt *mgmt, size_t len,
-				s32 signal, gfp_t gfp)
-{
-	struct cfg80211_inform_bss data = {
-		.chan = rx_channel,
-		.scan_width = scan_width,
-		.signal = signal,
-	};
-
-	return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp);
-}
-
-static inline struct cfg80211_bss * __must_check
 cfg80211_inform_bss_frame(struct wiphy *wiphy,
 			  struct ieee80211_channel *rx_channel,
 			  struct ieee80211_mgmt *mgmt, size_t len,
@@ -6804,7 +7034,6 @@
 {
 	struct cfg80211_inform_bss data = {
 		.chan = rx_channel,
-		.scan_width = NL80211_BSS_CHAN_WIDTH_20,
 		.signal = signal,
 	};
 
@@ -6907,26 +7136,6 @@
 			 gfp_t gfp);
 
 static inline struct cfg80211_bss * __must_check
-cfg80211_inform_bss_width(struct wiphy *wiphy,
-			  struct ieee80211_channel *rx_channel,
-			  enum nl80211_bss_scan_width scan_width,
-			  enum cfg80211_bss_frame_type ftype,
-			  const u8 *bssid, u64 tsf, u16 capability,
-			  u16 beacon_interval, const u8 *ie, size_t ielen,
-			  s32 signal, gfp_t gfp)
-{
-	struct cfg80211_inform_bss data = {
-		.chan = rx_channel,
-		.scan_width = scan_width,
-		.signal = signal,
-	};
-
-	return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf,
-					capability, beacon_interval, ie, ielen,
-					gfp);
-}
-
-static inline struct cfg80211_bss * __must_check
 cfg80211_inform_bss(struct wiphy *wiphy,
 		    struct ieee80211_channel *rx_channel,
 		    enum cfg80211_bss_frame_type ftype,
@@ -6936,7 +7145,6 @@
 {
 	struct cfg80211_inform_bss data = {
 		.chan = rx_channel,
-		.scan_width = NL80211_BSS_CHAN_WIDTH_20,
 		.signal = signal,
 	};
 
@@ -7021,19 +7229,6 @@
 				    void *data),
 		       void *iter_data);
 
-static inline enum nl80211_bss_scan_width
-cfg80211_chandef_to_scan_width(const struct cfg80211_chan_def *chandef)
-{
-	switch (chandef->width) {
-	case NL80211_CHAN_WIDTH_5:
-		return NL80211_BSS_CHAN_WIDTH_5;
-	case NL80211_CHAN_WIDTH_10:
-		return NL80211_BSS_CHAN_WIDTH_10;
-	default:
-		return NL80211_BSS_CHAN_WIDTH_20;
-	}
-}
-
 /**
  * cfg80211_rx_mlme_mgmt - notification of processed MLME management frame
  * @dev: network device
@@ -7066,7 +7261,7 @@
 void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
 
 /**
- * struct cfg80211_rx_assoc_resp - association response data
+ * struct cfg80211_rx_assoc_resp_data - association response data
  * @bss: the BSS that association was requested with, ownership of the pointer
  *	moves to cfg80211 in the call to cfg80211_rx_assoc_resp()
  * @buf: (Re)Association Response frame (header + body)
@@ -7081,7 +7276,7 @@
  * @links.status: Set this (along with a BSS pointer) for links that
  *	were rejected by the AP.
  */
-struct cfg80211_rx_assoc_resp {
+struct cfg80211_rx_assoc_resp_data {
 	const u8 *buf;
 	size_t len;
 	const u8 *req_ies;
@@ -7089,7 +7284,7 @@
 	int uapsd_queues;
 	const u8 *ap_mld_addr;
 	struct {
-		const u8 *addr;
+		u8 addr[ETH_ALEN] __aligned(2);
 		struct cfg80211_bss *bss;
 		u16 status;
 	} links[IEEE80211_MLD_MAX_NUM_LINKS];
@@ -7098,7 +7293,7 @@
 /**
  * cfg80211_rx_assoc_resp - notification of processed association response
  * @dev: network device
- * @data: association response data, &struct cfg80211_rx_assoc_resp
+ * @data: association response data, &struct cfg80211_rx_assoc_resp_data
  *
  * After being asked to associate via cfg80211_ops::assoc() the driver must
  * call either this function or cfg80211_auth_timeout().
@@ -7106,7 +7301,7 @@
  * This function may sleep. The caller must hold the corresponding wdev's mutex.
  */
 void cfg80211_rx_assoc_resp(struct net_device *dev,
-			    struct cfg80211_rx_assoc_resp *data);
+			    struct cfg80211_rx_assoc_resp_data *data);
 
 /**
  * struct cfg80211_assoc_failure - association failure data
@@ -7825,7 +8020,8 @@
  * cfg80211_port_authorized - notify cfg80211 of successful security association
  *
  * @dev: network device
- * @bssid: the BSSID of the AP
+ * @peer_addr: BSSID of the AP/P2P GO in case of STA/GC or STA/GC MAC address
+ *	in case of AP/P2P GO
  * @td_bitmap: transition disable policy
  * @td_bitmap_len: Length of transition disable policy
  * @gfp: allocation flags
@@ -7836,8 +8032,11 @@
  * should be preceded with a call to cfg80211_connect_result(),
  * cfg80211_connect_done(), cfg80211_connect_bss() or cfg80211_roamed() to
  * indicate the 802.11 association.
+ * This function can also be called by AP/P2P GO driver that supports
+ * authentication offload. In this case the peer_addr passed is that of
+ * associated STA/GC.
  */
-void cfg80211_port_authorized(struct net_device *dev, const u8 *bssid,
+void cfg80211_port_authorized(struct net_device *dev, const u8 *peer_addr,
 			      const u8* td_bitmap, u8 td_bitmap_len, gfp_t gfp);
 
 /**
@@ -7975,7 +8174,7 @@
  * @link_id: the ID of the link the frame was received	on
  * @buf: Management frame (header + body)
  * @len: length of the frame data
- * @flags: flags, as defined in enum nl80211_rxmgmt_flags
+ * @flags: flags, as defined in &enum nl80211_rxmgmt_flags
  * @rx_tstamp: Hardware timestamp of frame RX in nanoseconds
  * @ack_tstamp: Hardware timestamp of ack TX in nanoseconds
  */
@@ -8426,7 +8625,7 @@
  * @link_id: the link ID for MLO, must be 0 for non-MLO
  * @punct_bitmap: the new puncturing bitmap
  *
- * Caller must acquire wdev_lock, therefore must only be called from sleepable
+ * Caller must hold wiphy mutex, therefore must only be called from sleepable
  * driver context!
  */
 void cfg80211_ch_switch_notify(struct net_device *dev,
@@ -8666,6 +8865,18 @@
 }
 
 /**
+ * ieee80211_fragment_element - fragment the last element in skb
+ * @skb: The skbuf that the element was added to
+ * @len_pos: Pointer to length of the element to fragment
+ * @frag_id: The element ID to use for fragments
+ *
+ * This function fragments all data after @len_pos, adding fragmentation
+ * elements with the given ID as appropriate. The SKB will grow in size
+ * accordingly.
+ */
+void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos, u8 frag_id);
+
+/**
  * cfg80211_report_wowlan_wakeup - report wakeup from WoWLAN
  * @wdev: the wireless device reporting the wakeup
  * @wakeup: the wakeup report
@@ -8914,9 +9125,9 @@
 
 /**
  * cfg80211_assoc_comeback - notification of association that was
- * temporarly rejected with a comeback
+ * temporarily rejected with a comeback
  * @netdev: network device
- * @ap_addr: AP (MLD) address that rejected the assocation
+ * @ap_addr: AP (MLD) address that rejected the association
  * @timeout: timeout interval value TUs.
  *
  * this function may sleep. the caller must hold the corresponding wdev's mutex.
@@ -9067,4 +9278,17 @@
 bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap,
 					      const struct cfg80211_chan_def *chandef);
 
+/**
+ * cfg80211_links_removed - Notify about removed STA MLD setup links.
+ * @dev: network device.
+ * @link_mask: BIT mask of removed STA MLD setup link IDs.
+ *
+ * Inform cfg80211 and the userspace about removed STA MLD setup links due to
+ * AP MLD removing the corresponding affiliated APs with Multi-Link
+ * reconfiguration. Note that it's not valid to remove all links, in this
+ * case disconnect instead.
+ * Also note that the wdev mutex must be held.
+ */
+void cfg80211_links_removed(struct net_device *dev, u16 link_mask);
+
 #endif /* __NET_CFG80211_H */
diff -ruw linux-6.4/include/net/dsa.h linux-6.4-fbx/include/net/dsa.h
--- linux-6.4/include/net/dsa.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/net/dsa.h	2023-06-27 11:47:16.079867270 +0200
@@ -56,6 +56,7 @@
 #define DSA_TAG_PROTO_RTL8_4T_VALUE		25
 #define DSA_TAG_PROTO_RZN1_A5PSW_VALUE		26
 #define DSA_TAG_PROTO_LAN937X_VALUE		27
+#define DSA_TAG_PROTO_BRCM_FBX_VALUE		28
 
 enum dsa_tag_protocol {
 	DSA_TAG_PROTO_NONE		= DSA_TAG_PROTO_NONE_VALUE,
@@ -86,6 +87,7 @@
 	DSA_TAG_PROTO_RTL8_4T		= DSA_TAG_PROTO_RTL8_4T_VALUE,
 	DSA_TAG_PROTO_RZN1_A5PSW	= DSA_TAG_PROTO_RZN1_A5PSW_VALUE,
 	DSA_TAG_PROTO_LAN937X		= DSA_TAG_PROTO_LAN937X_VALUE,
+	DSA_TAG_PROTO_BRCM_FBX		= DSA_TAG_PROTO_BRCM_FBX_VALUE,
 };
 
 struct dsa_switch;
@@ -263,6 +265,8 @@
 		DSA_PORT_TYPE_DSA,
 		DSA_PORT_TYPE_USER,
 	} type;
+	bool			is_def_cpu_port;
+	struct device_node	*force_cpu_dn;
 
 	const char		*name;
 	struct dsa_port		*cpu_dp;
diff -ruw linux-6.4/include/net/gro.h linux-6.4-fbx/include/net/gro.h
--- linux-6.4/include/net/gro.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/net/gro.h	2023-11-07 13:38:44.058256582 +0100
@@ -446,5 +446,6 @@
 		gro_normal_list(napi);
 }
 
+extern struct list_head offload_base;
 
 #endif /* _NET_IPV6_GRO_H */
diff -ruw linux-6.4/include/net/ieee80211_radiotap.h linux-6.4-fbx/include/net/ieee80211_radiotap.h
--- linux-6.4/include/net/ieee80211_radiotap.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/net/ieee80211_radiotap.h	2023-11-13 17:17:11.639576253 +0100
@@ -21,7 +21,7 @@
 #include <asm/unaligned.h>
 
 /**
- * struct ieee82011_radiotap_header - base radiotap header
+ * struct ieee80211_radiotap_header - base radiotap header
  */
 struct ieee80211_radiotap_header {
 	/**
@@ -535,8 +535,16 @@
 	IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN	= 0x00000008,
 	IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN		= 0x00000010,
 	IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC		= 0x00000020,
+	IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_CHECKED = 0x00000040,
+	IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK	= 0x00000080,
 	IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER		= 0x00007000,
 	IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW			= 0x00038000,
+		IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_20MHZ		= 0,
+		IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_40MHZ		= 1,
+		IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_80MHZ		= 2,
+		IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_160MHZ		= 3,
+		IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_320MHZ_1		= 4,
+		IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_320MHZ_2		= 5,
 	IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL		= 0x00040000,
 	IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR		= 0x01f80000,
 	IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP			= 0xfe000000,
@@ -573,6 +581,7 @@
 
 /**
  * ieee80211_get_radiotap_len - get radiotap header length
+ * @data: pointer to the header
  */
 static inline u16 ieee80211_get_radiotap_len(const char *data)
 {
diff -ruw linux-6.4/include/net/ip.h linux-6.4-fbx/include/net/ip.h
--- linux-6.4/include/net/ip.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/net/ip.h	2023-06-27 11:47:16.079867270 +0200
@@ -705,6 +705,20 @@
 #endif
 
 /*
+ *     Functions provided by ip_ffn.c
+ */
+
+enum {
+	IP_FFN_FINISH_OUT,
+	IP_FFN_LOCAL_IN,
+};
+
+extern void ip_ffn_init(void);
+extern int ip_ffn_process(struct sk_buff *skb);
+extern void ip_ffn_add(struct sk_buff *skb, int when);
+extern void ip_ffn_flush_all(void);
+
+/*
  *	Functions provided by ip_forward.c
  */
 
diff -ruw linux-6.4/include/net/ip6_tunnel.h linux-6.4-fbx/include/net/ip6_tunnel.h
--- linux-6.4/include/net/ip6_tunnel.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/net/ip6_tunnel.h	2023-05-22 20:06:44.779868059 +0200
@@ -18,6 +18,18 @@
 /* determine capability on a per-packet basis */
 #define IP6_TNL_F_CAP_PER_PACKET 0x40000
 
+/* IPv6 tunnel FMR */
+struct __ip6_tnl_fmr {
+	struct __ip6_tnl_fmr *next; /* next fmr in list */
+	struct in6_addr ip6_prefix;
+	struct in_addr ip4_prefix;
+
+	__u8 ip6_prefix_len;
+	__u8 ip4_prefix_len;
+	__u8 ea_len;
+	__u8 offset;
+};
+
 struct __ip6_tnl_parm {
 	char name[IFNAMSIZ];	/* name of tunnel device */
 	int link;		/* ifindex of underlying L2 interface */
@@ -29,6 +41,7 @@
 	__u32 flags;		/* tunnel flags */
 	struct in6_addr laddr;	/* local tunnel end-point address */
 	struct in6_addr raddr;	/* remote tunnel end-point address */
+	struct __ip6_tnl_fmr *fmrs;	/* FMRs */
 
 	__be16			i_flags;
 	__be16			o_flags;
diff -ruw linux-6.4/include/net/ipv6.h linux-6.4-fbx/include/net/ipv6.h
--- linux-6.4/include/net/ipv6.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/net/ipv6.h	2023-05-22 20:06:44.779868059 +0200
@@ -1147,6 +1147,7 @@
 int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
 int ip6_forward(struct sk_buff *skb);
 int ip6_input(struct sk_buff *skb);
+int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
 int ip6_mc_input(struct sk_buff *skb);
 void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
 			      bool have_final);
@@ -1377,4 +1378,18 @@
 	release_sock(sk);
 }
 
+/*
+ *     Functions provided by ipv6_ffn.c
+ */
+
+enum {
+	IPV6_FFN_FINISH_OUT,
+	IPV6_FFN_LOCAL_IN,
+};
+
+extern void ipv6_ffn_init(void);
+extern int ipv6_ffn_process(struct sk_buff *skb);
+extern void ipv6_ffn_add(struct sk_buff *skb, int when);
+extern void ipv6_ffn_flush_all(void);
+
 #endif /* _NET_IPV6_H */
diff -ruw linux-6.4/include/net/mac80211.h linux-6.4-fbx/include/net/mac80211.h
--- linux-6.4/include/net/mac80211.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/net/mac80211.h	2024-01-19 17:01:19.905848123 +0100
@@ -7,7 +7,7 @@
  * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2022 Intel Corporation
+ * Copyright (C) 2018 - 2023 Intel Corporation
  */
 
 #ifndef MAC80211_H
@@ -341,6 +341,7 @@
  * @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response
  *	status changed.
  * @BSS_CHANGED_EHT_PUNCTURING: The channel puncturing bitmap changed.
+ * @BSS_CHANGED_MLD_VALID_LINKS: MLD valid links status changed.
  */
 enum ieee80211_bss_change {
 	BSS_CHANGED_ASSOC		= 1<<0,
@@ -376,6 +377,7 @@
 	BSS_CHANGED_FILS_DISCOVERY      = 1<<30,
 	BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
 	BSS_CHANGED_EHT_PUNCTURING	= BIT_ULL(32),
+	BSS_CHANGED_MLD_VALID_LINKS	= BIT_ULL(33),
 
 	/* when adding here, make sure to change ieee80211_reconfig */
 };
@@ -643,9 +645,7 @@
  * @pwr_reduction: power constraint of BSS.
  * @eht_support: does this BSS support EHT
  * @eht_puncturing: bitmap to indicate which channels are punctured in this BSS
- * @csa_active: marks whether a channel switch is going on. Internally it is
- *	write-protected by sdata_lock and local->mtx so holding either is fine
- *	for read access.
+ * @csa_active: marks whether a channel switch is going on.
  * @csa_punct_bitmap: new puncturing bitmap for channel switch
  * @mu_mimo_owner: indicates interface owns MU-MIMO capability
  * @chanctx_conf: The channel context this interface is assigned to, or %NULL
@@ -653,9 +653,7 @@
  *	path needing to access it; even though the netdev carrier will always
  *	be off when it is %NULL there can still be races and packets could be
  *	processed after it switches back to %NULL.
- * @color_change_active: marks whether a color change is ongoing. Internally it is
- *	write-protected by sdata_lock and local->mtx so holding either is fine
- *	for read access.
+ * @color_change_active: marks whether a color change is ongoing.
  * @color_change_color: the bss color that will be used after the change.
  * @ht_ldpc: in AP mode, indicates interface has HT LDPC capability.
  * @vht_ldpc: in AP mode, indicates interface has VHT LDPC capability.
@@ -776,6 +774,21 @@
 	bool eht_su_beamformer;
 	bool eht_su_beamformee;
 	bool eht_mu_beamformer;
+	bool eht_80mhz_full_bw_ul_mumimo;
+
+
+	/* next two fields are only present to make QCA ath12k
+	 * compile, they are never set to useful value  */
+	struct ieee80211_vif *mbssid_tx_vif;
+	int mbssid_tx_vif_linkid;
+
+	/* help compiling QCA code, no API to set it, value is always
+	 * false */
+	bool ap_ps_enable;
+
+	/* help compiling QCA code, no API to set it, value is always
+	 * 0 */
+	u32 critical_update_flag;
 };
 
 /**
@@ -1082,6 +1095,11 @@
 
 #define IEEE80211_MAX_TX_RETRY		31
 
+static inline bool ieee80211_rate_valid(struct ieee80211_tx_rate *rate)
+{
+	return rate->idx >= 0 && rate->count > 0;
+}
+
 static inline void ieee80211_rate_set_vht(struct ieee80211_tx_rate *rate,
 					  u8 mcs, u8 nss)
 {
@@ -1115,7 +1133,9 @@
  *	not valid if the interface is an MLD since we won't know which
  *	link the frame will be transmitted on
  * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
- * @ack_frame_id: internal frame ID for TX status, used internally
+ * @status_data: internal data for TX status handling, assigned privately,
+ *	see also &enum ieee80211_status_data for the internal documentation
+ * @status_data_idr: indicates status data is IDR allocated ID for ack frame
  * @tx_time_est: TX time estimate in units of 4us, used internally
  * @control: union part for control data
  * @control.rates: TX rates array to try
@@ -1155,10 +1175,11 @@
 	/* common information */
 	u32 flags;
 	u32 band:3,
-	    ack_frame_id:13,
+	    status_data_idr:1,
+	    status_data:13,
 	    hw_queue:4,
 	    tx_time_est:10;
-	/* 2 free bits */
+	/* 1 free bit */
 
 	union {
 		struct {
@@ -1172,7 +1193,11 @@
 					u8 use_cts_prot:1;
 					u8 short_preamble:1;
 					u8 skip_table:1;
-					/* 2 bytes free */
+
+					/* for injection only (bitmap) */
+					u8 antennas:2;
+
+					/* 14 bits free */
 				};
 				/* only needed before rate control */
 				unsigned long jiffies;
@@ -1755,12 +1780,15 @@
  * @IEEE80211_VIF_GET_NOA_UPDATE: request to handle NOA attributes
  *	and send P2P_PS notification to the driver if NOA changed, even
  *	this is not pure P2P vif.
+ * @IEEE80211_VIF_EML_ACTIVE: The driver indicates that EML operation is
+ *      enabled for the interface.
  */
 enum ieee80211_vif_flags {
 	IEEE80211_VIF_BEACON_FILTER		= BIT(0),
 	IEEE80211_VIF_SUPPORTS_CQM_RSSI		= BIT(1),
 	IEEE80211_VIF_SUPPORTS_UAPSD		= BIT(2),
 	IEEE80211_VIF_GET_NOA_UPDATE		= BIT(3),
+	IEEE80211_VIF_EML_ACTIVE	        = BIT(4),
 };
 
 
@@ -1790,6 +1818,9 @@
  * @ps: power-save mode (STA only). This flag is NOT affected by
  *	offchannel/dynamic_ps operations.
  * @aid: association ID number, valid only when @assoc is true
+ * @eml_cap: EML capabilities as described in P802.11be_D2.2 Figure 9-1002k.
+ * @eml_med_sync_delay: Medium Synchronization delay as described in
+ *	P802.11be_D2.2 Figure 9-1002j.
  * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The
  *	may filter ARP queries targeted for other addresses than listed here.
  *	The driver must allow ARP queries targeted for all address listed here
@@ -1812,6 +1843,8 @@
 	bool ibss_creator;
 	bool ps;
 	u16 aid;
+	u16 eml_cap;
+	u16 eml_med_sync_delay;
 
 	__be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
 	int arp_addr_cnt;
@@ -1838,6 +1871,8 @@
  * @active_links: The bitmap of active links, or 0 for non-MLO.
  *	The driver shouldn't change this directly, but use the
  *	API calls meant for that purpose.
+ * @dormant_links: bitmap of valid but disabled links, or 0 for non-MLO.
+ *	Must be a subset of valid_links.
  * @addr: address of this interface
  * @p2p: indicates whether this AP or STA interface is a p2p
  *	interface, i.e. a GO or p2p-sta respectively
@@ -1875,7 +1910,7 @@
 	struct ieee80211_vif_cfg cfg;
 	struct ieee80211_bss_conf bss_conf;
 	struct ieee80211_bss_conf __rcu *link_conf[IEEE80211_MLD_MAX_NUM_LINKS];
-	u16 valid_links, active_links;
+	u16 valid_links, active_links, dormant_links;
 	u8 addr[ETH_ALEN] __aligned(2);
 	bool p2p;
 
@@ -1901,6 +1936,27 @@
 	u8 drv_priv[] __aligned(sizeof(void *));
 };
 
+/**
+ * ieee80211_vif_usable_links - Return the usable links for the vif
+ * @vif: the vif for which the usable links are requested
+ * Return: the usable link bitmap
+ */
+static inline u16 ieee80211_vif_usable_links(const struct ieee80211_vif *vif)
+{
+	return vif->valid_links & ~vif->dormant_links;
+}
+
+/**
+ * ieee80211_vif_is_mld - Returns true iff the vif is an MLD one
+ * @vif: the vif
+ * Return: %true if the vif is an MLD, %false otherwise.
+ */
+static inline bool ieee80211_vif_is_mld(const struct ieee80211_vif *vif)
+{
+	/* valid_links != 0 indicates this vif is an MLD */
+	return vif->valid_links != 0;
+}
+
 #define for_each_vif_active_link(vif, link, link_id)				\
 	for (link_id = 0; link_id < ARRAY_SIZE((vif)->link_conf); link_id++)	\
 		if ((!(vif)->active_links ||					\
@@ -1938,22 +1994,18 @@
  */
 struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
 
-/**
- * lockdep_vif_mutex_held - for lockdep checks on link poiners
- * @vif: the interface to check
- */
-static inline bool lockdep_vif_mutex_held(struct ieee80211_vif *vif)
+static inline bool lockdep_vif_wiphy_mutex_held(struct ieee80211_vif *vif)
 {
-	return lockdep_is_held(&ieee80211_vif_to_wdev(vif)->mtx);
+	return lockdep_is_held(&ieee80211_vif_to_wdev(vif)->wiphy->mtx);
 }
 
 #define link_conf_dereference_protected(vif, link_id)		\
 	rcu_dereference_protected((vif)->link_conf[link_id],	\
-				  lockdep_vif_mutex_held(vif))
+				  lockdep_vif_wiphy_mutex_held(vif))
 
 #define link_conf_dereference_check(vif, link_id)		\
 	rcu_dereference_check((vif)->link_conf[link_id],	\
-			      lockdep_vif_mutex_held(vif))
+			      lockdep_vif_wiphy_mutex_held(vif))
 
 /**
  * enum ieee80211_key_flags - key flags
@@ -2250,11 +2302,16 @@
 	struct ieee80211_he_6ghz_capa he_6ghz_capa;
 	struct ieee80211_sta_eht_cap eht_cap;
 
+	/* help compiling QCA code, no API to set it, value is always 0 */
+	u16 ru_punct_bitmap;
+
 	struct ieee80211_sta_aggregates agg;
 
 	u8 rx_nss;
 	enum ieee80211_sta_rx_bandwidth bandwidth;
+	enum ieee80211_sta_rx_bandwidth sta_max_bandwidth;
 	struct ieee80211_sta_txpwr txpwr;
+	u32 tp_override;
 };
 
 /**
@@ -2704,6 +2761,8 @@
 	IEEE80211_HW_DETECTS_COLOR_COLLISION,
 	IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX,
 
+	IEEE80211_HW_APVLAN_NEED_MCAST_TO_UCAST,
+
 	/* keep last, obviously */
 	NUM_IEEE80211_HW_FLAGS
 };
@@ -3601,11 +3660,14 @@
  * @success: whether the frame exchange was successful, only
  *	used with the mgd_complete_tx() method, and then only
  *	valid for auth and (re)assoc.
+ * @link_id: the link id on which the frame will be TX'ed.
+ *	Only used with the mgd_prepare_tx() method.
  */
 struct ieee80211_prep_tx_info {
 	u16 duration;
 	u16 subtype;
 	u8 success:1;
+	int link_id;
 };
 
 /**
@@ -3829,6 +3891,10 @@
  *	the station. See @sta_pre_rcu_remove if needed.
  *	This callback can sleep.
  *
+ * @vif_add_debugfs: Drivers can use this callback to add a debugfs vif
+ *	directory with its files. This callback should be within a
+ *	CONFIG_MAC80211_DEBUGFS conditional. This callback can sleep.
+ *
  * @link_add_debugfs: Drivers can use this callback to add debugfs files
  *	when a link is added to a mac80211 vif. This callback should be within
  *	a CONFIG_MAC80211_DEBUGFS conditional. This callback can sleep.
@@ -4033,11 +4099,15 @@
  *	This callback must be atomic.
  *
  * @get_et_sset_count:  Ethtool API to get string-set count.
+ *	Note that the wiphy mutex is not held for this callback since it's
+ *	expected to return a static value.
  *
  * @get_et_stats:  Ethtool API to get a set of u64 stats.
  *
  * @get_et_strings:  Ethtool API to get a set of strings to describe stats
  *	and perhaps other supported types of ethtool data-sets.
+ *	Note that the wiphy mutex is not held for this callback since it's
+ *	expected to return a static value.
  *
  * @mgd_prepare_tx: Prepare for transmitting a management frame for association
  *	before associated. In multi-channel scenarios, a virtual interface is
@@ -4242,6 +4312,8 @@
 		   struct sk_buff *skb);
 	int (*start)(struct ieee80211_hw *hw);
 	void (*stop)(struct ieee80211_hw *hw);
+	int (*set_powered)(struct ieee80211_hw *hw);
+	int (*get_powered)(struct ieee80211_hw *hw, bool *up, bool *busy);
 #ifdef CONFIG_PM
 	int (*suspend)(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
 	int (*resume)(struct ieee80211_hw *hw);
@@ -4324,6 +4396,8 @@
 	int (*sta_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			  struct ieee80211_sta *sta);
 #ifdef CONFIG_MAC80211_DEBUGFS
+	void (*vif_add_debugfs)(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif);
 	void (*link_add_debugfs)(struct ieee80211_hw *hw,
 				 struct ieee80211_vif *vif,
 				 struct ieee80211_bss_conf *link_conf,
@@ -4472,7 +4546,8 @@
 				   struct ieee80211_prep_tx_info *info);
 
 	void	(*mgd_protect_tdls_discover)(struct ieee80211_hw *hw,
-					     struct ieee80211_vif *vif);
+					     struct ieee80211_vif *vif,
+					     unsigned int link_id);
 
 	int (*add_chanctx)(struct ieee80211_hw *hw,
 			   struct ieee80211_chanctx_conf *ctx);
@@ -4510,7 +4585,8 @@
 				  struct ieee80211_channel_switch *ch_switch);
 
 	int (*post_channel_switch)(struct ieee80211_hw *hw,
-				   struct ieee80211_vif *vif);
+				   struct ieee80211_vif *vif,
+				   struct ieee80211_bss_conf *link_conf);
 	void (*abort_channel_switch)(struct ieee80211_hw *hw,
 				     struct ieee80211_vif *vif);
 	void (*channel_switch_rx_beacon)(struct ieee80211_hw *hw,
@@ -5135,6 +5211,10 @@
 void ieee80211_tx_status(struct ieee80211_hw *hw,
 			 struct sk_buff *skb);
 
+void ieee80211_tx_status_8023(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
+			      struct sk_buff *skb);
+
 /**
  * ieee80211_tx_status_ext - extended transmit status callback
  *
@@ -6505,11 +6585,14 @@
  * ieee80211_chswitch_done - Complete channel switch process
  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
  * @success: make the channel switch successful or not
+ * @link_id: the link_id on which the switch was done. Ignored if success is
+ *	false.
  *
  * Complete the channel switch post-process: set the new operational channel
  * and wake up the suspended queues.
  */
-void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success);
+void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success,
+			     unsigned int link_id);
 
 /**
  * ieee80211_channel_switch_disconnect - disconnect due to channel switch error
@@ -6578,6 +6661,7 @@
  * marks frames marked in the bitmap as having been filtered. Afterwards, it
  * checks if any frames in the window starting from @ssn can now be released
  * (in case they were only waiting for frames that were filtered.)
+ * (Only works correctly if @max_rx_aggregation_subframes <= 64 frames)
  */
 void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
 					  u16 ssn, u64 filtered,
@@ -6597,6 +6681,13 @@
 void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn);
 
 /**
+ * same as ieee80211_send_bar but for given STA, allow sending to a
+ * STA on AP_VLAN and get a valid control->sta in the driver
+ */
+void ieee80211_send_bar_sta(struct ieee80211_sta *pubsta,
+			    u16 tid, u16 ssn);
+
+/**
  * ieee80211_manage_rx_ba_offl - helper to queue an RX BA work
  * @vif: &struct ieee80211_vif pointer from the add_interface callback
  * @addr: station mac address
@@ -6862,6 +6953,48 @@
 }
 
 /**
+ * ieee80211_get_he_iftype_cap_vif - return HE capabilities for sband/vif
+ * @sband: the sband to search for the iftype on
+ * @vif: the vif to get the iftype from
+ *
+ * Return: pointer to the struct ieee80211_sta_he_cap, or %NULL if none found
+ */
+static inline const struct ieee80211_sta_he_cap *
+ieee80211_get_he_iftype_cap_vif(const struct ieee80211_supported_band *sband,
+				struct ieee80211_vif *vif)
+{
+	return ieee80211_get_he_iftype_cap(sband, ieee80211_vif_type_p2p(vif));
+}
+
+/**
+ * ieee80211_get_he_6ghz_capa_vif - return HE 6 GHz capabilities
+ * @sband: the sband to search for the STA on
+ * @vif: the vif to get the iftype from
+ *
+ * Return: the 6GHz capabilities
+ */
+static inline __le16
+ieee80211_get_he_6ghz_capa_vif(const struct ieee80211_supported_band *sband,
+			       struct ieee80211_vif *vif)
+{
+	return ieee80211_get_he_6ghz_capa(sband, ieee80211_vif_type_p2p(vif));
+}
+
+/**
+ * ieee80211_get_eht_iftype_cap_vif - return EHT capabilities for sband/vif
+ * @sband: the sband to search for the iftype on
+ * @vif: the vif to get the iftype from
+ *
+ * Return: pointer to the struct ieee80211_sta_eht_cap, or %NULL if none found
+ */
+static inline const struct ieee80211_sta_eht_cap *
+ieee80211_get_eht_iftype_cap_vif(const struct ieee80211_supported_band *sband,
+				 struct ieee80211_vif *vif)
+{
+	return ieee80211_get_eht_iftype_cap(sband, ieee80211_vif_type_p2p(vif));
+}
+
+/**
  * ieee80211_update_mu_groups - set the VHT MU-MIMO groud data
  *
  * @vif: the specified virtual interface
@@ -7367,4 +7500,74 @@
 void ieee80211_set_active_links_async(struct ieee80211_vif *vif,
 				      u16 active_links);
 
+/*
+ * force dtim count value on given VIF
+ */
+void ieee80211_force_dtim(struct ieee80211_vif *vif,
+			  unsigned int dtim_count);
+
+/*
+ * special helpers for QCA ath12 non upstream code
+ */
+static inline
+void ieee80211_csa_finish_mlo(struct ieee80211_vif *vif,
+			      unsigned int link_id)
+{
+	/* FIXME: implement correctly */
+	ieee80211_csa_finish(vif);
+}
+
+static inline
+bool ieee80211_beacon_cntdwn_is_complete_mlo(struct ieee80211_vif *vif,
+					     unsigned int link_id)
+{
+	/* FIXME: implement correctly */
+	return ieee80211_beacon_cntdwn_is_complete(vif);
+}
+
+static inline
+void ieee80211_color_change_finish_mlo(struct ieee80211_vif *vif,
+				       unsigned int link_id)
+{
+	/* FIXME: implement correctly */
+	return ieee80211_color_change_finish(vif);
+}
+
+static inline
+u8 ieee80211_beacon_update_cntdwn_mlo(struct ieee80211_vif *vif,
+				      unsigned int link_id)
+{
+	/* FIXME: implement correctly */
+	return ieee80211_beacon_update_cntdwn(vif);
+}
+
+static inline
+struct sk_buff *ieee80211_get_fils_discovery_tmpl_mlo(struct ieee80211_hw *hw,
+						      struct ieee80211_vif *vif,
+						      unsigned int link_id)
+{
+	/* FIXME: implement correctly */
+	return ieee80211_get_fils_discovery_tmpl(hw, vif);
+}
+
+static inline
+struct sk_buff *
+ieee80211_get_unsol_bcast_probe_resp_tmpl_mlo(struct ieee80211_hw *hw,
+					      struct ieee80211_vif *vif,
+					      unsigned int link_id)
+{
+	/* FIXME: implement correctly */
+	return ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
+}
+
+static inline
+void
+ieee80211_obss_color_collision_notify_mlo(struct ieee80211_vif *vif,
+					  u64 color_bitmap, gfp_t gfp,
+					  unsigned int link_id)
+{
+	/* FIXME: implement correctly */
+	return ieee80211_obss_color_collision_notify(vif, color_bitmap, gfp);
+}
+
 #endif /* MAC80211_H */
diff -ruw linux-6.4/include/net/netfilter/nf_conntrack.h linux-6.4-fbx/include/net/netfilter/nf_conntrack.h
--- linux-6.4/include/net/netfilter/nf_conntrack.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/net/netfilter/nf_conntrack.h	2023-05-22 20:06:44.787868272 +0200
@@ -53,6 +53,8 @@
 	/* only used when new connection is allocated: */
 	atomic_t count;
 	unsigned int expect_count;
+	u8 sysctl_auto_assign_helper;
+	bool auto_assign_helper_warned;
 
 	/* only used from work queues, configuration plane, and so on: */
 	unsigned int users4;
@@ -118,6 +120,9 @@
 	u_int32_t secmark;
 #endif
 
+	union nf_conntrack_man_proto	nat_src_proto_min;
+	union nf_conntrack_man_proto	nat_src_proto_max;
+
 	/* Extensions */
 	struct nf_ct_ext *ext;
 
diff -ruw linux-6.4/include/net/netfilter/nf_conntrack_expect.h linux-6.4-fbx/include/net/netfilter/nf_conntrack_expect.h
--- linux-6.4/include/net/netfilter/nf_conntrack_expect.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/net/netfilter/nf_conntrack_expect.h	2023-03-13 14:08:39.114429164 +0100
@@ -58,13 +58,24 @@
 #endif
 
 	struct rcu_head rcu;
+
+	/* private expect information. */
+	char data[32] __aligned(8);
 };
 
+#define NF_CT_EXPECT_BUILD_BUG_ON(structsize)				\
+	BUILD_BUG_ON((structsize) > sizeof_field(struct nf_conntrack_expect, data))
+
 static inline struct net *nf_ct_exp_net(struct nf_conntrack_expect *exp)
 {
 	return nf_ct_net(exp->master);
 }
 
+static inline void *nf_ct_exp_data(struct nf_conntrack_expect *exp)
+{
+	return (void *)exp->data;
+}
+
 #define NF_CT_EXP_POLICY_NAME_LEN	16
 
 struct nf_conntrack_expect_policy {
diff -ruw linux-6.4/include/net/netns/conntrack.h linux-6.4-fbx/include/net/netns/conntrack.h
--- linux-6.4/include/net/netns/conntrack.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/net/netns/conntrack.h	2023-05-22 20:06:44.791868378 +0200
@@ -100,6 +100,7 @@
 	u8			sysctl_log_invalid; /* Log invalid packets */
 	u8			sysctl_events;
 	u8			sysctl_acct;
+	u8			sysctl_auto_assign_helper;
 	u8			sysctl_tstamp;
 	u8			sysctl_checksum;
 
diff -ruw linux-6.4/include/net/page_pool.h linux-6.4-fbx/include/net/page_pool.h
--- linux-6.4/include/net/page_pool.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/net/page_pool.h	2024-01-19 17:01:19.905848123 +0100
@@ -377,6 +377,21 @@
 		page->dma_addr_upper = upper_32_bits(addr);
 }
 
+static inline void page_pool_clear_recycle_flag(struct page *page)
+{
+	page->pp_recycle_flag = 0;
+}
+
+static inline void page_pool_set_recycled_flag(struct page *page)
+{
+	page->pp_recycle_flag = 1;
+}
+
+static inline bool page_pool_is_recycled(struct page *page)
+{
+	return page->pp_recycle_flag & 1;
+}
+
 static inline bool is_page_pool_compiled_in(void)
 {
 #ifdef CONFIG_PAGE_POOL
diff -ruw linux-6.4/include/net/regulatory.h linux-6.4-fbx/include/net/regulatory.h
--- linux-6.4/include/net/regulatory.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/net/regulatory.h	2023-11-07 13:38:44.058256582 +0100
@@ -140,17 +140,6 @@
  *      otherwise initiating radiation is not allowed. This will enable the
  *      relaxations enabled under the CFG80211_REG_RELAX_NO_IR configuration
  *      option
- * @REGULATORY_IGNORE_STALE_KICKOFF: the regulatory core will _not_ make sure
- *	all interfaces on this wiphy reside on allowed channels. If this flag
- *	is not set, upon a regdomain change, the interfaces are given a grace
- *	period (currently 60 seconds) to disconnect or move to an allowed
- *	channel. Interfaces on forbidden channels are forcibly disconnected.
- *	Currently these types of interfaces are supported for enforcement:
- *	NL80211_IFTYPE_ADHOC, NL80211_IFTYPE_STATION, NL80211_IFTYPE_AP,
- *	NL80211_IFTYPE_AP_VLAN, NL80211_IFTYPE_MONITOR,
- *	NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO,
- *	NL80211_IFTYPE_P2P_DEVICE. The flag will be set by default if a device
- *	includes any modes unsupported for enforcement checking.
  * @REGULATORY_WIPHY_SELF_MANAGED: for devices that employ wiphy-specific
  *	regdom management. These devices will ignore all regdom changes not
  *	originating from their own wiphy.
@@ -177,7 +166,7 @@
 	REGULATORY_COUNTRY_IE_FOLLOW_POWER	= BIT(3),
 	REGULATORY_COUNTRY_IE_IGNORE		= BIT(4),
 	REGULATORY_ENABLE_RELAX_NO_IR           = BIT(5),
-	REGULATORY_IGNORE_STALE_KICKOFF         = BIT(6),
+	/* reuse bit 6 next time */
 	REGULATORY_WIPHY_SELF_MANAGED		= BIT(7),
 };
 
@@ -224,6 +213,7 @@
 	u32 flags;
 	u32 dfs_cac_ms;
 	bool has_wmm;
+	s8 psd;
 };
 
 struct ieee80211_regdomain {
diff -ruw linux-6.4/include/net/sock.h linux-6.4-fbx/include/net/sock.h
--- linux-6.4/include/net/sock.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/net/sock.h	2023-06-27 11:47:16.083867379 +0200
@@ -187,6 +187,7 @@
 	unsigned char		skc_reuseport:1;
 	unsigned char		skc_ipv6only:1;
 	unsigned char		skc_net_refcnt:1;
+	unsigned char		skc_reuse_conflict;
 	int			skc_bound_dev_if;
 	union {
 		struct hlist_node	skc_bind_node;
@@ -383,6 +384,7 @@
 #define sk_reuseport		__sk_common.skc_reuseport
 #define sk_ipv6only		__sk_common.skc_ipv6only
 #define sk_net_refcnt		__sk_common.skc_net_refcnt
+#define sk_reuse_conflict	__sk_common.skc_reuse_conflict
 #define sk_bound_dev_if		__sk_common.skc_bound_dev_if
 #define sk_bind_node		__sk_common.skc_bind_node
 #define sk_prot			__sk_common.skc_prot
@@ -959,6 +961,7 @@
 	SOCK_XDP, /* XDP is attached */
 	SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */
 	SOCK_RCVMARK, /* Receive SO_MARK  ancillary data with packet */
+	SOCK_UDP_DUP_UNICAST,
 };
 
 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
diff -ruw linux-6.4/include/net/udp.h linux-6.4-fbx/include/net/udp.h
--- linux-6.4/include/net/udp.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/net/udp.h	2023-11-07 13:38:44.058256582 +0100
@@ -21,6 +21,7 @@
 #include <linux/list.h>
 #include <linux/bug.h>
 #include <net/inet_sock.h>
+#include <net/gso.h>
 #include <net/sock.h>
 #include <net/snmp.h>
 #include <net/ip.h>
diff -ruw linux-6.4/include/uapi/asm-generic/socket.h linux-6.4-fbx/include/uapi/asm-generic/socket.h
--- linux-6.4/include/uapi/asm-generic/socket.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/uapi/asm-generic/socket.h	2023-05-22 20:06:44.851869974 +0200
@@ -30,9 +30,10 @@
 #define SO_PEERCRED	17
 #define SO_RCVLOWAT	18
 #define SO_SNDLOWAT	19
+#endif
+
 #define SO_RCVTIMEO_OLD	20
 #define SO_SNDTIMEO_OLD	21
-#endif
 
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION		22
@@ -132,6 +133,8 @@
 
 #define SO_RCVMARK		75
 
+#define SO_UDP_DUP_UNICAST	100
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
diff -ruw linux-6.4/include/uapi/linux/batadv_packet.h linux-6.4-fbx/include/uapi/linux/batadv_packet.h
--- linux-6.4/include/uapi/linux/batadv_packet.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/uapi/linux/batadv_packet.h	2023-12-12 17:24:34.163627207 +0100
@@ -44,6 +44,7 @@
 	BATADV_ELP		= 0x03,
 	BATADV_OGM2		= 0x04,
 	BATADV_MCAST            = 0x05,
+	BATADV_FBX		= 0x3f,
 	/* 0x40 - 0x7f: unicast */
 #define BATADV_UNICAST_MIN     0x40
 	BATADV_UNICAST          = 0x40,
@@ -87,6 +88,14 @@
 };
 
 /**
+ * enum batadv_v_flags - flags used in B.A.T.M.A.N. V OGM2 packets
+ * @BATADV_V_HALF_DUPLEX: Halfduplex penalty should be applied to throughput
+ */
+enum batadv_v_flags {
+	BATADV_V_HALF_DUPLEX   = 1UL << 7,
+};
+
+/**
  * enum batadv_icmp_packettype - ICMP message types
  * @BATADV_ECHO_REPLY: success reply to BATADV_ECHO_REQUEST
  * @BATADV_DESTINATION_UNREACHABLE: failure when route to destination not found
@@ -174,6 +183,7 @@
  * @BATADV_TVLV_TT: translation table tvlv
  * @BATADV_TVLV_ROAM: roaming advertisement tvlv
  * @BATADV_TVLV_MCAST: multicast capability tvlv
+ * @BATADV_TVLV_FBX: fbx specific tvlv
  */
 enum batadv_tvlv_type {
 	BATADV_TVLV_GW		= 0x01,
@@ -182,6 +192,7 @@
 	BATADV_TVLV_TT		= 0x04,
 	BATADV_TVLV_ROAM	= 0x05,
 	BATADV_TVLV_MCAST	= 0x06,
+	BATADV_TVLV_FBX		= 0xff,
 };
 
 #pragma pack(2)
@@ -628,6 +639,61 @@
 	__u8 reserved[3];
 };
 
+/**
+ * struct batadv_fbx_packet - FBX specific packets
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @subtype: FBX packet subtype (see batadv_fbx_subtype)
+ * @rev: FBX specific version for compatibility
+ * @seqno: an alway increasing sequence number, not checked for now
+ */
+struct batadv_fbx_packet {
+	__u8   packet_type;
+	__u8   version;
+	__u8   subtype; /* see batadv_fbx_subtype detection message types */
+	__u8   rev;
+	__be32 seqno;
+};
+#define BATADV_FBX_HLEN sizeof(struct batadv_fbx_packet)
+
+/**
+ * enum batadv_fbx_subtype - FBX packet subtypes
+ * @BATADV_FBX_MTU_PROBE: Big message sent to a neigh to probe link MTU
+ * @BATADV_FBX_MTU_RESP: MTU acknowledgment from receiver to sender
+ */
+enum batadv_fbx_subtype {
+	BATADV_FBX_SUB_UNUSED = 0,
+	BATADV_FBX_SUB_MTU_PROBE,
+	BATADV_FBX_SUB_MTU_RESP,
+	BATADV_FBX_SUB_SLAP,
+	/* keep last */
+	BATADV_FBX_SUB_LAST,
+};
+
+/**
+ * struct batadv_fbx_mtu_packet - FBX MTU probing packet
+ * @hdr: Common FBX header
+ * @mtu: The mtu this probe / resp packet relates to
+ */
+struct batadv_fbx_mtu_packet {
+	struct batadv_fbx_packet hdr;
+	__be16 mtu;
+};
+
+#define BATADV_FBX_MTU_HLEN sizeof(struct batadv_fbx_mtu_packet)
+
+/**
+ * struct batadv_fbx_slap_packet - FBX SLAP ID packet
+ * @hdr: Common FBX header
+ * @prio: SLAP Prio of the originator node
+ */
+struct batadv_fbx_slap_packet {
+	struct batadv_fbx_packet hdr;
+	__be32 prio;
+};
+
+#define BATADV_FBX_SLAP_HLEN sizeof(struct batadv_fbx_slap_packet)
+
 #pragma pack()
 
 #endif /* _UAPI_LINUX_BATADV_PACKET_H_ */
diff -ruw linux-6.4/include/uapi/linux/batman_adv.h linux-6.4-fbx/include/uapi/linux/batman_adv.h
--- linux-6.4/include/uapi/linux/batman_adv.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/uapi/linux/batman_adv.h	2023-12-12 17:24:34.163627207 +0100
@@ -11,6 +11,7 @@
 
 #define BATADV_NL_MCAST_GROUP_CONFIG	"config"
 #define BATADV_NL_MCAST_GROUP_TPMETER	"tpmeter"
+#define BATADV_NL_MCAST_GROUP_ROUTE	"route"
 
 /**
  * enum batadv_tt_client_flags - TT client specific flags
@@ -49,6 +50,12 @@
 	BATADV_TT_CLIENT_ISOLA	 = (1 << 5),
 
 	/**
+	 * @BATADV_TT_CLIENT_SEEN: this global client has been actually
+	 * detected to be part of the originator and has not yet expired
+	 */
+	BATADV_TT_CLIENT_SEEN = (1 << 6),
+
+	/**
 	 * @BATADV_TT_CLIENT_NOPURGE: this client should never be removed from
 	 * the table
 	 */
@@ -481,6 +488,11 @@
 	 */
 	BATADV_ATTR_MULTICAST_FANOUT,
 
+	/**
+	 * @BATADV_ATTR_FBX: defines FBX specific NL attributes
+	 */
+	BATADV_ATTR_FBX,
+
 	/* add attributes above here, update the policy in netlink.c */
 
 	/**
@@ -500,6 +512,52 @@
 };
 
 /**
+ * enum batadv_nl_fbx_attr - batman-adv netlink attributes
+ */
+enum batadv_nl_fbx_attr {
+	/**
+	 * @BATADV_ATTR_FBX_MTU: defines the MTU this neighbor can safely use.
+	 */
+	BATADV_ATTR_FBX_MTU,
+
+	/**
+	 * @BATADV_ATTR_FBX_SLAP_MASTER_ADDRESS: Show current SLAP master address
+	 */
+	BATADV_ATTR_FBX_SLAP_MASTER_ADDRESS,
+
+	/**
+	 * @BATADV_ATTR_FBX_SLAP_MASTER_PRIO: Show current SLAP master priority
+	 */
+	BATADV_ATTR_FBX_SLAP_MASTER_PRIO,
+
+	/**
+	 * @BATADV_ATTR_FBX_SLAP_IFINDEX: defines the currently selected SLAP
+	 * interface
+	 */
+	BATADV_ATTR_FBX_SLAP_IFINDEX,
+
+	/**
+	 * @BATADV_ATTR_FBX_SLAP_PRIO: defines the current SLAP priority
+	 */
+	BATADV_ATTR_FBX_SLAP_PRIO,
+
+	/**
+	 * @__BATADV_ATTR_FBX_AFTER_LAST: internal use
+	 */
+	__BATADV_ATTR_FBX_AFTER_LAST,
+
+	/**
+	 * @NUM_BATADV_ATTR_FBX: total number of batadv_nl_fbx_attrs available
+	 */
+	NUM_BATADV_ATTR_FBX = __BATADV_ATTR_FBX_AFTER_LAST,
+
+	/**
+	 * @BATADV_ATTR_FBX_MAX: highest attribute number currently defined
+	 */
+	BATADV_ATTR_FBX_MAX = __BATADV_ATTR_FBX_AFTER_LAST - 1
+};
+
+/**
  * enum batadv_nl_commands - supported batman-adv netlink commands
  */
 enum batadv_nl_commands {
@@ -613,6 +671,21 @@
 	 */
 	BATADV_CMD_SET_VLAN,
 
+	/**
+	 * @BATADV_CMD_ADD_ROUTE: Add new route to reach originator
+	 */
+	BATADV_CMD_ADD_ROUTE,
+
+	/**
+	 * @BATADV_CMD_DEL_ROUTE: Del route to originator
+	 */
+	BATADV_CMD_DEL_ROUTE,
+
+	/**
+	 * @BATADV_CMD_CHANGE_ROUTE: Modify an existing route to originator
+	 */
+	BATADV_CMD_CHANGE_ROUTE,
+
 	/* add new commands above here */
 
 	/**
diff -ruw linux-6.4/include/uapi/linux/ethtool.h linux-6.4-fbx/include/uapi/linux/ethtool.h
--- linux-6.4/include/uapi/linux/ethtool.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/uapi/linux/ethtool.h	2023-05-31 17:11:03.421680714 +0200
@@ -294,6 +294,8 @@
 	ETHTOOL_PHY_DOWNSHIFT,
 	ETHTOOL_PHY_FAST_LINK_DOWN,
 	ETHTOOL_PHY_EDPD,
+	ETHTOOL_PHY_BROKEN,
+
 	/*
 	 * Add your fresh new phy tunable attribute above and remember to update
 	 * phy_tunable_strings[] in net/ethtool/common.c
@@ -681,6 +683,7 @@
  * @ETH_SS_STATS_ETH_MAC: names of IEEE 802.3 MAC statistics
  * @ETH_SS_STATS_ETH_CTRL: names of IEEE 802.3 MAC Control statistics
  * @ETH_SS_STATS_RMON: names of RMON statistics
+ * @ETH_SS_PHYLINK_IFTYPES: names of phylink interface types
  *
  * @ETH_SS_COUNT: number of defined string sets
  */
@@ -706,6 +709,7 @@
 	ETH_SS_STATS_ETH_MAC,
 	ETH_SS_STATS_ETH_CTRL,
 	ETH_SS_STATS_RMON,
+	ETH_SS_PHYLINK_IFTYPES,
 
 	/* add new constants above here */
 	ETH_SS_COUNT
@@ -1576,6 +1580,91 @@
 #define ETHTOOL_FEC_BASER		(1 << ETHTOOL_FEC_BASER_BIT)
 #define ETHTOOL_FEC_LLRS		(1 << ETHTOOL_FEC_LLRS_BIT)
 
+/**
+ * struct ethtool_shaper_params
+ * @cmd: %ETHTOOL_GSHAPER_PARAMS / %ETHTOOL_SSHAPER_PARAMS
+ */
+struct ethtool_shaper_params {
+	__u32 cmd;
+
+	__u64 rate;
+	__u32 burst;
+	__u32 mtu;
+};
+
+/**
+ * struct ethtool_epon_param
+ * @cmd: Command number = %ETHTOOL_GEPON_PARAM or %ETHTOOL_SEPON_*
+ */
+struct ethtool_epon_param {
+	__u32   cmd;
+	__u8	discovery_rx;
+	__u8	registered;
+	__u16	llid;
+	__u32	burst_cap;
+	__u32	lasermon_event_count;
+	__u32	change_count;
+	__u32	keys_update_id;
+	__u8	key_sci[8];
+	__u8	down_key0[16];
+	__u8	down_key1[16];
+	__u32	down_encrypt;
+	__u32	down_last_rx_encrypted;
+	__u32	down_last_rx_key_id;
+	__u16	mcast_llid;
+	__u16	pad;
+};
+
+/*
+ * currently a 1:1 mapping for SFP SM in drivers/net/phy/sfp.c
+ */
+enum {
+	ETHTOOL_SFP_S_DOWN = 0,
+	ETHTOOL_SFP_S_FAIL,
+	ETHTOOL_SFP_S_WAIT,
+	ETHTOOL_SFP_S_INIT,
+	ETHTOOL_SFP_S_INIT_PHY,
+	ETHTOOL_SFP_S_INIT_TX_FAULT,
+	ETHTOOL_SFP_S_WAIT_LOS,
+	ETHTOOL_SFP_S_LINK_UP,
+	ETHTOOL_SFP_S_TX_FAULT,
+	ETHTOOL_SFP_S_REINIT,
+	ETHTOOL_SFP_S_TX_DISABLE,
+};
+
+/**
+ * struct ethtool_sfp_state
+ * @cmd: Command number = %ETHTOOL_GSFP_STATE
+ */
+struct ethtool_sfp_state {
+	__u32 cmd;
+
+	__u32 fsm_state;
+
+	__u8 o_pwren;
+	__u8 o_txdis;
+	__u8 i_presence;
+	__u8 i_rxlos;
+	__u8 i_txfault;
+};
+
+/**
+ * struct ethtool_phylink_if_mode
+ * @cmd: %ETHTOOL_GPHYLINK_IFTYPE / %ETHTOOL_SPHYLINK_IFTYPE
+ */
+struct ethtool_phylink_iftype {
+	__u32	cmd;
+
+	/* stringified phy_interface_t (enum is not part of UAPI and
+	 * is not stable), uses string from phy_modes()  */
+	char	iftype[ETH_GSTRING_LEN];
+
+	__u32	autoneg_en;
+
+	/* enum MLO_AN_xxx, read-only */
+	__u32	mode;
+};
+
 /* CMDs currently supported */
 #define ETHTOOL_GSET		0x00000001 /* DEPRECATED, Get settings.
 					    * Please use ETHTOOL_GLINKSETTINGS
@@ -1671,6 +1760,23 @@
 #define ETHTOOL_GFECPARAM	0x00000050 /* Get FEC settings */
 #define ETHTOOL_SFECPARAM	0x00000051 /* Set FEC settings */
 
+#define ETHTOOL_GEPON_PARAM	0x00000052 /* Get EPON params */
+#define ETHTOOL_SEPON_KEYS	0x00000053 /* Set EPON encryption keys */
+#define ETHTOOL_SEPON_ENCRYPT	0x00000054 /* Set EPON encryption keys */
+#define ETHTOOL_SEPON_RESTART	0x00000055 /* restart epon link */
+#define ETHTOOL_SEPON_BURST	0x00000056 /* update burst value */
+#define ETHTOOL_SEPON_ADD_MCLLID	0x00000057 /* add epon llid */
+#define ETHTOOL_SEPON_DEL_MCLLID	0x00000058 /* remove epon llid */
+#define ETHTOOL_SEPON_CLR_MCLLID	0x00000059 /* remove all epon llid */
+
+#define ETHTOOL_GSFP_STATE	0x00000060 /* get SFP state (IOs/FSM) */
+
+#define ETHTOOL_SSHAPER_PARAMS	0x00000061 /* set HW TX shaper params */
+#define ETHTOOL_GSHAPER_PARAMS	0x00000062 /* get HW TX shaper params */
+
+#define ETHTOOL_GPHYLINK_IFTYPE	0x00000063 /* get phylink interface type  */
+#define ETHTOOL_SPHYLINK_IFTYPE	0x00000064 /* set phylink interface type */
+
 /* compatibility with older code */
 #define SPARC_ETH_GSET		ETHTOOL_GSET
 #define SPARC_ETH_SSET		ETHTOOL_SSET
@@ -1787,6 +1893,12 @@
 	ETHTOOL_LINK_MODE_10baseT1S_Full_BIT		 = 99,
 	ETHTOOL_LINK_MODE_10baseT1S_Half_BIT		 = 100,
 	ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT	 = 101,
+	ETHTOOL_LINK_MODE_1000basePX_D_Full_BIT		 = 102,
+	ETHTOOL_LINK_MODE_1000basePX_U_Full_BIT		 = 103,
+	ETHTOOL_LINK_MODE_10000basePR_D_Full_BIT	 = 104,
+	ETHTOOL_LINK_MODE_10000basePR_U_Full_BIT	 = 105,
+	ETHTOOL_LINK_MODE_10000_1000basePRX_D_Full_BIT	 = 106,
+	ETHTOOL_LINK_MODE_10000_1000basePRX_U_Full_BIT	 = 107,
 
 	/* must be last entry */
 	__ETHTOOL_LINK_MODE_MASK_NBITS
diff -ruw linux-6.4/include/uapi/linux/if_ether.h linux-6.4-fbx/include/uapi/linux/if_ether.h
--- linux-6.4/include/uapi/linux/if_ether.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/uapi/linux/if_ether.h	2024-02-08 19:19:27.692597465 +0100
@@ -55,6 +55,7 @@
 #define	ETH_P_BPQ	0x08FF		/* G8BPQ AX.25 Ethernet Packet	[ NOT AN OFFICIALLY REGISTERED ID ] */
 #define ETH_P_IEEEPUP	0x0a00		/* Xerox IEEE802.3 PUP packet */
 #define ETH_P_IEEEPUPAT	0x0a01		/* Xerox IEEE802.3 PUP Addr Trans packet */
+#define ETH_P_FBXVLAN	0x1337		/* Freebox specific VLAN type [ NOT AN OFFICIALLY REGISTERED ID ] */
 #define ETH_P_BATMAN	0x4305		/* B.A.T.M.A.N.-Advanced packet [ NOT AN OFFICIALLY REGISTERED ID ] */
 #define ETH_P_DEC       0x6000          /* DEC Assigned proto           */
 #define ETH_P_DNA_DL    0x6001          /* DEC DNA Dump/Load            */
diff -ruw linux-6.4/include/uapi/linux/if_tun.h linux-6.4-fbx/include/uapi/linux/if_tun.h
--- linux-6.4/include/uapi/linux/if_tun.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/uapi/linux/if_tun.h	2023-05-22 20:06:44.879870719 +0200
@@ -62,6 +62,32 @@
 #define TUNSETCARRIER _IOW('T', 226, int)
 #define TUNGETDEVNETNS _IO('T', 227)
 
+
+struct smalltun_rule {
+	__u8	proto;
+	__be16	src_port_start;
+	__be16	src_port_end;
+	__be16	dst_port_start;
+	__be16	dst_port_end;
+};
+
+struct smalltun_fp {
+	__be32	inner_src;
+	__be32	inner_dst;
+
+	__u32	af;
+	__u8	outer_src[16];
+	__u8	outer_dst[16];
+	__be16	outer_src_port;
+	__be16	outer_dst_port;
+
+	struct smalltun_rule rules[8];
+	__u32	rule_count;
+};
+
+#define TUNSMALLTUNSETFP _IOW('T', 228, struct smalltun_fp)
+#define TUNSMALLTUNDELFP _IOW('T', 229, struct smalltun_fp)
+
 /* TUNSETIFF ifr flags */
 #define IFF_TUN		0x0001
 #define IFF_TAP		0x0002
diff -ruw linux-6.4/include/uapi/linux/if_tunnel.h linux-6.4-fbx/include/uapi/linux/if_tunnel.h
--- linux-6.4/include/uapi/linux/if_tunnel.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/uapi/linux/if_tunnel.h	2023-05-22 20:06:44.879870719 +0200
@@ -77,10 +77,23 @@
 	IFLA_IPTUN_ENCAP_DPORT,
 	IFLA_IPTUN_COLLECT_METADATA,
 	IFLA_IPTUN_FWMARK,
+	IFLA_IPTUN_FMRS,
 	__IFLA_IPTUN_MAX,
 };
 #define IFLA_IPTUN_MAX	(__IFLA_IPTUN_MAX - 1)
 
+enum {
+	IFLA_IPTUN_FMR_UNSPEC,
+	IFLA_IPTUN_FMR_IP6_PREFIX,
+	IFLA_IPTUN_FMR_IP4_PREFIX,
+	IFLA_IPTUN_FMR_IP6_PREFIX_LEN,
+	IFLA_IPTUN_FMR_IP4_PREFIX_LEN,
+	IFLA_IPTUN_FMR_EA_LEN,
+	IFLA_IPTUN_FMR_OFFSET,
+	__IFLA_IPTUN_FMR_MAX,
+};
+#define IFLA_IPTUN_FMR_MAX (__IFLA_IPTUN_FMR_MAX - 1)
+
 enum tunnel_encap_types {
 	TUNNEL_ENCAP_NONE,
 	TUNNEL_ENCAP_FOU,
diff -ruw linux-6.4/include/uapi/linux/input-event-codes.h linux-6.4-fbx/include/uapi/linux/input-event-codes.h
--- linux-6.4/include/uapi/linux/input-event-codes.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/uapi/linux/input-event-codes.h	2023-05-22 20:06:44.879870719 +0200
@@ -803,6 +803,18 @@
 #define BTN_TRIGGER_HAPPY39		0x2e6
 #define BTN_TRIGGER_HAPPY40		0x2e7
 
+#define KEY_APP_TV			0x2f1
+#define KEY_APP_REPLAY			0x2f2
+#define KEY_APP_VIDEOCLUB		0x2f3
+#define KEY_APP_WHATSON			0x2f4
+#define KEY_APP_RECORDS			0x2f5
+#define KEY_APP_MEDIA			0x2f6
+#define KEY_APP_YOUTUBE			0x2f7
+#define KEY_APP_RADIOS			0x2f8
+#define KEY_APP_CANALVOD		0x2f9
+#define KEY_APP_PIP			0x2fa
+#define KEY_APP_NETFLIX			0x2fb
+
 /* We avoid low common keys in module aliases so they don't get huge. */
 #define KEY_MIN_INTERESTING	KEY_MUTE
 #define KEY_MAX			0x2ff
diff -ruw linux-6.4/include/uapi/linux/libc-compat.h linux-6.4-fbx/include/uapi/linux/libc-compat.h
--- linux-6.4/include/uapi/linux/libc-compat.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/uapi/linux/libc-compat.h	2023-02-27 14:15:14.847571831 +0100
@@ -49,11 +49,11 @@
 #ifndef _UAPI_LIBC_COMPAT_H
 #define _UAPI_LIBC_COMPAT_H
 
-/* We have included glibc headers... */
-#if defined(__GLIBC__)
+/* We have included libc headers... */
+#if !defined(__KERNEL__)
 
-/* Coordinate with glibc net/if.h header. */
-#if defined(_NET_IF_H) && defined(__USE_MISC)
+/* Coordinate with libc net/if.h header. */
+#if defined(_NET_IF_H) && (!defined(__GLIBC__) || defined(__USE_MISC))
 
 /* GLIBC headers included first so don't define anything
  * that would already be defined. */
@@ -65,9 +65,11 @@
 /* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
 #define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
 /* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#ifndef IFF_ECHO
 #ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
 #define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
 #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
+#endif /* IFF_ECHO */
 
 #else /* _NET_IF_H */
 
@@ -170,7 +172,7 @@
  * or we are being included in the kernel, then define everything
  * that we need. Check for previous __UAPI_* definitions to give
  * unsupported C libraries a way to opt out of any kernel definition. */
-#else /* !defined(__GLIBC__) */
+#else /* !defined(__KERNEL__) */
 
 /* Definitions for if.h */
 #ifndef __UAPI_DEF_IF_IFCONF
@@ -262,6 +264,6 @@
 #define __UAPI_DEF_XATTR		1
 #endif
 
-#endif /* __GLIBC__ */
+#endif /* __KERNEL__ */
 
 #endif /* _UAPI_LIBC_COMPAT_H */
diff -ruw linux-6.4/include/uapi/linux/nl80211.h linux-6.4-fbx/include/uapi/linux/nl80211.h
--- linux-6.4/include/uapi/linux/nl80211.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/uapi/linux/nl80211.h	2024-04-19 16:04:28.965735994 +0200
@@ -11,7 +11,7 @@
  * Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
  * Copyright 2008 Colin McCabe <colin@cozybit.com>
  * Copyright 2015-2017	Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -167,7 +167,7 @@
  * following events occur.
  * a) Expiration of hardware timer whose expiration time is set to maximum
  * coalescing delay of matching coalesce rule.
- * b) Coalescing buffer in hardware reaches it's limit.
+ * b) Coalescing buffer in hardware reaches its limit.
  * c) Packet doesn't match any of the configured coalesce rules.
  *
  * User needs to configure following parameters for creating a coalesce
@@ -326,7 +326,7 @@
 /**
  * DOC: Multi-Link Operation
  *
- * In Multi-Link Operation, a connection between to MLDs utilizes multiple
+ * In Multi-Link Operation, a connection between two MLDs utilizes multiple
  * links. To use this in nl80211, various commands and responses now need
  * to or will include the new %NL80211_ATTR_MLO_LINKS attribute.
  * Additionally, various commands that need to operate on a specific link
@@ -335,6 +335,15 @@
  */
 
 /**
+ * DOC: OWE DH IE handling offload
+ *
+ * By setting @NL80211_EXT_FEATURE_OWE_OFFLOAD flag, drivers can indicate
+ * kernel/application space to avoid DH IE handling. When this flag is
+ * advertised, the driver/device will take care of DH IE inclusion and
+ * processing of peer DH IE to generate PMK.
+ */
+
+/**
  * enum nl80211_commands - supported nl80211 commands
  *
  * @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -1309,6 +1318,11 @@
  *	The number of peers that HW timestamping can be enabled for concurrently
  *	is indicated by %NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS.
  *
+ * @NL80211_CMD_LINKS_REMOVED: Notify userspace about the removal of STA MLD
+ *	setup links due to AP MLD removing the corresponding affiliated APs with
+ *	Multi-Link reconfiguration. %NL80211_ATTR_MLO_LINKS is used to provide
+ *	information about the removed STA MLD setup links.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -1562,6 +1576,8 @@
 
 	NL80211_CMD_SET_HW_TIMESTAMP,
 
+	NL80211_CMD_LINKS_REMOVED,
+
 	/* add new commands above here */
 
 	/* used to define NL80211_CMD_MAX below */
@@ -2683,11 +2699,13 @@
  *
  * @NL80211_ATTR_FILS_DISCOVERY: Optional parameter to configure FILS
  *	discovery. It is a nested attribute, see
- *	&enum nl80211_fils_discovery_attributes.
+ *	&enum nl80211_fils_discovery_attributes. Userspace should pass an empty
+ *	nested attribute to disable this feature and delete the templates.
  *
  * @NL80211_ATTR_UNSOL_BCAST_PROBE_RESP: Optional parameter to configure
  *	unsolicited broadcast probe response. It is a nested attribute, see
- *	&enum nl80211_unsol_bcast_probe_resp_attributes.
+ *	&enum nl80211_unsol_bcast_probe_resp_attributes. Userspace should pass an empty
+ *	nested attribute to disable this feature and delete the templates.
  *
  * @NL80211_ATTR_S1G_CAPABILITY: S1G Capability information element (from
  *	association request when used with NL80211_CMD_NEW_STATION)
@@ -2805,6 +2823,9 @@
  *	index. If the userspace includes more RNR elements than number of
  *	MBSSID elements then these will be added in every EMA beacon.
  *
+ * @NL80211_ATTR_MLO_LINK_DISABLED: Flag attribute indicating that the link is
+ *	disabled.
+ *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3341,6 +3362,8 @@
 
 	NL80211_ATTR_EMA_RNR_ELEMS,
 
+	NL80211_ATTR_MLO_LINK_DISABLED,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
@@ -3348,6 +3371,9 @@
 	NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1
 };
 
+#define NL80211_QBC_UPDATE_NODE_METRICS_IE 1
+#define NL80211_QBC_UPDATE_PATH_METRICS_IE 2
+
 /* source-level API compatibility */
 #define NL80211_ATTR_SCAN_GENERATION NL80211_ATTR_GENERATION
 #define	NL80211_ATTR_MESH_PARAMS NL80211_ATTR_MESH_CONFIG
@@ -3667,6 +3693,13 @@
  *	(u8, see &enum nl80211_eht_gi)
  * @NL80211_RATE_INFO_EHT_RU_ALLOC: EHT RU allocation, if not present then
  *	non-OFDMA was used (u8, see &enum nl80211_eht_ru_alloc)
+ * @NL80211_RATE_INFO_S1G_MCS: S1G MCS index (u8, 0-10)
+ * @NL80211_RATE_INFO_S1G_NSS: S1G NSS value (u8, 1-4)
+ * @NL80211_RATE_INFO_1_MHZ_WIDTH: 1 MHz S1G rate
+ * @NL80211_RATE_INFO_2_MHZ_WIDTH: 2 MHz S1G rate
+ * @NL80211_RATE_INFO_4_MHZ_WIDTH: 4 MHz S1G rate
+ * @NL80211_RATE_INFO_8_MHZ_WIDTH: 8 MHz S1G rate
+ * @NL80211_RATE_INFO_16_MHZ_WIDTH: 16 MHz S1G rate
  * @__NL80211_RATE_INFO_AFTER_LAST: internal use
  */
 enum nl80211_rate_info {
@@ -3693,6 +3726,13 @@
 	NL80211_RATE_INFO_EHT_NSS,
 	NL80211_RATE_INFO_EHT_GI,
 	NL80211_RATE_INFO_EHT_RU_ALLOC,
+	NL80211_RATE_INFO_S1G_MCS,
+	NL80211_RATE_INFO_S1G_NSS,
+	NL80211_RATE_INFO_1_MHZ_WIDTH,
+	NL80211_RATE_INFO_2_MHZ_WIDTH,
+	NL80211_RATE_INFO_4_MHZ_WIDTH,
+	NL80211_RATE_INFO_8_MHZ_WIDTH,
+	NL80211_RATE_INFO_16_MHZ_WIDTH,
 
 	/* keep last */
 	__NL80211_RATE_INFO_AFTER_LAST,
@@ -4187,6 +4227,8 @@
  *	as the primary or any of the secondary channels isn't possible
  * @NL80211_FREQUENCY_ATTR_NO_EHT: EHT operation is not allowed on this channel
  *	in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_PSD: Power spectral density (in dBm) that
+ *	is allowed on this channel in current regulatory domain.
  * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
  *	currently defined
  * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -4225,6 +4267,7 @@
 	NL80211_FREQUENCY_ATTR_16MHZ,
 	NL80211_FREQUENCY_ATTR_NO_320MHZ,
 	NL80211_FREQUENCY_ATTR_NO_EHT,
+	NL80211_FREQUENCY_ATTR_PSD,
 
 	/* keep last */
 	__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -4325,6 +4368,8 @@
  * 	a given frequency range. The value is in mBm (100 * dBm).
  * @NL80211_ATTR_DFS_CAC_TIME: DFS CAC time in milliseconds.
  *	If not present or 0 default CAC time will be used.
+ * @NL80211_ATTR_POWER_RULE_PSD: power spectral density (in dBm).
+ *	This could be negative.
  * @NL80211_REG_RULE_ATTR_MAX: highest regulatory rule attribute number
  *	currently defined
  * @__NL80211_REG_RULE_ATTR_AFTER_LAST: internal use
@@ -4342,6 +4387,8 @@
 
 	NL80211_ATTR_DFS_CAC_TIME,
 
+	NL80211_ATTR_POWER_RULE_PSD,
+
 	/* keep last */
 	__NL80211_REG_RULE_ATTR_AFTER_LAST,
 	NL80211_REG_RULE_ATTR_MAX = __NL80211_REG_RULE_ATTR_AFTER_LAST - 1
@@ -4424,6 +4471,8 @@
  * @NL80211_RRF_NO_160MHZ: 160MHz operation not allowed
  * @NL80211_RRF_NO_HE: HE operation not allowed
  * @NL80211_RRF_NO_320MHZ: 320MHz operation not allowed
+ * @NL80211_RRF_NO_EHT: EHT operation not allowed
+ * @NL80211_RRF_PSD: Ruleset has power spectral density value
  */
 enum nl80211_reg_rule_flags {
 	NL80211_RRF_NO_OFDM		= 1<<0,
@@ -4443,6 +4492,8 @@
 	NL80211_RRF_NO_160MHZ		= 1<<16,
 	NL80211_RRF_NO_HE		= 1<<17,
 	NL80211_RRF_NO_320MHZ		= 1<<18,
+	NL80211_RRF_NO_EHT		= 1<<19,
+	NL80211_RRF_PSD			= 1<<20,
 };
 
 #define NL80211_RRF_PASSIVE_SCAN	NL80211_RRF_NO_IR
@@ -5010,7 +5061,7 @@
  *	elements from a Beacon frame (bin); not present if no Beacon frame has
  *	yet been received
  * @NL80211_BSS_CHAN_WIDTH: channel width of the control channel
- *	(u32, enum nl80211_bss_scan_width)
+ *	(u32, enum nl80211_bss_scan_width) - No longer used!
  * @NL80211_BSS_BEACON_TSF: TSF of the last received beacon (u64)
  *	(not present if no beacon frame has been received yet)
  * @NL80211_BSS_PRESP_DATA: the data in @NL80211_BSS_INFORMATION_ELEMENTS and
@@ -5255,6 +5306,7 @@
 };
 
 #define NL80211_HE_NSS_MAX		8
+#define NL80211_EHT_NSS_MAX             16
 /**
  * struct nl80211_txrate_he - HE MCS/NSS txrate bitmap
  * @mcs: MCS bitmap table for each NSS (array index 0 for 1 stream, etc.)
@@ -6372,6 +6424,12 @@
  *	in authentication and deauthentication frames sent to unassociated peer
  *	using @NL80211_CMD_FRAME.
  *
+ * @NL80211_EXT_FEATURE_OWE_OFFLOAD: Driver/Device wants to do OWE DH IE
+ *	handling in station mode.
+ *
+ * @NL80211_EXT_FEATURE_OWE_OFFLOAD_AP: Driver/Device wants to do OWE DH IE
+ *	handling in AP mode.
+ *
  * @NUM_NL80211_EXT_FEATURES: number of extended features.
  * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
  */
@@ -6443,6 +6501,8 @@
 	NL80211_EXT_FEATURE_PUNCT,
 	NL80211_EXT_FEATURE_SECURE_NAN,
 	NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA,
+	NL80211_EXT_FEATURE_OWE_OFFLOAD,
+	NL80211_EXT_FEATURE_OWE_OFFLOAD_AP,
 
 	/* add new features before the definition below */
 	NUM_NL80211_EXT_FEATURES,
@@ -6567,6 +6627,7 @@
  *	these channels would passively be scanned. Also note that when the flag
  *	is set, in addition to the colocated APs, PSC channels would also be
  *	scanned if the user space has asked for it.
+ * @NL80211_SCAN_FLAG_UPDATE_DFS: scan results will update DFS state
  */
 enum nl80211_scan_flags {
 	NL80211_SCAN_FLAG_LOW_PRIORITY				= 1<<0,
@@ -6584,6 +6645,7 @@
 	NL80211_SCAN_FLAG_MIN_PREQ_CONTENT			= 1<<12,
 	NL80211_SCAN_FLAG_FREQ_KHZ				= 1<<13,
 	NL80211_SCAN_FLAG_COLOCATED_6GHZ			= 1<<14,
+	NL80211_SCAN_FLAG_UPDATE_DFS				= 1<<15,
 };
 
 /**
@@ -7578,7 +7640,7 @@
  * @NL80211_FILS_DISCOVERY_ATTR_INT_MIN: Minimum packet interval (u32, TU).
  *	Allowed range: 0..10000 (TU = Time Unit)
  * @NL80211_FILS_DISCOVERY_ATTR_INT_MAX: Maximum packet interval (u32, TU).
- *	Allowed range: 0..10000 (TU = Time Unit)
+ *	Allowed range: 0..10000 (TU = Time Unit). If set to 0, the feature is disabled.
  * @NL80211_FILS_DISCOVERY_ATTR_TMPL: Template data for FILS discovery action
  *	frame including the headers.
  *
@@ -7611,7 +7673,8 @@
  *
  * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT: Maximum packet interval (u32, TU).
  *	Allowed range: 0..20 (TU = Time Unit). IEEE P802.11ax/D6.0
- *	26.17.2.3.2 (AP behavior for fast passive scanning).
+ *	26.17.2.3.2 (AP behavior for fast passive scanning). If set to 0, the feature is
+ *	disabled.
  * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL: Unsolicited broadcast probe response
  *	frame template (binary).
  *
diff -ruw linux-6.4/include/uapi/linux/serial_core.h linux-6.4-fbx/include/uapi/linux/serial_core.h
--- linux-6.4/include/uapi/linux/serial_core.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/uapi/linux/serial_core.h	2023-05-22 20:06:44.903871357 +0200
@@ -279,4 +279,7 @@
 /* Sunplus UART */
 #define PORT_SUNPLUS	123
 
+/* BCM63xx HS */
+#define PORT_BCM63XX_HS	123
+
 #endif /* _UAPILINUX_SERIAL_CORE_H */
diff -ruw linux-6.4/include/uapi/linux/sockios.h linux-6.4-fbx/include/uapi/linux/sockios.h
--- linux-6.4/include/uapi/linux/sockios.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/uapi/linux/sockios.h	2023-02-27 20:40:38.056744387 +0100
@@ -153,6 +153,14 @@
 #define SIOCSHWTSTAMP	0x89b0		/* set and get config		*/
 #define SIOCGHWTSTAMP	0x89b1		/* get config			*/
 
+/* fbxbridge call */
+#define SIOCGFBXBRIDGE	0x89c0		/* fbxbridge support          */
+#define SIOCSFBXBRIDGE	0x89c1		/* Set fbxbridge options      */
+
+/* fbxdiverter call */
+#define SIOCGFBXDIVERT  0x89d0		/* fbxdiverter support          */
+#define SIOCSFBXDIVERT  0x89d1		/* Set fbxdiverter options      */
+
 /* Device private ioctl calls */
 
 /*
diff -ruw linux-6.4/include/uapi/linux/stddef.h linux-6.4-fbx/include/uapi/linux/stddef.h
--- linux-6.4/include/uapi/linux/stddef.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/uapi/linux/stddef.h	2023-11-07 13:38:44.062256691 +0100
@@ -45,3 +45,7 @@
 		TYPE NAME[]; \
 	}
 #endif
+
+#ifndef __counted_by
+#define __counted_by(m)
+#endif
diff -ruw linux-6.4/include/uapi/linux/tcp.h linux-6.4-fbx/include/uapi/linux/tcp.h
--- linux-6.4/include/uapi/linux/tcp.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/uapi/linux/tcp.h	2023-05-22 20:06:44.911871570 +0200
@@ -134,6 +134,8 @@
 #define TCP_REPAIR_OFF		0
 #define TCP_REPAIR_OFF_NO_WP	-1	/* Turn off without window probes */
 
+#define TCP_LINEAR_RTO		128	/* force use of linear timeouts */
+
 struct tcp_repair_opt {
 	__u32	opt_code;
 	__u32	opt_val;
diff -ruw linux-6.4/include/uapi/linux/tty.h linux-6.4-fbx/include/uapi/linux/tty.h
--- linux-6.4/include/uapi/linux/tty.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/include/uapi/linux/tty.h	2023-05-22 20:06:44.911871570 +0200
@@ -39,8 +39,9 @@
 #define N_MCTP		28	/* MCTP-over-serial */
 #define N_DEVELOPMENT	29	/* Manual out-of-tree testing */
 #define N_CAN327	30	/* ELM327 based OBD-II interfaces */
+#define N_REMOTI	31	/* RemoTI over UART */
 
 /* Always the newest line discipline + 1 */
-#define NR_LDISCS	31
+#define NR_LDISCS	32
 
 #endif /* _UAPI_LINUX_TTY_H */
diff -ruw linux-6.4/init/Kconfig linux-6.4-fbx/init/Kconfig
--- linux-6.4/init/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/init/Kconfig	2023-05-22 20:06:44.935872208 +0200
@@ -139,6 +139,15 @@
 	  Maximum of each of the number of arguments and environment
 	  variables passed to init from the kernel command line.
 
+
+config CROSS_COMPILE
+	string "Cross-compiler tool prefix"
+	help
+	  Same as running 'make CROSS_COMPILE=prefix-' but stored for
+	  default make runs in this kernel build directory.  You don't
+	  need to set this unless you want the configured kernel build
+	  directory to select the cross-compiler automatically.
+
 config COMPILE_TEST
 	bool "Compile also drivers which will not load"
 	depends on HAS_IOMEM
@@ -779,6 +788,44 @@
 
 	  There is no additional runtime cost to printk with this enabled.
 
+config FBX_DECRYPT_INITRD
+	bool "Decrypt initrd at boot"
+	depends on BLK_DEV_RAM
+	default n
+
+choice
+	prompt "initrd decryption encryption flavor"
+	default FBX_DECRYPT_INITRD_RC4
+
+config FBX_DECRYPT_INITRD_RC4
+	depends on FBX_DECRYPT_INITRD
+	bool "RC4"
+
+config FBX_DECRYPT_INITRD_CHACHA20
+	depends on FBX_DECRYPT_INITRD
+	bool "CHACHA20"
+	select CRYPTO_CHACHA20
+
+endchoice
+
+config FBX_DECRYPT_INITRD_KEY
+	string "Decryption key"
+	depends on FBX_DECRYPT_INITRD
+
+config FBX_DECRYPT_INITRD_NONCE
+	string "Decryption nonce/IV"
+	depends on FBX_DECRYPT_INITRD_CHACHA20
+
+config FBX_VERIFY_INITRD
+	bool "Verify initrd at boot"
+	depends on FBX_DECRYPT_INITRD
+	select CRYPTO_RSA
+	select CRYPTO_SHA256
+
+config FBX_VERIFY_INITRD_PUBKEY
+	string "Public key path for initrd verify"
+	depends on FBX_VERIFY_INITRD
+
 #
 # Architectures with an unreliable sched_clock() should select this:
 #
diff -ruw linux-6.4/init/Makefile linux-6.4-fbx/init/Makefile
--- linux-6.4/init/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/init/Makefile	2023-05-22 20:06:44.935872208 +0200
@@ -15,6 +15,14 @@
 
 obj-y                          += init_task.o
 
+obj-$(CONFIG_FBX_DECRYPT_INITRD)+= fbx_decrypt_initrd.o
+obj-$(CONFIG_FBX_DECRYPT_INITRD_RC4) += rc4.o
+obj-$(CONFIG_FBX_VERIFY_INITRD) += fbx_initrd_pub_key.o
+
+PUB_KEY_PATH_UNQUOTED = $(patsubst "%",%,$(CONFIG_FBX_VERIFY_INITRD_PUBKEY))
+
+init/fbx_initrd_pub_key.o: $(PUB_KEY_PATH_UNQUOTED)
+
 mounts-y			:= do_mounts.o
 mounts-$(CONFIG_BLK_DEV_RAM)	+= do_mounts_rd.o
 mounts-$(CONFIG_BLK_DEV_INITRD)	+= do_mounts_initrd.o
diff -ruw linux-6.4/init/init_task.c linux-6.4-fbx/init/init_task.c
--- linux-6.4/init/init_task.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/init/init_task.c	2023-05-22 20:06:44.939872315 +0200
@@ -75,6 +75,7 @@
 	.stack		= init_stack,
 	.usage		= REFCOUNT_INIT(2),
 	.flags		= PF_KTHREAD,
+	.exec_mode	= EXEC_MODE_UNLIMITED,
 	.prio		= MAX_PRIO - 20,
 	.static_prio	= MAX_PRIO - 20,
 	.normal_prio	= MAX_PRIO - 20,
diff -ruw linux-6.4/init/initramfs.c linux-6.4-fbx/init/initramfs.c
--- linux-6.4/init/initramfs.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/init/initramfs.c	2023-05-22 20:06:44.939872315 +0200
@@ -18,6 +18,7 @@
 #include <linux/init_syscalls.h>
 #include <linux/task_work.h>
 #include <linux/umh.h>
+#include <linux/printk.h>
 
 static __initdata bool csum_present;
 static __initdata u32 io_csum;
@@ -668,6 +669,10 @@
 	ssize_t written;
 	struct file *file;
 	loff_t pos = 0;
+#ifdef CONFIG_FBX_DECRYPT_INITRD
+	int ret;
+	extern int fbx_decrypt_initrd(char *start, u32 size);
+#endif
 
 	unpack_to_rootfs(__initramfs_start, __initramfs_size);
 
@@ -677,6 +682,15 @@
 	if (IS_ERR(file))
 		return;
 
+#ifdef CONFIG_FBX_DECRYPT_INITRD
+	ret = fbx_decrypt_initrd((char*)initrd_start,
+				 initrd_end - initrd_start);
+	if (ret) {
+		printk(KERN_ERR "Decrypt failed: %i\n", ret);
+		return;
+	}
+#endif
+
 	written = xwrite(file, (char *)initrd_start, initrd_end - initrd_start,
 			&pos);
 	if (written != initrd_end - initrd_start)
diff -ruw linux-6.4/kernel/fork.c linux-6.4-fbx/kernel/fork.c
--- linux-6.4/kernel/fork.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/kernel/fork.c	2023-06-27 11:47:16.103867923 +0200
@@ -1195,6 +1195,11 @@
 	tsk->mm_cid_active = 0;
 	tsk->migrate_from_cpu = -1;
 #endif
+	/*
+	 * inherit parent exec_mode.
+	 */
+	tsk->exec_mode = orig->exec_mode;
+
 	return tsk;
 
 free_stack:
diff -ruw linux-6.4/kernel/module/Kconfig linux-6.4-fbx/kernel/module/Kconfig
--- linux-6.4/kernel/module/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/kernel/module/Kconfig	2023-05-22 20:06:45.039874975 +0200
@@ -385,6 +385,10 @@
 	  one per line. The path can be absolute, or relative to the kernel
 	  source tree.
 
+config UNUSED_KSYMS_WHITELIST_SYMS
+	string "Whitelist of symbols name to keep in ksymtab"
+	depends on TRIM_UNUSED_KSYMS
+
 config MODULES_TREE_LOOKUP
 	def_bool y
 	depends on PERF_EVENTS || TRACING || CFI_CLANG
diff -ruw linux-6.4/kernel/sys.c linux-6.4-fbx/kernel/sys.c
--- linux-6.4/kernel/sys.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/kernel/sys.c	2023-05-22 20:06:45.091876358 +0200
@@ -12,6 +12,7 @@
 #include <linux/mman.h>
 #include <linux/reboot.h>
 #include <linux/prctl.h>
+#include <linux/prctl-private.h>
 #include <linux/highuid.h>
 #include <linux/fs.h>
 #include <linux/kmod.h>
@@ -2708,6 +2709,18 @@
 		error = !!test_bit(MMF_VM_MERGE_ANY, &me->mm->flags);
 		break;
 #endif
+	case PR_SET_EXEC_MODE:
+		if (arg2 != EXEC_MODE_UNLIMITED &&
+		    arg2 != EXEC_MODE_ONCE &&
+		    arg2 != EXEC_MODE_DENIED)
+			return -EINVAL;
+
+		if (arg2 > current->exec_mode)
+			return -EPERM;
+		current->exec_mode = arg2;
+		return 0;
+	case PR_GET_EXEC_MODE:
+		return current->exec_mode;
 	default:
 		error = -EINVAL;
 		break;
diff -ruw linux-6.4/lib/Kconfig linux-6.4-fbx/lib/Kconfig
--- linux-6.4/lib/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/lib/Kconfig	2023-05-22 20:06:45.147877847 +0200
@@ -728,6 +728,13 @@
 config OBJAGG
 	tristate "objagg" if COMPILE_TEST
 
+config ARCH_HAS_FBXSERIAL
+	bool
+
+config FBXSERIAL
+	bool "fbxserial"
+	select CRC32
+
 endmenu
 
 config GENERIC_IOREMAP
diff -ruw linux-6.4/lib/Makefile linux-6.4-fbx/lib/Makefile
--- linux-6.4/lib/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/lib/Makefile	2023-05-22 20:06:45.151877954 +0200
@@ -430,3 +430,4 @@
 ifeq ($(CONFIG_FORTIFY_SOURCE),y)
 $(obj)/string.o: $(obj)/$(TEST_FORTIFY_LOG)
 endif
+obj-$(CONFIG_FBXSERIAL) += fbxserial.o
diff -ruw linux-6.4/mm/Kconfig linux-6.4-fbx/mm/Kconfig
--- linux-6.4/mm/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/mm/Kconfig	2023-05-22 20:06:45.207879443 +0200
@@ -709,6 +709,13 @@
 config ARCH_SUPPORTS_MEMORY_FAILURE
 	bool
 
+config PAGE_FRAG_CACHE_ORDER
+	int "page order size of page fragment allocator"
+	default 3
+	help
+	  This allocator is used by networking only for skb->head allocation.
+	  A large value speeds up allocation but causes memory fragmentation.
+
 config MEMORY_FAILURE
 	depends on MMU
 	depends on ARCH_SUPPORTS_MEMORY_FAILURE
diff -ruw linux-6.4/mm/memtest.c linux-6.4-fbx/mm/memtest.c
--- linux-6.4/mm/memtest.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/mm/memtest.c	2023-12-05 17:14:42.315715453 +0100
@@ -28,6 +28,7 @@
 	0x7a6c7258554e494cULL, /* yeah ;-) */
 };
 
+#ifndef CONFIG_X86 // original code
 static void __init reserve_bad_mem(u64 pattern, phys_addr_t start_bad, phys_addr_t end_bad)
 {
 	pr_info("  %016llx bad mem addr %pa - %pa reserved\n",
@@ -68,19 +69,83 @@
 
 	early_memtest_done = true;
 }
+#else // fbx6hd
+/*
+ * Count memory errors in segment [a,b]
+ * If an error is detected, remove segment from memory pool.
+ */
+static void __init test_segment(unsigned long a, unsigned long b, u64 pat)
+{
+	int err_count = 0;
+	u64 *p;
+
+	/*
+	 * If an exception, such as page fault, is required to prefetch the data,
+	 * then the software prefetch instruction retires without prefetching data.
+	 */
+	for (p = __va(a); p != __va(b); p += 4) {
+		__builtin_prefetch(p+64);
+		if (p[0] != pat || p[1] != pat || p[2] != pat || p[3] != pat)
+			++err_count;
+	}
+
+	if (err_count) {
+		pr_warn("BAD+RAM: %lx-%lx: N=%d", a, b, err_count);
+		memblock_reserve(a, b-a);
+	}
+}
+
+typedef u64 u128 __attribute__ ((__vector_size__ (16)));
+
+static void __init write_pattern(unsigned long a, unsigned long b, u64 pat)
+{
+	u128 val = (u128){ pat, pat };
+	u128 *p = __va(a), *q = __va(b);
+	kernel_fpu_begin();
+	asm("movdqa %0, %%xmm0" : : "m" (val));
+	for (/**/; p != q; ++p)
+		asm("movntdq %%xmm0, %0" : "=m" (*p));
+	kernel_fpu_end();
+}
+
+#define SEGMENT_SIZE (1 << 16) // 64K
+
+static void __init memtest(u64 pattern, phys_addr_t start_phys, phys_addr_t size)
+{
+	/* On fbx6hd, ulong is wide enough to store physical addresses */
+	unsigned long curr = start_phys;
+	unsigned long next = ALIGN(curr + 1, SEGMENT_SIZE);
+	unsigned long end = start_phys + size;
+
+	// Check alignment for correct loop unrolling
+	if (curr & 0x1f || end & 0x1f) {
+		pr_warn("BAD+RAM: %lx-%lx: misaligned", curr, end);
+		return;
+	}
+
+	write_pattern(curr, end, pattern);
+
+	while (curr < end) {
+		if (next > end)
+			next = end;
+		test_segment(curr, next, pattern);
+		curr = next;
+		next += SEGMENT_SIZE;
+	}
+}
+#endif
 
 static void __init do_one_pass(u64 pattern, phys_addr_t start, phys_addr_t end)
 {
 	u64 i;
 	phys_addr_t this_start, this_end;
 
+	pr_info("pattern %016llx\n", cpu_to_be64(pattern));
 	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &this_start,
 				&this_end, NULL) {
 		this_start = clamp(this_start, start, end);
 		this_end = clamp(this_end, start, end);
 		if (this_start < this_end) {
-			pr_info("  %pa - %pa pattern %016llx\n",
-				&this_start, &this_end, cpu_to_be64(pattern));
 			memtest(pattern, this_start, this_end - this_start);
 		}
 	}
diff -ruw linux-6.4/mm/page_alloc.c linux-6.4-fbx/mm/page_alloc.c
--- linux-6.4/mm/page_alloc.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/mm/page_alloc.c	2023-05-22 20:06:45.263880933 +0200
@@ -4887,18 +4887,24 @@
 {
 	struct page *page = NULL;
 	gfp_t gfp = gfp_mask;
+	unsigned int order;
 
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
 	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
 		    __GFP_NOMEMALLOC;
 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
 				PAGE_FRAG_CACHE_MAX_ORDER);
+	order = PAGE_FRAG_CACHE_MAX_ORDER;
 	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
 #endif
-	if (unlikely(!page))
+	if (unlikely(!page)) {
 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
+		order = 0;
+	}
 
 	nc->va = page ? page_address(page) : NULL;
+	if (page)
+		set_page_owner_frag_cache(page, order, nc);
 
 	return page;
 }
diff -ruw linux-6.4/mm/vmalloc.c linux-6.4-fbx/mm/vmalloc.c
--- linux-6.4/mm/vmalloc.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/mm/vmalloc.c	2023-06-27 11:47:16.127868575 +0200
@@ -3340,6 +3340,19 @@
 EXPORT_SYMBOL_GPL(__vmalloc_node);
 #endif
 
+/*
+ * __vmalloc_pgprot(): same as __vmalloc, but with a pgprot_t parameter.
+ *
+ * required for IntelCE drivers.
+ */
+void *__vmalloc_pgprot(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
+{
+	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+				    gfp_mask, prot, 0, NUMA_NO_NODE,
+				    __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__vmalloc_pgprot);
+
 void *__vmalloc(unsigned long size, gfp_t gfp_mask)
 {
 	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
diff -ruw linux-6.4/net/8021q/Kconfig linux-6.4-fbx/net/8021q/Kconfig
--- linux-6.4/net/8021q/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/8021q/Kconfig	2024-02-08 19:19:27.692597465 +0100
@@ -39,3 +39,12 @@
 	  supersedes GVRP and is not backwards-compatible.
 
 	  If unsure, say N.
+
+config VLAN_FBX
+	bool "Freebox specific VLAN ethertype to bypass dump switches"
+	depends on VLAN_8021Q
+	help
+	  Select this to enable FBX VLAN specific ethertype to bypass
+	  switches that drops 802.1q packets
+
+	  If unsure, say N.
diff -ruw linux-6.4/net/8021q/vlan.c linux-6.4-fbx/net/8021q/vlan.c
--- linux-6.4/net/8021q/vlan.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/8021q/vlan.c	2023-05-22 20:06:45.287881571 +0200
@@ -211,7 +211,7 @@
 /*  Attach a VLAN device to a mac address (ie Ethernet Card).
  *  Returns 0 if the device was created or a negative error code otherwise.
  */
-static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
+int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 {
 	struct net_device *new_dev;
 	struct vlan_dev_priv *vlan;
diff -ruw linux-6.4/net/8021q/vlan.h linux-6.4-fbx/net/8021q/vlan.h
--- linux-6.4/net/8021q/vlan.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/8021q/vlan.h	2024-02-08 19:19:27.692597465 +0100
@@ -16,6 +16,9 @@
 enum vlan_protos {
 	VLAN_PROTO_8021Q	= 0,
 	VLAN_PROTO_8021AD,
+#ifdef CONFIG_VLAN_FBX
+	VLAN_PROTO_FBX,
+#endif
 	VLAN_PROTO_NUM,
 };
 
@@ -43,6 +46,10 @@
 		return VLAN_PROTO_8021Q;
 	case htons(ETH_P_8021AD):
 		return VLAN_PROTO_8021AD;
+#ifdef CONFIG_VLAN_FBX
+	case htons(ETH_P_FBXVLAN):
+		return VLAN_PROTO_FBX;
+#endif
 	default:
 		WARN(1, "invalid VLAN protocol: 0x%04x\n", ntohs(proto));
 		return -EINVAL;
diff -ruw linux-6.4/net/8021q/vlan_core.c linux-6.4-fbx/net/8021q/vlan_core.c
--- linux-6.4/net/8021q/vlan_core.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/8021q/vlan_core.c	2023-05-22 20:06:45.287881571 +0200
@@ -99,6 +99,12 @@
 }
 EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);
 
+struct net_device *vlan_dev_upper_dev(const struct net_device *dev)
+{
+	return vlan_dev_priv(dev)->real_dev;
+}
+EXPORT_SYMBOL(vlan_dev_upper_dev);
+
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
 	struct net_device *ret = vlan_dev_priv(dev)->real_dev;
diff -ruw linux-6.4/net/8021q/vlan_netlink.c linux-6.4-fbx/net/8021q/vlan_netlink.c
--- linux-6.4/net/8021q/vlan_netlink.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/8021q/vlan_netlink.c	2024-02-08 19:19:27.692597465 +0100
@@ -63,6 +63,9 @@
 		switch (nla_get_be16(data[IFLA_VLAN_PROTOCOL])) {
 		case htons(ETH_P_8021Q):
 		case htons(ETH_P_8021AD):
+#ifdef CONFIG_VLAN_FBX
+		case htons(ETH_P_FBXVLAN):
+#endif
 			break;
 		default:
 			NL_SET_ERR_MSG_MOD(extack, "Invalid VLAN protocol");
diff -ruw linux-6.4/net/Kconfig linux-6.4-fbx/net/Kconfig
--- linux-6.4/net/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/Kconfig	2023-12-12 17:24:34.163627207 +0100
@@ -32,6 +32,10 @@
 	  This option can be selected by other options that need compat
 	  netlink messages.
 
+config NET_PROMISC_MESSAGES
+	bool "show promisc/allmulti status change in kernel log"
+	default y
+
 config COMPAT_NETLINK_MESSAGES
 	def_bool y
 	depends on COMPAT
@@ -60,6 +64,19 @@
 
 menu "Networking options"
 
+config NETSKBPAD
+	int "Size reserved by dev_alloc_skb"
+	default 32
+
+config NETRXTHREAD
+	bool "Do rx network processing in kernel thread"
+	depends on BROKEN_ON_SMP
+
+config NETRXTHREAD_RX_QUEUE
+	int "Number of rx queues"
+	default 1
+	depends on NETRXTHREAD
+
 source "net/packet/Kconfig"
 source "net/unix/Kconfig"
 source "net/tls/Kconfig"
@@ -236,6 +253,8 @@
 source "net/tipc/Kconfig"
 source "net/atm/Kconfig"
 source "net/l2tp/Kconfig"
+source "net/fbxatm/Kconfig"
+source "net/fbxbridge/Kconfig"
 source "net/802/Kconfig"
 source "net/bridge/Kconfig"
 source "net/dsa/Kconfig"
diff -ruw linux-6.4/net/Makefile linux-6.4-fbx/net/Makefile
--- linux-6.4/net/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/Makefile	2023-12-12 17:24:34.163627207 +0100
@@ -39,6 +39,12 @@
 obj-$(CONFIG_STREAM_PARSER)	+= strparser/
 obj-$(CONFIG_ATM)		+= atm/
 obj-$(CONFIG_L2TP)		+= l2tp/
+ifneq ($(CONFIG_FBXATM),)
+obj-y				+= fbxatm/
+endif
+ifneq ($(CONFIG_FBXBRIDGE),)
+obj-y				+= fbxbridge/
+endif
 obj-$(CONFIG_PHONET)		+= phonet/
 ifneq ($(CONFIG_VLAN_8021Q),)
 obj-y				+= 8021q/
diff -ruw linux-6.4/net/batman-adv/Kconfig linux-6.4-fbx/net/batman-adv/Kconfig
--- linux-6.4/net/batman-adv/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/Kconfig	2023-12-21 17:30:06.449516617 +0100
@@ -94,3 +94,31 @@
 	  messages using the generic tracing infrastructure of the kernel.
 	  BATMAN_ADV_DEBUG must also be selected to get trace events for
 	  batadv_dbg.
+
+config BATMAN_ADV_FBX
+	bool "B.A.T.M.A.N. FBX specific features"
+	depends on BATMAN_ADV
+	help
+	  This enables FBX specific options to be selected (e.g. MTU
+	  discovery, SLAP protocol).
+
+config BATMAN_ADV_FBX_MTU
+	bool "B.A.T.M.A.N. FBX path max MTU discovery feature"
+	depends on BATMAN_ADV_FBX
+	help
+	  This enables FBX path max MTU discovery protocol.
+
+config BATMAN_ADV_FBX_SLAP
+	bool "B.A.T.M.A.N. FBX SLAP"
+	depends on BATMAN_ADV_FBX
+	help
+	  This enables FBX SLAP (simple loop avoidance protocol) to handle
+	  a blend of LAN and B.A.T.M.A.N. traffic on an ethernet port
+	  correctly in Freebox configurations.
+
+config BATMAN_ADV_FBX_PERIF_ROUTER
+	bool "B.A.T.M.A.N. FBX perif router"
+	depends on BATMAN_ADV_FBX && BATMAN_ADV_BATMAN_V
+	help
+	  Keep track of per interface best router to reach a specific
+	  originator.
diff -ruw linux-6.4/net/batman-adv/Makefile linux-6.4-fbx/net/batman-adv/Makefile
--- linux-6.4/net/batman-adv/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/Makefile	2023-12-21 17:30:06.449516617 +0100
@@ -4,6 +4,7 @@
 # Marek Lindner, Simon Wunderlich
 
 obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
+
 batman-adv-y += bat_algo.o
 batman-adv-y += bat_iv_ogm.o
 batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v.o
@@ -30,5 +31,9 @@
 batman-adv-y += tp_meter.o
 batman-adv-y += translation-table.o
 batman-adv-y += tvlv.o
+batman-adv-$(CONFIG_BATMAN_ADV_FBX) += fbx/fbx.o
+batman-adv-$(CONFIG_BATMAN_ADV_FBX_MTU) += fbx/mtu.o
+batman-adv-$(CONFIG_BATMAN_ADV_FBX_SLAP) += fbx/slap.o
+batman-adv-$(CONFIG_BATMAN_ADV_FBX_PERIF_ROUTER) += fbx/router.o
 
 CFLAGS_trace.o := -I$(src)
diff -ruw linux-6.4/net/batman-adv/bat_v.c linux-6.4-fbx/net/batman-adv/bat_v.c
--- linux-6.4/net/batman-adv/bat_v.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/bat_v.c	2023-12-12 17:24:34.163627207 +0100
@@ -38,6 +38,8 @@
 #include "hard-interface.h"
 #include "hash.h"
 #include "log.h"
+#include "fbx/fbx.h"
+#include "fbx/mtu.h"
 #include "netlink.h"
 #include "originator.h"
 
@@ -130,6 +132,7 @@
 batadv_v_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
 			  struct batadv_hardif_neigh_node *hardif_neigh)
 {
+	struct batadv_priv *bat_priv;
 	void *hdr;
 	unsigned int last_seen_msecs;
 	u32 throughput;
@@ -154,6 +157,9 @@
 	    nla_put_u32(msg, BATADV_ATTR_THROUGHPUT, throughput))
 		goto nla_put_failure;
 
+	bat_priv = netdev_priv(hardif_neigh->if_incoming->soft_iface);
+	batadv_fbx_nl(bat_priv, BATADV_CMD_GET_NEIGHBORS, NULL, msg,
+		      hardif_neigh);
 	genlmsg_end(msg, hdr);
 	return 0;
 
@@ -311,6 +317,9 @@
 	if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
 		goto nla_put_failure;
 
+	batadv_fbx_nl(bat_priv, BATADV_CMD_GET_ORIGINATORS, NULL, msg,
+		      orig_node);
+
 	genlmsg_end(msg, hdr);
 	return 0;
 
diff -ruw linux-6.4/net/batman-adv/bat_v_elp.c linux-6.4-fbx/net/batman-adv/bat_v_elp.c
--- linux-6.4/net/batman-adv/bat_v_elp.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/bat_v_elp.c	2023-12-12 17:24:34.163627207 +0100
@@ -140,7 +140,8 @@
 	}
 
 default_throughput:
-	if (!(hard_iface->bat_v.flags & BATADV_WARNING_DEFAULT)) {
+	if (hard_iface->soft_iface &&
+	    !(hard_iface->bat_v.flags & BATADV_WARNING_DEFAULT)) {
 		batadv_info(hard_iface->soft_iface,
 			    "WiFi driver or ethtool info does not provide information about link speeds on interface %s, therefore defaulting to hardcoded throughput values of %u.%1u Mbps. Consider overriding the throughput manually or checking your driver.\n",
 			    hard_iface->net_dev->name,
@@ -505,7 +506,7 @@
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct batadv_elp_packet *elp_packet;
 	struct batadv_hard_iface *primary_if;
-	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
+	struct ethhdr *ethhdr;
 	bool res;
 	int ret = NET_RX_DROP;
 
@@ -513,6 +514,7 @@
 	if (!res)
 		goto free_skb;
 
+	ethhdr = eth_hdr(skb);
 	if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
 		goto free_skb;
 
diff -ruw linux-6.4/net/batman-adv/bat_v_ogm.c linux-6.4-fbx/net/batman-adv/bat_v_ogm.c
--- linux-6.4/net/batman-adv/bat_v_ogm.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/bat_v_ogm.c	2023-12-21 17:30:06.449516617 +0100
@@ -42,6 +42,7 @@
 #include "send.h"
 #include "translation-table.h"
 #include "tvlv.h"
+#include "fbx/fbx.h"
 
 /**
  * batadv_v_ogm_orig_get() - retrieve and possibly create an originator node
@@ -123,8 +124,10 @@
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 
-	if (hard_iface->if_status != BATADV_IF_ACTIVE)
+	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
+		kfree_skb(skb);
 		return;
+	}
 
 	batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX);
 	batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES,
@@ -473,12 +476,14 @@
 static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv,
 				    struct batadv_hard_iface *if_incoming,
 				    struct batadv_hard_iface *if_outgoing,
-				    u32 throughput)
+				    u32 throughput, bool *half_duplex)
 {
 	int if_hop_penalty = atomic_read(&if_incoming->hop_penalty);
 	int hop_penalty = atomic_read(&bat_priv->hop_penalty);
 	int hop_penalty_max = BATADV_TQ_MAX_VALUE;
 
+	*half_duplex = false;
+
 	/* Apply per hardif hop penalty */
 	throughput = throughput * (hop_penalty_max - if_hop_penalty) /
 		     hop_penalty_max;
@@ -493,8 +498,10 @@
 	 */
 	if (throughput > 10 &&
 	    if_incoming == if_outgoing &&
-	    !(if_incoming->bat_v.flags & BATADV_FULL_DUPLEX))
+	    !(if_incoming->bat_v.flags & BATADV_FULL_DUPLEX)) {
+		*half_duplex = true;
 		return throughput / 2;
+	}
 
 	/* hop penalty of 255 equals 100% */
 	return throughput * (hop_penalty_max - hop_penalty) / hop_penalty_max;
@@ -572,6 +579,9 @@
 
 	/* apply forward penalty */
 	ogm_forward = (struct batadv_ogm2_packet *)skb_buff;
+	ogm_forward->flags &= ~BATADV_V_HALF_DUPLEX;
+	if (neigh_ifinfo->bat_v.half_duplex)
+		ogm_forward->flags |= BATADV_V_HALF_DUPLEX;
 	ogm_forward->throughput = htonl(neigh_ifinfo->bat_v.throughput);
 	ogm_forward->ttl--;
 
@@ -614,6 +624,7 @@
 	bool protection_started = false;
 	int ret = -EINVAL;
 	u32 path_throughput;
+	bool half_duplex;
 	s32 seq_diff;
 
 	orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
@@ -655,10 +666,12 @@
 
 	path_throughput = batadv_v_forward_penalty(bat_priv, if_incoming,
 						   if_outgoing,
-						   ntohl(ogm2->throughput));
+						   ntohl(ogm2->throughput),
+						   &half_duplex);
 	neigh_ifinfo->bat_v.throughput = path_throughput;
 	neigh_ifinfo->bat_v.last_seqno = ntohl(ogm2->seqno);
 	neigh_ifinfo->last_ttl = ogm2->ttl;
+	neigh_ifinfo->bat_v.half_duplex = half_duplex;
 
 	if (seq_diff > 0 || protection_started)
 		ret = 1;
@@ -842,6 +855,28 @@
 }
 
 /**
+ * batadv_v_get_throughput() - Compute path throughput from ogm
+ * @ogm: OGM2 packet received
+ * @neigh: Neighbour OGM packet has been received from
+ * Return: Estimated path throughput
+ */
+static u32 batadv_v_get_throughput(struct batadv_ogm2_packet *ogm,
+				   struct batadv_hardif_neigh_node *neigh)
+{
+	u32 oth, lth;
+
+	oth = ntohl(ogm->throughput);
+	lth = ewma_throughput_read(&neigh->bat_v.throughput);
+
+	if (!(ogm->flags & BATADV_V_HALF_DUPLEX))
+		return min_t(u32, lth, oth);
+
+	/* OGM throughput was divided by two for backward compatibility's sake */
+	oth *= 2;
+	return oth * lth / (oth + lth);
+}
+
+/**
  * batadv_v_ogm_process() - process an incoming batman v OGM
  * @skb: the skb containing the OGM
  * @ogm_offset: offset to the OGM which should be processed (for aggregates)
@@ -857,7 +892,7 @@
 	struct batadv_neigh_node *neigh_node = NULL;
 	struct batadv_hard_iface *hard_iface;
 	struct batadv_ogm2_packet *ogm_packet;
-	u32 ogm_throughput, link_throughput, path_throughput;
+	u32 ogm_throughput;
 	int ret;
 
 	ethhdr = eth_hdr(skb);
@@ -911,9 +946,8 @@
 	 *  - For OGMs traversing more than hop the path throughput metric is
 	 *    the smaller of the path throughput and the link throughput.
 	 */
-	link_throughput = ewma_throughput_read(&hardif_neigh->bat_v.throughput);
-	path_throughput = min_t(u32, link_throughput, ogm_throughput);
-	ogm_packet->throughput = htonl(path_throughput);
+	ogm_packet->throughput = htonl(batadv_v_get_throughput(ogm_packet,
+							       hardif_neigh));
 
 	batadv_v_ogm_process_per_outif(bat_priv, ethhdr, ogm_packet, orig_node,
 				       neigh_node, if_incoming,
@@ -967,6 +1001,7 @@
 	}
 	rcu_read_unlock();
 out:
+	batadv_fbx_ogm_process(bat_priv, orig_node, neigh_node, ogm_packet);
 	batadv_orig_node_put(orig_node);
 	batadv_neigh_node_put(neigh_node);
 	batadv_hardif_neigh_put(hardif_neigh);
@@ -985,7 +1020,7 @@
 {
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct batadv_ogm2_packet *ogm_packet;
-	struct ethhdr *ethhdr = eth_hdr(skb);
+	struct ethhdr *ethhdr;
 	int ogm_offset;
 	u8 *packet_pos;
 	int ret = NET_RX_DROP;
@@ -999,6 +1034,7 @@
 	if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN))
 		goto free_skb;
 
+	ethhdr = eth_hdr(skb);
 	if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
 		goto free_skb;
 
diff -ruw linux-6.4/net/batman-adv/fragmentation.c linux-6.4-fbx/net/batman-adv/fragmentation.c
--- linux-6.4/net/batman-adv/fragmentation.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/fragmentation.c	2023-12-12 17:24:34.163627207 +0100
@@ -28,6 +28,8 @@
 #include "routing.h"
 #include "send.h"
 
+#include "fbx/mtu.h"
+
 /**
  * batadv_frag_clear_chain() - delete entries in the fragment buffer chain
  * @head: head of chain with entries.
@@ -445,7 +447,7 @@
 	struct batadv_hard_iface *primary_if = NULL;
 	struct batadv_frag_packet frag_header;
 	struct sk_buff *skb_fragment;
-	unsigned int mtu = net_dev->mtu;
+	unsigned int mtu = batadv_mtu_get_for_neigh(neigh_node->hardif_neigh);
 	unsigned int header_size = sizeof(frag_header);
 	unsigned int max_fragment_size, num_fragments;
 	int ret;
diff -ruw linux-6.4/net/batman-adv/hard-interface.c linux-6.4-fbx/net/batman-adv/hard-interface.c
--- linux-6.4/net/batman-adv/hard-interface.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/hard-interface.c	2023-12-12 17:24:34.167627317 +0100
@@ -36,6 +36,7 @@
 #include "gateway_client.h"
 #include "log.h"
 #include "originator.h"
+#include "fbx/fbx.h"
 #include "send.h"
 #include "soft-interface.h"
 #include "translation-table.h"
@@ -469,6 +470,7 @@
 
 	batadv_dat_init_own_addr(bat_priv, primary_if);
 	batadv_bla_update_orig_address(bat_priv, primary_if, oldif);
+	batadv_fbx_primary_update(bat_priv, primary_if);
 out:
 	batadv_hardif_put(primary_if);
 }
@@ -970,8 +972,10 @@
 		batadv_hardif_remove_interface(hard_iface);
 		break;
 	case NETDEV_CHANGEMTU:
-		if (hard_iface->soft_iface)
+		if (hard_iface->soft_iface) {
 			batadv_update_min_mtu(hard_iface->soft_iface);
+			batadv_fbx_hardif_update(hard_iface);
+		}
 		break;
 	case NETDEV_CHANGEADDR:
 		if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
diff -ruw linux-6.4/net/batman-adv/main.c linux-6.4-fbx/net/batman-adv/main.c
--- linux-6.4/net/batman-adv/main.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/main.c	2023-12-12 17:24:34.167627317 +0100
@@ -59,6 +59,7 @@
 #include "soft-interface.h"
 #include "tp_meter.h"
 #include "translation-table.h"
+#include "fbx/fbx.h"
 
 /* List manipulations on hardif_list have to be rtnl_lock()'ed,
  * list traversals just rcu-locked
@@ -115,6 +116,7 @@
 	register_netdevice_notifier(&batadv_hard_if_notifier);
 	rtnl_link_register(&batadv_link_ops);
 	batadv_netlink_register();
+	batadv_fbx_init();
 
 	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
 		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
@@ -129,6 +131,7 @@
 
 static void __exit batadv_exit(void)
 {
+	batadv_fbx_exit();
 	batadv_netlink_unregister();
 	rtnl_link_unregister(&batadv_link_ops);
 	unregister_netdevice_notifier(&batadv_hard_if_notifier);
@@ -228,6 +231,7 @@
 
 	batadv_gw_init(bat_priv);
 	batadv_mcast_init(bat_priv);
+	batadv_fbx_new_priv(bat_priv);
 
 	atomic_set(&bat_priv->gw.reselect, 0);
 	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
@@ -270,6 +274,7 @@
 	batadv_dat_free(bat_priv);
 	batadv_bla_free(bat_priv);
 
+	batadv_fbx_free_priv(bat_priv);
 	batadv_mcast_free(bat_priv);
 
 	/* Free the TT and the originator tables only after having terminated
@@ -481,6 +486,7 @@
 	memset(skb->cb, 0, sizeof(struct batadv_skb_cb));
 
 	idx = batadv_ogm_packet->packet_type;
+
 	(*batadv_rx_handler[idx])(skb, hard_iface);
 
 	batadv_hardif_put(hard_iface);
@@ -526,6 +532,7 @@
 	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_vlan_data) != 8);
 	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
 	BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);
+	BUILD_BUG_ON(sizeof(struct batadv_fbx_packet) != 8);
 
 	i = sizeof_field(struct sk_buff, cb);
 	BUILD_BUG_ON(sizeof(struct batadv_skb_cb) > i);
diff -ruw linux-6.4/net/batman-adv/netlink.c linux-6.4-fbx/net/batman-adv/netlink.c
--- linux-6.4/net/batman-adv/netlink.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/netlink.c	2023-12-21 17:30:06.449516617 +0100
@@ -51,6 +51,7 @@
 #include "soft-interface.h"
 #include "tp_meter.h"
 #include "translation-table.h"
+#include "fbx/fbx.h"
 
 struct genl_family batadv_netlink_family;
 
@@ -58,6 +59,7 @@
 enum batadv_netlink_multicast_groups {
 	BATADV_NL_MCGRP_CONFIG,
 	BATADV_NL_MCGRP_TPMETER,
+	BATADV_NL_MCGRP_ROUTE,
 };
 
 /**
@@ -89,6 +91,7 @@
 static const struct genl_multicast_group batadv_netlink_mcgrps[] = {
 	[BATADV_NL_MCGRP_CONFIG] = { .name = BATADV_NL_MCAST_GROUP_CONFIG },
 	[BATADV_NL_MCGRP_TPMETER] = { .name = BATADV_NL_MCAST_GROUP_TPMETER },
+	[BATADV_NL_MCGRP_ROUTE] = { .name = BATADV_NL_MCAST_GROUP_ROUTE },
 };
 
 static const struct nla_policy batadv_netlink_policy[NUM_BATADV_ATTR] = {
@@ -151,6 +154,7 @@
 	[BATADV_ATTR_ORIG_INTERVAL]		= { .type = NLA_U32 },
 	[BATADV_ATTR_ELP_INTERVAL]		= { .type = NLA_U32 },
 	[BATADV_ATTR_THROUGHPUT_OVERRIDE]	= { .type = NLA_U32 },
+	[BATADV_ATTR_FBX]			= { .type = NLA_NESTED },
 };
 
 /**
@@ -361,6 +365,7 @@
 
 	batadv_hardif_put(primary_if);
 
+	batadv_fbx_nl(bat_priv, cmd, NULL, msg, NULL);
 	genlmsg_end(msg, hdr);
 	return 0;
 
@@ -613,6 +618,7 @@
 		atomic_set(&bat_priv->orig_interval, orig_interval);
 	}
 
+	batadv_fbx_nl(bat_priv, BATADV_CMD_SET_MESH, info, NULL, NULL);
 	batadv_netlink_notify_mesh(bat_priv);
 
 	return 0;
@@ -1151,6 +1157,100 @@
 	return 0;
 }
 
+/**
+ * batadv_netlink_route_fill() - Fill message with route attributes
+ * @msg: Netlink message to dump route info into
+ * @bat_priv: the bat priv with all the soft interface information
+ * @cmd: type of netlink message
+ * @orig_node: Current route destination originator
+ * @neigh_node: Current best neighbour for this originator
+ * @best: Globally best route
+ * @portid: Port making netlink request
+ * @seq: sequence number for message
+ * @flags: Additional netlink message flags
+ */
+static int batadv_netlink_route_fill(struct sk_buff *msg,
+				     struct batadv_priv *bat_priv,
+				     enum batadv_nl_commands cmd,
+				     struct batadv_orig_node *orig_node,
+				     struct batadv_neigh_node *neigh_node,
+				     bool best,
+				     u32 portid, u32 seq, int flags)
+{
+	struct net_device *soft_iface = bat_priv->soft_iface;
+	void *hdr;
+
+	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, flags, cmd);
+	if (!hdr)
+		return -ENOBUFS;
+
+	if (nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, soft_iface->ifindex))
+		goto nla_put_failure;
+
+	if (nla_put(msg, BATADV_ATTR_MESH_ADDRESS, ETH_ALEN,
+		    soft_iface->dev_addr))
+		goto nla_put_failure;
+
+	if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
+		    orig_node->orig))
+		goto nla_put_failure;
+
+	if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
+		goto nla_put_failure;
+
+	if (neigh_node && nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
+				  neigh_node->addr))
+		goto nla_put_failure;
+
+	if (neigh_node &&
+	    nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
+			neigh_node->if_incoming->net_dev->ifindex))
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+/**
+ * batadv_netlink_notify_route() - send route events to listeners
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ev: Route event type (add, del or change)
+ * @orig_node: The route destination node
+ * @neigh_node: Best neighbour for this route
+ * @best: This is a globally best route
+ *
+ * Return: 0 on success, < 0 on error
+ */
+int batadv_netlink_notify_route(struct batadv_priv *bat_priv,
+				enum batadv_nl_commands ev,
+				struct batadv_orig_node *orig_node,
+				struct batadv_neigh_node *neigh_node,
+				bool best)
+{
+	struct sk_buff *msg;
+	int ret;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+	if (!msg)
+		return -ENOMEM;
+
+	ret = batadv_netlink_route_fill(msg, bat_priv, ev, orig_node,
+					neigh_node, best, 0, 0, 0);
+	if (ret < 0) {
+		nlmsg_free(msg);
+		return ret;
+	}
+
+	genlmsg_multicast_netns(&batadv_netlink_family,
+				dev_net(bat_priv->soft_iface), msg, 0,
+				BATADV_NL_MCGRP_ROUTE, GFP_KERNEL);
+	return 0;
+}
+
 /**
  * batadv_get_softif_from_info() - Retrieve soft interface from genl attributes
  * @net: the applicable net namespace
diff -ruw linux-6.4/net/batman-adv/netlink.h linux-6.4-fbx/net/batman-adv/netlink.h
--- linux-6.4/net/batman-adv/netlink.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/netlink.h	2023-12-21 17:30:06.449516617 +0100
@@ -27,6 +27,12 @@
 int batadv_netlink_notify_vlan(struct batadv_priv *bat_priv,
 			       struct batadv_softif_vlan *vlan);
 
+int batadv_netlink_notify_route(struct batadv_priv *bat_priv,
+				enum batadv_nl_commands ev,
+				struct batadv_orig_node *orig_node,
+				struct batadv_neigh_node *neigh_node,
+				bool best);
+
 extern struct genl_family batadv_netlink_family;
 
 #endif /* _NET_BATMAN_ADV_NETLINK_H_ */
diff -ruw linux-6.4/net/batman-adv/originator.c linux-6.4-fbx/net/batman-adv/originator.c
--- linux-6.4/net/batman-adv/originator.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/originator.c	2023-12-21 17:30:06.453516726 +0100
@@ -42,6 +42,7 @@
 #include "routing.h"
 #include "soft-interface.h"
 #include "translation-table.h"
+#include "fbx/fbx.h"
 
 /* hash class keys */
 static struct lock_class_key batadv_orig_hash_lock_class_key;
@@ -249,6 +250,7 @@
 	hlist_del_init_rcu(&hardif_neigh->list);
 	spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
 
+	batadv_fbx_neigh_release(hardif_neigh);
 	batadv_hardif_put(hardif_neigh->if_incoming);
 	kfree_rcu(hardif_neigh, rcu);
 }
@@ -381,6 +383,8 @@
 	INIT_HLIST_NODE(&orig_ifinfo->list);
 	kref_init(&orig_ifinfo->refcount);
 
+	batadv_fbx_orig_ifinfo_init(orig_ifinfo);
+
 	kref_get(&orig_ifinfo->refcount);
 	hlist_add_head_rcu(&orig_ifinfo->list,
 			   &orig_node->ifinfo_list);
@@ -517,6 +521,7 @@
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct batadv_hardif_neigh_node *hardif_neigh;
+	int ret;
 
 	spin_lock_bh(&hard_iface->neigh_list_lock);
 
@@ -537,6 +542,13 @@
 	hardif_neigh->last_seen = jiffies;
 
 	kref_init(&hardif_neigh->refcount);
+	ret = batadv_fbx_neigh_init(hardif_neigh);
+	if (ret) {
+		batadv_hardif_put(hard_iface);
+		kfree(hardif_neigh);
+		hardif_neigh = NULL;
+		goto out;
+	}
 
 	if (bat_priv->algo_ops->neigh.hardif_init)
 		bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
@@ -776,6 +788,8 @@
 
 	orig_ifinfo = container_of(ref, struct batadv_orig_ifinfo, refcount);
 
+	batadv_fbx_orig_ifinfo_release(orig_ifinfo);
+
 	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
 		batadv_hardif_put(orig_ifinfo->if_outgoing);
 
@@ -796,6 +810,8 @@
 
 	orig_node = container_of(rcu, struct batadv_orig_node, rcu);
 
+	batadv_fbx_orig_release(orig_node);
+
 	batadv_mcast_purge_orig(orig_node);
 
 	batadv_frag_purge_orig(orig_node, NULL);
@@ -906,7 +922,7 @@
 	struct batadv_orig_node *orig_node;
 	struct batadv_orig_node_vlan *vlan;
 	unsigned long reset_time;
-	int i;
+	int ret, i;
 
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 		   "Creating new originator: %pM\n", addr);
@@ -964,6 +980,10 @@
 		orig_node->fragments[i].size = 0;
 	}
 
+	ret = batadv_fbx_orig_init(orig_node);
+	if (ret)
+		goto free_orig_node;
+
 	return orig_node;
 free_orig_node:
 	kfree(orig_node);
diff -ruw linux-6.4/net/batman-adv/routing.c linux-6.4-fbx/net/batman-adv/routing.c
--- linux-6.4/net/batman-adv/routing.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/routing.c	2023-12-21 17:30:06.453516726 +0100
@@ -37,6 +37,8 @@
 #include "tp_meter.h"
 #include "translation-table.h"
 #include "tvlv.h"
+#include "netlink.h"
+#include "fbx/fbx.h"
 
 static int batadv_route_unicast_packet(struct sk_buff *skb,
 				       struct batadv_hard_iface *recv_if);
@@ -57,6 +59,7 @@
 {
 	struct batadv_orig_ifinfo *orig_ifinfo;
 	struct batadv_neigh_node *curr_router;
+	enum batadv_nl_commands cmd;
 
 	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, recv_if);
 	if (!orig_ifinfo)
@@ -82,6 +85,7 @@
 
 	/* route deleted */
 	if (curr_router && !neigh_node) {
+		cmd = BATADV_CMD_DEL_ROUTE;
 		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
 			   "Deleting route towards: %pM\n", orig_node->orig);
 		batadv_tt_global_del_orig(bat_priv, orig_node, -1,
@@ -89,11 +93,13 @@
 
 	/* route added */
 	} else if (!curr_router && neigh_node) {
+		cmd = BATADV_CMD_ADD_ROUTE;
 		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
 			   "Adding route towards: %pM (via %pM)\n",
 			   orig_node->orig, neigh_node->addr);
 	/* route changed */
 	} else if (neigh_node && curr_router) {
+		cmd = BATADV_CMD_CHANGE_ROUTE;
 		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
 			   "Changing route towards: %pM (now via %pM - was via %pM)\n",
 			   orig_node->orig, neigh_node->addr,
@@ -102,6 +108,10 @@
 
 	/* decrease refcount of previous best neighbor */
 	batadv_neigh_node_put(curr_router);
+
+	if (recv_if == BATADV_IF_DEFAULT)
+		batadv_netlink_notify_route(bat_priv, cmd, orig_node,
+					    neigh_node, true);
 }
 
 /**
@@ -946,7 +956,7 @@
 	int check, hdr_size = sizeof(*unicast_packet);
 	enum batadv_subtype subtype;
 	int ret = NET_RX_DROP;
-	bool is4addr, is_gw;
+	bool is4addr, is_gw, shortcut;
 
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 	is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
@@ -970,8 +980,10 @@
 
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
+	shortcut = batadv_fbx_shortcut(bat_priv, unicast_packet->dest);
+
 	/* packet for me */
-	if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
+	if (shortcut || batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
 		/* If this is a unicast packet from another backgone gw,
 		 * drop it.
 		 */
diff -ruw linux-6.4/net/batman-adv/send.c linux-6.4-fbx/net/batman-adv/send.c
--- linux-6.4/net/batman-adv/send.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/send.c	2023-12-12 17:24:34.167627317 +0100
@@ -34,6 +34,8 @@
 #include "gateway_client.h"
 #include "hard-interface.h"
 #include "log.h"
+#include "fbx/fbx.h"
+#include "fbx/mtu.h"
 #include "network-coding.h"
 #include "originator.h"
 #include "routing.h"
@@ -192,7 +194,7 @@
 	 * it if needed.
 	 */
 	if (atomic_read(&bat_priv->fragmentation) &&
-	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
+	    skb->len > batadv_mtu_get_for_neigh(neigh_node->hardif_neigh)) {
 		/* Fragment and send packet. */
 		ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
 		/* skb was consumed */
@@ -408,7 +410,7 @@
 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
 	struct batadv_orig_node *orig_node;
 	u8 *src, *dst;
-	int ret;
+	int ret = NET_XMIT_DROP;
 
 	src = ethhdr->h_source;
 	dst = ethhdr->h_dest;
@@ -420,6 +422,10 @@
 	}
 	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);
 
+	/* unknown unicast (no tt entry for dest), handle same as broadcast */
+	if (!orig_node)
+		return batadv_send_bcast_packet(bat_priv, skb, 0, true);
+
 	ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
 				      packet_subtype, orig_node, vid);
 
@@ -990,13 +996,48 @@
  *
  * Consumes the provided skb.
  */
-void batadv_send_bcast_packet(struct batadv_priv *bat_priv,
+int batadv_send_bcast_packet(struct batadv_priv *bat_priv,
 			      struct sk_buff *skb,
 			      unsigned long delay,
 			      bool own_packet)
 {
+	struct batadv_bcast_packet *bcast_packet;
+	struct batadv_hard_iface *primary_if = NULL;
+	u32 seqno;
+
+	primary_if = batadv_primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto drop;
+
+	if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
+		goto drop;
+
+	bcast_packet = (struct batadv_bcast_packet *)skb->data;
+	bcast_packet->version = BATADV_COMPAT_VERSION;
+	bcast_packet->ttl = BATADV_TTL - 1;
+
+	/* batman packet type: broadcast */
+	bcast_packet->packet_type = BATADV_BCAST;
+	bcast_packet->reserved = 0;
+
+	/* hw address of first interface is the orig mac because only
+	 * this mac is known throughout the mesh
+	 */
+	ether_addr_copy(bcast_packet->orig,
+			primary_if->net_dev->dev_addr);
+
+	/* set broadcast sequence number */
+	seqno = atomic_inc_return(&bat_priv->bcast_seqno);
+	bcast_packet->seqno = htonl(seqno);
+
 	__batadv_forw_bcast_packet(bat_priv, skb, delay, own_packet);
+
 	consume_skb(skb);
+	batadv_hardif_put(primary_if);
+	return NET_XMIT_SUCCESS;
+drop:
+	kfree_skb(skb);
+	return NET_XMIT_DROP;
 }
 
 /**
diff -ruw linux-6.4/net/batman-adv/send.h linux-6.4-fbx/net/batman-adv/send.h
--- linux-6.4/net/batman-adv/send.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/send.h	2023-12-12 17:24:34.167627317 +0100
@@ -43,7 +43,7 @@
 			     struct sk_buff *skb,
 			     unsigned long delay,
 			     bool own_packet);
-void batadv_send_bcast_packet(struct batadv_priv *bat_priv,
+int batadv_send_bcast_packet(struct batadv_priv *bat_priv,
 			      struct sk_buff *skb,
 			      unsigned long delay,
 			      bool own_packet);
diff -ruw linux-6.4/net/batman-adv/soft-interface.c linux-6.4-fbx/net/batman-adv/soft-interface.c
--- linux-6.4/net/batman-adv/soft-interface.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/soft-interface.c	2024-03-22 17:24:19.542846394 +0100
@@ -50,6 +50,7 @@
 #include "network-coding.h"
 #include "send.h"
 #include "translation-table.h"
+#include "fbx/fbx.h"
 
 /**
  * batadv_skb_head_push() - Increase header size and move (push) head pointer
@@ -174,13 +175,27 @@
 {
 }
 
+#ifndef ETH_P_BCOM_DSA
+#define ETH_P_BCOM_DSA 0x8874
+#endif
+
+#ifndef ETH_P_REALTEK_RRCP
+#define ETH_P_REALTEK_RRCP 0x8899
+#endif
+
+#ifndef RRCP_PROTO_RLDP
+#define RRCP_PROTO_RLDP 0x03
+#endif
+
+#ifndef RRCP_PROTO_RLDP2
+#define RRCP_PROTO_RLDP2 0x23
+#endif
+
 static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
 				       struct net_device *soft_iface)
 {
 	struct ethhdr *ethhdr;
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
-	struct batadv_hard_iface *primary_if = NULL;
-	struct batadv_bcast_packet *bcast_packet;
 	static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
 					      0x00, 0x00};
 	static const u8 ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
@@ -193,12 +208,12 @@
 	unsigned long brd_delay = 0;
 	bool do_bcast = false, client_added;
 	unsigned short vid;
-	u32 seqno;
 	int gw_mode;
 	enum batadv_forw_mode forw_mode = BATADV_FORW_BCAST;
 	int mcast_is_routable = 0;
 	int network_offset = ETH_HLEN;
 	__be16 proto;
+	u8 rrcp_proto = 0;
 
 	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
 		goto dropped;
@@ -237,6 +252,9 @@
 	if (batadv_bla_tx(bat_priv, skb, vid))
 		goto dropped;
 
+	if (!batadv_fbx_check_skb_tx(bat_priv, skb, vid))
+		goto dropped;
+
 	/* skb->data might have been reallocated by batadv_bla_tx() */
 	ethhdr = eth_hdr(skb);
 
@@ -253,18 +271,42 @@
 	/* Snoop address candidates from DHCPACKs for early DAT filling */
 	batadv_dat_snoop_outgoing_dhcp_ack(bat_priv, skb, proto, vid);
 
-	/* don't accept stp packets. STP does not help in meshes.
-	 * better use the bridge loop avoidance ...
+	/* don't accept stp packets with BLA. STP does not help in meshes.
+	 * better use the bridge loop avoidance... But as some devices, such
+	 * as SONOS ones, still use STP, if BLA is not enabled let those
+	 * packets through and warn once.
 	 *
 	 * The same goes for ECTP sent at least by some Cisco Switches,
 	 * it might confuse the mesh when used with bridge loop avoidance.
+	 * Those seem safe to drop with or without BLA.
 	 */
-	if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
+#ifdef CONFIG_BATMAN_ADV_BLA
+	if (atomic_read(&bat_priv->bridge_loop_avoidance) &&
+	    batadv_compare_eth(ethhdr->h_dest, stp_addr))
 		goto dropped;
+#else
+	WARN_ONCE(batadv_compare_eth(ethhdr->h_dest, stp_addr),
+		  "Some STP packets stroll around this network");
+#endif
 
 	if (batadv_compare_eth(ethhdr->h_dest, ectp_addr))
 		goto dropped;
 
+	/* Also don't accept Broadcom and Realtek loop detection packets as
+	 * they do not play well on mesh.
+	 */
+	if (unlikely(is_multicast_ether_addr(ethhdr->h_dest))) {
+		if (unlikely(proto == htons(ETH_P_BCOM_DSA)))
+			goto dropped;
+		if (unlikely(proto == htons(ETH_P_REALTEK_RRCP))) {
+			skb_copy_bits(skb, sizeof(*ethhdr), &rrcp_proto, 1);
+			if (rrcp_proto == RRCP_PROTO_RLDP)
+				goto dropped;
+			if (rrcp_proto == RRCP_PROTO_RLDP2)
+				goto dropped;
+		}
+	}
+
 	gw_mode = atomic_read(&bat_priv->gw.mode);
 	if (is_multicast_ether_addr(ethhdr->h_dest)) {
 		/* if gw mode is off, broadcast every packet */
@@ -318,10 +360,6 @@
 
 	/* ethernet packet should be broadcasted */
 	if (do_bcast) {
-		primary_if = batadv_primary_if_get_selected(bat_priv);
-		if (!primary_if)
-			goto dropped;
-
 		/* in case of ARP request, we do not immediately broadcasti the
 		 * packet, instead we first wait for DAT to try to retrieve the
 		 * correct ARP entry
@@ -329,28 +367,9 @@
 		if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
 			brd_delay = msecs_to_jiffies(ARP_REQ_DELAY);
 
-		if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
-			goto dropped;
-
-		bcast_packet = (struct batadv_bcast_packet *)skb->data;
-		bcast_packet->version = BATADV_COMPAT_VERSION;
-		bcast_packet->ttl = BATADV_TTL - 1;
-
-		/* batman packet type: broadcast */
-		bcast_packet->packet_type = BATADV_BCAST;
-		bcast_packet->reserved = 0;
-
-		/* hw address of first interface is the orig mac because only
-		 * this mac is known throughout the mesh
-		 */
-		ether_addr_copy(bcast_packet->orig,
-				primary_if->net_dev->dev_addr);
-
-		/* set broadcast sequence number */
-		seqno = atomic_inc_return(&bat_priv->bcast_seqno);
-		bcast_packet->seqno = htonl(seqno);
-
-		batadv_send_bcast_packet(bat_priv, skb, brd_delay, true);
+		ret = batadv_send_bcast_packet(bat_priv, skb, brd_delay, true);
+		if (ret != NET_XMIT_SUCCESS)
+			goto dropped_freed;
 	/* unicast packet */
 	} else {
 		/* DHCP packets going to a server will use the GW feature */
@@ -385,7 +404,6 @@
 dropped_freed:
 	batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
 end:
-	batadv_hardif_put(primary_if);
 	return NETDEV_TX_OK;
 }
 
@@ -431,6 +449,9 @@
 	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
 		goto dropped;
 
+	if (!batadv_fbx_check_skb_rx(bat_priv, packet_type, skb))
+		goto dropped;
+
 	vid = batadv_get_vid(skb, 0);
 	ethhdr = eth_hdr(skb);
 
@@ -774,7 +795,8 @@
 	atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
 
 	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
-	atomic_set(&bat_priv->bcast_seqno, 1);
+	get_random_bytes(&random_seqno, sizeof(random_seqno));
+	atomic_set(&bat_priv->bcast_seqno, random_seqno);
 	atomic_set(&bat_priv->tt.vn, 0);
 	atomic_set(&bat_priv->tt.local_changes, 0);
 	atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
diff -ruw linux-6.4/net/batman-adv/translation-table.c linux-6.4-fbx/net/batman-adv/translation-table.c
--- linux-6.4/net/batman-adv/translation-table.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/translation-table.c	2023-12-12 17:24:34.167627317 +0100
@@ -50,6 +50,7 @@
 #include "originator.h"
 #include "soft-interface.h"
 #include "tvlv.h"
+#include "fbx/fbx.h"
 
 static struct kmem_cache *batadv_tl_cache __read_mostly;
 static struct kmem_cache *batadv_tg_cache __read_mostly;
@@ -167,7 +168,7 @@
  * Return: a pointer to the corresponding tt_local_entry struct if the client is
  * found, NULL otherwise.
  */
-static struct batadv_tt_local_entry *
+struct batadv_tt_local_entry *
 batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
 			  unsigned short vid)
 {
@@ -244,8 +245,7 @@
  *  possibly release it
  * @tt_local_entry: tt_local_entry to be free'd
  */
-static void
-batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry)
+void batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry)
 {
 	if (!tt_local_entry)
 		return;
@@ -456,7 +456,7 @@
  * @tt_local_entry: the TT entry involved in the event
  * @event_flags: flags to store in the event structure
  */
-static void batadv_tt_local_event(struct batadv_priv *bat_priv,
+void batadv_tt_local_event(struct batadv_priv *bat_priv,
 				  struct batadv_tt_local_entry *tt_local_entry,
 				  u8 event_flags)
 {
@@ -645,8 +645,9 @@
 	struct hlist_head *head;
 	struct batadv_tt_orig_list_entry *orig_entry;
 	int hash_added, table_size, packet_size_max;
-	bool ret = false;
+	bool roam = true, ret = false;
 	bool roamed_back = false;
+	u16 save_flags;
 	u8 remote_flags;
 	u32 match_mark;
 
@@ -708,6 +709,8 @@
 
 	/* increase the refcounter of the related vlan */
 	vlan = batadv_softif_vlan_get(bat_priv, vid);
+	if (!vlan)
+		vlan = batadv_softif_vlan_get(bat_priv, 0);
 	if (!vlan) {
 		net_ratelimited_function(batadv_info, soft_iface,
 					 "adding TT local entry %pM to non-existent VLAN %d\n",
@@ -758,10 +761,17 @@
 	batadv_tt_local_event(bat_priv, tt_local, BATADV_NO_FLAGS);
 
 check_roaming:
+
+	save_flags = tt_local->common.flags;
+	roam = batadv_fbx_tt_local_add(bat_priv, tt_local, tt_global, ifindex);
+	if (save_flags != tt_local->common.flags)
+		batadv_tt_local_event(bat_priv, tt_local, BATADV_NO_FLAGS);
+
 	/* Check whether it is a roaming, but don't do anything if the roaming
 	 * process has already been handled
 	 */
-	if (tt_global && !(tt_global->common.flags & BATADV_TT_CLIENT_ROAM)) {
+	if (roam && tt_global &&
+	    !(tt_global->common.flags & BATADV_TT_CLIENT_ROAM)) {
 		/* These node are probably going to update their tt table */
 		head = &tt_global->orig_list;
 		rcu_read_lock();
@@ -774,7 +784,6 @@
 		if (roamed_back) {
 			batadv_tt_global_free(bat_priv, tt_global,
 					      "Roaming canceled");
-			tt_global = NULL;
 		} else {
 			/* The global entry has to be marked as ROAMING and
 			 * has to be kept for consistency purpose
@@ -1081,6 +1090,8 @@
 
 	vlan = batadv_softif_vlan_get(bat_priv, common->vid);
 	if (!vlan)
+		vlan = batadv_softif_vlan_get(bat_priv, 0);
+	if (!vlan)
 		return 0;
 
 	crc = vlan->tt.crc;
@@ -1252,6 +1263,7 @@
 	struct batadv_tt_local_entry *tt_local_entry;
 	u16 flags, curr_flags = BATADV_NO_FLAGS;
 	struct hlist_node *tt_removed_node;
+	bool remove;
 
 	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
 	if (!tt_local_entry)
@@ -1259,6 +1271,16 @@
 
 	curr_flags = tt_local_entry->common.flags;
 
+	remove = batadv_fbx_tt_local_del(bat_priv, tt_local_entry);
+	if (!remove) {
+		/* Do not delete the local entry if another SLAP node still
+		 * has references on it, just mark it shallow
+		 */
+		batadv_tt_local_event(bat_priv, tt_local_entry,
+				      BATADV_NO_FLAGS);
+		goto out;
+	}
+
 	flags = BATADV_TT_CLIENT_DEL;
 	/* if this global entry addition is due to a roaming, the node has to
 	 * mark the local entry as "roamed" in order to correctly reroute
@@ -1329,6 +1351,13 @@
 		if (!batadv_has_timed_out(tt_local_entry->last_seen, timeout))
 			continue;
 
+		if (!batadv_fbx_tt_local_del(bat_priv, tt_local_entry)) {
+			/* Send only flag changes */
+			batadv_tt_local_event(bat_priv, tt_local_entry,
+					      BATADV_NO_FLAGS);
+			continue;
+		}
+
 		batadv_tt_local_set_pending(bat_priv, tt_local_entry,
 					    BATADV_TT_CLIENT_DEL, "timed out");
 	}
@@ -1595,7 +1624,7 @@
 {
 	struct batadv_tt_global_entry *tt_global_entry;
 	struct batadv_tt_local_entry *tt_local_entry;
-	bool ret = false;
+	bool remove = true, ret = false;
 	int hash_added;
 	struct batadv_tt_common_entry *common;
 	u16 local_flags;
@@ -1711,6 +1740,8 @@
 	batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn,
 					flags & BATADV_TT_SYNC_MASK);
 
+	remove = batadv_fbx_tt_global_add(bat_priv, tt_global_entry, orig_node);
+
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Creating new global tt entry: %pM (vid: %d, via %pM)\n",
 		   common->addr, batadv_print_vid(common->vid),
@@ -1721,7 +1752,7 @@
 	/* Do not remove multicast addresses from the local hash on
 	 * global additions
 	 */
-	if (is_multicast_ether_addr(tt_addr))
+	if (!remove || is_multicast_ether_addr(tt_addr))
 		goto out;
 
 	/* remove address from local hash if present */
@@ -1730,13 +1761,13 @@
 					     flags & BATADV_TT_CLIENT_ROAM);
 	tt_global_entry->common.flags |= local_flags & BATADV_TT_CLIENT_WIFI;
 
-	if (!(flags & BATADV_TT_CLIENT_ROAM))
+out:
+	if (tt_global_entry && !(flags & BATADV_TT_CLIENT_ROAM))
 		/* this is a normal global add. Therefore the client is not in a
 		 * roaming state anymore.
 		 */
 		tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
 
-out:
 	batadv_tt_global_entry_put(tt_global_entry);
 	batadv_tt_local_entry_put(tt_local_entry);
 	return ret;
@@ -2166,7 +2197,7 @@
 	local_entry = batadv_tt_local_hash_find(bat_priv,
 						tt_global_entry->common.addr,
 						vid);
-	if (local_entry) {
+	if (local_entry && local_entry->common.flags & BATADV_TT_CLIENT_SEEN) {
 		/* local entry exists, case 2: client roamed to us. */
 		batadv_tt_global_del_orig_list(tt_global_entry);
 		batadv_tt_global_free(bat_priv, tt_global_entry, message);
@@ -2177,6 +2208,8 @@
 	}
 
 out:
+	/* TODO check roaming */
+	batadv_fbx_tt_global_del(bat_priv, tt_global_entry, orig_node);
 	batadv_tt_global_entry_put(tt_global_entry);
 	batadv_tt_local_entry_put(local_entry);
 }
diff -ruw linux-6.4/net/batman-adv/translation-table.h linux-6.4-fbx/net/batman-adv/translation-table.h
--- linux-6.4/net/batman-adv/translation-table.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/translation-table.h	2023-12-12 17:24:34.167627317 +0100
@@ -21,6 +21,10 @@
 u16 batadv_tt_local_remove(struct batadv_priv *bat_priv,
 			   const u8 *addr, unsigned short vid,
 			   const char *message, bool roaming);
+struct batadv_tt_local_entry *
+batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
+			  unsigned short vid);
+void batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry);
 int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb);
 int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb);
 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
diff -ruw linux-6.4/net/batman-adv/types.h linux-6.4-fbx/net/batman-adv/types.h
--- linux-6.4/net/batman-adv/types.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/batman-adv/types.h	2023-12-21 17:30:06.453516726 +0100
@@ -275,6 +275,19 @@
 
 	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
+#ifdef CONFIG_BATMAN_ADV_FBX_PERIF_ROUTER
+	/**
+	 * @fbx_ifrouter: best router that should be used to reach this
+	 * originator from this interface. Due to internal BATMAN_V router
+	 * management algorithm, orig_ifinfo->router is the best router to
+	 * reach originator from any interface not only this one,
+	 * orig_ifinfo->fbx_ifrouter is used to get best router for this
+	 * interface.
+	 */
+	struct batadv_neigh_node __rcu *fbx_ifrouter;
+	/** @fbx_ifrouter_lock: lock to protect fbx_ifrouter rcu update */
+	spinlock_t fbx_ifrouter_lock;
+#endif
 };
 
 /**
@@ -527,6 +540,10 @@
 
 	/** @bat_iv: B.A.T.M.A.N. IV private structure */
 	struct batadv_orig_bat_iv bat_iv;
+#ifdef CONFIG_BATMAN_ADV_FBX_SLAP
+	struct batadv_slap_segid *slap_segid;
+	spinlock_t slap_lock;
+#endif
 };
 
 /**
@@ -601,6 +618,74 @@
 	struct work_struct metric_work;
 };
 
+#ifdef CONFIG_BATMAN_ADV_FBX_MTU
+/**
+ * struct batadv_mtu - MTU discovery bookkeeping data
+ */
+struct batadv_mtu {
+	/** @neigh: Neighbour hardif */
+	struct batadv_hardif_neigh_node __rcu *neigh;
+	/** @periodic_work: periodic work */
+	struct delayed_work periodic_work;
+	/** @recv_work: timeout work */
+	struct delayed_work recv_work;
+	/** @release_work: release work */
+	struct delayed_work release_work;
+	/** @mtu: the final MTU to use for this link */
+	atomic_t mtu;
+	/** @delay: current delay, not critical, use (READ/WRITE)_ONCE */
+	unsigned long delay;
+};
+#endif
+
+#ifdef CONFIG_BATMAN_ADV_FBX_SLAP
+/**
+ * struct batadv_slap_segid - SLAP segment address used to uniquely
+ * identify a SLAP segment
+ */
+struct batadv_slap_segid {
+	/* @rcu: struct used for freeing in a RCU-safe manner */
+	struct rcu_head rcu;
+	/* @addr: The SLAP master MAC address is used as unique segment ID */
+	u8 addr[ETH_ALEN];
+};
+
+/**
+ * struct batadv_slap_id - SLAP ID management structure, a SLAP ID is composed of the
+ * MAC address of the originator along with a priority
+ */
+struct batadv_slap_id {
+	/** @bat_priv: pointer to the mesh object */
+	struct batadv_priv *bat_priv;
+	/* @dw: delayed work used to schedule SLAP ID expiration */
+	struct delayed_work expire;
+	/** @refcount: Number of contexts using this slap id */
+	struct kref refcount;
+	/* @rcu: struct used for freeing in a RCU-safe manner */
+	struct rcu_head rcu;
+	/* @exp_time: Expiration time of this SLAP ID, in jiffies */
+	unsigned long exp_time;
+	/* @prio: SLAP ID priority part */
+	u32 prio;
+	/* @addr: SLAP ID addr part */
+	u8 addr[ETH_ALEN];
+};
+
+struct batadv_hardif_neigh_node;
+
+/**
+ * struct batadv_hardif_neigh_slap - SLAP specific neighbor information
+ */
+struct batadv_hardif_neigh_slap {
+	/** @neigh: Neighbour hardif */
+	struct batadv_hardif_neigh_node __rcu *neigh;
+	/** @announce: Work to announce our SLAP ID to this neighbor */
+	struct delayed_work announce;
+	/** @release: Work to release SLAP state for this neighbor */
+	struct delayed_work release;
+};
+#endif
+
 /**
  * struct batadv_hardif_neigh_node - unique neighbor per hard-interface
  */
@@ -632,6 +717,15 @@
 
 	/** @rcu: struct used for freeing in a RCU-safe manner */
 	struct rcu_head rcu;
+
+	/** @mtud: struct used for mtu discovery related stuff */
+#ifdef CONFIG_BATMAN_ADV_FBX_MTU
+	struct batadv_mtu *mtud;
+#endif
+#ifdef CONFIG_BATMAN_ADV_FBX_SLAP
+	/** @slap: SLAP specific neighbour information */
+	struct batadv_hardif_neigh_slap *slap;
+#endif
 };
 
 /**
@@ -708,6 +802,9 @@
 
 	/** @last_seqno: last sequence number known for this neighbor */
 	u32 last_seqno;
+
+	/** @half_duplex: throughput should suffer half duplex penalty */
+	bool half_duplex;
 };
 
 /**
@@ -1738,6 +1835,40 @@
 	/** @bat_v: B.A.T.M.A.N. V per soft-interface private data */
 	struct batadv_priv_bat_v bat_v;
 #endif
+
+#ifdef CONFIG_BATMAN_ADV_FBX
+	/** @fbx_nl_cmd_handlers: List of NL command handler */
+	struct hlist_head fbx_nl_handlers;
+	/** @fbx_tvlv_handlers: List of FBX specific TVLV handler */
+	struct hlist_head fbx_tvlv_handlers;
+	/** @fbx_tvlv_containers: List of FBX specific TVLV container */
+	struct hlist_head fbx_tvlv_containers;
+	/** @fbx_nl_lock: FBX specific NL handler list lock */
+	spinlock_t fbx_nl_lock;
+	/** @fbx_tvlv_lock: FBX specific TVLV handler list lock */
+	spinlock_t fbx_tvlv_lock;
+#ifdef CONFIG_BATMAN_ADV_FBX_MTU
+	/** @mtu_seqno: MTU discovery sequence number */
+	atomic_t mtu_seqno;
+#endif
+#ifdef CONFIG_BATMAN_ADV_FBX_SLAP
+	/**
+	 * @slap_lock: Common SLAP lock used to serialize rcu write side on
+	 * both bat_priv and neighbor
+	 */
+	spinlock_t slap_lock;
+	/** @slap_master: Current SLAP master */
+	struct batadv_slap_id __rcu *slap_master;
+	/** @slap_iface: Current SLAP hard interface */
+	struct  batadv_hard_iface __rcu *slap_iface;
+	/** @slap_id: Current SLAP ID */
+	struct  batadv_slap_id __rcu *slap_id;
+	/** @slap_skb: SLAP ID SKB model to use */
+	struct  sk_buff __rcu *slap_skb;
+	/* @slap_wq: Common workqueue for delayed SLAP work */
+	struct workqueue_struct *slap_wq;
+#endif
+#endif
 };
 
 #ifdef CONFIG_BATMAN_ADV_BLA
diff -ruw linux-6.4/net/bridge/Kconfig linux-6.4-fbx/net/bridge/Kconfig
--- linux-6.4/net/bridge/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/bridge/Kconfig	2023-11-07 16:12:24.794176326 +0100
@@ -34,6 +34,11 @@
 
 	  If unsure, say N.
 
+config BRIDGE_STATE_MESSAGES
+	bool "show port status change in kernel log"
+	depends on BRIDGE
+	default y
+
 config BRIDGE_IGMP_SNOOPING
 	bool "IGMP/MLD snooping"
 	depends on BRIDGE
diff -ruw linux-6.4/net/bridge/br_device.c linux-6.4-fbx/net/bridge/br_device.c
--- linux-6.4/net/bridge/br_device.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/bridge/br_device.c	2023-05-22 20:06:45.323882529 +0200
@@ -213,6 +213,7 @@
 	struct net_bridge *br = netdev_priv(dev);
 
 	dev->mtu = new_mtu;
+	br->forced_mtu = new_mtu;
 
 	/* this flag will be cleared if the MTU was automatically adjusted */
 	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
diff -ruw linux-6.4/net/bridge/br_fdb.c linux-6.4-fbx/net/bridge/br_fdb.c
--- linux-6.4/net/bridge/br_fdb.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/bridge/br_fdb.c	2023-12-21 17:30:06.453516726 +0100
@@ -850,6 +850,21 @@
 		  test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags));
 }
 
+bool br_fdb_update_only(struct net_bridge *br,
+			struct net_bridge_port *source,
+			const unsigned char *addr)
+{
+	struct net_bridge_fdb_entry *fdb;
+
+	fdb = br_fdb_find_rcu(br, addr, 0);
+	if (!fdb)
+		return false;
+
+	fdb->updated = jiffies;
+	return true;
+}
+EXPORT_SYMBOL(br_fdb_update_only);
+
 void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 		   const unsigned char *addr, u16 vid, unsigned long flags)
 {
@@ -992,6 +1007,7 @@
 	rcu_read_unlock();
 	return err;
 }
+EXPORT_SYMBOL(br_fdb_find_rcu);
 
 /* returns true if the fdb is modified */
 static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify)
diff -ruw linux-6.4/net/bridge/br_if.c linux-6.4-fbx/net/bridge/br_if.c
--- linux-6.4/net/bridge/br_if.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/bridge/br_if.c	2023-05-22 20:06:45.323882529 +0200
@@ -500,13 +500,13 @@
 static int br_mtu_min(const struct net_bridge *br)
 {
 	const struct net_bridge_port *p;
-	int ret_mtu = 0;
+	int ret_mtu = min_t(unsigned int, br->forced_mtu, ETH_DATA_LEN);
 
 	list_for_each_entry(p, &br->port_list, list)
 		if (!ret_mtu || ret_mtu > p->dev->mtu)
 			ret_mtu = p->dev->mtu;
 
-	return ret_mtu ? ret_mtu : ETH_DATA_LEN;
+	return ret_mtu;
 }
 
 void br_mtu_auto_adjust(struct net_bridge *br)
diff -ruw linux-6.4/net/bridge/br_private.h linux-6.4-fbx/net/bridge/br_private.h
--- linux-6.4/net/bridge/br_private.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/bridge/br_private.h	2023-12-12 17:24:34.171627426 +0100
@@ -569,6 +569,7 @@
 #if IS_ENABLED(CONFIG_BRIDGE_CFM)
 	struct hlist_head		mep_list;
 #endif
+	unsigned int			forced_mtu;
 };
 
 struct br_input_skb_cb {
@@ -623,8 +624,14 @@
 	br_printk(KERN_WARNING, __br, format, ##args)
 #define br_notice(__br, format, args...)		\
 	br_printk(KERN_NOTICE, __br, format, ##args)
+
+#ifdef CONFIG_BRIDGE_STATE_MESSAGES
 #define br_info(__br, format, args...)			\
 	br_printk(KERN_INFO, __br, format, ##args)
+#else
+#define br_info(__br, format, args...)			\
+	pr_debug("%s: " format,  (__br)->dev->name, ##args)
+#endif
 
 #define br_debug(br, format, args...)			\
 	pr_debug("%s: " format,  (br)->dev->name, ##args)
@@ -811,6 +818,9 @@
 		   unsigned long off);
 int br_fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
 		     const unsigned char *addr, u16 vid);
+bool br_fdb_update_only(struct net_bridge *br,
+			struct net_bridge_port *source,
+			const unsigned char *addr);
 void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 		   const unsigned char *addr, u16 vid, unsigned long flags);
 
diff -ruw linux-6.4/net/core/Makefile linux-6.4-fbx/net/core/Makefile
--- linux-6.4/net/core/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/core/Makefile	2023-11-07 13:38:44.062256691 +0100
@@ -13,7 +13,7 @@
 			neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
 			sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \
 			fib_notifier.o xdp.o flow_offload.o gro.o \
-			netdev-genl.o netdev-genl-gen.o
+			netdev-genl.o netdev-genl-gen.o gso.o
 
 obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o
 
diff -ruw linux-6.4/net/core/dev.c linux-6.4-fbx/net/core/dev.c
--- linux-6.4/net/core/dev.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/core/dev.c	2023-12-12 17:24:34.171627426 +0100
@@ -150,6 +150,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/prandom.h>
 #include <linux/once_lite.h>
+#include <linux/kthread.h>
 
 #include "dev.h"
 #include "net-sysfs.h"
@@ -165,6 +166,10 @@
 					   struct netlink_ext_ack *extack);
 static struct napi_struct *napi_by_id(unsigned int napi_id);
 
+#ifdef CONFIG_NETRXTHREAD
+struct krxd gkrxd[CONFIG_NETRXTHREAD_RX_QUEUE];
+#endif
+
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -3209,7 +3214,7 @@
 	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
 }
 
-static void skb_warn_bad_offload(const struct sk_buff *skb)
+void skb_warn_bad_offload(const struct sk_buff *skb)
 {
 	static const netdev_features_t null_features;
 	struct net_device *dev = skb->dev;
@@ -3338,74 +3343,6 @@
 	return vlan_get_protocol_and_depth(skb, type, depth);
 }
 
-/* openvswitch calls this on rx path, so we need a different check.
- */
-static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
-{
-	if (tx_path)
-		return skb->ip_summed != CHECKSUM_PARTIAL &&
-		       skb->ip_summed != CHECKSUM_UNNECESSARY;
-
-	return skb->ip_summed == CHECKSUM_NONE;
-}
-
-/**
- *	__skb_gso_segment - Perform segmentation on skb.
- *	@skb: buffer to segment
- *	@features: features for the output path (see dev->features)
- *	@tx_path: whether it is called in TX path
- *
- *	This function segments the given skb and returns a list of segments.
- *
- *	It may return NULL if the skb requires no segmentation.  This is
- *	only possible when GSO is used for verifying header integrity.
- *
- *	Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb.
- */
-struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
-				  netdev_features_t features, bool tx_path)
-{
-	struct sk_buff *segs;
-
-	if (unlikely(skb_needs_check(skb, tx_path))) {
-		int err;
-
-		/* We're going to init ->check field in TCP or UDP header */
-		err = skb_cow_head(skb, 0);
-		if (err < 0)
-			return ERR_PTR(err);
-	}
-
-	/* Only report GSO partial support if it will enable us to
-	 * support segmentation on this frame without needing additional
-	 * work.
-	 */
-	if (features & NETIF_F_GSO_PARTIAL) {
-		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
-		struct net_device *dev = skb->dev;
-
-		partial_features |= dev->features & dev->gso_partial_features;
-		if (!skb_gso_ok(skb, features | partial_features))
-			features &= ~NETIF_F_GSO_PARTIAL;
-	}
-
-	BUILD_BUG_ON(SKB_GSO_CB_OFFSET +
-		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
-
-	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
-	SKB_GSO_CB(skb)->encap_level = 0;
-
-	skb_reset_mac_header(skb);
-	skb_reset_mac_len(skb);
-
-	segs = skb_mac_gso_segment(skb, features);
-
-	if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
-		skb_warn_bad_offload(skb);
-
-	return segs;
-}
-EXPORT_SYMBOL(__skb_gso_segment);
 
 /* Take action when hardware reception checksum errors are detected. */
 #ifdef CONFIG_BUG
@@ -4959,6 +4896,23 @@
 	return ret;
 }
 
+/* Start Freebox added code */
+#if defined(CONFIG_FREEBOX_DIVERTER) || defined(CONFIG_FREEBOX_DIVERTER_MODULE)
+int (*fbxdiverter_hook)(struct sk_buff *);
+
+static int handle_fbxdiverter(struct sk_buff *skb)
+{
+	/* try_module_get is missing here, so there is a race on
+	 * fbxdiverter module deletion */
+	if (!fbxdiverter_hook)
+		return 0;
+	return fbxdiverter_hook(skb);
+}
+
+EXPORT_SYMBOL(fbxdiverter_hook);
+#endif
+
+
 /**
  *	__netif_rx	-	Slightly optimized version of netif_rx
  *	@skb: buffer to post
@@ -5278,28 +5232,116 @@
 	return 0;
 }
 
+static int __netif_receive_skb_core_end(struct sk_buff **pskb, bool pfmemalloc,
+					struct packet_type **ppt_prev);
+
 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
 				    struct packet_type **ppt_prev)
 {
-	struct packet_type *ptype, *pt_prev;
-	rx_handler_func_t *rx_handler;
 	struct sk_buff *skb = *pskb;
-	struct net_device *orig_dev;
-	bool deliver_exact = false;
-	int ret = NET_RX_DROP;
-	__be16 type;
+#ifdef CONFIG_NETRXTHREAD
+	unsigned int len;
+	struct krxd *krxd;
+#endif
 
 	net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
 
 	trace_netif_receive_skb(skb);
 
-	orig_dev = skb->dev;
-
 	skb_reset_network_header(skb);
 	if (!skb_transport_header_was_set(skb))
 		skb_reset_transport_header(skb);
 	skb_reset_mac_len(skb);
 
+#if defined(CONFIG_FREEBOX_DIVERTER) || defined(CONFIG_FREEBOX_DIVERTER_MODULE)
+	if (handle_fbxdiverter(skb))
+		return NET_RX_SUCCESS;
+#endif
+
+#ifndef CONFIG_NETRXTHREAD
+	return __netif_receive_skb_core_end(pskb, pfmemalloc, ppt_prev);
+#else
+	if (pfmemalloc)
+		return __netif_receive_skb_core_end(pskb, pfmemalloc, ppt_prev);
+
+	BUILD_BUG_ON(ARRAY_SIZE(gkrxd) < 2);
+	krxd = &gkrxd[skb->rxthread_prio & 1];
+
+        /* queue the packet to the rx thread */
+	local_bh_disable();
+	len = skb_queue_len(&krxd->pkt_queue);
+	if (len < RXTHREAD_MAX_PKTS) {
+		__skb_queue_tail(&krxd->pkt_queue, skb);
+		krxd->stats_pkts++;
+		if (!len)
+			wake_up(&krxd->wq);
+	} else {
+		krxd->stats_dropped++;
+		dev_kfree_skb(skb);
+        }
+	local_bh_enable();
+	return NET_RX_SUCCESS;
+#endif
+}
+
+#ifdef CONFIG_NETRXTHREAD
+static int krxd_action(void *data)
+{
+	struct krxd *krxd = (struct krxd *)data;
+	unsigned int queue = krxd - gkrxd;
+	struct sk_buff *skb;
+
+	set_user_nice(current, queue > 0 ? -10 : -5);
+	current->flags |= PF_NOFREEZE;
+	__set_current_state(TASK_RUNNING);
+
+	local_bh_disable();
+	while (1) {
+		struct packet_type *pt_prev = NULL;
+		struct net_device *orig_dev;
+
+		skb = skb_dequeue(&krxd->pkt_queue);
+		if (!skb) {
+			local_bh_enable();
+			wait_event_interruptible(krxd->wq,
+						 skb_queue_len(&krxd->pkt_queue));
+			set_current_state(TASK_RUNNING);
+			local_bh_disable();
+			continue;
+		}
+
+		rcu_read_lock();
+		orig_dev = skb->dev;
+		__netif_receive_skb_core_end(&skb, false, &pt_prev);
+		if (pt_prev)
+			INDIRECT_CALL_INET(pt_prev->func,
+					   ipv6_rcv, ip_rcv, skb,
+					   skb->dev, pt_prev, orig_dev);
+		rcu_read_unlock();
+
+		/* only schedule when working on lowest prio queue */
+		if (queue == 0 && need_resched()) {
+			local_bh_enable();
+			schedule();
+			local_bh_disable();
+		}
+	}
+	return 0;
+}
+#endif
+
+static int __netif_receive_skb_core_end(struct sk_buff **pskb, bool pfmemalloc,
+					struct packet_type **ppt_prev)
+{
+	struct sk_buff *skb = *pskb;
+	struct packet_type *ptype, *pt_prev;
+	rx_handler_func_t *rx_handler;
+	struct net_device *orig_dev;
+	bool deliver_exact = false;
+	int ret = NET_RX_DROP;
+	__be16 type;
+
+	orig_dev = skb->dev;
 	pt_prev = NULL;
 
 another_round:
@@ -5457,7 +5499,9 @@
 	if (pt_prev) {
 		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
 			goto drop;
-		*ppt_prev = pt_prev;
+		else
+			ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
+						 skb->dev, pt_prev, orig_dev);
 	} else {
 drop:
 		if (!deliver_exact)
@@ -5488,10 +5532,16 @@
 	struct packet_type *pt_prev = NULL;
 	int ret;
 
+#ifdef CONFIG_NETRXTHREAD
+	(void)orig_dev;
+	ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
+#else
 	ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
 	if (pt_prev)
 		ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
 					 skb->dev, pt_prev, orig_dev);
+#endif
+
 	return ret;
 }
 
@@ -8356,8 +8406,10 @@
 		}
 	}
 	if (dev->flags != old_flags) {
+#ifdef CONFIG_NET_PROMISC_MESSAGES
 		netdev_info(dev, "%s promiscuous mode\n",
 			    dev->flags & IFF_PROMISC ? "entered" : "left");
+#endif
 		if (audit_enabled) {
 			current_uid_gid(&uid, &gid);
 			audit_log(audit_context(), GFP_ATOMIC,
@@ -8425,8 +8477,10 @@
 		}
 	}
 	if (dev->flags ^ old_flags) {
+#ifdef CONFIG_NET_PROMISC_MESSAGES
 		netdev_info(dev, "%s allmulticast mode\n",
 			    dev->flags & IFF_ALLMULTI ? "entered" : "left");
+#endif
 		dev_change_rx_flags(dev, IFF_ALLMULTI);
 		dev_set_rx_mode(dev);
 		if (notify)
@@ -11477,6 +11531,24 @@
 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 
+#ifdef CONFIG_NETRXTHREAD
+        for (i = 0; i < CONFIG_NETRXTHREAD_RX_QUEUE; i++) {
+		struct krxd *krxd = &gkrxd[i];
+		struct task_struct *task;
+
+		skb_queue_head_init(&krxd->pkt_queue);
+		init_waitqueue_head(&krxd->wq);
+		task = kthread_create(krxd_action, krxd, "krxthread_%u", i);
+		if (IS_ERR(task)) {
+			printk(KERN_ERR "unable to create krxd\n");
+			return -ENOMEM;
+		}
+		krxd->task = task;
+		wake_up_process(task);
+	}
+#endif
+
+
 	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
 				       NULL, dev_cpu_dead);
 	WARN_ON(rc < 0);
diff -ruw linux-6.4/net/core/gro.c linux-6.4-fbx/net/core/gro.c
--- linux-6.4/net/core/gro.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/core/gro.c	2023-11-07 13:38:44.066256801 +0100
@@ -10,7 +10,7 @@
 #define GRO_MAX_HEAD (MAX_HEADER + 128)
 
 static DEFINE_SPINLOCK(offload_lock);
-static struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
+struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
 /* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
 int gro_normal_batch __read_mostly = 8;
 
@@ -92,63 +92,6 @@
 }
 EXPORT_SYMBOL(dev_remove_offload);
 
-/**
- *	skb_eth_gso_segment - segmentation handler for ethernet protocols.
- *	@skb: buffer to segment
- *	@features: features for the output path (see dev->features)
- *	@type: Ethernet Protocol ID
- */
-struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
-				    netdev_features_t features, __be16 type)
-{
-	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
-	struct packet_offload *ptype;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(ptype, &offload_base, list) {
-		if (ptype->type == type && ptype->callbacks.gso_segment) {
-			segs = ptype->callbacks.gso_segment(skb, features);
-			break;
-		}
-	}
-	rcu_read_unlock();
-
-	return segs;
-}
-EXPORT_SYMBOL(skb_eth_gso_segment);
-
-/**
- *	skb_mac_gso_segment - mac layer segmentation handler.
- *	@skb: buffer to segment
- *	@features: features for the output path (see dev->features)
- */
-struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
-				    netdev_features_t features)
-{
-	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
-	struct packet_offload *ptype;
-	int vlan_depth = skb->mac_len;
-	__be16 type = skb_network_protocol(skb, &vlan_depth);
-
-	if (unlikely(!type))
-		return ERR_PTR(-EINVAL);
-
-	__skb_pull(skb, vlan_depth);
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(ptype, &offload_base, list) {
-		if (ptype->type == type && ptype->callbacks.gso_segment) {
-			segs = ptype->callbacks.gso_segment(skb, features);
-			break;
-		}
-	}
-	rcu_read_unlock();
-
-	__skb_push(skb, skb->data - skb_mac_header(skb));
-
-	return segs;
-}
-EXPORT_SYMBOL(skb_mac_gso_segment);
 
 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 {
diff -ruw linux-6.4/net/core/net-procfs.c linux-6.4-fbx/net/core/net-procfs.c
--- linux-6.4/net/core/net-procfs.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/core/net-procfs.c	2023-05-22 20:06:45.363883593 +0200
@@ -200,6 +200,89 @@
 	.show  = softnet_seq_show,
 };
 
+static struct page_frag_cache *frag_alloc_netdev_get_online(loff_t *pos)
+{
+	struct page_frag_cache *nc = NULL;
+
+	while (*pos < nr_cpu_ids)
+		if (cpu_online(*pos)) {
+			nc = netdev_frag_cache_get(*pos);
+			break;
+		} else
+			++*pos;
+	return nc;
+}
+
+static struct page_frag_cache *frag_alloc_napi_get_online(loff_t *pos)
+{
+	struct page_frag_cache *nc = NULL;
+
+	while (*pos < nr_cpu_ids)
+		if (cpu_online(*pos)) {
+			nc = napi_frag_cache_get(*pos);
+			break;
+		} else
+			++*pos;
+	return nc;
+}
+
+static void *frag_alloc_netdev_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return frag_alloc_netdev_get_online(pos);
+}
+
+static void *frag_alloc_netdev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	++*pos;
+	return frag_alloc_netdev_get_online(pos);
+}
+
+static void *frag_alloc_napi_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return frag_alloc_napi_get_online(pos);
+}
+
+static void *frag_alloc_napi_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	++*pos;
+	return frag_alloc_napi_get_online(pos);
+}
+
+static void frag_alloc_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int frag_alloc_seq_show(struct seq_file *seq, void *v)
+{
+#ifdef CONFIG_PAGE_OWNER
+	struct page_frag_cache *nc = v;
+	unsigned int pages = atomic_read(&nc->pages_allocated);
+
+	seq_printf(seq,
+		   "cpu[%d]: pages:%u (%lu kB)\n",
+		   (int)seq->index, pages, (pages * PAGE_SIZE) / 1024);
+#else
+	seq_printf(seq,
+		   "cpu[%d]: CONFIG_PAGE_OWNER missing\n",
+		   (int)seq->index);
+#endif
+	return 0;
+}
+
+static const struct seq_operations frag_alloc_netdev_seq_ops = {
+	.start = frag_alloc_netdev_seq_start,
+	.next  = frag_alloc_netdev_seq_next,
+	.stop  = frag_alloc_seq_stop,
+	.show  = frag_alloc_seq_show,
+};
+
+static const struct seq_operations frag_alloc_napi_seq_ops = {
+	.start = frag_alloc_napi_seq_start,
+	.next  = frag_alloc_napi_seq_next,
+	.stop  = frag_alloc_seq_stop,
+	.show  = frag_alloc_seq_show,
+};
+
 static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
 {
 	struct list_head *ptype_list = NULL;
@@ -320,6 +403,85 @@
 	.show  = ptype_seq_show,
 };
 
+#ifdef CONFIG_NETRXTHREAD
+/*
+ *	This is invoked by the /proc filesystem handler to display a device
+ *	in detail.
+ */
+static void *krxthread_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	int *queue;
+
+	if (*pos > CONFIG_NETRXTHREAD_RX_QUEUE)
+		return NULL;
+
+	queue = kmalloc(sizeof(*queue), GFP_KERNEL);
+	if (!queue)
+		return NULL;
+	*queue = ((int)*pos - 1);
+
+	return queue;
+}
+
+static void *krxthread_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	int *queue = v;
+
+	if (*pos == CONFIG_NETRXTHREAD_RX_QUEUE) {
+		++*pos;
+		return NULL;
+	}
+
+	++*queue;
+	*pos = *queue + 1;
+	return queue;
+}
+
+static void krxthread_seq_stop(struct seq_file *seq, void *v)
+{
+	kfree(v);
+}
+
+static void krxthread_seq_printf_stats(struct seq_file *seq, int queue)
+{
+	seq_printf(seq, "%8u %12u %12u\n",
+		   queue,
+		   gkrxd[queue].stats_pkts,
+		   gkrxd[queue].stats_dropped);
+}
+
+static int krxthread_seq_show(struct seq_file *seq, void *v)
+{
+	int *queue = v;
+
+	if (*queue == -1)
+		seq_printf(seq, "%8s %12s %12s\n",
+			   "queue", "packets", "drops");
+	else
+		krxthread_seq_printf_stats(seq, *queue);
+	return 0;
+}
+
+static const struct seq_operations krxthread_seq_ops = {
+	.start = krxthread_seq_start,
+	.next  = krxthread_seq_next,
+	.stop  = krxthread_seq_stop,
+	.show  = krxthread_seq_show,
+};
+
+static int krxthread_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &krxthread_seq_ops);
+}
+
+static const struct proc_ops krxthread_seq_fops = {
+	.proc_open	= krxthread_seq_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
+};
+#endif /* CONFIG_NETRXTHREAD */
+
 static int __net_init dev_proc_net_init(struct net *net)
 {
 	int rc = -ENOMEM;
@@ -333,9 +495,20 @@
 	if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
 			sizeof(struct seq_net_private)))
 		goto out_softnet;
+	if (!proc_create_seq("frag_alloc_netdev", 0444, net->proc_net,
+			     &frag_alloc_netdev_seq_ops))
+		goto out_softnet;
+	if (!proc_create_seq("frag_alloc_napi", 0444, net->proc_net,
+			     &frag_alloc_napi_seq_ops))
+		goto out_softnet;
 
 	if (wext_proc_init(net))
 		goto out_ptype;
+#ifdef CONFIG_NETRXTHREAD
+	if (!proc_create("krxthread", S_IRUGO, net->proc_net,
+			 &krxthread_seq_fops))
+		goto out_ptype;
+#endif
 	rc = 0;
 out:
 	return rc;
diff -ruw linux-6.4/net/core/net-sysfs.c linux-6.4-fbx/net/core/net-sysfs.c
--- linux-6.4/net/core/net-sysfs.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/core/net-sysfs.c	2023-05-22 20:06:45.363883593 +0200
@@ -15,6 +15,7 @@
 #include <linux/nsproxy.h>
 #include <net/sock.h>
 #include <net/net_namespace.h>
+#include <net/cfg80211.h>
 #include <linux/rtnetlink.h>
 #include <linux/vmalloc.h>
 #include <linux/export.h>
@@ -747,7 +748,28 @@
 	.attrs  = netstat_attrs,
 };
 
+#if IS_ENABLED(CONFIG_CFG80211)
+static ssize_t show_nl80211_iftype(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	const struct net_device *netdev = to_net_dev(dev);
+	ssize_t ret = 0;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+	if (netdev->ieee80211_ptr)
+		ret = sprintf(buf, "%d\n", netdev->ieee80211_ptr->iftype);
+	rtnl_unlock();
+
+	return ret;
+}
+static DEVICE_ATTR(nl80211_iftype, S_IRUGO, show_nl80211_iftype, NULL);
+#endif
+
 static struct attribute *wireless_attrs[] = {
+#if IS_ENABLED(CONFIG_CFG80211)
+	&dev_attr_nl80211_iftype.attr,
+#endif
 	NULL
 };
 
diff -ruw linux-6.4/net/core/of_net.c linux-6.4-fbx/net/core/of_net.c
--- linux-6.4/net/core/of_net.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/core/of_net.c	2023-05-22 20:06:45.367883699 +0200
@@ -12,6 +12,7 @@
 #include <linux/export.h>
 #include <linux/device.h>
 #include <linux/nvmem-consumer.h>
+#include <linux/fbxserial.h>
 
 /**
  * of_get_phy_mode - Get phy mode for given device_node
@@ -96,6 +97,23 @@
 }
 EXPORT_SYMBOL(of_get_mac_address_nvmem);
 
+static int of_get_mac_addr_from_fbxserial(struct device_node *np, u8 *addr)
+{
+#ifdef CONFIG_FBXSERIAL
+	struct property *pp;
+
+	pp = of_find_property(np, "fbxserial-mac-address", NULL);
+	if (!pp || pp->length != 4)
+		return -ENODEV;
+
+	memcpy(addr, fbxserialinfo_get_mac_addr(be32_to_cpu(*(u32*)pp->value)),
+	       ETH_ALEN);
+	return 0;
+#else
+	return -ENOSYS;
+#endif
+}
+
 /**
  * of_get_mac_address()
  * @np:		Caller's Device Node
@@ -129,6 +147,10 @@
 	if (!np)
 		return -ENODEV;
 
+	ret = of_get_mac_addr_from_fbxserial(np, addr);
+	if (!ret)
+		return 0;
+
 	ret = of_get_mac_addr(np, "mac-address", addr);
 	if (!ret)
 		return 0;
diff -ruw linux-6.4/net/core/page_pool.c linux-6.4-fbx/net/core/page_pool.c
--- linux-6.4/net/core/page_pool.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/core/page_pool.c	2024-01-19 17:01:19.905848123 +0100
@@ -357,6 +357,7 @@
 {
 	page->pp = pool;
 	page->pp_magic |= PP_SIGNATURE;
+	page_pool_clear_recycle_flag(page);
 	if (pool->p.init_callback)
 		pool->p.init_callback(page, pool->p.init_arg);
 }
diff -ruw linux-6.4/net/core/skbuff.c linux-6.4-fbx/net/core/skbuff.c
--- linux-6.4/net/core/skbuff.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/core/skbuff.c	2024-01-19 17:01:19.905848123 +0100
@@ -67,6 +67,7 @@
 #include <net/dst.h>
 #include <net/sock.h>
 #include <net/checksum.h>
+#include <net/gso.h>
 #include <net/ip6_checksum.h>
 #include <net/xfrm.h>
 #include <net/mpls.h>
@@ -281,6 +282,17 @@
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 
+struct page_frag_cache *netdev_frag_cache_get(unsigned int cpu_id)
+{
+	return per_cpu_ptr(&netdev_alloc_cache, cpu_id);
+}
+
+struct page_frag_cache *napi_frag_cache_get(unsigned int cpu_id)
+{
+	struct napi_alloc_cache *nc = per_cpu_ptr(&napi_alloc_cache, cpu_id);
+	return &nc->page;
+}
+
 /* Double check that napi_get_frags() allocates skbs with
  * skb->head being backed by slab, not a page fragment.
  * This is to make sure bug fixed in 3226b158e67c
@@ -893,6 +905,11 @@
 {
 	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
 		return false;
+
+#ifdef CONFIG_IP_FFN
+	if (skb->ffn_ff_done & BIT(1))
+		page_pool_set_recycled_flag(virt_to_page(data));
+#endif
 	return page_pool_return_skb_page(virt_to_page(data), napi_safe);
 }
 
@@ -1345,6 +1362,11 @@
 	new->queue_mapping = old->queue_mapping;
 
 	memcpy(&new->headers, &old->headers, sizeof(new->headers));
+
+#ifdef CONFIG_IP_FFN
+	new->ffn_state		= FFN_STATE_INIT;
+	new->ffn_ff_done	= 0;
+#endif
 	CHECK_SKB_FIELD(protocol);
 	CHECK_SKB_FIELD(csum);
 	CHECK_SKB_FIELD(hash);
@@ -5774,157 +5796,20 @@
 	skb->offload_fwd_mark = 0;
 	skb->offload_l3_fwd_mark = 0;
 #endif
+	skb->mark = 0;
 
+#ifdef CONFIG_IP_FFN
+	skb->ffn_state = FFN_STATE_INIT;
+	skb->ffn_ff_done = 0;
+#endif
 	if (!xnet)
 		return;
 
 	ipvs_reset(skb);
-	skb->mark = 0;
 	skb_clear_tstamp(skb);
 }
 EXPORT_SYMBOL_GPL(skb_scrub_packet);
 
-/**
- * skb_gso_transport_seglen - Return length of individual segments of a gso packet
- *
- * @skb: GSO skb
- *
- * skb_gso_transport_seglen is used to determine the real size of the
- * individual segments, including Layer4 headers (TCP/UDP).
- *
- * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
- */
-static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
-{
-	const struct skb_shared_info *shinfo = skb_shinfo(skb);
-	unsigned int thlen = 0;
-
-	if (skb->encapsulation) {
-		thlen = skb_inner_transport_header(skb) -
-			skb_transport_header(skb);
-
-		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
-			thlen += inner_tcp_hdrlen(skb);
-	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
-		thlen = tcp_hdrlen(skb);
-	} else if (unlikely(skb_is_gso_sctp(skb))) {
-		thlen = sizeof(struct sctphdr);
-	} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
-		thlen = sizeof(struct udphdr);
-	}
-	/* UFO sets gso_size to the size of the fragmentation
-	 * payload, i.e. the size of the L4 (UDP) header is already
-	 * accounted for.
-	 */
-	return thlen + shinfo->gso_size;
-}
-
-/**
- * skb_gso_network_seglen - Return length of individual segments of a gso packet
- *
- * @skb: GSO skb
- *
- * skb_gso_network_seglen is used to determine the real size of the
- * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
- *
- * The MAC/L2 header is not accounted for.
- */
-static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
-{
-	unsigned int hdr_len = skb_transport_header(skb) -
-			       skb_network_header(skb);
-
-	return hdr_len + skb_gso_transport_seglen(skb);
-}
-
-/**
- * skb_gso_mac_seglen - Return length of individual segments of a gso packet
- *
- * @skb: GSO skb
- *
- * skb_gso_mac_seglen is used to determine the real size of the
- * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
- * headers (TCP/UDP).
- */
-static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
-{
-	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
-
-	return hdr_len + skb_gso_transport_seglen(skb);
-}
-
-/**
- * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
- *
- * There are a couple of instances where we have a GSO skb, and we
- * want to determine what size it would be after it is segmented.
- *
- * We might want to check:
- * -    L3+L4+payload size (e.g. IP forwarding)
- * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
- *
- * This is a helper to do that correctly considering GSO_BY_FRAGS.
- *
- * @skb: GSO skb
- *
- * @seg_len: The segmented length (from skb_gso_*_seglen). In the
- *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
- *
- * @max_len: The maximum permissible length.
- *
- * Returns true if the segmented length <= max length.
- */
-static inline bool skb_gso_size_check(const struct sk_buff *skb,
-				      unsigned int seg_len,
-				      unsigned int max_len) {
-	const struct skb_shared_info *shinfo = skb_shinfo(skb);
-	const struct sk_buff *iter;
-
-	if (shinfo->gso_size != GSO_BY_FRAGS)
-		return seg_len <= max_len;
-
-	/* Undo this so we can re-use header sizes */
-	seg_len -= GSO_BY_FRAGS;
-
-	skb_walk_frags(skb, iter) {
-		if (seg_len + skb_headlen(iter) > max_len)
-			return false;
-	}
-
-	return true;
-}
-
-/**
- * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
- *
- * @skb: GSO skb
- * @mtu: MTU to validate against
- *
- * skb_gso_validate_network_len validates if a given skb will fit a
- * wanted MTU once split. It considers L3 headers, L4 headers, and the
- * payload.
- */
-bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
-{
-	return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
-}
-EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
-
-/**
- * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
- *
- * @skb: GSO skb
- * @len: length to validate against
- *
- * skb_gso_validate_mac_len validates if a given skb will fit a wanted
- * length once split, including L2, L3 and L4 headers and the payload.
- */
-bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
-{
-	return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
-}
-EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
-
 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
 {
 	int mac_len, meta_len;
diff -ruw linux-6.4/net/core/sock.c linux-6.4-fbx/net/core/sock.c
--- linux-6.4/net/core/sock.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/core/sock.c	2023-06-27 11:47:16.143869010 +0200
@@ -1526,6 +1526,10 @@
 		WRITE_ONCE(sk->sk_txrehash, (u8)val);
 		break;
 
+	case SO_UDP_DUP_UNICAST:
+		sock_valbool_flag(sk, SOCK_UDP_DUP_UNICAST, valbool);
+		break;
+
 	default:
 		ret = -ENOPROTOOPT;
 		break;
@@ -1896,6 +1900,10 @@
 		v.val64 = sock_gen_cookie(sk);
 		break;
 
+	case SO_UDP_DUP_UNICAST:
+		v.val = sock_flag(sk, SOCK_UDP_DUP_UNICAST);
+		break;
+
 	case SO_ZEROCOPY:
 		v.val = sock_flag(sk, SOCK_ZEROCOPY);
 		break;
diff -ruw linux-6.4/net/dsa/Kconfig linux-6.4-fbx/net/dsa/Kconfig
--- linux-6.4/net/dsa/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/dsa/Kconfig	2023-05-22 20:06:45.383884125 +0200
@@ -63,6 +63,10 @@
 	  Say Y or M if you want to enable support for tagging frames
 	  for the Hirschmann Hellcreek TSN switches.
 
+config NET_DSA_TAG_BRCM_FBX
+	tristate "Tag driver for Broadcom switches using in-frame headers"
+	select NET_DSA_TAG_BRCM_COMMON
+
 config NET_DSA_TAG_GSWIP
 	tristate "Tag driver for Lantiq / Intel GSWIP switches"
 	help
diff -ruw linux-6.4/net/dsa/dsa.c linux-6.4-fbx/net/dsa/dsa.c
--- linux-6.4/net/dsa/dsa.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/dsa/dsa.c	2023-08-16 14:13:31.823188647 +0200
@@ -421,6 +421,32 @@
 	return cpu_dp;
 }
 
+/*
+ * find cpu port forcibly assigned to this port via device tree
+ */
+static struct dsa_port *
+dsa_port_get_forced_cpu_port(struct dsa_switch_tree *dst,
+			     struct dsa_port *dp)
+{
+       struct device_node *cpu_dn;
+       struct dsa_switch *ds = dp->ds;
+
+       cpu_dn = of_parse_phandle(dp->dn, "dsa,cpu-port", 0);
+       if (!cpu_dn)
+               return ERR_PTR(-ENOENT);
+
+       list_for_each_entry (dp, &dst->ports, list) {
+               if (!dsa_port_is_cpu(dp))
+                       continue;
+
+               if (dp->dn == cpu_dn)
+                       return dp;
+       }
+
+       dev_err(ds->dev, "failed to find cpu port referenced by phandle");
+       return ERR_PTR(-EINVAL);
+}
+
 /* Perform initial assignment of CPU ports to user ports and DSA links in the
  * fabric, giving preference to CPU ports local to each switch. Default to
  * using the first CPU port in the switch tree if the port does not have a CPU
@@ -429,17 +455,49 @@
 static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
 {
 	struct dsa_port *preferred_cpu_dp, *cpu_dp, *dp;
+	size_t loop;
+
+	/*
+	 * first pass: assign any CPU port explicitly specified through
+	 * DT to user ports
+	 */
+	list_for_each_entry(dp, &dst->ports, list) {
+		if (!dsa_port_is_user(dp) && !dsa_port_is_dsa(dp))
+			continue;
+
+		preferred_cpu_dp = dsa_switch_preferred_default_local_cpu_port(dp->ds);
+		if (preferred_cpu_dp && preferred_cpu_dp != dp)
+			continue;
+
+		cpu_dp = dsa_port_get_forced_cpu_port(dst, dp);
+
+		if (IS_ERR(cpu_dp)) {
+			if (PTR_ERR(cpu_dp) == -EINVAL)
+				return -EINVAL;
+			continue;
+		}
 
+		dp->cpu_dp = cpu_dp;
+	}
+
+	/*
+	 * for user ports without an explicit cpu port, we will assign
+	 * one CPU port from the same switch: first look up the default
+	 * CPU port if it's declared in DT, or fall back to the first cpu port
+	 */
+	for (loop = 0; loop < 2; loop++) {
 	list_for_each_entry(cpu_dp, &dst->ports, list) {
 		if (!dsa_port_is_cpu(cpu_dp))
 			continue;
 
-		preferred_cpu_dp = dsa_switch_preferred_default_local_cpu_port(cpu_dp->ds);
-		if (preferred_cpu_dp && preferred_cpu_dp != cpu_dp)
+			if (loop == 0 && !cpu_dp->is_def_cpu_port)
 			continue;
 
+			list_for_each_entry(dp, &dst->ports, list) {
 		/* Prefer a local CPU port */
-		dsa_switch_for_each_port(dp, cpu_dp->ds) {
+				if (dp->ds != cpu_dp->ds)
+					continue;
+
 			/* Prefer the first local CPU port found */
 			if (dp->cpu_dp)
 				continue;
@@ -448,7 +506,10 @@
 				dp->cpu_dp = cpu_dp;
 		}
 	}
+	}
 
+	/* finally handle all remaining user ports, which don't have a
+	 * CPU port on the same switch */
 	return dsa_tree_setup_default_cpu(dst);
 }
 
@@ -1222,6 +1283,7 @@
 	dp->type = DSA_PORT_TYPE_CPU;
 	dsa_port_set_tag_protocol(dp, dst->tag_ops);
 	dp->dst = dst;
+	dp->is_def_cpu_port = of_property_read_bool(dp->dn, "dsa,def-cpu-port");
 
 	/* At this point, the tree may be configured to use a different
 	 * tagger than the one chosen by the switch driver during
diff -ruw linux-6.4/net/dsa/slave.c linux-6.4-fbx/net/dsa/slave.c
--- linux-6.4/net/dsa/slave.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/dsa/slave.c	2023-05-22 20:06:45.383884125 +0200
@@ -515,7 +515,7 @@
 	ndm->ndm_family  = AF_BRIDGE;
 	ndm->ndm_pad1    = 0;
 	ndm->ndm_pad2    = 0;
-	ndm->ndm_flags   = NTF_SELF;
+	ndm->ndm_flags   = NTF_SELF | NTF_OFFLOADED;
 	ndm->ndm_type    = 0;
 	ndm->ndm_ifindex = dump->dev->ifindex;
 	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;
@@ -2389,7 +2389,7 @@
 	return 0;
 }
 
-static const struct net_device_ops dsa_slave_netdev_ops = {
+const struct net_device_ops dsa_slave_netdev_ops = {
 	.ndo_open	 	= dsa_slave_open,
 	.ndo_stop		= dsa_slave_close,
 	.ndo_start_xmit		= dsa_slave_xmit,
diff -ruw linux-6.4/net/dsa/slave.h linux-6.4-fbx/net/dsa/slave.h
--- linux-6.4/net/dsa/slave.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/dsa/slave.h	2023-05-22 20:06:45.383884125 +0200
@@ -51,6 +51,13 @@
 int dsa_slave_manage_vlan_filtering(struct net_device *dev,
 				    bool vlan_filtering);
 
+extern const struct net_device_ops dsa_slave_netdev_ops;
+
+static inline bool dsa_is_slave(const struct net_device *dev)
+{
+	return (dev->netdev_ops == &dsa_slave_netdev_ops);
+}
+
 static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
diff -ruw linux-6.4/net/dsa/tag_brcm.c linux-6.4-fbx/net/dsa/tag_brcm.c
--- linux-6.4/net/dsa/tag_brcm.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/dsa/tag_brcm.c	2023-05-22 20:06:45.387884231 +0200
@@ -79,7 +79,8 @@
 #define BRCM_EG_PID_MASK	0x1f
 
 #if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM) || \
-	IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
+	IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND) || \
+	IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_FBX)
 
 static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
 					struct net_device *dev,
@@ -177,7 +178,8 @@
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM)
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM) || \
+	IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_FBX)
 static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb,
 				     struct net_device *dev)
 {
@@ -199,7 +201,9 @@
 
 	return nskb;
 }
+#endif
 
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM)
 static const struct dsa_device_ops brcm_netdev_ops = {
 	.name	= BRCM_NAME,
 	.proto	= DSA_TAG_PROTO_BRCM,
@@ -321,6 +325,37 @@
 MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_PREPEND, BRCM_PREPEND_NAME);
 #endif
 
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_FBX)
+static struct sk_buff *
+brcm_tag_rcv_fbx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct sk_buff *nskb = brcm_tag_rcv(skb, dev);
+
+	if (!nskb)
+		return nskb;
+
+	/* if the packet was broadcast, the switch has already flooded
+	 * it to the other ports */
+	if (nskb->pkt_type == PACKET_BROADCAST)
+		nskb->offload_fwd_mark = 1;
+	else
+		nskb->offload_fwd_mark = 0;
+
+	return nskb;
+}
+
+static const struct dsa_device_ops brcm_fbx_netdev_ops = {
+	.name	= "brcm-fbx",
+	.proto	= DSA_TAG_PROTO_BRCM_FBX,
+	.xmit	= brcm_tag_xmit,
+	.rcv	= brcm_tag_rcv_fbx,
+	.needed_headroom = BRCM_TAG_LEN,
+};
+
+DSA_TAG_DRIVER(brcm_fbx_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_FBX, "brcm-fbx");
+#endif
+
 static struct dsa_tag_driver *dsa_tag_driver_array[] =	{
 #if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM)
 	&DSA_TAG_DRIVER_NAME(brcm_netdev_ops),
@@ -331,6 +366,9 @@
 #if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
 	&DSA_TAG_DRIVER_NAME(brcm_prepend_netdev_ops),
 #endif
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_FBX)
+	&DSA_TAG_DRIVER_NAME(brcm_fbx_netdev_ops),
+#endif
 };
 
 module_dsa_tag_drivers(dsa_tag_driver_array);
diff -ruw linux-6.4/net/ethernet/eth.c linux-6.4-fbx/net/ethernet/eth.c
--- linux-6.4/net/ethernet/eth.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ethernet/eth.c	2023-05-22 20:06:45.387884231 +0200
@@ -62,6 +62,7 @@
 #include <net/gro.h>
 #include <linux/uaccess.h>
 #include <net/pkt_sched.h>
+#include <linux/fbxserial.h>
 
 /**
  * eth_header - create the Ethernet header
@@ -609,6 +610,21 @@
  */
 int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr)
 {
+#ifdef CONFIG_FBXSERIAL
+	u32 index;
+	int ret;
+
+	ret = fwnode_property_read_u32(fwnode, "fbxserial-mac-address",
+				       &index);
+	if (ret == 0) {
+		const void *res = fbxserialinfo_get_mac_addr(index);
+		if (res) {
+			memcpy(addr, res, ETH_ALEN);
+			return 0;
+		}
+	}
+#endif
+
 	if (!fwnode_get_mac_addr(fwnode, "mac-address", addr) ||
 	    !fwnode_get_mac_addr(fwnode, "local-mac-address", addr) ||
 	    !fwnode_get_mac_addr(fwnode, "address", addr))
diff -ruw linux-6.4/net/ethtool/common.c linux-6.4-fbx/net/ethtool/common.c
--- linux-6.4/net/ethtool/common.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ethtool/common.c	2023-05-31 17:11:03.421680714 +0200
@@ -211,6 +211,12 @@
 	__DEFINE_LINK_MODE_NAME(10, T1S, Full),
 	__DEFINE_LINK_MODE_NAME(10, T1S, Half),
 	__DEFINE_LINK_MODE_NAME(10, T1S_P2MP, Half),
+	__DEFINE_LINK_MODE_NAME(1000, PX_D, Full),
+	__DEFINE_LINK_MODE_NAME(1000, PX_U, Full),
+	__DEFINE_LINK_MODE_NAME(10000, PR_D, Full),
+	__DEFINE_LINK_MODE_NAME(10000, PR_U, Full),
+	__DEFINE_LINK_MODE_NAME(10000_1000, PRX_D, Full),
+	__DEFINE_LINK_MODE_NAME(10000_1000, PRX_U, Full),
 };
 static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS);
 
@@ -251,6 +257,10 @@
 #define __LINK_MODE_LANES_T1S_P2MP	1
 #define __LINK_MODE_LANES_VR8		8
 #define __LINK_MODE_LANES_DR8_2		8
+#define __LINK_MODE_LANES_PX_D		1
+#define __LINK_MODE_LANES_PX_U		1
+#define __LINK_MODE_LANES_PR_U		1
+#define __LINK_MODE_LANES_PR_D		1
 
 #define __DEFINE_LINK_MODE_PARAMS(_speed, _type, _duplex)	\
 	[ETHTOOL_LINK_MODE(_speed, _type, _duplex)] = {		\
@@ -374,6 +384,18 @@
 	__DEFINE_LINK_MODE_PARAMS(10, T1S, Full),
 	__DEFINE_LINK_MODE_PARAMS(10, T1S, Half),
 	__DEFINE_LINK_MODE_PARAMS(10, T1S_P2MP, Half),
+	__DEFINE_LINK_MODE_PARAMS(1000, PX_D, Full),
+	__DEFINE_LINK_MODE_PARAMS(1000, PX_U, Full),
+	__DEFINE_LINK_MODE_PARAMS(10000, PR_D, Full),
+	__DEFINE_LINK_MODE_PARAMS(10000, PR_U, Full),
+	[ETHTOOL_LINK_MODE_10000_1000basePRX_D_Full_BIT] = {
+		.speed  = SPEED_10000,
+		.duplex = DUPLEX_FULL,
+	},
+	[ETHTOOL_LINK_MODE_10000_1000basePRX_U_Full_BIT] = {
+		.speed  = SPEED_10000,
+		.duplex = DUPLEX_FULL,
+	},
 };
 static_assert(ARRAY_SIZE(link_mode_params) == __ETHTOOL_LINK_MODE_MASK_NBITS);
 
diff -ruw linux-6.4/net/ethtool/ioctl.c linux-6.4-fbx/net/ethtool/ioctl.c
--- linux-6.4/net/ethtool/ioctl.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ethtool/ioctl.c	2023-05-31 17:11:03.425680822 +0200
@@ -17,6 +17,7 @@
 #include <linux/netdevice.h>
 #include <linux/net_tstamp.h>
 #include <linux/phy.h>
+#include <linux/phylink.h>
 #include <linux/bitops.h>
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
@@ -179,6 +180,9 @@
 	if (sset == ETH_SS_LINK_MODES)
 		return __ETHTOOL_LINK_MODE_MASK_NBITS;
 
+	if (sset == ETH_SS_PHYLINK_IFTYPES)
+		return PHY_INTERFACE_MODE_MAX - 1;
+
 	if (ops->get_sset_count && ops->get_strings)
 		return ops->get_sset_count(dev, sset);
 	else
@@ -208,7 +212,16 @@
 	else if (stringset == ETH_SS_LINK_MODES)
 		memcpy(data, link_mode_names,
 		       __ETHTOOL_LINK_MODE_MASK_NBITS * ETH_GSTRING_LEN);
-	else
+	else if (stringset == ETH_SS_PHYLINK_IFTYPES) {
+		int i;
+
+		for (i = PHY_INTERFACE_MODE_NA + 1;
+		     i < PHY_INTERFACE_MODE_MAX; i++) {
+			strlcpy(data + (i - 1) * ETH_GSTRING_LEN,
+				phy_modes(i),
+				ETH_GSTRING_LEN);
+		}
+	} else
 		/* ops->get_strings is valid because checked earlier */
 		ops->get_strings(dev, stringset, data);
 }
@@ -2624,6 +2637,7 @@
 	switch (tuna->id) {
 	case ETHTOOL_PHY_DOWNSHIFT:
 	case ETHTOOL_PHY_FAST_LINK_DOWN:
+	case ETHTOOL_PHY_BROKEN:
 		if (tuna->len != sizeof(u8) ||
 		    tuna->type_id != ETHTOOL_TUNABLE_U8)
 			return -EINVAL;
@@ -2750,6 +2764,156 @@
 	return dev->ethtool_ops->set_fecparam(dev, &fecparam);
 }
 
+static int ethtool_get_sfp_state(struct net_device *dev, void __user *useraddr)
+{
+	struct ethtool_sfp_state sfp_state;
+	int rc;
+
+	if (!dev->sfp_bus) {
+		printk("no SFP bus ya twat.\n");
+		return -ENODEV;
+	}
+
+	rc = sfp_get_sfp_state(dev->sfp_bus, &sfp_state);
+	if (rc)
+		return rc;
+
+	if (copy_to_user(useraddr, &sfp_state, sizeof (sfp_state)))
+		return -EFAULT;
+	return 0;
+}
+
+static int ethtool_get_shaper_params(struct net_device *dev, void __user *uaddr)
+{
+	struct ethtool_shaper_params sp;
+	int rc;
+
+	if (!dev->ethtool_ops->get_shaper_param)
+		return -EOPNOTSUPP;
+
+	memset(&sp, 0, sizeof (sp));
+	rc = dev->ethtool_ops->get_shaper_param(dev, &sp);
+	if (rc)
+		return rc;
+
+	if (copy_to_user(uaddr, &sp, sizeof (sp)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int ethtool_set_shaper_params(struct net_device *dev, void __user *uaddr)
+{
+	struct ethtool_shaper_params sp;
+
+	if (!dev->ethtool_ops->set_shaper_param)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&sp, uaddr, sizeof (sp)))
+		return -EFAULT;
+
+	return dev->ethtool_ops->set_shaper_param(dev, &sp);
+}
+
+static int ethtool_get_eponparam(struct net_device *dev, void __user *useraddr)
+{
+	struct ethtool_epon_param eponparam = { .cmd = ETHTOOL_GEPON_PARAM };
+	int rc;
+
+	if (!dev->ethtool_ops->get_epon_param)
+		return -EOPNOTSUPP;
+
+	rc = dev->ethtool_ops->get_epon_param(dev, &eponparam);
+	if (rc)
+		return rc;
+
+	if (copy_to_user(useraddr, &eponparam, sizeof(eponparam)))
+		return -EFAULT;
+	return 0;
+}
+
+static int ethtool_set_eponparam(struct net_device *dev, void __user *useraddr)
+{
+	struct ethtool_epon_param eponparam;
+
+	if (!dev->ethtool_ops->set_epon_param)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&eponparam, useraddr, sizeof(eponparam)))
+		return -EFAULT;
+
+	return dev->ethtool_ops->set_epon_param(dev, &eponparam);
+}
+
+#ifdef CONFIG_PHYLINK
+static int ethtool_get_phylink_iftype(struct net_device *dev, void __user *uaddr)
+{
+	struct ethtool_phylink_iftype sp;
+	phy_interface_t interface;
+	struct phylink *pl;
+	int mode, an_enable;
+
+	if (!dev->ethtool_ops->get_phylink)
+		return -EOPNOTSUPP;
+
+	pl = dev->ethtool_ops->get_phylink(dev);
+	if (!pl)
+		return -EIO;
+
+	memset(&sp, 0, sizeof (sp));
+	phylink_get_interface(pl, &interface, &an_enable, &mode);
+	strlcpy(sp.iftype, phy_modes(interface), sizeof (sp.iftype));
+	sp.autoneg_en = an_enable;
+	sp.mode = mode;
+
+	if (copy_to_user(uaddr, &sp, sizeof (sp)))
+		return -EFAULT;
+
+	return 0;
+}
+#else
+static inline int ethtool_get_phylink_iftype(struct net_device *dev, void __user *uaddr)
+{
+	return -ENOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_PHYLINK
+static int ethtool_set_phylink_iftype(struct net_device *dev, void __user *uaddr)
+{
+	struct ethtool_phylink_iftype sp;
+	phy_interface_t i;
+	struct phylink *pl;
+
+	if (copy_from_user(&sp, uaddr, sizeof (sp)))
+		return -EFAULT;
+
+	if (!dev->ethtool_ops->get_phylink)
+		return -EOPNOTSUPP;
+
+	pl = dev->ethtool_ops->get_phylink(dev);
+	if (!pl)
+		return -EIO;
+
+	sp.iftype[sizeof (sp.iftype) - 1] = 0;
+
+	for (i = PHY_INTERFACE_MODE_NA; i < PHY_INTERFACE_MODE_MAX; i++) {
+		if (!strcmp(phy_modes(i), sp.iftype))
+			break;
+	}
+
+	if (i == PHY_INTERFACE_MODE_MAX)
+		return -EINVAL;
+
+	return phylink_set_interface(pl, i, sp.autoneg_en ? 1 : 0);
+}
+#else
+static inline int ethtool_set_phylink_iftype(struct net_device *dev, void __user *uaddr)
+{
+	return -ENOTSUPP;
+}
+#endif
+
 /* The main entry point in this file.  Called from net/core/dev_ioctl.c */
 
 static int
@@ -3036,6 +3200,33 @@
 	case ETHTOOL_SFECPARAM:
 		rc = ethtool_set_fecparam(dev, useraddr);
 		break;
+	case ETHTOOL_SSHAPER_PARAMS:
+		rc = ethtool_set_shaper_params(dev, useraddr);
+		break;
+	case ETHTOOL_GSHAPER_PARAMS:
+		rc = ethtool_get_shaper_params(dev, useraddr);
+		break;
+	case ETHTOOL_GEPON_PARAM:
+		rc = ethtool_get_eponparam(dev, useraddr);
+		break;
+	case ETHTOOL_SEPON_KEYS:
+	case ETHTOOL_SEPON_ENCRYPT:
+	case ETHTOOL_SEPON_RESTART:
+	case ETHTOOL_SEPON_BURST:
+	case ETHTOOL_SEPON_ADD_MCLLID:
+	case ETHTOOL_SEPON_DEL_MCLLID:
+	case ETHTOOL_SEPON_CLR_MCLLID:
+		rc = ethtool_set_eponparam(dev, useraddr);
+		break;
+	case ETHTOOL_GSFP_STATE:
+		rc = ethtool_get_sfp_state(dev, useraddr);
+		break;
+	case ETHTOOL_GPHYLINK_IFTYPE:
+		rc = ethtool_get_phylink_iftype(dev, useraddr);
+		break;
+	case ETHTOOL_SPHYLINK_IFTYPE:
+		rc = ethtool_set_phylink_iftype(dev, useraddr);
+		break;
 	default:
 		rc = -EOPNOTSUPP;
 	}
diff -ruw linux-6.4/net/ipv4/Makefile linux-6.4-fbx/net/ipv4/Makefile
--- linux-6.4/net/ipv4/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv4/Makefile	2023-05-22 20:06:45.399884550 +0200
@@ -20,6 +20,8 @@
 
 obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o
 obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
+
+obj-$(CONFIG_IP_FFN) += ip_ffn.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
 obj-$(CONFIG_IP_MROUTE) += ipmr.o
diff -ruw linux-6.4/net/ipv4/af_inet.c linux-6.4-fbx/net/ipv4/af_inet.c
--- linux-6.4/net/ipv4/af_inet.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv4/af_inet.c	2023-11-07 13:38:44.066256801 +0100
@@ -100,6 +100,7 @@
 #include <net/ip_fib.h>
 #include <net/inet_connection_sock.h>
 #include <net/gro.h>
+#include <net/gso.h>
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <net/udplite.h>
diff -ruw linux-6.4/net/ipv4/gre_offload.c linux-6.4-fbx/net/ipv4/gre_offload.c
--- linux-6.4/net/ipv4/gre_offload.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv4/gre_offload.c	2023-11-07 13:38:44.066256801 +0100
@@ -11,6 +11,7 @@
 #include <net/protocol.h>
 #include <net/gre.h>
 #include <net/gro.h>
+#include <net/gso.h>
 
 static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 				       netdev_features_t features)
diff -ruw linux-6.4/net/ipv4/ip_input.c linux-6.4-fbx/net/ipv4/ip_input.c
--- linux-6.4/net/ipv4/ip_input.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv4/ip_input.c	2023-05-22 20:06:45.415884976 +0200
@@ -224,8 +224,12 @@
 	}
 }
 
-static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+#ifdef CONFIG_IP_FFN
+	if (skb->ffn_state == FFN_STATE_FORWARDABLE)
+		ip_ffn_add(skb, IP_FFN_LOCAL_IN);
+#endif
 	skb_clear_delivery_time(skb);
 	__skb_pull(skb, skb_network_header_len(skb));
 
@@ -566,6 +570,11 @@
 	if (skb == NULL)
 		return NET_RX_DROP;
 
+#ifdef CONFIG_IP_FFN
+	if (!ip_ffn_process(skb))
+		return NET_RX_SUCCESS;
+#endif
+
 	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
 		       net, NULL, skb, dev, NULL,
 		       ip_rcv_finish);
diff -ruw linux-6.4/net/ipv4/ip_output.c linux-6.4-fbx/net/ipv4/ip_output.c
--- linux-6.4/net/ipv4/ip_output.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv4/ip_output.c	2023-11-07 13:38:44.066256801 +0100
@@ -73,6 +73,7 @@
 #include <net/arp.h>
 #include <net/icmp.h>
 #include <net/checksum.h>
+#include <net/gso.h>
 #include <net/inetpeer.h>
 #include <net/inet_ecn.h>
 #include <net/lwtunnel.h>
@@ -219,6 +220,11 @@
 			return res;
 	}
 
+#ifdef CONFIG_IP_FFN
+	if (skb->ffn_state == FFN_STATE_FORWARDABLE)
+		ip_ffn_add(skb, IP_FFN_FINISH_OUT);
+#endif
+
 	rcu_read_lock();
 	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
 	if (!IS_ERR(neigh)) {
@@ -428,6 +434,11 @@
 	skb->dev = dev;
 	skb->protocol = htons(ETH_P_IP);
 
+#ifdef CONFIG_IP_FFN
+	if (skb->ffn_state == FFN_STATE_FAST_FORWARDED)
+		return ip_finish_output(net, sk, skb);
+#endif
+
 	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 			    net, sk, skb, indev, dev,
 			    ip_finish_output,
@@ -1769,4 +1780,7 @@
 #if defined(CONFIG_IP_MULTICAST)
 	igmp_mc_init();
 #endif
+#ifdef CONFIG_IP_FFN
+	ip_ffn_init();
+#endif
 }
diff -ruw linux-6.4/net/ipv4/ip_tunnel_core.c linux-6.4-fbx/net/ipv4/ip_tunnel_core.c
--- linux-6.4/net/ipv4/ip_tunnel_core.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv4/ip_tunnel_core.c	2023-05-22 20:06:45.419885082 +0200
@@ -38,6 +38,9 @@
 #include <net/geneve.h>
 #include <net/vxlan.h>
 #include <net/erspan.h>
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#include <net/netfilter/nf_conntrack.h>
+#endif
 
 const struct ip_tunnel_encap_ops __rcu *
 		iptun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
@@ -60,6 +63,11 @@
 	skb_scrub_packet(skb, xnet);
 
 	skb_clear_hash_if_not_l4(skb);
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+	if (proto == IPPROTO_IPV6)
+		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
+#endif
+
 	skb_dst_set(skb, &rt->dst);
 	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 
diff -ruw linux-6.4/net/ipv4/ipconfig.c linux-6.4-fbx/net/ipv4/ipconfig.c
--- linux-6.4/net/ipv4/ipconfig.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv4/ipconfig.c	2023-05-22 20:06:45.419885082 +0200
@@ -197,16 +197,62 @@
 static struct ic_device *ic_first_dev __initdata;	/* List of open device */
 static struct ic_device *ic_dev __initdata;		/* Selected device */
 
-static bool __init ic_is_init_dev(struct net_device *dev)
+static bool __init ic_is_init_dev(struct net_device *dev, bool partial)
 {
+	char *p = NULL;
+	bool ret;
+
 	if (dev->flags & IFF_LOOPBACK)
 		return false;
-	return user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
+
+	if (partial) {
+		p = strchr(user_dev_name, '.');
+		if (p)
+			*p = 0;
+	}
+
+	ret = false;
+	if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
 	    (!(dev->flags & IFF_LOOPBACK) &&
 	     (dev->flags & (IFF_POINTOPOINT|IFF_BROADCAST)) &&
-	     strncmp(dev->name, "dummy", 5));
+	     strncmp(dev->name, "dummy", 5)))
+		ret = true;
+	if (p)
+		*p = '.';
+	return ret;
 }
 
+#ifdef CONFIG_VLAN_8021Q
+int register_vlan_device(struct net_device *real_dev, u16 vlan_id);
+
+static void __init prepare_vlan(void)
+{
+	unsigned short oflags;
+	struct net_device *dev;
+	char *p;
+	u16 vid;
+
+	if (!strchr(user_dev_name, '.'))
+		return;
+
+	p = strchr(user_dev_name, '.');
+	*p = 0;
+	vid = simple_strtoul(p + 1, NULL, 10);
+	dev = __dev_get_by_name(&init_net, user_dev_name);
+	if (!dev)
+		goto fail;
+
+	oflags = dev->flags;
+	if (dev_change_flags(dev, oflags | IFF_UP, NULL) < 0)
+		goto fail;
+
+	register_vlan_device(dev, vid);
+
+fail:
+	*p = '.';
+}
+#endif
+
 static int __init ic_open_devs(void)
 {
 	struct ic_device *d, **last;
@@ -225,8 +271,13 @@
 			pr_err("IP-Config: Failed to open %s\n", dev->name);
 	}
 
+#ifdef CONFIG_VLAN_8021Q
+	/* register vlan device if needed */
+	prepare_vlan();
+#endif
+
 	for_each_netdev(&init_net, dev) {
-		if (ic_is_init_dev(dev)) {
+		if (ic_is_init_dev(dev, false)) {
 			int able = 0;
 			if (dev->mtu >= 364)
 				able |= IC_BOOTP;
@@ -281,7 +332,7 @@
 
 		rtnl_lock();
 		for_each_netdev(&init_net, dev)
-			if (ic_is_init_dev(dev) && netif_carrier_ok(dev)) {
+			if (ic_is_init_dev(dev, false) && netif_carrier_ok(dev)) {
 				rtnl_unlock();
 				goto have_carrier;
 			}
@@ -729,8 +780,10 @@
 			e += len;
 		}
 		if (*vendor_class_identifier) {
+#ifdef IPCONFIG_DEBUG
 			pr_info("DHCP: sending class identifier \"%s\"\n",
 				vendor_class_identifier);
+#endif
 			*e++ = 60;	/* Class-identifier */
 			len = strlen(vendor_class_identifier);
 			*e++ = len;
@@ -1445,7 +1498,7 @@
 
 		rtnl_lock();
 		for_each_netdev(&init_net, dev) {
-			if (ic_is_init_dev(dev)) {
+			if (ic_is_init_dev(dev, true)) {
 				found = 1;
 				break;
 			}
diff -ruw linux-6.4/net/ipv4/netfilter/Kconfig linux-6.4-fbx/net/ipv4/netfilter/Kconfig
--- linux-6.4/net/ipv4/netfilter/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv4/netfilter/Kconfig	2023-05-22 20:06:45.427885295 +0200
@@ -6,6 +6,13 @@
 menu "IP: Netfilter Configuration"
 	depends on INET && NETFILTER
 
+config IP_FFN
+	bool "IP: Fast forwarding and NAT"
+
+config IP_FFN_PROCFS
+	bool "IP: Fast forwarding and NAT /proc/net entries"
+	depends on IP_FFN
+
 config NF_DEFRAG_IPV4
 	tristate
 	default n
diff -ruw linux-6.4/net/ipv4/netfilter/ip_tables.c linux-6.4-fbx/net/ipv4/netfilter/ip_tables.c
--- linux-6.4/net/ipv4/netfilter/ip_tables.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv4/netfilter/ip_tables.c	2023-05-22 20:06:45.427885295 +0200
@@ -1099,6 +1099,8 @@
 	return ret;
 }
 
+extern void fbxbr_flush_cache(void);
+
 static int
 do_replace(struct net *net, sockptr_t arg, unsigned int len)
 {
@@ -1138,6 +1140,14 @@
 			   tmp.num_counters, tmp.counters);
 	if (ret)
 		goto free_newinfo_untrans;
+
+#ifdef CONFIG_FBXBRIDGE
+	fbxbr_flush_cache();
+#endif
+
+#ifdef CONFIG_IP_FFN
+	ip_ffn_flush_all();
+#endif
 	return 0;
 
  free_newinfo_untrans:
diff -ruw linux-6.4/net/ipv4/tcp.c linux-6.4-fbx/net/ipv4/tcp.c
--- linux-6.4/net/ipv4/tcp.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv4/tcp.c	2023-06-27 11:47:16.151869227 +0200
@@ -3599,6 +3599,13 @@
 			err = -EINVAL;
 		break;
 
+	case TCP_LINEAR_RTO:
+		if (val < 0 || val > 1)
+			err = -EINVAL;
+		else
+			tp->linear_rto = val;
+		break;
+
 	case TCP_REPAIR:
 		if (!tcp_can_repair_sock(sk))
 			err = -EPERM;
@@ -4203,6 +4210,9 @@
 	case TCP_THIN_DUPACK:
 		val = 0;
 		break;
+	case TCP_LINEAR_RTO:
+		val = tp->linear_rto;
+		break;
 
 	case TCP_REPAIR:
 		val = tp->repair;
diff -ruw linux-6.4/net/ipv4/tcp_offload.c linux-6.4-fbx/net/ipv4/tcp_offload.c
--- linux-6.4/net/ipv4/tcp_offload.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv4/tcp_offload.c	2023-11-07 13:38:44.066256801 +0100
@@ -9,6 +9,7 @@
 #include <linux/indirect_call_wrapper.h>
 #include <linux/skbuff.h>
 #include <net/gro.h>
+#include <net/gso.h>
 #include <net/tcp.h>
 #include <net/protocol.h>
 
diff -ruw linux-6.4/net/ipv4/tcp_timer.c linux-6.4-fbx/net/ipv4/tcp_timer.c
--- linux-6.4/net/ipv4/tcp_timer.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv4/tcp_timer.c	2023-06-27 11:47:16.151869227 +0200
@@ -587,6 +587,10 @@
 	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
 		icsk->icsk_backoff = 0;
 		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
+
+	} else if (sk->sk_state == TCP_ESTABLISHED && tp->linear_rto) {
+		icsk->icsk_backoff = 0;
+		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
 	} else {
 		/* Use normal (exponential) backoff */
 		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
diff -ruw linux-6.4/net/ipv4/udp.c linux-6.4-fbx/net/ipv4/udp.c
--- linux-6.4/net/ipv4/udp.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv4/udp.c	2023-11-07 13:38:44.066256801 +0100
@@ -103,6 +103,7 @@
 #include <net/ip_tunnels.h>
 #include <net/route.h>
 #include <net/checksum.h>
+#include <net/gso.h>
 #include <net/xfrm.h>
 #include <trace/events/udp.h>
 #include <linux/static_key.h>
@@ -315,6 +316,49 @@
 	inet_sk(sk)->inet_num = snum;
 	udp_sk(sk)->udp_port_hash = snum;
 	udp_sk(sk)->udp_portaddr_hash ^= snum;
+
+	/* resolve udp reuse conflict */
+	if (sk->sk_reuse) {
+		struct sock *sk2;
+		bool found;
+
+		found = false;
+		sk_for_each(sk2, &hslot->head) {
+			if (!net_eq(sock_net(sk2), net) ||
+			    sk2 == sk ||
+			    (udp_sk(sk2)->udp_port_hash != snum))
+				continue;
+
+			if (sk2->sk_bound_dev_if &&
+			    sk->sk_bound_dev_if &&
+			    sk2->sk_bound_dev_if != sk->sk_bound_dev_if)
+				continue;
+
+			if (!inet_rcv_saddr_equal(sk, sk2, true))
+				continue;
+
+			found = true;
+			break;
+		}
+
+		sk_for_each(sk2, &hslot->head) {
+			if (!net_eq(sock_net(sk2), net) ||
+			    sk2 == sk ||
+			    (udp_sk(sk2)->udp_port_hash != snum))
+				continue;
+
+			if (sk2->sk_bound_dev_if &&
+			    sk->sk_bound_dev_if &&
+			    sk2->sk_bound_dev_if != sk->sk_bound_dev_if)
+				continue;
+
+			if (!inet_rcv_saddr_equal(sk, sk2, true))
+				continue;
+
+			sk->sk_reuse_conflict = found;
+		}
+	}
+
 	if (sk_unhashed(sk)) {
 		if (sk->sk_reuseport &&
 		    udp_reuseport_add_sock(sk, hslot)) {
@@ -2342,6 +2386,90 @@
 	return 0;
 }
 
+/*
+ *	Unicast goes to one listener and all sockets with dup flag
+ *
+ *	Note: called only from the BH handler context.
+ *
+ *	Note2: it is okay to use the udp_table.hash table only here
+ *	and not udp_table.hash2 table as the sock is always hashed in
+ *	both udp_table.hash and udp_table.hash2. This might impact
+ *	performance if the sock hash bucket hosts more than 10 socks
+ *	but has the benefit of keeping the code simpler.
+ *
+ *	Note3: __udp_is_mcast_sock() does not really have anything to
+ *	do with multicast; it is used there to deliver the packet only to
+ *	the sockets that are bound to the ip:port/interface the skbuff
+ *	is targeted to.
+ */
+static int __udp4_lib_uc_conflict_deliver(struct net *net, struct sk_buff *skb,
+					  struct udphdr  *uh,
+					  __be32 saddr, __be32 daddr,
+					  struct udp_table *udptable,
+					  int proto)
+{
+	struct sock *sk, *first = NULL;
+	unsigned short hnum = ntohs(uh->dest);
+	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
+	int dif = skb->dev->ifindex;
+	unsigned int offset = offsetof(typeof(*sk), sk_node);
+	struct hlist_node *node;
+	struct sk_buff *nskb;
+	int sdif = inet_sdif(skb);
+	bool found_non_dup;
+
+	found_non_dup = false;
+	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
+		bool need_deliver;
+
+		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
+					 uh->source, saddr, dif, sdif, hnum))
+			continue;
+
+		if (sock_flag(sk, SOCK_UDP_DUP_UNICAST))
+			need_deliver = true;
+		else {
+			if (!found_non_dup)
+				need_deliver = true;
+			else
+				need_deliver = false;
+			found_non_dup = true;
+		}
+
+		if (!need_deliver)
+			continue;
+
+		if (!first) {
+			first = sk;
+			continue;
+		}
+		nskb = skb_clone(skb, GFP_ATOMIC);
+
+		if (unlikely(!nskb)) {
+			atomic_inc(&sk->sk_drops);
+			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
+					IS_UDPLITE(sk));
+			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
+					IS_UDPLITE(sk));
+			continue;
+		}
+
+		if (udp_queue_rcv_skb(sk, nskb) > 0)
+			consume_skb(nskb);
+	}
+
+	if (first) {
+		if (udp_queue_rcv_skb(first, skb) > 0)
+			consume_skb(skb);
+	} else {
+		kfree_skb(skb);
+		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
+				proto == IPPROTO_UDPLITE);
+	}
+
+	return 0;
+}
+
 /* Initialize UDP checksum. If exited with zero value (success),
  * CHECKSUM_UNNECESSARY means, that no more checks are required.
  * Otherwise, csum completion requires checksumming packet body,
@@ -2471,9 +2599,15 @@
 						saddr, daddr, udptable, proto);
 
 	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
-	if (sk)
-		return udp_unicast_rcv_skb(sk, skb, uh);
+	if (sk) {
+		if (sk->sk_reuse_conflict)
+			return __udp4_lib_uc_conflict_deliver(net,
+							      skb, uh,
+							      saddr, daddr,
+							      udptable, proto);
 
+		return udp_unicast_rcv_skb(sk, skb, uh);
+	}
 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 		goto drop;
 	nf_reset_ct(skb);
diff -ruw linux-6.4/net/ipv4/udp_offload.c linux-6.4-fbx/net/ipv4/udp_offload.c
--- linux-6.4/net/ipv4/udp_offload.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv4/udp_offload.c	2023-11-07 13:38:44.066256801 +0100
@@ -8,6 +8,7 @@
 
 #include <linux/skbuff.h>
 #include <net/gro.h>
+#include <net/gso.h>
 #include <net/udp.h>
 #include <net/protocol.h>
 #include <net/inet_common.h>
diff -ruw linux-6.4/net/ipv6/Makefile linux-6.4-fbx/net/ipv6/Makefile
--- linux-6.4/net/ipv6/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv6/Makefile	2023-05-22 20:06:45.487886891 +0200
@@ -13,6 +13,7 @@
 		udp_offload.o seg6.o fib6_notifier.o rpl.o ioam6.o
 
 ipv6-$(CONFIG_SYSCTL) += sysctl_net_ipv6.o
+ipv6-$(CONFIG_IPV6_FFN) += ip6_ffn.o
 ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o
 
 ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \
diff -ruw linux-6.4/net/ipv6/addrconf.c linux-6.4-fbx/net/ipv6/addrconf.c
--- linux-6.4/net/ipv6/addrconf.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv6/addrconf.c	2023-05-22 20:06:45.495887104 +0200
@@ -2310,12 +2310,27 @@
 	return 0;
 }
 
+static int addrconf_ifid_ppp(u8 *eui, struct net_device *dev)
+{
+	if (is_zero_ether_addr(dev->perm_addr))
+		return -1;
+
+	memcpy(eui, dev->perm_addr, 3);
+	memcpy(eui + 5, dev->perm_addr + 3, 3);
+	eui[3] = 0xFF;
+	eui[4] = 0xFE;
+	eui[0] ^= 2;
+	return 0;
+}
+
 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
 {
 	switch (dev->type) {
 	case ARPHRD_ETHER:
 	case ARPHRD_FDDI:
 		return addrconf_ifid_eui48(eui, dev);
+	case ARPHRD_PPP:
+		return addrconf_ifid_ppp(eui, dev);
 	case ARPHRD_ARCNET:
 		return addrconf_ifid_arcnet(eui, dev);
 	case ARPHRD_INFINIBAND:
@@ -3363,6 +3378,7 @@
 
 	if ((dev->type != ARPHRD_ETHER) &&
 	    (dev->type != ARPHRD_FDDI) &&
+	    (dev->type != ARPHRD_PPP) &&
 	    (dev->type != ARPHRD_ARCNET) &&
 	    (dev->type != ARPHRD_INFINIBAND) &&
 	    (dev->type != ARPHRD_IEEE1394) &&
diff -ruw linux-6.4/net/ipv6/af_inet6.c linux-6.4-fbx/net/ipv6/af_inet6.c
--- linux-6.4/net/ipv6/af_inet6.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv6/af_inet6.c	2023-05-22 20:06:45.499887210 +0200
@@ -1180,6 +1180,10 @@
 	if (err)
 		goto udpv6_fail;
 
+#ifdef CONFIG_IPV6_FFN
+	ipv6_ffn_init();
+#endif
+
 	err = udplitev6_init();
 	if (err)
 		goto udplitev6_fail;
diff -ruw linux-6.4/net/ipv6/esp6_offload.c linux-6.4-fbx/net/ipv6/esp6_offload.c
--- linux-6.4/net/ipv6/esp6_offload.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv6/esp6_offload.c	2023-11-07 13:38:44.070256910 +0100
@@ -17,6 +17,7 @@
 #include <linux/err.h>
 #include <linux/module.h>
 #include <net/gro.h>
+#include <net/gso.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/esp.h>
diff -ruw linux-6.4/net/ipv6/ip6_input.c linux-6.4-fbx/net/ipv6/ip6_input.c
--- linux-6.4/net/ipv6/ip6_input.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv6/ip6_input.c	2023-05-22 20:06:45.511887529 +0200
@@ -306,6 +306,12 @@
 	skb = ip6_rcv_core(skb, dev, net);
 	if (skb == NULL)
 		return NET_RX_DROP;
+
+#ifdef CONFIG_IPV6_FFN
+	if (!ipv6_ffn_process(skb))
+		return NET_RX_SUCCESS;
+#endif
+
 	return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
 		       net, NULL, skb, dev, NULL,
 		       ip6_rcv_finish);
@@ -475,8 +481,13 @@
 	kfree_skb_reason(skb, reason);
 }
 
-static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+#ifdef CONFIG_IPV6_FFN
+	if (skb->ffn_state == FFN_STATE_FORWARDABLE)
+		ipv6_ffn_add(skb, IPV6_FFN_LOCAL_IN);
+#endif
+
 	skb_clear_delivery_time(skb);
 	rcu_read_lock();
 	ip6_protocol_deliver_rcu(net, skb, 0, false);
diff -ruw linux-6.4/net/ipv6/ip6_offload.c linux-6.4-fbx/net/ipv6/ip6_offload.c
--- linux-6.4/net/ipv6/ip6_offload.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv6/ip6_offload.c	2023-11-07 13:38:44.070256910 +0100
@@ -16,6 +16,7 @@
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <net/gro.h>
+#include <net/gso.h>
 
 #include "ip6_offload.h"
 
diff -ruw linux-6.4/net/ipv6/ip6_output.c linux-6.4-fbx/net/ipv6/ip6_output.c
--- linux-6.4/net/ipv6/ip6_output.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv6/ip6_output.c	2023-11-07 13:38:44.070256910 +0100
@@ -42,6 +42,7 @@
 #include <net/sock.h>
 #include <net/snmp.h>
 
+#include <net/gso.h>
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 #include <net/protocol.h>
@@ -51,6 +52,7 @@
 #include <net/icmp.h>
 #include <net/xfrm.h>
 #include <net/checksum.h>
+#include <net/dsfield.h>
 #include <linux/mroute6.h>
 #include <net/l3mdev.h>
 #include <net/lwtunnel.h>
@@ -116,6 +118,11 @@
 			return res;
 	}
 
+#ifdef CONFIG_IPV6_FFN
+	if (skb->ffn_state == FFN_STATE_FORWARDABLE)
+		ipv6_ffn_add(skb, IPV6_FFN_FINISH_OUT);
+#endif
+
 	rcu_read_lock();
 	nexthop = rt6_nexthop((struct rt6_info *)dst, daddr);
 	neigh = __ipv6_neigh_lookup_noref(dev, nexthop);
@@ -224,6 +231,11 @@
 		return 0;
 	}
 
+#ifdef CONFIG_IP_FFN
+	if (skb->ffn_state == FFN_STATE_FAST_FORWARDED)
+		return ip6_finish_output(net, sk, skb);
+#endif
+
 	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
 			    net, sk, skb, indev, dev,
 			    ip6_finish_output,
@@ -646,6 +658,8 @@
 
 	hdr->hop_limit--;
 
+	skb->priority = rt_tos2priority(ipv6_get_dsfield(hdr));
+
 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
 		       net, NULL, skb, skb->dev, dst->dev,
 		       ip6_forward_finish);
diff -ruw linux-6.4/net/ipv6/ip6_tunnel.c linux-6.4-fbx/net/ipv6/ip6_tunnel.c
--- linux-6.4/net/ipv6/ip6_tunnel.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv6/ip6_tunnel.c	2023-05-22 20:06:45.515887636 +0200
@@ -67,9 +67,9 @@
 module_param(log_ecn_error, bool, 0644);
 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 
-static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
+static u32 HASH(const struct in6_addr *addr)
 {
-	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
+	u32 hash = ipv6_addr_hash(addr);
 
 	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
 }
@@ -114,17 +114,26 @@
 ip6_tnl_lookup(struct net *net, int link,
 	       const struct in6_addr *remote, const struct in6_addr *local)
 {
-	unsigned int hash = HASH(remote, local);
+	unsigned int hash = HASH(local);
 	struct ip6_tnl *t, *cand = NULL;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 	struct in6_addr any;
+	struct __ip6_tnl_fmr *fmr;
 
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
-		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
 		    !(t->dev->flags & IFF_UP))
 			continue;
 
+		if (!ipv6_addr_equal(remote, &t->parms.raddr)) {
+			for (fmr = t->parms.fmrs; fmr; fmr = fmr->next) {
+				if (ipv6_prefix_equal(remote, &fmr->ip6_prefix,
+						      fmr->ip6_prefix_len))
+					return t;
+			}
+			continue ;
+		}
+
 		if (link == t->parms.link)
 			return t;
 		else
@@ -132,7 +141,7 @@
 	}
 
 	memset(&any, 0, sizeof(any));
-	hash = HASH(&any, local);
+	hash = HASH(local);
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
 		    !ipv6_addr_any(&t->parms.raddr) ||
@@ -145,7 +154,7 @@
 			cand = t;
 	}
 
-	hash = HASH(remote, &any);
+	hash = HASH(&any);
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
 		    !ipv6_addr_any(&t->parms.laddr) ||
@@ -194,7 +203,7 @@
 
 	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
 		prio = 1;
-		h = HASH(remote, local);
+		h = HASH(local);
 	}
 	return &ip6n->tnls[prio][h];
 }
@@ -376,6 +385,12 @@
 	struct net *net = t->net;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
+	while (t->parms.fmrs) {
+		struct __ip6_tnl_fmr *next = t->parms.fmrs->next;
+		kfree(t->parms.fmrs);
+		t->parms.fmrs = next;
+	}
+
 	if (dev == ip6n->fb_tnl_dev)
 		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
 	else
@@ -788,6 +803,107 @@
 }
 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
 
+/**
+ * ip4ip6_fmr_calc - calculate target / source IPv6-address based on FMR
+ *   @dest: destination IPv6 address buffer
+ *   @skb: received socket buffer
+ *   @fmr: MAP FMR
+ *   @xmit: Calculate for xmit or rcv
+ **/
+static void ip4ip6_fmr_calc(struct in6_addr *dest,
+		const struct iphdr *iph, const uint8_t *end,
+		const struct __ip6_tnl_fmr *fmr, bool xmit)
+{
+	int psidlen = fmr->ea_len - (32 - fmr->ip4_prefix_len);
+	u8 *portp = NULL;
+	bool use_dest_addr;
+	const struct iphdr *dsth = iph;
+
+	if ((u8*)dsth >= end)
+		return;
+
+	/* find significant IP header */
+	if (iph->protocol == IPPROTO_ICMP) {
+		struct icmphdr *ih = (struct icmphdr*)(((u8*)dsth) + dsth->ihl * 4);
+		if (ih && ((u8*)&ih[1]) <= end && (
+			ih->type == ICMP_DEST_UNREACH ||
+			ih->type == ICMP_SOURCE_QUENCH ||
+			ih->type == ICMP_TIME_EXCEEDED ||
+			ih->type == ICMP_PARAMETERPROB ||
+			ih->type == ICMP_REDIRECT))
+				dsth = (const struct iphdr*)&ih[1];
+	}
+
+	/* in xmit-path use dest port by default and source port only if
+		this is an ICMP reply to something else; vice versa in rcv-path */
+	use_dest_addr = (xmit && dsth == iph) || (!xmit && dsth != iph);
+
+	/* get dst port */
+	if (((u8*)&dsth[1]) <= end && (
+		dsth->protocol == IPPROTO_UDP ||
+		dsth->protocol == IPPROTO_TCP ||
+		dsth->protocol == IPPROTO_SCTP ||
+		dsth->protocol == IPPROTO_DCCP)) {
+			/* for UDP, TCP, SCTP and DCCP source and dest port
+			follow IPv4 header directly */
+			portp = ((u8*)dsth) + dsth->ihl * 4;
+
+			if (use_dest_addr)
+				portp += sizeof(u16);
+	} else if (iph->protocol == IPPROTO_ICMP) {
+		struct icmphdr *ih = (struct icmphdr*)(((u8*)dsth) + dsth->ihl * 4);
+
+		/* use icmp identifier as port */
+		if (((u8*)&ih) <= end && (
+		    (use_dest_addr && (
+		    ih->type == ICMP_ECHOREPLY ||
+			ih->type == ICMP_TIMESTAMPREPLY ||
+			ih->type == ICMP_INFO_REPLY ||
+			ih->type == ICMP_ADDRESSREPLY)) ||
+			(!use_dest_addr && (
+			ih->type == ICMP_ECHO ||
+			ih->type == ICMP_TIMESTAMP ||
+			ih->type == ICMP_INFO_REQUEST ||
+			ih->type == ICMP_ADDRESS)
+			)))
+				portp = (u8*)&ih->un.echo.id;
+	}
+
+	if ((portp && &portp[2] <= end) || psidlen == 0) {
+		int frombyte = fmr->ip6_prefix_len / 8;
+		int fromrem = fmr->ip6_prefix_len % 8;
+		int bytes = sizeof(struct in6_addr) - frombyte;
+		const u32 *addr = (use_dest_addr) ? &iph->daddr : &iph->saddr;
+		u64 eabits = ((u64)ntohl(*addr)) << (32 + fmr->ip4_prefix_len);
+		u64 t = 0;
+
+		/* extract PSID from port and add it to eabits */
+		u16 psidbits = 0;
+		if (psidlen > 0) {
+			psidbits = ((u16)portp[0]) << 8 | ((u16)portp[1]);
+			psidbits >>= 16 - psidlen - fmr->offset;
+			psidbits = (u16)(psidbits << (16 - psidlen));
+			eabits |= ((u64)psidbits) << (48 - (fmr->ea_len - psidlen));
+		}
+
+		/* rewrite destination address */
+		*dest = fmr->ip6_prefix;
+		memcpy(&dest->s6_addr[10], addr, sizeof(*addr));
+		dest->s6_addr16[7] = htons(psidbits >> (16 - psidlen));
+
+		if (bytes > sizeof(u64))
+			bytes = sizeof(u64);
+
+		/* insert eabits */
+		memcpy(&t, &dest->s6_addr[frombyte], bytes);
+		t = be64_to_cpu(t) & ~(((((u64)1) << fmr->ea_len) - 1)
+			<< (64 - fmr->ea_len - fromrem));
+		t = cpu_to_be64(t | (eabits >> fromrem));
+		memcpy(&dest->s6_addr[frombyte], &t, bytes);
+	}
+}
+
+
 static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
 			 const struct tnl_ptk_info *tpi,
 			 struct metadata_dst *tun_dst,
@@ -840,6 +956,27 @@
 	skb_reset_network_header(skb);
 	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
 
+	if (tpi->proto == htons(ETH_P_IP) &&
+		!ipv6_addr_equal(&ipv6h->saddr, &tunnel->parms.raddr)) {
+			/* Packet didn't come from BR, so lookup FMR */
+			struct __ip6_tnl_fmr *fmr;
+			struct in6_addr expected = tunnel->parms.raddr;
+			for (fmr = tunnel->parms.fmrs; fmr; fmr = fmr->next)
+				if (ipv6_prefix_equal(&ipv6h->saddr,
+					&fmr->ip6_prefix, fmr->ip6_prefix_len))
+						break;
+
+			/* Check that IPv6 matches IPv4 source to prevent spoofing */
+			if (fmr)
+				ip4ip6_fmr_calc(&expected, ip_hdr(skb),
+						skb_tail_pointer(skb), fmr, false);
+
+			if (!ipv6_addr_equal(&ipv6h->saddr, &expected)) {
+				rcu_read_unlock();
+				goto drop;
+			}
+	}
+
 	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
 
 	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
@@ -987,6 +1124,7 @@
 	opt->ops.opt_nflen = 8;
 }
 
+
 /**
  * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
  *   @t: the outgoing tunnel device
@@ -1281,6 +1419,7 @@
 	const struct iphdr  *iph;
 	int encap_limit = -1;
 	__u16 offset;
+	struct __ip6_tnl_fmr *fmr;
 	struct flowi6 fl6;
 	__u8 dsfield, orig_dsfield;
 	__u32 mtu;
@@ -1376,6 +1515,18 @@
 	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
 	dsfield = INET_ECN_encapsulate(dsfield, orig_dsfield);
 
+	/* try to find matching FMR */
+	for (fmr = t->parms.fmrs; fmr; fmr = fmr->next) {
+		unsigned mshift = 32 - fmr->ip4_prefix_len;
+		if (ntohl(fmr->ip4_prefix.s_addr) >> mshift ==
+				ntohl(ip_hdr(skb)->daddr) >> mshift)
+			break;
+	}
+
+	/* change dstaddr according to FMR */
+	if (fmr)
+		ip4ip6_fmr_calc(&fl6.daddr, ip_hdr(skb), skb_tail_pointer(skb), fmr, true);
+
 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
 		return -1;
 
@@ -1528,6 +1679,14 @@
 	t->parms.link = p->link;
 	t->parms.proto = p->proto;
 	t->parms.fwmark = p->fwmark;
+
+	while (t->parms.fmrs) {
+		struct __ip6_tnl_fmr *next = t->parms.fmrs->next;
+		kfree(t->parms.fmrs);
+		t->parms.fmrs = next;
+	}
+	t->parms.fmrs = p->fmrs;
+
 	dst_cache_reset(&t->dst_cache);
 	ip6_tnl_link_config(t);
 }
@@ -1562,6 +1721,7 @@
 	p->flowinfo = u->flowinfo;
 	p->link = u->link;
 	p->proto = u->proto;
+	p->fmrs = NULL;
 	memcpy(p->name, u->name, sizeof(u->name));
 }
 
@@ -1948,13 +2108,22 @@
 	return 0;
 }
 
-static void ip6_tnl_netlink_parms(struct nlattr *data[],
+static const struct nla_policy ip6_tnl_fmr_policy[IFLA_IPTUN_FMR_MAX + 1] = {
+	[IFLA_IPTUN_FMR_IP6_PREFIX] = { .len = sizeof(struct in6_addr) },
+	[IFLA_IPTUN_FMR_IP4_PREFIX] = { .len = sizeof(struct in_addr) },
+	[IFLA_IPTUN_FMR_IP6_PREFIX_LEN] = { .type = NLA_U8 },
+	[IFLA_IPTUN_FMR_IP4_PREFIX_LEN] = { .type = NLA_U8 },
+	[IFLA_IPTUN_FMR_EA_LEN] = { .type = NLA_U8 },
+	[IFLA_IPTUN_FMR_OFFSET] = { .type = NLA_U8 }
+};
+
+static int ip6_tnl_netlink_parms(struct nlattr *data[],
 				  struct __ip6_tnl_parm *parms)
 {
 	memset(parms, 0, sizeof(*parms));
 
 	if (!data)
-		return;
+		return 0;
 
 	if (data[IFLA_IPTUN_LINK])
 		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
@@ -1985,6 +2154,52 @@
 
 	if (data[IFLA_IPTUN_FWMARK])
 		parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
+
+	if (data[IFLA_IPTUN_FMRS]) {
+		unsigned rem;
+		struct nlattr *fmr;
+
+		nla_for_each_nested(fmr, data[IFLA_IPTUN_FMRS], rem) {
+			struct nlattr *fmrd[IFLA_IPTUN_FMR_MAX + 1], *c;
+			struct __ip6_tnl_fmr *nfmr;
+			int err;
+
+			err = nla_parse_nested_deprecated(fmrd, IFLA_IPTUN_FMR_MAX,
+					       fmr, ip6_tnl_fmr_policy, NULL);
+			if (err)
+				return err;
+
+			if (!(nfmr = kzalloc(sizeof(*nfmr), GFP_KERNEL)))
+				return -ENOMEM;
+
+			nfmr->offset = 6;
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_IP6_PREFIX]))
+				nla_memcpy(&nfmr->ip6_prefix, fmrd[IFLA_IPTUN_FMR_IP6_PREFIX],
+					sizeof(nfmr->ip6_prefix));
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_IP4_PREFIX]))
+				nla_memcpy(&nfmr->ip4_prefix, fmrd[IFLA_IPTUN_FMR_IP4_PREFIX],
+					sizeof(nfmr->ip4_prefix));
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_IP6_PREFIX_LEN]))
+				nfmr->ip6_prefix_len = nla_get_u8(c);
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_IP4_PREFIX_LEN]))
+				nfmr->ip4_prefix_len = nla_get_u8(c);
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_EA_LEN]))
+				nfmr->ea_len = nla_get_u8(c);
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_OFFSET]))
+				nfmr->offset = nla_get_u8(c);
+
+			nfmr->next = parms->fmrs;
+			parms->fmrs = nfmr;
+		}
+	}
+
+	return 0;
 }
 
 static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
@@ -2005,7 +2220,9 @@
 			return err;
 	}
 
-	ip6_tnl_netlink_parms(data, &nt->parms);
+	err = ip6_tnl_netlink_parms(data, &nt->parms);
+	if (err)
+		return err;
 
 	if (nt->parms.collect_md) {
 		if (rtnl_dereference(ip6n->collect_md_tun))
@@ -2032,6 +2249,7 @@
 	struct net *net = t->net;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 	struct ip_tunnel_encap ipencap;
+	int err;
 
 	if (dev == ip6n->fb_tnl_dev)
 		return -EINVAL;
@@ -2042,7 +2260,10 @@
 		if (err < 0)
 			return err;
 	}
-	ip6_tnl_netlink_parms(data, &p);
+	err = ip6_tnl_netlink_parms(data, &p);
+	if (err)
+		return err;
+
 	if (p.collect_md)
 		return -EINVAL;
 
@@ -2068,6 +2289,12 @@
 
 static size_t ip6_tnl_get_size(const struct net_device *dev)
 {
+	const struct ip6_tnl *t = netdev_priv(dev);
+	struct __ip6_tnl_fmr *c;
+	int fmrs = 0;
+	for (c = t->parms.fmrs; c; c = c->next)
+		++fmrs;
+
 	return
 		/* IFLA_IPTUN_LINK */
 		nla_total_size(4) +
@@ -2097,6 +2324,24 @@
 		nla_total_size(0) +
 		/* IFLA_IPTUN_FWMARK */
 		nla_total_size(4) +
+		/* IFLA_IPTUN_FMRS */
+		nla_total_size(0) +
+		(
+			/* nest */
+			nla_total_size(0) +
+			/* IFLA_IPTUN_FMR_IP6_PREFIX */
+			nla_total_size(sizeof(struct in6_addr)) +
+			/* IFLA_IPTUN_FMR_IP4_PREFIX */
+			nla_total_size(sizeof(struct in_addr)) +
+			/* IFLA_IPTUN_FMR_EA_LEN */
+			nla_total_size(1) +
+			/* IFLA_IPTUN_FMR_IP6_PREFIX_LEN */
+			nla_total_size(1) +
+			/* IFLA_IPTUN_FMR_IP4_PREFIX_LEN */
+			nla_total_size(1) +
+			/* IFLA_IPTUN_FMR_OFFSET */
+			nla_total_size(1)
+		) * fmrs +
 		0;
 }
 
@@ -2104,6 +2349,9 @@
 {
 	struct ip6_tnl *tunnel = netdev_priv(dev);
 	struct __ip6_tnl_parm *parm = &tunnel->parms;
+	struct __ip6_tnl_fmr *c;
+	int fmrcnt = 0;
+	struct nlattr *fmrs;
 
 	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
 	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
@@ -2113,9 +2361,27 @@
 	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
 	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
 	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
-	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
+	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark) ||
+	    !(fmrs = nla_nest_start_noflag(skb, IFLA_IPTUN_FMRS)))
 		goto nla_put_failure;
 
+	for (c = parm->fmrs; c; c = c->next) {
+		struct nlattr *fmr = nla_nest_start_noflag(skb, ++fmrcnt);
+		if (!fmr ||
+			nla_put(skb, IFLA_IPTUN_FMR_IP6_PREFIX,
+				sizeof(c->ip6_prefix), &c->ip6_prefix) ||
+			nla_put(skb, IFLA_IPTUN_FMR_IP4_PREFIX,
+				sizeof(c->ip4_prefix), &c->ip4_prefix) ||
+			nla_put_u8(skb, IFLA_IPTUN_FMR_IP6_PREFIX_LEN, c->ip6_prefix_len) ||
+			nla_put_u8(skb, IFLA_IPTUN_FMR_IP4_PREFIX_LEN, c->ip4_prefix_len) ||
+			nla_put_u8(skb, IFLA_IPTUN_FMR_EA_LEN, c->ea_len) ||
+			nla_put_u8(skb, IFLA_IPTUN_FMR_OFFSET, c->offset))
+				goto nla_put_failure;
+
+		nla_nest_end(skb, fmr);
+	}
+	nla_nest_end(skb, fmrs);
+
 	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
@@ -2155,6 +2421,7 @@
 	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
 	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
 	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
+	[IFLA_IPTUN_FMRS]		= { .type = NLA_NESTED },
 };
 
 static struct rtnl_link_ops ip6_link_ops __read_mostly = {
diff -ruw linux-6.4/net/ipv6/netfilter/Kconfig linux-6.4-fbx/net/ipv6/netfilter/Kconfig
--- linux-6.4/net/ipv6/netfilter/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv6/netfilter/Kconfig	2023-05-22 20:06:45.515887636 +0200
@@ -6,6 +6,13 @@
 menu "IPv6: Netfilter Configuration"
 	depends on INET && IPV6 && NETFILTER
 
+config IPV6_FFN
+	bool "IPv6: Fast forwarding and NAT"
+
+config IPV6_FFN_PROCFS
+	bool "IPv6: Fast forwarding and NAT /proc/net entries"
+	depends on IPV6_FFN
+
 config NF_SOCKET_IPV6
 	tristate "IPv6 socket lookup support"
 	help
diff -ruw linux-6.4/net/ipv6/netfilter/ip6_tables.c linux-6.4-fbx/net/ipv6/netfilter/ip6_tables.c
--- linux-6.4/net/ipv6/netfilter/ip6_tables.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv6/netfilter/ip6_tables.c	2023-05-22 20:06:45.519887742 +0200
@@ -1155,6 +1155,10 @@
 			   tmp.num_counters, tmp.counters);
 	if (ret)
 		goto free_newinfo_untrans;
+
+#ifdef CONFIG_IPV6_FFN
+	ipv6_ffn_flush_all();
+#endif
 	return 0;
 
  free_newinfo_untrans:
diff -ruw linux-6.4/net/ipv6/udp.c linux-6.4-fbx/net/ipv6/udp.c
--- linux-6.4/net/ipv6/udp.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv6/udp.c	2023-05-22 20:06:45.527887955 +0200
@@ -844,6 +844,82 @@
  * Note: called only from the BH handler context,
  * so we don't need to lock the hashes.
  */
+static int __udp6_lib_uc_conflict_deliver(struct net *net, struct sk_buff *skb,
+		const struct in6_addr *saddr, const struct in6_addr *daddr,
+		struct udp_table *udptable, int proto)
+{
+	struct sock *sk, *first = NULL;
+	const struct udphdr *uh = udp_hdr(skb);
+	unsigned short hnum = ntohs(uh->dest);
+	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
+	unsigned int offset = offsetof(typeof(*sk), sk_node);
+	int dif = inet6_iif(skb);
+	int sdif = inet6_sdif(skb);
+	struct hlist_node *node;
+	struct sk_buff *nskb;
+	bool found_non_dup;
+
+	found_non_dup = false;
+	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
+		bool need_deliver;
+
+		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
+					    uh->source, saddr, dif, sdif, hnum))
+
+			continue;
+
+		/* If zero checksum and no_check is not on for
+		 * the socket then skip it.
+		 */
+		if (!uh->check && !udp_sk(sk)->no_check6_rx)
+			continue;
+
+		if (sock_flag(sk, SOCK_UDP_DUP_UNICAST))
+			need_deliver = true;
+		else {
+			if (!found_non_dup)
+				need_deliver = true;
+			else
+				need_deliver = false;
+			found_non_dup = true;
+		}
+
+		if (!need_deliver)
+			continue;
+
+		if (!first) {
+			first = sk;
+			continue;
+		}
+		nskb = skb_clone(skb, GFP_ATOMIC);
+		if (unlikely(!nskb)) {
+			atomic_inc(&sk->sk_drops);
+			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
+					 IS_UDPLITE(sk));
+			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
+					 IS_UDPLITE(sk));
+			continue;
+		}
+
+		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
+			consume_skb(nskb);
+	}
+
+	if (first) {
+		if (udpv6_queue_rcv_skb(first, skb) > 0)
+			consume_skb(skb);
+	} else {
+		kfree_skb(skb);
+		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
+				 proto == IPPROTO_UDPLITE);
+	}
+	return 0;
+}
+
+/*
+ * Note: called only from the BH handler context,
+ * so we don't need to lock the hashes.
+ */
 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 		const struct in6_addr *saddr, const struct in6_addr *daddr,
 		struct udp_table *udptable, int proto)
@@ -1018,6 +1094,12 @@
 	if (sk) {
 		if (!uh->check && !udp_sk(sk)->no_check6_rx)
 			goto report_csum_error;
+
+		if (sk->sk_reuse_conflict)
+			return __udp6_lib_uc_conflict_deliver(net, skb,
+						      saddr, daddr,
+						      udptable, proto);
+
 		return udp6_unicast_rcv_skb(sk, skb, uh);
 	}
 
diff -ruw linux-6.4/net/ipv6/udp_offload.c linux-6.4-fbx/net/ipv6/udp_offload.c
--- linux-6.4/net/ipv6/udp_offload.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/ipv6/udp_offload.c	2023-11-07 13:38:44.070256910 +0100
@@ -14,6 +14,7 @@
 #include <net/ip6_checksum.h>
 #include "ip6_offload.h"
 #include <net/gro.h>
+#include <net/gso.h>
 
 static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 					 netdev_features_t features)
diff -ruw linux-6.4/net/mac80211/Kconfig linux-6.4-fbx/net/mac80211/Kconfig
--- linux-6.4/net/mac80211/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/Kconfig	2023-12-12 17:24:34.171627426 +0100
@@ -57,6 +57,17 @@
 comment "Some wireless drivers require a rate control algorithm"
 	depends on MAC80211 && MAC80211_HAS_RC=n
 
+config MAC80211_KUNIT_TEST
+	tristate "KUnit tests for mac80211" if !KUNIT_ALL_TESTS
+	depends on KUNIT
+	depends on MAC80211
+	default KUNIT_ALL_TESTS
+	depends on !KERNEL_6_2
+	help
+	  Enable this option to test mac80211 internals with kunit.
+
+	  If unsure, say N.
+
 config MAC80211_MESH
 	bool "Enable mac80211 mesh networking support"
 	depends on MAC80211
@@ -304,3 +315,9 @@
 	  connect more stations than the number selected here.)
 
 	  If unsure, leave the default of 0.
+
+config FBX80211_SCUM
+	bool "Same channel unassociated metrics"
+	depends on FBX80211
+	help
+	  Support for unassociated STA metrics
diff -ruw linux-6.4/net/mac80211/Makefile linux-6.4-fbx/net/mac80211/Makefile
--- linux-6.4/net/mac80211/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/Makefile	2024-04-19 16:04:28.969736104 +0200
@@ -36,7 +36,8 @@
 	tdls.o \
 	ocb.o \
 	airtime.o \
-	eht.o
+	eht.o \
+	nmeshd_nl.o
 
 mac80211-$(CONFIG_MAC80211_LEDS) += led.o
 mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
@@ -54,6 +55,7 @@
 	mesh_ps.o
 
 mac80211-$(CONFIG_PM) += pm.o
+mac80211-$(CONFIG_FBX80211_SCUM) += fbx_scum.o
 
 CFLAGS_trace.o := -I$(src)
 
@@ -65,4 +67,6 @@
 
 mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y)
 
+obj-y += tests/
+
 ccflags-y += -DDEBUG
diff -ruw linux-6.4/net/mac80211/agg-rx.c linux-6.4-fbx/net/mac80211/agg-rx.c
--- linux-6.4/net/mac80211/agg-rx.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/agg-rx.c	2023-11-07 13:38:44.070256910 +0100
@@ -9,7 +9,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
  * Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
  */
 
 /**
@@ -55,7 +55,7 @@
 	kfree(tid_rx);
 }
 
-void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
+void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 				     u16 initiator, u16 reason, bool tx)
 {
 	struct ieee80211_local *local = sta->local;
@@ -69,10 +69,10 @@
 		.ssn = 0,
 	};
 
-	lockdep_assert_held(&sta->ampdu_mlme.mtx);
+	lockdep_assert_wiphy(sta->local->hw.wiphy);
 
 	tid_rx = rcu_dereference_protected(sta->ampdu_mlme.tid_rx[tid],
-					lockdep_is_held(&sta->ampdu_mlme.mtx));
+					lockdep_is_held(&sta->local->hw.wiphy->mtx));
 
 	if (!test_bit(tid, sta->ampdu_mlme.agg_session_valid))
 		return;
@@ -114,14 +114,6 @@
 	call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
 }
 
-void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
-				    u16 initiator, u16 reason, bool tx)
-{
-	mutex_lock(&sta->ampdu_mlme.mtx);
-	___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason, tx);
-	mutex_unlock(&sta->ampdu_mlme.mtx);
-}
-
 void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
 				  const u8 *addr)
 {
@@ -140,7 +132,7 @@
 		if (ba_rx_bitmap & BIT(i))
 			set_bit(i, sta->ampdu_mlme.tid_rx_stop_requested);
 
-	ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
+	wiphy_work_queue(sta->local->hw.wiphy, &sta->ampdu_mlme.work);
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_stop_rx_ba_session);
@@ -166,7 +158,7 @@
 	       sta->sta.addr, tid);
 
 	set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired);
-	ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
+	wiphy_work_queue(sta->local->hw.wiphy, &sta->ampdu_mlme.work);
 }
 
 static void sta_rx_agg_reorder_timer_expired(struct timer_list *t)
@@ -250,7 +242,7 @@
 	ieee80211_tx_skb(sdata, skb);
 }
 
-void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
+void __ieee80211_start_rx_ba_session(struct sta_info *sta,
 				      u8 dialog_token, u16 timeout,
 				      u16 start_seq_num, u16 ba_policy, u16 tid,
 				      u16 buf_size, bool tx, bool auto_seq,
@@ -270,6 +262,8 @@
 	u16 status = WLAN_STATUS_REQUEST_DECLINED;
 	u16 max_buf_size;
 
+	lockdep_assert_wiphy(sta->local->hw.wiphy);
+
 	if (tid >= IEEE80211_FIRST_TSPEC_TSID) {
 		ht_dbg(sta->sdata,
 		       "STA %pM requests BA session on unsupported tid %d\n",
@@ -325,9 +319,6 @@
 	ht_dbg(sta->sdata, "AddBA Req buf_size=%d for %pM\n",
 	       buf_size, sta->sta.addr);
 
-	/* examine state machine */
-	lockdep_assert_held(&sta->ampdu_mlme.mtx);
-
 	if (test_bit(tid, sta->ampdu_mlme.agg_session_valid)) {
 		if (sta->ampdu_mlme.tid_rx_token[tid] == dialog_token) {
 			struct tid_ampdu_rx *tid_rx;
@@ -355,7 +346,7 @@
 				   sta->sta.addr, tid);
 
 		/* delete existing Rx BA session on the same tid */
-		___ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
+		__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
 						WLAN_STATUS_UNSPECIFIED_QOS,
 						false);
 	}
@@ -444,20 +435,6 @@
 					  timeout, addbaext);
 }
 
-static void __ieee80211_start_rx_ba_session(struct sta_info *sta,
-					    u8 dialog_token, u16 timeout,
-					    u16 start_seq_num, u16 ba_policy,
-					    u16 tid, u16 buf_size, bool tx,
-					    bool auto_seq,
-					    const struct ieee80211_addba_ext_ie *addbaext)
-{
-	mutex_lock(&sta->ampdu_mlme.mtx);
-	___ieee80211_start_rx_ba_session(sta, dialog_token, timeout,
-					 start_seq_num, ba_policy, tid,
-					 buf_size, tx, auto_seq, addbaext);
-	mutex_unlock(&sta->ampdu_mlme.mtx);
-}
-
 void ieee80211_process_addba_request(struct ieee80211_local *local,
 				     struct sta_info *sta,
 				     struct ieee80211_mgmt *mgmt,
@@ -507,7 +484,6 @@
 				 const u8 *addr, unsigned int tid)
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
 
 	rcu_read_lock();
@@ -516,7 +492,7 @@
 		goto unlock;
 
 	set_bit(tid, sta->ampdu_mlme.tid_rx_manage_offl);
-	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+	wiphy_work_queue(sta->local->hw.wiphy, &sta->ampdu_mlme.work);
  unlock:
 	rcu_read_unlock();
 }
@@ -526,7 +502,6 @@
 				   const u8 *addr, unsigned int tid)
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
 
 	rcu_read_lock();
@@ -535,7 +510,7 @@
 		goto unlock;
 
 	set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired);
-	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+	wiphy_work_queue(sta->local->hw.wiphy, &sta->ampdu_mlme.work);
 
  unlock:
 	rcu_read_unlock();
diff -ruw linux-6.4/net/mac80211/agg-tx.c linux-6.4-fbx/net/mac80211/agg-tx.c
--- linux-6.4/net/mac80211/agg-tx.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/agg-tx.c	2023-11-07 13:38:44.070256910 +0100
@@ -9,7 +9,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
  * Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2022 Intel Corporation
+ * Copyright (C) 2018 - 2023 Intel Corporation
  */
 
 #include <linux/ieee80211.h>
@@ -139,10 +139,18 @@
 }
 EXPORT_SYMBOL(ieee80211_send_bar);
 
+void ieee80211_send_bar_sta(struct ieee80211_sta *pubsta,
+			    u16 tid, u16 ssn)
+{
+	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+	ieee80211_send_bar(&sta->sdata->vif, pubsta->addr, tid, ssn);
+}
+EXPORT_SYMBOL(ieee80211_send_bar_sta);
+
 void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
 			     struct tid_ampdu_tx *tid_tx)
 {
-	lockdep_assert_held(&sta->ampdu_mlme.mtx);
+	lockdep_assert_wiphy(sta->local->hw.wiphy);
 	lockdep_assert_held(&sta->lock);
 	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
 }
@@ -213,7 +221,7 @@
 	struct ieee80211_txq *txq = sta->sta.txq[tid];
 	struct txq_info *txqi;
 
-	lockdep_assert_held(&sta->ampdu_mlme.mtx);
+	lockdep_assert_wiphy(sta->local->hw.wiphy);
 
 	if (!txq)
 		return;
@@ -271,7 +279,7 @@
 {
 	struct tid_ampdu_tx *tid_tx;
 
-	lockdep_assert_held(&sta->ampdu_mlme.mtx);
+	lockdep_assert_wiphy(sta->local->hw.wiphy);
 	lockdep_assert_held(&sta->lock);
 
 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
@@ -296,7 +304,7 @@
 	kfree_rcu(tid_tx, rcu_head);
 }
 
-int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
+int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 				    enum ieee80211_agg_stop_reason reason)
 {
 	struct ieee80211_local *local = sta->local;
@@ -311,7 +319,7 @@
 	};
 	int ret;
 
-	lockdep_assert_held(&sta->ampdu_mlme.mtx);
+	lockdep_assert_wiphy(sta->local->hw.wiphy);
 
 	switch (reason) {
 	case AGG_STOP_DECLINED:
@@ -457,6 +465,12 @@
 	u8 tid = tid_tx->tid;
 	u16 buf_size;
 
+	if (WARN_ON_ONCE(test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state) ||
+			 test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state)))
+		return;
+
+	lockdep_assert_wiphy(sta->local->hw.wiphy);
+
 	/* activate the timer for the recipient's addBA response */
 	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
 	ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
@@ -491,7 +505,7 @@
 {
 	struct tid_ampdu_tx *tid_tx;
 	struct ieee80211_local *local = sta->local;
-	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct ieee80211_ampdu_params params = {
 		.sta = &sta->sta,
 		.action = IEEE80211_AMPDU_TX_START,
@@ -519,7 +533,6 @@
 	 */
 	synchronize_net();
 
-	sdata = sta->sdata;
 	params.ssn = sta->tid_seq[tid] >> 4;
 	ret = drv_ampdu_action(local, sdata, &params);
 	tid_tx->ssn = params.ssn;
@@ -533,9 +546,6 @@
 		 */
 		set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state);
 	} else if (ret) {
-		if (!sdata)
-			return;
-
 		ht_dbg(sdata,
 		       "BA request denied - HW unavailable for %pM tid %d\n",
 		       sta->sta.addr, tid);
@@ -737,7 +747,7 @@
 	 */
 	sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;
 
-	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+	wiphy_work_queue(local->hw.wiphy, &sta->ampdu_mlme.work);
 
 	/* this flow continues off the work */
  err_unlock_sta:
@@ -758,7 +768,7 @@
 		.ssn = 0,
 	};
 
-	lockdep_assert_held(&sta->ampdu_mlme.mtx);
+	lockdep_assert_wiphy(sta->local->hw.wiphy);
 
 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 	params.buf_size = tid_tx->buf_size;
@@ -795,9 +805,15 @@
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct ieee80211_local *local = sdata->local;
 
+	lockdep_assert_wiphy(sta->local->hw.wiphy);
+
 	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
 		return;
 
+	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state) ||
+	    test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))
+		return;
+
 	if (!test_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state)) {
 		ieee80211_send_addba_with_timeout(sta, tid_tx);
 		/* RESPONSE_RECEIVED state whould trigger the flow again */
@@ -850,26 +866,12 @@
 		goto out;
 
 	set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
-	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+	wiphy_work_queue(local->hw.wiphy, &sta->ampdu_mlme.work);
  out:
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
 
-int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
-				   enum ieee80211_agg_stop_reason reason)
-{
-	int ret;
-
-	mutex_lock(&sta->ampdu_mlme.mtx);
-
-	ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason);
-
-	mutex_unlock(&sta->ampdu_mlme.mtx);
-
-	return ret;
-}
-
 int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 {
 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
@@ -904,7 +906,7 @@
 	}
 
 	set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
-	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+	wiphy_work_queue(local->hw.wiphy, &sta->ampdu_mlme.work);
 
  unlock:
 	spin_unlock_bh(&sta->lock);
@@ -964,7 +966,7 @@
 		goto out;
 
 	set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
-	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+	wiphy_work_queue(local->hw.wiphy, &sta->ampdu_mlme.work);
  out:
 	rcu_read_unlock();
 }
@@ -981,6 +983,8 @@
 	u16 capab, tid, buf_size;
 	bool amsdu;
 
+	lockdep_assert_wiphy(sta->local->hw.wiphy);
+
 	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
 	amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
 	tid = u16_get_bits(capab, IEEE80211_ADDBA_PARAM_TID_MASK);
@@ -991,16 +995,14 @@
 	if (!amsdu && txq)
 		set_bit(IEEE80211_TXQ_NO_AMSDU, &to_txq_info(txq)->flags);
 
-	mutex_lock(&sta->ampdu_mlme.mtx);
-
 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 	if (!tid_tx)
-		goto out;
+		return;
 
 	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
 		ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n",
 		       sta->sta.addr, tid);
-		goto out;
+		return;
 	}
 
 	del_timer_sync(&tid_tx->addba_resp_timer);
@@ -1018,7 +1020,7 @@
 		ht_dbg(sta->sdata,
 		       "got addBA resp for %pM tid %d but we already gave up\n",
 		       sta->sta.addr, tid);
-		goto out;
+		return;
 	}
 
 	/*
@@ -1032,7 +1034,7 @@
 		if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
 				     &tid_tx->state)) {
 			/* ignore duplicate response */
-			goto out;
+			return;
 		}
 
 		tid_tx->buf_size = buf_size;
@@ -1053,9 +1055,6 @@
 		}
 
 	} else {
-		___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED);
+		__ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED);
 	}
-
- out:
-	mutex_unlock(&sta->ampdu_mlme.mtx);
 }
diff -ruw linux-6.4/net/mac80211/airtime.c linux-6.4-fbx/net/mac80211/airtime.c
--- linux-6.4/net/mac80211/airtime.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/airtime.c	2023-11-07 13:38:44.070256910 +0100
@@ -557,7 +557,7 @@
 	if (ieee80211_fill_rate_info(hw, stat, band, ri))
 		return 0;
 
-	if (rate->idx < 0 || !rate->count)
+	if (!ieee80211_rate_valid(rate))
 		return -1;
 
 	if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
@@ -632,7 +632,7 @@
 {
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_chanctx_conf *conf;
-	int rateidx, shift = 0;
+	int rateidx;
 	bool cck, short_pream;
 	u32 basic_rates;
 	u8 band = 0;
@@ -641,10 +641,8 @@
 	len += 38; /* Ethernet header length */
 
 	conf = rcu_dereference(vif->bss_conf.chanctx_conf);
-	if (conf) {
+	if (conf)
 		band = conf->def.chan->band;
-		shift = ieee80211_chandef_get_shift(&conf->def);
-	}
 
 	if (pubsta) {
 		struct sta_info *sta = container_of(pubsta, struct sta_info,
@@ -704,7 +702,7 @@
 	short_pream = vif->bss_conf.use_short_preamble;
 
 	rateidx = basic_rates ? ffs(basic_rates) - 1 : 0;
-	rate = sband->bitrates[rateidx].bitrate << shift;
+	rate = sband->bitrates[rateidx].bitrate;
 	cck = sband->bitrates[rateidx].flags & IEEE80211_RATE_MANDATORY_B;
 
 	return ieee80211_calc_legacy_rate_duration(rate, short_pream, cck, len);
diff -ruw linux-6.4/net/mac80211/cfg.c linux-6.4-fbx/net/mac80211/cfg.c
--- linux-6.4/net/mac80211/cfg.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/cfg.c	2024-04-19 16:04:28.969736104 +0200
@@ -35,7 +35,7 @@
 		 * the return value at all (if it's not a pairwise key),
 		 * so in that case (require_valid==false) don't error.
 		 */
-		if (require_valid && sdata->vif.valid_links)
+		if (require_valid && ieee80211_vif_is_mld(&sdata->vif))
 			return ERR_PTR(-EINVAL);
 
 		return &sdata->deflink;
@@ -214,6 +214,8 @@
 	struct sta_info *sta;
 	int ret;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	ret = ieee80211_if_change_type(sdata, type);
 	if (ret)
 		return ret;
@@ -228,19 +230,17 @@
 			return 0;
 
 		/* FIXME: no support for 4-addr MLO yet */
-		if (sdata->vif.valid_links)
+		if (ieee80211_vif_is_mld(&sdata->vif))
 			return -EOPNOTSUPP;
 
 		sdata->u.mgd.use_4addr = params->use_4addr;
 		if (!ifmgd->associated)
 			return 0;
 
-		mutex_lock(&local->sta_mtx);
 		sta = sta_info_get(sdata, sdata->deflink.u.mgd.bssid);
 		if (sta)
 			drv_sta_set_4addr(local, sdata, &sta->sta,
 					  params->use_4addr);
-		mutex_unlock(&local->sta_mtx);
 
 		if (params->use_4addr)
 			ieee80211_send_4addr_nullfunc(local, sdata);
@@ -261,9 +261,9 @@
 	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 	int ret;
 
-	mutex_lock(&sdata->local->chanctx_mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
 	ret = ieee80211_check_combinations(sdata, NULL, 0, 0);
-	mutex_unlock(&sdata->local->chanctx_mtx);
 	if (ret < 0)
 		return ret;
 
@@ -283,9 +283,9 @@
 	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 	int ret;
 
-	mutex_lock(&sdata->local->chanctx_mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
 	ret = ieee80211_check_combinations(sdata, NULL, 0, 0);
-	mutex_unlock(&sdata->local->chanctx_mtx);
 	if (ret < 0)
 		return ret;
 
@@ -452,13 +452,11 @@
 	if (sta->ptk_idx == key_idx)
 		return 0;
 
-	mutex_lock(&local->key_mtx);
-	key = key_mtx_dereference(local, sta->ptk[key_idx]);
+	key = wiphy_dereference(local->hw.wiphy, sta->ptk[key_idx]);
 
 	if (key && key->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX)
 		ret = ieee80211_set_tx_key(key);
 
-	mutex_unlock(&local->key_mtx);
 	return ret;
 }
 
@@ -474,6 +472,8 @@
 	struct ieee80211_key *key;
 	int err;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!ieee80211_sdata_running(sdata))
 		return -ENETDOWN;
 
@@ -510,8 +510,6 @@
 	if (params->mode == NL80211_KEY_NO_TX)
 		key->conf.flags |= IEEE80211_KEY_FLAG_NO_AUTO_TX;
 
-	mutex_lock(&local->sta_mtx);
-
 	if (mac_addr) {
 		sta = sta_info_get_bss(sdata, mac_addr);
 		/*
@@ -526,8 +524,7 @@
 		 */
 		if (!sta || !test_sta_flag(sta, WLAN_STA_ASSOC)) {
 			ieee80211_key_free_unused(key);
-			err = -ENOENT;
-			goto out_unlock;
+			return -ENOENT;
 		}
 	}
 
@@ -566,10 +563,9 @@
 	}
 
 	err = ieee80211_key_link(key, link, sta);
-
- out_unlock:
-	mutex_unlock(&local->sta_mtx);
-
+	/* KRACK protection, shouldn't happen but just silently accept key */
+	if (err == -EALREADY)
+		err = 0;
 	return err;
 }
 
@@ -582,8 +578,7 @@
 	struct ieee80211_key *key;
 
 	if (link_id >= 0) {
-		link = rcu_dereference_check(sdata->link[link_id],
-					     lockdep_is_held(&sdata->wdev.mtx));
+		link = sdata_dereference(sdata->link[link_id], sdata);
 		if (!link)
 			return NULL;
 	}
@@ -598,7 +593,7 @@
 
 		if (link_id >= 0) {
 			link_sta = rcu_dereference_check(sta->link[link_id],
-							 lockdep_is_held(&local->sta_mtx));
+							 lockdep_is_held(&local->hw.wiphy->mtx));
 			if (!link_sta)
 				return NULL;
 		} else {
@@ -606,30 +601,29 @@
 		}
 
 		if (pairwise && key_idx < NUM_DEFAULT_KEYS)
-			return rcu_dereference_check_key_mtx(local,
+			return wiphy_dereference(local->hw.wiphy,
 							     sta->ptk[key_idx]);
 
 		if (!pairwise &&
 		    key_idx < NUM_DEFAULT_KEYS +
 			      NUM_DEFAULT_MGMT_KEYS +
 			      NUM_DEFAULT_BEACON_KEYS)
-			return rcu_dereference_check_key_mtx(local,
+			return wiphy_dereference(local->hw.wiphy,
 							     link_sta->gtk[key_idx]);
 
 		return NULL;
 	}
 
 	if (pairwise && key_idx < NUM_DEFAULT_KEYS)
-		return rcu_dereference_check_key_mtx(local,
-						     sdata->keys[key_idx]);
+		return wiphy_dereference(local->hw.wiphy, sdata->keys[key_idx]);
 
-	key = rcu_dereference_check_key_mtx(local, link->gtk[key_idx]);
+	key = wiphy_dereference(local->hw.wiphy, link->gtk[key_idx]);
 	if (key)
 		return key;
 
 	/* or maybe it was a WEP key */
 	if (key_idx < NUM_DEFAULT_KEYS)
-		return rcu_dereference_check_key_mtx(local, sdata->keys[key_idx]);
+		return wiphy_dereference(local->hw.wiphy, sdata->keys[key_idx]);
 
 	return NULL;
 }
@@ -641,25 +635,16 @@
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_key *key;
-	int ret;
 
-	mutex_lock(&local->sta_mtx);
-	mutex_lock(&local->key_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	key = ieee80211_lookup_key(sdata, link_id, key_idx, pairwise, mac_addr);
-	if (!key) {
-		ret = -ENOENT;
-		goto out_unlock;
-	}
+	if (!key)
+		return -ENOENT;
 
 	ieee80211_key_free(key, sdata->vif.type == NL80211_IFTYPE_STATION);
 
-	ret = 0;
- out_unlock:
-	mutex_unlock(&local->key_mtx);
-	mutex_unlock(&local->sta_mtx);
-
-	return ret;
+	return 0;
 }
 
 static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
@@ -830,15 +815,11 @@
 		rinfo->nss = ieee80211_rate_get_vht_nss(rate);
 	} else {
 		struct ieee80211_supported_band *sband;
-		int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
-		u16 brate;
 
 		sband = ieee80211_get_sband(sta->sdata);
 		WARN_ON_ONCE(sband && !sband->bitrates);
-		if (sband && sband->bitrates) {
-			brate = sband->bitrates[rate->idx].bitrate;
-			rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
-		}
+		if (sband && sband->bitrates)
+			rinfo->legacy = sband->bitrates[rate->idx].bitrate;
 	}
 	if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
 		rinfo->bw = RATE_INFO_BW_40;
@@ -860,7 +841,7 @@
 	struct sta_info *sta;
 	int ret = -ENOENT;
 
-	mutex_lock(&local->sta_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	sta = sta_info_get_by_idx(sdata, idx);
 	if (sta) {
@@ -869,8 +850,6 @@
 		sta_set_sinfo(sta, sinfo, true);
 	}
 
-	mutex_unlock(&local->sta_mtx);
-
 	return ret;
 }
 
@@ -890,7 +869,7 @@
 	struct sta_info *sta;
 	int ret = -ENOENT;
 
-	mutex_lock(&local->sta_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	sta = sta_info_get_bss(sdata, mac);
 	if (sta) {
@@ -898,8 +877,6 @@
 		sta_set_sinfo(sta, sinfo, true);
 	}
 
-	mutex_unlock(&local->sta_mtx);
-
 	return ret;
 }
 
@@ -910,10 +887,11 @@
 	struct ieee80211_sub_if_data *sdata;
 	int ret = 0;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (cfg80211_chandef_identical(&local->monitor_chandef, chandef))
 		return 0;
 
-	mutex_lock(&local->mtx);
 	if (local->use_chanctx) {
 		sdata = wiphy_dereference(local->hw.wiphy,
 					  local->monitor_sdata);
@@ -923,14 +901,15 @@
 							 chandef,
 							 IEEE80211_CHANCTX_EXCLUSIVE);
 		}
-	} else if (local->open_count == local->monitors) {
+	} else {
+		if (local->open_count == local->monitors) {
 		local->_oper_chandef = *chandef;
 		ieee80211_hw_config(local, 0);
 	}
+	}
 
 	if (ret == 0)
 		local->monitor_chandef = *chandef;
-	mutex_unlock(&local->mtx);
 
 	return ret;
 }
@@ -978,25 +957,29 @@
 	struct fils_discovery_data *new, *old = NULL;
 	struct ieee80211_fils_discovery *fd;
 
-	if (!params->tmpl || !params->tmpl_len)
-		return -EINVAL;
+	if (!params->update)
+		return 0;
 
 	fd = &link_conf->fils_discovery;
 	fd->min_interval = params->min_interval;
 	fd->max_interval = params->max_interval;
 
 	old = sdata_dereference(link->u.ap.fils_discovery, sdata);
+	if (old)
+		kfree_rcu(old, rcu_head);
+
+	if (params->tmpl && params->tmpl_len) {
 	new = kzalloc(sizeof(*new) + params->tmpl_len, GFP_KERNEL);
 	if (!new)
 		return -ENOMEM;
 	new->len = params->tmpl_len;
 	memcpy(new->data, params->tmpl, params->tmpl_len);
 	rcu_assign_pointer(link->u.ap.fils_discovery, new);
+	} else {
+		RCU_INIT_POINTER(link->u.ap.fils_discovery, NULL);
+	}
 
-	if (old)
-		kfree_rcu(old, rcu_head);
-
-	return 0;
+	return BSS_CHANGED_FILS_DISCOVERY;
 }
 
 static int
@@ -1007,23 +990,27 @@
 {
 	struct unsol_bcast_probe_resp_data *new, *old = NULL;
 
-	if (!params->tmpl || !params->tmpl_len)
-		return -EINVAL;
+	if (!params->update)
+		return 0;
+
+	link_conf->unsol_bcast_probe_resp_interval = params->interval;
 
 	old = sdata_dereference(link->u.ap.unsol_bcast_probe_resp, sdata);
+	if (old)
+		kfree_rcu(old, rcu_head);
+
+	if (params->tmpl && params->tmpl_len) {
 	new = kzalloc(sizeof(*new) + params->tmpl_len, GFP_KERNEL);
 	if (!new)
 		return -ENOMEM;
 	new->len = params->tmpl_len;
 	memcpy(new->data, params->tmpl, params->tmpl_len);
 	rcu_assign_pointer(link->u.ap.unsol_bcast_probe_resp, new);
+	} else {
+		RCU_INIT_POINTER(link->u.ap.unsol_bcast_probe_resp, NULL);
+	}
 
-	if (old)
-		kfree_rcu(old, rcu_head);
-
-	link_conf->unsol_bcast_probe_resp_interval = params->interval;
-
-	return 0;
+	return BSS_CHANGED_UNSOL_BCAST_PROBE_RESP;
 }
 
 static int ieee80211_set_ftm_responder_params(
@@ -1101,18 +1088,20 @@
 	return offset;
 }
 
-static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
+static int
+ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
 				   struct ieee80211_link_data *link,
 				   struct cfg80211_beacon_data *params,
 				   const struct ieee80211_csa_settings *csa,
-				   const struct ieee80211_color_change_settings *cca)
+			const struct ieee80211_color_change_settings *cca,
+			u64 *changed)
 {
 	struct cfg80211_mbssid_elems *mbssid = NULL;
 	struct cfg80211_rnr_elems *rnr = NULL;
 	struct beacon_data *new, *old;
 	int new_head_len, new_tail_len;
 	int size, err;
-	u32 changed = BSS_CHANGED_BEACON;
+	u64 _changed = BSS_CHANGED_BEACON;
 	struct ieee80211_bss_conf *link_conf = link->conf;
 
 	old = sdata_dereference(link->u.ap.beacon, sdata);
@@ -1219,7 +1208,7 @@
 		return err;
 	}
 	if (err == 0)
-		changed |= BSS_CHANGED_AP_PROBE_RESP;
+		_changed |= BSS_CHANGED_AP_PROBE_RESP;
 
 	if (params->ftm_responder != -1) {
 		link_conf->ftm_responder = params->ftm_responder;
@@ -1235,7 +1224,7 @@
 			return err;
 		}
 
-		changed |= BSS_CHANGED_FTM_RESPONDER;
+		_changed |= BSS_CHANGED_FTM_RESPONDER;
 	}
 
 	rcu_assign_pointer(link->u.ap.beacon, new);
@@ -1244,7 +1233,8 @@
 	if (old)
 		kfree_rcu(old, rcu_head);
 
-	return changed;
+	*changed |= _changed;
+	return 0;
 }
 
 static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
@@ -1266,6 +1256,8 @@
 	struct ieee80211_link_data *link;
 	struct ieee80211_bss_conf *link_conf;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	link = sdata_dereference(sdata->link[link_id], sdata);
 	if (!link)
 		return -ENOLINK;
@@ -1360,10 +1352,14 @@
 				(IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
 				 IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
 				 IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ);
+		link_conf->eht_80mhz_full_bw_ul_mumimo =
+			params->eht_cap->fixed.phy_cap_info[7] &
+			IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ;
 	} else {
 		link_conf->eht_su_beamformer = false;
 		link_conf->eht_su_beamformee = false;
 		link_conf->eht_mu_beamformer = false;
+		link_conf->eht_80mhz_full_bw_ul_mumimo = false;
 	}
 
 	if (sdata->vif.type == NL80211_IFTYPE_AP &&
@@ -1375,12 +1371,10 @@
 			return err;
 	}
 
-	mutex_lock(&local->mtx);
 	err = ieee80211_link_use_channel(link, &params->chandef,
 					 IEEE80211_CHANCTX_SHARED);
 	if (!err)
 		ieee80211_link_copy_chanctx_to_vlans(link, false);
-	mutex_unlock(&local->mtx);
 	if (err) {
 		link_conf->beacon_int = prev_beacon_int;
 		return err;
@@ -1446,28 +1440,23 @@
 	if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL))
 		link_conf->beacon_tx_rate = params->beacon_rate;
 
-	err = ieee80211_assign_beacon(sdata, link, &params->beacon, NULL, NULL);
+	err = ieee80211_assign_beacon(sdata, link, &params->beacon, NULL, NULL,
+				      &changed);
 	if (err < 0)
 		goto error;
-	changed |= err;
 
-	if (params->fils_discovery.max_interval) {
-		err = ieee80211_set_fils_discovery(sdata,
-						   &params->fils_discovery,
+	err = ieee80211_set_fils_discovery(sdata, &params->fils_discovery,
 						   link, link_conf);
 		if (err < 0)
 			goto error;
-		changed |= BSS_CHANGED_FILS_DISCOVERY;
-	}
+	changed |= err;
 
-	if (params->unsol_bcast_probe_resp.interval) {
 		err = ieee80211_set_unsol_bcast_probe_resp(sdata,
 							   &params->unsol_bcast_probe_resp,
 							   link, link_conf);
 		if (err < 0)
 			goto error;
-		changed |= BSS_CHANGED_UNSOL_BCAST_PROBE_RESP;
-	}
+	changed |= err;
 
 	err = drv_start_ap(sdata->local, sdata, link_conf);
 	if (err) {
@@ -1491,25 +1480,26 @@
 	return 0;
 
 error:
-	mutex_lock(&local->mtx);
 	ieee80211_link_release_channel(link);
-	mutex_unlock(&local->mtx);
 
 	return err;
 }
 
 static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
-				   struct cfg80211_beacon_data *params)
+				   struct cfg80211_ap_update *params)
+
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_link_data *link;
+	struct cfg80211_beacon_data *beacon = &params->beacon;
 	struct beacon_data *old;
 	int err;
 	struct ieee80211_bss_conf *link_conf;
+	u64 changed = 0;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(wiphy);
 
-	link = sdata_dereference(sdata->link[params->link_id], sdata);
+	link = sdata_dereference(sdata->link[beacon->link_id], sdata);
 	if (!link)
 		return -ENOLINK;
 
@@ -1525,17 +1515,31 @@
 	if (!old)
 		return -ENOENT;
 
-	err = ieee80211_assign_beacon(sdata, link, params, NULL, NULL);
+	err = ieee80211_assign_beacon(sdata, link, beacon, NULL, NULL,
+				      &changed);
+	if (err < 0)
+		return err;
+
+	err = ieee80211_set_fils_discovery(sdata, &params->fils_discovery,
+					   link, link_conf);
+	if (err < 0)
+		return err;
+	changed |= err;
+
+	err = ieee80211_set_unsol_bcast_probe_resp(sdata,
+						   &params->unsol_bcast_probe_resp,
+						   link, link_conf);
 	if (err < 0)
 		return err;
+	changed |= err;
 
-	if (params->he_bss_color_valid &&
-	    params->he_bss_color.enabled != link_conf->he_bss_color.enabled) {
-		link_conf->he_bss_color.enabled = params->he_bss_color.enabled;
-		err |= BSS_CHANGED_HE_BSS_COLOR;
+	if (beacon->he_bss_color_valid &&
+	    beacon->he_bss_color.enabled != link_conf->he_bss_color.enabled) {
+		link_conf->he_bss_color.enabled = beacon->he_bss_color.enabled;
+		changed |= BSS_CHANGED_HE_BSS_COLOR;
 	}
 
-	ieee80211_link_info_change_notify(sdata, link, err);
+	ieee80211_link_info_change_notify(sdata, link, changed);
 	return 0;
 }
 
@@ -1565,7 +1569,7 @@
 		sdata_dereference(sdata->link[link_id], sdata);
 	struct ieee80211_bss_conf *link_conf = link->conf;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	old_beacon = sdata_dereference(link->u.ap.beacon, sdata);
 	if (!old_beacon)
@@ -1579,7 +1583,6 @@
 				  sdata);
 
 	/* abort any running channel switch or color change */
-	mutex_lock(&local->mtx);
 	link_conf->csa_active = false;
 	link_conf->color_change_active = false;
 	if (link->csa_block_tx) {
@@ -1588,8 +1591,6 @@
 		link->csa_block_tx = false;
 	}
 
-	mutex_unlock(&local->mtx);
-
 	ieee80211_free_next_beacon(link);
 
 	/* turn off carrier for this interface and dependent VLANs */
@@ -1632,7 +1633,7 @@
 
 	if (sdata->wdev.cac_started) {
 		chandef = link_conf->chandef;
-		cancel_delayed_work_sync(&link->dfs_cac_timer_work);
+		wiphy_delayed_work_cancel(wiphy, &link->dfs_cac_timer_work);
 		cfg80211_cac_event(sdata->dev, &chandef,
 				   NL80211_RADAR_CAC_ABORTED,
 				   GFP_KERNEL);
@@ -1644,10 +1645,8 @@
 	local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
 	ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf);
 
-	mutex_lock(&local->mtx);
 	ieee80211_link_copy_chanctx_to_vlans(link, true);
 	ieee80211_link_release_channel(link);
-	mutex_unlock(&local->mtx);
 
 	return 0;
 }
@@ -1718,7 +1717,7 @@
 {
 #ifdef CONFIG_MAC80211_MESH
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	u32 changed = 0;
+	u64 changed = 0;
 
 	if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) {
 		switch (params->plink_state) {
@@ -1789,7 +1788,7 @@
 		sdata_dereference(sdata->link[link_id], sdata);
 	struct link_sta_info *link_sta =
 		rcu_dereference_protected(sta->link[link_id],
-					  lockdep_is_held(&local->sta_mtx));
+					  lockdep_is_held(&local->hw.wiphy->mtx));
 
 	/*
 	 * If there are no changes, then accept a link that doesn't exist,
@@ -1800,7 +1799,8 @@
 	    !params->supported_rates_len &&
 	    !params->ht_capa && !params->vht_capa &&
 	    !params->he_capa && !params->eht_capa &&
-	    !params->opmode_notif_used)
+	    !params->opmode_notif_used &&
+	    !params->tp_overridden)
 		return 0;
 
 	if (!link || !link_sta)
@@ -1846,7 +1846,8 @@
 	/* VHT can override some HT caps such as the A-MSDU max length */
 	if (params->vht_capa)
 		ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
-						    params->vht_capa, link_sta);
+						    params->vht_capa, NULL,
+						    link_sta);
 
 	if (params->he_capa)
 		ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
@@ -1872,6 +1873,10 @@
 					      sband->band);
 	}
 
+	if (params->tp_overridden)
+		link_sta->pub->tp_override = params->tp_override;
+
+
 	return ret;
 }
 
@@ -2023,6 +2028,8 @@
 	struct ieee80211_sub_if_data *sdata;
 	int err;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (params->vlan) {
 		sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
 
@@ -2066,9 +2073,7 @@
 	 * visible yet), sta_apply_parameters (and inner functions) require
 	 * the mutex due to other paths.
 	 */
-	mutex_lock(&local->sta_mtx);
 	err = sta_apply_parameters(local, sta, params);
-	mutex_unlock(&local->sta_mtx);
 	if (err) {
 		sta_info_free(local, sta);
 		return err;
@@ -2111,13 +2116,11 @@
 	enum cfg80211_station_type statype;
 	int err;
 
-	mutex_lock(&local->sta_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	sta = sta_info_get_bss(sdata, mac);
-	if (!sta) {
-		err = -ENOENT;
-		goto out_err;
-	}
+	if (!sta)
+		return -ENOENT;
 
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_MESH_POINT:
@@ -2147,22 +2150,19 @@
 			statype = CFG80211_STA_AP_CLIENT_UNASSOC;
 		break;
 	default:
-		err = -EOPNOTSUPP;
-		goto out_err;
+		return -EOPNOTSUPP;
 	}
 
 	err = cfg80211_check_station_change(wiphy, params, statype);
 	if (err)
-		goto out_err;
+		return err;
 
 	if (params->vlan && params->vlan != sta->sdata->dev) {
 		vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
 
 		if (params->vlan->ieee80211_ptr->use_4addr) {
-			if (vlansdata->u.vlan.sta) {
-				err = -EBUSY;
-				goto out_err;
-			}
+			if (vlansdata->u.vlan.sta)
+				return -EBUSY;
 
 			rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
 			__ieee80211_check_fast_rx_iface(vlansdata);
@@ -2188,18 +2188,9 @@
 		}
 	}
 
-	/* we use sta_info_get_bss() so this might be different */
-	if (sdata != sta->sdata) {
-		mutex_lock_nested(&sta->sdata->wdev.mtx, 1);
 		err = sta_apply_parameters(local, sta, params);
-		mutex_unlock(&sta->sdata->wdev.mtx);
-	} else {
-		err = sta_apply_parameters(local, sta, params);
-	}
 	if (err)
-		goto out_err;
-
-	mutex_unlock(&local->sta_mtx);
+		return err;
 
 	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
 	    params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
@@ -2208,9 +2199,6 @@
 	}
 
 	return 0;
-out_err:
-	mutex_unlock(&local->sta_mtx);
-	return err;
 }
 
 #ifdef CONFIG_MAC80211_MESH
@@ -2422,6 +2410,163 @@
 	return 0;
 }
 
+int ieee80211_update_mpp(struct wiphy *wiphy, struct net_device *dev,
+			 const u8 *dst, const u8 *next_hop)
+{
+	struct ieee80211_sub_if_data *sdata;
+	int ret = 0;
+	struct mesh_path *mppath;
+
+	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+	rcu_read_lock();
+
+	mppath = mpp_path_lookup(sdata, dst);
+	if (!mppath) {
+		ret = mpp_path_add(sdata, dst, next_hop);
+		if (!ret) {
+			mppath = mpp_path_lookup(sdata, dst);
+			spin_lock_bh(&mppath->state_lock);
+			mppath->flags |= MESH_PATH_FIXED;
+			spin_unlock_bh(&mppath->state_lock);
+		}
+	} else {
+		spin_lock_bh(&mppath->state_lock);
+		if (!ether_addr_equal(mppath->mpp, next_hop))
+			memcpy(mppath->mpp, next_hop, ETH_ALEN);
+		mppath->exp_time = jiffies;
+		mppath->flags |= MESH_PATH_FIXED;
+		spin_unlock_bh(&mppath->state_lock);
+	}
+	rcu_read_unlock();
+	return ret;
+}
+
+int ieee80211_delete_mpp(struct wiphy *wiphy, struct net_device *dev,
+			 const u8 *dst)
+{
+	struct ieee80211_sub_if_data *sdata;
+	int ret = 0;
+	struct mesh_path *mppath;
+
+	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	rcu_read_lock();
+	mppath = mpp_path_lookup(sdata, dst);
+	if (mppath) {
+		spin_lock_bh(&mppath->state_lock);
+		mppath->flags &= ~MESH_PATH_FIXED;
+		spin_unlock_bh(&mppath->state_lock);
+	} else {
+		ret = -ENOENT;
+	}
+	rcu_read_unlock();
+	return ret;
+}
+
+static struct mplink_block_list_info *
+ieee80211_find_mp_blink_info(struct ieee80211_if_mesh *ifmsh, const u8 *dst)
+{
+	struct mplink_block_list_info *mp_blink;
+
+	list_for_each_entry(mp_blink, &ifmsh->mplink_blocking_list, list) {
+		if (ether_addr_equal(dst, mp_blink->dst))
+			return mp_blink;
+	}
+
+	return NULL;
+}
+
+int ieee80211_mplink_block(struct wiphy *wiphy, struct net_device *dev,
+			   const u8 *dst)
+{
+	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_if_mesh *ifmsh;
+	struct mplink_block_list_info *mp_blink;
+	int ret = 0;
+
+	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	ifmsh = &sdata->u.mesh;
+
+	spin_lock_bh(&ifmsh->mplink_blocking_list_lock);
+
+	if (ieee80211_find_mp_blink_info(ifmsh, dst))
+		goto out;
+
+	mp_blink = kzalloc(sizeof(*mp_blink), GFP_ATOMIC);
+	if (!mp_blink) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	memcpy(mp_blink->dst, dst, ETH_ALEN);
+	list_add_tail(&mp_blink->list, &ifmsh->mplink_blocking_list);
+
+out:
+	spin_unlock_bh(&ifmsh->mplink_blocking_list_lock);
+	return ret;
+}
+
+int ieee80211_mplink_unblock(struct wiphy *wiphy, struct net_device *dev,
+			     const u8 *dst)
+{
+	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_if_mesh *ifmsh;
+	struct mplink_block_list_info *mp_blink;
+	int ret = 0;
+
+	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	ifmsh = &sdata->u.mesh;
+
+	spin_lock_bh(&ifmsh->mplink_blocking_list_lock);
+
+	mp_blink = ieee80211_find_mp_blink_info(ifmsh, dst);
+	if (!mp_blink) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	list_del(&mp_blink->list);
+	kfree(mp_blink);
+
+out:
+	spin_unlock_bh(&ifmsh->mplink_blocking_list_lock);
+
+	return ret;
+}
+
+int ieee80211_dump_blocked_mplink_info(struct wiphy *wiphy, struct net_device *dev,
+				       struct mplink_blocked_info *minfo)
+{
+	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_if_mesh *ifmsh;
+	struct mplink_block_list_info *mp_blink;
+	u32 len = 0;
+
+	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	ifmsh = &sdata->u.mesh;
+
+	spin_lock_bh(&ifmsh->mplink_blocking_list_lock);
+	list_for_each_entry(mp_blink, &ifmsh->mplink_blocking_list, list) {
+		if (minfo->count > (sizeof(struct mplink_blocked_info) / ETH_ALEN))
+			break;
+		memcpy(minfo->info + len, mp_blink->dst, ETH_ALEN);
+		len += ETH_ALEN;
+		minfo->count++;
+	}
+	spin_unlock_bh(&ifmsh->mplink_blocking_list_lock);
+
+	return 0;
+}
+
+int ieee80211_mplink_flush(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+	ieee80211_mesh_free_blocked_mplink_entries(sdata);
+
+	return 0;
+}
+
 static int ieee80211_get_mesh_config(struct wiphy *wiphy,
 				struct net_device *dev,
 				struct mesh_config *conf)
@@ -2615,6 +2760,36 @@
 	return 0;
 }
 
+int ieee80211_update_mesh_vendor_node_metrics_ie(struct wiphy *wiphy,
+						 struct net_device *dev,
+						 const struct mesh_vendor_ie *vendor_ie)
+{
+	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_if_mesh *ifmsh;
+
+	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	ifmsh = &sdata->u.mesh;
+	memcpy(ifmsh->node_vendor_ie, vendor_ie->ie, vendor_ie->ie_len);
+	ifmsh->node_vendor_ie_len = vendor_ie->ie_len;
+	ieee80211_mbss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+	return 0;
+}
+
+int ieee80211_update_mesh_vendor_path_metrics_ie(struct wiphy *wiphy,
+						 struct net_device *dev,
+						 const struct mesh_vendor_ie *vendor_ie)
+{
+	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_if_mesh *ifmsh;
+
+	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	ifmsh = &sdata->u.mesh;
+	memcpy(ifmsh->mpm_vendor_ie, vendor_ie->ie, vendor_ie->ie_len);
+	ifmsh->mpm_vendor_ie_len = vendor_ie->ie_len;
+	ieee80211_mbss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+	return 0;
+}
+
 static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
 			       const struct mesh_config *conf,
 			       const struct mesh_setup *setup)
@@ -2623,6 +2798,8 @@
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	int err;
 
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
 	memcpy(&ifmsh->mshcfg, conf, sizeof(struct mesh_config));
 	err = copy_mesh_setup(ifmsh, setup);
 	if (err)
@@ -2634,12 +2811,12 @@
 	sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
 	sdata->deflink.needed_rx_chains = sdata->local->rx_chains;
 
-	mutex_lock(&sdata->local->mtx);
 	err = ieee80211_link_use_channel(&sdata->deflink, &setup->chandef,
 					 IEEE80211_CHANCTX_SHARED);
-	mutex_unlock(&sdata->local->mtx);
-	if (err)
+	if (err) {
+		kfree(ifmsh->ie);
 		return err;
+	}
 
 	return ieee80211_start_mesh(sdata);
 }
@@ -2648,11 +2825,11 @@
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
 	ieee80211_stop_mesh(sdata);
-	mutex_lock(&sdata->local->mtx);
 	ieee80211_link_release_channel(&sdata->deflink);
 	kfree(sdata->u.mesh.ie);
-	mutex_unlock(&sdata->local->mtx);
 
 	return 0;
 }
@@ -2665,7 +2842,7 @@
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_link_data *link;
 	struct ieee80211_supported_band *sband;
-	u32 changed = 0;
+	u64 changed = 0;
 
 	link = ieee80211_link_or_deflink(sdata, params->link_id, true);
 	if (IS_ERR(link))
@@ -3010,6 +3187,8 @@
 	bool update_txp_type = false;
 	bool has_monitor = false;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (wdev) {
 		sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 
@@ -3057,7 +3236,6 @@
 		break;
 	}
 
-	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
 			has_monitor = true;
@@ -3073,7 +3251,6 @@
 			continue;
 		ieee80211_recalc_txpower(sdata, update_txp_type);
 	}
-	mutex_unlock(&local->iflist_mtx);
 
 	if (has_monitor) {
 		sdata = wiphy_dereference(local->hw.wiphy,
@@ -3106,6 +3283,10 @@
 	else
 		*dbm = sdata->vif.bss_conf.txpower;
 
+	/* INT_MIN indicates no power level was set yet */
+	if (*dbm == INT_MIN)
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -3162,14 +3343,24 @@
 	struct sta_info *sta;
 	bool tdls_peer_found = false;
 
-	lockdep_assert_held(&sdata->wdev.mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION))
 		return -EINVAL;
 
+	if (ieee80211_vif_is_mld(&sdata->vif) &&
+	    !(sdata->vif.active_links & BIT(link->link_id)))
+		return 0;
+
 	old_req = link->u.mgd.req_smps;
 	link->u.mgd.req_smps = smps_mode;
 
+	/* The driver indicated that EML is enabled for the interface, which
+	 * implies that SMPS flows towards the AP should be stopped.
+	 */
+	if (sdata->vif.driver_flags & IEEE80211_VIF_EML_ACTIVE)
+		return 0;
+
 	if (old_req == smps_mode &&
 	    smps_mode != IEEE80211_SMPS_AUTOMATIC)
 		return 0;
@@ -3183,7 +3374,7 @@
 	    link->conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
 		return 0;
 
-	ap = link->u.mgd.bssid;
+	ap = sdata->vif.cfg.ap_addr;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
@@ -3205,7 +3396,9 @@
 
 	/* send SM PS frame to AP */
 	err = ieee80211_send_smps_action(sdata, smps_mode,
-					 ap, ap);
+					 ap, ap,
+					 ieee80211_vif_is_mld(&sdata->vif) ?
+					 link->link_id : -1);
 	if (err)
 		link->u.mgd.req_smps = old_req;
 	else if (smps_mode != IEEE80211_SMPS_OFF && tdls_peer_found)
@@ -3235,7 +3428,6 @@
 	local->dynamic_ps_forced_timeout = timeout;
 
 	/* no change, but if automatic follow powersave */
-	sdata_lock(sdata);
 	for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
 		struct ieee80211_link_data *link;
 
@@ -3246,7 +3438,6 @@
 		__ieee80211_request_smps_mgd(sdata, link,
 					     link->u.mgd.req_smps);
 	}
-	sdata_unlock(sdata);
 
 	if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
 		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
@@ -3392,7 +3583,8 @@
 	struct ieee80211_local *local = sdata->local;
 	int err;
 
-	mutex_lock(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!list_empty(&local->roc_list) || local->scanning) {
 		err = -EBUSY;
 		goto out_unlock;
@@ -3407,12 +3599,10 @@
 	if (err)
 		goto out_unlock;
 
-	ieee80211_queue_delayed_work(&sdata->local->hw,
-				     &sdata->deflink.dfs_cac_timer_work,
+	wiphy_delayed_work_queue(wiphy, &sdata->deflink.dfs_cac_timer_work,
 				     msecs_to_jiffies(cac_time_ms));
 
  out_unlock:
-	mutex_unlock(&local->mtx);
 	return err;
 }
 
@@ -3422,20 +3612,21 @@
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
 
-	mutex_lock(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		/* it might be waiting for the local->mtx, but then
 		 * by the time it gets it, sdata->wdev.cac_started
 		 * will no longer be true
 		 */
-		cancel_delayed_work(&sdata->deflink.dfs_cac_timer_work);
+		wiphy_delayed_work_cancel(wiphy,
+					  &sdata->deflink.dfs_cac_timer_work);
 
 		if (sdata->wdev.cac_started) {
 			ieee80211_link_release_channel(&sdata->deflink);
 			sdata->wdev.cac_started = false;
 		}
 	}
-	mutex_unlock(&local->mtx);
 }
 
 static struct cfg80211_beacon_data *
@@ -3567,11 +3758,11 @@
 			if (iter == sdata || iter->vif.mbssid_tx_vif != vif)
 				continue;
 
-			ieee80211_queue_work(&iter->local->hw,
+			wiphy_work_queue(iter->local->hw.wiphy,
 					     &iter->deflink.csa_finalize_work);
 		}
 	}
-	ieee80211_queue_work(&local->hw, &sdata->deflink.csa_finalize_work);
+	wiphy_work_queue(local->hw.wiphy, &sdata->deflink.csa_finalize_work);
 
 	rcu_read_unlock();
 }
@@ -3585,7 +3776,7 @@
 
 	sdata->deflink.csa_block_tx = block_tx;
 	sdata_info(sdata, "channel switch failed, disconnecting\n");
-	ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
+	wiphy_work_queue(local->hw.wiphy, &ifmgd->csa_connection_drop_work);
 }
 EXPORT_SYMBOL(ieee80211_channel_switch_disconnect);
 
@@ -3601,25 +3792,22 @@
 
 		err = ieee80211_assign_beacon(sdata, &sdata->deflink,
 					      sdata->deflink.u.ap.next_beacon,
-					      NULL, NULL);
+					      NULL, NULL, changed);
 		ieee80211_free_next_beacon(&sdata->deflink);
 
 		if (err < 0)
 			return err;
-		*changed |= err;
 		break;
 	case NL80211_IFTYPE_ADHOC:
-		err = ieee80211_ibss_finish_csa(sdata);
+		err = ieee80211_ibss_finish_csa(sdata, changed);
 		if (err < 0)
 			return err;
-		*changed |= err;
 		break;
 #ifdef CONFIG_MAC80211_MESH
 	case NL80211_IFTYPE_MESH_POINT:
-		err = ieee80211_mesh_finish_csa(sdata);
+		err = ieee80211_mesh_finish_csa(sdata, changed);
 		if (err < 0)
 			return err;
-		*changed |= err;
 		break;
 #endif
 	default:
@@ -3630,21 +3818,14 @@
 	return 0;
 }
 
-static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
+static int __ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
 {
+	struct ieee80211_sub_if_data *sdata = link_data->sdata;
 	struct ieee80211_local *local = sdata->local;
 	u64 changed = 0;
 	int err;
 
-	sdata_assert_lock(sdata);
-	lockdep_assert_held(&local->mtx);
-	lockdep_assert_held(&local->chanctx_mtx);
-
-	if (sdata->vif.bss_conf.eht_puncturing != sdata->vif.bss_conf.csa_punct_bitmap) {
-		sdata->vif.bss_conf.eht_puncturing =
-					sdata->vif.bss_conf.csa_punct_bitmap;
-		changed |= BSS_CHANGED_EHT_PUNCTURING;
-	}
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	/*
 	 * using reservation isn't immediate as it may be deferred until later
@@ -3653,20 +3834,20 @@
 	 * completed successfully
 	 */
 
-	if (sdata->deflink.reserved_chanctx) {
+	if (link_data->reserved_chanctx) {
 		/*
 		 * with multi-vif csa driver may call ieee80211_csa_finish()
 		 * many times while waiting for other interfaces to use their
 		 * reservations
 		 */
-		if (sdata->deflink.reserved_ready)
+		if (link_data->reserved_ready)
 			return 0;
 
 		return ieee80211_link_use_reserved_context(&sdata->deflink);
 	}
 
-	if (!cfg80211_chandef_identical(&sdata->vif.bss_conf.chandef,
-					&sdata->deflink.csa_chandef))
+	if (!cfg80211_chandef_identical(&link_data->conf->chandef,
+					&link_data->csa_chandef))
 		return -EINVAL;
 
 	sdata->vif.bss_conf.csa_active = false;
@@ -3675,62 +3856,64 @@
 	if (err)
 		return err;
 
-	ieee80211_link_info_change_notify(sdata, &sdata->deflink, changed);
+	if (sdata->vif.bss_conf.eht_puncturing != sdata->vif.bss_conf.csa_punct_bitmap) {
+		sdata->vif.bss_conf.eht_puncturing =
+					sdata->vif.bss_conf.csa_punct_bitmap;
+		changed |= BSS_CHANGED_EHT_PUNCTURING;
+	}
+
+	ieee80211_link_info_change_notify(sdata, link_data, changed);
 
-	if (sdata->deflink.csa_block_tx) {
+	if (link_data->csa_block_tx) {
 		ieee80211_wake_vif_queues(local, sdata,
 					  IEEE80211_QUEUE_STOP_REASON_CSA);
-		sdata->deflink.csa_block_tx = false;
+		link_data->csa_block_tx = false;
 	}
 
-	err = drv_post_channel_switch(sdata);
+	err = drv_post_channel_switch(link_data);
 	if (err)
 		return err;
 
-	cfg80211_ch_switch_notify(sdata->dev, &sdata->deflink.csa_chandef, 0,
-				  sdata->vif.bss_conf.eht_puncturing);
+	cfg80211_ch_switch_notify(sdata->dev, &link_data->csa_chandef,
+				  link_data->link_id,
+				  link_data->conf->eht_puncturing);
 
 	return 0;
 }
 
-static void ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
+static void ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
 {
-	if (__ieee80211_csa_finalize(sdata)) {
+	struct ieee80211_sub_if_data *sdata = link_data->sdata;
+
+	if (__ieee80211_csa_finalize(link_data)) {
 		sdata_info(sdata, "failed to finalize CSA, disconnecting\n");
 		cfg80211_stop_iface(sdata->local->hw.wiphy, &sdata->wdev,
 				    GFP_KERNEL);
 	}
 }
 
-void ieee80211_csa_finalize_work(struct work_struct *work)
+void ieee80211_csa_finalize_work(struct wiphy *wiphy, struct wiphy_work *work)
 {
-	struct ieee80211_sub_if_data *sdata =
-		container_of(work, struct ieee80211_sub_if_data,
-			     deflink.csa_finalize_work);
+	struct ieee80211_link_data *link =
+		container_of(work, struct ieee80211_link_data, csa_finalize_work);
+	struct ieee80211_sub_if_data *sdata = link->sdata;
 	struct ieee80211_local *local = sdata->local;
 
-	sdata_lock(sdata);
-	mutex_lock(&local->mtx);
-	mutex_lock(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	/* AP might have been stopped while waiting for the lock. */
-	if (!sdata->vif.bss_conf.csa_active)
-		goto unlock;
+	if (!link->conf->csa_active)
+		return;
 
 	if (!ieee80211_sdata_running(sdata))
-		goto unlock;
-
-	ieee80211_csa_finalize(sdata);
+		return;
 
-unlock:
-	mutex_unlock(&local->chanctx_mtx);
-	mutex_unlock(&local->mtx);
-	sdata_unlock(sdata);
+	ieee80211_csa_finalize(link);
 }
 
 static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
 				    struct cfg80211_csa_settings *params,
-				    u32 *changed)
+				    u64 *changed)
 {
 	struct ieee80211_csa_settings csa = {};
 	int err;
@@ -3777,12 +3960,11 @@
 
 		err = ieee80211_assign_beacon(sdata, &sdata->deflink,
 					      &params->beacon_csa, &csa,
-					      NULL);
+					      NULL, changed);
 		if (err < 0) {
 			ieee80211_free_next_beacon(&sdata->deflink);
 			return err;
 		}
-		*changed |= err;
 
 		break;
 	case NL80211_IFTYPE_ADHOC:
@@ -3814,10 +3996,9 @@
 
 		/* see comments in the NL80211_IFTYPE_AP block */
 		if (params->count > 1) {
-			err = ieee80211_ibss_csa_beacon(sdata, params);
+			err = ieee80211_ibss_csa_beacon(sdata, params, changed);
 			if (err < 0)
 				return err;
-			*changed |= err;
 		}
 
 		ieee80211_send_action_csa(sdata, params);
@@ -3842,12 +4023,11 @@
 
 		/* see comments in the NL80211_IFTYPE_AP block */
 		if (params->count > 1) {
-			err = ieee80211_mesh_csa_beacon(sdata, params);
+			err = ieee80211_mesh_csa_beacon(sdata, params, changed);
 			if (err < 0) {
 				ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
 				return err;
 			}
-			*changed |= err;
 		}
 
 		if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_INIT)
@@ -3881,11 +4061,10 @@
 	struct ieee80211_channel_switch ch_switch;
 	struct ieee80211_chanctx_conf *conf;
 	struct ieee80211_chanctx *chanctx;
-	u32 changed = 0;
+	u64 changed = 0;
 	int err;
 
-	sdata_assert_lock(sdata);
-	lockdep_assert_held(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!list_empty(&local->roc_list) || local->scanning)
 		return -EBUSY;
@@ -3901,9 +4080,8 @@
 	if (sdata->vif.bss_conf.csa_active)
 		return -EBUSY;
 
-	mutex_lock(&local->chanctx_mtx);
 	conf = rcu_dereference_protected(sdata->vif.bss_conf.chanctx_conf,
-					 lockdep_is_held(&local->chanctx_mtx));
+					 lockdep_is_held(&local->hw.wiphy->mtx));
 	if (!conf) {
 		err = -EBUSY;
 		goto out;
@@ -3973,11 +4151,10 @@
 		drv_channel_switch_beacon(sdata, &params->chandef);
 	} else {
 		/* if the beacon didn't change, we can finalize immediately */
-		ieee80211_csa_finalize(sdata);
+		ieee80211_csa_finalize(&sdata->deflink);
 	}
 
 out:
-	mutex_unlock(&local->chanctx_mtx);
 	return err;
 }
 
@@ -3986,18 +4163,15 @@
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
-	int err;
 
-	mutex_lock(&local->mtx);
-	err = __ieee80211_channel_switch(wiphy, dev, params);
-	mutex_unlock(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
-	return err;
+	return __ieee80211_channel_switch(wiphy, dev, params);
 }
 
 u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local)
 {
-	lockdep_assert_held(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	local->roc_cookie_counter++;
 
@@ -4029,7 +4203,8 @@
 		return -ENOMEM;
 	}
 
-	IEEE80211_SKB_CB(skb)->ack_frame_id = id;
+	IEEE80211_SKB_CB(skb)->status_data_idr = 1;
+	IEEE80211_SKB_CB(skb)->status_data = id;
 
 	*cookie = ieee80211_mgmt_tx_cookie(local);
 	IEEE80211_SKB_CB(ack_skb)->ack.cookie = *cookie;
@@ -4079,11 +4254,17 @@
 static int ieee80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
+	int ret;
 
 	if (local->started)
 		return -EOPNOTSUPP;
 
-	return drv_set_antenna(local, tx_ant, rx_ant);
+	ret = drv_set_antenna(local, tx_ant, rx_ant);
+	if (ret)
+		return ret;
+
+	local->rx_chains = hweight8(rx_ant);
+	return 0;
 }
 
 static int ieee80211_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant)
@@ -4125,22 +4306,23 @@
 	int ret;
 
 	/* the lock is needed to assign the cookie later */
-	mutex_lock(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	rcu_read_lock();
+	sta = sta_info_get_bss(sdata, peer);
+	if (!sta) {
+		ret = -ENOLINK;
+		goto unlock;
+	}
+
+	qos = sta->sta.wme;
+
 	chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
 	if (WARN_ON(!chanctx_conf)) {
 		ret = -EINVAL;
 		goto unlock;
 	}
 	band = chanctx_conf->def.chan->band;
-	sta = sta_info_get_bss(sdata, peer);
-	if (sta) {
-		qos = sta->sta.wme;
-	} else {
-		ret = -ENOLINK;
-		goto unlock;
-	}
 
 	if (qos) {
 		fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
@@ -4195,7 +4377,6 @@
 	ret = 0;
 unlock:
 	rcu_read_unlock();
-	mutex_unlock(&local->mtx);
 
 	return ret;
 }
@@ -4553,7 +4734,8 @@
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct sta_info *sta;
-	int ret;
+
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (!sdata->local->ops->set_tid_config)
 		return -EOPNOTSUPP;
@@ -4561,17 +4743,11 @@
 	if (!tid_conf->peer)
 		return drv_set_tid_config(sdata->local, sdata, NULL, tid_conf);
 
-	mutex_lock(&sdata->local->sta_mtx);
 	sta = sta_info_get_bss(sdata, tid_conf->peer);
-	if (!sta) {
-		mutex_unlock(&sdata->local->sta_mtx);
+	if (!sta)
 		return -ENOENT;
-	}
 
-	ret = drv_set_tid_config(sdata->local, sdata, &sta->sta, tid_conf);
-	mutex_unlock(&sdata->local->sta_mtx);
-
-	return ret;
+	return drv_set_tid_config(sdata->local, sdata, &sta->sta, tid_conf);
 }
 
 static int ieee80211_reset_tid_config(struct wiphy *wiphy,
@@ -4580,7 +4756,8 @@
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct sta_info *sta;
-	int ret;
+
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (!sdata->local->ops->reset_tid_config)
 		return -EOPNOTSUPP;
@@ -4588,17 +4765,11 @@
 	if (!peer)
 		return drv_reset_tid_config(sdata->local, sdata, NULL, tids);
 
-	mutex_lock(&sdata->local->sta_mtx);
 	sta = sta_info_get_bss(sdata, peer);
-	if (!sta) {
-		mutex_unlock(&sdata->local->sta_mtx);
+	if (!sta)
 		return -ENOENT;
-	}
-
-	ret = drv_reset_tid_config(sdata->local, sdata, &sta->sta, tids);
-	mutex_unlock(&sdata->local->sta_mtx);
 
-	return ret;
+	return drv_reset_tid_config(sdata->local, sdata, &sta->sta, tids);
 }
 
 static int ieee80211_set_sar_specs(struct wiphy *wiphy,
@@ -4614,7 +4785,7 @@
 
 static int
 ieee80211_set_after_color_change_beacon(struct ieee80211_sub_if_data *sdata,
-					u32 *changed)
+					u64 *changed)
 {
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_AP: {
@@ -4625,13 +4796,12 @@
 
 		ret = ieee80211_assign_beacon(sdata, &sdata->deflink,
 					      sdata->deflink.u.ap.next_beacon,
-					      NULL, NULL);
+					      NULL, NULL, changed);
 		ieee80211_free_next_beacon(&sdata->deflink);
 
 		if (ret < 0)
 			return ret;
 
-		*changed |= ret;
 		break;
 	}
 	default:
@@ -4645,7 +4815,7 @@
 static int
 ieee80211_set_color_change_beacon(struct ieee80211_sub_if_data *sdata,
 				  struct cfg80211_color_change_settings *params,
-				  u32 *changed)
+				  u64 *changed)
 {
 	struct ieee80211_color_change_settings color_change = {};
 	int err;
@@ -4668,12 +4838,11 @@
 
 		err = ieee80211_assign_beacon(sdata, &sdata->deflink,
 					      &params->beacon_color_change,
-					      NULL, &color_change);
+					      NULL, &color_change, changed);
 		if (err < 0) {
 			ieee80211_free_next_beacon(&sdata->deflink);
 			return err;
 		}
-		*changed |= err;
 		break;
 	default:
 		return -EOPNOTSUPP;
@@ -4684,8 +4853,10 @@
 
 static void
 ieee80211_color_change_bss_config_notify(struct ieee80211_sub_if_data *sdata,
-					 u8 color, int enable, u32 changed)
+					 u8 color, int enable, u64 changed)
 {
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
 	sdata->vif.bss_conf.he_bss_color.color = color;
 	sdata->vif.bss_conf.he_bss_color.enabled = enable;
 	changed |= BSS_CHANGED_HE_BSS_COLOR;
@@ -4695,7 +4866,6 @@
 	if (!sdata->vif.bss_conf.nontransmitted && sdata->vif.mbssid_tx_vif) {
 		struct ieee80211_sub_if_data *child;
 
-		mutex_lock(&sdata->local->iflist_mtx);
 		list_for_each_entry(child, &sdata->local->interfaces, list) {
 			if (child != sdata && child->vif.mbssid_tx_vif == &sdata->vif) {
 				child->vif.bss_conf.he_bss_color.color = color;
@@ -4705,18 +4875,16 @@
 								  BSS_CHANGED_HE_BSS_COLOR);
 			}
 		}
-		mutex_unlock(&sdata->local->iflist_mtx);
 	}
 }
 
 static int ieee80211_color_change_finalize(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
-	u32 changed = 0;
+	u64 changed = 0;
 	int err;
 
-	sdata_assert_lock(sdata);
-	lockdep_assert_held(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	sdata->vif.bss_conf.color_change_active = false;
 
@@ -4734,28 +4902,24 @@
 	return 0;
 }
 
-void ieee80211_color_change_finalize_work(struct work_struct *work)
+void ieee80211_color_change_finalize_work(struct wiphy *wiphy,
+					  struct wiphy_work *work)
 {
 	struct ieee80211_sub_if_data *sdata =
 		container_of(work, struct ieee80211_sub_if_data,
 			     deflink.color_change_finalize_work);
 	struct ieee80211_local *local = sdata->local;
 
-	sdata_lock(sdata);
-	mutex_lock(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	/* AP might have been stopped while waiting for the lock. */
 	if (!sdata->vif.bss_conf.color_change_active)
-		goto unlock;
+		return;
 
 	if (!ieee80211_sdata_running(sdata))
-		goto unlock;
+		return;
 
 	ieee80211_color_change_finalize(sdata);
-
-unlock:
-	mutex_unlock(&local->mtx);
-	sdata_unlock(sdata);
 }
 
 void ieee80211_color_collision_detection_work(struct work_struct *work)
@@ -4766,16 +4930,14 @@
 			     color_collision_detect_work);
 	struct ieee80211_sub_if_data *sdata = link->sdata;
 
-	sdata_lock(sdata);
 	cfg80211_obss_color_collision_notify(sdata->dev, link->color_bitmap);
-	sdata_unlock(sdata);
 }
 
 void ieee80211_color_change_finish(struct ieee80211_vif *vif)
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 
-	ieee80211_queue_work(&sdata->local->hw,
+	wiphy_work_queue(sdata->local->hw.wiphy,
 			     &sdata->deflink.color_change_finalize_work);
 }
 EXPORT_SYMBOL_GPL(ieee80211_color_change_finish);
@@ -4809,16 +4971,14 @@
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
-	u32 changed = 0;
+	u64 changed = 0;
 	int err;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (sdata->vif.bss_conf.nontransmitted)
 		return -EINVAL;
 
-	mutex_lock(&local->mtx);
-
 	/* don't allow another color change if one is already active or if csa
 	 * is active
 	 */
@@ -4843,7 +5003,6 @@
 		ieee80211_color_change_finalize(sdata);
 
 out:
-	mutex_unlock(&local->mtx);
 
 	return err;
 }
@@ -4865,16 +5024,13 @@
 				   unsigned int link_id)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
-	int res;
+
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (wdev->use_4addr)
 		return -EOPNOTSUPP;
 
-	mutex_lock(&sdata->local->mtx);
-	res = ieee80211_vif_set_links(sdata, wdev->valid_links);
-	mutex_unlock(&sdata->local->mtx);
-
-	return res;
+	return ieee80211_vif_set_links(sdata, wdev->valid_links, 0);
 }
 
 static void ieee80211_del_intf_link(struct wiphy *wiphy,
@@ -4883,9 +5039,9 @@
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 
-	mutex_lock(&sdata->local->mtx);
-	ieee80211_vif_set_links(sdata, wdev->valid_links);
-	mutex_unlock(&sdata->local->mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
+	ieee80211_vif_set_links(sdata, wdev->valid_links, 0);
 }
 
 static int sta_add_link_station(struct ieee80211_local *local,
@@ -4925,13 +5081,10 @@
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = wiphy_priv(wiphy);
-	int ret;
 
-	mutex_lock(&sdata->local->sta_mtx);
-	ret = sta_add_link_station(local, sdata, params);
-	mutex_unlock(&sdata->local->sta_mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
-	return ret;
+	return sta_add_link_station(local, sdata, params);
 }
 
 static int sta_mod_link_station(struct ieee80211_local *local,
@@ -4956,13 +5109,10 @@
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = wiphy_priv(wiphy);
-	int ret;
 
-	mutex_lock(&sdata->local->sta_mtx);
-	ret = sta_mod_link_station(local, sdata, params);
-	mutex_unlock(&sdata->local->sta_mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
-	return ret;
+	return sta_mod_link_station(local, sdata, params);
 }
 
 static int sta_del_link_station(struct ieee80211_sub_if_data *sdata,
@@ -4991,13 +5141,10 @@
 			   struct link_station_del_parameters *params)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	int ret;
 
-	mutex_lock(&sdata->local->sta_mtx);
-	ret = sta_del_link_station(sdata, params);
-	mutex_unlock(&sdata->local->sta_mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
-	return ret;
+	return sta_del_link_station(sdata, params);
 }
 
 static int ieee80211_set_hw_timestamp(struct wiphy *wiphy,
@@ -5053,6 +5200,7 @@
 	.join_ocb = ieee80211_join_ocb,
 	.leave_ocb = ieee80211_leave_ocb,
 	.change_bss = ieee80211_change_bss,
+	.inform_bss = ieee80211_inform_bss,
 	.set_txq_params = ieee80211_set_txq_params,
 	.set_monitor_channel = ieee80211_set_monitor_channel,
 	.suspend = ieee80211_suspend,
diff -ruw linux-6.4/net/mac80211/chan.c linux-6.4-fbx/net/mac80211/chan.c
--- linux-6.4/net/mac80211/chan.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/chan.c	2023-11-07 13:38:44.074257019 +0100
@@ -18,7 +18,7 @@
 	struct ieee80211_link_data *link;
 	int num = 0;
 
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	list_for_each_entry(link, &ctx->assigned_links, assigned_chanctx_list)
 		num++;
@@ -32,7 +32,7 @@
 	struct ieee80211_link_data *link;
 	int num = 0;
 
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list)
 		num++;
@@ -52,7 +52,7 @@
 	struct ieee80211_chanctx *ctx;
 	int num = 0;
 
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	list_for_each_entry(ctx, &local->chanctx_list, list)
 		num++;
@@ -62,7 +62,8 @@
 
 static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local)
 {
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	return ieee80211_num_chanctx(local) < ieee80211_max_num_channels(local);
 }
 
@@ -73,7 +74,7 @@
 	struct ieee80211_chanctx_conf *conf;
 
 	conf = rcu_dereference_protected(link->conf->chanctx_conf,
-					 lockdep_is_held(&local->chanctx_mtx));
+					 lockdep_is_held(&local->hw.wiphy->mtx));
 	if (!conf)
 		return NULL;
 
@@ -87,7 +88,7 @@
 {
 	struct ieee80211_link_data *link;
 
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	list_for_each_entry(link, &ctx->reserved_links,
 			    reserved_chanctx_list) {
@@ -110,7 +111,7 @@
 {
 	struct ieee80211_link_data *link;
 
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	list_for_each_entry(link, &ctx->assigned_links,
 			    assigned_chanctx_list) {
@@ -136,7 +137,7 @@
 				   struct ieee80211_chanctx *ctx,
 				   const struct cfg80211_chan_def *compat)
 {
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	compat = ieee80211_chanctx_reserved_chandef(local, ctx, compat);
 	if (!compat)
@@ -154,7 +155,7 @@
 				      struct ieee80211_chanctx *ctx,
 				      const struct cfg80211_chan_def *def)
 {
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (ieee80211_chanctx_combined_chandef(local, ctx, def))
 		return true;
@@ -173,7 +174,7 @@
 {
 	struct ieee80211_chanctx *ctx;
 
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (mode == IEEE80211_CHANCTX_EXCLUSIVE)
 		return NULL;
@@ -361,7 +362,7 @@
 	enum nl80211_chan_width max_bw;
 	struct cfg80211_chan_def min_def;
 
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	/* don't optimize non-20MHz based and radar_enabled confs */
 	if (ctx->conf.def.width == NL80211_CHAN_WIDTH_5 ||
@@ -537,7 +538,7 @@
 {
 	struct ieee80211_chanctx *ctx;
 
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (mode == IEEE80211_CHANCTX_EXCLUSIVE)
 		return NULL;
@@ -572,7 +573,7 @@
 {
 	struct ieee80211_sub_if_data *sdata;
 
-	lockdep_assert_held(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
@@ -602,8 +603,7 @@
 	struct ieee80211_sub_if_data *sdata;
 	bool required = false;
 
-	lockdep_assert_held(&local->chanctx_mtx);
-	lockdep_assert_held(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
@@ -641,7 +641,7 @@
 {
 	struct ieee80211_chanctx *ctx;
 
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	ctx = kzalloc(sizeof(*ctx) + local->hw.chanctx_data_size, GFP_KERNEL);
 	if (!ctx)
@@ -665,8 +665,7 @@
 	u32 changed;
 	int err;
 
-	lockdep_assert_held(&local->mtx);
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!local->use_chanctx)
 		local->hw.conf.radar_enabled = ctx->conf.radar_enabled;
@@ -698,8 +697,7 @@
 	struct ieee80211_chanctx *ctx;
 	int err;
 
-	lockdep_assert_held(&local->mtx);
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	ctx = ieee80211_alloc_chanctx(local, chandef, mode);
 	if (!ctx)
@@ -718,7 +716,7 @@
 static void ieee80211_del_chanctx(struct ieee80211_local *local,
 				  struct ieee80211_chanctx *ctx)
 {
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!local->use_chanctx) {
 		struct cfg80211_chan_def *chandef = &local->_oper_chandef;
@@ -753,7 +751,7 @@
 static void ieee80211_free_chanctx(struct ieee80211_local *local,
 				   struct ieee80211_chanctx *ctx)
 {
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	WARN_ON_ONCE(ieee80211_chanctx_refcount(local, ctx) != 0);
 
@@ -770,7 +768,7 @@
 	const struct cfg80211_chan_def *compat = NULL;
 	struct sta_info *sta;
 
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
@@ -802,6 +800,11 @@
 		}
 	}
 
+	if (WARN_ON_ONCE(!compat)) {
+		rcu_read_unlock();
+		return;
+	}
+
 	/* TDLS peers can sometimes affect the chandef width */
 	list_for_each_entry_rcu(sta, &local->sta_list, list) {
 		if (!sta->uploaded ||
@@ -828,9 +831,7 @@
 {
 	bool radar_enabled;
 
-	lockdep_assert_held(&local->chanctx_mtx);
-	/* for ieee80211_is_radar_required */
-	lockdep_assert_held(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	radar_enabled = ieee80211_chanctx_radar_required(local, chanctx);
 
@@ -860,7 +861,7 @@
 		return -ENOTSUPP;
 
 	conf = rcu_dereference_protected(link->conf->chanctx_conf,
-					 lockdep_is_held(&local->chanctx_mtx));
+					 lockdep_is_held(&local->hw.wiphy->mtx));
 
 	if (conf) {
 		curr_ctx = container_of(conf, struct ieee80211_chanctx, conf);
@@ -915,7 +916,7 @@
 	struct ieee80211_sub_if_data *sdata;
 	u8 rx_chains_static, rx_chains_dynamic;
 
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	rx_chains_static = 1;
 	rx_chains_dynamic = 1;
@@ -1018,7 +1019,7 @@
 	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
 		return;
 
-	lockdep_assert_held(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	/* Check that conf exists, even when clearing this function
 	 * must be called with the AP's channel context still there
@@ -1027,7 +1028,7 @@
 	 * to a channel context that has already been freed.
 	 */
 	conf = rcu_dereference_protected(link_conf->chanctx_conf,
-					 lockdep_is_held(&local->chanctx_mtx));
+					 lockdep_is_held(&local->hw.wiphy->mtx));
 	WARN_ON(!conf);
 
 	if (clear)
@@ -1051,11 +1052,9 @@
 {
 	struct ieee80211_local *local = link->sdata->local;
 
-	mutex_lock(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	__ieee80211_link_copy_chanctx_to_vlans(link, clear);
-
-	mutex_unlock(&local->chanctx_mtx);
 }
 
 int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link)
@@ -1063,7 +1062,7 @@
 	struct ieee80211_sub_if_data *sdata = link->sdata;
 	struct ieee80211_chanctx *ctx = link->reserved_chanctx;
 
-	lockdep_assert_held(&sdata->local->chanctx_mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (WARN_ON(!ctx))
 		return -EINVAL;
@@ -1103,7 +1102,7 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_chanctx *new_ctx, *curr_ctx, *ctx;
 
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	curr_ctx = ieee80211_link_get_chanctx(link);
 	if (curr_ctx && local->use_chanctx && !local->ops->switch_vif_chanctx)
@@ -1201,12 +1200,12 @@
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_MESH_POINT:
 	case NL80211_IFTYPE_OCB:
-		ieee80211_queue_work(&sdata->local->hw,
+		wiphy_work_queue(sdata->local->hw.wiphy,
 				     &link->csa_finalize_work);
 		break;
 	case NL80211_IFTYPE_STATION:
-		ieee80211_queue_work(&sdata->local->hw,
-				     &link->u.mgd.chswitch_work);
+		wiphy_delayed_work_queue(sdata->local->hw.wiphy,
+					 &link->u.mgd.chswitch_work, 0);
 		break;
 	case NL80211_IFTYPE_UNSPECIFIED:
 	case NL80211_IFTYPE_AP_VLAN:
@@ -1257,11 +1256,10 @@
 	struct ieee80211_vif_chanctx_switch vif_chsw[1] = {};
 	struct ieee80211_chanctx *old_ctx, *new_ctx;
 	const struct cfg80211_chan_def *chandef;
-	u32 changed = 0;
+	u64 changed = 0;
 	int err;
 
-	lockdep_assert_held(&local->mtx);
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	new_ctx = link->reserved_chanctx;
 	old_ctx = ieee80211_link_get_chanctx(link);
@@ -1385,7 +1383,7 @@
 	struct ieee80211_sub_if_data *sdata = link->sdata;
 	struct ieee80211_chanctx *old_ctx, *new_ctx;
 
-	lockdep_assert_held(&sdata->local->chanctx_mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	new_ctx = link->reserved_chanctx;
 	old_ctx = ieee80211_link_get_chanctx(link);
@@ -1410,8 +1408,7 @@
 {
 	const struct cfg80211_chan_def *chandef;
 
-	lockdep_assert_held(&local->mtx);
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	chandef = ieee80211_chanctx_reserved_chandef(local, new_ctx, NULL);
 	if (WARN_ON(!chandef))
@@ -1432,8 +1429,7 @@
 	struct ieee80211_chanctx *ctx, *old_ctx;
 	int i, err;
 
-	lockdep_assert_held(&local->mtx);
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	vif_chsw = kcalloc(n_vifs, sizeof(vif_chsw[0]), GFP_KERNEL);
 	if (!vif_chsw)
@@ -1477,8 +1473,7 @@
 	struct ieee80211_chanctx *ctx;
 	int err;
 
-	lockdep_assert_held(&local->mtx);
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	list_for_each_entry(ctx, &local->chanctx_list, list) {
 		if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
@@ -1518,8 +1513,7 @@
 	int err, n_assigned, n_reserved, n_ready;
 	int n_ctx = 0, n_vifs_switch = 0, n_vifs_assign = 0, n_vifs_ctxless = 0;
 
-	lockdep_assert_held(&local->mtx);
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	/*
 	 * If there are 2 independent pairs of channel contexts performing
@@ -1653,7 +1647,7 @@
 				    reserved_chanctx_list) {
 			struct ieee80211_sub_if_data *sdata = link->sdata;
 			struct ieee80211_bss_conf *link_conf = link->conf;
-			u32 changed = 0;
+			u64 changed = 0;
 
 			if (!ieee80211_link_has_in_place_reservation(link))
 				continue;
@@ -1778,10 +1772,10 @@
 	struct ieee80211_chanctx *ctx;
 	bool use_reserved_switch = false;
 
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	conf = rcu_dereference_protected(link_conf->chanctx_conf,
-					 lockdep_is_held(&local->chanctx_mtx));
+					 lockdep_is_held(&local->hw.wiphy->mtx));
 	if (!conf)
 		return;
 
@@ -1816,7 +1810,7 @@
 	u8 radar_detect_width = 0;
 	int ret;
 
-	lockdep_assert_held(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (sdata->vif.active_links &&
 	    !(sdata->vif.active_links & BIT(link->link_id))) {
@@ -1824,8 +1818,6 @@
 		return 0;
 	}
 
-	mutex_lock(&local->chanctx_mtx);
-
 	ret = cfg80211_chandef_dfs_required(local->hw.wiphy,
 					    chandef,
 					    sdata->wdev.iftype);
@@ -1867,7 +1859,6 @@
 	if (ret)
 		link->radar_required = false;
 
-	mutex_unlock(&local->chanctx_mtx);
 	return ret;
 }
 
@@ -1879,8 +1870,7 @@
 	struct ieee80211_chanctx *old_ctx;
 	int err;
 
-	lockdep_assert_held(&local->mtx);
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	new_ctx = link->reserved_chanctx;
 	old_ctx = ieee80211_link_get_chanctx(link);
@@ -1943,51 +1933,40 @@
 	struct ieee80211_chanctx_conf *conf;
 	struct ieee80211_chanctx *ctx;
 	const struct cfg80211_chan_def *compat;
-	int ret;
+
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
 				     IEEE80211_CHAN_DISABLED))
 		return -EINVAL;
 
-	mutex_lock(&local->chanctx_mtx);
-	if (cfg80211_chandef_identical(chandef, &link_conf->chandef)) {
-		ret = 0;
-		goto out;
-	}
+	if (cfg80211_chandef_identical(chandef, &link_conf->chandef))
+		return 0;
 
 	if (chandef->width == NL80211_CHAN_WIDTH_20_NOHT ||
-	    link_conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT) {
-		ret = -EINVAL;
-		goto out;
-	}
+	    link_conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
+		return -EINVAL;
 
 	conf = rcu_dereference_protected(link_conf->chanctx_conf,
-					 lockdep_is_held(&local->chanctx_mtx));
-	if (!conf) {
-		ret = -EINVAL;
-		goto out;
-	}
+					 lockdep_is_held(&local->hw.wiphy->mtx));
+	if (!conf)
+		return -EINVAL;
 
 	ctx = container_of(conf, struct ieee80211_chanctx, conf);
 
 	compat = cfg80211_chandef_compatible(&conf->def, chandef);
-	if (!compat) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (!compat)
+		return -EINVAL;
 
 	switch (ctx->replace_state) {
 	case IEEE80211_CHANCTX_REPLACE_NONE:
-		if (!ieee80211_chanctx_reserved_chandef(local, ctx, compat)) {
-			ret = -EBUSY;
-			goto out;
-		}
+		if (!ieee80211_chanctx_reserved_chandef(local, ctx, compat))
+			return -EBUSY;
 		break;
 	case IEEE80211_CHANCTX_WILL_BE_REPLACED:
 		/* TODO: Perhaps the bandwidth change could be treated as a
 		 * reservation itself? */
-		ret = -EBUSY;
-		goto out;
+		return -EBUSY;
 	case IEEE80211_CHANCTX_REPLACES_OTHER:
 		/* channel context that is going to replace another channel
 		 * context doesn't really exist and shouldn't be assigned
@@ -2001,23 +1980,18 @@
 	ieee80211_recalc_chanctx_chantype(local, ctx);
 
 	*changed |= BSS_CHANGED_BANDWIDTH;
-	ret = 0;
- out:
-	mutex_unlock(&local->chanctx_mtx);
-	return ret;
+	return 0;
 }
 
 void ieee80211_link_release_channel(struct ieee80211_link_data *link)
 {
 	struct ieee80211_sub_if_data *sdata = link->sdata;
 
-	mutex_lock(&sdata->local->chanctx_mtx);
-	if (rcu_access_pointer(link->conf->chanctx_conf)) {
-		lockdep_assert_held(&sdata->local->mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
+	if (rcu_access_pointer(link->conf->chanctx_conf))
 		__ieee80211_link_release_channel(link);
 	}
-	mutex_unlock(&sdata->local->chanctx_mtx);
-}
 
 void ieee80211_link_vlan_copy_chanctx(struct ieee80211_link_data *link)
 {
@@ -2029,20 +2003,19 @@
 	struct ieee80211_sub_if_data *ap;
 	struct ieee80211_chanctx_conf *conf;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->bss))
 		return;
 
 	ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
 
-	mutex_lock(&local->chanctx_mtx);
-
 	rcu_read_lock();
 	ap_conf = rcu_dereference(ap->vif.link_conf[link_id]);
 	conf = rcu_dereference_protected(ap_conf->chanctx_conf,
-					 lockdep_is_held(&local->chanctx_mtx));
+					 lockdep_is_held(&local->hw.wiphy->mtx));
 	rcu_assign_pointer(link_conf->chanctx_conf, conf);
 	rcu_read_unlock();
-	mutex_unlock(&local->chanctx_mtx);
 }
 
 void ieee80211_iter_chan_contexts_atomic(
diff -ruw linux-6.4/net/mac80211/debug.h linux-6.4-fbx/net/mac80211/debug.h
--- linux-6.4/net/mac80211/debug.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/debug.h	2023-11-07 13:38:44.074257019 +0100
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Portions
- * Copyright (C) 2022 Intel Corporation
+ * Copyright (C) 2022 - 2023 Intel Corporation
  */
 #ifndef __MAC80211_DEBUG_H
 #define __MAC80211_DEBUG_H
@@ -136,7 +136,7 @@
 
 #define link_info(link, fmt, ...)					\
 	do {								\
-		if ((link)->sdata->vif.valid_links)			\
+		if (ieee80211_vif_is_mld(&(link)->sdata->vif))          \
 			_sdata_info((link)->sdata, "[link %d] " fmt,	\
 				    (link)->link_id,			\
 				    ##__VA_ARGS__);			\
@@ -145,7 +145,7 @@
 	} while (0)
 #define link_err(link, fmt, ...)					\
 	do {								\
-		if ((link)->sdata->vif.valid_links)			\
+		if (ieee80211_vif_is_mld(&(link)->sdata->vif))          \
 			_sdata_err((link)->sdata, "[link %d] " fmt,	\
 				   (link)->link_id,			\
 				   ##__VA_ARGS__);			\
@@ -154,7 +154,7 @@
 	} while (0)
 #define link_dbg(link, fmt, ...)					\
 	do {								\
-		if ((link)->sdata->vif.valid_links)			\
+		if (ieee80211_vif_is_mld(&(link)->sdata->vif))          \
 			_sdata_dbg(1, (link)->sdata, "[link %d] " fmt,	\
 				   (link)->link_id,			\
 				   ##__VA_ARGS__);			\
diff -ruw linux-6.4/net/mac80211/debugfs.c linux-6.4-fbx/net/mac80211/debugfs.c
--- linux-6.4/net/mac80211/debugfs.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/debugfs.c	2023-11-07 13:38:44.074257019 +0100
@@ -4,7 +4,7 @@
  *
  * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright (C) 2018 - 2019, 2021-2022 Intel Corporation
+ * Copyright (C) 2018 - 2019, 2021-2023 Intel Corporation
  */
 
 #include <linux/debugfs.h>
@@ -288,10 +288,10 @@
 	q_limit_low_old = local->aql_txq_limit_low[ac];
 	q_limit_high_old = local->aql_txq_limit_high[ac];
 
+	wiphy_lock(local->hw.wiphy);
 	local->aql_txq_limit_low[ac] = q_limit_low;
 	local->aql_txq_limit_high[ac] = q_limit_high;
 
-	mutex_lock(&local->sta_mtx);
 	list_for_each_entry(sta, &local->sta_list, list) {
 		/* If a sta has customized queue limits, keep it */
 		if (sta->airtime[ac].aql_limit_low == q_limit_low_old &&
@@ -300,7 +300,8 @@
 			sta->airtime[ac].aql_limit_high = q_limit_high;
 		}
 	}
-	mutex_unlock(&local->sta_mtx);
+	wiphy_unlock(local->hw.wiphy);
+
 	return count;
 }
 
@@ -496,6 +497,7 @@
 	FLAG(SUPPORTS_CONC_MON_RX_DECAP),
 	FLAG(DETECTS_COLOR_COLLISION),
 	FLAG(MLO_MCAST_MULTI_LINK_TX),
+	FLAG(APVLAN_NEED_MCAST_TO_UCAST),
 #undef FLAG
 };
 
@@ -594,9 +596,9 @@
 	char buf[20];
 	int res;
 
-	rtnl_lock();
+	wiphy_lock(local->hw.wiphy);
 	res = drv_get_stats(local, &stats);
-	rtnl_unlock();
+	wiphy_unlock(local->hw.wiphy);
 	if (res)
 		return res;
 	res = printvalue(&stats, buf, sizeof(buf));
diff -ruw linux-6.4/net/mac80211/debugfs_key.c linux-6.4-fbx/net/mac80211/debugfs_key.c
--- linux-6.4/net/mac80211/debugfs_key.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/debugfs_key.c	2023-11-07 13:38:44.074257019 +0100
@@ -4,7 +4,7 @@
  * Copyright (c) 2006	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
  * Copyright (C) 2015	Intel Deutschland GmbH
- * Copyright (C) 2021-2022   Intel Corporation
+ * Copyright (C) 2021-2023   Intel Corporation
  */
 
 #include <linux/kobject.h>
@@ -378,13 +378,13 @@
 	if (!sdata->vif.debugfs_dir)
 		return;
 
-	lockdep_assert_held(&sdata->local->key_mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	debugfs_remove(sdata->debugfs.default_unicast_key);
 	sdata->debugfs.default_unicast_key = NULL;
 
 	if (sdata->default_unicast_key) {
-		key = key_mtx_dereference(sdata->local,
+		key = wiphy_dereference(sdata->local->hw.wiphy,
 					  sdata->default_unicast_key);
 		sprintf(buf, "../keys/%d", key->debugfs.cnt);
 		sdata->debugfs.default_unicast_key =
@@ -396,7 +396,7 @@
 	sdata->debugfs.default_multicast_key = NULL;
 
 	if (sdata->deflink.default_multicast_key) {
-		key = key_mtx_dereference(sdata->local,
+		key = wiphy_dereference(sdata->local->hw.wiphy,
 					  sdata->deflink.default_multicast_key);
 		sprintf(buf, "../keys/%d", key->debugfs.cnt);
 		sdata->debugfs.default_multicast_key =
@@ -413,7 +413,7 @@
 	if (!sdata->vif.debugfs_dir)
 		return;
 
-	key = key_mtx_dereference(sdata->local,
+	key = wiphy_dereference(sdata->local->hw.wiphy,
 				  sdata->deflink.default_mgmt_key);
 	if (key) {
 		sprintf(buf, "../keys/%d", key->debugfs.cnt);
@@ -442,7 +442,7 @@
 	if (!sdata->vif.debugfs_dir)
 		return;
 
-	key = key_mtx_dereference(sdata->local,
+	key = wiphy_dereference(sdata->local->hw.wiphy,
 				  sdata->deflink.default_beacon_key);
 	if (key) {
 		sprintf(buf, "../keys/%d", key->debugfs.cnt);
diff -ruw linux-6.4/net/mac80211/debugfs_netdev.c linux-6.4-fbx/net/mac80211/debugfs_netdev.c
--- linux-6.4/net/mac80211/debugfs_netdev.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/debugfs_netdev.c	2023-11-07 13:38:44.074257019 +0100
@@ -2,7 +2,7 @@
 /*
  * Copyright (c) 2006	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2020-2022 Intel Corporation
+ * Copyright (C) 2020-2023 Intel Corporation
  */
 
 #include <linux/kernel.h>
@@ -22,18 +22,18 @@
 #include "debugfs_netdev.h"
 #include "driver-ops.h"
 
-static ssize_t ieee80211_if_read(
-	void *data,
+static ssize_t ieee80211_if_read_sdata(
+	struct ieee80211_sub_if_data *sdata,
 	char __user *userbuf,
 	size_t count, loff_t *ppos,
-	ssize_t (*format)(const void *, char *, int))
+	ssize_t (*format)(const struct ieee80211_sub_if_data *sdata, char *, int))
 {
 	char buf[200];
 	ssize_t ret = -EINVAL;
 
-	read_lock(&dev_base_lock);
-	ret = (*format)(data, buf, sizeof(buf));
-	read_unlock(&dev_base_lock);
+	wiphy_lock(sdata->local->hw.wiphy);
+	ret = (*format)(sdata, buf, sizeof(buf));
+	wiphy_unlock(sdata->local->hw.wiphy);
 
 	if (ret >= 0)
 		ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret);
@@ -41,11 +41,11 @@
 	return ret;
 }
 
-static ssize_t ieee80211_if_write(
-	void *data,
+static ssize_t ieee80211_if_write_sdata(
+	struct ieee80211_sub_if_data *sdata,
 	const char __user *userbuf,
 	size_t count, loff_t *ppos,
-	ssize_t (*write)(void *, const char *, int))
+	ssize_t (*write)(struct ieee80211_sub_if_data *sdata, const char *, int))
 {
 	char buf[64];
 	ssize_t ret;
@@ -57,9 +57,51 @@
 		return -EFAULT;
 	buf[count] = '\0';
 
-	rtnl_lock();
-	ret = (*write)(data, buf, count);
-	rtnl_unlock();
+	wiphy_lock(sdata->local->hw.wiphy);
+	ret = (*write)(sdata, buf, count);
+	wiphy_unlock(sdata->local->hw.wiphy);
+
+	return ret;
+}
+
+static ssize_t ieee80211_if_read_link(
+	struct ieee80211_link_data *link,
+	char __user *userbuf,
+	size_t count, loff_t *ppos,
+	ssize_t (*format)(const struct ieee80211_link_data *link, char *, int))
+{
+	char buf[200];
+	ssize_t ret = -EINVAL;
+
+	wiphy_lock(link->sdata->local->hw.wiphy);
+	ret = (*format)(link, buf, sizeof(buf));
+	wiphy_unlock(link->sdata->local->hw.wiphy);
+
+	if (ret >= 0)
+		ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret);
+
+	return ret;
+}
+
+static ssize_t ieee80211_if_write_link(
+	struct ieee80211_link_data *link,
+	const char __user *userbuf,
+	size_t count, loff_t *ppos,
+	ssize_t (*write)(struct ieee80211_link_data *link, const char *, int))
+{
+	char buf[64];
+	ssize_t ret;
+
+	if (count >= sizeof(buf))
+		return -E2BIG;
+
+	if (copy_from_user(buf, userbuf, count))
+		return -EFAULT;
+	buf[count] = '\0';
+
+	wiphy_lock(link->sdata->local->hw.wiphy);
+	ret = (*write)(link, buf, count);
+	wiphy_unlock(link->sdata->local->hw.wiphy);
 
 	return ret;
 }
@@ -126,41 +168,37 @@
 	.llseek = generic_file_llseek,					\
 }
 
-#define _IEEE80211_IF_FILE_R_FN(name, type)				\
+#define _IEEE80211_IF_FILE_R_FN(name)					\
 static ssize_t ieee80211_if_read_##name(struct file *file,		\
 					char __user *userbuf,		\
 					size_t count, loff_t *ppos)	\
 {									\
-	ssize_t (*fn)(const void *, char *, int) = (void *)		\
-		((ssize_t (*)(const type, char *, int))			\
+	return ieee80211_if_read_sdata(file->private_data,		\
+				       userbuf, count, ppos,		\
 		 ieee80211_if_fmt_##name);				\
-	return ieee80211_if_read(file->private_data,			\
-				 userbuf, count, ppos, fn);		\
 }
 
-#define _IEEE80211_IF_FILE_W_FN(name, type)				\
+#define _IEEE80211_IF_FILE_W_FN(name)					\
 static ssize_t ieee80211_if_write_##name(struct file *file,		\
 					 const char __user *userbuf,	\
 					 size_t count, loff_t *ppos)	\
 {									\
-	ssize_t (*fn)(void *, const char *, int) = (void *)		\
-		((ssize_t (*)(type, const char *, int))			\
+	return ieee80211_if_write_sdata(file->private_data, userbuf,	\
+					count, ppos,			\
 		 ieee80211_if_parse_##name);				\
-	return ieee80211_if_write(file->private_data, userbuf, count,	\
-				  ppos, fn);				\
 }
 
 #define IEEE80211_IF_FILE_R(name)					\
-	_IEEE80211_IF_FILE_R_FN(name, struct ieee80211_sub_if_data *)	\
+	_IEEE80211_IF_FILE_R_FN(name)					\
 	_IEEE80211_IF_FILE_OPS(name, ieee80211_if_read_##name, NULL)
 
 #define IEEE80211_IF_FILE_W(name)					\
-	_IEEE80211_IF_FILE_W_FN(name, struct ieee80211_sub_if_data *)	\
+	_IEEE80211_IF_FILE_W_FN(name)					\
 	_IEEE80211_IF_FILE_OPS(name, NULL, ieee80211_if_write_##name)
 
 #define IEEE80211_IF_FILE_RW(name)					\
-	_IEEE80211_IF_FILE_R_FN(name, struct ieee80211_sub_if_data *)	\
-	_IEEE80211_IF_FILE_W_FN(name, struct ieee80211_sub_if_data *)	\
+	_IEEE80211_IF_FILE_R_FN(name)					\
+	_IEEE80211_IF_FILE_W_FN(name)					\
 	_IEEE80211_IF_FILE_OPS(name, ieee80211_if_read_##name,		\
 			       ieee80211_if_write_##name)
 
@@ -168,18 +206,37 @@
 	IEEE80211_IF_FMT_##format(name, struct ieee80211_sub_if_data, field) \
 	IEEE80211_IF_FILE_R(name)
 
-/* Same but with a link_ prefix in the ops variable name and different type */
+#define _IEEE80211_IF_LINK_R_FN(name)					\
+static ssize_t ieee80211_if_read_##name(struct file *file,		\
+					char __user *userbuf,		\
+					size_t count, loff_t *ppos)	\
+{									\
+	return ieee80211_if_read_link(file->private_data,		\
+				      userbuf, count, ppos,		\
+				      ieee80211_if_fmt_##name);	\
+}
+
+#define _IEEE80211_IF_LINK_W_FN(name)					\
+static ssize_t ieee80211_if_write_##name(struct file *file,		\
+					 const char __user *userbuf,	\
+					 size_t count, loff_t *ppos)	\
+{									\
+	return ieee80211_if_write_link(file->private_data, userbuf,	\
+				       count, ppos,			\
+				       ieee80211_if_parse_##name);	\
+}
+
 #define IEEE80211_IF_LINK_FILE_R(name)					\
-	_IEEE80211_IF_FILE_R_FN(name, struct ieee80211_link_data *)	\
+	_IEEE80211_IF_LINK_R_FN(name)					\
 	_IEEE80211_IF_FILE_OPS(link_##name, ieee80211_if_read_##name, NULL)
 
 #define IEEE80211_IF_LINK_FILE_W(name)					\
-	_IEEE80211_IF_FILE_W_FN(name)					\
+	_IEEE80211_IF_LINK_W_FN(name)					\
 	_IEEE80211_IF_FILE_OPS(link_##name, NULL, ieee80211_if_write_##name)
 
 #define IEEE80211_IF_LINK_FILE_RW(name)					\
-	_IEEE80211_IF_FILE_R_FN(name, struct ieee80211_link_data *)	\
-	_IEEE80211_IF_FILE_W_FN(name, struct ieee80211_link_data *)	\
+	_IEEE80211_IF_LINK_R_FN(name)					\
+	_IEEE80211_IF_LINK_W_FN(name)					\
 	_IEEE80211_IF_FILE_OPS(link_##name, ieee80211_if_read_##name,	\
 			       ieee80211_if_write_##name)
 
@@ -265,7 +322,12 @@
 {
 	struct ieee80211_sub_if_data *sdata = link->sdata;
 	struct ieee80211_local *local = sdata->local;
-	int err;
+
+	/* The driver indicated that EML is enabled for the interface, thus do
+	 * not allow to override the SMPS state.
+	 */
+	if (sdata->vif.driver_flags & IEEE80211_VIF_EML_ACTIVE)
+		return -EOPNOTSUPP;
 
 	if (!(local->hw.wiphy->features & NL80211_FEATURE_STATIC_SMPS) &&
 	    smps_mode == IEEE80211_SMPS_STATIC)
@@ -280,11 +342,7 @@
 	if (sdata->vif.type != NL80211_IFTYPE_STATION)
 		return -EOPNOTSUPP;
 
-	sdata_lock(sdata);
-	err = __ieee80211_request_smps_mgd(link->sdata, link, smps_mode);
-	sdata_unlock(sdata);
-
-	return err;
+	return __ieee80211_request_smps_mgd(link->sdata, link, smps_mode);
 }
 
 static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
@@ -356,16 +414,13 @@
 	case NL80211_IFTYPE_STATION:
 		fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
 		/* BSSID SA DA */
-		sdata_lock(sdata);
 		if (!sdata->u.mgd.associated) {
-			sdata_unlock(sdata);
 			dev_kfree_skb(skb);
 			return -ENOTCONN;
 		}
 		memcpy(hdr->addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN);
 		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
 		memcpy(hdr->addr3, addr, ETH_ALEN);
-		sdata_unlock(sdata);
 		break;
 	default:
 		dev_kfree_skb(skb);
@@ -690,6 +745,19 @@
 	debugfs_create_file(#name, mode, sdata->vif.debugfs_dir, \
 			    sdata, &name##_ops)
 
+#define DEBUGFS_ADD_X(_bits, _name, _mode) \
+	debugfs_create_x##_bits(#_name, _mode, sdata->vif.debugfs_dir, \
+				&sdata->vif._name)
+
+#define DEBUGFS_ADD_X8(_name, _mode) \
+	DEBUGFS_ADD_X(8, _name, _mode)
+
+#define DEBUGFS_ADD_X16(_name, _mode) \
+	DEBUGFS_ADD_X(16, _name, _mode)
+
+#define DEBUGFS_ADD_X32(_name, _mode) \
+	DEBUGFS_ADD_X(32, _name, _mode)
+
 #define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400)
 
 static void add_common_files(struct ieee80211_sub_if_data *sdata)
@@ -717,8 +785,9 @@
 	DEBUGFS_ADD_MODE(uapsd_queues, 0600);
 	DEBUGFS_ADD_MODE(uapsd_max_sp_len, 0600);
 	DEBUGFS_ADD_MODE(tdls_wider_bw, 0600);
-	DEBUGFS_ADD_MODE(valid_links, 0200);
+	DEBUGFS_ADD_MODE(valid_links, 0400);
 	DEBUGFS_ADD_MODE(active_links, 0600);
+	DEBUGFS_ADD_X16(dormant_links, 0400);
 }
 
 static void add_ap_files(struct ieee80211_sub_if_data *sdata)
@@ -868,18 +937,20 @@
 	}
 }
 
-void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata)
+void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata,
+				  bool mld_vif)
 {
 	char buf[10+IFNAMSIZ];
 
 	sprintf(buf, "netdev:%s", sdata->name);
 	sdata->vif.debugfs_dir = debugfs_create_dir(buf,
 		sdata->local->hw.wiphy->debugfsdir);
+	/* deflink also has this */
+	sdata->deflink.debugfs_dir = sdata->vif.debugfs_dir;
 	sdata->debugfs.subdir_stations = debugfs_create_dir("stations",
 							sdata->vif.debugfs_dir);
 	add_files(sdata);
-
-	if (!(sdata->local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO))
+	if (!mld_vif)
 		add_link_files(&sdata->deflink, sdata->vif.debugfs_dir);
 }
 
@@ -907,11 +978,21 @@
 	debugfs_rename(dir->d_parent, dir, dir->d_parent, buf);
 }
 
+void ieee80211_debugfs_recreate_netdev(struct ieee80211_sub_if_data *sdata,
+				       bool mld_vif)
+{
+	ieee80211_debugfs_remove_netdev(sdata);
+	ieee80211_debugfs_add_netdev(sdata, mld_vif);
+	drv_vif_add_debugfs(sdata->local, sdata);
+	if (!mld_vif)
+		ieee80211_link_debugfs_drv_add(&sdata->deflink);
+}
+
 void ieee80211_link_debugfs_add(struct ieee80211_link_data *link)
 {
 	char link_dir_name[10];
 
-	if (WARN_ON(!link->sdata->vif.debugfs_dir))
+	if (WARN_ON(!link->sdata->vif.debugfs_dir || link->debugfs_dir))
 		return;
 
 	/* For now, this should not be called for non-MLO capable drivers */
@@ -948,7 +1029,8 @@
 
 void ieee80211_link_debugfs_drv_add(struct ieee80211_link_data *link)
 {
-	if (WARN_ON(!link->debugfs_dir))
+	if (link->sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+	    WARN_ON(!link->debugfs_dir))
 		return;
 
 	drv_link_add_debugfs(link->sdata->local, link->sdata,
diff -ruw linux-6.4/net/mac80211/debugfs_netdev.h linux-6.4-fbx/net/mac80211/debugfs_netdev.h
--- linux-6.4/net/mac80211/debugfs_netdev.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/debugfs_netdev.h	2023-11-07 13:38:44.074257019 +0100
@@ -1,4 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions:
+ * Copyright (C) 2023 Intel Corporation
+ */
 /* routines exported for debugfs handling */
 
 #ifndef __IEEE80211_DEBUGFS_NETDEV_H
@@ -7,9 +11,12 @@
 #include "ieee80211_i.h"
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata);
+void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata,
+				  bool mld_vif);
 void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata);
 void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata);
+void ieee80211_debugfs_recreate_netdev(struct ieee80211_sub_if_data *sdata,
+				       bool mld_vif);
 
 void ieee80211_link_debugfs_add(struct ieee80211_link_data *link);
 void ieee80211_link_debugfs_remove(struct ieee80211_link_data *link);
@@ -18,7 +25,7 @@
 void ieee80211_link_debugfs_drv_remove(struct ieee80211_link_data *link);
 #else
 static inline void ieee80211_debugfs_add_netdev(
-	struct ieee80211_sub_if_data *sdata)
+	struct ieee80211_sub_if_data *sdata, bool mld_vif)
 {}
 static inline void ieee80211_debugfs_remove_netdev(
 	struct ieee80211_sub_if_data *sdata)
@@ -26,7 +33,9 @@
 static inline void ieee80211_debugfs_rename_netdev(
 	struct ieee80211_sub_if_data *sdata)
 {}
-
+static inline void ieee80211_debugfs_recreate_netdev(
+	struct ieee80211_sub_if_data *sdata, bool mld_vif)
+{}
 static inline void ieee80211_link_debugfs_add(struct ieee80211_link_data *link)
 {}
 static inline void ieee80211_link_debugfs_remove(struct ieee80211_link_data *link)
diff -ruw linux-6.4/net/mac80211/debugfs_sta.c linux-6.4-fbx/net/mac80211/debugfs_sta.c
--- linux-6.4/net/mac80211/debugfs_sta.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/debugfs_sta.c	2023-11-07 13:38:44.074257019 +0100
@@ -5,7 +5,7 @@
  * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2022 Intel Corporation
+ * Copyright (C) 2018 - 2023 Intel Corporation
  */
 
 #include <linux/debugfs.h>
@@ -420,6 +420,7 @@
 	if (ret || tid >= IEEE80211_NUM_TIDS)
 		return -EINVAL;
 
+	wiphy_lock(sta->local->hw.wiphy);
 	if (tx) {
 		if (start)
 			ret = ieee80211_start_tx_ba_session(&sta->sta, tid,
@@ -431,6 +432,7 @@
 					       3, true);
 		ret = 0;
 	}
+	wiphy_unlock(sta->local->hw.wiphy);
 
 	return ret ?: count;
 }
@@ -1035,6 +1037,190 @@
 }
 LINK_STA_OPS(he_capa);
 
+static ssize_t link_sta_eht_capa_read(struct file *file, char __user *userbuf,
+				      size_t count, loff_t *ppos)
+{
+	char *buf, *p;
+	size_t buf_sz = PAGE_SIZE;
+	struct link_sta_info *link_sta = file->private_data;
+	struct ieee80211_sta_eht_cap *bec = &link_sta->pub->eht_cap;
+	struct ieee80211_eht_cap_elem_fixed *fixed = &bec->eht_cap_elem;
+	struct ieee80211_eht_mcs_nss_supp *nss = &bec->eht_mcs_nss_supp;
+	u8 *cap;
+	int i;
+	ssize_t ret;
+	static const char *mcs_desc[] = { "0-7", "8-9", "10-11", "12-13"};
+
+	buf = kmalloc(buf_sz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	p = buf;
+
+	p += scnprintf(p, buf_sz + buf - p, "EHT %ssupported\n",
+		       bec->has_eht ? "" : "not ");
+	if (!bec->has_eht)
+		goto out;
+
+	p += scnprintf(p, buf_sz + buf - p,
+		       "MAC-CAP: %#.2x %#.2x\n",
+		       fixed->mac_cap_info[0], fixed->mac_cap_info[1]);
+	p += scnprintf(p, buf_sz + buf - p,
+		       "PHY-CAP: %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x\n",
+		       fixed->phy_cap_info[0], fixed->phy_cap_info[1],
+		       fixed->phy_cap_info[2], fixed->phy_cap_info[3],
+		       fixed->phy_cap_info[4], fixed->phy_cap_info[5],
+		       fixed->phy_cap_info[6], fixed->phy_cap_info[7],
+		       fixed->phy_cap_info[8]);
+
+#define PRINT(fmt, ...)							\
+	p += scnprintf(p, buf_sz + buf - p, "\t\t" fmt "\n",		\
+		       ##__VA_ARGS__)
+
+#define PFLAG(t, n, a, b)						\
+	do {								\
+		if (cap[n] & IEEE80211_EHT_##t##_CAP##n##_##a)		\
+			PRINT("%s", b);					\
+	} while (0)
+
+	cap = fixed->mac_cap_info;
+	PFLAG(MAC, 0, EPCS_PRIO_ACCESS, "EPCS-PRIO-ACCESS");
+	PFLAG(MAC, 0, OM_CONTROL, "OM-CONTROL");
+	PFLAG(MAC, 0, TRIG_TXOP_SHARING_MODE1, "TRIG-TXOP-SHARING-MODE1");
+	PFLAG(MAC, 0, TRIG_TXOP_SHARING_MODE2, "TRIG-TXOP-SHARING-MODE2");
+	PFLAG(MAC, 0, RESTRICTED_TWT, "RESTRICTED-TWT");
+	PFLAG(MAC, 0, SCS_TRAFFIC_DESC, "SCS-TRAFFIC-DESC");
+	switch ((cap[0] & 0xc0) >> 6) {
+	case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_3895:
+		PRINT("MAX-MPDU-LEN: 3985");
+		break;
+	case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991:
+		PRINT("MAX-MPDU-LEN: 7991");
+		break;
+	case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454:
+		PRINT("MAX-MPDU-LEN: 11454");
+		break;
+	}
+
+	cap = fixed->phy_cap_info;
+	PFLAG(PHY, 0, 320MHZ_IN_6GHZ, "320MHZ-IN-6GHZ");
+	PFLAG(PHY, 0, 242_TONE_RU_GT20MHZ, "242-TONE-RU-GT20MHZ");
+	PFLAG(PHY, 0, NDP_4_EHT_LFT_32_GI, "NDP-4-EHT-LFT-32-GI");
+	PFLAG(PHY, 0, PARTIAL_BW_UL_MU_MIMO, "PARTIAL-BW-UL-MU-MIMO");
+	PFLAG(PHY, 0, SU_BEAMFORMER, "SU-BEAMFORMER");
+	PFLAG(PHY, 0, SU_BEAMFORMEE, "SU-BEAMFORMEE");
+	i = cap[0] >> 7;
+	i |= (cap[1] & 0x3) << 1;
+	PRINT("BEAMFORMEE-80-NSS: %i", i);
+	PRINT("BEAMFORMEE-160-NSS: %i", (cap[1] >> 2) & 0x7);
+	PRINT("BEAMFORMEE-320-NSS: %i", (cap[1] >> 5) & 0x7);
+	PRINT("SOUNDING-DIM-80-NSS: %i", (cap[2] & 0x7));
+	PRINT("SOUNDING-DIM-160-NSS: %i", (cap[2] >> 3) & 0x7);
+	i = cap[2] >> 6;
+	i |= (cap[3] & 0x1) << 3;
+	PRINT("SOUNDING-DIM-320-NSS: %i", i);
+
+	PFLAG(PHY, 3, NG_16_SU_FEEDBACK, "NG-16-SU-FEEDBACK");
+	PFLAG(PHY, 3, NG_16_MU_FEEDBACK, "NG-16-MU-FEEDBACK");
+	PFLAG(PHY, 3, CODEBOOK_4_2_SU_FDBK, "CODEBOOK-4-2-SU-FDBK");
+	PFLAG(PHY, 3, CODEBOOK_7_5_MU_FDBK, "CODEBOOK-7-5-MU-FDBK");
+	PFLAG(PHY, 3, TRIG_SU_BF_FDBK, "TRIG-SU-BF-FDBK");
+	PFLAG(PHY, 3, TRIG_MU_BF_PART_BW_FDBK, "TRIG-MU-BF-PART-BW-FDBK");
+	PFLAG(PHY, 3, TRIG_CQI_FDBK, "TRIG-CQI-FDBK");
+
+	PFLAG(PHY, 4, PART_BW_DL_MU_MIMO, "PART-BW-DL-MU-MIMO");
+	PFLAG(PHY, 4, PSR_SR_SUPP, "PSR-SR-SUPP");
+	PFLAG(PHY, 4, POWER_BOOST_FACT_SUPP, "POWER-BOOST-FACT-SUPP");
+	PFLAG(PHY, 4, EHT_MU_PPDU_4_EHT_LTF_08_GI, "EHT-MU-PPDU-4-EHT-LTF-08-GI");
+	PRINT("MAX_NC: %i", cap[4] >> 4);
+
+	PFLAG(PHY, 5, NON_TRIG_CQI_FEEDBACK, "NON-TRIG-CQI-FEEDBACK");
+	PFLAG(PHY, 5, TX_LESS_242_TONE_RU_SUPP, "TX-LESS-242-TONE-RU-SUPP");
+	PFLAG(PHY, 5, RX_LESS_242_TONE_RU_SUPP, "RX-LESS-242-TONE-RU-SUPP");
+	PFLAG(PHY, 5, PPE_THRESHOLD_PRESENT, "PPE_THRESHOLD_PRESENT");
+	switch (cap[5] >> 4 & 0x3) {
+	case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US:
+		PRINT("NOMINAL_PKT_PAD: 0us");
+		break;
+	case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US:
+		PRINT("NOMINAL_PKT_PAD: 8us");
+		break;
+	case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US:
+		PRINT("NOMINAL_PKT_PAD: 16us");
+		break;
+	case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US:
+		PRINT("NOMINAL_PKT_PAD: 20us");
+		break;
+	}
+	i = cap[5] >> 6;
+	i |= cap[6] & 0x7;
+	PRINT("MAX-NUM-SUPP-EHT-LTF: %i", i);
+	PFLAG(PHY, 5, SUPP_EXTRA_EHT_LTF, "SUPP-EXTRA-EHT-LTF");
+
+	i = (cap[6] >> 3) & 0xf;
+	PRINT("MCS15-SUPP-MASK: %i", i);
+	PFLAG(PHY, 6, EHT_DUP_6GHZ_SUPP, "EHT-DUP-6GHZ-SUPP");
+
+	PFLAG(PHY, 7, 20MHZ_STA_RX_NDP_WIDER_BW, "20MHZ-STA-RX-NDP-WIDER-BW");
+	PFLAG(PHY, 7, NON_OFDMA_UL_MU_MIMO_80MHZ, "NON-OFDMA-UL-MU-MIMO-80MHZ");
+	PFLAG(PHY, 7, NON_OFDMA_UL_MU_MIMO_160MHZ, "NON-OFDMA-UL-MU-MIMO-160MHZ");
+	PFLAG(PHY, 7, NON_OFDMA_UL_MU_MIMO_320MHZ, "NON-OFDMA-UL-MU-MIMO-320MHZ");
+	PFLAG(PHY, 7, MU_BEAMFORMER_80MHZ, "MU-BEAMFORMER-80MHZ");
+	PFLAG(PHY, 7, MU_BEAMFORMER_160MHZ, "MU-BEAMFORMER-160MHZ");
+	PFLAG(PHY, 7, MU_BEAMFORMER_320MHZ, "MU-BEAMFORMER-320MHZ");
+	PFLAG(PHY, 7, TB_SOUNDING_FDBK_RATE_LIMIT, "TB-SOUNDING-FDBK-RATE-LIMIT");
+
+	PFLAG(PHY, 8, RX_1024QAM_WIDER_BW_DL_OFDMA, "RX-1024QAM-WIDER-BW-DL-OFDMA");
+	PFLAG(PHY, 8, RX_4096QAM_WIDER_BW_DL_OFDMA, "RX-4096QAM-WIDER-BW-DL-OFDMA");
+
+#undef PFLAG
+
+	PRINT(""); /* newline */
+	if (!(link_sta->pub->he_cap.he_cap_elem.phy_cap_info[0] &
+	      IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
+		u8 *mcs_vals = (u8 *)(&nss->only_20mhz);
+
+		for (i = 0; i < 4; i++)
+			PRINT("EHT bw=20 MHz, max NSS for MCS %s: Rx=%u, Tx=%u",
+			      mcs_desc[i],
+			      mcs_vals[i] & 0xf, mcs_vals[i] >> 4);
+	} else {
+		u8 *mcs_vals = (u8 *)(&nss->bw._80);
+
+		for (i = 0; i < 3; i++)
+			PRINT("EHT bw <= 80 MHz, max NSS for MCS %s: Rx=%u, Tx=%u",
+			      mcs_desc[i + 1],
+			      mcs_vals[i] & 0xf, mcs_vals[i] >> 4);
+
+		mcs_vals = (u8 *)(&nss->bw._160);
+		for (i = 0; i < 3; i++)
+			PRINT("EHT bw <= 160 MHz, max NSS for MCS %s: Rx=%u, Tx=%u",
+			      mcs_desc[i + 1],
+			      mcs_vals[i] & 0xf, mcs_vals[i] >> 4);
+
+		mcs_vals = (u8 *)(&nss->bw._320);
+		for (i = 0; i < 3; i++)
+			PRINT("EHT bw <= 320 MHz, max NSS for MCS %s: Rx=%u, Tx=%u",
+			      mcs_desc[i + 1],
+			      mcs_vals[i] & 0xf, mcs_vals[i] >> 4);
+	}
+
+	if (cap[5] & IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
+		u8 ppe_size = ieee80211_eht_ppe_size(bec->eht_ppe_thres[0], cap);
+
+		p += scnprintf(p, buf_sz + buf - p, "EHT PPE Thresholds: ");
+		for (i = 0; i < ppe_size; i++)
+			p += scnprintf(p, buf_sz + buf - p, "0x%02x ",
+				       bec->eht_ppe_thres[i]);
+		PRINT(""); /* newline */
+	}
+
+out:
+	ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+	kfree(buf);
+	return ret;
+}
+LINK_STA_OPS(eht_capa);
+
 #define DEBUGFS_ADD(name) \
 	debugfs_create_file(#name, 0400, \
 		sta->debugfs_dir, sta, &sta_ ##name## _ops)
@@ -1128,6 +1314,7 @@
 	DEBUGFS_ADD(ht_capa);
 	DEBUGFS_ADD(vht_capa);
 	DEBUGFS_ADD(he_capa);
+	DEBUGFS_ADD(eht_capa);
 
 	DEBUGFS_ADD_COUNTER(rx_duplicates, rx_stats.num_duplicates);
 	DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments);
diff -ruw linux-6.4/net/mac80211/driver-ops.c linux-6.4-fbx/net/mac80211/driver-ops.c
--- linux-6.4/net/mac80211/driver-ops.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/driver-ops.c	2023-11-07 13:38:44.074257019 +0100
@@ -15,6 +15,7 @@
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (WARN_ON(local->started))
 		return -EALREADY;
@@ -35,6 +36,7 @@
 void drv_stop(struct ieee80211_local *local)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (WARN_ON(!local->started))
 		return;
@@ -52,12 +54,37 @@
 	local->started = false;
 }
 
+int drv_get_powered(struct ieee80211_local *local, bool *up, bool *busy)
+{
+	int ret = -EOPNOTSUPP;
+
+	might_sleep();
+
+	if (local->ops->get_powered)
+		ret = local->ops->get_powered(&local->hw, up, busy);
+
+	return ret;
+}
+
+int drv_set_powered(struct ieee80211_local *local)
+{
+	int ret = -EOPNOTSUPP;
+
+	might_sleep();
+
+	if (local->ops->set_powered)
+		ret = local->ops->set_powered(&local->hw);
+
+	return ret;
+}
+
 int drv_add_interface(struct ieee80211_local *local,
 		      struct ieee80211_sub_if_data *sdata)
 {
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
 		    (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
@@ -69,10 +96,18 @@
 	ret = local->ops->add_interface(&local->hw, &sdata->vif);
 	trace_drv_return_int(local, ret);
 
-	if (ret == 0)
+	if (ret)
+		return ret;
+
 		sdata->flags |= IEEE80211_SDATA_IN_DRIVER;
 
-	return ret;
+	if (!local->in_reconfig) {
+		drv_vif_add_debugfs(local, sdata);
+		/* initially vif is not MLD */
+		ieee80211_link_debugfs_drv_add(&sdata->deflink);
+	}
+
+	return 0;
 }
 
 int drv_change_interface(struct ieee80211_local *local,
@@ -82,6 +117,7 @@
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
@@ -96,6 +132,7 @@
 			  struct ieee80211_sub_if_data *sdata)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return;
@@ -116,6 +153,7 @@
 	int ret = 0;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	sdata = get_bss_sdata(sdata);
 	if (!check_sdata_in_driver(sdata))
@@ -149,6 +187,7 @@
 	int ret = -EOPNOTSUPP;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	sdata = get_bss_sdata(sdata);
 	if (!check_sdata_in_driver(sdata))
@@ -190,6 +229,7 @@
 	int ret = -EOPNOTSUPP;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
@@ -223,6 +263,7 @@
 	u64 ret = -1ULL;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return ret;
@@ -239,6 +280,7 @@
 		 u64 tsf)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return;
@@ -254,6 +296,7 @@
 		    s64 offset)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return;
@@ -268,6 +311,7 @@
 		   struct ieee80211_sub_if_data *sdata)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return;
@@ -285,7 +329,9 @@
 {
 	int ret = 0;
 
-	drv_verify_link_exists(sdata, link_conf);
+	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
 
@@ -312,8 +358,8 @@
 			      struct ieee80211_chanctx *ctx)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
-	drv_verify_link_exists(sdata, link_conf);
 	if (!check_sdata_in_driver(sdata))
 		return;
 
@@ -340,6 +386,7 @@
 	int i;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!local->ops->switch_vif_chanctx)
 		return -EOPNOTSUPP;
@@ -392,9 +439,7 @@
 	int ret = -EOPNOTSUPP;
 
 	might_sleep();
-
-	if (!sdata)
-		return -EIO;
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	sdata = get_bss_sdata(sdata);
 	if (!check_sdata_in_driver(sdata))
@@ -416,6 +461,7 @@
 			   int link_id, u64 changed)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (WARN_ON_ONCE(changed & (BSS_CHANGED_BEACON |
 				    BSS_CHANGED_BEACON_ENABLED) &&
@@ -429,7 +475,8 @@
 			 sdata->vif.type == NL80211_IFTYPE_NAN ||
 			 (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
 			  !sdata->vif.bss_conf.mu_mimo_owner &&
-			  !(changed & BSS_CHANGED_TXPOWER))))
+			  !(changed & (BSS_CHANGED_TXPOWER |
+				       BSS_CHANGED_QOS)))))
 		return;
 
 	if (!check_sdata_in_driver(sdata))
@@ -458,6 +505,7 @@
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	sdata = get_bss_sdata(sdata);
 	if (!check_sdata_in_driver(sdata))
@@ -485,6 +533,7 @@
 	int ret = -EOPNOTSUPP;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
@@ -510,11 +559,14 @@
 	if (ret)
 		return ret;
 
-	for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
+	if (!local->in_reconfig) {
+		for_each_set_bit(link_id, &links_to_add,
+				 IEEE80211_MLD_MAX_NUM_LINKS) {
 		link = rcu_access_pointer(sdata->link[link_id]);
 
 		ieee80211_link_debugfs_drv_add(link);
 	}
+	}
 
 	return 0;
 }
@@ -532,6 +584,7 @@
 	int ret = -EOPNOTSUPP;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
@@ -547,7 +600,7 @@
 
 	for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) {
 		link_sta = rcu_dereference_protected(info->link[link_id],
-						     lockdep_is_held(&local->sta_mtx));
+						     lockdep_is_held(&local->hw.wiphy->mtx));
 
 		ieee80211_link_sta_debugfs_drv_remove(link_sta);
 	}
@@ -563,7 +616,7 @@
 
 	for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
 		link_sta = rcu_dereference_protected(info->link[link_id],
-						     lockdep_is_held(&local->sta_mtx));
+						     lockdep_is_held(&local->hw.wiphy->mtx));
 		ieee80211_link_sta_debugfs_drv_add(link_sta);
 	}
 
diff -ruw linux-6.4/net/mac80211/driver-ops.h linux-6.4-fbx/net/mac80211/driver-ops.h
--- linux-6.4/net/mac80211/driver-ops.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/driver-ops.h	2024-01-11 15:42:54.309009962 +0100
@@ -2,7 +2,7 @@
 /*
 * Portions of this file
 * Copyright(c) 2016 Intel Deutschland GmbH
-* Copyright (C) 2018 - 2019, 2021 Intel Corporation
+* Copyright (C) 2018 - 2019, 2021 - 2023 Intel Corporation
 */
 
 #ifndef __MAC80211_DRIVER_OPS
@@ -13,15 +13,17 @@
 #include "trace.h"
 
 #define check_sdata_in_driver(sdata)	({					\
-	!WARN_ONCE(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),			\
+	WARN_ONCE(!sdata->local->reconfig_failure &&				\
+		  !(sdata->flags & IEEE80211_SDATA_IN_DRIVER),			\
 		   "%s: Failed check-sdata-in-driver check, flags: 0x%x\n",	\
 		   sdata->dev ? sdata->dev->name : sdata->name, sdata->flags);	\
+	!!(sdata->flags & IEEE80211_SDATA_IN_DRIVER);				\
 })
 
 static inline struct ieee80211_sub_if_data *
 get_bss_sdata(struct ieee80211_sub_if_data *sdata)
 {
-	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+	if (sdata && sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 		sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
 				     u.ap);
 
@@ -38,6 +40,9 @@
 static inline void drv_sync_rx_queues(struct ieee80211_local *local,
 				      struct sta_info *sta)
 {
+	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (local->ops->sync_rx_queues) {
 		trace_drv_sync_rx_queues(local, sta->sdata, &sta->sta);
 		local->ops->sync_rx_queues(&local->hw);
@@ -84,6 +89,8 @@
 
 int drv_start(struct ieee80211_local *local);
 void drv_stop(struct ieee80211_local *local);
+int drv_get_powered(struct ieee80211_local *local, bool *up, bool *busy);
+int drv_set_powered(struct ieee80211_local *local);
 
 #ifdef CONFIG_PM
 static inline int drv_suspend(struct ieee80211_local *local,
@@ -92,6 +99,7 @@
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_suspend(local);
 	ret = local->ops->suspend(&local->hw, wowlan);
@@ -104,6 +112,7 @@
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_resume(local);
 	ret = local->ops->resume(&local->hw);
@@ -115,6 +124,7 @@
 				  bool enabled)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!local->ops->set_wakeup)
 		return;
@@ -140,6 +150,7 @@
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_config(local, changed);
 	ret = local->ops->config(&local->hw, changed);
@@ -152,6 +163,7 @@
 				       u64 changed)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return;
@@ -191,6 +203,7 @@
 					u64 multicast)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_configure_filter(local, changed_flags, total_flags,
 				   multicast);
@@ -205,6 +218,7 @@
 					   unsigned int changed_flags)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_config_iface_filter(local, sdata, filter_flags,
 				      changed_flags);
@@ -261,6 +275,7 @@
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
@@ -275,6 +290,7 @@
 				      struct ieee80211_sub_if_data *sdata)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return;
@@ -293,6 +309,7 @@
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
@@ -310,6 +327,7 @@
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
@@ -326,6 +344,7 @@
 				     const u8 *mac_addr)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_sw_scan_start(local, sdata, mac_addr);
 	if (local->ops->sw_scan_start)
@@ -337,6 +356,7 @@
 					struct ieee80211_sub_if_data *sdata)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_sw_scan_complete(local, sdata);
 	if (local->ops->sw_scan_complete)
@@ -350,6 +370,7 @@
 	int ret = -EOPNOTSUPP;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (local->ops->get_stats)
 		ret = local->ops->get_stats(&local->hw, stats);
@@ -373,6 +394,7 @@
 	int ret = 0;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_set_frag_threshold(local, value);
 	if (local->ops->set_frag_threshold)
@@ -387,6 +409,7 @@
 	int ret = 0;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_set_rts_threshold(local, value);
 	if (local->ops->set_rts_threshold)
@@ -400,6 +423,7 @@
 {
 	int ret = 0;
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_set_coverage_class(local, value);
 	if (local->ops->set_coverage_class)
@@ -433,6 +457,7 @@
 	int ret = 0;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	sdata = get_bss_sdata(sdata);
 	if (!check_sdata_in_driver(sdata))
@@ -452,6 +477,7 @@
 				  struct ieee80211_sta *sta)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	sdata = get_bss_sdata(sdata);
 	if (!check_sdata_in_driver(sdata))
@@ -465,12 +491,30 @@
 }
 
 #ifdef CONFIG_MAC80211_DEBUGFS
+static inline void drv_vif_add_debugfs(struct ieee80211_local *local,
+				       struct ieee80211_sub_if_data *sdata)
+{
+	might_sleep();
+
+	if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+	    WARN_ON(!sdata->vif.debugfs_dir))
+		return;
+
+	sdata = get_bss_sdata(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
+
+	if (local->ops->vif_add_debugfs)
+		local->ops->vif_add_debugfs(&local->hw, &sdata->vif);
+}
+
 static inline void drv_link_add_debugfs(struct ieee80211_local *local,
 					struct ieee80211_sub_if_data *sdata,
 					struct ieee80211_bss_conf *link_conf,
 					struct dentry *dir)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	sdata = get_bss_sdata(sdata);
 	if (!check_sdata_in_driver(sdata))
@@ -487,6 +531,7 @@
 				       struct dentry *dir)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	sdata = get_bss_sdata(sdata);
 	if (!check_sdata_in_driver(sdata))
@@ -503,6 +548,7 @@
 					    struct dentry *dir)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	sdata = get_bss_sdata(sdata);
 	if (!check_sdata_in_driver(sdata))
@@ -512,6 +558,12 @@
 		local->ops->link_sta_add_debugfs(&local->hw, &sdata->vif,
 						 link_sta, dir);
 }
+#else
+static inline void drv_vif_add_debugfs(struct ieee80211_local *local,
+				       struct ieee80211_sub_if_data *sdata)
+{
+	might_sleep();
+}
 #endif
 
 static inline void drv_sta_pre_rcu_remove(struct ieee80211_local *local,
@@ -519,6 +571,7 @@
 					  struct sta_info *sta)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	sdata = get_bss_sdata(sdata);
 	if (!check_sdata_in_driver(sdata))
@@ -567,6 +620,9 @@
 				      struct ieee80211_sta *sta,
 				      struct station_info *sinfo)
 {
+	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	sdata = get_bss_sdata(sdata);
 	if (!check_sdata_in_driver(sdata))
 		return;
@@ -597,6 +653,7 @@
 	int ret = 0; /* default unsupported op for less congestion */
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_tx_last_beacon(local);
 	if (local->ops->tx_last_beacon)
@@ -614,6 +671,9 @@
 {
 	int ret = -EOPNOTSUPP;
 
+	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	trace_drv_get_survey(local, idx, survey);
 
 	if (local->ops->get_survey)
@@ -627,6 +687,7 @@
 static inline void drv_rfkill_poll(struct ieee80211_local *local)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (local->ops->rfkill_poll)
 		local->ops->rfkill_poll(&local->hw);
@@ -636,9 +697,13 @@
 			     struct ieee80211_sub_if_data *sdata,
 			     u32 queues, bool drop)
 {
-	struct ieee80211_vif *vif = sdata ? &sdata->vif : NULL;
+	struct ieee80211_vif *vif;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
+
+	sdata = get_bss_sdata(sdata);
+	vif = sdata ? &sdata->vif : NULL;
 
 	if (sdata && !check_sdata_in_driver(sdata))
 		return;
@@ -654,6 +719,9 @@
 				 struct sta_info *sta)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
+
+	sdata = get_bss_sdata(sdata);
 
 	if (sdata && !check_sdata_in_driver(sdata))
 		return;
@@ -669,6 +737,7 @@
 				      struct ieee80211_channel_switch *ch_switch)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_channel_switch(local, sdata, ch_switch);
 	local->ops->channel_switch(&local->hw, &sdata->vif, ch_switch);
@@ -681,6 +750,7 @@
 {
 	int ret = -EOPNOTSUPP;
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	if (local->ops->set_antenna)
 		ret = local->ops->set_antenna(&local->hw, tx_ant, rx_ant);
 	trace_drv_set_antenna(local, tx_ant, rx_ant, ret);
@@ -692,6 +762,7 @@
 {
 	int ret = -EOPNOTSUPP;
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	if (local->ops->get_antenna)
 		ret = local->ops->get_antenna(&local->hw, tx_ant, rx_ant);
 	trace_drv_get_antenna(local, *tx_ant, *rx_ant, ret);
@@ -707,6 +778,7 @@
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_remain_on_channel(local, sdata, chan, duration, type);
 	ret = local->ops->remain_on_channel(&local->hw, &sdata->vif,
@@ -723,6 +795,7 @@
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_cancel_remain_on_channel(local, sdata);
 	ret = local->ops->cancel_remain_on_channel(&local->hw, &sdata->vif);
@@ -737,6 +810,7 @@
 	int ret = -ENOTSUPP;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_set_ringparam(local, tx, rx);
 	if (local->ops->set_ringparam)
@@ -750,6 +824,7 @@
 				     u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_get_ringparam(local, tx, tx_max, rx, rx_max);
 	if (local->ops->get_ringparam)
@@ -762,6 +837,7 @@
 	bool ret = false;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_tx_frames_pending(local);
 	if (local->ops->tx_frames_pending)
@@ -778,6 +854,7 @@
 	int ret = -EOPNOTSUPP;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
@@ -795,6 +872,9 @@
 				      struct ieee80211_sub_if_data *sdata,
 				      struct cfg80211_gtk_rekey_data *data)
 {
+	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!check_sdata_in_driver(sdata))
 		return;
 
@@ -849,11 +929,13 @@
 				      struct ieee80211_prep_tx_info *info)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return;
 	WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
 
+	info->link_id = info->link_id < 0 ? 0 : info->link_id;
 	trace_drv_mgd_prepare_tx(local, sdata, info->duration,
 				 info->subtype, info->success);
 	if (local->ops->mgd_prepare_tx)
@@ -866,6 +948,7 @@
 				       struct ieee80211_prep_tx_info *info)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return;
@@ -880,17 +963,22 @@
 
 static inline void
 drv_mgd_protect_tdls_discover(struct ieee80211_local *local,
-			      struct ieee80211_sub_if_data *sdata)
+			      struct ieee80211_sub_if_data *sdata,
+			      int link_id)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return;
 	WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
 
+	link_id = link_id > 0 ? link_id : 0;
+
 	trace_drv_mgd_protect_tdls_discover(local, sdata);
 	if (local->ops->mgd_protect_tdls_discover)
-		local->ops->mgd_protect_tdls_discover(&local->hw, &sdata->vif);
+		local->ops->mgd_protect_tdls_discover(&local->hw, &sdata->vif,
+						      link_id);
 	trace_drv_return_void(local);
 }
 
@@ -900,6 +988,7 @@
 	int ret = -EOPNOTSUPP;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_add_chanctx(local, ctx);
 	if (local->ops->add_chanctx)
@@ -915,6 +1004,7 @@
 				      struct ieee80211_chanctx *ctx)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (WARN_ON(!ctx->driver_present))
 		return;
@@ -931,6 +1021,7 @@
 				      u32 changed)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_change_chanctx(local, ctx, changed);
 	if (local->ops->change_chanctx) {
@@ -940,14 +1031,6 @@
 	trace_drv_return_void(local);
 }
 
-static inline void drv_verify_link_exists(struct ieee80211_sub_if_data *sdata,
-					  struct ieee80211_bss_conf *link_conf)
-{
-	/* deflink always exists, so need to check only for other links */
-	if (sdata->deflink.conf != link_conf)
-		sdata_assert_lock(sdata);
-}
-
 int drv_assign_vif_chanctx(struct ieee80211_local *local,
 			   struct ieee80211_sub_if_data *sdata,
 			   struct ieee80211_bss_conf *link_conf,
@@ -966,10 +1049,8 @@
 {
 	int ret = 0;
 
-	/* make sure link_conf is protected */
-	drv_verify_link_exists(sdata, link_conf);
-
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
@@ -985,8 +1066,8 @@
 			       struct ieee80211_sub_if_data *sdata,
 			       struct ieee80211_bss_conf *link_conf)
 {
-	/* make sure link_conf is protected */
-	drv_verify_link_exists(sdata, link_conf);
+	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return;
@@ -1002,6 +1083,7 @@
 		      enum ieee80211_reconfig_type reconfig_type)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	trace_drv_reconfig_complete(local, reconfig_type);
 	if (local->ops->reconfig_complete)
@@ -1014,6 +1096,9 @@
 			    struct ieee80211_sub_if_data *sdata,
 			    int key_idx)
 {
+	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!check_sdata_in_driver(sdata))
 		return;
 
@@ -1044,6 +1129,9 @@
 {
 	struct ieee80211_local *local = sdata->local;
 
+	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (local->ops->channel_switch_beacon) {
 		trace_drv_channel_switch_beacon(local, sdata, chandef);
 		local->ops->channel_switch_beacon(&local->hw, &sdata->vif,
@@ -1058,6 +1146,9 @@
 	struct ieee80211_local *local = sdata->local;
 	int ret = 0;
 
+	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
 
@@ -1070,17 +1161,22 @@
 }
 
 static inline int
-drv_post_channel_switch(struct ieee80211_sub_if_data *sdata)
+drv_post_channel_switch(struct ieee80211_link_data *link)
 {
+	struct ieee80211_sub_if_data *sdata = link->sdata;
 	struct ieee80211_local *local = sdata->local;
 	int ret = 0;
 
+	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
 
 	trace_drv_post_channel_switch(local, sdata);
 	if (local->ops->post_channel_switch)
-		ret = local->ops->post_channel_switch(&local->hw, &sdata->vif);
+		ret = local->ops->post_channel_switch(&local->hw, &sdata->vif,
+						      link->conf);
 	trace_drv_return_int(local, ret);
 	return ret;
 }
@@ -1090,6 +1186,9 @@
 {
 	struct ieee80211_local *local = sdata->local;
 
+	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!check_sdata_in_driver(sdata))
 		return;
 
@@ -1105,6 +1204,9 @@
 {
 	struct ieee80211_local *local = sdata->local;
 
+	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!check_sdata_in_driver(sdata))
 		return;
 
@@ -1120,6 +1222,7 @@
 	int ret = 0;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
 
@@ -1134,6 +1237,7 @@
 				  struct ieee80211_sub_if_data *sdata)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	if (!check_sdata_in_driver(sdata))
 		return;
 
@@ -1161,6 +1265,9 @@
 {
 	int ret;
 
+	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!local->ops->get_txpower)
 		return -EOPNOTSUPP;
 
@@ -1180,6 +1287,7 @@
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
 
@@ -1200,6 +1308,7 @@
 			       struct ieee80211_sta *sta)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	if (!check_sdata_in_driver(sdata))
 		return;
 
@@ -1265,6 +1374,11 @@
 {
 	u32 ret = -EOPNOTSUPP;
 
+	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
+
 	if (local->ops->get_ftm_responder_stats)
 		ret = local->ops->get_ftm_responder_stats(&local->hw,
 							 &sdata->vif,
@@ -1281,6 +1395,7 @@
 	int ret = -EOPNOTSUPP;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
 
@@ -1300,6 +1415,7 @@
 	trace_drv_abort_pmsr(local, sdata);
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	if (!check_sdata_in_driver(sdata))
 		return;
 
@@ -1315,6 +1431,7 @@
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	check_sdata_in_driver(sdata);
 
 	trace_drv_start_nan(local, sdata, conf);
@@ -1327,6 +1444,7 @@
 				struct ieee80211_sub_if_data *sdata)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	check_sdata_in_driver(sdata);
 
 	trace_drv_stop_nan(local, sdata);
@@ -1342,6 +1460,7 @@
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	check_sdata_in_driver(sdata);
 
 	if (!local->ops->nan_change_conf)
@@ -1362,6 +1481,7 @@
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	check_sdata_in_driver(sdata);
 
 	if (!local->ops->add_nan_func)
@@ -1379,6 +1499,7 @@
 				   u8 instance_id)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	check_sdata_in_driver(sdata);
 
 	trace_drv_del_nan_func(local, sdata, instance_id);
@@ -1395,6 +1516,7 @@
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	ret = local->ops->set_tid_config(&local->hw, &sdata->vif, sta,
 					 tid_conf);
 	trace_drv_return_int(local, ret);
@@ -1409,6 +1531,7 @@
 	int ret;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	ret = local->ops->reset_tid_config(&local->hw, &sdata->vif, sta, tids);
 	trace_drv_return_int(local, ret);
 
@@ -1419,6 +1542,7 @@
 					  struct ieee80211_sub_if_data *sdata)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	check_sdata_in_driver(sdata);
 
 	if (!local->ops->update_vif_offload)
@@ -1434,6 +1558,9 @@
 				     struct ieee80211_sta *sta, bool enabled)
 {
 	sdata = get_bss_sdata(sdata);
+
+	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	if (!check_sdata_in_driver(sdata))
 		return;
 
@@ -1449,6 +1576,9 @@
 					     bool enabled)
 {
 	sdata = get_bss_sdata(sdata);
+
+	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	if (!check_sdata_in_driver(sdata))
 		return;
 
@@ -1467,6 +1597,7 @@
 	struct ieee80211_twt_params *twt_agrt;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!check_sdata_in_driver(sdata))
 		return;
@@ -1484,6 +1615,7 @@
 					    u8 flowid)
 {
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 	if (!check_sdata_in_driver(sdata))
 		return;
 
@@ -1524,6 +1656,8 @@
 {
 	int ret = -EOPNOTSUPP;
 
+	might_sleep();
+
 	sdata = get_bss_sdata(sdata);
 	trace_drv_net_setup_tc(local, sdata, type);
 	if (local->ops->net_setup_tc)
diff -ruw linux-6.4/net/mac80211/drop.h linux-6.4-fbx/net/mac80211/drop.h
--- linux-6.4/net/mac80211/drop.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/drop.h	2023-11-07 13:38:44.074257019 +0100
@@ -18,9 +18,54 @@
 /* this line for the trailing \ - add before this */
 
 #define MAC80211_DROP_REASONS_UNUSABLE(R)	\
+	/* 0x00 == ___RX_DROP_UNUSABLE */	\
 	R(RX_DROP_U_MIC_FAIL)			\
 	R(RX_DROP_U_REPLAY)			\
 	R(RX_DROP_U_BAD_MMIE)			\
+	R(RX_DROP_U_DUP)			\
+	R(RX_DROP_U_SPURIOUS)			\
+	R(RX_DROP_U_DECRYPT_FAIL)		\
+	R(RX_DROP_U_NO_KEY_ID)			\
+	R(RX_DROP_U_BAD_CIPHER)			\
+	R(RX_DROP_U_OOM)			\
+	R(RX_DROP_U_NONSEQ_PN)			\
+	R(RX_DROP_U_BAD_KEY_COLOR)		\
+	R(RX_DROP_U_BAD_4ADDR)			\
+	R(RX_DROP_U_BAD_AMSDU)			\
+	R(RX_DROP_U_BAD_AMSDU_CIPHER)		\
+	R(RX_DROP_U_INVALID_8023)		\
+	/* 0x10 */				\
+	R(RX_DROP_U_RUNT_ACTION)		\
+	R(RX_DROP_U_UNPROT_ACTION)		\
+	R(RX_DROP_U_UNPROT_DUAL)		\
+	R(RX_DROP_U_UNPROT_UCAST_MGMT)		\
+	R(RX_DROP_U_UNPROT_MCAST_MGMT)		\
+	R(RX_DROP_U_UNPROT_BEACON)		\
+	R(RX_DROP_U_UNPROT_UNICAST_PUB_ACTION)	\
+	R(RX_DROP_U_UNPROT_ROBUST_ACTION)	\
+	R(RX_DROP_U_ACTION_UNKNOWN_SRC)		\
+	R(RX_DROP_U_REJECTED_ACTION_RESPONSE)	\
+	R(RX_DROP_U_EXPECT_DEFRAG_PROT)		\
+	R(RX_DROP_U_WEP_DEC_FAIL)		\
+	R(RX_DROP_U_NO_IV)			\
+	R(RX_DROP_U_NO_ICV)			\
+	R(RX_DROP_U_AP_RX_GROUPCAST)		\
+	R(RX_DROP_U_SHORT_MMIC)			\
+	/* 0x20 */				\
+	R(RX_DROP_U_MMIC_FAIL)			\
+	R(RX_DROP_U_SHORT_TKIP)			\
+	R(RX_DROP_U_TKIP_FAIL)			\
+	R(RX_DROP_U_SHORT_CCMP)			\
+	R(RX_DROP_U_SHORT_CCMP_MIC)		\
+	R(RX_DROP_U_SHORT_GCMP)			\
+	R(RX_DROP_U_SHORT_GCMP_MIC)		\
+	R(RX_DROP_U_SHORT_CMAC)			\
+	R(RX_DROP_U_SHORT_CMAC256)		\
+	R(RX_DROP_U_SHORT_GMAC)			\
+	R(RX_DROP_U_UNEXPECTED_VLAN_4ADDR)	\
+	R(RX_DROP_U_UNEXPECTED_STA_4ADDR)	\
+	R(RX_DROP_U_UNEXPECTED_VLAN_MCAST)	\
+	R(RX_DROP_U_NOT_PORT_CONTROL)		\
 /* this line for the trailing \ - add before this */
 
 /* having two enums allows for checking ieee80211_rx_result use with sparse */
@@ -46,11 +91,13 @@
 	RX_CONTINUE	 = (__force ieee80211_rx_result)___RX_CONTINUE,
 	RX_QUEUED	 = (__force ieee80211_rx_result)___RX_QUEUED,
 	RX_DROP_MONITOR	 = (__force ieee80211_rx_result)___RX_DROP_MONITOR,
-	RX_DROP_UNUSABLE = (__force ieee80211_rx_result)___RX_DROP_UNUSABLE,
 #define DEF(x) x = (__force ieee80211_rx_result)___ ## x,
 	MAC80211_DROP_REASONS_MONITOR(DEF)
 	MAC80211_DROP_REASONS_UNUSABLE(DEF)
 #undef DEF
 };
 
+#define RX_RES_IS_UNUSABLE(result)	\
+	(((__force u32)(result) & SKB_DROP_REASON_SUBSYS_MASK) == ___RX_DROP_UNUSABLE)
+
 #endif /* MAC80211_DROP_H */
diff -ruw linux-6.4/net/mac80211/eht.c linux-6.4-fbx/net/mac80211/eht.c
--- linux-6.4/net/mac80211/eht.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/eht.c	2024-01-19 17:01:19.905848123 +0100
@@ -2,7 +2,7 @@
 /*
  * EHT handling
  *
- * Copyright(c) 2021-2022 Intel Corporation
+ * Copyright(c) 2021-2023 Intel Corporation
  */
 
 #include "ieee80211_i.h"
@@ -25,8 +25,7 @@
 	memset(eht_cap, 0, sizeof(*eht_cap));
 
 	if (!eht_cap_ie_elem ||
-	    !ieee80211_get_eht_iftype_cap(sband,
-					 ieee80211_vif_type_p2p(&sdata->vif)))
+	    !ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif))
 		return;
 
 	mcs_nss_size = ieee80211_eht_mcs_nss_size(he_cap_ie_elem,
@@ -76,4 +75,5 @@
 
 	link_sta->cur_max_bandwidth = ieee80211_sta_cap_rx_bw(link_sta);
 	link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta);
+	link_sta->pub->sta_max_bandwidth = link_sta->cur_max_bandwidth;
 }
diff -ruw linux-6.4/net/mac80211/ethtool.c linux-6.4-fbx/net/mac80211/ethtool.c
--- linux-6.4/net/mac80211/ethtool.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/ethtool.c	2023-11-07 13:38:44.074257019 +0100
@@ -5,7 +5,7 @@
  * Copied from cfg.c - originally
  * Copyright 2006-2010	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2014	Intel Corporation (Author: Johannes Berg)
- * Copyright (C) 2018, 2022 Intel Corporation
+ * Copyright (C) 2018, 2022-2023 Intel Corporation
  */
 #include <linux/types.h>
 #include <net/cfg80211.h>
@@ -19,11 +19,16 @@
 				   struct netlink_ext_ack *extack)
 {
 	struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy);
+	int ret;
 
 	if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0)
 		return -EINVAL;
 
-	return drv_set_ringparam(local, rp->tx_pending, rp->rx_pending);
+	wiphy_lock(local->hw.wiphy);
+	ret = drv_set_ringparam(local, rp->tx_pending, rp->rx_pending);
+	wiphy_unlock(local->hw.wiphy);
+
+	return ret;
 }
 
 static void ieee80211_get_ringparam(struct net_device *dev,
@@ -35,8 +40,10 @@
 
 	memset(rp, 0, sizeof(*rp));
 
+	wiphy_lock(local->hw.wiphy);
 	drv_get_ringparam(local, &rp->tx_pending, &rp->tx_max_pending,
 			  &rp->rx_pending, &rp->rx_max_pending);
+	wiphy_unlock(local->hw.wiphy);
 }
 
 static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
@@ -50,6 +57,22 @@
 };
 #define STA_STATS_LEN	ARRAY_SIZE(ieee80211_gstrings_sta_stats)
 
+struct ethtool_priv_flags_strings {
+	const char string[ETH_GSTRING_LEN];
+};
+
+enum {
+	POWERED_SUPPORTED	= (1 << 0),
+	POWERED_STATUS		= (1 << 1),
+	POWERED_CHANGE_BUSY	= (1 << 2),
+};
+
+static const struct ethtool_priv_flags_strings ieee80211_pflags_strings[] = {
+	{ .string = "powered-supported" },
+	{ .string = "powered-status" },
+	{ .string = "powered-change-busy", },
+};
+
 static int ieee80211_get_sset_count(struct net_device *dev, int sset)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -60,6 +83,9 @@
 
 	rv += drv_get_et_sset_count(sdata, sset);
 
+	if (sset == ETH_SS_PRIV_FLAGS)
+		rv += ARRAY_SIZE(ieee80211_pflags_strings);
+
 	if (rv == 0)
 		return -EOPNOTSUPP;
 	return rv;
@@ -102,7 +128,7 @@
 	 * network device.
 	 */
 
-	mutex_lock(&local->sta_mtx);
+	wiphy_lock(local->hw.wiphy);
 
 	if (sdata->vif.type == NL80211_IFTYPE_STATION) {
 		sta = sta_info_get_bss(sdata, sdata->deflink.u.mgd.bssid);
@@ -198,12 +224,13 @@
 	else
 		data[i++] = -1LL;
 
-	mutex_unlock(&local->sta_mtx);
-
-	if (WARN_ON(i != STA_STATS_LEN))
+	if (WARN_ON(i != STA_STATS_LEN)) {
+		wiphy_unlock(local->hw.wiphy);
 		return;
+	}
 
 	drv_get_et_stats(sdata, stats, &(data[STA_STATS_LEN]));
+	wiphy_unlock(local->hw.wiphy);
 }
 
 static void ieee80211_get_strings(struct net_device *dev, u32 sset, u8 *data)
@@ -216,6 +243,9 @@
 		memcpy(data, ieee80211_gstrings_sta_stats, sz_sta_stats);
 	}
 	drv_get_et_strings(sdata, sset, &(data[sz_sta_stats]));
+	if (sset == ETH_SS_PRIV_FLAGS)
+		memcpy(data, ieee80211_pflags_strings,
+		       sizeof (ieee80211_pflags_strings));
 }
 
 static int ieee80211_get_regs_len(struct net_device *dev)
@@ -233,6 +263,35 @@
 	regs->len = 0;
 }
 
+static u32 ieee80211_get_priv_flags(struct net_device *dev)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
+	bool powered, powered_busy;
+	u32 ret;
+
+	ret = 0;
+	if (!drv_get_powered(local, &powered, &powered_busy)) {
+		ret |= POWERED_SUPPORTED;
+		if (powered)
+			ret |= POWERED_STATUS;
+		if (powered_busy)
+			ret |= POWERED_CHANGE_BUSY;
+	}
+	return ret;
+}
+
+static int ieee80211_set_priv_flags(struct net_device *dev, u32 flags)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
+
+	if (flags & (POWERED_STATUS))
+		return drv_set_powered(local);
+
+	return 0;
+}
+
 const struct ethtool_ops ieee80211_ethtool_ops = {
 	.get_drvinfo = cfg80211_get_drvinfo,
 	.get_regs_len = ieee80211_get_regs_len,
@@ -243,4 +302,6 @@
 	.get_strings = ieee80211_get_strings,
 	.get_ethtool_stats = ieee80211_get_stats,
 	.get_sset_count = ieee80211_get_sset_count,
+	.set_priv_flags	= ieee80211_set_priv_flags,
+	.get_priv_flags	= ieee80211_get_priv_flags,
 };
diff -ruw linux-6.4/net/mac80211/fils_aead.c linux-6.4-fbx/net/mac80211/fils_aead.c
--- linux-6.4/net/mac80211/fils_aead.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/fils_aead.c	2023-11-07 13:38:44.074257019 +0100
@@ -5,9 +5,9 @@
  */
 
 #include <crypto/aes.h>
-#include <crypto/algapi.h>
 #include <crypto/hash.h>
 #include <crypto/skcipher.h>
+#include <crypto/utils.h>
 
 #include "ieee80211_i.h"
 #include "aes_cmac.h"
diff -ruw linux-6.4/net/mac80211/he.c linux-6.4-fbx/net/mac80211/he.c
--- linux-6.4/net/mac80211/he.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/he.c	2024-01-19 17:01:19.905848123 +0100
@@ -128,8 +128,7 @@
 		return;
 
 	own_he_cap_ptr =
-		ieee80211_get_he_iftype_cap(sband,
-					    ieee80211_vif_type_p2p(&sdata->vif));
+		ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
 	if (!own_he_cap_ptr)
 		return;
 
@@ -163,6 +162,7 @@
 
 	link_sta->cur_max_bandwidth = ieee80211_sta_cap_rx_bw(link_sta);
 	link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta);
+	link_sta->pub->sta_max_bandwidth = link_sta->cur_max_bandwidth;
 
 	if (sband->band == NL80211_BAND_6GHZ && he_6ghz_capa)
 		ieee80211_update_from_he_6ghz_capa(he_6ghz_capa, link_sta);
diff -ruw linux-6.4/net/mac80211/ht.c linux-6.4-fbx/net/mac80211/ht.c
--- linux-6.4/net/mac80211/ht.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/ht.c	2024-01-19 17:01:19.905848123 +0100
@@ -9,7 +9,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
  * Copyright 2017	Intel Deutschland GmbH
- * Copyright(c) 2020-2022 Intel Corporation
+ * Copyright(c) 2020-2023 Intel Corporation
  */
 
 #include <linux/ieee80211.h>
@@ -282,6 +282,7 @@
 	link_sta->cur_max_bandwidth =
 		ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
 				IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
+	link_sta->pub->sta_max_bandwidth = link_sta->cur_max_bandwidth;
 
 	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
 	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
@@ -316,16 +317,16 @@
 {
 	int i;
 
-	mutex_lock(&sta->ampdu_mlme.mtx);
+	lockdep_assert_wiphy(sta->local->hw.wiphy);
+
 	for (i = 0; i <  IEEE80211_NUM_TIDS; i++)
-		___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
+		__ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
 						WLAN_REASON_QSTA_LEAVE_QBSS,
 						reason != AGG_STOP_DESTROY_STA &&
 						reason != AGG_STOP_PEER_REQUEST);
 
 	for (i = 0; i <  IEEE80211_NUM_TIDS; i++)
-		___ieee80211_stop_tx_ba_session(sta, i, reason);
-	mutex_unlock(&sta->ampdu_mlme.mtx);
+		__ieee80211_stop_tx_ba_session(sta, i, reason);
 
 	/*
 	 * In case the tear down is part of a reconfigure due to HW restart
@@ -333,9 +334,8 @@
 	 * the BA session, so handle it to properly clean tid_tx data.
 	 */
 	if(reason == AGG_STOP_DESTROY_STA) {
-		cancel_work_sync(&sta->ampdu_mlme.work);
+		wiphy_work_cancel(sta->local->hw.wiphy, &sta->ampdu_mlme.work);
 
-		mutex_lock(&sta->ampdu_mlme.mtx);
 		for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
 			struct tid_ampdu_tx *tid_tx =
 				rcu_dereference_protected_tid_tx(sta, i);
@@ -346,11 +346,10 @@
 			if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state))
 				ieee80211_stop_tx_ba_cb(sta, i, tid_tx);
 		}
-		mutex_unlock(&sta->ampdu_mlme.mtx);
 	}
 }
 
-void ieee80211_ba_session_work(struct work_struct *work)
+void ieee80211_ba_session_work(struct wiphy *wiphy, struct wiphy_work *work)
 {
 	struct sta_info *sta =
 		container_of(work, struct sta_info, ampdu_mlme.work);
@@ -358,32 +357,33 @@
 	bool blocked;
 	int tid;
 
+	lockdep_assert_wiphy(sta->local->hw.wiphy);
+
 	/* When this flag is set, new sessions should be blocked. */
 	blocked = test_sta_flag(sta, WLAN_STA_BLOCK_BA);
 
-	mutex_lock(&sta->ampdu_mlme.mtx);
 	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
 		if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired))
-			___ieee80211_stop_rx_ba_session(
+			__ieee80211_stop_rx_ba_session(
 				sta, tid, WLAN_BACK_RECIPIENT,
 				WLAN_REASON_QSTA_TIMEOUT, true);
 
 		if (test_and_clear_bit(tid,
 				       sta->ampdu_mlme.tid_rx_stop_requested))
-			___ieee80211_stop_rx_ba_session(
+			__ieee80211_stop_rx_ba_session(
 				sta, tid, WLAN_BACK_RECIPIENT,
 				WLAN_REASON_UNSPECIFIED, true);
 
 		if (!blocked &&
 		    test_and_clear_bit(tid,
 				       sta->ampdu_mlme.tid_rx_manage_offl))
-			___ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid,
+			__ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid,
 							 IEEE80211_MAX_AMPDU_BUF_HT,
 							 false, true, NULL);
 
 		if (test_and_clear_bit(tid + IEEE80211_NUM_TIDS,
 				       sta->ampdu_mlme.tid_rx_manage_offl))
-			___ieee80211_stop_rx_ba_session(
+			__ieee80211_stop_rx_ba_session(
 				sta, tid, WLAN_BACK_RECIPIENT,
 				0, false);
 
@@ -414,9 +414,7 @@
 				 */
 				synchronize_net();
 
-				mutex_unlock(&sta->ampdu_mlme.mtx);
-
-				ieee80211_queue_work(&sdata->local->hw, work);
+				wiphy_work_queue(sdata->local->hw.wiphy, work);
 				return;
 			}
 
@@ -448,12 +446,11 @@
 		    test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state))
 			ieee80211_start_tx_ba_cb(sta, tid, tid_tx);
 		if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))
-			___ieee80211_stop_tx_ba_session(sta, tid,
+			__ieee80211_stop_tx_ba_session(sta, tid,
 							AGG_STOP_LOCAL_REQUEST);
 		if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state))
 			ieee80211_stop_tx_ba_cb(sta, tid, tid_tx);
 	}
-	mutex_unlock(&sta->ampdu_mlme.mtx);
 }
 
 void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
@@ -538,11 +535,13 @@
 
 int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
 			       enum ieee80211_smps_mode smps, const u8 *da,
-			       const u8 *bssid)
+			       const u8 *bssid, int link_id)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb;
 	struct ieee80211_mgmt *action_frame;
+	struct ieee80211_tx_info *info;
+	u8 status_link_id = link_id < 0 ? 0 : link_id;
 
 	/* 27 = header + category + action + smps mode */
 	skb = dev_alloc_skb(27 + local->hw.extra_tx_headroom);
@@ -562,6 +561,7 @@
 	case IEEE80211_SMPS_AUTOMATIC:
 	case IEEE80211_SMPS_NUM_MODES:
 		WARN_ON(1);
+		smps = IEEE80211_SMPS_OFF;
 		fallthrough;
 	case IEEE80211_SMPS_OFF:
 		action_frame->u.action.u.ht_smps.smps_control =
@@ -578,8 +578,13 @@
 	}
 
 	/* we'll do more on status of this frame */
-	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
-	ieee80211_tx_skb(sdata, skb);
+	info = IEEE80211_SKB_CB(skb);
+	info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+	/* we have 12 bits, and need 6: link_id 4, smps 2 */
+	info->status_data = IEEE80211_STATUS_TYPE_SMPS |
+			    u16_encode_bits(status_link_id << 2 | smps,
+					    IEEE80211_STATUS_SUBDATA_MASK);
+	ieee80211_tx_skb_tid(sdata, skb, 7, link_id);
 
 	return 0;
 }
@@ -602,7 +607,8 @@
 		goto out;
 
 	link->u.mgd.driver_smps_mode = smps_mode;
-	ieee80211_queue_work(&sdata->local->hw, &link->u.mgd.request_smps_work);
+	wiphy_work_queue(sdata->local->hw.wiphy,
+			 &link->u.mgd.request_smps_work);
 out:
 	rcu_read_unlock();
 }
diff -ruw linux-6.4/net/mac80211/ibss.c linux-6.4-fbx/net/mac80211/ibss.c
--- linux-6.4/net/mac80211/ibss.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/ibss.c	2023-11-07 13:38:44.074257019 +0100
@@ -9,7 +9,7 @@
  * Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright(c) 2018-2022 Intel Corporation
+ * Copyright(c) 2018-2023 Intel Corporation
  */
 
 #include <linux/delay.h>
@@ -51,7 +51,6 @@
 	u32 rate_flags, rates = 0, rates_added = 0;
 	struct beacon_data *presp;
 	int frame_len;
-	int shift;
 
 	/* Build IBSS probe response */
 	frame_len = sizeof(struct ieee80211_hdr_3addr) +
@@ -92,7 +91,6 @@
 
 	sband = local->hw.wiphy->bands[chandef->chan->band];
 	rate_flags = ieee80211_chandef_rate_flags(chandef);
-	shift = ieee80211_chandef_get_shift(chandef);
 	rates_n = 0;
 	if (have_higher_than_11mbit)
 		*have_higher_than_11mbit = false;
@@ -111,8 +109,7 @@
 	*pos++ = WLAN_EID_SUPP_RATES;
 	*pos++ = min_t(int, 8, rates_n);
 	for (ri = 0; ri < sband->n_bitrates; ri++) {
-		int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate,
-					5 * (1 << shift));
+		int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate, 5);
 		u8 basic = 0;
 		if (!(rates & BIT(ri)))
 			continue;
@@ -155,8 +152,7 @@
 		*pos++ = WLAN_EID_EXT_SUPP_RATES;
 		*pos++ = rates_n - 8;
 		for (; ri < sband->n_bitrates; ri++) {
-			int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate,
-						5 * (1 << shift));
+			int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate, 5);
 			u8 basic = 0;
 			if (!(rates & BIT(ri)))
 				continue;
@@ -226,7 +222,7 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_mgmt *mgmt;
 	struct cfg80211_bss *bss;
-	u32 bss_change;
+	u64 bss_change;
 	struct cfg80211_chan_def chandef;
 	struct ieee80211_channel *chan;
 	struct beacon_data *presp;
@@ -235,7 +231,7 @@
 	bool radar_required;
 	int err;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	/* Reset own TSF to allow time synchronization work. */
 	drv_reset_tsf(local, sdata);
@@ -299,17 +295,14 @@
 
 	radar_required = err;
 
-	mutex_lock(&local->mtx);
 	if (ieee80211_link_use_channel(&sdata->deflink, &chandef,
 				       ifibss->fixed_channel ?
 					IEEE80211_CHANCTX_SHARED :
 					IEEE80211_CHANCTX_EXCLUSIVE)) {
 		sdata_info(sdata, "Failed to join IBSS, no channel context\n");
-		mutex_unlock(&local->mtx);
 		return;
 	}
 	sdata->deflink.radar_required = radar_required;
-	mutex_unlock(&local->mtx);
 
 	memcpy(ifibss->bssid, bssid, ETH_ALEN);
 
@@ -367,9 +360,7 @@
 		sdata->vif.cfg.ssid_len = 0;
 		RCU_INIT_POINTER(ifibss->presp, NULL);
 		kfree_rcu(presp, rcu_head);
-		mutex_lock(&local->mtx);
 		ieee80211_link_release_channel(&sdata->deflink);
-		mutex_unlock(&local->mtx);
 		sdata_info(sdata, "Failed to join IBSS, driver failure: %d\n",
 			   err);
 		return;
@@ -382,7 +373,6 @@
 		  round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
 
 	bss_meta.chan = chan;
-	bss_meta.scan_width = cfg80211_chandef_to_scan_width(&chandef);
 	bss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta, mgmt,
 					     presp->head_len, GFP_KERNEL);
 
@@ -405,9 +395,8 @@
 	enum nl80211_channel_type chan_type;
 	u64 tsf;
 	u32 rate_flags;
-	int shift;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (beacon_int < 10)
 		beacon_int = 10;
@@ -440,7 +429,6 @@
 
 	sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
 	rate_flags = ieee80211_chandef_rate_flags(&sdata->u.ibss.chandef);
-	shift = ieee80211_vif_get_shift(&sdata->vif);
 
 	basic_rates = 0;
 
@@ -454,8 +442,7 @@
 			    != rate_flags)
 				continue;
 
-			brate = DIV_ROUND_UP(sband->bitrates[j].bitrate,
-					     5 * (1 << shift));
+			brate = DIV_ROUND_UP(sband->bitrates[j].bitrate, 5);
 			if (brate == rate) {
 				if (is_basic)
 					basic_rates |= BIT(j);
@@ -478,7 +465,8 @@
 }
 
 int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
-			      struct cfg80211_csa_settings *csa_settings)
+			      struct cfg80211_csa_settings *csa_settings,
+			      u64 *changed)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 	struct beacon_data *presp, *old_presp;
@@ -487,7 +475,7 @@
 	u16 capability = WLAN_CAPABILITY_IBSS;
 	u64 tsf;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (ifibss->privacy)
 		capability |= WLAN_CAPABILITY_PRIVACY;
@@ -520,15 +508,16 @@
 	if (old_presp)
 		kfree_rcu(old_presp, rcu_head);
 
-	return BSS_CHANGED_BEACON;
+	*changed |= BSS_CHANGED_BEACON;
+	return 0;
 }
 
-int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
+int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 	struct cfg80211_bss *cbss;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	/* When not connected/joined, sending CSA doesn't make sense. */
 	if (ifibss->state != IEEE80211_IBSS_MLME_JOINED)
@@ -552,14 +541,15 @@
 	ifibss->chandef = sdata->deflink.csa_chandef;
 
 	/* generate the beacon */
-	return ieee80211_ibss_csa_beacon(sdata, NULL);
+	return ieee80211_ibss_csa_beacon(sdata, NULL, changed);
 }
 
 void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 
-	cancel_work_sync(&ifibss->csa_connection_drop_work);
+	wiphy_work_cancel(sdata->local->hw.wiphy,
+			  &ifibss->csa_connection_drop_work);
 }
 
 static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta)
@@ -597,7 +587,6 @@
 	struct sta_info *sta;
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	struct ieee80211_supported_band *sband;
-	enum nl80211_bss_scan_width scan_width;
 	int band;
 
 	/*
@@ -626,7 +615,6 @@
 	if (WARN_ON_ONCE(!chanctx_conf))
 		return NULL;
 	band = chanctx_conf->def.chan->band;
-	scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def);
 	rcu_read_unlock();
 
 	sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
@@ -638,7 +626,7 @@
 	/* make sure mandatory rates are always added */
 	sband = local->hw.wiphy->bands[band];
 	sta->sta.deflink.supp_rates[band] = supp_rates |
-			ieee80211_mandatory_rates(sband, scan_width);
+			ieee80211_mandatory_rates(sband);
 
 	return ieee80211_ibss_finish_sta(sta);
 }
@@ -649,7 +637,7 @@
 	int active = 0;
 	struct sta_info *sta;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	rcu_read_lock();
 
@@ -677,6 +665,8 @@
 	struct beacon_data *presp;
 	struct sta_info *sta;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!is_zero_ether_addr(ifibss->bssid)) {
 		cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->chandef.chan,
 					ifibss->bssid, ifibss->ssid,
@@ -723,27 +713,22 @@
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
 						BSS_CHANGED_IBSS);
 	drv_leave_ibss(local, sdata);
-	mutex_lock(&local->mtx);
 	ieee80211_link_release_channel(&sdata->deflink);
-	mutex_unlock(&local->mtx);
 }
 
-static void ieee80211_csa_connection_drop_work(struct work_struct *work)
+static void ieee80211_csa_connection_drop_work(struct wiphy *wiphy,
+					       struct wiphy_work *work)
 {
 	struct ieee80211_sub_if_data *sdata =
 		container_of(work, struct ieee80211_sub_if_data,
 			     u.ibss.csa_connection_drop_work);
 
-	sdata_lock(sdata);
-
 	ieee80211_ibss_disconnect(sdata);
 	synchronize_rcu();
 	skb_queue_purge(&sdata->skb_queue);
 
 	/* trigger a scan to find another IBSS network to join */
-	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
-
-	sdata_unlock(sdata);
+	wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
 }
 
 static void ieee80211_ibss_csa_mark_radar(struct ieee80211_sub_if_data *sdata)
@@ -775,7 +760,7 @@
 	ieee80211_conn_flags_t conn_flags;
 	u32 vht_cap_info = 0;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	conn_flags = IEEE80211_CONN_DISABLE_VHT;
 
@@ -894,7 +879,7 @@
 	return true;
 disconnect:
 	ibss_dbg(sdata, "Can't handle channel switch, disconnect\n");
-	ieee80211_queue_work(&sdata->local->hw,
+	wiphy_work_queue(sdata->local->hw.wiphy,
 			     &ifibss->csa_connection_drop_work);
 
 	ieee80211_ibss_csa_mark_radar(sdata);
@@ -947,7 +932,7 @@
 {
 	u16 auth_alg, auth_transaction;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (len < 24 + 6)
 		return;
@@ -980,7 +965,6 @@
 {
 	struct sta_info *sta;
 	enum nl80211_band band = rx_status->band;
-	enum nl80211_bss_scan_width scan_width;
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
 	bool rates_updated = false;
@@ -1006,15 +990,9 @@
 			u32 prev_rates;
 
 			prev_rates = sta->sta.deflink.supp_rates[band];
-			/* make sure mandatory rates are always added */
-			scan_width = NL80211_BSS_CHAN_WIDTH_20;
-			if (rx_status->bw == RATE_INFO_BW_5)
-				scan_width = NL80211_BSS_CHAN_WIDTH_5;
-			else if (rx_status->bw == RATE_INFO_BW_10)
-				scan_width = NL80211_BSS_CHAN_WIDTH_10;
 
 			sta->sta.deflink.supp_rates[band] = supp_rates |
-				ieee80211_mandatory_rates(sband, scan_width);
+				ieee80211_mandatory_rates(sband);
 			if (sta->sta.deflink.supp_rates[band] != prev_rates) {
 				ibss_dbg(sdata,
 					 "updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n",
@@ -1068,7 +1046,7 @@
 						   &chandef);
 			memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie));
 			ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
-							    &cap_ie,
+							    &cap_ie, NULL,
 							    &sta->deflink);
 			if (memcmp(&cap, &sta->sta.deflink.vht_cap, sizeof(cap)))
 				rates_updated |= true;
@@ -1201,7 +1179,6 @@
 	struct sta_info *sta;
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	struct ieee80211_supported_band *sband;
-	enum nl80211_bss_scan_width scan_width;
 	int band;
 
 	/*
@@ -1227,7 +1204,6 @@
 		return;
 	}
 	band = chanctx_conf->def.chan->band;
-	scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def);
 	rcu_read_unlock();
 
 	sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
@@ -1237,12 +1213,12 @@
 	/* make sure mandatory rates are always added */
 	sband = local->hw.wiphy->bands[band];
 	sta->sta.deflink.supp_rates[band] = supp_rates |
-			ieee80211_mandatory_rates(sband, scan_width);
+			ieee80211_mandatory_rates(sband);
 
 	spin_lock(&ifibss->incomplete_lock);
 	list_add(&sta->list, &ifibss->incomplete_stations);
 	spin_unlock(&ifibss->incomplete_lock);
-	ieee80211_queue_work(&local->hw, &sdata->work);
+	wiphy_work_queue(local->hw.wiphy, &sdata->work);
 }
 
 static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata)
@@ -1253,7 +1229,7 @@
 	unsigned long exp_time = IEEE80211_IBSS_INACTIVITY_LIMIT;
 	unsigned long exp_rsn = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT;
 
-	mutex_lock(&local->sta_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
 		unsigned long last_active = ieee80211_sta_last_active(sta);
@@ -1278,8 +1254,6 @@
 			WARN_ON(__sta_info_destroy(sta));
 		}
 	}
-
-	mutex_unlock(&local->sta_mtx);
 }
 
 /*
@@ -1289,9 +1263,8 @@
 static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
-	enum nl80211_bss_scan_width scan_width;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	mod_timer(&ifibss->timer,
 		  round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
@@ -1311,9 +1284,8 @@
 	sdata_info(sdata,
 		   "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n");
 
-	scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
 	ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len,
-				    NULL, 0, scan_width);
+				    NULL, 0);
 }
 
 static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -1323,7 +1295,7 @@
 	u16 capability;
 	int i;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (ifibss->fixed_bssid) {
 		memcpy(bssid, ifibss->bssid, ETH_ALEN);
@@ -1403,6 +1375,9 @@
 	case NL80211_CHAN_WIDTH_160:
 		width = 160;
 		break;
+	case NL80211_CHAN_WIDTH_320:
+		width = 320;
+		break;
 	default:
 		width = 20;
 		break;
@@ -1431,10 +1406,9 @@
 	struct cfg80211_bss *cbss;
 	struct ieee80211_channel *chan = NULL;
 	const u8 *bssid = NULL;
-	enum nl80211_bss_scan_width scan_width;
 	int active_ibss;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	active_ibss = ieee80211_sta_active_ibss(sdata);
 	ibss_dbg(sdata, "sta_find_ibss (active_ibss=%d)\n", active_ibss);
@@ -1490,8 +1464,6 @@
 
 		sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
 
-		scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
-
 		if (ifibss->fixed_channel) {
 			num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
 								 &ifibss->chandef,
@@ -1499,11 +1471,10 @@
 								 ARRAY_SIZE(channels));
 			ieee80211_request_ibss_scan(sdata, ifibss->ssid,
 						    ifibss->ssid_len, channels,
-						    num, scan_width);
+						    num);
 		} else {
 			ieee80211_request_ibss_scan(sdata, ifibss->ssid,
-						    ifibss->ssid_len, NULL,
-						    0, scan_width);
+						    ifibss->ssid_len, NULL, 0);
 		}
 	} else {
 		int interval = IEEE80211_SCAN_INTERVAL;
@@ -1528,7 +1499,7 @@
 	struct beacon_data *presp;
 	u8 *pos, *end;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	presp = sdata_dereference(ifibss->presp, sdata);
 
@@ -1624,10 +1595,8 @@
 	mgmt = (struct ieee80211_mgmt *) skb->data;
 	fc = le16_to_cpu(mgmt->frame_control);
 
-	sdata_lock(sdata);
-
 	if (!sdata->u.ibss.ssid_len)
-		goto mgmt_out; /* not ready to merge yet */
+		return; /* not ready to merge yet */
 
 	switch (fc & IEEE80211_FCTL_STYPE) {
 	case IEEE80211_STYPE_PROBE_REQ:
@@ -1667,9 +1636,6 @@
 			break;
 		}
 	}
-
- mgmt_out:
-	sdata_unlock(sdata);
 }
 
 void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
@@ -1677,15 +1643,13 @@
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 	struct sta_info *sta;
 
-	sdata_lock(sdata);
-
 	/*
 	 * Work could be scheduled after scan or similar
 	 * when we aren't even joined (or trying) with a
 	 * network.
 	 */
 	if (!ifibss->ssid_len)
-		goto out;
+		return;
 
 	spin_lock_bh(&ifibss->incomplete_lock);
 	while (!list_empty(&ifibss->incomplete_stations)) {
@@ -1711,9 +1675,6 @@
 		WARN_ON(1);
 		break;
 	}
-
- out:
-	sdata_unlock(sdata);
 }
 
 static void ieee80211_ibss_timer(struct timer_list *t)
@@ -1721,7 +1682,7 @@
 	struct ieee80211_sub_if_data *sdata =
 		from_timer(sdata, t, u.ibss.timer);
 
-	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+	wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
 }
 
 void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
@@ -1731,7 +1692,7 @@
 	timer_setup(&ifibss->timer, ieee80211_ibss_timer, 0);
 	INIT_LIST_HEAD(&ifibss->incomplete_stations);
 	spin_lock_init(&ifibss->incomplete_lock);
-	INIT_WORK(&ifibss->csa_connection_drop_work,
+	wiphy_work_init(&ifibss->csa_connection_drop_work,
 		  ieee80211_csa_connection_drop_work);
 }
 
@@ -1740,7 +1701,8 @@
 {
 	struct ieee80211_sub_if_data *sdata;
 
-	mutex_lock(&local->iflist_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		if (!ieee80211_sdata_running(sdata))
 			continue;
@@ -1748,13 +1710,12 @@
 			continue;
 		sdata->u.ibss.last_scan_completed = jiffies;
 	}
-	mutex_unlock(&local->iflist_mtx);
 }
 
 int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 			struct cfg80211_ibss_params *params)
 {
-	u32 changed = 0;
+	u64 changed = 0;
 	u32 rate_flags;
 	struct ieee80211_supported_band *sband;
 	enum ieee80211_chanctx_mode chanmode;
@@ -1763,6 +1724,8 @@
 	int i;
 	int ret;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (params->chandef.chan->freq_offset) {
 		/* this may work, but is untested */
 		return -EOPNOTSUPP;
@@ -1783,10 +1746,8 @@
 	chanmode = (params->channel_fixed && !ret) ?
 		IEEE80211_CHANCTX_SHARED : IEEE80211_CHANCTX_EXCLUSIVE;
 
-	mutex_lock(&local->chanctx_mtx);
 	ret = ieee80211_check_combinations(sdata, &params->chandef, chanmode,
 					   radar_detect_width);
-	mutex_unlock(&local->chanctx_mtx);
 	if (ret < 0)
 		return ret;
 
@@ -1856,7 +1817,7 @@
 	sdata->deflink.needed_rx_chains = local->rx_chains;
 	sdata->control_port_over_nl80211 = params->control_port_over_nl80211;
 
-	ieee80211_queue_work(&local->hw, &sdata->work);
+	wiphy_work_queue(local->hw.wiphy, &sdata->work);
 
 	return 0;
 }
diff -ruw linux-6.4/net/mac80211/ieee80211_i.h linux-6.4-fbx/net/mac80211/ieee80211_i.h
--- linux-6.4/net/mac80211/ieee80211_i.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/ieee80211_i.h	2024-04-19 16:04:28.969736104 +0200
@@ -85,6 +85,12 @@
 
 #define IEEE80211_MAX_NAN_INSTANCE_ID 255
 
+enum ieee80211_status_data {
+	IEEE80211_STATUS_TYPE_MASK	= 0x00f,
+	IEEE80211_STATUS_TYPE_INVALID	= 0,
+	IEEE80211_STATUS_TYPE_SMPS	= 1,
+	IEEE80211_STATUS_SUBDATA_MASK	= 0xff0,
+};
 
 /*
  * Keep a station's queues on the active list for deficit accounting purposes
@@ -410,6 +416,8 @@
 		ieee80211_conn_flags_t conn_flags;
 
 		u16 status;
+
+		bool disabled;
 	} link[IEEE80211_MLD_MAX_NUM_LINKS];
 
 	u8 ap_addr[ETH_ALEN] __aligned(2);
@@ -459,15 +467,26 @@
 	bool downgraded;
 };
 
+/* Advertised TID-to-link mapping info */
+struct ieee80211_adv_ttlm_info {
+	/* time in TUs at which the new mapping is established, or 0 if there is
+	 * no planned advertised TID-to-link mapping
+	 */
+	u16 switch_time;
+	u32 duration; /* duration of the planned T2L map in TUs */
+	u16 map; /* map of usable links for all TIDs */
+	bool active; /* whether the advertised mapping is active or not */
+};
+
 DECLARE_EWMA(beacon_signal, 4, 4)
 
 struct ieee80211_if_managed {
 	struct timer_list timer;
 	struct timer_list conn_mon_timer;
 	struct timer_list bcn_mon_timer;
-	struct work_struct monitor_work;
-	struct work_struct beacon_connection_loss_work;
-	struct work_struct csa_connection_drop_work;
+	struct wiphy_work monitor_work;
+	struct wiphy_work beacon_connection_loss_work;
+	struct wiphy_work csa_connection_drop_work;
 
 	unsigned long beacon_timeout;
 	unsigned long probe_timeout;
@@ -528,7 +547,7 @@
 
 	/* TDLS support */
 	u8 tdls_peer[ETH_ALEN] __aligned(2);
-	struct delayed_work tdls_peer_del_work;
+	struct wiphy_delayed_work tdls_peer_del_work;
 	struct sk_buff *orig_teardown_skb; /* The original teardown skb */
 	struct sk_buff *teardown_skb; /* A copy to send through the AP */
 	spinlock_t teardown_lock; /* To lock changing teardown_skb */
@@ -542,18 +561,25 @@
 	 * on the BE queue, but there's a lot of VO traffic, we might
 	 * get stuck in a downgraded situation and flush takes forever.
 	 */
-	struct delayed_work tx_tspec_wk;
+	struct wiphy_delayed_work tx_tspec_wk;
 
 	/* Information elements from the last transmitted (Re)Association
 	 * Request frame.
 	 */
 	u8 *assoc_req_ies;
 	size_t assoc_req_ies_len;
+
+	struct wiphy_delayed_work ml_reconf_work;
+	u16 removed_links;
+
+	/* TID-to-link mapping support */
+	struct wiphy_delayed_work ttlm_work;
+	struct ieee80211_adv_ttlm_info ttlm_info;
 };
 
 struct ieee80211_if_ibss {
 	struct timer_list timer;
-	struct work_struct csa_connection_drop_work;
+	struct wiphy_work csa_connection_drop_work;
 
 	unsigned long last_scan_completed;
 
@@ -613,8 +639,9 @@
  * these declarations define the interface, which enables
  * vendor-specific mesh synchronization
  *
+ * @rx_bcn_presp: beacon/probe response was received
+ * @adjust_tsf: TSF adjustment method
  */
-struct ieee802_11_elems;
 struct ieee80211_mesh_sync_ops {
 	void (*rx_bcn_presp)(struct ieee80211_sub_if_data *sdata, u16 stype,
 			     struct ieee80211_mgmt *mgmt, unsigned int len,
@@ -671,7 +698,7 @@
 	struct timer_list mesh_path_root_timer;
 
 	unsigned long wrkq_flags;
-	unsigned long mbss_changed;
+	unsigned long mbss_changed[64 / BITS_PER_LONG];
 
 	bool userspace_handles_dfs;
 
@@ -744,6 +771,18 @@
 	int mesh_paths_generation;
 	int mpp_paths_generation;
 	struct mesh_tx_cache tx_cache;
+
+	/* Store Vendor specific node metrics IE */
+	u8 node_vendor_ie[260];
+	u8 node_vendor_ie_len;
+	/* Store Vendor specific mesh path metrics IE */
+	u8 mpm_vendor_ie[260];
+	u8 mpm_vendor_ie_len;
+
+	/* list to hold blocked mesh peer link entries */
+	struct list_head mplink_blocking_list;
+	/* lock for mplink_blocking_list list */
+	spinlock_t mplink_blocking_list_lock;
 };
 
 #ifdef CONFIG_MAC80211_MESH
@@ -854,12 +893,13 @@
  * struct txq_info - per tid queue
  *
  * @tin: contains packets split into multiple flows
- * @def_flow: used as a fallback flow when a packet destined to @tin hashes to
- *	a fq_flow which is already owned by a different tin
- * @def_cvars: codel vars for @def_flow
+ * @def_cvars: codel vars for the @tin's default_flow
+ * @cstats: code statistics for this queue
  * @frags: used to keep fragments created after dequeue
  * @schedule_order: used with ieee80211_local->active_txqs
  * @schedule_round: counter to prevent infinite loops on TXQ scheduling
+ * @flags: TXQ flags from &enum txq_info_flags
+ * @txq: the driver visible part
  */
 struct txq_info {
 	struct fq_tin tin;
@@ -877,18 +917,30 @@
 	struct ieee80211_txq txq;
 };
 
+#ifdef CONFIG_FBX80211_SCUM
+struct ieee80211_if_scum {
+	struct list_head client_list;
+	struct list_head next;
+	bool skip_mon;
+};
+#endif
+
 struct ieee80211_if_mntr {
 	u32 flags;
 	u8 mu_follow_addr[ETH_ALEN] __aligned(2);
 
 	struct list_head list;
+#ifdef CONFIG_FBX80211_SCUM
+	struct ieee80211_if_scum scum;
+#endif
 };
 
 /**
  * struct ieee80211_if_nan - NAN state
  *
  * @conf: current NAN configuration
- * @func_ids: a bitmap of available instance_id's
+ * @func_lock: lock for @func_inst_ids
+ * @function_inst_ids: a bitmap of available instance_id's
  */
 struct ieee80211_if_nan {
 	struct cfg80211_nan_conf conf;
@@ -918,10 +970,12 @@
 
 	bool csa_waiting_bcn;
 	bool csa_ignored_same_chan;
-	struct timer_list chswitch_timer;
-	struct work_struct chswitch_work;
+	struct wiphy_delayed_work chswitch_work;
+
+	struct wiphy_work request_smps_work;
+	/* used to reconfigure hardware SM PS */
+	struct wiphy_work recalc_smps;
 
-	struct work_struct request_smps_work;
 	bool beacon_crc_valid;
 	u32 beacon_crc;
 	struct ewma_beacon_signal ave_beacon_signal;
@@ -947,6 +1001,8 @@
 	int wmm_last_param_set;
 	int mu_edca_last_param_set;
 
+	u8 bss_param_ch_cnt;
+
 	struct cfg80211_bss *bss;
 };
 
@@ -964,8 +1020,8 @@
 	struct ieee80211_sub_if_data *sdata;
 	unsigned int link_id;
 
-	struct list_head assigned_chanctx_list; /* protected by chanctx_mtx */
-	struct list_head reserved_chanctx_list; /* protected by chanctx_mtx */
+	struct list_head assigned_chanctx_list; /* protected by wiphy mutex */
+	struct list_head reserved_chanctx_list; /* protected by wiphy mutex */
 
 	/* multicast keys only */
 	struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS +
@@ -975,18 +1031,18 @@
 	struct ieee80211_key __rcu *default_mgmt_key;
 	struct ieee80211_key __rcu *default_beacon_key;
 
-	struct work_struct csa_finalize_work;
-	bool csa_block_tx; /* write-protected by sdata_lock and local->mtx */
+	struct wiphy_work csa_finalize_work;
+	bool csa_block_tx;
 
 	bool operating_11g_mode;
 
 	struct cfg80211_chan_def csa_chandef;
 
-	struct work_struct color_change_finalize_work;
+	struct wiphy_work color_change_finalize_work;
 	struct delayed_work color_collision_detect_work;
 	u64 color_bitmap;
 
-	/* context reservation -- protected with chanctx_mtx */
+	/* context reservation -- protected with wiphy mutex */
 	struct ieee80211_chanctx *reserved_chanctx;
 	struct cfg80211_chan_def reserved_chandef;
 	bool reserved_radar_required;
@@ -999,7 +1055,7 @@
 	int ap_power_level; /* in dBm */
 
 	bool radar_required;
-	struct delayed_work dfs_cac_timer_work;
+	struct wiphy_delayed_work dfs_cac_timer_work;
 
 	union {
 		struct ieee80211_link_data_managed mgd;
@@ -1026,7 +1082,7 @@
 	/* count for keys needing tailroom space allocation */
 	int crypto_tx_tailroom_needed_cnt;
 	int crypto_tx_tailroom_pending_dec;
-	struct delayed_work dec_tailroom_needed_wk;
+	struct wiphy_delayed_work dec_tailroom_needed_wk;
 
 	struct net_device *dev;
 	struct ieee80211_local *local;
@@ -1058,10 +1114,7 @@
 	atomic_t num_tx_queued;
 	struct mac80211_qos_map __rcu *qos_map;
 
-	/* used to reconfigure hardware SM PS */
-	struct work_struct recalc_smps;
-
-	struct work_struct work;
+	struct wiphy_work work;
 	struct sk_buff_head skb_queue;
 	struct sk_buff_head status_queue;
 
@@ -1100,7 +1153,7 @@
 	struct ieee80211_link_data __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
 
 	/* for ieee80211_set_active_links_async() */
-	struct work_struct activate_links_work;
+	struct wiphy_work activate_links_work;
 	u16 desired_active_links;
 
 #ifdef CONFIG_MAC80211_DEBUGFS
@@ -1123,62 +1176,8 @@
 	return container_of(p, struct ieee80211_sub_if_data, vif);
 }
 
-static inline void sdata_lock(struct ieee80211_sub_if_data *sdata)
-	__acquires(&sdata->wdev.mtx)
-{
-	mutex_lock(&sdata->wdev.mtx);
-	__acquire(&sdata->wdev.mtx);
-}
-
-static inline void sdata_unlock(struct ieee80211_sub_if_data *sdata)
-	__releases(&sdata->wdev.mtx)
-{
-	mutex_unlock(&sdata->wdev.mtx);
-	__release(&sdata->wdev.mtx);
-}
-
 #define sdata_dereference(p, sdata) \
-	rcu_dereference_protected(p, lockdep_is_held(&sdata->wdev.mtx))
-
-static inline void
-sdata_assert_lock(struct ieee80211_sub_if_data *sdata)
-{
-	lockdep_assert_held(&sdata->wdev.mtx);
-}
-
-static inline int
-ieee80211_chanwidth_get_shift(enum nl80211_chan_width width)
-{
-	switch (width) {
-	case NL80211_CHAN_WIDTH_5:
-		return 2;
-	case NL80211_CHAN_WIDTH_10:
-		return 1;
-	default:
-		return 0;
-	}
-}
-
-static inline int
-ieee80211_chandef_get_shift(struct cfg80211_chan_def *chandef)
-{
-	return ieee80211_chanwidth_get_shift(chandef->width);
-}
-
-static inline int
-ieee80211_vif_get_shift(struct ieee80211_vif *vif)
-{
-	struct ieee80211_chanctx_conf *chanctx_conf;
-	int shift = 0;
-
-	rcu_read_lock();
-	chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
-	if (chanctx_conf)
-		shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
-	rcu_read_unlock();
-
-	return shift;
-}
+	wiphy_dereference(sdata->local->hw.wiphy, p)
 
 static inline int
 ieee80211_get_mbssid_beacon_len(struct cfg80211_mbssid_elems *elems,
@@ -1248,7 +1247,7 @@
 #endif
 
 /**
- * mac80211 scan flags - currently active scan mode
+ * enum mac80211_scan_flags - currently active scan mode
  *
  * @SCAN_SW_SCANNING: We're currently in the process of scanning but may as
  *	well be on the operating channel
@@ -1266,7 +1265,7 @@
  *	and could send a probe request after receiving a beacon.
  * @SCAN_BEACON_DONE: Beacon received, we can now send a probe request
  */
-enum {
+enum mac80211_scan_flags {
 	SCAN_SW_SCANNING,
 	SCAN_HW_SCANNING,
 	SCAN_ONCHANNEL_SCANNING,
@@ -1356,7 +1355,7 @@
 	spinlock_t filter_lock;
 
 	/* used for uploading changed mc list */
-	struct work_struct reconfig_filter;
+	struct wiphy_work reconfig_filter;
 
 	/* aggregated multicast list */
 	struct netdev_hw_addr_list mc_list;
@@ -1394,10 +1393,13 @@
 	/* device is during a HW reconfig */
 	bool in_reconfig;
 
+	/* reconfiguration failed ... suppress some warnings etc. */
+	bool reconfig_failure;
+
 	/* wowlan is enabled -- don't reconfig on resume */
 	bool wowlan;
 
-	struct work_struct radar_detected_work;
+	struct wiphy_work radar_detected_work;
 
 	/* number of RX chains the hardware has */
 	u8 rx_chains;
@@ -1420,10 +1422,9 @@
 
 	/* Station data */
 	/*
-	 * The mutex only protects the list, hash table and
-	 * counter, reads are done with RCU.
+	 * The list, hash table and counter are protected
+	 * by the wiphy mutex, reads are done with RCU.
 	 */
-	struct mutex sta_mtx;
 	spinlock_t tim_lock;
 	unsigned long num_sta;
 	struct list_head sta_list;
@@ -1452,15 +1453,6 @@
 	struct list_head mon_list; /* only that are IFF_UP && !cooked */
 	struct mutex iflist_mtx;
 
-	/*
-	 * Key mutex, protects sdata's key_list and sta_info's
-	 * key pointers and ptk_idx (write access, they're RCU.)
-	 */
-	struct mutex key_mtx;
-
-	/* mutex for scan and work locking */
-	struct mutex mtx;
-
 	/* Scanning and BSS list */
 	unsigned long scanning;
 	struct cfg80211_ssid scan_ssid;
@@ -1474,14 +1466,14 @@
 	int hw_scan_ies_bufsize;
 	struct cfg80211_scan_info scan_info;
 
-	struct work_struct sched_scan_stopped_work;
+	struct wiphy_work sched_scan_stopped_work;
 	struct ieee80211_sub_if_data __rcu *sched_scan_sdata;
 	struct cfg80211_sched_scan_request __rcu *sched_scan_req;
 	u8 scan_addr[ETH_ALEN];
 
 	unsigned long leave_oper_channel_time;
 	enum mac80211_scan_state next_scan_state;
-	struct delayed_work scan_work;
+	struct wiphy_delayed_work scan_work;
 	struct ieee80211_sub_if_data __rcu *scan_sdata;
 	/* For backward compatibility only -- do not use */
 	struct cfg80211_chan_def _oper_chandef;
@@ -1491,7 +1483,6 @@
 
 	/* channel contexts */
 	struct list_head chanctx_list;
-	struct mutex chanctx_mtx;
 
 #ifdef CONFIG_MAC80211_LEDS
 	struct led_trigger tx_led, rx_led, assoc_led, radio_led;
@@ -1545,8 +1536,8 @@
 	 * interface (and monitors) in PS, this then points there.
 	 */
 	struct ieee80211_sub_if_data *ps_sdata;
-	struct work_struct dynamic_ps_enable_work;
-	struct work_struct dynamic_ps_disable_work;
+	struct wiphy_work dynamic_ps_enable_work;
+	struct wiphy_work dynamic_ps_disable_work;
 	struct timer_list dynamic_ps_timer;
 	struct notifier_block ifa_notifier;
 	struct notifier_block ifa6_notifier;
@@ -1574,9 +1565,9 @@
 	/*
 	 * Remain-on-channel support
 	 */
-	struct delayed_work roc_work;
+	struct wiphy_delayed_work roc_work;
 	struct list_head roc_list;
-	struct work_struct hw_roc_start, hw_roc_done;
+	struct wiphy_work hw_roc_start, hw_roc_done;
 	unsigned long hw_roc_start_time;
 	u64 roc_cookie_counter;
 
@@ -1591,6 +1582,9 @@
 
 	/* extended capabilities provided by mac80211 */
 	u8 ext_capa[8];
+#ifdef CONFIG_FBX80211_SCUM
+	struct list_head scum_list;
+#endif
 };
 
 static inline struct ieee80211_sub_if_data *
@@ -1612,7 +1606,7 @@
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	enum nl80211_band band;
 
-	WARN_ON(sdata->vif.valid_links);
+	WARN_ON(ieee80211_vif_is_mld(&sdata->vif));
 
 	rcu_read_lock();
 	chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
@@ -1722,7 +1716,10 @@
 	const struct ieee80211_aid_response_ie *aid_resp;
 	const struct ieee80211_eht_cap_elem *eht_cap;
 	const struct ieee80211_eht_operation *eht_operation;
-	const struct ieee80211_multi_link_elem *multi_link;
+	const struct ieee80211_multi_link_elem *ml_basic;
+	const struct ieee80211_multi_link_elem *ml_reconf;
+	const struct ieee80211_bandwidth_indication *bandwidth_indication;
+	const struct ieee80211_ttlm_elem *ttlm[IEEE80211_TTLM_MAX_CNT];
 
 	/* length of them, respectively */
 	u8 ext_capab_len;
@@ -1747,7 +1744,16 @@
 	u8 eht_cap_len;
 
 	/* mult-link element can be de-fragmented and thus u8 is not sufficient */
-	size_t multi_link_len;
+	size_t ml_basic_len;
+	size_t ml_reconf_len;
+
+	/* The basic Multi-Link element in the original IEs */
+	const struct element *ml_basic_elem;
+
+	/* The reconfiguration Multi-Link element in the original IEs */
+	const struct element *ml_reconf_elem;
+
+	u8 ttlm_num;
 
 	/*
 	 * store the per station profile pointer and length in case that the
@@ -1766,7 +1772,14 @@
 	 */
 	size_t scratch_len;
 	u8 *scratch_pos;
-	u8 scratch[];
+	u8 scratch[] __counted_by(scratch_len);
+};
+
+struct ieee802_11_mesh_vendor_specific_elems {
+	const u8 *ie_start;
+	u8 ie_len;
+	/* whether a parse error occurred while retrieving these elements */
+	bool parse_error;
 };
 
 static inline struct ieee80211_local *hw_to_local(
@@ -1827,7 +1840,7 @@
 				       struct ieee80211_link_data *link,
 				       u64 changed);
 void ieee80211_configure_filter(struct ieee80211_local *local);
-u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
+u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
 
 u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local);
 int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
@@ -1855,7 +1868,6 @@
 			   struct ieee80211_sub_if_data *sdata);
 void ieee80211_recalc_ps(struct ieee80211_local *local);
 void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata);
-int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 				  struct sk_buff *skb);
@@ -1887,8 +1899,10 @@
 void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 				   struct sk_buff *skb);
 int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
-			      struct cfg80211_csa_settings *csa_settings);
-int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata);
+			      struct cfg80211_csa_settings *csa_settings,
+			      u64 *changed);
+int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata,
+			      u64 *changed);
 void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata);
 
 /* OCB code */
@@ -1905,22 +1919,26 @@
 void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 				   struct sk_buff *skb);
 int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
-			      struct cfg80211_csa_settings *csa_settings);
-int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata);
+			      struct cfg80211_csa_settings *csa_settings,
+			      u64 *changed);
+int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata,
+			      u64 *changed);
 
 /* scan/BSS handling */
-void ieee80211_scan_work(struct work_struct *work);
+void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work);
 int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
 				const u8 *ssid, u8 ssid_len,
 				struct ieee80211_channel **channels,
-				unsigned int n_channels,
-				enum nl80211_bss_scan_width scan_width);
+				unsigned int n_channels);
 int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
 			   struct cfg80211_scan_request *req);
 void ieee80211_scan_cancel(struct ieee80211_local *local);
 void ieee80211_run_deferred_scan(struct ieee80211_local *local);
 void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb);
 
+void ieee80211_inform_bss(struct wiphy *wiphy, struct cfg80211_bss *bss,
+			  const struct cfg80211_bss_ies *ies, void *data);
+
 void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local);
 struct ieee80211_bss *
 ieee80211_bss_info_update(struct ieee80211_local *local,
@@ -1939,7 +1957,8 @@
 				       struct cfg80211_sched_scan_request *req);
 int ieee80211_request_sched_scan_stop(struct ieee80211_local *local);
 void ieee80211_sched_scan_end(struct ieee80211_local *local);
-void ieee80211_sched_scan_stopped_work(struct work_struct *work);
+void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy,
+				       struct wiphy_work *work);
 
 /* off-channel/mgmt-tx */
 void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
@@ -1959,19 +1978,20 @@
 				  struct wireless_dev *wdev, u64 cookie);
 
 /* channel switch handling */
-void ieee80211_csa_finalize_work(struct work_struct *work);
+void ieee80211_csa_finalize_work(struct wiphy *wiphy, struct wiphy_work *work);
 int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
 			     struct cfg80211_csa_settings *params);
 
 /* color change handling */
-void ieee80211_color_change_finalize_work(struct work_struct *work);
+void ieee80211_color_change_finalize_work(struct wiphy *wiphy,
+					  struct wiphy_work *work);
 void ieee80211_color_collision_detection_work(struct work_struct *work);
 
 /* interface handling */
 #define MAC80211_SUPPORTED_FEATURES_TX	(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
 					 NETIF_F_HW_CSUM | NETIF_F_SG | \
 					 NETIF_F_HIGHDMA | NETIF_F_GSO_SOFTWARE | \
-					 NETIF_F_HW_TC)
+					 NETIF_F_LLTX)
 #define MAC80211_SUPPORTED_FEATURES_RX	(NETIF_F_RXCSUM)
 #define MAC80211_SUPPORTED_FEATURES	(MAC80211_SUPPORTED_FEATURES_TX | \
 					 MAC80211_SUPPORTED_FEATURES_RX)
@@ -2013,8 +2033,11 @@
 			 struct ieee80211_bss_conf *link_conf);
 void ieee80211_link_stop(struct ieee80211_link_data *link);
 int ieee80211_vif_set_links(struct ieee80211_sub_if_data *sdata,
-			    u16 new_links);
-void ieee80211_vif_clear_links(struct ieee80211_sub_if_data *sdata);
+			    u16 new_links, u16 dormant_links);
+static inline void ieee80211_vif_clear_links(struct ieee80211_sub_if_data *sdata)
+{
+	ieee80211_vif_set_links(sdata, 0, 0);
+}
 
 /* tx handling */
 void ieee80211_clear_tx_pending(struct ieee80211_local *local);
@@ -2036,7 +2059,7 @@
 ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
 			      struct sk_buff *skb, u32 info_flags);
 void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
-			  int retry_count, int shift, bool send_to_cooked,
+			  int retry_count, bool send_to_cooked,
 			  struct ieee80211_tx_status *status);
 
 void ieee80211_check_fast_xmit(struct sta_info *sta);
@@ -2069,15 +2092,13 @@
 			  u16 initiator, u16 reason_code);
 int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
 			       enum ieee80211_smps_mode smps, const u8 *da,
-			       const u8 *bssid);
+			       const u8 *bssid, int link_id);
 bool ieee80211_smps_is_restrictive(enum ieee80211_smps_mode smps_mode_old,
 				   enum ieee80211_smps_mode smps_mode_new);
 
-void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
-				     u16 initiator, u16 reason, bool stop);
 void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 				    u16 initiator, u16 reason, bool stop);
-void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
+void __ieee80211_start_rx_ba_session(struct sta_info *sta,
 				      u8 dialog_token, u16 timeout,
 				      u16 start_seq_num, u16 ba_policy, u16 tid,
 				      u16 buf_size, bool tx, bool auto_seq,
@@ -2098,13 +2119,11 @@
 
 int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 				   enum ieee80211_agg_stop_reason reason);
-int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
-				    enum ieee80211_agg_stop_reason reason);
 void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
 			      struct tid_ampdu_tx *tid_tx);
 void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
 			     struct tid_ampdu_tx *tid_tx);
-void ieee80211_ba_session_work(struct work_struct *work);
+void ieee80211_ba_session_work(struct wiphy *wiphy, struct wiphy_work *work);
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
 
@@ -2117,6 +2136,7 @@
 ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
 				    struct ieee80211_supported_band *sband,
 				    const struct ieee80211_vht_cap *vht_cap_ie,
+				    const struct ieee80211_vht_cap *vht_cap_ie2,
 				    struct link_sta_info *link_sta);
 enum ieee80211_sta_rx_bandwidth
 ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta);
@@ -2181,7 +2201,7 @@
  *	flags from &enum ieee80211_conn_flags.
  * @bssid: the currently connected bssid (for reporting)
  * @csa_ie: parsed 802.11 csa elements on count, mode, chandef and mesh ttl.
-	All of them will be filled with if success only.
+ *	All of them will be filled with if success only.
  * Return: 0 on success, <0 on error and >0 if there is nothing to parse.
  */
 int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
@@ -2213,8 +2233,7 @@
 /* utility functions/constants */
 extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
 int ieee80211_frame_duration(enum nl80211_band band, size_t len,
-			     int rate, int erp, int short_preamble,
-			     int shift);
+			     int rate, int erp, int short_preamble);
 void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
 					   struct ieee80211_tx_queue_params *qparam,
 					   int ac);
@@ -2269,8 +2288,6 @@
  *	(or re-association) response frame if this is given
  * @from_ap: frame is received from an AP (currently used only
  *	for EHT capabilities parsing)
- * @scratch_len: if non zero, specifies the requested length of the scratch
- *      buffer; otherwise, 'len' is used.
  */
 struct ieee80211_elems_parse_params {
 	const u8 *start;
@@ -2281,7 +2298,6 @@
 	struct cfg80211_bss *bss;
 	int link_id;
 	bool from_ap;
-	size_t scratch_len;
 };
 
 struct ieee802_11_elems *
@@ -2305,6 +2321,10 @@
 	return ieee802_11_parse_elems_full(&params);
 }
 
+u32 ieee802_11_parse_mesh_vendor_elems(const u8 *start, size_t len, bool action,
+				       struct ieee802_11_mesh_vendor_specific_elems *elems,
+				       u64 filter, u32 crc, u8 type);
+
 static inline struct ieee802_11_elems *
 ieee802_11_parse_elems(const u8 *start, size_t len, bool action,
 		       struct cfg80211_bss *bss)
@@ -2312,8 +2332,6 @@
 	return ieee802_11_parse_elems_crc(start, len, action, 0, 0, bss);
 }
 
-void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos, u8 frag_id);
-
 extern const int ieee802_1d_to_ac[8];
 
 static inline int ieee80211_ac_from_tid(int tid)
@@ -2321,8 +2339,10 @@
 	return ieee802_1d_to_ac[tid & 7];
 }
 
-void ieee80211_dynamic_ps_enable_work(struct work_struct *work);
-void ieee80211_dynamic_ps_disable_work(struct work_struct *work);
+void ieee80211_dynamic_ps_enable_work(struct wiphy *wiphy,
+				      struct wiphy_work *work);
+void ieee80211_dynamic_ps_disable_work(struct wiphy *wiphy,
+				       struct wiphy_work *work);
 void ieee80211_dynamic_ps_timer(struct timer_list *t);
 void ieee80211_send_nullfunc(struct ieee80211_local *local,
 			     struct ieee80211_sub_if_data *sdata,
@@ -2407,6 +2427,7 @@
 			struct txq_info *txq, int tid);
 void ieee80211_txq_purge(struct ieee80211_local *local,
 			 struct txq_info *txqi);
+void ieee80211_purge_sta_txqs(struct sta_info *sta);
 void ieee80211_txq_remove_vlan(struct ieee80211_local *local,
 			       struct ieee80211_sub_if_data *sdata);
 void ieee80211_fill_txq_stats(struct cfg80211_txq_stats *txqstats,
@@ -2421,6 +2442,7 @@
 				    const u8 *da, const u8 *bssid,
 				    u16 stype, u16 reason,
 				    bool send_frame, u8 *frame_buf);
+u8 *ieee80211_write_he_6ghz_cap(u8 *pos, __le16 cap, u8 *end);
 
 enum {
 	IEEE80211_PROBE_FLAG_DIRECTED		= BIT(0),
@@ -2499,7 +2521,7 @@
 				const struct ieee80211_vht_operation *oper,
 				const struct ieee80211_ht_operation *htop,
 				struct cfg80211_chan_def *chandef);
-void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation *eht_oper,
+void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation_info *info,
 				bool support_160, bool support_320,
 				struct cfg80211_chan_def *chandef);
 bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
@@ -2541,10 +2563,10 @@
 				      struct ieee80211_link_data *rsvd_for);
 bool ieee80211_is_radar_required(struct ieee80211_local *local);
 
-void ieee80211_dfs_cac_timer(unsigned long data);
-void ieee80211_dfs_cac_timer_work(struct work_struct *work);
+void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work);
 void ieee80211_dfs_cac_cancel(struct ieee80211_local *local);
-void ieee80211_dfs_radar_detected_work(struct work_struct *work);
+void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
+				       struct wiphy_work *work);
 int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
 			      struct cfg80211_csa_settings *csa_settings);
 
@@ -2560,13 +2582,13 @@
 
 /* TDLS */
 int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
-			const u8 *peer, u8 action_code, u8 dialog_token,
-			u16 status_code, u32 peer_capability,
-			bool initiator, const u8 *extra_ies,
-			size_t extra_ies_len);
+			const u8 *peer, int link_id,
+			u8 action_code, u8 dialog_token, u16 status_code,
+			u32 peer_capability, bool initiator,
+			const u8 *extra_ies, size_t extra_ies_len);
 int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
 			const u8 *peer, enum nl80211_tdls_operation oper);
-void ieee80211_tdls_peer_del_work(struct work_struct *wk);
+void ieee80211_tdls_peer_del_work(struct wiphy *wiphy, struct wiphy_work *wk);
 int ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev,
 				  const u8 *addr, u8 oper_class,
 				  struct cfg80211_chan_def *chandef);
diff -ruw linux-6.4/net/mac80211/iface.c linux-6.4-fbx/net/mac80211/iface.c
--- linux-6.4/net/mac80211/iface.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/iface.c	2023-12-12 17:24:34.175627535 +0100
@@ -8,7 +8,7 @@
  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (c) 2016        Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
  */
 #include <linux/slab.h>
 #include <linux/kernel.h>
@@ -26,6 +26,7 @@
 #include "driver-ops.h"
 #include "wme.h"
 #include "rate.h"
+#include "fbx_scum.h"
 
 /**
  * DOC: Interface list locking
@@ -33,17 +34,16 @@
  * The interface list in each struct ieee80211_local is protected
  * three-fold:
  *
- * (1) modifications may only be done under the RTNL
- * (2) modifications and readers are protected against each other by
- *     the iflist_mtx.
- * (3) modifications are done in an RCU manner so atomic readers
+ * (1) modifications may only be done under the RTNL *and* wiphy mutex
+ *     *and* iflist_mtx
+ * (2) modifications are done in an RCU manner so atomic readers
  *     can traverse the list in RCU-safe blocks.
  *
  * As a consequence, reads (traversals) of the list can be protected
- * by either the RTNL, the iflist_mtx or RCU.
+ * by either the RTNL, the wiphy mutex, the iflist_mtx or RCU.
  */
 
-static void ieee80211_iface_work(struct work_struct *work);
+static void ieee80211_iface_work(struct wiphy *wiphy, struct wiphy_work *work);
 
 bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
 {
@@ -110,7 +110,7 @@
 	bool working, scanning, active;
 	unsigned int led_trig_start = 0, led_trig_stop = 0;
 
-	lockdep_assert_held(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	active = force_active ||
 		 !list_empty(&local->chanctx_list) ||
@@ -160,6 +160,8 @@
 	u8 *m;
 	int ret = 0;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (is_zero_ether_addr(local->hw.wiphy->addr_mask))
 		return 0;
 
@@ -176,7 +178,6 @@
 	if (!check_dup)
 		return ret;
 
-	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry(iter, &local->interfaces, list) {
 		if (iter == sdata)
 			continue;
@@ -195,7 +196,6 @@
 			break;
 		}
 	}
-	mutex_unlock(&local->iflist_mtx);
 
 	return ret;
 }
@@ -207,6 +207,8 @@
 	struct ieee80211_sub_if_data *scan_sdata;
 	int ret = 0;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	/* To be the most flexible here we want to only limit changing the
 	 * address if the specific interface is doing offchannel work or
 	 * scanning.
@@ -214,8 +216,6 @@
 	if (netif_carrier_ok(sdata->dev))
 		return -EBUSY;
 
-	mutex_lock(&local->mtx);
-
 	/* First check no ROC work is happening on this iface */
 	list_for_each_entry(roc, &local->roc_list, list) {
 		if (roc->sdata != sdata)
@@ -230,7 +230,7 @@
 	/* And if this iface is scanning */
 	if (local->scanning) {
 		scan_sdata = rcu_dereference_protected(local->scan_sdata,
-						       lockdep_is_held(&local->mtx));
+						       lockdep_is_held(&local->hw.wiphy->mtx));
 		if (sdata == scan_sdata)
 			ret = -EBUSY;
 	}
@@ -247,13 +247,12 @@
 	}
 
 unlock:
-	mutex_unlock(&local->mtx);
 	return ret;
 }
 
-static int ieee80211_change_mac(struct net_device *dev, void *addr)
+static int _ieee80211_change_mac(struct ieee80211_sub_if_data *sdata,
+				 void *addr)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
 	struct sockaddr *sa = addr;
 	bool check_dup = true;
@@ -278,7 +277,7 @@
 
 	if (live)
 		drv_remove_interface(local, sdata);
-	ret = eth_mac_addr(dev, sa);
+	ret = eth_mac_addr(sdata->dev, sa);
 
 	if (ret == 0) {
 		memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN);
@@ -294,6 +293,27 @@
 	return ret;
 }
 
+static int ieee80211_change_mac(struct net_device *dev, void *addr)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
+	int ret;
+
+	/*
+	 * This happens during unregistration if there's a bond device
+	 * active (maybe other cases?) and we must get removed from it.
+	 * But we really don't care anymore if it's not registered now.
+	 */
+	if (!dev->ieee80211_ptr->registered)
+		return 0;
+
+	wiphy_lock(local->hw.wiphy);
+	ret = _ieee80211_change_mac(sdata, addr);
+	wiphy_unlock(local->hw.wiphy);
+
+	return ret;
+}
+
 static inline int identical_mac_addr_allowed(int type1, int type2)
 {
 	return type1 == NL80211_IFTYPE_MONITOR ||
@@ -311,9 +331,9 @@
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_sub_if_data *nsdata;
-	int ret;
 
 	ASSERT_RTNL();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	/* we hold the RTNL here so can safely walk the list */
 	list_for_each_entry(nsdata, &local->interfaces, list) {
@@ -378,10 +398,7 @@
 		}
 	}
 
-	mutex_lock(&local->chanctx_mtx);
-	ret = ieee80211_check_combinations(sdata, NULL, 0, 0);
-	mutex_unlock(&local->chanctx_mtx);
-	return ret;
+	return ieee80211_check_combinations(sdata, NULL, 0, 0);
 }
 
 static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata,
@@ -430,12 +447,13 @@
 	if (!is_valid_ether_addr(dev->dev_addr))
 		return -EADDRNOTAVAIL;
 
+	wiphy_lock(sdata->local->hw.wiphy);
 	err = ieee80211_check_concurrent_iface(sdata, sdata->vif.type);
 	if (err)
-		return err;
+		goto out;
 
-	wiphy_lock(sdata->local->hw.wiphy);
 	err = ieee80211_do_open(&sdata->wdev, true);
+out:
 	wiphy_unlock(sdata->local->hw.wiphy);
 
 	return err;
@@ -453,6 +471,8 @@
 	bool cancel_scan;
 	struct cfg80211_nan_func *func;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
 	synchronize_rcu(); /* flush _ieee80211_wake_txqs() */
 
@@ -472,6 +492,8 @@
 	case NL80211_IFTYPE_MONITOR:
 		if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
 			break;
+		if (fbx80211_skip_mon(sdata))
+			break;
 		list_del_rcu(&sdata->u.mntr.list);
 		break;
 	default:
@@ -516,16 +538,12 @@
 	}
 
 	del_timer_sync(&local->dynamic_ps_timer);
-	cancel_work_sync(&local->dynamic_ps_enable_work);
-
-	cancel_work_sync(&sdata->recalc_smps);
+	wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work);
 
-	sdata_lock(sdata);
-	WARN(sdata->vif.valid_links,
+	WARN(ieee80211_vif_is_mld(&sdata->vif),
 	     "destroying interface with valid links 0x%04x\n",
 	     sdata->vif.valid_links);
 
-	mutex_lock(&local->mtx);
 	sdata->vif.bss_conf.csa_active = false;
 	if (sdata->vif.type == NL80211_IFTYPE_STATION)
 		sdata->deflink.u.mgd.csa_waiting_bcn = false;
@@ -534,20 +552,17 @@
 					  IEEE80211_QUEUE_STOP_REASON_CSA);
 		sdata->deflink.csa_block_tx = false;
 	}
-	mutex_unlock(&local->mtx);
-	sdata_unlock(sdata);
-
-	cancel_work_sync(&sdata->deflink.csa_finalize_work);
-	cancel_work_sync(&sdata->deflink.color_change_finalize_work);
 
-	cancel_delayed_work_sync(&sdata->deflink.dfs_cac_timer_work);
+	wiphy_work_cancel(local->hw.wiphy, &sdata->deflink.csa_finalize_work);
+	wiphy_work_cancel(local->hw.wiphy,
+			  &sdata->deflink.color_change_finalize_work);
+	wiphy_delayed_work_cancel(local->hw.wiphy,
+				  &sdata->deflink.dfs_cac_timer_work);
 
 	if (sdata->wdev.cac_started) {
 		chandef = sdata->vif.bss_conf.chandef;
 		WARN_ON(local->suspended);
-		mutex_lock(&local->mtx);
 		ieee80211_link_release_channel(&sdata->deflink);
-		mutex_unlock(&local->mtx);
 		cfg80211_cac_event(sdata->dev, &chandef,
 				   NL80211_RADAR_CAC_ABORTED,
 				   GFP_KERNEL);
@@ -575,9 +590,7 @@
 
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_AP_VLAN:
-		mutex_lock(&local->mtx);
 		list_del(&sdata->u.vlan.list);
-		mutex_unlock(&local->mtx);
 		RCU_INIT_POINTER(sdata->vif.bss_conf.chanctx_conf, NULL);
 		/* see comment in the default case below */
 		ieee80211_free_keys(sdata, true);
@@ -614,7 +627,7 @@
 		RCU_INIT_POINTER(local->p2p_sdata, NULL);
 		fallthrough;
 	default:
-		cancel_work_sync(&sdata->work);
+		wiphy_work_cancel(sdata->local->hw.wiphy, &sdata->work);
 		/*
 		 * When we get here, the interface is marked down.
 		 * Free the remaining keys, if there are any
@@ -675,9 +688,7 @@
 		if (local->monitors == 0)
 			ieee80211_del_virtual_monitor(local);
 
-		mutex_lock(&local->mtx);
 		ieee80211_recalc_idle(local);
-		mutex_unlock(&local->mtx);
 
 		if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))
 			break;
@@ -691,7 +702,7 @@
 	ieee80211_recalc_ps(local);
 
 	if (cancel_scan)
-		flush_delayed_work(&local->scan_work);
+		wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work);
 
 	if (local->open_count == 0) {
 		ieee80211_stop_device(local);
@@ -750,9 +761,9 @@
 		ieee80211_stop_mbssid(sdata);
 	}
 
-	cancel_work_sync(&sdata->activate_links_work);
-
 	wiphy_lock(sdata->local->hw.wiphy);
+	wiphy_work_cancel(sdata->local->hw.wiphy, &sdata->activate_links_work);
+
 	ieee80211_do_stop(sdata, true);
 	wiphy_unlock(sdata->local->hw.wiphy);
 
@@ -779,7 +790,7 @@
 	spin_lock_bh(&local->filter_lock);
 	__hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len);
 	spin_unlock_bh(&local->filter_lock);
-	ieee80211_queue_work(&local->hw, &local->reconfig_filter);
+	wiphy_work_queue(local->hw.wiphy, &local->reconfig_filter);
 }
 
 /*
@@ -788,6 +799,9 @@
  */
 static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
 {
+	if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
+		fbx80211_scum_teardown(sdata);
+
 	/* free extra data */
 	ieee80211_free_keys(sdata, false);
 
@@ -1046,7 +1060,7 @@
 	if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_ENCAP_OFFLOAD))
 		return;
 
-	mutex_lock(&local->iflist_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		if (!ieee80211_sdata_running(sdata))
@@ -1054,8 +1068,6 @@
 
 		ieee80211_recalc_sdata_offload(sdata);
 	}
-
-	mutex_unlock(&local->iflist_mtx);
 }
 
 void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
@@ -1133,6 +1145,7 @@
 	snprintf(sdata->name, IFNAMSIZ, "%s-monitor",
 		 wiphy_name(local->hw.wiphy));
 	sdata->wdev.iftype = NL80211_IFTYPE_MONITOR;
+	sdata->wdev.wiphy = local->hw.wiphy;
 
 	ieee80211_sdata_init(local, sdata);
 
@@ -1157,10 +1170,8 @@
 	rcu_assign_pointer(local->monitor_sdata, sdata);
 	mutex_unlock(&local->iflist_mtx);
 
-	mutex_lock(&local->mtx);
 	ret = ieee80211_link_use_channel(&sdata->deflink, &local->monitor_chandef,
 					 IEEE80211_CHANCTX_EXCLUSIVE);
-	mutex_unlock(&local->mtx);
 	if (ret) {
 		mutex_lock(&local->iflist_mtx);
 		RCU_INIT_POINTER(local->monitor_sdata, NULL);
@@ -1173,7 +1184,7 @@
 
 	skb_queue_head_init(&sdata->skb_queue);
 	skb_queue_head_init(&sdata->status_queue);
-	INIT_WORK(&sdata->work, ieee80211_iface_work);
+	wiphy_work_init(&sdata->work, ieee80211_iface_work);
 
 	return 0;
 }
@@ -1202,9 +1213,7 @@
 
 	synchronize_net();
 
-	mutex_lock(&local->mtx);
 	ieee80211_link_release_channel(&sdata->deflink);
-	mutex_unlock(&local->mtx);
 
 	drv_remove_interface(local, sdata);
 
@@ -1221,10 +1230,12 @@
 	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 	struct net_device *dev = wdev->netdev;
 	struct ieee80211_local *local = sdata->local;
-	u32 changed = 0;
+	u64 changed = 0;
 	int res;
 	u32 hw_reconf_flags = 0;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_AP_VLAN: {
 		struct ieee80211_sub_if_data *master;
@@ -1232,9 +1243,7 @@
 		if (!sdata->bss)
 			return -ENOLINK;
 
-		mutex_lock(&local->mtx);
 		list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
-		mutex_unlock(&local->mtx);
 
 		master = container_of(sdata->bss,
 				      struct ieee80211_sub_if_data, u.ap);
@@ -1251,10 +1260,8 @@
 		       sizeof(sdata->vif.hw_queue));
 		sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
 
-		mutex_lock(&local->key_mtx);
 		sdata->crypto_tx_tailroom_needed_cnt +=
 			master->crypto_tx_tailroom_needed_cnt;
-		mutex_unlock(&local->key_mtx);
 
 		break;
 		}
@@ -1281,6 +1288,9 @@
 	}
 
 	if (local->open_count == 0) {
+		/* here we can consider everything in good order (again) */
+		local->reconfig_failure = false;
+
 		res = drv_start(local);
 		if (res)
 			goto err_del_bss;
@@ -1342,9 +1352,7 @@
 		ieee80211_adjust_monitor_flags(sdata, 1);
 		ieee80211_configure_filter(local);
 		ieee80211_recalc_offload(local);
-		mutex_lock(&local->mtx);
 		ieee80211_recalc_idle(local);
-		mutex_unlock(&local->mtx);
 
 		netif_carrier_on(dev);
 		break;
@@ -1417,6 +1425,8 @@
 	case NL80211_IFTYPE_MONITOR:
 		if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
 			break;
+		if (fbx80211_skip_mon(sdata))
+			break;
 		list_add_tail_rcu(&sdata->u.mntr.list, &local->mon_list);
 		break;
 	default:
@@ -1449,11 +1459,8 @@
 		drv_stop(local);
  err_del_bss:
 	sdata->bss = NULL;
-	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
-		mutex_lock(&local->mtx);
+	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 		list_del(&sdata->u.vlan.list);
-		mutex_unlock(&local->mtx);
-	}
 	/* might already be clear but that doesn't matter */
 	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
 	return res;
@@ -1480,12 +1487,13 @@
 {
 	struct ieee80211_mgmt *mgmt = (void *)skb->data;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (ieee80211_is_action(mgmt->frame_control) &&
 	    mgmt->u.action.category == WLAN_CATEGORY_BACK) {
 		struct sta_info *sta;
 		int len = skb->len;
 
-		mutex_lock(&local->sta_mtx);
 		sta = sta_info_get_bss(sdata, mgmt->sa);
 		if (sta) {
 			switch (mgmt->u.action.u.addba_req.action_code) {
@@ -1506,7 +1514,6 @@
 				break;
 			}
 		}
-		mutex_unlock(&local->sta_mtx);
 	} else if (ieee80211_is_action(mgmt->frame_control) &&
 		   mgmt->u.action.category == WLAN_CATEGORY_VHT) {
 		switch (mgmt->u.action.u.vht_group_notif.action_code) {
@@ -1520,7 +1527,6 @@
 			band = status->band;
 			opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
 
-			mutex_lock(&local->sta_mtx);
 			sta = sta_info_get_bss(sdata, mgmt->sa);
 
 			if (sta)
@@ -1528,7 +1534,6 @@
 							    &sta->deflink,
 							    opmode, band);
 
-			mutex_unlock(&local->sta_mtx);
 			break;
 		}
 		case WLAN_VHT_ACTION_GROUPID_MGMT:
@@ -1575,7 +1580,6 @@
 		 * a block-ack session was active. That cannot be
 		 * right, so terminate the session.
 		 */
-		mutex_lock(&local->sta_mtx);
 		sta = sta_info_get_bss(sdata, mgmt->sa);
 		if (sta) {
 			u16 tid = ieee80211_get_tid(hdr);
@@ -1585,7 +1589,6 @@
 				WLAN_REASON_QSTA_REQUIRE_SETUP,
 				true);
 		}
-		mutex_unlock(&local->sta_mtx);
 	} else switch (sdata->vif.type) {
 	case NL80211_IFTYPE_STATION:
 		ieee80211_sta_rx_queued_mgmt(sdata, skb);
@@ -1622,7 +1625,7 @@
 	}
 }
 
-static void ieee80211_iface_work(struct work_struct *work)
+static void ieee80211_iface_work(struct wiphy *wiphy, struct wiphy_work *work)
 {
 	struct ieee80211_sub_if_data *sdata =
 		container_of(work, struct ieee80211_sub_if_data, work);
@@ -1682,15 +1685,8 @@
 	}
 }
 
-static void ieee80211_recalc_smps_work(struct work_struct *work)
-{
-	struct ieee80211_sub_if_data *sdata =
-		container_of(work, struct ieee80211_sub_if_data, recalc_smps);
-
-	ieee80211_recalc_smps(sdata, &sdata->deflink);
-}
-
-static void ieee80211_activate_links_work(struct work_struct *work)
+static void ieee80211_activate_links_work(struct wiphy *wiphy,
+					  struct wiphy_work *work)
 {
 	struct ieee80211_sub_if_data *sdata =
 		container_of(work, struct ieee80211_sub_if_data,
@@ -1734,9 +1730,9 @@
 
 	skb_queue_head_init(&sdata->skb_queue);
 	skb_queue_head_init(&sdata->status_queue);
-	INIT_WORK(&sdata->work, ieee80211_iface_work);
-	INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work);
-	INIT_WORK(&sdata->activate_links_work, ieee80211_activate_links_work);
+	wiphy_work_init(&sdata->work, ieee80211_iface_work);
+	wiphy_work_init(&sdata->activate_links_work,
+			ieee80211_activate_links_work);
 
 	switch (type) {
 	case NL80211_IFTYPE_P2P_GO:
@@ -1775,6 +1771,7 @@
 		sdata->dev->netdev_ops = &ieee80211_monitorif_ops;
 		sdata->u.mntr.flags = MONITOR_FLAG_CONTROL |
 				      MONITOR_FLAG_OTHER_BSS;
+		fbx80211_scum_setup(sdata);
 		break;
 	case NL80211_IFTYPE_NAN:
 		idr_init(&sdata->u.nan.function_inst_ids);
@@ -1795,7 +1792,7 @@
 	/* need to do this after the switch so vif.type is correct */
 	ieee80211_link_setup(&sdata->deflink);
 
-	ieee80211_debugfs_add_netdev(sdata);
+	ieee80211_debugfs_add_netdev(sdata, false);
 }
 
 static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
@@ -1812,7 +1809,7 @@
 		return -EBUSY;
 
 	/* for now, don't support changing while links exist */
-	if (sdata->vif.valid_links)
+	if (ieee80211_vif_is_mld(&sdata->vif))
 		return -EBUSY;
 
 	switch (sdata->vif.type) {
@@ -1926,6 +1923,8 @@
 	u8 tmp_addr[ETH_ALEN];
 	int i;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	/* default ... something at least */
 	memcpy(perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
 
@@ -1933,8 +1932,6 @@
 	    local->hw.wiphy->n_addresses <= 1)
 		return;
 
-	mutex_lock(&local->iflist_mtx);
-
 	switch (type) {
 	case NL80211_IFTYPE_MONITOR:
 		/* doesn't matter */
@@ -1958,7 +1955,7 @@
 				if (!ieee80211_sdata_running(sdata))
 					continue;
 				memcpy(perm_addr, sdata->vif.addr, ETH_ALEN);
-				goto out_unlock;
+				return;
 			}
 		}
 		fallthrough;
@@ -2044,9 +2041,6 @@
 
 		break;
 	}
-
- out_unlock:
-	mutex_unlock(&local->iflist_mtx);
 }
 
 int ieee80211_if_add(struct ieee80211_local *local, const char *name,
@@ -2060,6 +2054,7 @@
 	int ret, i;
 
 	ASSERT_RTNL();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (type == NL80211_IFTYPE_P2P_DEVICE || type == NL80211_IFTYPE_NAN) {
 		struct wireless_dev *wdev;
@@ -2147,7 +2142,7 @@
 
 	INIT_LIST_HEAD(&sdata->key_list);
 
-	INIT_DELAYED_WORK(&sdata->dec_tailroom_needed_wk,
+	wiphy_delayed_work_init(&sdata->dec_tailroom_needed_wk,
 			  ieee80211_delayed_tailroom_dec);
 
 	for (i = 0; i < NUM_NL80211_BANDS; i++) {
@@ -2226,6 +2221,7 @@
 void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
 {
 	ASSERT_RTNL();
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	mutex_lock(&sdata->local->iflist_mtx);
 	list_del_rcu(&sdata->list);
@@ -2255,7 +2251,6 @@
 {
 	struct ieee80211_sub_if_data *sdata, *tmp;
 	LIST_HEAD(unreg_list);
-	LIST_HEAD(wdev_list);
 
 	ASSERT_RTNL();
 
@@ -2272,28 +2267,34 @@
 	 */
 	cfg80211_shutdown_all_interfaces(local->hw.wiphy);
 
+	wiphy_lock(local->hw.wiphy);
+
 	WARN(local->open_count, "%s: open count remains %d\n",
 	     wiphy_name(local->hw.wiphy), local->open_count);
 
-	ieee80211_txq_teardown_flows(local);
-
 	mutex_lock(&local->iflist_mtx);
-	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
-		list_del(&sdata->list);
-
-		if (sdata->dev)
-			unregister_netdevice_queue(sdata->dev, &unreg_list);
-		else
-			list_add(&sdata->list, &wdev_list);
-	}
+	list_splice_init(&local->interfaces, &unreg_list);
 	mutex_unlock(&local->iflist_mtx);
 
-	unregister_netdevice_many(&unreg_list);
+	list_for_each_entry_safe(sdata, tmp, &unreg_list, list) {
+		bool netdev = sdata->dev;
+
+		/*
+		 * Remove IP addresses explicitly, since the notifier will
+		 * skip the callbacks if wdev->registered is false, since
+		 * we can't acquire the wiphy_lock() again there if already
+		 * inside this locked section.
+		 */
+		sdata->vif.cfg.arp_addr_cnt = 0;
+		if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+		    sdata->u.mgd.associated)
+			ieee80211_vif_cfg_change_notify(sdata,
+							BSS_CHANGED_ARP_FILTER);
 
-	wiphy_lock(local->hw.wiphy);
-	list_for_each_entry_safe(sdata, tmp, &wdev_list, list) {
 		list_del(&sdata->list);
 		cfg80211_unregister_wdev(&sdata->wdev);
+
+		if (!netdev)
 		kfree(sdata);
 	}
 	wiphy_unlock(local->hw.wiphy);
diff -ruw linux-6.4/net/mac80211/key.c linux-6.4-fbx/net/mac80211/key.c
--- linux-6.4/net/mac80211/key.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/key.c	2023-11-07 13:38:44.078257129 +0100
@@ -6,9 +6,10 @@
  * Copyright 2007-2008	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright 2015-2017	Intel Deutschland GmbH
- * Copyright 2018-2020, 2022  Intel Corporation
+ * Copyright 2018-2020, 2022-2023  Intel Corporation
  */
 
+#include <crypto/utils.h>
 #include <linux/if_ether.h>
 #include <linux/etherdevice.h>
 #include <linux/list.h>
@@ -17,7 +18,6 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <net/mac80211.h>
-#include <crypto/algapi.h>
 #include <asm/unaligned.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
@@ -53,11 +53,6 @@
 
 static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
 
-static void assert_key_lock(struct ieee80211_local *local)
-{
-	lockdep_assert_held(&local->key_mtx);
-}
-
 static void
 update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta)
 {
@@ -67,7 +62,7 @@
 		return;
 
 	/* crypto_tx_tailroom_needed_cnt is protected by this */
-	assert_key_lock(sdata->local);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	rcu_read_lock();
 
@@ -98,7 +93,7 @@
 	 * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
 	 */
 
-	assert_key_lock(sdata->local);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	update_vlan_tailroom_need_count(sdata, 1);
 
@@ -114,7 +109,7 @@
 static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
 					 int delta)
 {
-	assert_key_lock(sdata->local);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);
 
@@ -129,6 +124,7 @@
 	int ret = -EOPNOTSUPP;
 
 	might_sleep();
+	lockdep_assert_wiphy(key->local->hw.wiphy);
 
 	if (key->flags & KEY_FLAG_TAINTED) {
 		/* If we get here, it's during resume and the key is
@@ -151,8 +147,6 @@
 	if (!key->local->ops->set_key)
 		goto out_unsupported;
 
-	assert_key_lock(key->local);
-
 	sta = key->sta;
 
 	/*
@@ -172,6 +166,12 @@
 		 * Hence, don't send GTKs for VLAN interfaces to the driver.
 		 */
 		if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+			if (ieee80211_hw_check(&key->local->hw,
+					       APVLAN_NEED_MCAST_TO_UCAST)) {
+				/* no need to fail, this key will
+				 * never be used */
+				return 0;
+			}
 			ret = 1;
 			goto out_unsupported;
 		}
@@ -242,14 +242,14 @@
 	if (!key || !key->local->ops->set_key)
 		return;
 
-	assert_key_lock(key->local);
-
 	if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
 		return;
 
 	sta = key->sta;
 	sdata = key->sdata;
 
+	lockdep_assert_wiphy(key->local->hw.wiphy);
+
 	if (key->conf.link_id >= 0 && sdata->vif.active_links &&
 	    !(sdata->vif.active_links & BIT(key->conf.link_id)))
 		return;
@@ -275,7 +275,7 @@
 	struct sta_info *sta = key->sta;
 	struct ieee80211_local *local = key->local;
 
-	assert_key_lock(local);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	set_sta_flag(sta, WLAN_STA_USES_ENCRYPTION);
 
@@ -300,7 +300,7 @@
 	struct sta_info *sta = new->sta;
 	int i;
 
-	assert_key_lock(local);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX) {
 		/* Extended Key ID key install, initial one or rekey */
@@ -317,11 +317,9 @@
 			 * job done for the few ms we need it.)
 			 */
 			set_sta_flag(sta, WLAN_STA_BLOCK_BA);
-			mutex_lock(&sta->ampdu_mlme.mtx);
 			for (i = 0; i <  IEEE80211_NUM_TIDS; i++)
-				___ieee80211_stop_tx_ba_session(sta, i,
+				__ieee80211_stop_tx_ba_session(sta, i,
 								AGG_STOP_LOCAL_REQUEST);
-			mutex_unlock(&sta->ampdu_mlme.mtx);
 		}
 	} else if (old) {
 		/* Rekey without Extended Key ID.
@@ -358,12 +356,14 @@
 	struct ieee80211_sub_if_data *sdata = link->sdata;
 	struct ieee80211_key *key = NULL;
 
-	assert_key_lock(sdata->local);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (idx >= 0 && idx < NUM_DEFAULT_KEYS) {
-		key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
+		key = wiphy_dereference(sdata->local->hw.wiphy,
+					sdata->keys[idx]);
 		if (!key)
-			key = key_mtx_dereference(sdata->local, link->gtk[idx]);
+			key = wiphy_dereference(sdata->local->hw.wiphy,
+						link->gtk[idx]);
 	}
 
 	if (uni) {
@@ -382,9 +382,9 @@
 void ieee80211_set_default_key(struct ieee80211_link_data *link, int idx,
 			       bool uni, bool multi)
 {
-	mutex_lock(&link->sdata->local->key_mtx);
+	lockdep_assert_wiphy(link->sdata->local->hw.wiphy);
+
 	__ieee80211_set_default_key(link, idx, uni, multi);
-	mutex_unlock(&link->sdata->local->key_mtx);
 }
 
 static void
@@ -393,11 +393,12 @@
 	struct ieee80211_sub_if_data *sdata = link->sdata;
 	struct ieee80211_key *key = NULL;
 
-	assert_key_lock(sdata->local);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (idx >= NUM_DEFAULT_KEYS &&
 	    idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
-		key = key_mtx_dereference(sdata->local, link->gtk[idx]);
+		key = wiphy_dereference(sdata->local->hw.wiphy,
+					link->gtk[idx]);
 
 	rcu_assign_pointer(link->default_mgmt_key, key);
 
@@ -407,9 +408,9 @@
 void ieee80211_set_default_mgmt_key(struct ieee80211_link_data *link,
 				    int idx)
 {
-	mutex_lock(&link->sdata->local->key_mtx);
+	lockdep_assert_wiphy(link->sdata->local->hw.wiphy);
+
 	__ieee80211_set_default_mgmt_key(link, idx);
-	mutex_unlock(&link->sdata->local->key_mtx);
 }
 
 static void
@@ -418,12 +419,13 @@
 	struct ieee80211_sub_if_data *sdata = link->sdata;
 	struct ieee80211_key *key = NULL;
 
-	assert_key_lock(sdata->local);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS &&
 	    idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
 	    NUM_DEFAULT_BEACON_KEYS)
-		key = key_mtx_dereference(sdata->local, link->gtk[idx]);
+		key = wiphy_dereference(sdata->local->hw.wiphy,
+					link->gtk[idx]);
 
 	rcu_assign_pointer(link->default_beacon_key, key);
 
@@ -433,9 +435,9 @@
 void ieee80211_set_default_beacon_key(struct ieee80211_link_data *link,
 				      int idx)
 {
-	mutex_lock(&link->sdata->local->key_mtx);
+	lockdep_assert_wiphy(link->sdata->local->hw.wiphy);
+
 	__ieee80211_set_default_beacon_key(link, idx);
-	mutex_unlock(&link->sdata->local->key_mtx);
 }
 
 static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
@@ -452,6 +454,8 @@
 	bool defunikey, defmultikey, defmgmtkey, defbeaconkey;
 	bool is_wep;
 
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
 	/* caller must provide at least one old/new */
 	if (WARN_ON(!new && !old))
 		return 0;
@@ -482,7 +486,7 @@
 
 		if (sta) {
 			link_sta = rcu_dereference_protected(sta->link[link_id],
-							     lockdep_is_held(&sta->local->sta_mtx));
+							     lockdep_is_held(&sta->local->hw.wiphy->mtx));
 			if (!link_sta)
 				return -ENOLINK;
 		}
@@ -512,6 +516,8 @@
 	} else {
 		if (!new->local->wowlan)
 			ret = ieee80211_key_enable_hw_accel(new);
+		else
+			new->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
 	}
 
 	if (ret)
@@ -537,16 +543,16 @@
 			ieee80211_check_fast_rx(sta);
 	} else {
 		defunikey = old &&
-			old == key_mtx_dereference(sdata->local,
+			old == wiphy_dereference(sdata->local->hw.wiphy,
 						sdata->default_unicast_key);
 		defmultikey = old &&
-			old == key_mtx_dereference(sdata->local,
+			old == wiphy_dereference(sdata->local->hw.wiphy,
 						   link->default_multicast_key);
 		defmgmtkey = old &&
-			old == key_mtx_dereference(sdata->local,
+			old == wiphy_dereference(sdata->local->hw.wiphy,
 						   link->default_mgmt_key);
 		defbeaconkey = old &&
-			old == key_mtx_dereference(sdata->local,
+			old == wiphy_dereference(sdata->local->hw.wiphy,
 						   link->default_beacon_key);
 
 		if (defunikey && !new)
@@ -771,7 +777,8 @@
 		if (delay_tailroom) {
 			/* see ieee80211_delayed_tailroom_dec */
 			sdata->crypto_tx_tailroom_pending_dec++;
-			schedule_delayed_work(&sdata->dec_tailroom_needed_wk,
+			wiphy_delayed_work_queue(sdata->local->hw.wiphy,
+						 &sdata->dec_tailroom_needed_wk,
 					      HZ/2);
 		} else {
 			decrease_tailroom_need_count(sdata, 1);
@@ -798,6 +805,9 @@
 
 void ieee80211_key_free_unused(struct ieee80211_key *key)
 {
+	if (!key)
+		return;
+
 	WARN_ON(key->sdata || key->local);
 	ieee80211_key_free_common(key);
 }
@@ -850,58 +860,64 @@
 	 * can cause warnings to appear.
 	 */
 	bool delay_tailroom = sdata->vif.type == NL80211_IFTYPE_STATION;
-	int ret = -EOPNOTSUPP;
+	int ret;
 
-	mutex_lock(&sdata->local->key_mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (sta && pairwise) {
 		struct ieee80211_key *alt_key;
 
-		old_key = key_mtx_dereference(sdata->local, sta->ptk[idx]);
-		alt_key = key_mtx_dereference(sdata->local, sta->ptk[idx ^ 1]);
+		old_key = wiphy_dereference(sdata->local->hw.wiphy,
+					    sta->ptk[idx]);
+		alt_key = wiphy_dereference(sdata->local->hw.wiphy,
+					    sta->ptk[idx ^ 1]);
 
 		/* The rekey code assumes that the old and new key are using
 		 * the same cipher. Enforce the assumption for pairwise keys.
 		 */
 		if ((alt_key && alt_key->conf.cipher != key->conf.cipher) ||
-		    (old_key && old_key->conf.cipher != key->conf.cipher))
+		    (old_key && old_key->conf.cipher != key->conf.cipher)) {
+			ret = -EOPNOTSUPP;
 			goto out;
+		}
 	} else if (sta) {
 		struct link_sta_info *link_sta = &sta->deflink;
 		int link_id = key->conf.link_id;
 
 		if (link_id >= 0) {
 			link_sta = rcu_dereference_protected(sta->link[link_id],
-							     lockdep_is_held(&sta->local->sta_mtx));
+							     lockdep_is_held(&sta->local->hw.wiphy->mtx));
 			if (!link_sta) {
 				ret = -ENOLINK;
 				goto out;
 			}
 		}
 
-		old_key = key_mtx_dereference(sdata->local, link_sta->gtk[idx]);
+		old_key = wiphy_dereference(sdata->local->hw.wiphy,
+					    link_sta->gtk[idx]);
 	} else {
 		if (idx < NUM_DEFAULT_KEYS)
-			old_key = key_mtx_dereference(sdata->local,
+			old_key = wiphy_dereference(sdata->local->hw.wiphy,
 						      sdata->keys[idx]);
 		if (!old_key)
-			old_key = key_mtx_dereference(sdata->local,
+			old_key = wiphy_dereference(sdata->local->hw.wiphy,
 						      link->gtk[idx]);
 	}
 
 	/* Non-pairwise keys must also not switch the cipher on rekey */
 	if (!pairwise) {
-		if (old_key && old_key->conf.cipher != key->conf.cipher)
+		if (old_key && old_key->conf.cipher != key->conf.cipher) {
+			ret = -EOPNOTSUPP;
 			goto out;
 	}
+	}
 
 	/*
 	 * Silently accept key re-installation without really installing the
 	 * new version of the key to avoid nonce reuse or replay issues.
 	 */
 	if (ieee80211_key_identical(sdata, old_key, key)) {
-		ieee80211_key_free_unused(key);
-		ret = 0;
+		ret = -EALREADY;
 		goto out;
 	}
 
@@ -926,9 +942,10 @@
 		ieee80211_key_free(key, delay_tailroom);
 	}
 
- out:
-	mutex_unlock(&sdata->local->key_mtx);
+	key = NULL;
 
+ out:
+	ieee80211_key_free_unused(key);
 	return ret;
 }
 
@@ -954,8 +971,6 @@
 
 	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
-	mutex_lock(&sdata->local->key_mtx);
-
 	sdata->crypto_tx_tailroom_needed_cnt = 0;
 	sdata->crypto_tx_tailroom_pending_dec = 0;
 
@@ -972,8 +987,6 @@
 			ieee80211_key_enable_hw_accel(key);
 		}
 	}
-
-	mutex_unlock(&sdata->local->key_mtx);
 }
 
 void ieee80211_iter_keys(struct ieee80211_hw *hw,
@@ -991,7 +1004,6 @@
 
 	lockdep_assert_wiphy(hw->wiphy);
 
-	mutex_lock(&local->key_mtx);
 	if (vif) {
 		sdata = vif_to_sdata(vif);
 		list_for_each_entry_safe(key, tmp, &sdata->key_list, list)
@@ -1006,7 +1018,6 @@
 				     key->sta ? &key->sta->sta : NULL,
 				     &key->conf, iter_data);
 	}
-	mutex_unlock(&local->key_mtx);
 }
 EXPORT_SYMBOL(ieee80211_iter_keys);
 
@@ -1086,7 +1097,8 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_key *key, *tmp;
 
-	mutex_lock(&local->key_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	list_for_each_entry_safe(key, tmp, &sdata->key_list, list) {
 		if (key->conf.link_id != link->link_id)
 			continue;
@@ -1095,7 +1107,6 @@
 				      key, NULL);
 		list_add_tail(&key->list, keys);
 	}
-	mutex_unlock(&local->key_mtx);
 }
 
 void ieee80211_free_key_list(struct ieee80211_local *local,
@@ -1103,10 +1114,10 @@
 {
 	struct ieee80211_key *key, *tmp;
 
-	mutex_lock(&local->key_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	list_for_each_entry_safe(key, tmp, keys, list)
 		__ieee80211_key_destroy(key, false);
-	mutex_unlock(&local->key_mtx);
 }
 
 void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
@@ -1118,9 +1129,10 @@
 	struct ieee80211_key *key, *tmp;
 	LIST_HEAD(keys);
 
-	cancel_delayed_work_sync(&sdata->dec_tailroom_needed_wk);
+	wiphy_delayed_work_cancel(local->hw.wiphy,
+				  &sdata->dec_tailroom_needed_wk);
 
-	mutex_lock(&local->key_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	ieee80211_free_keys_iface(sdata, &keys);
 
@@ -1153,8 +1165,6 @@
 			WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt ||
 				     vlan->crypto_tx_tailroom_pending_dec);
 	}
-
-	mutex_unlock(&local->key_mtx);
 }
 
 void ieee80211_free_sta_keys(struct ieee80211_local *local,
@@ -1163,9 +1173,10 @@
 	struct ieee80211_key *key;
 	int i;
 
-	mutex_lock(&local->key_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	for (i = 0; i < ARRAY_SIZE(sta->deflink.gtk); i++) {
-		key = key_mtx_dereference(local, sta->deflink.gtk[i]);
+		key = wiphy_dereference(local->hw.wiphy, sta->deflink.gtk[i]);
 		if (!key)
 			continue;
 		ieee80211_key_replace(key->sdata, NULL, key->sta,
@@ -1176,7 +1187,7 @@
 	}
 
 	for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
-		key = key_mtx_dereference(local, sta->ptk[i]);
+		key = wiphy_dereference(local->hw.wiphy, sta->ptk[i]);
 		if (!key)
 			continue;
 		ieee80211_key_replace(key->sdata, NULL, key->sta,
@@ -1185,11 +1196,10 @@
 		__ieee80211_key_destroy(key, key->sdata->vif.type ==
 					NL80211_IFTYPE_STATION);
 	}
-
-	mutex_unlock(&local->key_mtx);
 }
 
-void ieee80211_delayed_tailroom_dec(struct work_struct *wk)
+void ieee80211_delayed_tailroom_dec(struct wiphy *wiphy,
+				    struct wiphy_work *wk)
 {
 	struct ieee80211_sub_if_data *sdata;
 
@@ -1212,11 +1222,9 @@
 	 * within an ESS this usually won't happen.
 	 */
 
-	mutex_lock(&sdata->local->key_mtx);
 	decrease_tailroom_need_count(sdata,
 				     sdata->crypto_tx_tailroom_pending_dec);
 	sdata->crypto_tx_tailroom_pending_dec = 0;
-	mutex_unlock(&sdata->local->key_mtx);
 }
 
 void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid,
@@ -1345,7 +1353,7 @@
 
 	key = container_of(keyconf, struct ieee80211_key, conf);
 
-	assert_key_lock(key->local);
+	lockdep_assert_wiphy(key->local->hw.wiphy);
 
 	/*
 	 * if key was uploaded, we assume the driver will/has remove(d)
diff -ruw linux-6.4/net/mac80211/key.h linux-6.4-fbx/net/mac80211/key.h
--- linux-6.4/net/mac80211/key.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/key.h	2023-11-07 13:38:44.078257129 +0100
@@ -2,7 +2,7 @@
 /*
  * Copyright 2002-2004, Instant802 Networks, Inc.
  * Copyright 2005, Devicescape Software, Inc.
- * Copyright (C) 2019, 2022 Intel Corporation
+ * Copyright (C) 2019, 2022-2023 Intel Corporation
  */
 
 #ifndef IEEE80211_KEY_H
@@ -168,12 +168,7 @@
 int ieee80211_key_switch_links(struct ieee80211_sub_if_data *sdata,
 			       unsigned long del_links_mask,
 			       unsigned long add_links_mask);
-
-#define key_mtx_dereference(local, ref) \
-	rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx)))
-#define rcu_dereference_check_key_mtx(local, ref) \
-	rcu_dereference_check(ref, lockdep_is_held(&((local)->key_mtx)))
-
-void ieee80211_delayed_tailroom_dec(struct work_struct *wk);
+void ieee80211_delayed_tailroom_dec(struct wiphy *wiphy,
+				    struct wiphy_work *wk);
 
 #endif /* IEEE80211_KEY_H */
diff -ruw linux-6.4/net/mac80211/link.c linux-6.4-fbx/net/mac80211/link.c
--- linux-6.4/net/mac80211/link.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/link.c	2023-11-07 13:38:44.078257129 +0100
@@ -37,15 +37,15 @@
 	link_conf->link_id = link_id;
 	link_conf->vif = &sdata->vif;
 
-	INIT_WORK(&link->csa_finalize_work,
+	wiphy_work_init(&link->csa_finalize_work,
 		  ieee80211_csa_finalize_work);
-	INIT_WORK(&link->color_change_finalize_work,
+	wiphy_work_init(&link->color_change_finalize_work,
 		  ieee80211_color_change_finalize_work);
 	INIT_DELAYED_WORK(&link->color_collision_detect_work,
 			  ieee80211_color_collision_detection_work);
 	INIT_LIST_HEAD(&link->assigned_chanctx_list);
 	INIT_LIST_HEAD(&link->reserved_chanctx_list);
-	INIT_DELAYED_WORK(&link->dfs_cac_timer_work,
+	wiphy_delayed_work_init(&link->dfs_cac_timer_work,
 			  ieee80211_dfs_cac_timer_work);
 
 	if (!deflink) {
@@ -142,25 +142,34 @@
 }
 
 static void ieee80211_set_vif_links_bitmaps(struct ieee80211_sub_if_data *sdata,
-					    u16 links)
+					    u16 valid_links, u16 dormant_links)
 {
-	sdata->vif.valid_links = links;
+	sdata->vif.valid_links = valid_links;
+	sdata->vif.dormant_links = dormant_links;
 
-	if (!links) {
+	if (!valid_links ||
+	    WARN((~valid_links & dormant_links) ||
+		 !(valid_links & ~dormant_links),
+		 "Invalid links: valid=0x%x, dormant=0x%x",
+		 valid_links, dormant_links)) {
 		sdata->vif.active_links = 0;
+		sdata->vif.dormant_links = 0;
 		return;
 	}
 
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_AP:
 		/* in an AP all links are always active */
-		sdata->vif.active_links = links;
+		sdata->vif.active_links = valid_links;
+
+		/* AP links are not expected to be disabled */
+		WARN_ON(dormant_links);
 		break;
 	case NL80211_IFTYPE_STATION:
 		if (sdata->vif.active_links)
 			break;
-		WARN_ON(hweight16(links) > 1);
-		sdata->vif.active_links = links;
+		sdata->vif.active_links = valid_links & ~dormant_links;
+		WARN_ON(hweight16(sdata->vif.active_links) > 1);
 		break;
 	default:
 		WARN_ON(1);
@@ -169,7 +178,7 @@
 
 static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata,
 				      struct link_container **to_free,
-				      u16 new_links)
+				      u16 new_links, u16 dormant_links)
 {
 	u16 old_links = sdata->vif.valid_links;
 	u16 old_active = sdata->vif.active_links;
@@ -182,11 +191,11 @@
 	struct ieee80211_link_data *old_data[IEEE80211_MLD_MAX_NUM_LINKS];
 	bool use_deflink = old_links == 0; /* set for error case */
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	memset(to_free, 0, sizeof(links));
 
-	if (old_links == new_links)
+	if (old_links == new_links && dormant_links == sdata->vif.dormant_links)
 		return 0;
 
 	/* if there were no old links, need to clear the pointers to deflink */
@@ -226,6 +235,9 @@
 		RCU_INIT_POINTER(sdata->vif.link_conf[link_id], NULL);
 	}
 
+	if (!old_links)
+		ieee80211_debugfs_recreate_netdev(sdata, true);
+
 	/* link them into data structures */
 	for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
 		WARN_ON(!use_deflink &&
@@ -245,20 +257,22 @@
 		/* for keys we will not be able to undo this */
 		ieee80211_tear_down_links(sdata, to_free, rem);
 
-		ieee80211_set_vif_links_bitmaps(sdata, new_links);
+		ieee80211_set_vif_links_bitmaps(sdata, new_links, dormant_links);
 
 		/* tell the driver */
 		ret = drv_change_vif_links(sdata->local, sdata,
 					   old_links & old_active,
 					   new_links & sdata->vif.active_links,
 					   old);
+		if (!new_links)
+			ieee80211_debugfs_recreate_netdev(sdata, false);
 	}
 
 	if (ret) {
 		/* restore config */
 		memcpy(sdata->link, old_data, sizeof(old_data));
 		memcpy(sdata->vif.link_conf, old, sizeof(old));
-		ieee80211_set_vif_links_bitmaps(sdata, old_links);
+		ieee80211_set_vif_links_bitmaps(sdata, old_links, dormant_links);
 		/* and free (only) the newly allocated links */
 		memset(to_free, 0, sizeof(links));
 		goto free;
@@ -282,34 +296,18 @@
 }
 
 int ieee80211_vif_set_links(struct ieee80211_sub_if_data *sdata,
-			    u16 new_links)
+			    u16 new_links, u16 dormant_links)
 {
 	struct link_container *links[IEEE80211_MLD_MAX_NUM_LINKS];
 	int ret;
 
-	ret = ieee80211_vif_update_links(sdata, links, new_links);
+	ret = ieee80211_vif_update_links(sdata, links, new_links,
+					 dormant_links);
 	ieee80211_free_links(sdata, links);
 
 	return ret;
 }
 
-void ieee80211_vif_clear_links(struct ieee80211_sub_if_data *sdata)
-{
-	struct link_container *links[IEEE80211_MLD_MAX_NUM_LINKS];
-
-	/*
-	 * The locking here is different because when we free links
-	 * in the station case we need to be able to cancel_work_sync()
-	 * something that also takes the lock.
-	 */
-
-	sdata_lock(sdata);
-	ieee80211_vif_update_links(sdata, links, 0);
-	sdata_unlock(sdata);
-
-	ieee80211_free_links(sdata, links);
-}
-
 static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata,
 				       u16 active_links)
 {
@@ -328,8 +326,7 @@
 	if (sdata->vif.type != NL80211_IFTYPE_STATION)
 		return -EINVAL;
 
-	/* cannot activate links that don't exist */
-	if (active_links & ~sdata->vif.valid_links)
+	if (active_links & ~ieee80211_vif_usable_links(&sdata->vif))
 		return -EINVAL;
 
 	/* nothing to do */
@@ -445,10 +442,8 @@
 	u16 old_active;
 	int ret;
 
-	sdata_lock(sdata);
-	mutex_lock(&local->sta_mtx);
-	mutex_lock(&local->mtx);
-	mutex_lock(&local->key_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	old_active = sdata->vif.active_links;
 	if (old_active & active_links) {
 		/*
@@ -464,10 +459,6 @@
 		/* otherwise switch directly */
 		ret = _ieee80211_set_active_links(sdata, active_links);
 	}
-	mutex_unlock(&local->key_mtx);
-	mutex_unlock(&local->mtx);
-	mutex_unlock(&local->sta_mtx);
-	sdata_unlock(sdata);
 
 	return ret;
 }
@@ -484,8 +475,7 @@
 	if (sdata->vif.type != NL80211_IFTYPE_STATION)
 		return;
 
-	/* cannot activate links that don't exist */
-	if (active_links & ~sdata->vif.valid_links)
+	if (active_links & ~ieee80211_vif_usable_links(&sdata->vif))
 		return;
 
 	/* nothing to do */
@@ -493,6 +483,6 @@
 		return;
 
 	sdata->desired_active_links = active_links;
-	schedule_work(&sdata->activate_links_work);
+	wiphy_work_queue(sdata->local->hw.wiphy, &sdata->activate_links_work);
 }
 EXPORT_SYMBOL_GPL(ieee80211_set_active_links_async);
diff -ruw linux-6.4/net/mac80211/main.c linux-6.4-fbx/net/mac80211/main.c
--- linux-6.4/net/mac80211/main.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/main.c	2024-04-19 16:04:28.969736104 +0200
@@ -5,7 +5,7 @@
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (C) 2017     Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
  */
 
 #include <net/mac80211.h>
@@ -33,6 +33,8 @@
 #include "wep.h"
 #include "led.h"
 #include "debugfs.h"
+#include "nmeshd_nl.h"
+#include "fbx_scum.h"
 
 void ieee80211_configure_filter(struct ieee80211_local *local)
 {
@@ -84,7 +86,8 @@
 	local->filter_flags = new_flags & ~(1<<31);
 }
 
-static void ieee80211_reconfig_filter(struct work_struct *work)
+static void ieee80211_reconfig_filter(struct wiphy *wiphy,
+				      struct wiphy_work *work)
 {
 	struct ieee80211_local *local =
 		container_of(work, struct ieee80211_local, reconfig_filter);
@@ -206,7 +209,8 @@
 				   BSS_CHANGED_PS |\
 				   BSS_CHANGED_IBSS |\
 				   BSS_CHANGED_ARP_FILTER |\
-				   BSS_CHANGED_SSID)
+				   BSS_CHANGED_SSID |\
+				   BSS_CHANGED_MLD_VALID_LINKS)
 
 void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
 				      u64 changed)
@@ -291,7 +295,7 @@
 	drv_link_info_changed(local, sdata, link->conf, link->link_id, changed);
 }
 
-u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
+u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
 {
 	sdata->vif.bss_conf.use_cts_prot = false;
 	sdata->vif.bss_conf.use_short_preamble = false;
@@ -335,14 +339,12 @@
 	struct ieee80211_sub_if_data *sdata;
 	int ret;
 
-	/* wait for scan work complete */
 	flush_workqueue(local->workqueue);
-	flush_work(&local->sched_scan_stopped_work);
-	flush_work(&local->radar_detected_work);
 
 	rtnl_lock();
 	/* we might do interface manipulations, so need both */
 	wiphy_lock(local->hw.wiphy);
+	wiphy_work_flush(local->hw.wiphy, NULL);
 
 	WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
 	     "%s called with hardware scan in progress\n", __func__);
@@ -364,22 +366,21 @@
 			 * The exception is ieee80211_chswitch_done.
 			 * Then we can have a race...
 			 */
-			cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
-			if (sdata->vif.bss_conf.csa_active) {
-				sdata_lock(sdata);
+			wiphy_work_cancel(local->hw.wiphy,
+					  &sdata->u.mgd.csa_connection_drop_work);
+			if (sdata->vif.bss_conf.csa_active)
 				ieee80211_sta_connection_lost(sdata,
 							      WLAN_REASON_UNSPECIFIED,
 							      false);
-				sdata_unlock(sdata);
 			}
-		}
-		flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+		wiphy_delayed_work_flush(local->hw.wiphy,
+					 &sdata->dec_tailroom_needed_wk);
 	}
 	ieee80211_scan_cancel(local);
 
 	/* make sure any new ROC will consider local->in_reconfig */
-	flush_delayed_work(&local->roc_work);
-	flush_work(&local->hw_roc_done);
+	wiphy_delayed_work_flush(local->hw.wiphy, &local->roc_work);
+	wiphy_work_flush(local->hw.wiphy, &local->hw_roc_done);
 
 	/* wait for all packet processing to be done */
 	synchronize_net();
@@ -438,7 +439,7 @@
 	if (!wdev)
 		return NOTIFY_DONE;
 
-	if (wdev->wiphy != local->hw.wiphy)
+	if (wdev->wiphy != local->hw.wiphy || !wdev->registered)
 		return NOTIFY_DONE;
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
@@ -453,7 +454,25 @@
 		return NOTIFY_DONE;
 
 	ifmgd = &sdata->u.mgd;
-	sdata_lock(sdata);
+
+	/*
+	 * The nested here is needed to convince lockdep that this is
+	 * all OK. Yes, we lock the wiphy mutex here while we already
+	 * hold the notifier rwsem, that's the normal case. And yes,
+	 * we also acquire the notifier rwsem again when unregistering
+	 * a netdev while we already hold the wiphy mutex, so it does
+	 * look like a typical ABBA deadlock.
+	 *
+	 * However, both of these things happen with the RTNL held
+	 * already. Therefore, they can't actually happen, since the
+	 * lock orders really are ABC and ACB, which is fine due to
+	 * the RTNL (A).
+	 *
+	 * We still need to prevent recursion, which is accomplished
+	 * by the !wdev->registered check above.
+	 */
+	mutex_lock_nested(&local->hw.wiphy->mtx, 1);
+	__acquire(&local->hw.wiphy->mtx);
 
 	/* Copy the addresses to the vif config list */
 	ifa = rtnl_dereference(idev->ifa_list);
@@ -470,7 +489,7 @@
 	if (ifmgd->associated)
 		ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_ARP_FILTER);
 
-	sdata_unlock(sdata);
+	wiphy_unlock(local->hw.wiphy);
 
 	return NOTIFY_OK;
 }
@@ -783,9 +802,6 @@
 	__hw_addr_init(&local->mc_list);
 
 	mutex_init(&local->iflist_mtx);
-	mutex_init(&local->mtx);
-
-	mutex_init(&local->key_mtx);
 	spin_lock_init(&local->filter_lock);
 	spin_lock_init(&local->rx_path_lock);
 	spin_lock_init(&local->queue_stop_reason_lock);
@@ -806,25 +822,24 @@
 	spin_lock_init(&local->handle_wake_tx_queue_lock);
 
 	INIT_LIST_HEAD(&local->chanctx_list);
-	mutex_init(&local->chanctx_mtx);
 
-	INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
+	wiphy_delayed_work_init(&local->scan_work, ieee80211_scan_work);
 
 	INIT_WORK(&local->restart_work, ieee80211_restart_work);
 
-	INIT_WORK(&local->radar_detected_work,
+	wiphy_work_init(&local->radar_detected_work,
 		  ieee80211_dfs_radar_detected_work);
 
-	INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
+	wiphy_work_init(&local->reconfig_filter, ieee80211_reconfig_filter);
 	local->smps_mode = IEEE80211_SMPS_OFF;
 
-	INIT_WORK(&local->dynamic_ps_enable_work,
+	wiphy_work_init(&local->dynamic_ps_enable_work,
 		  ieee80211_dynamic_ps_enable_work);
-	INIT_WORK(&local->dynamic_ps_disable_work,
+	wiphy_work_init(&local->dynamic_ps_disable_work,
 		  ieee80211_dynamic_ps_disable_work);
 	timer_setup(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, 0);
 
-	INIT_WORK(&local->sched_scan_stopped_work,
+	wiphy_work_init(&local->sched_scan_stopped_work,
 		  ieee80211_sched_scan_stopped_work);
 
 	spin_lock_init(&local->ack_status_lock);
@@ -847,6 +862,7 @@
 
 	local->hw.radiotap_timestamp.units_pos = -1;
 	local->hw.radiotap_timestamp.accuracy = -1;
+	fbx80211_scum_local_init(local);
 
 	return &local->hw;
  err_free:
@@ -1445,6 +1461,7 @@
 	ieee80211_remove_interfaces(local);
 	rtnl_unlock();
  fail_rate:
+	ieee80211_txq_teardown_flows(local);
  fail_flows:
 	ieee80211_led_exit(local);
 	destroy_workqueue(local->workqueue);
@@ -1481,13 +1498,17 @@
 	 */
 	ieee80211_remove_interfaces(local);
 
+	ieee80211_txq_teardown_flows(local);
+
+	wiphy_lock(local->hw.wiphy);
+	wiphy_delayed_work_cancel(local->hw.wiphy, &local->roc_work);
+	wiphy_work_cancel(local->hw.wiphy, &local->reconfig_filter);
+	wiphy_work_cancel(local->hw.wiphy, &local->sched_scan_stopped_work);
+	wiphy_work_cancel(local->hw.wiphy, &local->radar_detected_work);
+	wiphy_unlock(local->hw.wiphy);
 	rtnl_unlock();
 
-	cancel_delayed_work_sync(&local->roc_work);
 	cancel_work_sync(&local->restart_work);
-	cancel_work_sync(&local->reconfig_filter);
-	flush_work(&local->sched_scan_stopped_work);
-	flush_work(&local->radar_detected_work);
 
 	ieee80211_clear_tx_pending(local);
 	rate_control_deinitialize(local);
@@ -1517,8 +1538,9 @@
 	struct ieee80211_local *local = hw_to_local(hw);
 	enum nl80211_band band;
 
+	fbx80211_scum_local_cleanup(local);
+
 	mutex_destroy(&local->iflist_mtx);
-	mutex_destroy(&local->mtx);
 
 	if (local->wiphy_ciphers_allocated) {
 		kfree(local->hw.wiphy->cipher_suites);
@@ -1587,6 +1609,7 @@
 	drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE,
 				     &drop_reason_list_unusable);
 
+	nmeshd_nl_init();
 	return 0;
  err_netdev:
 	rc80211_minstrel_exit();
@@ -1596,6 +1619,8 @@
 
 static void __exit ieee80211_exit(void)
 {
+	nmeshd_nl_deinit();
+
 	rc80211_minstrel_exit();
 
 	ieee80211s_stop();
diff -ruw linux-6.4/net/mac80211/mesh.c linux-6.4-fbx/net/mac80211/mesh.c
--- linux-6.4/net/mac80211/mesh.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/mesh.c	2024-04-19 16:04:28.973736213 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2018 - 2022 Intel Corporation
+ * Copyright (C) 2018 - 2023 Intel Corporation
  * Authors:    Luis Carlos Cobo <luisca@cozybit.com>
  * 	       Javier Cardona <javier@cozybit.com>
  */
@@ -45,7 +45,7 @@
 
 	set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
 
-	ieee80211_queue_work(&local->hw, &sdata->work);
+	wiphy_work_queue(local->hw.wiphy, &sdata->work);
 }
 
 /**
@@ -56,6 +56,8 @@
  *
  * This function checks if the mesh configuration of a mesh point matches the
  * local mesh configuration, i.e. if both nodes belong to the same mesh network.
+ *
+ * Returns: %true if both nodes belong to the same mesh
  */
 bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
 			struct ieee802_11_elems *ie)
@@ -119,6 +121,8 @@
  * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links
  *
  * @ie: information elements of a management frame from the mesh peer
+ *
+ * Returns: %true if the mesh peer is willing to establish peer links
  */
 bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
 {
@@ -133,10 +137,10 @@
  *
  * Returns: beacon changed flag if the beacon content changed.
  */
-u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
+u64 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
 {
 	bool free_plinks;
-	u32 changed = 0;
+	u64 changed = 0;
 
 	/* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0,
 	 * the mesh interface might be able to establish plinks with peers that
@@ -162,7 +166,7 @@
 void mesh_sta_cleanup(struct sta_info *sta)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	u32 changed = mesh_plink_deactivate(sta);
+	u64 changed = mesh_plink_deactivate(sta);
 
 	if (changed)
 		ieee80211_mbss_info_change_notify(sdata, changed);
@@ -349,19 +353,32 @@
 	u8 offset, len;
 	const u8 *data;
 
-	if (!ifmsh->ie || !ifmsh->ie_len)
-		return 0;
-
 	/* fast-forward to vendor IEs */
-	offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
+	if (ifmsh->node_vendor_ie_len) {
+		offset = ieee80211_ie_split_vendor(ifmsh->node_vendor_ie,
+						   ifmsh->node_vendor_ie_len, 0);
+
+		if (offset < ifmsh->node_vendor_ie_len) {
+			len = ifmsh->node_vendor_ie_len - offset;
+			data = ifmsh->node_vendor_ie + offset;
+			if (skb_tailroom(skb) < len)
+				return -ENOMEM;
+			skb_put_data(skb, data, len);
+		}
+	}
 
-	if (offset < ifmsh->ie_len) {
-		len = ifmsh->ie_len - offset;
-		data = ifmsh->ie + offset;
+	if (ifmsh->mpm_vendor_ie_len) {
+		offset = ieee80211_ie_split_vendor(ifmsh->mpm_vendor_ie,
+						   ifmsh->mpm_vendor_ie_len, 0);
+
+		if (offset < ifmsh->mpm_vendor_ie_len) {
+			len = ifmsh->mpm_vendor_ie_len - offset;
+			data = ifmsh->mpm_vendor_ie + offset;
 		if (skb_tailroom(skb) < len)
 			return -ENOMEM;
 		skb_put_data(skb, data, len);
 	}
+	}
 
 	return 0;
 }
@@ -703,7 +720,7 @@
 	struct ieee80211_sub_if_data *sdata =
 		from_timer(sdata, t, u.mesh.mesh_path_timer);
 
-	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+	wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
 }
 
 static void ieee80211_mesh_path_root_timer(struct timer_list *t)
@@ -714,7 +731,7 @@
 
 	set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
 
-	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+	wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
 }
 
 void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
@@ -858,7 +875,7 @@
  * @meshsa:	source address in the mesh.  Same as TA, as frame is
  *              locally originated.
  *
- * Return the length of the 802.11 (does not include a mesh control header)
+ * Returns: the length of the 802.11 frame header (excludes mesh control header)
  */
 int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
 				  const u8 *meshda, const u8 *meshsa)
@@ -891,7 +908,7 @@
  * @addr6:	2nd address in the ae header, which corresponds to addr6 of the
  *              mesh frame
  *
- * Return the header length.
+ * Returns: the header length
  */
 unsigned int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata,
 				       struct ieee80211s_hdr *meshhdr,
@@ -923,7 +940,7 @@
 static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-	u32 changed;
+	u64 changed;
 
 	if (ifmsh->mshcfg.plink_timeout > 0)
 		ieee80211_sta_expire(sdata, ifmsh->mshcfg.plink_timeout * HZ);
@@ -1006,7 +1023,9 @@
 		   ie_len_eht_cap +
 		   2 + 1 + offsetof(struct ieee80211_eht_operation, optional) +
 			   offsetof(struct ieee80211_eht_operation_info, optional) +
-		   ifmsh->ie_len;
+		   ifmsh->ie_len +
+		   ifmsh->node_vendor_ie_len +
+		   ifmsh->mpm_vendor_ie_len;
 
 	bcn = kzalloc(sizeof(*bcn) + head_len + tail_len, GFP_KERNEL);
 	/* need an skb for IE builders to operate on */
@@ -1164,7 +1183,7 @@
 }
 
 void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
-				       u32 changed)
+				       u64 changed)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	unsigned long bits = changed;
@@ -1175,16 +1194,16 @@
 
 	/* if we race with running work, worst case this work becomes a noop */
 	for_each_set_bit(bit, &bits, sizeof(changed) * BITS_PER_BYTE)
-		set_bit(bit, &ifmsh->mbss_changed);
+		set_bit(bit, ifmsh->mbss_changed);
 	set_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags);
-	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+	wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
 }
 
 int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct ieee80211_local *local = sdata->local;
-	u32 changed = BSS_CHANGED_BEACON |
+	u64 changed = BSS_CHANGED_BEACON |
 		      BSS_CHANGED_BEACON_ENABLED |
 		      BSS_CHANGED_HT |
 		      BSS_CHANGED_BASIC_RATES |
@@ -1202,7 +1221,7 @@
 	ifmsh->sync_offset_clockdrift_max = 0;
 	set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
 	ieee80211_mesh_root_setup(ifmsh);
-	ieee80211_queue_work(&local->hw, &sdata->work);
+	wiphy_work_queue(local->hw.wiphy, &sdata->work);
 	sdata->vif.bss_conf.ht_operation_mode =
 				ifmsh->mshcfg.ht_opmode;
 	sdata->vif.bss_conf.enable_beacon = true;
@@ -1257,7 +1276,7 @@
 
 	/* clear any mesh work (for next join) we may have accrued */
 	ifmsh->wrkq_flags = 0;
-	ifmsh->mbss_changed = 0;
+	memset(ifmsh->mbss_changed, 0, sizeof(ifmsh->mbss_changed));
 
 	local->fif_other_bss--;
 	atomic_dec(&local->iff_allmultis);
@@ -1291,7 +1310,7 @@
 	ieee80211_conn_flags_t conn_flags = 0;
 	u32 vht_cap_info = 0;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	sband = ieee80211_get_sband(sdata);
 	if (!sband)
@@ -1472,12 +1491,18 @@
 	size_t baselen;
 	int freq;
 	enum nl80211_band band = rx_status->band;
+	struct ieee802_11_mesh_vendor_specific_elems nm, pm;
+	u32 beacon_int = 0; /* only set for beacons; 0 for probe responses */
+	bool is_neigh_conn_estab;
 
 	/* ignore ProbeResp to foreign address */
 	if (stype == IEEE80211_STYPE_PROBE_RESP &&
 	    !ether_addr_equal(mgmt->da, sdata->vif.addr))
 		return;
 
+	if (stype == IEEE80211_STYPE_BEACON)
+		beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int);
+
 	baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
 	if (baselen > len)
 		return;
@@ -1507,6 +1532,22 @@
 	if (mesh_matches_local(sdata, elems)) {
 		mpl_dbg(sdata, "rssi_threshold=%d,rx_status->signal=%d\n",
 			sdata->u.mesh.mshcfg.rssi_threshold, rx_status->signal);
+
+		is_neigh_conn_estab = mesh_neighbour_connection_established(sdata, mgmt->sa);
+		ieee802_11_parse_mesh_vendor_elems(mgmt->u.probe_resp.variable, len - baselen, false,
+						   &nm, 0, 0, NL80211_QBC_UPDATE_NODE_METRICS_IE);
+		if (nm.parse_error == false)
+			elems->total_len -= (nm.ie_len + 2);
+
+		ieee802_11_parse_mesh_vendor_elems(mgmt->u.probe_resp.variable, len - baselen, false,
+						   &pm, 0, 0, NL80211_QBC_UPDATE_PATH_METRICS_IE);
+		if (pm.parse_error == false)
+			elems->total_len -= (pm.ie_len + 2);
+
+		if (is_neigh_conn_estab)
+			nmeshd_nl_send_vendor_ies(sdata->dev, mgmt->sa, stype, rx_status->signal,
+						  beacon_int, &pm, &nm, GFP_KERNEL);
+
 		if (!sdata->u.mesh.user_mpm ||
 		    sdata->u.mesh.mshcfg.rssi_threshold == 0 ||
 		    sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal)
@@ -1525,12 +1566,11 @@
 	kfree(elems);
 }
 
-int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
+int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct mesh_csa_settings *tmp_csa_settings;
 	int ret = 0;
-	int changed = 0;
 
 	/* Reset the TTL value and Initiator flag */
 	ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
@@ -1545,21 +1585,22 @@
 	if (ret)
 		return -EINVAL;
 
-	changed |= BSS_CHANGED_BEACON;
+	*changed |= BSS_CHANGED_BEACON;
 
 	mcsa_dbg(sdata, "complete switching to center freq %d MHz",
 		 sdata->vif.bss_conf.chandef.chan->center_freq);
-	return changed;
+	return 0;
 }
 
 int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
-			      struct cfg80211_csa_settings *csa_settings)
+			      struct cfg80211_csa_settings *csa_settings,
+			      u64 *changed)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct mesh_csa_settings *tmp_csa_settings;
 	int ret = 0;
 
-	lockdep_assert_held(&sdata->wdev.mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	tmp_csa_settings = kmalloc(sizeof(*tmp_csa_settings),
 				   GFP_ATOMIC);
@@ -1579,7 +1620,8 @@
 		return ret;
 	}
 
-	return BSS_CHANGED_BEACON;
+	*changed |= BSS_CHANGED_BEACON;
+	return 0;
 }
 
 static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
@@ -1618,11 +1660,21 @@
 	bool fwd_csa = true;
 	size_t baselen;
 	u8 *pos;
+	struct sta_info *sta = NULL;
 
 	if (mgmt->u.action.u.measurement.action_code !=
 	    WLAN_ACTION_SPCT_CHL_SWITCH)
 		return;
 
+	/* Process action frames received from connected mesh nodes */
+	rcu_read_lock();
+	sta = sta_info_get(sdata, mgmt->bssid);
+	if (!sta) {
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+
 	pos = mgmt->u.action.u.chan_switch.variable;
 	baselen = offsetof(struct ieee80211_mgmt,
 			   u.action.u.chan_switch.variable);
@@ -1690,11 +1742,11 @@
 	struct ieee80211_mgmt *mgmt;
 	u16 stype;
 
-	sdata_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	/* mesh already went down */
 	if (!sdata->u.mesh.mesh_id_len)
-		goto out;
+		return;
 
 	rx_status = IEEE80211_SKB_RXCB(skb);
 	mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -1713,18 +1765,17 @@
 		ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status);
 		break;
 	}
-out:
-	sdata_unlock(sdata);
 }
 
 static void mesh_bss_info_changed(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-	u32 bit, changed = 0;
+	u32 bit;
+	u64 changed = 0;
 
-	for_each_set_bit(bit, &ifmsh->mbss_changed,
+	for_each_set_bit(bit, ifmsh->mbss_changed,
 			 sizeof(changed) * BITS_PER_BYTE) {
-		clear_bit(bit, &ifmsh->mbss_changed);
+		clear_bit(bit, ifmsh->mbss_changed);
 		changed |= BIT(bit);
 	}
 
@@ -1743,11 +1794,11 @@
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
-	sdata_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	/* mesh already went down */
 	if (!sdata->u.mesh.mesh_id_len)
-		goto out;
+		return;
 
 	if (ifmsh->preq_queue_len &&
 	    time_after(jiffies,
@@ -1765,8 +1816,6 @@
 
 	if (test_and_clear_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags))
 		mesh_bss_info_changed(sdata);
-out:
-	sdata_unlock(sdata);
 }
 
 
@@ -1797,13 +1846,29 @@
 	skb_queue_head_init(&ifmsh->ps.bc_buf);
 	spin_lock_init(&ifmsh->mesh_preq_queue_lock);
 	spin_lock_init(&ifmsh->sync_offset_lock);
+	INIT_LIST_HEAD(&ifmsh->mplink_blocking_list);
+	spin_lock_init(&ifmsh->mplink_blocking_list_lock);
 	RCU_INIT_POINTER(ifmsh->beacon, NULL);
 
 	sdata->vif.bss_conf.bssid = zero_addr;
 }
 
+void ieee80211_mesh_free_blocked_mplink_entries(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	struct mplink_block_list_info *mp_blink, *mp_blink_nxt;
+
+	spin_lock_bh(&ifmsh->mplink_blocking_list_lock);
+	list_for_each_entry_safe(mp_blink, mp_blink_nxt, &ifmsh->mplink_blocking_list, list) {
+		list_del(&mp_blink->list);
+		kfree(mp_blink);
+	}
+	spin_unlock_bh(&ifmsh->mplink_blocking_list_lock);
+}
+
 void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata)
 {
 	mesh_rmc_free(sdata);
 	mesh_pathtbl_unregister(sdata);
+	ieee80211_mesh_free_blocked_mplink_entries(sdata);
 }
diff -ruw linux-6.4/net/mac80211/mesh.h linux-6.4-fbx/net/mac80211/mesh.h
--- linux-6.4/net/mac80211/mesh.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/mesh.h	2024-04-19 16:04:28.973736213 +0200
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2008, 2009 open80211s Ltd.
+ * Copyright (C) 2023 Intel Corporation
  * Authors:    Luis Carlos Cobo <luisca@cozybit.com>
  *             Javier Cardona <javier@cozybit.com>
  */
@@ -157,6 +158,11 @@
 	unsigned long timestamp;
 };
 
+struct mplink_block_list_info {
+	struct list_head list;
+	u8 dst[ETH_ALEN];
+};
+
 /* Recent multicast cache */
 /* RMC_BUCKETS must be a power of 2, maximum 256 */
 #define RMC_BUCKETS		256
@@ -211,7 +217,6 @@
 		   const u8 *addr, struct ieee80211s_hdr *mesh_hdr);
 bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
 			struct ieee802_11_elems *ie);
-void mesh_ids_set_default(struct ieee80211_if_mesh *mesh);
 int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
 			 struct sk_buff *skb);
 int mesh_add_meshid_ie(struct ieee80211_sub_if_data *sdata,
@@ -252,11 +257,12 @@
 const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method);
 /* wrapper for ieee80211_bss_info_change_notify() */
 void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
-				       u32 changed);
+				       u64 changed);
+void ieee80211_mesh_free_blocked_mplink_entries(struct ieee80211_sub_if_data *sdata);
 
 /* mesh power save */
-u32 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata);
-u32 ieee80211_mps_set_sta_local_pm(struct sta_info *sta,
+u64 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata);
+u64 ieee80211_mps_set_sta_local_pm(struct sta_info *sta,
 				   enum nl80211_mesh_power_mode pm);
 void ieee80211_mps_set_frame_flags(struct ieee80211_sub_if_data *sdata,
 				   struct sta_info *sta,
@@ -303,17 +309,18 @@
 			   u8 *hw_addr, struct ieee802_11_elems *ie,
 			   struct ieee80211_rx_status *rx_status);
 bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
-u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
+u64 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
 void mesh_plink_timer(struct timer_list *t);
 void mesh_plink_broken(struct sta_info *sta);
-u32 mesh_plink_deactivate(struct sta_info *sta);
-u32 mesh_plink_open(struct sta_info *sta);
-u32 mesh_plink_block(struct sta_info *sta);
+u64 mesh_plink_deactivate(struct sta_info *sta);
+u64 mesh_plink_open(struct sta_info *sta);
+u64 mesh_plink_block(struct sta_info *sta);
 void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
 			 struct ieee80211_mgmt *mgmt, size_t len,
 			 struct ieee80211_rx_status *rx_status);
 void mesh_sta_cleanup(struct sta_info *sta);
-
+bool mesh_neighbour_connection_established(struct ieee80211_sub_if_data *sdata,
+					   u8 *hw_addr);
 /* Private interfaces */
 /* Mesh paths */
 int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
@@ -349,14 +356,14 @@
 
 #ifdef CONFIG_MAC80211_MESH
 static inline
-u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
+u64 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
 {
 	atomic_inc(&sdata->u.mesh.estab_plinks);
 	return mesh_accept_plinks_update(sdata) | BSS_CHANGED_BEACON;
 }
 
 static inline
-u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
+u64 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
 {
 	atomic_dec(&sdata->u.mesh.estab_plinks);
 	return mesh_accept_plinks_update(sdata) | BSS_CHANGED_BEACON;
@@ -387,12 +394,20 @@
 void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
 void mesh_sync_adjust_tsf(struct ieee80211_sub_if_data *sdata);
 void ieee80211s_stop(void);
+void nmeshd_nl_send_vendor_ies(struct net_device *dev, const u8 *macaddr, u16 stype, s8 signal,
+			       u32 beacon_int, struct ieee802_11_mesh_vendor_specific_elems *pm,
+			       struct ieee802_11_mesh_vendor_specific_elems *nm, gfp_t gfp);
+
 #else
 static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
 { return false; }
 static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
 {}
 static inline void ieee80211s_stop(void) {}
+static inline void nmeshd_nl_send_vendor_ies(struct net_device *dev, const u8 *macaddr, u16 stype, s8 signal,
+					     u32 beacon_int, struct ieee802_11_mesh_vendor_specific_elems *pm,
+					     struct ieee802_11_mesh_vendor_specific_elems *nm, gfp_t gfp)
+{}
 #endif
 
 #endif /* IEEE80211S_H */
diff -ruw linux-6.4/net/mac80211/mesh_hwmp.c linux-6.4-fbx/net/mac80211/mesh_hwmp.c
--- linux-6.4/net/mac80211/mesh_hwmp.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/mesh_hwmp.c	2023-11-07 13:38:44.078257129 +0100
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2019, 2021-2022 Intel Corporation
+ * Copyright (C) 2019, 2021-2023 Intel Corporation
  * Author:     Luis Carlos Cobo <luisca@cozybit.com>
  */
 
@@ -230,6 +230,8 @@
  * Note: This function may be called with driver locks taken that the driver
  * also acquires in the TX path.  To avoid a deadlock we don't transmit the
  * frame directly but add it to the pending queue instead.
+ *
+ * Returns: 0 on success
  */
 int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
 		       u8 ttl, const u8 *target, u32 target_sn,
@@ -1026,14 +1028,14 @@
 	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
 
 	if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
-		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+		wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
 
 	else if (time_before(jiffies, ifmsh->last_preq)) {
 		/* avoid long wait if did not send preqs for a long time
 		 * and jiffies wrapped around
 		 */
 		ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
-		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+		wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
 	} else
 		mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
 						min_preq_int_jiff(sdata));
diff -ruw linux-6.4/net/mac80211/mesh_pathtbl.c linux-6.4-fbx/net/mac80211/mesh_pathtbl.c
--- linux-6.4/net/mac80211/mesh_pathtbl.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/mesh_pathtbl.c	2023-11-07 13:38:44.082257238 +0100
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2008, 2009 open80211s Ltd.
+ * Copyright (C) 2023 Intel Corporation
  * Author:     Luis Carlos Cobo <luisca@cozybit.com>
  */
 
@@ -173,6 +174,11 @@
 /**
  * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
  *
+ * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
+ * @from_mpath: The failed mpath
+ * @copy: When true, copy all the frames to the new mpath queue.  When false,
+ * move them.
+ *
  * This function is used to transfer or copy frames from an unresolved mpath to
  * a gate mpath.  The function also adds the Address Extension field and
  * updates the next hop.
@@ -181,11 +187,6 @@
  * destination addresses are updated.
  *
  * The gate mpath must be an active mpath with a valid mpath->next_hop.
- *
- * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
- * @from_mpath: The failed mpath
- * @copy: When true, copy all the frames to the new mpath queue.  When false,
- * move them.
  */
 static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
 				    struct mesh_path *from_mpath,
@@ -330,6 +331,8 @@
 /**
  * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
  * @mpath: gate path to add to table
+ *
+ * Returns: 0 on success, -EEXIST
  */
 int mesh_path_add_gate(struct mesh_path *mpath)
 {
@@ -388,6 +391,8 @@
 /**
  * mesh_gate_num - number of gates known to this interface
  * @sdata: subif data
+ *
+ * Returns: The number of gates
  */
 int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
 {
@@ -648,7 +653,7 @@
 
 	cache = &sdata->u.mesh.tx_cache;
 	spin_lock_bh(&cache->walk_lock);
-	entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
+	entry = rhashtable_lookup_fast(&cache->rht, addr, fast_tx_rht_params);
 	if (entry)
 		mesh_fast_tx_entry_free(cache, entry);
 	spin_unlock_bh(&cache->walk_lock);
@@ -861,10 +866,9 @@
 /**
  * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
  *
- * This function deletes both mesh paths as well as mesh portal paths.
- *
  * @sdata: interface data to match
  *
+ * This function deletes both mesh paths as well as mesh portal paths.
  */
 void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
 {
@@ -944,6 +948,8 @@
  * queue to that gate's queue.  If there are more than one gates, the frames
  * are copied from each gate to the next.  After frames are copied, the
  * mpath queues are emptied onto the transmission queue.
+ *
+ * Returns: 0 on success, -EHOSTUNREACH
  */
 int mesh_path_send_to_gates(struct mesh_path *mpath)
 {
diff -ruw linux-6.4/net/mac80211/mesh_plink.c linux-6.4-fbx/net/mac80211/mesh_plink.c
--- linux-6.4/net/mac80211/mesh_plink.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/mesh_plink.c	2024-04-19 16:04:28.973736213 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2019, 2021-2022 Intel Corporation
+ * Copyright (C) 2019, 2021-2023 Intel Corporation
  * Author:     Luis Carlos Cobo <luisca@cozybit.com>
  */
 #include <linux/gfp.h>
@@ -90,12 +90,13 @@
  *
  * Returns BSS_CHANGED_ERP_SLOT or 0 for no change.
  */
-static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
+static u64 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
 	struct sta_info *sta;
-	u32 erp_rates = 0, changed = 0;
+	u32 erp_rates = 0;
+	u64 changed = 0;
 	int i;
 	bool short_slot = false;
 
@@ -152,8 +153,10 @@
  * selected if any non-HT peers are present in our MBSS.  20MHz-protection mode
  * is selected if all peers in our 20/40MHz MBSS support HT and at least one
  * HT20 peer is present. Otherwise no-protection mode is selected.
+ *
+ * Returns: BSS_CHANGED_HT or 0 for no change
  */
-static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
+static u64 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
@@ -361,14 +364,14 @@
  * Mesh paths with this peer as next hop should be flushed
  * by the caller outside of plink_lock.
  *
- * Returns beacon changed flag if the beacon content changed.
+ * Returns: beacon changed flag if the beacon content changed.
  *
  * Locking: the caller must hold sta->mesh->plink_lock
  */
-static u32 __mesh_plink_deactivate(struct sta_info *sta)
+static u64 __mesh_plink_deactivate(struct sta_info *sta)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	u32 changed = 0;
+	u64 changed = 0;
 
 	lockdep_assert_held(&sta->mesh->plink_lock);
 
@@ -389,11 +392,13 @@
  * @sta: mesh peer link to deactivate
  *
  * All mesh paths with this peer as next hop will be flushed
+ *
+ * Returns: beacon changed flag if the beacon content changed.
  */
-u32 mesh_plink_deactivate(struct sta_info *sta)
+u64 mesh_plink_deactivate(struct sta_info *sta)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	u32 changed;
+	u64 changed;
 
 	spin_lock_bh(&sta->mesh->plink_lock);
 	changed = __mesh_plink_deactivate(sta);
@@ -450,7 +455,7 @@
 		changed |= IEEE80211_RC_BW_CHANGED;
 
 	ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
-					    elems->vht_cap_elem,
+					    elems->vht_cap_elem, NULL,
 					    &sta->deflink);
 
 	ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, elems->he_cap,
@@ -622,7 +627,7 @@
 			   struct ieee80211_rx_status *rx_status)
 {
 	struct sta_info *sta;
-	u32 changed = 0;
+	u64 changed = 0;
 
 	sta = mesh_sta_info_get(sdata, hw_addr, elems, rx_status);
 	if (!sta)
@@ -775,10 +780,10 @@
 	return llid;
 }
 
-u32 mesh_plink_open(struct sta_info *sta)
+u64 mesh_plink_open(struct sta_info *sta)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	u32 changed;
+	u64 changed;
 
 	if (!test_sta_flag(sta, WLAN_STA_AUTH))
 		return 0;
@@ -805,9 +810,9 @@
 	return changed;
 }
 
-u32 mesh_plink_block(struct sta_info *sta)
+u64 mesh_plink_block(struct sta_info *sta)
 {
-	u32 changed;
+	u64 changed;
 
 	spin_lock_bh(&sta->mesh->plink_lock);
 	changed = __mesh_plink_deactivate(sta);
@@ -831,11 +836,11 @@
 	mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
 }
 
-static u32 mesh_plink_establish(struct ieee80211_sub_if_data *sdata,
+static u64 mesh_plink_establish(struct ieee80211_sub_if_data *sdata,
 				struct sta_info *sta)
 {
 	struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
-	u32 changed = 0;
+	u64 changed = 0;
 
 	del_timer(&sta->mesh->plink_timer);
 	sta->mesh->plink_state = NL80211_PLINK_ESTAB;
@@ -857,12 +862,12 @@
  *
  * Return: changed MBSS flags
  */
-static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
+static u64 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
 			  struct sta_info *sta, enum plink_event event)
 {
 	struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
 	enum ieee80211_self_protected_actioncode action = 0;
-	u32 changed = 0;
+	u64 changed = 0;
 	bool flush = false;
 
 	mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr,
@@ -1117,7 +1122,7 @@
 	struct sta_info *sta;
 	enum plink_event event;
 	enum ieee80211_self_protected_actioncode ftype;
-	u32 changed = 0;
+	u64 changed = 0;
 	u8 ie_len = elems->peering_len;
 	u16 plid, llid = 0;
 
@@ -1245,3 +1250,31 @@
 	mesh_process_plink_frame(sdata, mgmt, elems, rx_status);
 	kfree(elems);
 }
+
+/*
+ * mesh_neighbour_connection_established - return if connection has been established with  neighbor.
+ *
+ * @sdata: local meshif
+ * @addr: peer's address
+ * @elems: IEs from beacon or mesh peering frame
+ *
+ *
+ */
+bool mesh_neighbour_connection_established(struct ieee80211_sub_if_data *sdata,
+					   u8 *hw_addr)
+{
+	struct sta_info *sta;
+	bool ret = false;
+
+	rcu_read_lock();
+	sta = sta_info_get(sdata, hw_addr);
+	if (!sta)
+		goto out;
+
+	if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+		ret = true;
+
+out:
+	rcu_read_unlock();
+	return ret;
+}
diff -ruw linux-6.4/net/mac80211/mesh_ps.c linux-6.4-fbx/net/mac80211/mesh_ps.c
--- linux-6.4/net/mac80211/mesh_ps.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/mesh_ps.c	2023-11-07 13:38:44.082257238 +0100
@@ -3,6 +3,7 @@
  * Copyright 2012-2013, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de>
  * Copyright 2012-2013, cozybit Inc.
  * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2023 Intel Corporation
  */
 
 #include "mesh.h"
@@ -14,6 +15,8 @@
 /**
  * mps_qos_null_get - create pre-addressed QoS Null frame for mesh powersave
  * @sta: the station to get the frame for
+ *
+ * Returns: A newly allocated SKB
  */
 static struct sk_buff *mps_qos_null_get(struct sta_info *sta)
 {
@@ -76,15 +79,17 @@
  *
  * sets the non-peer power mode and triggers the driver PS (re-)configuration
  * Return BSS_CHANGED_BEACON if a beacon update is necessary.
+ *
+ * Returns: BSS_CHANGED_BEACON if a beacon update is in order.
  */
-u32 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata)
+u64 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct sta_info *sta;
 	bool peering = false;
 	int light_sleep_cnt = 0;
 	int deep_sleep_cnt = 0;
-	u32 changed = 0;
+	u64 changed = 0;
 	enum nl80211_mesh_power_mode nonpeer_pm;
 
 	rcu_read_lock();
@@ -146,9 +151,9 @@
  *
  * @sta: mesh STA
  * @pm: the power mode to set
- * Return BSS_CHANGED_BEACON if a beacon update is in order.
+ * Returns: BSS_CHANGED_BEACON if a beacon update is in order.
  */
-u32 ieee80211_mps_set_sta_local_pm(struct sta_info *sta,
+u64 ieee80211_mps_set_sta_local_pm(struct sta_info *sta,
 				   enum nl80211_mesh_power_mode pm)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
diff -ruw linux-6.4/net/mac80211/mesh_sync.c linux-6.4-fbx/net/mac80211/mesh_sync.c
--- linux-6.4/net/mac80211/mesh_sync.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/mesh_sync.c	2023-11-07 13:38:44.082257238 +0100
@@ -3,7 +3,7 @@
  * Copyright 2011-2012, Pavel Zubarev <pavel.zubarev@gmail.com>
  * Copyright 2011-2012, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de>
  * Copyright 2011-2012, cozybit Inc.
- * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2021,2023 Intel Corporation
  */
 
 #include "ieee80211_i.h"
@@ -37,6 +37,8 @@
  * mesh_peer_tbtt_adjusting - check if an mp is currently adjusting its TBTT
  *
  * @cfg: mesh config element from the mesh peer (or %NULL)
+ *
+ * Returns: If the mesh peer is currently adjusting its TBTT
  */
 static bool mesh_peer_tbtt_adjusting(const struct ieee80211_meshconf_ie *cfg)
 {
diff -ruw linux-6.4/net/mac80211/mlme.c linux-6.4-fbx/net/mac80211/mlme.c
--- linux-6.4/net/mac80211/mlme.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/mlme.c	2023-11-07 13:38:44.082257238 +0100
@@ -110,7 +110,8 @@
 		return 0;
 
 	/* set 160/320 supported to get the full AP definition */
-	ieee80211_chandef_eht_oper(eht_oper, true, true, &ap_chandef);
+	ieee80211_chandef_eht_oper((const void *)eht_oper->optional,
+				   true, true, &ap_chandef);
 	ap_center_freq = ap_chandef.center_freq1;
 	ap_bw = 20 * BIT(u8_get_bits(info->control,
 				     IEEE80211_EHT_OPER_CHAN_WIDTH));
@@ -175,7 +176,7 @@
 static void run_again(struct ieee80211_sub_if_data *sdata,
 		      unsigned long timeout)
 {
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (!timer_pending(&sdata->u.mgd.timer) ||
 	    time_before(timeout, sdata->u.mgd.timer.expires))
@@ -388,7 +389,7 @@
 	if (eht_oper && (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT)) {
 		struct cfg80211_chan_def eht_chandef = *chandef;
 
-		ieee80211_chandef_eht_oper(eht_oper,
+		ieee80211_chandef_eht_oper((const void *)eht_oper->optional,
 					   eht_chandef.width ==
 					   NL80211_CHAN_WIDTH_160,
 					   false, &eht_chandef);
@@ -511,16 +512,14 @@
 
 	/* don't check HE if we associated as non-HE station */
 	if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE ||
-	    !ieee80211_get_he_iftype_cap(sband,
-					 ieee80211_vif_type_p2p(&sdata->vif))) {
+	    !ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif)) {
 		he_oper = NULL;
 		eht_oper = NULL;
 	}
 
 	/* don't check EHT if we associated as non-EHT station */
 	if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT ||
-	    !ieee80211_get_eht_iftype_cap(sband,
-					 ieee80211_vif_type_p2p(&sdata->vif)))
+	    !ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif))
 		eht_oper = NULL;
 
 	/*
@@ -776,8 +775,7 @@
 	const struct ieee80211_sta_he_cap *he_cap;
 	u8 he_cap_size;
 
-	he_cap = ieee80211_get_he_iftype_cap(sband,
-					     ieee80211_vif_type_p2p(&sdata->vif));
+	he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
 	if (WARN_ON(!he_cap))
 		return;
 
@@ -806,10 +804,8 @@
 	const struct ieee80211_sta_eht_cap *eht_cap;
 	u8 eht_cap_size;
 
-	he_cap = ieee80211_get_he_iftype_cap(sband,
-					     ieee80211_vif_type_p2p(&sdata->vif));
-	eht_cap = ieee80211_get_eht_iftype_cap(sband,
-					       ieee80211_vif_type_p2p(&sdata->vif));
+	he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+	eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
 
 	/*
 	 * EHT capabilities element is only added if the HE capabilities element
@@ -835,7 +831,6 @@
 				      struct ieee80211_supported_band *sband,
 				      struct ieee80211_mgd_assoc_data *assoc_data)
 {
-	unsigned int shift = ieee80211_chanwidth_get_shift(width);
 	unsigned int rates_len, supp_rates_len;
 	u32 rates = 0;
 	int i, count;
@@ -874,8 +869,7 @@
 	count = 0;
 	for (i = 0; i < sband->n_bitrates; i++) {
 		if (BIT(i) & rates) {
-			int rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
-						5 * (1 << shift));
+			int rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
 			*pos++ = (u8)rate;
 			if (++count == 8)
 				break;
@@ -891,8 +885,7 @@
 			if (BIT(i) & rates) {
 				int rate;
 
-				rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
-						    5 * (1 << shift));
+				rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
 				*pos++ = (u8)rate;
 			}
 		}
@@ -1287,7 +1280,7 @@
 	u8 *ml_elem_len;
 	void *capab_pos;
 
-	if (!sdata->vif.valid_links)
+	if (!ieee80211_vif_is_mld(&sdata->vif))
 		return;
 
 	ift_ext_capa = cfg80211_get_iftype_ext_capa(local->hw.wiphy,
@@ -1406,7 +1399,7 @@
 						      assoc_data->ie,
 						      assoc_data->ie_len);
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	size = local->hw.extra_tx_headroom +
 	       sizeof(*mgmt) + /* bit too much but doesn't matter */
@@ -1462,7 +1455,7 @@
 			capab |= WLAN_CAPABILITY_PRIVACY;
 	}
 
-	if (sdata->vif.valid_links) {
+	if (ieee80211_vif_is_mld(&sdata->vif)) {
 		/* consider the multi-link element with STA profile */
 		size += sizeof(struct ieee80211_multi_link_elem);
 		/* max common info field in basic multi-link element */
@@ -1591,6 +1584,7 @@
 
 	ifmgd->assoc_req_ies_len = pos - ie_start;
 
+	info.link_id = assoc_data->assoc_link_id;
 	drv_mgd_prepare_tx(local, sdata, &info);
 
 	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
@@ -1680,10 +1674,12 @@
 }
 
 /* spectrum management related things */
-static void ieee80211_chswitch_work(struct work_struct *work)
+static void ieee80211_chswitch_work(struct wiphy *wiphy,
+				    struct wiphy_work *work)
 {
 	struct ieee80211_link_data *link =
-		container_of(work, struct ieee80211_link_data, u.mgd.chswitch_work);
+		container_of(work, struct ieee80211_link_data,
+			     u.mgd.chswitch_work.work);
 	struct ieee80211_sub_if_data *sdata = link->sdata;
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -1692,15 +1688,13 @@
 	if (!ieee80211_sdata_running(sdata))
 		return;
 
-	sdata_lock(sdata);
-	mutex_lock(&local->mtx);
-	mutex_lock(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!ifmgd->associated)
-		goto out;
+		return;
 
 	if (!link->conf->csa_active)
-		goto out;
+		return;
 
 	/*
 	 * using reservation isn't immediate as it may be deferred until later
@@ -1716,39 +1710,32 @@
 		 * reservations
 		 */
 		if (link->reserved_ready)
-			goto out;
+			return;
 
 		ret = ieee80211_link_use_reserved_context(link);
 		if (ret) {
 			sdata_info(sdata,
 				   "failed to use reserved channel context, disconnecting (err=%d)\n",
 				   ret);
-			ieee80211_queue_work(&sdata->local->hw,
+			wiphy_work_queue(sdata->local->hw.wiphy,
 					     &ifmgd->csa_connection_drop_work);
-			goto out;
 		}
-
-		goto out;
+		return;
 	}
 
 	if (!cfg80211_chandef_identical(&link->conf->chandef,
 					&link->csa_chandef)) {
 		sdata_info(sdata,
 			   "failed to finalize channel switch, disconnecting\n");
-		ieee80211_queue_work(&sdata->local->hw,
+		wiphy_work_queue(sdata->local->hw.wiphy,
 				     &ifmgd->csa_connection_drop_work);
-		goto out;
+		return;
 	}
 
 	link->u.mgd.csa_waiting_bcn = true;
 
 	ieee80211_sta_reset_beacon_monitor(sdata);
 	ieee80211_sta_reset_conn_monitor(sdata);
-
-out:
-	mutex_unlock(&local->chanctx_mtx);
-	mutex_unlock(&local->mtx);
-	sdata_unlock(sdata);
 }
 
 static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link)
@@ -1758,7 +1745,7 @@
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	int ret;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	WARN_ON(!link->conf->csa_active);
 
@@ -1776,47 +1763,49 @@
 	 */
 	link->u.mgd.beacon_crc_valid = false;
 
-	ret = drv_post_channel_switch(sdata);
+	ret = drv_post_channel_switch(link);
 	if (ret) {
 		sdata_info(sdata,
 			   "driver post channel switch failed, disconnecting\n");
-		ieee80211_queue_work(&local->hw,
+		wiphy_work_queue(sdata->local->hw.wiphy,
 				     &ifmgd->csa_connection_drop_work);
 		return;
 	}
 
-	cfg80211_ch_switch_notify(sdata->dev, &link->reserved_chandef, 0, 0);
+	cfg80211_ch_switch_notify(sdata->dev, &link->reserved_chandef,
+				  link->link_id, 0);
 }
 
-void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success)
+void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success,
+			     unsigned int link_id)
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
-	if (WARN_ON(sdata->vif.valid_links))
-		success = false;
+	trace_api_chswitch_done(sdata, success, link_id);
+
+	rcu_read_lock();
 
-	trace_api_chswitch_done(sdata, success);
 	if (!success) {
 		sdata_info(sdata,
 			   "driver channel switch failed, disconnecting\n");
-		ieee80211_queue_work(&sdata->local->hw,
-				     &ifmgd->csa_connection_drop_work);
+		wiphy_work_queue(sdata->local->hw.wiphy,
+				 &sdata->u.mgd.csa_connection_drop_work);
 	} else {
-		ieee80211_queue_work(&sdata->local->hw,
-				     &sdata->deflink.u.mgd.chswitch_work);
-	}
+		struct ieee80211_link_data *link =
+			rcu_dereference(sdata->link[link_id]);
+
+		if (WARN_ON(!link)) {
+			rcu_read_unlock();
+			return;
 }
-EXPORT_SYMBOL(ieee80211_chswitch_done);
 
-static void ieee80211_chswitch_timer(struct timer_list *t)
-{
-	struct ieee80211_link_data *link =
-		from_timer(link, t, u.mgd.chswitch_timer);
+		wiphy_delayed_work_queue(sdata->local->hw.wiphy,
+					 &link->u.mgd.chswitch_work, 0);
+	}
 
-	ieee80211_queue_work(&link->sdata->local->hw,
-			     &link->u.mgd.chswitch_work);
+	rcu_read_unlock();
 }
+EXPORT_SYMBOL(ieee80211_chswitch_done);
 
 static void
 ieee80211_sta_abort_chanswitch(struct ieee80211_link_data *link)
@@ -1824,14 +1813,12 @@
 	struct ieee80211_sub_if_data *sdata = link->sdata;
 	struct ieee80211_local *local = sdata->local;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!local->ops->abort_channel_switch)
 		return;
 
-	mutex_lock(&local->mtx);
-
-	mutex_lock(&local->chanctx_mtx);
 	ieee80211_link_unreserve_chanctx(link);
-	mutex_unlock(&local->chanctx_mtx);
 
 	if (link->csa_block_tx)
 		ieee80211_wake_vif_queues(local, sdata,
@@ -1840,8 +1827,6 @@
 	link->csa_block_tx = false;
 	link->conf->csa_active = false;
 
-	mutex_unlock(&local->mtx);
-
 	drv_abort_channel_switch(sdata);
 }
 
@@ -1861,16 +1846,14 @@
 	struct ieee80211_csa_ie csa_ie;
 	struct ieee80211_channel_switch ch_switch;
 	struct ieee80211_bss *bss;
+	unsigned long timeout;
 	int res;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!cbss)
 		return;
 
-	if (local->scanning)
-		return;
-
 	current_band = cbss->channel->band;
 	bss = (void *)cbss->priv;
 	res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
@@ -1888,7 +1871,7 @@
 	}
 
 	if (res < 0)
-		goto lock_and_drop_connection;
+		goto drop_connection;
 
 	if (beacon && link->conf->csa_active &&
 	    !link->u.mgd.csa_waiting_bcn) {
@@ -1910,7 +1893,7 @@
 			   csa_ie.chandef.chan->center_freq,
 			   csa_ie.chandef.width, csa_ie.chandef.center_freq1,
 			   csa_ie.chandef.center_freq2);
-		goto lock_and_drop_connection;
+		goto drop_connection;
 	}
 
 	if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chandef,
@@ -1925,7 +1908,7 @@
 			   csa_ie.chandef.width, csa_ie.chandef.center_freq1,
 			   csa_ie.chandef.freq1_offset,
 			   csa_ie.chandef.center_freq2);
-		goto lock_and_drop_connection;
+		goto drop_connection;
 	}
 
 	if (cfg80211_chandef_identical(&csa_ie.chandef,
@@ -1948,10 +1931,8 @@
 	 */
 	ieee80211_teardown_tdls_peers(sdata);
 
-	mutex_lock(&local->mtx);
-	mutex_lock(&local->chanctx_mtx);
 	conf = rcu_dereference_protected(link->conf->chanctx_conf,
-					 lockdep_is_held(&local->chanctx_mtx));
+					 lockdep_is_held(&local->hw.wiphy->mtx));
 	if (!conf) {
 		sdata_info(sdata,
 			   "no channel context assigned to vif?, disconnecting\n");
@@ -1981,7 +1962,6 @@
 			   res);
 		goto drop_connection;
 	}
-	mutex_unlock(&local->chanctx_mtx);
 
 	link->conf->csa_active = true;
 	link->csa_chandef = csa_ie.chandef;
@@ -1992,10 +1972,10 @@
 	if (link->csa_block_tx)
 		ieee80211_stop_vif_queues(local, sdata,
 					  IEEE80211_QUEUE_STOP_REASON_CSA);
-	mutex_unlock(&local->mtx);
 
-	cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef, 0,
-					  csa_ie.count, csa_ie.mode, 0);
+	cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef,
+					  link->link_id, csa_ie.count,
+					  csa_ie.mode, 0);
 
 	if (local->ops->channel_switch) {
 		/* use driver's channel switch callback */
@@ -2004,16 +1984,12 @@
 	}
 
 	/* channel switch handled in software */
-	if (csa_ie.count <= 1)
-		ieee80211_queue_work(&local->hw, &link->u.mgd.chswitch_work);
-	else
-		mod_timer(&link->u.mgd.chswitch_timer,
-			  TU_TO_EXP_TIME((csa_ie.count - 1) *
-					 cbss->beacon_interval));
-	return;
- lock_and_drop_connection:
-	mutex_lock(&local->mtx);
-	mutex_lock(&local->chanctx_mtx);
+	timeout = TU_TO_JIFFIES((max_t(int, csa_ie.count, 1) - 1) *
+				cbss->beacon_interval);
+	wiphy_delayed_work_queue(local->hw.wiphy,
+				 &link->u.mgd.chswitch_work,
+				 timeout);
+	return;
  drop_connection:
 	/*
 	 * This is just so that the disconnect flow will know that
@@ -2025,9 +2001,8 @@
 	link->conf->csa_active = true;
 	link->csa_block_tx = csa_ie.mode;
 
-	ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
-	mutex_unlock(&local->chanctx_mtx);
-	mutex_unlock(&local->mtx);
+	wiphy_work_queue(sdata->local->hw.wiphy,
+			 &ifmgd->csa_connection_drop_work);
 }
 
 static bool
@@ -2116,7 +2091,7 @@
 	*pwr_level = (__s8)cisco_dtpc_ie[4];
 }
 
-static u32 ieee80211_handle_pwr_constr(struct ieee80211_link_data *link,
+static u64 ieee80211_handle_pwr_constr(struct ieee80211_link_data *link,
 				       struct ieee80211_channel *channel,
 				       struct ieee80211_mgmt *mgmt,
 				       const u8 *country_ie, u8 country_ie_len,
@@ -2223,7 +2198,8 @@
 		conf->flags &= ~IEEE80211_CONF_PS;
 		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
 		del_timer_sync(&local->dynamic_ps_timer);
-		cancel_work_sync(&local->dynamic_ps_enable_work);
+		wiphy_work_cancel(local->hw.wiphy,
+				  &local->dynamic_ps_enable_work);
 	}
 }
 
@@ -2320,7 +2296,8 @@
 	}
 }
 
-void ieee80211_dynamic_ps_disable_work(struct work_struct *work)
+void ieee80211_dynamic_ps_disable_work(struct wiphy *wiphy,
+				       struct wiphy_work *work)
 {
 	struct ieee80211_local *local =
 		container_of(work, struct ieee80211_local,
@@ -2337,7 +2314,8 @@
 					false);
 }
 
-void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
+void ieee80211_dynamic_ps_enable_work(struct wiphy *wiphy,
+				      struct wiphy_work *work)
 {
 	struct ieee80211_local *local =
 		container_of(work, struct ieee80211_local,
@@ -2410,26 +2388,25 @@
 {
 	struct ieee80211_local *local = from_timer(local, t, dynamic_ps_timer);
 
-	ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work);
+	wiphy_work_queue(local->hw.wiphy, &local->dynamic_ps_enable_work);
 }
 
-void ieee80211_dfs_cac_timer_work(struct work_struct *work)
+void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work)
 {
-	struct delayed_work *delayed_work = to_delayed_work(work);
 	struct ieee80211_link_data *link =
-		container_of(delayed_work, struct ieee80211_link_data,
-			     dfs_cac_timer_work);
+		container_of(work, struct ieee80211_link_data,
+			     dfs_cac_timer_work.work);
 	struct cfg80211_chan_def chandef = link->conf->chandef;
 	struct ieee80211_sub_if_data *sdata = link->sdata;
 
-	mutex_lock(&sdata->local->mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
 	if (sdata->wdev.cac_started) {
 		ieee80211_link_release_channel(link);
 		cfg80211_cac_event(sdata->dev, &chandef,
 				   NL80211_RADAR_CAC_FINISHED,
 				   GFP_KERNEL);
 	}
-	mutex_unlock(&sdata->local->mtx);
 }
 
 static bool
@@ -2499,8 +2476,10 @@
 					 ac);
 			tx_tspec->action = TX_TSPEC_ACTION_NONE;
 			ret = true;
-			schedule_delayed_work(&ifmgd->tx_tspec_wk,
-				tx_tspec->time_slice_start + HZ - now + 1);
+			wiphy_delayed_work_queue(local->hw.wiphy,
+						 &ifmgd->tx_tspec_wk,
+						 tx_tspec->time_slice_start +
+						 HZ - now + 1);
 			break;
 		case TX_TSPEC_ACTION_NONE:
 			/* nothing now */
@@ -2518,7 +2497,8 @@
 						  BSS_CHANGED_QOS);
 }
 
-static void ieee80211_sta_handle_tspec_ac_params_wk(struct work_struct *work)
+static void ieee80211_sta_handle_tspec_ac_params_wk(struct wiphy *wiphy,
+						    struct wiphy_work *work)
 {
 	struct ieee80211_sub_if_data *sdata;
 
@@ -2650,7 +2630,7 @@
 		params[ac].aifs = pos[0] & 0x0f;
 
 		if (params[ac].aifs < 2) {
-			sdata_info(sdata,
+			link_info(link,
 				   "AP has invalid WMM params (AIFSN=%d for ACI %d), will use 2\n",
 				   params[ac].aifs, aci);
 			params[ac].aifs = 2;
@@ -2663,7 +2643,7 @@
 
 		if (params[ac].cw_min == 0 ||
 		    params[ac].cw_min > params[ac].cw_max) {
-			sdata_info(sdata,
+			link_info(link,
 				   "AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n",
 				   params[ac].cw_min, params[ac].cw_max, aci);
 			return false;
@@ -2674,7 +2654,7 @@
 	/* WMM specification requires all 4 ACIs. */
 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
 		if (params[ac].cw_min == 0) {
-			sdata_info(sdata,
+			link_info(link,
 				   "AP has invalid WMM params (missing AC %d), using defaults\n",
 				   ac);
 			return false;
@@ -2693,7 +2673,7 @@
 
 static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
 {
-	lockdep_assert_held(&sdata->local->mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	sdata->u.mgd.flags &= ~IEEE80211_STA_CONNECTION_POLL;
 	ieee80211_run_deferred_scan(sdata->local);
@@ -2701,17 +2681,17 @@
 
 static void ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
 {
-	mutex_lock(&sdata->local->mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
 	__ieee80211_stop_poll(sdata);
-	mutex_unlock(&sdata->local->mtx);
 }
 
-static u32 ieee80211_handle_bss_capability(struct ieee80211_link_data *link,
+static u64 ieee80211_handle_bss_capability(struct ieee80211_link_data *link,
 					   u16 capab, bool erp_valid, u8 erp)
 {
 	struct ieee80211_bss_conf *bss_conf = link->conf;
 	struct ieee80211_supported_band *sband;
-	u32 changed = 0;
+	u64 changed = 0;
 	bool use_protection;
 	bool use_short_preamble;
 	bool use_short_slot;
@@ -2757,7 +2737,7 @@
 	struct ieee80211_sub_if_data *sdata = link->sdata;
 	struct ieee80211_bss_conf *bss_conf = link->conf;
 	struct ieee80211_bss *bss = (void *)cbss->priv;
-	u32 changed = BSS_CHANGED_QOS;
+	u64 changed = BSS_CHANGED_QOS;
 
 	/* not really used in MLO */
 	sdata->u.mgd.beacon_timeout =
@@ -2821,6 +2801,8 @@
 	u64 vif_changed = BSS_CHANGED_ASSOC;
 	unsigned int link_id;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	sdata->u.mgd.associated = true;
 
 	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
@@ -2831,6 +2813,10 @@
 		    assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS)
 			continue;
 
+		if (ieee80211_vif_is_mld(&sdata->vif) &&
+		    !(ieee80211_vif_usable_links(&sdata->vif) & BIT(link_id)))
+			continue;
+
 		link = sdata_dereference(sdata->link[link_id], sdata);
 		if (WARN_ON(!link))
 			return;
@@ -2849,7 +2835,7 @@
 	if (vif_cfg->arp_addr_cnt)
 		vif_changed |= BSS_CHANGED_ARP_FILTER;
 
-	if (sdata->vif.valid_links) {
+	if (ieee80211_vif_is_mld(&sdata->vif)) {
 		for (link_id = 0;
 		     link_id < IEEE80211_MLD_MAX_NUM_LINKS;
 		     link_id++) {
@@ -2857,6 +2843,8 @@
 			struct cfg80211_bss *cbss = assoc_data->link[link_id].bss;
 
 			if (!cbss ||
+			    !(BIT(link_id) &
+			      ieee80211_vif_usable_links(&sdata->vif)) ||
 			    assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS)
 				continue;
 
@@ -2876,12 +2864,10 @@
 						 vif_changed | changed[0]);
 	}
 
-	mutex_lock(&local->iflist_mtx);
 	ieee80211_recalc_ps(local);
-	mutex_unlock(&local->iflist_mtx);
 
 	/* leave this here to not change ordering in non-MLO cases */
-	if (!sdata->vif.valid_links)
+	if (!ieee80211_vif_is_mld(&sdata->vif))
 		ieee80211_recalc_smps(sdata, &sdata->deflink);
 	ieee80211_recalc_ps_vif(sdata);
 
@@ -2895,12 +2881,12 @@
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct ieee80211_local *local = sdata->local;
 	unsigned int link_id;
-	u32 changed = 0;
+	u64 changed = 0;
 	struct ieee80211_prep_tx_info info = {
 		.subtype = stype,
 	};
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (WARN_ON_ONCE(tx && !frame_buf))
 		return;
@@ -2951,10 +2937,23 @@
 		 * deauthentication frame by calling mgd_prepare_tx, if the
 		 * driver requested so.
 		 */
-		if (ieee80211_hw_check(&local->hw, DEAUTH_NEED_MGD_TX_PREP) &&
-		    !sdata->deflink.u.mgd.have_beacon) {
+		if (ieee80211_hw_check(&local->hw, DEAUTH_NEED_MGD_TX_PREP)) {
+			for (link_id = 0; link_id < ARRAY_SIZE(sdata->link);
+			     link_id++) {
+				struct ieee80211_link_data *link;
+
+				link = sdata_dereference(sdata->link[link_id],
+							 sdata);
+				if (!link)
+					continue;
+				if (link->u.mgd.have_beacon)
+					break;
+			}
+			if (link_id == IEEE80211_MLD_MAX_NUM_LINKS) {
+				info.link_id = ffs(sdata->vif.active_links) - 1;
 			drv_mgd_prepare_tx(sdata->local, sdata, &info);
 		}
+		}
 
 		ieee80211_send_deauth_disassoc(sdata, sdata->vif.cfg.ap_addr,
 					       sdata->vif.cfg.ap_addr, stype,
@@ -2977,7 +2976,7 @@
 	sta_info_flush(sdata);
 
 	/* finally reset all BSS / config parameters */
-	if (!sdata->vif.valid_links)
+	if (!ieee80211_vif_is_mld(&sdata->vif))
 		changed |= ieee80211_reset_erp_info(sdata);
 
 	ieee80211_led_assoc(local, 0);
@@ -3002,21 +3001,21 @@
 	       sizeof(sdata->vif.bss_conf.mu_group.membership));
 	memset(sdata->vif.bss_conf.mu_group.position, 0,
 	       sizeof(sdata->vif.bss_conf.mu_group.position));
-	if (!sdata->vif.valid_links)
+	if (!ieee80211_vif_is_mld(&sdata->vif))
 		changed |= BSS_CHANGED_MU_GROUPS;
 	sdata->vif.bss_conf.mu_mimo_owner = false;
 
 	sdata->deflink.ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
 
 	del_timer_sync(&local->dynamic_ps_timer);
-	cancel_work_sync(&local->dynamic_ps_enable_work);
+	wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work);
 
 	/* Disable ARP filtering */
 	if (sdata->vif.cfg.arp_addr_cnt)
 		changed |= BSS_CHANGED_ARP_FILTER;
 
 	sdata->vif.bss_conf.qos = false;
-	if (!sdata->vif.valid_links) {
+	if (!ieee80211_vif_is_mld(&sdata->vif)) {
 		changed |= BSS_CHANGED_QOS;
 		/* The BSSID (not really interesting) and HT changed */
 		changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
@@ -3031,7 +3030,6 @@
 	del_timer_sync(&sdata->u.mgd.conn_mon_timer);
 	del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
 	del_timer_sync(&sdata->u.mgd.timer);
-	del_timer_sync(&sdata->deflink.u.mgd.chswitch_timer);
 
 	sdata->vif.bss_conf.dtim_period = 0;
 	sdata->vif.bss_conf.beacon_rate = NULL;
@@ -3042,7 +3040,6 @@
 
 	ifmgd->flags = 0;
 	sdata->deflink.u.mgd.conn_flags = 0;
-	mutex_lock(&local->mtx);
 
 	for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
 		struct ieee80211_link_data *link;
@@ -3061,18 +3058,20 @@
 					  IEEE80211_QUEUE_STOP_REASON_CSA);
 		sdata->deflink.csa_block_tx = false;
 	}
-	mutex_unlock(&local->mtx);
 
 	/* existing TX TSPEC sessions no longer exist */
 	memset(ifmgd->tx_tspec, 0, sizeof(ifmgd->tx_tspec));
-	cancel_delayed_work_sync(&ifmgd->tx_tspec_wk);
+	wiphy_delayed_work_cancel(local->hw.wiphy, &ifmgd->tx_tspec_wk);
 
 	sdata->vif.bss_conf.pwr_reduction = 0;
 	sdata->vif.bss_conf.tx_pwr_env_num = 0;
 	memset(sdata->vif.bss_conf.tx_pwr_env, 0,
 	       sizeof(sdata->vif.bss_conf.tx_pwr_env));
 
-	ieee80211_vif_set_links(sdata, 0);
+	memset(&sdata->u.mgd.ttlm_info, 0,
+	       sizeof(sdata->u.mgd.ttlm_info));
+	wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work);
+	ieee80211_vif_set_links(sdata, 0, 0);
 }
 
 static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
@@ -3080,18 +3079,17 @@
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct ieee80211_local *local = sdata->local;
 
-	mutex_lock(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!(ifmgd->flags & IEEE80211_STA_CONNECTION_POLL))
-		goto out;
+		return;
 
 	__ieee80211_stop_poll(sdata);
 
-	mutex_lock(&local->iflist_mtx);
 	ieee80211_recalc_ps(local);
-	mutex_unlock(&local->iflist_mtx);
 
 	if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
-		goto out;
+		return;
 
 	/*
 	 * We've received a probe response, but are not sure whether
@@ -3103,8 +3101,6 @@
 	mod_timer(&ifmgd->conn_mon_timer,
 		  round_jiffies_up(jiffies +
 				   IEEE80211_CONNECTION_IDLE_TIME));
-out:
-	mutex_unlock(&local->mtx);
 }
 
 static void ieee80211_sta_tx_wmm_ac_notify(struct ieee80211_sub_if_data *sdata,
@@ -3133,7 +3129,8 @@
 
 		if (tx_tspec->downgraded) {
 			tx_tspec->action = TX_TSPEC_ACTION_STOP_DOWNGRADE;
-			schedule_delayed_work(&ifmgd->tx_tspec_wk, 0);
+			wiphy_delayed_work_queue(sdata->local->hw.wiphy,
+						 &ifmgd->tx_tspec_wk, 0);
 		}
 	}
 
@@ -3145,7 +3142,8 @@
 	if (tx_tspec->consumed_tx_time >= tx_tspec->admitted_time) {
 		tx_tspec->downgraded = true;
 		tx_tspec->action = TX_TSPEC_ACTION_DOWNGRADE;
-		schedule_delayed_work(&ifmgd->tx_tspec_wk, 0);
+		wiphy_delayed_work_queue(sdata->local->hw.wiphy,
+					 &ifmgd->tx_tspec_wk, 0);
 	}
 }
 
@@ -3162,7 +3160,7 @@
 		sdata->u.mgd.probe_send_count = 0;
 	else
 		sdata->u.mgd.nullfunc_failed = true;
-	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+	wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
 }
 
 static void ieee80211_mlme_send_probe_req(struct ieee80211_sub_if_data *sdata,
@@ -3186,7 +3184,9 @@
 	u8 unicast_limit = max(1, max_probe_tries - 3);
 	struct sta_info *sta;
 
-	if (WARN_ON(sdata->vif.valid_links))
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
+	if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif)))
 		return;
 
 	/*
@@ -3207,11 +3207,9 @@
 	ifmgd->probe_send_count++;
 
 	if (dst) {
-		mutex_lock(&sdata->local->sta_mtx);
 		sta = sta_info_get(sdata, dst);
 		if (!WARN_ON(!sta))
 			ieee80211_check_fast_rx(sta);
-		mutex_unlock(&sdata->local->sta_mtx);
 	}
 
 	if (ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) {
@@ -3234,29 +3232,24 @@
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	bool already = false;
 
-	if (WARN_ON_ONCE(sdata->vif.valid_links))
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
+	if (WARN_ON_ONCE(ieee80211_vif_is_mld(&sdata->vif)))
 		return;
 
 	if (!ieee80211_sdata_running(sdata))
 		return;
 
-	sdata_lock(sdata);
-
 	if (!ifmgd->associated)
-		goto out;
-
-	mutex_lock(&sdata->local->mtx);
+		return;
 
-	if (sdata->local->tmp_channel || sdata->local->scanning) {
-		mutex_unlock(&sdata->local->mtx);
-		goto out;
-	}
+	if (sdata->local->tmp_channel || sdata->local->scanning)
+		return;
 
 	if (sdata->local->suspending) {
 		/* reschedule after resume */
-		mutex_unlock(&sdata->local->mtx);
 		ieee80211_reset_ap_probe(sdata);
-		goto out;
+		return;
 	}
 
 	if (beacon) {
@@ -3283,19 +3276,13 @@
 
 	ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL;
 
-	mutex_unlock(&sdata->local->mtx);
-
 	if (already)
-		goto out;
+		return;
 
-	mutex_lock(&sdata->local->iflist_mtx);
 	ieee80211_recalc_ps(sdata->local);
-	mutex_unlock(&sdata->local->iflist_mtx);
 
 	ifmgd->probe_send_count = 0;
 	ieee80211_mgd_probe_ap_send(sdata);
- out:
-	sdata_unlock(sdata);
 }
 
 struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
@@ -3308,12 +3295,12 @@
 	const struct element *ssid;
 	int ssid_len;
 
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
 	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION ||
-		    sdata->vif.valid_links))
+		    ieee80211_vif_is_mld(&sdata->vif)))
 		return NULL;
 
-	sdata_assert_lock(sdata);
-
 	if (ifmgd->associated)
 		cbss = sdata->deflink.u.mgd.bss;
 	else if (ifmgd->auth_data)
@@ -3367,14 +3354,14 @@
 	u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 	bool tx;
 
-	sdata_lock(sdata);
-	if (!ifmgd->associated) {
-		sdata_unlock(sdata);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
+	if (!ifmgd->associated)
 		return;
-	}
 
 	/* in MLO assume we have a link where we can TX the frame */
-	tx = sdata->vif.valid_links || !sdata->deflink.csa_block_tx;
+	tx = ieee80211_vif_is_mld(&sdata->vif) ||
+		!sdata->deflink.csa_block_tx;
 
 	if (!ifmgd->driver_disconnect) {
 		unsigned int link_id;
@@ -3404,7 +3391,6 @@
 					WLAN_REASON_DEAUTH_LEAVING :
 					WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
 			       tx, frame_buf);
-	mutex_lock(&local->mtx);
 	/* the other links will be destroyed */
 	sdata->vif.bss_conf.csa_active = false;
 	sdata->deflink.u.mgd.csa_waiting_bcn = false;
@@ -3413,17 +3399,15 @@
 					  IEEE80211_QUEUE_STOP_REASON_CSA);
 		sdata->deflink.csa_block_tx = false;
 	}
-	mutex_unlock(&local->mtx);
 
 	ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
 				    WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
 				    ifmgd->reconnect);
 	ifmgd->reconnect = false;
-
-	sdata_unlock(sdata);
 }
 
-static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
+static void ieee80211_beacon_connection_loss_work(struct wiphy *wiphy,
+						  struct wiphy_work *work)
 {
 	struct ieee80211_sub_if_data *sdata =
 		container_of(work, struct ieee80211_sub_if_data,
@@ -3448,7 +3432,8 @@
 	}
 }
 
-static void ieee80211_csa_connection_drop_work(struct work_struct *work)
+static void ieee80211_csa_connection_drop_work(struct wiphy *wiphy,
+					       struct wiphy_work *work)
 {
 	struct ieee80211_sub_if_data *sdata =
 		container_of(work, struct ieee80211_sub_if_data,
@@ -3465,7 +3450,7 @@
 	trace_api_beacon_loss(sdata);
 
 	sdata->u.mgd.connection_loss = false;
-	ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
+	wiphy_work_queue(hw->wiphy, &sdata->u.mgd.beacon_connection_loss_work);
 }
 EXPORT_SYMBOL(ieee80211_beacon_loss);
 
@@ -3477,7 +3462,7 @@
 	trace_api_connection_loss(sdata);
 
 	sdata->u.mgd.connection_loss = true;
-	ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
+	wiphy_work_queue(hw->wiphy, &sdata->u.mgd.beacon_connection_loss_work);
 }
 EXPORT_SYMBOL(ieee80211_connection_loss);
 
@@ -3493,7 +3478,7 @@
 
 	sdata->u.mgd.driver_disconnect = true;
 	sdata->u.mgd.reconnect = reconnect;
-	ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
+	wiphy_work_queue(hw->wiphy, &sdata->u.mgd.beacon_connection_loss_work);
 }
 EXPORT_SYMBOL(ieee80211_disconnect);
 
@@ -3502,7 +3487,7 @@
 {
 	struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (!assoc) {
 		/*
@@ -3520,10 +3505,8 @@
 						  BSS_CHANGED_BSSID);
 		sdata->u.mgd.flags = 0;
 
-		mutex_lock(&sdata->local->mtx);
 		ieee80211_link_release_channel(&sdata->deflink);
-		ieee80211_vif_set_links(sdata, 0);
-		mutex_unlock(&sdata->local->mtx);
+		ieee80211_vif_set_links(sdata, 0, 0);
 	}
 
 	cfg80211_put_bss(sdata->local->hw.wiphy, auth_data->bss);
@@ -3543,7 +3526,7 @@
 {
 	struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (status != ASSOC_SUCCESS) {
 		/*
@@ -3573,16 +3556,14 @@
 			for (i = 0; i < ARRAY_SIZE(data.bss); i++)
 				data.bss[i] = assoc_data->link[i].bss;
 
-			if (sdata->vif.valid_links)
+			if (ieee80211_vif_is_mld(&sdata->vif))
 				data.ap_mld_addr = assoc_data->ap_addr;
 
 			cfg80211_assoc_failure(sdata->dev, &data);
 		}
 
-		mutex_lock(&sdata->local->mtx);
 		ieee80211_link_release_channel(&sdata->deflink);
-		ieee80211_vif_set_links(sdata, 0);
-		mutex_unlock(&sdata->local->mtx);
+		ieee80211_vif_set_links(sdata, 0, 0);
 	}
 
 	kfree(assoc_data);
@@ -3599,6 +3580,7 @@
 	u32 tx_flags = 0;
 	struct ieee80211_prep_tx_info info = {
 		.subtype = IEEE80211_STYPE_AUTH,
+		.link_id = auth_data->link_id,
 	};
 
 	pos = mgmt->u.auth.variable;
@@ -3624,7 +3606,8 @@
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	const u8 *ap_addr = ifmgd->auth_data->ap_addr;
 	struct sta_info *sta;
-	bool result = true;
+
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	sdata_info(sdata, "authenticated\n");
 	ifmgd->auth_data->done = true;
@@ -3633,22 +3616,17 @@
 	run_again(sdata, ifmgd->auth_data->timeout);
 
 	/* move station state to auth */
-	mutex_lock(&sdata->local->sta_mtx);
 	sta = sta_info_get(sdata, ap_addr);
 	if (!sta) {
 		WARN_ONCE(1, "%s: STA %pM not found", sdata->name, ap_addr);
-		result = false;
-		goto out;
+		return false;
 	}
 	if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
 		sdata_info(sdata, "failed moving %pM to auth\n", ap_addr);
-		result = false;
-		goto out;
+		return false;
 	}
 
-out:
-	mutex_unlock(&sdata->local->sta_mtx);
-	return result;
+	return true;
 }
 
 static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
@@ -3664,7 +3642,7 @@
 		.subtype = IEEE80211_STYPE_AUTH,
 	};
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (len < 24 + 6)
 		return;
@@ -3822,7 +3800,7 @@
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (len < 24 + 2)
 		return;
@@ -3866,7 +3844,7 @@
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	u16 reason_code;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (len < 24 + 2)
 		return;
@@ -3896,8 +3874,7 @@
 				u8 *supp_rates, unsigned int supp_rates_len,
 				u32 *rates, u32 *basic_rates,
 				bool *have_higher_than_11mbit,
-				int *min_rate, int *min_rate_index,
-				int shift)
+				int *min_rate, int *min_rate_index)
 {
 	int i, j;
 
@@ -3905,12 +3882,12 @@
 		int rate = supp_rates[i] & 0x7f;
 		bool is_basic = !!(supp_rates[i] & 0x80);
 
-		if ((rate * 5 * (1 << shift)) > 110)
+		if ((rate * 5) > 110)
 			*have_higher_than_11mbit = true;
 
 		/*
-		 * Skip HT, VHT, HE and SAE H2E only BSS membership selectors
-		 * since they're not rates.
+		 * Skip HT, VHT, HE, EHT and SAE H2E only BSS membership
+		 * selectors since they're not rates.
 		 *
 		 * Note: Even though the membership selector and the basic
 		 *	 rate flag share the same bit, they are not exactly
@@ -3919,6 +3896,7 @@
 		if (supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HT_PHY) ||
 		    supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_VHT_PHY) ||
 		    supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HE_PHY) ||
+		    supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_EHT_PHY) ||
 		    supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_SAE_H2E))
 			continue;
 
@@ -3928,7 +3906,7 @@
 
 			br = &sband->bitrates[j];
 
-			brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5);
+			brate = DIV_ROUND_UP(br->bitrate, 5);
 			if (brate == rate) {
 				*rates |= BIT(j);
 				if (is_basic)
@@ -3949,8 +3927,7 @@
 					const struct ieee802_11_elems *elems)
 {
 	const struct ieee80211_sta_he_cap *own_he_cap =
-		ieee80211_get_he_iftype_cap(sband,
-					    ieee80211_vif_type_p2p(&sdata->vif));
+		ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
 
 	if (elems->ext_capab_len < 10)
 		return false;
@@ -3965,7 +3942,7 @@
 			IEEE80211_HE_MAC_CAP0_TWT_REQ);
 }
 
-static int ieee80211_recalc_twt_req(struct ieee80211_sub_if_data *sdata,
+static u64 ieee80211_recalc_twt_req(struct ieee80211_sub_if_data *sdata,
 				    struct ieee80211_supported_band *sband,
 				    struct ieee80211_link_data *link,
 				    struct link_sta_info *link_sta,
@@ -3986,8 +3963,7 @@
 					struct link_sta_info *link_sta)
 {
 	const struct ieee80211_sta_he_cap *own_he_cap =
-		ieee80211_get_he_iftype_cap(sband,
-					    ieee80211_vif_type_p2p(&sdata->vif));
+		ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
 
 	return bss_conf->he_support &&
 		(link_sta->pub->he_cap.he_cap_elem.mac_cap_info[2] &
@@ -4021,6 +3997,8 @@
 	const struct cfg80211_bss_ies *bss_ies = NULL;
 	struct ieee80211_supported_band *sband;
 	struct ieee802_11_elems *elems;
+	const __le16 prof_bss_param_ch_present =
+		cpu_to_le16(IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT);
 	u16 capab_info;
 	bool ret;
 
@@ -4036,7 +4014,17 @@
 		 * successful, so set the status directly to success
 		 */
 		assoc_data->link[link_id].status = WLAN_STATUS_SUCCESS;
-	} else if (!elems->prof) {
+		if (elems->ml_basic) {
+			if (!(elems->ml_basic->control &
+					cpu_to_le16(IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT))) {
+				ret = false;
+				goto out;
+			}
+			link->u.mgd.bss_param_ch_cnt =
+				ieee80211_mle_get_bss_param_ch_cnt(elems->ml_basic);
+		}
+	} else if (!elems->prof ||
+		   !(elems->prof->control & prof_bss_param_ch_present)) {
 		ret = false;
 		goto out;
 	} else {
@@ -4049,6 +4037,8 @@
 		 */
 		capab_info = get_unaligned_le16(ptr);
 		assoc_data->link[link_id].status = get_unaligned_le16(ptr + 2);
+		link->u.mgd.bss_param_ch_cnt =
+			ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(elems->prof);
 
 		if (assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS) {
 			link_info(link, "association response status code=%u\n",
@@ -4191,10 +4181,33 @@
 						  elems->ht_cap_elem,
 						  link_sta);
 
-	if (elems->vht_cap_elem && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT))
+	if (elems->vht_cap_elem &&
+	    !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
+		const struct ieee80211_vht_cap *bss_vht_cap = NULL;
+		const struct cfg80211_bss_ies *ies;
+
+		/*
+		 * Cisco AP module 9115 with FW 17.3 has a bug and sends a
+		 * too large maximum MPDU length in the association response
+		 * (indicating 12k) that it cannot actually process ...
+		 * Work around that.
+		 */
+		rcu_read_lock();
+		ies = rcu_dereference(cbss->ies);
+		if (ies) {
+			const struct element *elem;
+
+			elem = cfg80211_find_elem(WLAN_EID_VHT_CAPABILITY,
+						  ies->data, ies->len);
+			if (elem && elem->datalen >= sizeof(*bss_vht_cap))
+				bss_vht_cap = (const void *)elem->data;
+		}
+
 		ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
 						    elems->vht_cap_elem,
-						    link_sta);
+						    bss_vht_cap, link_sta);
+		rcu_read_unlock();
+	}
 
 	if (elems->he_operation && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
 	    elems->he_cap) {
@@ -4360,8 +4373,6 @@
 	u32 rates = 0, basic_rates = 0;
 	bool have_higher_than_11mbit = false;
 	int min_rate = INT_MAX, min_rate_index = -1;
-	/* this is clearly wrong for MLO but we'll just remove it later */
-	int shift = ieee80211_vif_get_shift(&sdata->vif);
 	struct ieee80211_supported_band *sband;
 
 	memcpy(link_sta->addr, cbss->bssid, ETH_ALEN);
@@ -4377,7 +4388,7 @@
 
 	ieee80211_get_rates(sband, bss->supp_rates, bss->supp_rates_len,
 			    &rates, &basic_rates, &have_higher_than_11mbit,
-			    &min_rate, &min_rate_index, shift);
+			    &min_rate, &min_rate_index);
 
 	/*
 	 * This used to be a workaround for basic rates missing
@@ -4624,8 +4635,7 @@
 				    const struct ieee80211_he_operation *he_op)
 {
 	const struct ieee80211_sta_he_cap *sta_he_cap =
-		ieee80211_get_he_iftype_cap(sband,
-					    ieee80211_vif_type_p2p(&sdata->vif));
+		ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
 	u16 ap_min_req_set;
 	int i;
 
@@ -4698,9 +4708,93 @@
 	return false;
 }
 
+static u8
+ieee80211_get_eht_cap_mcs_nss(const struct ieee80211_sta_he_cap *sta_he_cap,
+			      const struct ieee80211_sta_eht_cap *sta_eht_cap,
+			      unsigned int idx, int bw)
+{
+	u8 he_phy_cap0 = sta_he_cap->he_cap_elem.phy_cap_info[0];
+	u8 eht_phy_cap0 = sta_eht_cap->eht_cap_elem.phy_cap_info[0];
+
+	/* handle us being a 20 MHz-only EHT STA - with four values
+	 * for MCS 0-7, 8-9, 10-11, 12-13.
+	 */
+	if (!(he_phy_cap0 & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL))
+		return sta_eht_cap->eht_mcs_nss_supp.only_20mhz.rx_tx_max_nss[idx];
+
+	/* the others have MCS 0-9 together, rather than separately from 0-7 */
+	if (idx > 0)
+		idx--;
+
+	switch (bw) {
+	case 0:
+		return sta_eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_max_nss[idx];
+	case 1:
+		if (!(he_phy_cap0 &
+		      (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+		       IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)))
+			return 0xff; /* pass check */
+		return sta_eht_cap->eht_mcs_nss_supp.bw._160.rx_tx_max_nss[idx];
+	case 2:
+		if (!(eht_phy_cap0 & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ))
+			return 0xff; /* pass check */
+		return sta_eht_cap->eht_mcs_nss_supp.bw._320.rx_tx_max_nss[idx];
+	}
+
+	WARN_ON(1);
+	return 0;
+}
+
+static bool
+ieee80211_verify_sta_eht_mcs_support(struct ieee80211_sub_if_data *sdata,
+				     struct ieee80211_supported_band *sband,
+				     const struct ieee80211_eht_operation *eht_op)
+{
+	const struct ieee80211_sta_he_cap *sta_he_cap =
+		ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+	const struct ieee80211_sta_eht_cap *sta_eht_cap =
+		ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
+	const struct ieee80211_eht_mcs_nss_supp_20mhz_only *req;
+	unsigned int i;
+
+	if (!sta_he_cap || !sta_eht_cap || !eht_op)
+		return false;
+
+	req = &eht_op->basic_mcs_nss;
+
+	for (i = 0; i < ARRAY_SIZE(req->rx_tx_max_nss); i++) {
+		u8 req_rx_nss, req_tx_nss;
+		unsigned int bw;
+
+		req_rx_nss = u8_get_bits(req->rx_tx_max_nss[i],
+					 IEEE80211_EHT_MCS_NSS_RX);
+		req_tx_nss = u8_get_bits(req->rx_tx_max_nss[i],
+					 IEEE80211_EHT_MCS_NSS_TX);
+
+		for (bw = 0; bw < 3; bw++) {
+			u8 have, have_rx_nss, have_tx_nss;
+
+			have = ieee80211_get_eht_cap_mcs_nss(sta_he_cap,
+							     sta_eht_cap,
+							     i, bw);
+			have_rx_nss = u8_get_bits(have,
+						  IEEE80211_EHT_MCS_NSS_RX);
+			have_tx_nss = u8_get_bits(have,
+						  IEEE80211_EHT_MCS_NSS_TX);
+
+			if (req_rx_nss > have_rx_nss ||
+			    req_tx_nss > have_tx_nss)
+				return false;
+		}
+	}
+
+	return true;
+}
+
 static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
 				  struct ieee80211_link_data *link,
 				  struct cfg80211_bss *cbss,
+				  bool mlo,
 				  ieee80211_conn_flags_t *conn_flags)
 {
 	struct ieee80211_local *local = sdata->local;
@@ -4714,9 +4808,9 @@
 	struct cfg80211_chan_def chandef;
 	bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
 	bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ;
+	bool supports_mlo = false;
 	struct ieee80211_bss *bss = (void *)cbss->priv;
 	struct ieee80211_elems_parse_params parse_params = {
-		.bss = cbss,
 		.link_id = -1,
 		.from_ap = true,
 	};
@@ -4726,6 +4820,8 @@
 	u32 i;
 	bool have_80mhz;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	rcu_read_lock();
 
 	ies = rcu_dereference(cbss->ies);
@@ -4759,15 +4855,13 @@
 		*conn_flags |= IEEE80211_CONN_DISABLE_EHT;
 	}
 
-	if (!ieee80211_get_he_iftype_cap(sband,
-					 ieee80211_vif_type_p2p(&sdata->vif))) {
+	if (!ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif)) {
 		mlme_dbg(sdata, "HE not supported, disabling HE and EHT\n");
 		*conn_flags |= IEEE80211_CONN_DISABLE_HE;
 		*conn_flags |= IEEE80211_CONN_DISABLE_EHT;
 	}
 
-	if (!ieee80211_get_eht_iftype_cap(sband,
-					  ieee80211_vif_type_p2p(&sdata->vif))) {
+	if (!ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif)) {
 		mlme_dbg(sdata, "EHT not supported, disabling EHT\n");
 		*conn_flags |= IEEE80211_CONN_DISABLE_EHT;
 	}
@@ -4844,6 +4938,7 @@
 			 IEEE80211_CONN_DISABLE_EHT)) &&
 	    he_oper) {
 		const struct cfg80211_bss_ies *cbss_ies;
+		const struct element *eht_ml_elem;
 		const u8 *eht_oper_ie;
 
 		cbss_ies = rcu_dereference(cbss->ies);
@@ -4854,6 +4949,26 @@
 			eht_oper = (void *)(eht_oper_ie + 3);
 		else
 			eht_oper = NULL;
+
+		if (!ieee80211_verify_sta_eht_mcs_support(sdata, sband, eht_oper))
+			*conn_flags |= IEEE80211_CONN_DISABLE_EHT;
+
+		eht_ml_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK,
+						     cbss_ies->data, cbss_ies->len);
+
+		/* data + 1 / datalen - 1 since it's an extended element */
+		if (!(*conn_flags & IEEE80211_CONN_DISABLE_EHT) &&
+		    eht_ml_elem &&
+		    ieee80211_mle_type_ok(eht_ml_elem->data + 1,
+					  IEEE80211_ML_CONTROL_TYPE_BASIC,
+					  eht_ml_elem->datalen - 1)) {
+			supports_mlo = true;
+
+			sdata->vif.cfg.eml_cap =
+				ieee80211_mle_get_eml_cap(eht_ml_elem->data + 1);
+			sdata->vif.cfg.eml_med_sync_delay =
+				ieee80211_mle_get_eml_med_sync_delay(eht_ml_elem->data + 1);
+		}
 	}
 
 	/* Allow VHT if at least one channel on the sband supports 80 MHz */
@@ -4904,13 +5019,17 @@
 		return -EINVAL;
 	}
 
+	if (mlo && !supports_mlo) {
+		sdata_info(sdata, "Rejecting MLO as it is not supported by AP\n");
+		return -EINVAL;
+	}
+
 	if (!link)
 		return 0;
 
 	/* will change later if needed */
 	link->smps_mode = IEEE80211_SMPS_OFF;
 
-	mutex_lock(&local->mtx);
 	/*
 	 * If this fails (possibly due to channel context sharing
 	 * on incompatible channels, e.g. 80+80 and 160 sharing the
@@ -4931,7 +5050,6 @@
 						 IEEE80211_CHANCTX_SHARED);
 	}
  out:
-	mutex_unlock(&local->mtx);
 	return ret;
 }
 
@@ -4980,10 +5098,10 @@
 	unsigned int link_id;
 	struct sta_info *sta;
 	u64 changed[IEEE80211_MLD_MAX_NUM_LINKS] = {};
-	u16 valid_links = 0;
+	u16 valid_links = 0, dormant_links = 0;
 	int err;
 
-	mutex_lock(&sdata->local->sta_mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 	/*
 	 * station info was already allocated and inserted before
 	 * the association and should be available to us
@@ -4992,11 +5110,14 @@
 	if (WARN_ON(!sta))
 		goto out_err;
 
-	if (sdata->vif.valid_links) {
+	if (ieee80211_vif_is_mld(&sdata->vif)) {
 		for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
 			if (!assoc_data->link[link_id].bss)
 				continue;
+
 			valid_links |= BIT(link_id);
+			if (assoc_data->link[link_id].disabled)
+				dormant_links |= BIT(link_id);
 
 			if (link_id != assoc_data->assoc_link_id) {
 				err = ieee80211_sta_allocate_link(sta, link_id);
@@ -5005,7 +5126,7 @@
 			}
 		}
 
-		ieee80211_vif_set_links(sdata, valid_links);
+		ieee80211_vif_set_links(sdata, valid_links, dormant_links);
 	}
 
 	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
@@ -5020,7 +5141,7 @@
 		if (WARN_ON(!link))
 			goto out_err;
 
-		if (sdata->vif.valid_links)
+		if (ieee80211_vif_is_mld(&sdata->vif))
 			link_info(link,
 				  "local address %pM, AP link address %pM%s\n",
 				  link->conf->addr,
@@ -5029,7 +5150,7 @@
 					" (assoc)" : "");
 
 		link_sta = rcu_dereference_protected(sta->link[link_id],
-						     lockdep_is_held(&local->sta_mtx));
+						     lockdep_is_held(&local->hw.wiphy->mtx));
 		if (WARN_ON(!link_sta))
 			goto out_err;
 
@@ -5052,7 +5173,7 @@
 		link->conf->dtim_period = link->u.mgd.dtim_period ?: 1;
 
 		if (link_id != assoc_data->assoc_link_id) {
-			err = ieee80211_prep_channel(sdata, link, cbss,
+			err = ieee80211_prep_channel(sdata, link, cbss, true,
 						     &link->u.mgd.conn_flags);
 			if (err) {
 				link_info(link, "prep_channel failed\n");
@@ -5085,7 +5206,7 @@
 	}
 
 	/* links might have changed due to rejected ones, set them again */
-	ieee80211_vif_set_links(sdata, valid_links);
+	ieee80211_vif_set_links(sdata, valid_links, dormant_links);
 
 	rate_control_rate_init(sta);
 
@@ -5116,8 +5237,6 @@
 	if (sdata->wdev.use_4addr)
 		drv_sta_set_4addr(local, sdata, &sta->sta, true);
 
-	mutex_unlock(&sdata->local->sta_mtx);
-
 	ieee80211_set_associated(sdata, assoc_data, changed);
 
 	/*
@@ -5137,7 +5256,6 @@
 	return true;
 out_err:
 	eth_zero_addr(sdata->vif.cfg.ap_addr);
-	mutex_unlock(&sdata->local->sta_mtx);
 	return false;
 }
 
@@ -5163,13 +5281,13 @@
 		.u.mlme.data = ASSOC_EVENT,
 	};
 	struct ieee80211_prep_tx_info info = {};
-	struct cfg80211_rx_assoc_resp resp = {
+	struct cfg80211_rx_assoc_resp_data resp = {
 		.uapsd_queues = -1,
 	};
 	u8 ap_mld_addr[ETH_ALEN] __aligned(2);
 	unsigned int link_id;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (!assoc_data)
 		return;
@@ -5269,25 +5387,25 @@
 			ifmgd->broken_ap = true;
 		}
 
-		if (sdata->vif.valid_links) {
-			if (!elems->multi_link) {
+		if (ieee80211_vif_is_mld(&sdata->vif)) {
+			if (!elems->ml_basic) {
 				sdata_info(sdata,
 					   "MLO association with %pM but no multi-link element in response!\n",
 					   assoc_data->ap_addr);
 				goto abandon_assoc;
 			}
 
-			if (le16_get_bits(elems->multi_link->control,
+			if (le16_get_bits(elems->ml_basic->control,
 					  IEEE80211_ML_CONTROL_TYPE) !=
 					IEEE80211_ML_CONTROL_TYPE_BASIC) {
 				sdata_info(sdata,
 					   "bad multi-link element (control=0x%x)\n",
-					   le16_to_cpu(elems->multi_link->control));
+					   le16_to_cpu(elems->ml_basic->control));
 				goto abandon_assoc;
 			} else {
 				struct ieee80211_mle_basic_common_info *common;
 
-				common = (void *)elems->multi_link->variable;
+				common = (void *)elems->ml_basic->variable;
 
 				if (memcmp(assoc_data->ap_addr,
 					   common->mld_mac_addr, ETH_ALEN)) {
@@ -5318,17 +5436,18 @@
 	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
 		struct ieee80211_link_data *link;
 
-		link = sdata_dereference(sdata->link[link_id], sdata);
-		if (!link)
-			continue;
-
 		if (!assoc_data->link[link_id].bss)
 			continue;
 
 		resp.links[link_id].bss = assoc_data->link[link_id].bss;
-		resp.links[link_id].addr = link->conf->addr;
+		ether_addr_copy(resp.links[link_id].addr,
+				assoc_data->link[link_id].addr);
 		resp.links[link_id].status = assoc_data->link[link_id].status;
 
+		link = sdata_dereference(sdata->link[link_id], sdata);
+		if (!link)
+			continue;
+
 		/* get uapsd queues configuration - same for all links */
 		resp.uapsd_queues = 0;
 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
@@ -5336,7 +5455,7 @@
 				resp.uapsd_queues |= ieee80211_ac_to_qos_mask[ac];
 	}
 
-	if (sdata->vif.valid_links) {
+	if (ieee80211_vif_is_mld(&sdata->vif)) {
 		ether_addr_copy(ap_mld_addr, sdata->vif.cfg.ap_addr);
 		resp.ap_mld_addr = ap_mld_addr;
 	}
@@ -5369,7 +5488,7 @@
 	struct ieee80211_bss *bss;
 	struct ieee80211_channel *channel;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	channel = ieee80211_get_channel_khz(local->hw.wiphy,
 					ieee80211_rx_status_to_khz(rx_status));
@@ -5396,7 +5515,7 @@
 
 	ifmgd = &sdata->u.mgd;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	/*
 	 * According to Draft P802.11ax D6.0 clause 26.17.2.3.2:
@@ -5598,6 +5717,351 @@
 	return true;
 }
 
+static void ieee80211_ml_reconf_work(struct wiphy *wiphy,
+				     struct wiphy_work *work)
+{
+	struct ieee80211_sub_if_data *sdata =
+		container_of(work, struct ieee80211_sub_if_data,
+			     u.mgd.ml_reconf_work.work);
+	u16 new_valid_links, new_active_links, new_dormant_links;
+	int ret;
+
+	if (!sdata->u.mgd.removed_links)
+		return;
+
+	sdata_info(sdata,
+		   "MLO Reconfiguration: work: valid=0x%x, removed=0x%x\n",
+		   sdata->vif.valid_links, sdata->u.mgd.removed_links);
+
+	new_valid_links = sdata->vif.valid_links & ~sdata->u.mgd.removed_links;
+	if (new_valid_links == sdata->vif.valid_links)
+		return;
+
+	if (!new_valid_links ||
+	    !(new_valid_links & ~sdata->vif.dormant_links)) {
+		sdata_info(sdata, "No valid links after reconfiguration\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	new_active_links = sdata->vif.active_links & ~sdata->u.mgd.removed_links;
+	if (new_active_links != sdata->vif.active_links) {
+		if (!new_active_links)
+			new_active_links =
+				BIT(ffs(new_valid_links &
+					~sdata->vif.dormant_links) - 1);
+
+		ret = ieee80211_set_active_links(&sdata->vif, new_active_links);
+		if (ret) {
+			sdata_info(sdata,
+				   "Failed setting active links\n");
+			goto out;
+		}
+	}
+
+	new_dormant_links = sdata->vif.dormant_links & ~sdata->u.mgd.removed_links;
+
+	ret = ieee80211_vif_set_links(sdata, new_valid_links,
+				      new_dormant_links);
+	if (ret)
+		sdata_info(sdata, "Failed setting valid links\n");
+
+	ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_MLD_VALID_LINKS);
+
+out:
+	if (!ret)
+		cfg80211_links_removed(sdata->dev, sdata->u.mgd.removed_links);
+	else
+		__ieee80211_disconnect(sdata);
+
+	sdata->u.mgd.removed_links = 0;
+}
+
+static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
+					 struct ieee802_11_elems *elems)
+{
+	const struct ieee80211_multi_link_elem *ml;
+	const struct element *sub;
+	size_t ml_len;
+	unsigned long removed_links = 0;
+	u16 link_removal_timeout[IEEE80211_MLD_MAX_NUM_LINKS] = {};
+	u8 link_id;
+	u32 delay;
+
+	if (!ieee80211_vif_is_mld(&sdata->vif) || !elems->ml_reconf)
+		return;
+
+	ml_len = cfg80211_defragment_element(elems->ml_reconf_elem,
+					     elems->ie_start,
+					     elems->total_len,
+					     elems->scratch_pos,
+					     elems->scratch + elems->scratch_len -
+					     elems->scratch_pos,
+					     WLAN_EID_FRAGMENT);
+
+	elems->ml_reconf = (const void *)elems->scratch_pos;
+	elems->ml_reconf_len = ml_len;
+	ml = elems->ml_reconf;
+
+	/* Directly parse the sub elements as the common information doesn't
+	 * hold any useful information.
+	 */
+	for_each_mle_subelement(sub, (u8 *)ml, ml_len) {
+		struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data;
+		u8 *pos = prof->variable;
+		u16 control;
+
+		if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE)
+			continue;
+
+		if (!ieee80211_mle_reconf_sta_prof_size_ok(sub->data,
+							   sub->datalen))
+			return;
+
+		control = le16_to_cpu(prof->control);
+		link_id = control & IEEE80211_MLE_STA_RECONF_CONTROL_LINK_ID;
+
+		removed_links |= BIT(link_id);
+
+		/* the MAC address should not be included, but handle it */
+		if (control &
+		    IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT)
+			pos += 6;
+
+		/* According to Draft P802.11be_D3.0, the control should
+		 * include the AP Removal Timer present. If the AP Removal Timer
+		 * is not present assume immediate removal.
+		 */
+		if (control &
+		    IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT)
+			link_removal_timeout[link_id] = le16_to_cpu(*(__le16 *)pos);
+	}
+
+	removed_links &= sdata->vif.valid_links;
+	if (!removed_links) {
+		/* In case the removal was cancelled, abort it */
+		if (sdata->u.mgd.removed_links) {
+			sdata->u.mgd.removed_links = 0;
+			wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+						  &sdata->u.mgd.ml_reconf_work);
+		}
+		return;
+	}
+
+	delay = 0;
+	for_each_set_bit(link_id, &removed_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		struct ieee80211_bss_conf *link_conf =
+			sdata_dereference(sdata->vif.link_conf[link_id], sdata);
+		u32 link_delay;
+
+		if (!link_conf) {
+			removed_links &= ~BIT(link_id);
+			continue;
+		}
+
+		link_delay = link_conf->beacon_int *
+			link_removal_timeout[link_id];
+
+		if (!delay)
+			delay = link_delay;
+		else
+			delay = min(delay, link_delay);
+	}
+
+	sdata->u.mgd.removed_links = removed_links;
+	wiphy_delayed_work_queue(sdata->local->hw.wiphy,
+				 &sdata->u.mgd.ml_reconf_work,
+				 TU_TO_JIFFIES(delay));
+}
+
+static void ieee80211_tid_to_link_map_work(struct wiphy *wiphy,
+					   struct wiphy_work *work)
+{
+	u16 new_active_links, new_dormant_links;
+	struct ieee80211_sub_if_data *sdata =
+		container_of(work, struct ieee80211_sub_if_data,
+			     u.mgd.ttlm_work.work);
+	int ret;
+
+	new_active_links = sdata->u.mgd.ttlm_info.map &
+			   sdata->vif.valid_links;
+	new_dormant_links = ~sdata->u.mgd.ttlm_info.map &
+			    sdata->vif.valid_links;
+	if (!new_active_links) {
+		ieee80211_disconnect(&sdata->vif, false);
+		return;
+	}
+
+	ieee80211_vif_set_links(sdata, sdata->vif.valid_links, 0);
+	new_active_links = BIT(ffs(new_active_links) - 1);
+	ieee80211_set_active_links(&sdata->vif, new_active_links);
+
+	ret = ieee80211_vif_set_links(sdata, sdata->vif.valid_links,
+				      new_dormant_links);
+
+	sdata->u.mgd.ttlm_info.active = true;
+	sdata->u.mgd.ttlm_info.switch_time = 0;
+
+	if (!ret)
+		ieee80211_vif_cfg_change_notify(sdata,
+						BSS_CHANGED_MLD_VALID_LINKS);
+}
+
+static u16 ieee80211_get_ttlm(u8 bm_size, u8 *data)
+{
+	if (bm_size == 1)
+		return *data;
+	else
+		return get_unaligned_le16(data);
+}
+
+static int
+ieee80211_parse_adv_t2l(struct ieee80211_sub_if_data *sdata,
+			const struct ieee80211_ttlm_elem *ttlm,
+			struct ieee80211_adv_ttlm_info *ttlm_info)
+{
+	/* The element size was already validated in
+	 * ieee80211_tid_to_link_map_size_ok()
+	 */
+	u8 control, link_map_presence, map_size, tid;
+	u8 *pos;
+
+	memset(ttlm_info, 0, sizeof(*ttlm_info));
+	pos = (void *)ttlm->optional;
+	control	= ttlm->control;
+
+	if ((control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP) ||
+	    !(control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT))
+		return 0;
+
+	if ((control & IEEE80211_TTLM_CONTROL_DIRECTION) !=
+	    IEEE80211_TTLM_DIRECTION_BOTH) {
+		sdata_info(sdata, "Invalid advertised T2L map direction\n");
+		return -EINVAL;
+	}
+
+	link_map_presence = *pos;
+	pos++;
+
+	ttlm_info->switch_time = get_unaligned_le16(pos);
+	pos += 2;
+
+	if (control & IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT) {
+		ttlm_info->duration = pos[0] | pos[1] << 8 | pos[2] << 16;
+		pos += 3;
+	}
+
+	if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE)
+		map_size = 1;
+	else
+		map_size = 2;
+
+	/* According to Draft P802.11be_D3.0 clause 35.3.7.1.7, an AP MLD shall
+	 * not advertise a TID-to-link mapping that does not map all TIDs to the
+	 * same link set, reject frame if not all links have mapping
+	 */
+	if (link_map_presence != 0xff) {
+		sdata_info(sdata,
+			   "Invalid advertised T2L mapping presence indicator\n");
+		return -EINVAL;
+	}
+
+	ttlm_info->map = ieee80211_get_ttlm(map_size, pos);
+	if (!ttlm_info->map) {
+		sdata_info(sdata,
+			   "Invalid advertised T2L map for TID 0\n");
+		return -EINVAL;
+	}
+
+	pos += map_size;
+
+	for (tid = 1; tid < 8; tid++) {
+		u16 map = ieee80211_get_ttlm(map_size, pos);
+
+		if (map != ttlm_info->map) {
+			sdata_info(sdata, "Invalid advertised T2L map for tid %d\n",
+				   tid);
+			return -EINVAL;
+		}
+
+		pos += map_size;
+	}
+	return 0;
+}
+
+static void ieee80211_process_adv_ttlm(struct ieee80211_sub_if_data *sdata,
+					  struct ieee802_11_elems *elems,
+					  u64 beacon_ts)
+{
+	u8 i;
+	int ret;
+
+	if (!ieee80211_vif_is_mld(&sdata->vif))
+		return;
+
+	if (!elems->ttlm_num) {
+		if (sdata->u.mgd.ttlm_info.switch_time) {
+			/* if a planned TID-to-link mapping was cancelled -
+			 * abort it
+			 */
+			wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+						  &sdata->u.mgd.ttlm_work);
+		} else if (sdata->u.mgd.ttlm_info.active) {
+			/* if no TID-to-link element, set to default mapping in
+			 * which all TIDs are mapped to all setup links
+			 */
+			ret = ieee80211_vif_set_links(sdata,
+						      sdata->vif.valid_links,
+						      0);
+			if (ret) {
+				sdata_info(sdata, "Failed setting valid/dormant links\n");
+				return;
+			}
+			ieee80211_vif_cfg_change_notify(sdata,
+							BSS_CHANGED_MLD_VALID_LINKS);
+		}
+		memset(&sdata->u.mgd.ttlm_info, 0,
+		       sizeof(sdata->u.mgd.ttlm_info));
+		return;
+	}
+
+	for (i = 0; i < elems->ttlm_num; i++) {
+		struct ieee80211_adv_ttlm_info ttlm_info;
+		u32 res;
+
+		res = ieee80211_parse_adv_t2l(sdata, elems->ttlm[i],
+					      &ttlm_info);
+
+		if (res) {
+			__ieee80211_disconnect(sdata);
+			return;
+		}
+
+		if (ttlm_info.switch_time) {
+			u32 st_us, delay = 0;
+			u32 ts_l26 = beacon_ts & GENMASK(25, 0);
+
+			/* The t2l map switch time is indicated with a partial
+			 * TSF value, convert it to TSF and calc the delay
+			 * to the start time.
+			 */
+			st_us = ieee80211_tu_to_usec(ttlm_info.switch_time);
+			if (st_us > ts_l26)
+				delay = st_us - ts_l26;
+			else
+				continue;
+
+			sdata->u.mgd.ttlm_info = ttlm_info;
+			wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+						  &sdata->u.mgd.ttlm_work);
+			wiphy_delayed_work_queue(sdata->local->hw.wiphy,
+						 &sdata->u.mgd.ttlm_work,
+						 usecs_to_jiffies(delay));
+			return;
+		}
+	}
+}
+
 static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
 				     struct ieee80211_hdr *hdr, size_t len,
 				     struct ieee80211_rx_status *rx_status)
@@ -5626,7 +6090,7 @@
 		.from_ap = true,
 	};
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	/* Process beacon from the current BSS */
 	bssid = ieee80211_get_bssid(hdr, len, sdata->vif.type);
@@ -5662,7 +6126,7 @@
 	rcu_read_unlock();
 
 	if (ifmgd->assoc_data && ifmgd->assoc_data->need_beacon &&
-	    !WARN_ON(sdata->vif.valid_links) &&
+	    !WARN_ON(ieee80211_vif_is_mld(&sdata->vif)) &&
 	    ieee80211_rx_our_beacon(bssid, ifmgd->assoc_data->link[0].bss)) {
 		parse_params.bss = ifmgd->assoc_data->link[0].bss;
 		elems = ieee802_11_parse_elems_full(&parse_params);
@@ -5842,9 +6306,7 @@
 		changed |= BSS_CHANGED_BEACON_INFO;
 		link->u.mgd.have_beacon = true;
 
-		mutex_lock(&local->iflist_mtx);
 		ieee80211_recalc_ps(local);
-		mutex_unlock(&local->iflist_mtx);
 
 		ieee80211_recalc_ps_vif(sdata);
 	}
@@ -5861,16 +6323,13 @@
 				le16_to_cpu(mgmt->u.beacon.capab_info),
 				erp_valid, erp_value);
 
-	mutex_lock(&local->sta_mtx);
 	sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
 	if (WARN_ON(!sta)) {
-		mutex_unlock(&local->sta_mtx);
 		goto free;
 	}
 	link_sta = rcu_dereference_protected(sta->link[link->link_id],
-					     lockdep_is_held(&local->sta_mtx));
+					     lockdep_is_held(&local->hw.wiphy->mtx));
 	if (WARN_ON(!link_sta)) {
-		mutex_unlock(&local->sta_mtx);
 		goto free;
 	}
 
@@ -5886,7 +6345,6 @@
 				elems->vht_operation, elems->he_operation,
 				elems->eht_operation,
 				elems->s1g_oper, bssid, &changed)) {
-		mutex_unlock(&local->sta_mtx);
 		sdata_info(sdata,
 			   "failed to follow AP %pM bandwidth change, disconnect\n",
 			   bssid);
@@ -5904,7 +6362,6 @@
 		ieee80211_vht_handle_opmode(sdata, link_sta,
 					    *elems->opmode_notif,
 					    rx_status->band);
-	mutex_unlock(&local->sta_mtx);
 
 	changed |= ieee80211_handle_pwr_constr(link, chan, mgmt,
 					       elems->country_elem,
@@ -5927,6 +6384,10 @@
 		}
 	}
 
+	ieee80211_ml_reconfiguration(sdata, elems);
+	ieee80211_process_adv_ttlm(sdata, elems,
+				      le64_to_cpu(mgmt->u.beacon.timestamp));
+
 	ieee80211_link_info_change_notify(sdata, link, changed);
 free:
 	kfree(elems);
@@ -5940,17 +6401,17 @@
 	struct ieee80211_hdr *hdr;
 	u16 fc;
 
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
 	rx_status = (struct ieee80211_rx_status *) skb->cb;
 	hdr = (struct ieee80211_hdr *) skb->data;
 	fc = le16_to_cpu(hdr->frame_control);
 
-	sdata_lock(sdata);
 	switch (fc & IEEE80211_FCTL_STYPE) {
 	case IEEE80211_STYPE_S1G_BEACON:
 		ieee80211_rx_mgmt_beacon(link, hdr, skb->len, rx_status);
 		break;
 	}
-	sdata_unlock(sdata);
 }
 
 void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
@@ -5962,17 +6423,17 @@
 	u16 fc;
 	int ies_len;
 
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
 	rx_status = (struct ieee80211_rx_status *) skb->cb;
 	mgmt = (struct ieee80211_mgmt *) skb->data;
 	fc = le16_to_cpu(mgmt->frame_control);
 
-	sdata_lock(sdata);
-
 	if (rx_status->link_valid) {
 		link = sdata_dereference(sdata->link[rx_status->link_id],
 					 sdata);
 		if (!link)
-			goto out;
+			return;
 	}
 
 	switch (fc & IEEE80211_FCTL_STYPE) {
@@ -5997,6 +6458,10 @@
 		ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len);
 		break;
 	case IEEE80211_STYPE_ACTION:
+		if (!sdata->u.mgd.associated ||
+		    !ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr))
+			break;
+
 		if (mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) {
 			struct ieee802_11_elems *elems;
 
@@ -6051,8 +6516,6 @@
 		}
 		break;
 	}
-out:
-	sdata_unlock(sdata);
 }
 
 static void ieee80211_sta_timer(struct timer_list *t)
@@ -6060,7 +6523,7 @@
 	struct ieee80211_sub_if_data *sdata =
 		from_timer(sdata, t, u.mgd.timer);
 
-	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+	wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
 }
 
 void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
@@ -6087,7 +6550,7 @@
 		.subtype = IEEE80211_STYPE_AUTH,
 	};
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (WARN_ON_ONCE(!auth_data))
 		return -EINVAL;
@@ -6110,6 +6573,7 @@
 	if (auth_data->algorithm == WLAN_AUTH_SAE)
 		info.duration = jiffies_to_msecs(IEEE80211_AUTH_TIMEOUT_SAE);
 
+	info.link_id = auth_data->link_id;
 	drv_mgd_prepare_tx(local, sdata, &info);
 
 	sdata_info(sdata, "send auth to %pM (try %d/%d)\n",
@@ -6156,7 +6620,7 @@
 	struct ieee80211_local *local = sdata->local;
 	int ret;
 
-	sdata_assert_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	assoc_data->tries++;
 	if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) {
@@ -6204,7 +6668,7 @@
 	sdata->u.mgd.status_acked = acked;
 	sdata->u.mgd.status_received = true;
 
-	ieee80211_queue_work(&local->hw, &sdata->work);
+	wiphy_work_queue(local->hw.wiphy, &sdata->work);
 }
 
 void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
@@ -6212,7 +6676,7 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
-	sdata_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (ifmgd->status_received) {
 		__le16 fc = ifmgd->status_fc;
@@ -6347,8 +6811,6 @@
 				WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false);
 		}
 	}
-
-	sdata_unlock(sdata);
 }
 
 static void ieee80211_sta_bcn_mon_timer(struct timer_list *t)
@@ -6356,7 +6818,7 @@
 	struct ieee80211_sub_if_data *sdata =
 		from_timer(sdata, t, u.mgd.bcn_mon_timer);
 
-	if (WARN_ON(sdata->vif.valid_links))
+	if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif)))
 		return;
 
 	if (sdata->vif.bss_conf.csa_active &&
@@ -6367,7 +6829,7 @@
 		return;
 
 	sdata->u.mgd.connection_loss = false;
-	ieee80211_queue_work(&sdata->local->hw,
+	wiphy_work_queue(sdata->local->hw.wiphy,
 			     &sdata->u.mgd.beacon_connection_loss_work);
 }
 
@@ -6380,7 +6842,7 @@
 	struct sta_info *sta;
 	unsigned long timeout;
 
-	if (WARN_ON(sdata->vif.valid_links))
+	if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif)))
 		return;
 
 	if (sdata->vif.bss_conf.csa_active &&
@@ -6404,10 +6866,11 @@
 		return;
 	}
 
-	ieee80211_queue_work(&local->hw, &ifmgd->monitor_work);
+	wiphy_work_queue(local->hw.wiphy, &sdata->u.mgd.monitor_work);
 }
 
-static void ieee80211_sta_monitor_work(struct work_struct *work)
+static void ieee80211_sta_monitor_work(struct wiphy *wiphy,
+				       struct wiphy_work *work)
 {
 	struct ieee80211_sub_if_data *sdata =
 		container_of(work, struct ieee80211_sub_if_data,
@@ -6423,7 +6886,7 @@
 
 		/* let's probe the connection once */
 		if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
-			ieee80211_queue_work(&sdata->local->hw,
+			wiphy_work_queue(sdata->local->hw.wiphy,
 					     &sdata->u.mgd.monitor_work);
 	}
 }
@@ -6434,7 +6897,7 @@
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
-	sdata_lock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (ifmgd->auth_data || ifmgd->assoc_data) {
 		const u8 *ap_addr = ifmgd->auth_data ?
@@ -6486,8 +6949,6 @@
 		memcpy(bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
 		ieee80211_mgd_deauth(sdata, &req);
 	}
-
-	sdata_unlock(sdata);
 }
 #endif
 
@@ -6495,11 +6956,10 @@
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
-	sdata_lock(sdata);
-	if (!ifmgd->associated) {
-		sdata_unlock(sdata);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
+	if (!ifmgd->associated)
 		return;
-	}
 
 	if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) {
 		sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
@@ -6507,7 +6967,6 @@
 		ieee80211_sta_connection_lost(sdata,
 					      WLAN_REASON_UNSPECIFIED,
 					      true);
-		sdata_unlock(sdata);
 		return;
 	}
 
@@ -6517,23 +6976,19 @@
 		ieee80211_sta_connection_lost(sdata,
 					      WLAN_REASON_UNSPECIFIED,
 					      true);
-		sdata_unlock(sdata);
 		return;
 	}
-
-	sdata_unlock(sdata);
 }
 
-static void ieee80211_request_smps_mgd_work(struct work_struct *work)
+static void ieee80211_request_smps_mgd_work(struct wiphy *wiphy,
+					    struct wiphy_work *work)
 {
 	struct ieee80211_link_data *link =
 		container_of(work, struct ieee80211_link_data,
 			     u.mgd.request_smps_work);
 
-	sdata_lock(link->sdata);
 	__ieee80211_request_smps_mgd(link->sdata, link,
 				     link->u.mgd.driver_smps_mode);
-	sdata_unlock(link->sdata);
 }
 
 /* interface setup */
@@ -6541,18 +6996,22 @@
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
-	INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work);
-	INIT_WORK(&ifmgd->beacon_connection_loss_work,
+	wiphy_work_init(&ifmgd->monitor_work, ieee80211_sta_monitor_work);
+	wiphy_work_init(&ifmgd->beacon_connection_loss_work,
 		  ieee80211_beacon_connection_loss_work);
-	INIT_WORK(&ifmgd->csa_connection_drop_work,
+	wiphy_work_init(&ifmgd->csa_connection_drop_work,
 		  ieee80211_csa_connection_drop_work);
-	INIT_DELAYED_WORK(&ifmgd->tdls_peer_del_work,
+	wiphy_delayed_work_init(&ifmgd->tdls_peer_del_work,
 			  ieee80211_tdls_peer_del_work);
+	wiphy_delayed_work_init(&ifmgd->ml_reconf_work,
+				ieee80211_ml_reconf_work);
 	timer_setup(&ifmgd->timer, ieee80211_sta_timer, 0);
 	timer_setup(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, 0);
 	timer_setup(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer, 0);
-	INIT_DELAYED_WORK(&ifmgd->tx_tspec_wk,
+	wiphy_delayed_work_init(&ifmgd->tx_tspec_wk,
 			  ieee80211_sta_handle_tspec_ac_params_wk);
+	wiphy_delayed_work_init(&ifmgd->ttlm_work,
+				ieee80211_tid_to_link_map_work);
 
 	ifmgd->flags = 0;
 	ifmgd->powersave = sdata->wdev.ps;
@@ -6564,6 +7023,16 @@
 	ifmgd->orig_teardown_skb = NULL;
 }
 
+static void ieee80211_recalc_smps_work(struct wiphy *wiphy,
+				       struct wiphy_work *work)
+{
+	struct ieee80211_link_data *link =
+		container_of(work, struct ieee80211_link_data,
+			     u.mgd.recalc_smps);
+
+	ieee80211_recalc_smps(link->sdata, link);
+}
+
 void ieee80211_mgd_setup_link(struct ieee80211_link_data *link)
 {
 	struct ieee80211_sub_if_data *sdata = link->sdata;
@@ -6574,15 +7043,17 @@
 	link->u.mgd.conn_flags = 0;
 	link->conf->bssid = link->u.mgd.bssid;
 
-	INIT_WORK(&link->u.mgd.request_smps_work,
+	wiphy_work_init(&link->u.mgd.request_smps_work,
 		  ieee80211_request_smps_mgd_work);
+	wiphy_work_init(&link->u.mgd.recalc_smps,
+			ieee80211_recalc_smps_work);
 	if (local->hw.wiphy->features & NL80211_FEATURE_DYNAMIC_SMPS)
 		link->u.mgd.req_smps = IEEE80211_SMPS_AUTOMATIC;
 	else
 		link->u.mgd.req_smps = IEEE80211_SMPS_OFF;
 
-	INIT_WORK(&link->u.mgd.chswitch_work, ieee80211_chswitch_work);
-	timer_setup(&link->u.mgd.chswitch_timer, ieee80211_chswitch_timer, 0);
+	wiphy_delayed_work_init(&link->u.mgd.chswitch_work,
+				ieee80211_chswitch_work);
 
 	if (sdata->u.mgd.assoc_data)
 		ether_addr_copy(link->conf->addr,
@@ -6623,12 +7094,12 @@
 		mlo = true;
 		if (WARN_ON(!ap_mld_addr))
 			return -EINVAL;
-		err = ieee80211_vif_set_links(sdata, BIT(link_id));
+		err = ieee80211_vif_set_links(sdata, BIT(link_id), 0);
 	} else {
 		if (WARN_ON(ap_mld_addr))
 			return -EINVAL;
 		ap_mld_addr = cbss->bssid;
-		err = ieee80211_vif_set_links(sdata, 0);
+		err = ieee80211_vif_set_links(sdata, 0, 0);
 		link_id = 0;
 		mlo = false;
 	}
@@ -6739,7 +7210,7 @@
 	}
 
 	if (new_sta || override) {
-		err = ieee80211_prep_channel(sdata, link, cbss,
+		err = ieee80211_prep_channel(sdata, link, cbss, mlo,
 					     &link->u.mgd.conn_flags);
 		if (err) {
 			if (new_sta)
@@ -6780,7 +7251,7 @@
 
 out_err:
 	ieee80211_link_release_channel(&sdata->deflink);
-	ieee80211_vif_set_links(sdata, 0);
+	ieee80211_vif_set_links(sdata, 0, 0);
 	return err;
 }
 
@@ -6791,10 +7262,14 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct ieee80211_mgd_auth_data *auth_data;
+	struct ieee80211_link_data *link;
+	const struct element *csa_elem, *ecsa_elem;
 	u16 auth_alg;
 	int err;
 	bool cont_auth;
 
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
 	/* prepare auth data structure */
 
 	switch (req->auth_type) {
@@ -6831,6 +7306,22 @@
 	if (ifmgd->assoc_data)
 		return -EBUSY;
 
+	rcu_read_lock();
+	csa_elem = ieee80211_bss_get_elem(req->bss, WLAN_EID_CHANNEL_SWITCH);
+	ecsa_elem = ieee80211_bss_get_elem(req->bss,
+					   WLAN_EID_EXT_CHANSWITCH_ANN);
+	if ((csa_elem &&
+	     csa_elem->datalen == sizeof(struct ieee80211_channel_sw_ie) &&
+	     ((struct ieee80211_channel_sw_ie *)csa_elem->data)->count != 0) ||
+	    (ecsa_elem &&
+	     ecsa_elem->datalen == sizeof(struct ieee80211_ext_chansw_ie) &&
+	     ((struct ieee80211_ext_chansw_ie *)ecsa_elem->data)->count != 0)) {
+		rcu_read_unlock();
+		sdata_info(sdata, "AP is in CSA process, reject auth\n");
+		return -EINVAL;
+	}
+	rcu_read_unlock();
+
 	auth_data = kzalloc(sizeof(*auth_data) + req->auth_data_len +
 			    req->ie_len, GFP_KERNEL);
 	if (!auth_data)
@@ -6914,8 +7405,6 @@
 					    false);
 	}
 
-	sdata_info(sdata, "authenticate with %pM\n", auth_data->ap_addr);
-
 	/* needed for transmitting the auth frame(s) properly */
 	memcpy(sdata->vif.cfg.ap_addr, auth_data->ap_addr, ETH_ALEN);
 
@@ -6924,6 +7413,19 @@
 	if (err)
 		goto err_clear;
 
+	if (req->link_id > 0)
+		link = sdata_dereference(sdata->link[req->link_id], sdata);
+	else
+		link = sdata_dereference(sdata->link[0], sdata);
+
+	if (WARN_ON(!link)) {
+		err = -ENOLINK;
+		goto err_clear;
+	}
+
+	sdata_info(sdata, "authenticate with %pM (local address=%pM)\n",
+		   auth_data->ap_addr, link->conf->addr);
+
 	err = ieee80211_auth(sdata);
 	if (err) {
 		sta_info_destroy_addr(sdata, auth_data->ap_addr);
@@ -6935,13 +7437,11 @@
 	return 0;
 
  err_clear:
-	if (!sdata->vif.valid_links) {
+	if (!ieee80211_vif_is_mld(&sdata->vif)) {
 		eth_zero_addr(sdata->deflink.u.mgd.bssid);
 		ieee80211_link_info_change_notify(sdata, &sdata->deflink,
 						  BSS_CHANGED_BSSID);
-		mutex_lock(&sdata->local->mtx);
 		ieee80211_link_release_channel(&sdata->deflink);
-		mutex_unlock(&sdata->local->mtx);
 	}
 	ifmgd->auth_data = NULL;
 	kfree(auth_data);
@@ -6956,7 +7456,7 @@
 			   unsigned int link_id)
 {
 	struct ieee80211_local *local = sdata->local;
-	const struct cfg80211_bss_ies *beacon_ies;
+	const struct cfg80211_bss_ies *bss_ies;
 	struct ieee80211_supported_band *sband;
 	const struct element *ht_elem, *vht_elem;
 	struct ieee80211_link_data *link;
@@ -7031,32 +7531,37 @@
 	link->conf->eht_puncturing = 0;
 
 	rcu_read_lock();
-	beacon_ies = rcu_dereference(cbss->beacon_ies);
-	if (beacon_ies) {
-		const struct ieee80211_eht_operation *eht_oper;
-		const struct element *elem;
+	bss_ies = rcu_dereference(cbss->beacon_ies);
+	if (bss_ies) {
 		u8 dtim_count = 0;
 
-		ieee80211_get_dtim(beacon_ies, &dtim_count,
+		ieee80211_get_dtim(bss_ies, &dtim_count,
 				   &link->u.mgd.dtim_period);
 
 		sdata->deflink.u.mgd.have_beacon = true;
 
 		if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
-			link->conf->sync_tsf = beacon_ies->tsf;
+			link->conf->sync_tsf = bss_ies->tsf;
 			link->conf->sync_device_ts = bss->device_ts_beacon;
 			link->conf->sync_dtim_count = dtim_count;
 		}
+	} else {
+		bss_ies = rcu_dereference(cbss->ies);
+	}
+
+	if (bss_ies) {
+		const struct ieee80211_eht_operation *eht_oper;
+		const struct element *elem;
 
 		elem = cfg80211_find_ext_elem(WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION,
-					      beacon_ies->data, beacon_ies->len);
+					      bss_ies->data, bss_ies->len);
 		if (elem && elem->datalen >= 3)
 			link->conf->profile_periodicity = elem->data[2];
 		else
 			link->conf->profile_periodicity = 0;
 
 		elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY,
-					  beacon_ies->data, beacon_ies->len);
+					  bss_ies->data, bss_ies->len);
 		if (elem && elem->datalen >= 11 &&
 		    (elem->data[10] & WLAN_EXT_CAPA11_EMA_SUPPORT))
 			link->conf->ema_ap = true;
@@ -7064,7 +7569,7 @@
 			link->conf->ema_ap = false;
 
 		elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_OPERATION,
-					      beacon_ies->data, beacon_ies->len);
+					      bss_ies->data, bss_ies->len);
 		eht_oper = (const void *)(elem->data + 1);
 
 		if (elem &&
@@ -7124,7 +7629,7 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct ieee80211_mgd_assoc_data *assoc_data;
-	const struct element *ssid_elem;
+	const struct element *ssid_elem, *csa_elem, *ecsa_elem;
 	struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg;
 	ieee80211_conn_flags_t conn_flags = 0;
 	struct ieee80211_link_data *link;
@@ -7154,6 +7659,21 @@
 		kfree(assoc_data);
 		return -EINVAL;
 	}
+
+	csa_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_CHANNEL_SWITCH);
+	ecsa_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_EXT_CHANSWITCH_ANN);
+	if ((csa_elem &&
+	     csa_elem->datalen == sizeof(struct ieee80211_channel_sw_ie) &&
+	     ((struct ieee80211_channel_sw_ie *)csa_elem->data)->count != 0) ||
+	    (ecsa_elem &&
+	     ecsa_elem->datalen == sizeof(struct ieee80211_ext_chansw_ie) &&
+	     ((struct ieee80211_ext_chansw_ie *)ecsa_elem->data)->count != 0)) {
+		sdata_info(sdata, "AP is in CSA process, reject assoc\n");
+		rcu_read_unlock();
+		kfree(assoc_data);
+		return -EINVAL;
+	}
+
 	memcpy(assoc_data->ssid, ssid_elem->data, ssid_elem->datalen);
 	assoc_data->ssid_len = ssid_elem->datalen;
 	memcpy(vif_cfg->ssid, assoc_data->ssid, assoc_data->ssid_len);
@@ -7214,7 +7734,10 @@
 		match = ether_addr_equal(ifmgd->auth_data->ap_addr,
 					 assoc_data->ap_addr) &&
 			ifmgd->auth_data->link_id == req->link_id;
-		ieee80211_destroy_auth_data(sdata, match);
+
+		/* Cleanup is delayed if auth_data matches */
+		if (!match)
+			ieee80211_destroy_auth_data(sdata, false);
 	}
 
 	/* prepare assoc data */
@@ -7320,10 +7843,11 @@
 		for (i = 0; i < ARRAY_SIZE(assoc_data->link); i++) {
 			assoc_data->link[i].conn_flags = conn_flags;
 			assoc_data->link[i].bss = req->links[i].bss;
+			assoc_data->link[i].disabled = req->links[i].disabled;
 		}
 
 		/* if there was no authentication, set up the link */
-		err = ieee80211_vif_set_links(sdata, BIT(assoc_link_id));
+		err = ieee80211_vif_set_links(sdata, BIT(assoc_link_id), 0);
 		if (err)
 			goto err_clear;
 	} else {
@@ -7394,11 +7918,14 @@
 		if (i == assoc_data->assoc_link_id)
 			continue;
 		/* only calculate the flags, hence link == NULL */
-		err = ieee80211_prep_channel(sdata, NULL, assoc_data->link[i].bss,
+		err = ieee80211_prep_channel(sdata, NULL,
+					     assoc_data->link[i].bss, true,
 					     &assoc_data->link[i].conn_flags);
-		if (err)
+		if (err) {
+			req->links[i].error = err;
 			goto err_clear;
 	}
+	}
 
 	/* needed for transmitting the assoc frames properly */
 	memcpy(sdata->vif.cfg.ap_addr, assoc_data->ap_addr, ETH_ALEN);
@@ -7433,11 +7960,17 @@
 
 	run_again(sdata, assoc_data->timeout);
 
+	/* We are associating, clean up auth_data */
+	if (ifmgd->auth_data)
+		ieee80211_destroy_auth_data(sdata, true);
+
 	return 0;
  err_clear:
+	if (!ifmgd->auth_data) {
 	eth_zero_addr(sdata->deflink.u.mgd.bssid);
 	ieee80211_link_info_change_notify(sdata, &sdata->deflink,
 					  BSS_CHANGED_BSSID);
+	}
 	ifmgd->assoc_data = NULL;
  err_free:
 	kfree(assoc_data);
@@ -7461,6 +7994,7 @@
 			   req->bssid, req->reason_code,
 			   ieee80211_get_reason_code_string(req->reason_code));
 
+		info.link_id = ifmgd->auth_data->link_id;
 		drv_mgd_prepare_tx(sdata->local, sdata, &info);
 		ieee80211_send_deauth_disassoc(sdata, req->bssid, req->bssid,
 					       IEEE80211_STYPE_DEAUTH,
@@ -7481,6 +8015,7 @@
 			   req->bssid, req->reason_code,
 			   ieee80211_get_reason_code_string(req->reason_code));
 
+		info.link_id = ifmgd->assoc_data->assoc_link_id;
 		drv_mgd_prepare_tx(sdata->local, sdata, &info);
 		ieee80211_send_deauth_disassoc(sdata, req->bssid, req->bssid,
 					       IEEE80211_STYPE_DEAUTH,
@@ -7538,8 +8073,12 @@
 
 void ieee80211_mgd_stop_link(struct ieee80211_link_data *link)
 {
-	cancel_work_sync(&link->u.mgd.request_smps_work);
-	cancel_work_sync(&link->u.mgd.chswitch_work);
+	wiphy_work_cancel(link->sdata->local->hw.wiphy,
+			  &link->u.mgd.request_smps_work);
+	wiphy_work_cancel(link->sdata->local->hw.wiphy,
+			  &link->u.mgd.recalc_smps);
+	wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy,
+				  &link->u.mgd.chswitch_work);
 }
 
 void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
@@ -7551,12 +8090,18 @@
 	 * they will not do anything but might not have been
 	 * cancelled when disconnecting.
 	 */
-	cancel_work_sync(&ifmgd->monitor_work);
-	cancel_work_sync(&ifmgd->beacon_connection_loss_work);
-	cancel_work_sync(&ifmgd->csa_connection_drop_work);
-	cancel_delayed_work_sync(&ifmgd->tdls_peer_del_work);
+	wiphy_work_cancel(sdata->local->hw.wiphy,
+			  &ifmgd->monitor_work);
+	wiphy_work_cancel(sdata->local->hw.wiphy,
+			  &ifmgd->beacon_connection_loss_work);
+	wiphy_work_cancel(sdata->local->hw.wiphy,
+			  &ifmgd->csa_connection_drop_work);
+	wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+				  &ifmgd->tdls_peer_del_work);
+	wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+				  &ifmgd->ml_reconf_work);
+	wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work);
 
-	sdata_lock(sdata);
 	if (ifmgd->assoc_data)
 		ieee80211_destroy_assoc_data(sdata, ASSOC_TIMEOUT);
 	if (ifmgd->auth_data)
@@ -7572,7 +8117,6 @@
 	ifmgd->assoc_req_ies_len = 0;
 	spin_unlock_bh(&ifmgd->teardown_lock);
 	del_timer_sync(&ifmgd->timer);
-	sdata_unlock(sdata);
 }
 
 void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
diff -ruw linux-6.4/net/mac80211/ocb.c linux-6.4-fbx/net/mac80211/ocb.c
--- linux-6.4/net/mac80211/ocb.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/ocb.c	2023-11-07 13:38:44.086257347 +0100
@@ -4,7 +4,7 @@
  *
  * Copyright: (c) 2014 Czech Technical University in Prague
  *            (c) 2014 Volkswagen Group Research
- * Copyright (C) 2022 Intel Corporation
+ * Copyright (C) 2022 - 2023 Intel Corporation
  * Author:    Rostislav Lisovy <rostislav.lisovy@fel.cvut.cz>
  * Funded by: Volkswagen Group Research
  */
@@ -44,7 +44,6 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	struct ieee80211_supported_band *sband;
-	enum nl80211_bss_scan_width scan_width;
 	struct sta_info *sta;
 	int band;
 
@@ -66,7 +65,6 @@
 		return;
 	}
 	band = chanctx_conf->def.chan->band;
-	scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def);
 	rcu_read_unlock();
 
 	sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
@@ -75,13 +73,12 @@
 
 	/* Add only mandatory rates for now */
 	sband = local->hw.wiphy->bands[band];
-	sta->sta.deflink.supp_rates[band] =
-		ieee80211_mandatory_rates(sband, scan_width);
+	sta->sta.deflink.supp_rates[band] = ieee80211_mandatory_rates(sband);
 
 	spin_lock(&ifocb->incomplete_lock);
 	list_add(&sta->list, &ifocb->incomplete_stations);
 	spin_unlock(&ifocb->incomplete_lock);
-	ieee80211_queue_work(&local->hw, &sdata->work);
+	wiphy_work_queue(local->hw.wiphy, &sdata->work);
 }
 
 static struct sta_info *ieee80211_ocb_finish_sta(struct sta_info *sta)
@@ -124,11 +121,11 @@
 	struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
 	struct sta_info *sta;
 
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
 	if (ifocb->joined != true)
 		return;
 
-	sdata_lock(sdata);
-
 	spin_lock_bh(&ifocb->incomplete_lock);
 	while (!list_empty(&ifocb->incomplete_stations)) {
 		sta = list_first_entry(&ifocb->incomplete_stations,
@@ -144,8 +141,6 @@
 
 	if (test_and_clear_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags))
 		ieee80211_ocb_housekeeping(sdata);
-
-	sdata_unlock(sdata);
 }
 
 static void ieee80211_ocb_housekeeping_timer(struct timer_list *t)
@@ -157,7 +152,7 @@
 
 	set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags);
 
-	ieee80211_queue_work(&local->hw, &sdata->work);
+	wiphy_work_queue(local->hw.wiphy, &sdata->work);
 }
 
 void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata)
@@ -175,9 +170,11 @@
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
-	u32 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID;
+	u64 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID;
 	int err;
 
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
 	if (ifocb->joined == true)
 		return -EINVAL;
 
@@ -185,10 +182,8 @@
 	sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
 	sdata->deflink.needed_rx_chains = sdata->local->rx_chains;
 
-	mutex_lock(&sdata->local->mtx);
 	err = ieee80211_link_use_channel(&sdata->deflink, &setup->chandef,
 					 IEEE80211_CHANCTX_SHARED);
-	mutex_unlock(&sdata->local->mtx);
 	if (err)
 		return err;
 
@@ -197,7 +192,7 @@
 	ifocb->joined = true;
 
 	set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags);
-	ieee80211_queue_work(&local->hw, &sdata->work);
+	wiphy_work_queue(local->hw.wiphy, &sdata->work);
 
 	netif_carrier_on(sdata->dev);
 	return 0;
@@ -209,6 +204,8 @@
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
 
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
 	ifocb->joined = false;
 	sta_info_flush(sdata);
 
@@ -228,9 +225,7 @@
 	clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_OCB);
 
-	mutex_lock(&sdata->local->mtx);
 	ieee80211_link_release_channel(&sdata->deflink);
-	mutex_unlock(&sdata->local->mtx);
 
 	skb_queue_purge(&sdata->skb_queue);
 
diff -ruw linux-6.4/net/mac80211/offchannel.c linux-6.4-fbx/net/mac80211/offchannel.c
--- linux-6.4/net/mac80211/offchannel.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/offchannel.c	2023-11-07 13:38:44.086257347 +0100
@@ -8,7 +8,7 @@
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2009	Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2019, 2022 Intel Corporation
+ * Copyright (C) 2019, 2022-2023 Intel Corporation
  */
 #include <linux/export.h>
 #include <net/mac80211.h>
@@ -34,7 +34,7 @@
 	del_timer_sync(&ifmgd->bcn_mon_timer);
 	del_timer_sync(&ifmgd->conn_mon_timer);
 
-	cancel_work_sync(&local->dynamic_ps_enable_work);
+	wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work);
 
 	if (local->hw.conf.flags & IEEE80211_CONF_PS) {
 		offchannel_ps_enabled = true;
@@ -84,6 +84,8 @@
 {
 	struct ieee80211_sub_if_data *sdata;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (WARN_ON(local->use_chanctx))
 		return;
 
@@ -101,7 +103,6 @@
 					false);
 	ieee80211_flush_queues(local, NULL, false);
 
-	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		if (!ieee80211_sdata_running(sdata))
 			continue;
@@ -127,17 +128,17 @@
 		    sdata->u.mgd.associated)
 			ieee80211_offchannel_ps_enable(sdata);
 	}
-	mutex_unlock(&local->iflist_mtx);
 }
 
 void ieee80211_offchannel_return(struct ieee80211_local *local)
 {
 	struct ieee80211_sub_if_data *sdata;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (WARN_ON(local->use_chanctx))
 		return;
 
-	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
 			continue;
@@ -161,7 +162,6 @@
 				BSS_CHANGED_BEACON_ENABLED);
 		}
 	}
-	mutex_unlock(&local->iflist_mtx);
 
 	ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
 					IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
@@ -197,7 +197,7 @@
 	struct ieee80211_roc_work *roc, *tmp;
 	long remaining_dur_min = LONG_MAX;
 
-	lockdep_assert_held(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
 		long remaining;
@@ -230,7 +230,7 @@
 	if (dur == LONG_MAX)
 		return false;
 
-	mod_delayed_work(local->workqueue, &local->roc_work, dur);
+	wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, dur);
 	return true;
 }
 
@@ -258,13 +258,13 @@
 	roc->notified = true;
 }
 
-static void ieee80211_hw_roc_start(struct work_struct *work)
+static void ieee80211_hw_roc_start(struct wiphy *wiphy, struct wiphy_work *work)
 {
 	struct ieee80211_local *local =
 		container_of(work, struct ieee80211_local, hw_roc_start);
 	struct ieee80211_roc_work *roc;
 
-	mutex_lock(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	list_for_each_entry(roc, &local->roc_list, list) {
 		if (!roc->started)
@@ -273,8 +273,6 @@
 		roc->hw_begun = true;
 		ieee80211_handle_roc_started(roc, local->hw_roc_start_time);
 	}
-
-	mutex_unlock(&local->mtx);
 }
 
 void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
@@ -285,7 +283,7 @@
 
 	trace_api_ready_on_channel(local);
 
-	ieee80211_queue_work(hw, &local->hw_roc_start);
+	wiphy_work_queue(hw->wiphy, &local->hw_roc_start);
 }
 EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);
 
@@ -295,7 +293,7 @@
 	enum ieee80211_roc_type type;
 	u32 min_dur, max_dur;
 
-	lockdep_assert_held(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (WARN_ON(list_empty(&local->roc_list)))
 		return;
@@ -338,7 +336,7 @@
 				tmp->started = true;
 				tmp->abort = true;
 			}
-			ieee80211_queue_work(&local->hw, &local->hw_roc_done);
+			wiphy_work_queue(local->hw.wiphy, &local->hw_roc_done);
 			return;
 		}
 
@@ -368,7 +366,7 @@
 			ieee80211_hw_config(local, 0);
 		}
 
-		ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
+		wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
 					     msecs_to_jiffies(min_dur));
 
 		/* tell userspace or send frame(s) */
@@ -386,7 +384,7 @@
 {
 	struct ieee80211_roc_work *roc;
 
-	lockdep_assert_held(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (list_empty(&local->roc_list)) {
 		ieee80211_run_deferred_scan(local);
@@ -407,7 +405,7 @@
 		_ieee80211_start_next_roc(local);
 	} else {
 		/* delay it a bit */
-		ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
+		wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
 					     round_jiffies_relative(HZ/2));
 	}
 }
@@ -417,7 +415,7 @@
 	struct ieee80211_roc_work *roc;
 	bool on_channel;
 
-	lockdep_assert_held(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (WARN_ON(local->ops->remain_on_channel))
 		return;
@@ -451,29 +449,27 @@
 	}
 }
 
-static void ieee80211_roc_work(struct work_struct *work)
+static void ieee80211_roc_work(struct wiphy *wiphy, struct wiphy_work *work)
 {
 	struct ieee80211_local *local =
 		container_of(work, struct ieee80211_local, roc_work.work);
 
-	mutex_lock(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	__ieee80211_roc_work(local);
-	mutex_unlock(&local->mtx);
 }
 
-static void ieee80211_hw_roc_done(struct work_struct *work)
+static void ieee80211_hw_roc_done(struct wiphy *wiphy, struct wiphy_work *work)
 {
 	struct ieee80211_local *local =
 		container_of(work, struct ieee80211_local, hw_roc_done);
 
-	mutex_lock(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	ieee80211_end_finished_rocs(local, jiffies);
 
 	/* if there's another roc, start it now */
 	ieee80211_start_next_roc(local);
-
-	mutex_unlock(&local->mtx);
 }
 
 void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
@@ -482,7 +478,7 @@
 
 	trace_api_remain_on_channel_expired(local);
 
-	ieee80211_queue_work(hw, &local->hw_roc_done);
+	wiphy_work_queue(hw->wiphy, &local->hw_roc_done);
 }
 EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);
 
@@ -537,7 +533,7 @@
 	bool queued = false, combine_started = true;
 	int ret;
 
-	lockdep_assert_held(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (channel->freq_offset)
 		/* this may work, but is untested */
@@ -586,7 +582,7 @@
 		/* if not HW assist, just queue & schedule work */
 		if (!local->ops->remain_on_channel) {
 			list_add_tail(&roc->list, &local->roc_list);
-			ieee80211_queue_delayed_work(&local->hw,
+			wiphy_delayed_work_queue(local->hw.wiphy,
 						     &local->roc_work, 0);
 		} else {
 			/* otherwise actually kick it off here
@@ -675,15 +671,12 @@
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 	struct ieee80211_local *local = sdata->local;
-	int ret;
 
-	mutex_lock(&local->mtx);
-	ret = ieee80211_start_roc_work(local, sdata, chan,
+	lockdep_assert_wiphy(local->hw.wiphy);
+
+	return ieee80211_start_roc_work(local, sdata, chan,
 				       duration, cookie, NULL,
 				       IEEE80211_ROC_TYPE_NORMAL);
-	mutex_unlock(&local->mtx);
-
-	return ret;
 }
 
 static int ieee80211_cancel_roc(struct ieee80211_local *local,
@@ -692,12 +685,13 @@
 	struct ieee80211_roc_work *roc, *tmp, *found = NULL;
 	int ret;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!cookie)
 		return -ENOENT;
 
-	flush_work(&local->hw_roc_start);
+	wiphy_work_flush(local->hw.wiphy, &local->hw_roc_start);
 
-	mutex_lock(&local->mtx);
 	list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
 		if (!mgmt_tx && roc->cookie != cookie)
 			continue;
@@ -709,7 +703,6 @@
 	}
 
 	if (!found) {
-		mutex_unlock(&local->mtx);
 		return -ENOENT;
 	}
 
@@ -721,10 +714,26 @@
 	if (local->ops->remain_on_channel) {
 		ret = drv_cancel_remain_on_channel(local, roc->sdata);
 		if (WARN_ON_ONCE(ret)) {
-			mutex_unlock(&local->mtx);
 			return ret;
 		}
 
+		/*
+		 * We could be racing against the notification from the driver:
+		 *  + driver is handling the notification on CPU0
+		 *  + user space is cancelling the remain on channel and
+		 *    schedules the hw_roc_done worker.
+		 *
+		 *  Now hw_roc_done might start to run after the next roc
+		 *  starts, and mac80211 will think that this second roc has
+		 *  ended prematurely.
+		 *  Cancel the work to make sure that all the pending workers
+		 *  have completed execution.
+		 *  Note that this assumes that by the time the driver returns
+		 *  from drv_cancel_remain_on_channel, it has completed all
+		 *  the processing of related notifications.
+		 */
+		wiphy_work_cancel(local->hw.wiphy, &local->hw_roc_done);
+
 		/* TODO:
 		 * if multiple items were combined here then we really shouldn't
 		 * cancel them all - we should wait for as much time as needed
@@ -745,11 +754,10 @@
 	} else {
 		/* go through work struct to return to the operating channel */
 		found->abort = true;
-		mod_delayed_work(local->workqueue, &local->roc_work, 0);
+		wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, 0);
 	}
 
  out_unlock:
-	mutex_unlock(&local->mtx);
 
 	return 0;
 }
@@ -778,6 +786,8 @@
 	int ret;
 	u8 *data;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (params->dont_wait_for_ack)
 		flags = IEEE80211_TX_CTL_NO_ACK;
 	else
@@ -833,13 +843,16 @@
 		break;
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_P2P_CLIENT:
-		sdata_lock(sdata);
 		if (!sdata->u.mgd.associated ||
 		    (params->offchan && params->wait &&
 		     local->ops->remain_on_channel &&
-		     memcmp(sdata->vif.cfg.ap_addr, mgmt->bssid, ETH_ALEN)))
+		     memcmp(sdata->vif.cfg.ap_addr, mgmt->bssid, ETH_ALEN))) {
 			need_offchan = true;
-		sdata_unlock(sdata);
+		} else if (sdata->u.mgd.associated &&
+			   ether_addr_equal(sdata->vif.cfg.ap_addr, mgmt->da)) {
+			sta = sta_info_get_bss(sdata, mgmt->da);
+			mlo_sta = sta && sta->sta.mlo;
+		}
 		break;
 	case NL80211_IFTYPE_P2P_DEVICE:
 		need_offchan = true;
@@ -855,8 +868,6 @@
 	if (need_offchan && !params->chan)
 		return -EINVAL;
 
-	mutex_lock(&local->mtx);
-
 	/* Check if the operating channel is the requested channel */
 	if (!params->chan && mlo_sta) {
 		need_offchan = false;
@@ -980,7 +991,6 @@
 	if (ret)
 		ieee80211_free_txskb(&local->hw, skb);
  out_unlock:
-	mutex_unlock(&local->mtx);
 	return ret;
 }
 
@@ -994,9 +1004,9 @@
 
 void ieee80211_roc_setup(struct ieee80211_local *local)
 {
-	INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
-	INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
-	INIT_DELAYED_WORK(&local->roc_work, ieee80211_roc_work);
+	wiphy_work_init(&local->hw_roc_start, ieee80211_hw_roc_start);
+	wiphy_work_init(&local->hw_roc_done, ieee80211_hw_roc_done);
+	wiphy_delayed_work_init(&local->roc_work, ieee80211_roc_work);
 	INIT_LIST_HEAD(&local->roc_list);
 }
 
@@ -1006,7 +1016,8 @@
 	struct ieee80211_roc_work *roc, *tmp;
 	bool work_to_do = false;
 
-	mutex_lock(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
 		if (sdata && roc->sdata != sdata)
 			continue;
@@ -1014,7 +1025,7 @@
 		if (roc->started) {
 			if (local->ops->remain_on_channel) {
 				/* can race, so ignore return value */
-				drv_cancel_remain_on_channel(local, sdata);
+				drv_cancel_remain_on_channel(local, roc->sdata);
 				ieee80211_roc_notify_destroy(roc);
 			} else {
 				roc->abort = true;
@@ -1026,5 +1037,4 @@
 	}
 	if (work_to_do)
 		__ieee80211_roc_work(local);
-	mutex_unlock(&local->mtx);
 }
diff -ruw linux-6.4/net/mac80211/rc80211_minstrel_ht.c linux-6.4-fbx/net/mac80211/rc80211_minstrel_ht.c
--- linux-6.4/net/mac80211/rc80211_minstrel_ht.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/rc80211_minstrel_ht.c	2023-11-07 13:38:44.086257347 +0100
@@ -1725,16 +1725,15 @@
 	mi->band = sband->band;
 	mi->last_stats_update = jiffies;
 
-	ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1, 0);
-	mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1, 0);
+	ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1);
+	mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1);
 	mi->overhead += ack_dur;
 	mi->overhead_rtscts = mi->overhead + 2 * ack_dur;
 
 	ctl_rate = &sband->bitrates[rate_lowest_index(sband, sta)];
 	erp = ctl_rate->flags & IEEE80211_RATE_ERP_G;
 	ack_dur = ieee80211_frame_duration(sband->band, 10,
-					   ctl_rate->bitrate, erp, 1,
-					   ieee80211_chandef_get_shift(chandef));
+					   ctl_rate->bitrate, erp, 1);
 	mi->overhead_legacy = ack_dur;
 	mi->overhead_legacy_rtscts = mi->overhead_legacy + 2 * ack_dur;
 
diff -ruw linux-6.4/net/mac80211/rx.c linux-6.4-fbx/net/mac80211/rx.c
--- linux-6.4/net/mac80211/rx.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/rx.c	2024-04-19 16:04:28.973736213 +0200
@@ -6,7 +6,7 @@
  * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
  */
 
 #include <linux/jiffies.h>
@@ -32,6 +32,7 @@
 #include "tkip.h"
 #include "wme.h"
 #include "rate.h"
+#include "fbx_scum.h"
 
 /*
  * monitor mode reception
@@ -229,7 +230,7 @@
 	}
 
 	skb_queue_tail(&sdata->skb_queue, skb);
-	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+	wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
 	if (sta)
 		sta->deflink.rx_stats.packets++;
 }
@@ -817,17 +818,11 @@
 		return NULL;
 	}
 
+	fbx80211_rx_monitor(local, origskb, rate, rtap_space);
 	only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space);
 
-	if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
-		if (only_monitor) {
-			dev_kfree_skb(origskb);
-			return NULL;
-		}
-
-		return ieee80211_clean_skb(origskb, present_fcs_len,
-					   rtap_space);
-	}
+	if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR))
+		goto out;
 
 	ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space);
 
@@ -868,6 +863,11 @@
 	/* ditto */
 	if (!origskb)
 		return NULL;
+out:
+	if (only_monitor) {
+		dev_kfree_skb(origskb);
+		return NULL;
+	}
 
 	return ieee80211_clean_skb(origskb, present_fcs_len, rtap_space);
 }
@@ -1083,7 +1083,8 @@
 	struct sk_buff *tail = skb_peek_tail(frames);
 	struct ieee80211_rx_status *status;
 
-	if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
+	if (tid_agg_rx->reorder_buf_filtered &&
+	    tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
 		return true;
 
 	if (!tail)
@@ -1124,6 +1125,7 @@
 	}
 
 no_frame:
+	if (tid_agg_rx->reorder_buf_filtered)
 	tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
 	tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
 }
@@ -1434,7 +1436,7 @@
 		     rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
 		I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
 		rx->link_sta->rx_stats.num_duplicates++;
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_DUP;
 	} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
 		rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
 	}
@@ -1488,7 +1490,7 @@
 		    cfg80211_rx_spurious_frame(rx->sdata->dev,
 					       hdr->addr2,
 					       GFP_ATOMIC))
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_SPURIOUS;
 
 		return RX_DROP_MONITOR;
 	}
@@ -1732,7 +1734,7 @@
 		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
 		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
 			link_sta->rx_stats.last_rx = jiffies;
-			if (ieee80211_is_data(hdr->frame_control) &&
+			if (ieee80211_is_data_present(hdr->frame_control) &&
 			    !is_multicast_ether_addr(hdr->addr1))
 				link_sta->rx_stats.last_rate =
 					sta_stats_encode_rate(status);
@@ -1746,7 +1748,7 @@
 		 * match the current local configuration when processed.
 		 */
 		link_sta->rx_stats.last_rx = jiffies;
-		if (ieee80211_is_data(hdr->frame_control))
+		if (ieee80211_is_data_present(hdr->frame_control))
 			link_sta->rx_stats.last_rate = sta_stats_encode_rate(status);
 	}
 
@@ -1881,7 +1883,7 @@
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	int keyidx;
-	ieee80211_rx_result result = RX_DROP_UNUSABLE;
+	ieee80211_rx_result result = RX_DROP_U_DECRYPT_FAIL;
 	struct ieee80211_key *sta_ptk = NULL;
 	struct ieee80211_key *ptk_idx = NULL;
 	int mmie_keyidx = -1;
@@ -1931,7 +1933,7 @@
 			keyid = ieee80211_get_keyid(rx->skb);
 
 			if (unlikely(keyid < 0))
-				return RX_DROP_UNUSABLE;
+				return RX_DROP_U_NO_KEY_ID;
 
 			ptk_idx = rcu_dereference(rx->sta->ptk[keyid]);
 		}
@@ -2036,7 +2038,7 @@
 		keyidx = ieee80211_get_keyid(rx->skb);
 
 		if (unlikely(keyidx < 0))
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_NO_KEY_ID;
 
 		/* check per-station GTK first, if multicast packet */
 		if (is_multicast_ether_addr(hdr->addr1) && rx->link_sta)
@@ -2102,7 +2104,7 @@
 		result = ieee80211_crypto_gcmp_decrypt(rx);
 		break;
 	default:
-		result = RX_DROP_UNUSABLE;
+		result = RX_DROP_U_BAD_CIPHER;
 	}
 
 	/* the hdr variable is invalid after the decrypt handlers */
@@ -2110,7 +2112,7 @@
 	/* either the frame has been decrypted or will be dropped */
 	status->flag |= RX_FLAG_DECRYPTED;
 
-	if (unlikely(ieee80211_is_beacon(fc) && (result & RX_DROP_UNUSABLE) &&
+	if (unlikely(ieee80211_is_beacon(fc) && RX_RES_IS_UNUSABLE(result) &&
 		     rx->sdata->dev))
 		cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
 					     skb->data, skb->len);
@@ -2247,7 +2249,7 @@
 	I802_DEBUG_INC(rx->local->rx_handlers_fragments);
 
 	if (skb_linearize(rx->skb))
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_OOM;
 
 	/*
 	 *  skb_linearize() might change the skb->data and
@@ -2310,11 +2312,11 @@
 		u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
 
 		if (!requires_sequential_pn(rx, fc))
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_NONSEQ_PN;
 
 		/* Prevent mixed key and fragment cache attacks */
 		if (entry->key_color != rx->key->color)
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_BAD_KEY_COLOR;
 
 		memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
 		for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
@@ -2325,7 +2327,7 @@
 
 		rpn = rx->ccm_gcm.pn;
 		if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_REPLAY;
 		memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
 	} else if (entry->is_protected &&
 		   (!rx->key ||
@@ -2336,11 +2338,11 @@
 		 * if for TKIP Michael MIC should protect us, and WEP is a
 		 * lost cause anyway.
 		 */
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_EXPECT_DEFRAG_PROT;
 	} else if (entry->is_protected && rx->key &&
 		   entry->key_color != rx->key->color &&
 		   (status->flag & RX_FLAG_DECRYPTED)) {
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_BAD_KEY_COLOR;
 	}
 
 	skb_pull(rx->skb, ieee80211_hdrlen(fc));
@@ -2359,7 +2361,7 @@
 					      GFP_ATOMIC))) {
 			I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
 			__skb_queue_purge(&entry->skb_list);
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_OOM;
 		}
 	}
 	while ((skb = __skb_dequeue(&entry->skb_list))) {
@@ -2403,29 +2405,42 @@
 	return 0;
 }
 
-static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
+static ieee80211_rx_result
+ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
 {
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
-	__le16 fc = hdr->frame_control;
+	struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
+	__le16 fc = mgmt->frame_control;
 
 	/*
 	 * Pass through unencrypted frames if the hardware has
 	 * decrypted them already.
 	 */
 	if (status->flag & RX_FLAG_DECRYPTED)
-		return 0;
+		return RX_CONTINUE;
+
+	/* drop unicast protected dual (that wasn't protected) */
+	if (ieee80211_is_action(fc) &&
+	    mgmt->u.action.category == WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION)
+		return RX_DROP_U_UNPROT_DUAL;
 
 	if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
 		if (unlikely(!ieee80211_has_protected(fc) &&
-			     ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
-			     rx->key)) {
+			     ieee80211_is_unicast_robust_mgmt_frame(rx->skb))) {
 			if (ieee80211_is_deauth(fc) ||
-			    ieee80211_is_disassoc(fc))
+			    ieee80211_is_disassoc(fc)) {
+				/*
+				 * Permit unprotected deauth/disassoc frames
+				 * during 4-way-HS (key is installed after HS).
+				 */
+				if (!rx->key)
+					return RX_CONTINUE;
+
 				cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
 							     rx->skb->data,
 							     rx->skb->len);
-			return -EACCES;
+			}
+			return RX_DROP_U_UNPROT_UCAST_MGMT;
 		}
 		/* BIP does not use Protected field, so need to check MMIE */
 		if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
@@ -2435,14 +2450,14 @@
 				cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
 							     rx->skb->data,
 							     rx->skb->len);
-			return -EACCES;
+			return RX_DROP_U_UNPROT_MCAST_MGMT;
 		}
 		if (unlikely(ieee80211_is_beacon(fc) && rx->key &&
 			     ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
 			cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
 						     rx->skb->data,
 						     rx->skb->len);
-			return -EACCES;
+			return RX_DROP_U_UNPROT_BEACON;
 		}
 		/*
 		 * When using MFP, Action frames are not allowed prior to
@@ -2450,13 +2465,27 @@
 		 */
 		if (unlikely(ieee80211_is_action(fc) && !rx->key &&
 			     ieee80211_is_robust_mgmt_frame(rx->skb)))
-			return -EACCES;
+			return RX_DROP_U_UNPROT_ACTION;
+
+		/* drop unicast public action frames when using MFP */
+		if (is_unicast_ether_addr(mgmt->da) &&
+		    ieee80211_is_protected_dual_of_public_action(rx->skb))
+			return RX_DROP_U_UNPROT_UNICAST_PUB_ACTION;
 	}
 
-	return 0;
+	/*
+	 * Drop robust action frames before assoc regardless of MFP state,
+	 * after assoc we also have decided on MFP or not.
+	 */
+	if (ieee80211_is_action(fc) &&
+	    ieee80211_is_robust_mgmt_frame(rx->skb) &&
+	    (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))
+		return RX_DROP_U_UNPROT_ROBUST_ACTION;
+
+	return RX_CONTINUE;
 }
 
-static int
+static ieee80211_rx_result
 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
 {
 	struct ieee80211_sub_if_data *sdata = rx->sdata;
@@ -2468,32 +2497,31 @@
 	*port_control = false;
 	if (ieee80211_has_a4(hdr->frame_control) &&
 	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
-		return -1;
+		return RX_DROP_U_UNEXPECTED_VLAN_4ADDR;
 
 	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
 	    !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
-
 		if (!sdata->u.mgd.use_4addr)
-			return -1;
+			return RX_DROP_U_UNEXPECTED_STA_4ADDR;
 		else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr))
 			check_port_control = true;
 	}
 
 	if (is_multicast_ether_addr(hdr->addr1) &&
 	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
-		return -1;
+		return RX_DROP_U_UNEXPECTED_VLAN_MCAST;
 
 	ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
 	if (ret < 0)
-		return ret;
+		return RX_DROP_U_INVALID_8023;
 
 	ehdr = (struct ethhdr *) rx->skb->data;
 	if (ehdr->h_proto == rx->sdata->control_port_protocol)
 		*port_control = true;
 	else if (check_port_control)
-		return -1;
+		return RX_DROP_U_NOT_PORT_CONTROL;
 
-	return 0;
+	return RX_CONTINUE;
 }
 
 bool ieee80211_is_our_addr(struct ieee80211_sub_if_data *sdata,
@@ -2505,7 +2533,7 @@
 	if (ether_addr_equal(sdata->vif.addr, addr))
 		return true;
 
-	if (!sdata->vif.valid_links)
+	if (!ieee80211_vif_is_mld(&sdata->vif))
 		return false;
 
 	for (link_id = 0; link_id < ARRAY_SIZE(sdata->vif.link_conf); link_id++) {
@@ -2884,10 +2912,10 @@
 		skb = NULL;
 
 		if (skb_cow_head(fwd_skb, hdrlen - sizeof(struct ethhdr)))
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_OOM;
 
 		if (skb_linearize(fwd_skb))
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_OOM;
 	}
 
 	fwd_hdr = skb_push(fwd_skb, hdrlen - sizeof(struct ethhdr));
@@ -2983,7 +3011,7 @@
 					  rx->sdata->vif.addr,
 					  rx->sdata->vif.type,
 					  data_offset, true))
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_BAD_AMSDU;
 
 	if (rx->sta->amsdu_mesh_control < 0) {
 		s8 valid = -1;
@@ -3058,21 +3086,21 @@
 		switch (rx->sdata->vif.type) {
 		case NL80211_IFTYPE_AP_VLAN:
 			if (!rx->sdata->u.vlan.sta)
-				return RX_DROP_UNUSABLE;
+				return RX_DROP_U_BAD_4ADDR;
 			break;
 		case NL80211_IFTYPE_STATION:
 			if (!rx->sdata->u.mgd.use_4addr)
-				return RX_DROP_UNUSABLE;
+				return RX_DROP_U_BAD_4ADDR;
 			break;
 		case NL80211_IFTYPE_MESH_POINT:
 			break;
 		default:
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_BAD_4ADDR;
 		}
 	}
 
 	if (is_multicast_ether_addr(hdr->addr1) || !rx->sta)
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_BAD_AMSDU;
 
 	if (rx->key) {
 		/*
@@ -3085,7 +3113,7 @@
 		case WLAN_CIPHER_SUITE_WEP40:
 		case WLAN_CIPHER_SUITE_WEP104:
 		case WLAN_CIPHER_SUITE_TKIP:
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_BAD_AMSDU_CIPHER;
 		default:
 			break;
 		}
@@ -3104,7 +3132,6 @@
 	__le16 fc = hdr->frame_control;
 	ieee80211_rx_result res;
 	bool port_control;
-	int err;
 
 	if (unlikely(!ieee80211_is_data(hdr->frame_control)))
 		return RX_CONTINUE;
@@ -3125,9 +3152,9 @@
 		return RX_DROP_MONITOR;
 	}
 
-	err = __ieee80211_data_to_8023(rx, &port_control);
-	if (unlikely(err))
-		return RX_DROP_UNUSABLE;
+	res = __ieee80211_data_to_8023(rx, &port_control);
+	if (unlikely(res != RX_CONTINUE))
+		return res;
 
 	res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb);
 	if (res != RX_CONTINUE)
@@ -3356,6 +3383,11 @@
 	if (!ieee80211_is_mgmt(mgmt->frame_control))
 		return RX_DROP_MONITOR;
 
+	/* drop too small action frames */
+	if (ieee80211_is_action(mgmt->frame_control) &&
+	    rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
+		return RX_DROP_U_RUNT_ACTION;
+
 	if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
 	    ieee80211_is_beacon(mgmt->frame_control) &&
 	    !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
@@ -3375,10 +3407,7 @@
 		rx->flags |= IEEE80211_RX_BEACON_REPORTED;
 	}
 
-	if (ieee80211_drop_unencrypted_mgmt(rx))
-		return RX_DROP_UNUSABLE;
-
-	return RX_CONTINUE;
+	return ieee80211_drop_unencrypted_mgmt(rx);
 }
 
 static bool
@@ -3445,14 +3474,10 @@
 	if (!ieee80211_is_action(mgmt->frame_control))
 		return RX_CONTINUE;
 
-	/* drop too small frames */
-	if (len < IEEE80211_MIN_ACTION_SIZE)
-		return RX_DROP_UNUSABLE;
-
 	if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
 	    mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
 	    mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_ACTION_UNKNOWN_SRC;
 
 	switch (mgmt->u.action.category) {
 	case WLAN_CATEGORY_HT:
@@ -3532,6 +3557,7 @@
 
 			/* set cur_max_bandwidth and recalc sta bw */
 			rx->link_sta->cur_max_bandwidth = max_bw;
+			rx->link_sta->pub->sta_max_bandwidth = rx->link_sta->cur_max_bandwidth;
 			new_bw = ieee80211_sta_cur_vht_bw(rx->link_sta);
 
 			if (rx->link_sta->pub->bandwidth == new_bw)
@@ -3713,6 +3739,10 @@
 			break;
 		goto queue;
 	case WLAN_CATEGORY_S1G:
+		if (len < offsetofend(typeof(*mgmt),
+				      u.action.u.s1g.action_code))
+			break;
+
 		switch (mgmt->u.action.u.s1g.action_code) {
 		case WLAN_S1G_TWT_SETUP:
 		case WLAN_S1G_TWT_TEARDOWN:
@@ -3853,7 +3883,7 @@
 
 	/* do not return rejected action frames */
 	if (mgmt->u.action.category & 0x80)
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_REJECTED_ACTION_RESPONSE;
 
 	nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
 			       GFP_ATOMIC);
@@ -4245,6 +4275,7 @@
 					  u16 ssn, u64 filtered,
 					  u16 received_mpdus)
 {
+	struct ieee80211_local *local;
 	struct sta_info *sta;
 	struct tid_ampdu_rx *tid_agg_rx;
 	struct sk_buff_head frames;
@@ -4262,6 +4293,11 @@
 
 	sta = container_of(pubsta, struct sta_info, sta);
 
+	local = sta->sdata->local;
+	WARN_ONCE(local->hw.max_rx_aggregation_subframes > 64,
+		  "RX BA marker can't support max_rx_aggregation_subframes %u > 64\n",
+		  local->hw.max_rx_aggregation_subframes);
+
 	if (!ieee80211_rx_data_set_sta(&rx, sta, -1))
 		return;
 
@@ -4638,7 +4674,7 @@
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
 
-	lockdep_assert_held(&local->sta_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	list_for_each_entry(sta, &local->sta_list, list) {
 		if (sdata != sta->sdata &&
@@ -4652,9 +4688,9 @@
 {
 	struct ieee80211_local *local = sdata->local;
 
-	mutex_lock(&local->sta_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	__ieee80211_check_fast_rx_iface(sdata);
-	mutex_unlock(&local->sta_mtx);
 }
 
 static void ieee80211_rx_8023(struct ieee80211_rx_data *rx,
@@ -4907,6 +4943,37 @@
 	return true;
 }
 
+static bool ieee80211_check_frame_is_valid_to_process(struct ieee80211_rx_data *rx,
+						      struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_sub_if_data *sdata = rx->sdata;
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	struct mplink_block_list_info *mp_blink;
+	__le16 fc;
+
+	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
+
+	if ((rx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT) &&
+	    ieee80211_is_data(fc) && is_multicast_ether_addr(hdr->addr1)) {
+		spin_lock_bh(&ifmsh->mplink_blocking_list_lock);
+		list_for_each_entry(mp_blink, &ifmsh->mplink_blocking_list, list) {
+			if (ether_addr_equal(hdr->addr2, mp_blink->dst)) {
+				spin_unlock_bh(&ifmsh->mplink_blocking_list_lock);
+/* Commented out temporarily; this dump should become a runtime-configurable debug option */
+#if 0
+				print_hex_dump_bytes("skb_dump:", DUMP_PREFIX_NONE,
+						     skb->data, skb->len);
+#endif
+				return false;
+			}
+		}
+		spin_unlock_bh(&ifmsh->mplink_blocking_list_lock);
+	}
+
+	return true;
+}
+
 /*
  * This function returns whether or not the SKB
  * was destined for RX processing or not, which,
@@ -4922,6 +4989,9 @@
 	struct link_sta_info *link_sta = rx->link_sta;
 	struct ieee80211_link_data *link = rx->link;
 
+	if (!ieee80211_check_frame_is_valid_to_process(rx, skb))
+		return false;
+
 	rx->skb = skb;
 
 	/* See if we can do fast-rx; if we have to copy we already lost,
@@ -5274,7 +5344,7 @@
 	 * The same happens when we're not even started,
 	 * but that's worth a warning.
 	 */
-	if (WARN_ON(!local->started))
+	if (!local->started)
 		goto drop;
 
 	if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
diff -ruw linux-6.4/net/mac80211/s1g.c linux-6.4-fbx/net/mac80211/s1g.c
--- linux-6.4/net/mac80211/s1g.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/s1g.c	2023-11-07 13:38:44.086257347 +0100
@@ -2,6 +2,7 @@
 /*
  * S1G handling
  * Copyright(c) 2020 Adapt-IP
+ * Copyright (C) 2023 Intel Corporation
  */
 #include <linux/ieee80211.h>
 #include <net/mac80211.h>
@@ -153,11 +154,11 @@
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
 
-	mutex_lock(&local->sta_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	sta = sta_info_get_bss(sdata, mgmt->sa);
 	if (!sta)
-		goto out;
+		return;
 
 	switch (mgmt->u.action.u.s1g.action_code) {
 	case WLAN_S1G_TWT_SETUP:
@@ -169,9 +170,6 @@
 	default:
 		break;
 	}
-
-out:
-	mutex_unlock(&local->sta_mtx);
 }
 
 void ieee80211_s1g_status_twt_action(struct ieee80211_sub_if_data *sdata,
@@ -181,11 +179,11 @@
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
 
-	mutex_lock(&local->sta_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	sta = sta_info_get_bss(sdata, mgmt->da);
 	if (!sta)
-		goto out;
+		return;
 
 	switch (mgmt->u.action.u.s1g.action_code) {
 	case WLAN_S1G_TWT_SETUP:
@@ -195,7 +193,4 @@
 	default:
 		break;
 	}
-
-out:
-	mutex_unlock(&local->sta_mtx);
 }
diff -ruw linux-6.4/net/mac80211/scan.c linux-6.4-fbx/net/mac80211/scan.c
--- linux-6.4/net/mac80211/scan.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/scan.c	2023-11-07 13:38:44.086257347 +0100
@@ -9,7 +9,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2013-2015  Intel Mobile Communications GmbH
  * Copyright 2016-2017  Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
  */
 
 #include <linux/if_arp.h>
@@ -55,27 +55,45 @@
 	return qos_info & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD;
 }
 
-static void
-ieee80211_update_bss_from_elems(struct ieee80211_local *local,
-				struct ieee80211_bss *bss,
-				struct ieee802_11_elems *elems,
-				struct ieee80211_rx_status *rx_status,
-				bool beacon)
-{
+struct inform_bss_update_data {
+	struct ieee80211_rx_status *rx_status;
+	bool beacon;
+};
+
+void ieee80211_inform_bss(struct wiphy *wiphy,
+			  struct cfg80211_bss *cbss,
+			  const struct cfg80211_bss_ies *ies,
+			  void *data)
+{
+	struct ieee80211_local *local = wiphy_priv(wiphy);
+	struct inform_bss_update_data *update_data = data;
+	struct ieee80211_bss *bss = (void *)cbss->priv;
+	struct ieee80211_rx_status *rx_status;
+	struct ieee802_11_elems *elems;
 	int clen, srlen;
 
-	if (beacon)
+	/* This happens while joining an IBSS */
+	if (!update_data)
+		return;
+
+	elems = ieee802_11_parse_elems(ies->data, ies->len, false, NULL);
+	if (!elems)
+		return;
+
+	rx_status = update_data->rx_status;
+
+	if (update_data->beacon)
 		bss->device_ts_beacon = rx_status->device_timestamp;
 	else
 		bss->device_ts_presp = rx_status->device_timestamp;
 
 	if (elems->parse_error) {
-		if (beacon)
+		if (update_data->beacon)
 			bss->corrupt_data |= IEEE80211_BSS_CORRUPT_BEACON;
 		else
 			bss->corrupt_data |= IEEE80211_BSS_CORRUPT_PROBE_RESP;
 	} else {
-		if (beacon)
+		if (update_data->beacon)
 			bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_BEACON;
 		else
 			bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_PROBE_RESP;
@@ -124,7 +142,7 @@
 			bss->valid_data |= IEEE80211_BSS_VALID_WMM;
 	}
 
-	if (beacon) {
+	if (update_data->beacon) {
 		struct ieee80211_supported_band *sband =
 			local->hw.wiphy->bands[rx_status->band];
 		if (!(rx_status->encoding == RX_ENC_HT) &&
@@ -138,6 +156,8 @@
 			le32_to_cpu(elems->vht_cap_elem->vht_cap_info);
 	else
 		bss->vht_cap_info = 0;
+
+	kfree(elems);
 }
 
 struct ieee80211_bss *
@@ -148,16 +168,17 @@
 {
 	bool beacon = ieee80211_is_beacon(mgmt->frame_control) ||
 		      ieee80211_is_s1g_beacon(mgmt->frame_control);
-	struct cfg80211_bss *cbss, *non_tx_cbss;
-	struct ieee80211_bss *bss, *non_tx_bss;
+	struct cfg80211_bss *cbss;
+	struct inform_bss_update_data update_data = {
+		.rx_status = rx_status,
+		.beacon = beacon,
+	};
 	struct cfg80211_inform_bss bss_meta = {
 		.boottime_ns = rx_status->boottime_ns,
+		.drv_data = (void *)&update_data,
 	};
 	bool signal_valid;
 	struct ieee80211_sub_if_data *scan_sdata;
-	struct ieee802_11_elems *elems;
-	size_t baselen;
-	u8 *elements;
 
 	if (rx_status->flag & RX_FLAG_NO_SIGNAL_VAL)
 		bss_meta.signal = 0; /* invalid signal indication */
@@ -166,12 +187,6 @@
 	else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC))
 		bss_meta.signal = (rx_status->signal * 100) / local->hw.max_signal;
 
-	bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_20;
-	if (rx_status->bw == RATE_INFO_BW_5)
-		bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_5;
-	else if (rx_status->bw == RATE_INFO_BW_10)
-		bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_10;
-
 	bss_meta.chan = channel;
 
 	rcu_read_lock();
@@ -192,50 +207,12 @@
 	if (!cbss)
 		return NULL;
 
-	if (ieee80211_is_probe_resp(mgmt->frame_control)) {
-		elements = mgmt->u.probe_resp.variable;
-		baselen = offsetof(struct ieee80211_mgmt,
-				   u.probe_resp.variable);
-	} else if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
-		struct ieee80211_ext *ext = (void *) mgmt;
-
-		baselen = offsetof(struct ieee80211_ext, u.s1g_beacon.variable);
-		elements = ext->u.s1g_beacon.variable;
-	} else {
-		baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable);
-		elements = mgmt->u.beacon.variable;
-	}
-
-	if (baselen > len)
-		return NULL;
-
-	elems = ieee802_11_parse_elems(elements, len - baselen, false, cbss);
-	if (!elems)
-		return NULL;
-
 	/* In case the signal is invalid update the status */
 	signal_valid = channel == cbss->channel;
 	if (!signal_valid)
 		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
 
-	bss = (void *)cbss->priv;
-	ieee80211_update_bss_from_elems(local, bss, elems, rx_status, beacon);
-	kfree(elems);
-
-	list_for_each_entry(non_tx_cbss, &cbss->nontrans_list, nontrans_list) {
-		non_tx_bss = (void *)non_tx_cbss->priv;
-
-		elems = ieee802_11_parse_elems(elements, len - baselen, false,
-					       non_tx_cbss);
-		if (!elems)
-			continue;
-
-		ieee80211_update_bss_from_elems(local, non_tx_bss, elems,
-						rx_status, beacon);
-		kfree(elems);
-	}
-
-	return bss;
+	return (void *)cbss->priv;
 }
 
 static bool ieee80211_scan_accept_presp(struct ieee80211_sub_if_data *sdata,
@@ -292,7 +269,7 @@
 		 * to active scan
 		 */
 		 set_bit(SCAN_BEACON_DONE, &local->scanning);
-		 ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
+		wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
 	}
 
 	if (ieee80211_is_probe_resp(mgmt->frame_control)) {
@@ -332,22 +309,11 @@
 		ieee80211_rx_bss_put(local, bss);
 }
 
-static void
-ieee80211_prepare_scan_chandef(struct cfg80211_chan_def *chandef,
-			       enum nl80211_bss_scan_width scan_width)
+static void ieee80211_prepare_scan_chandef(struct cfg80211_chan_def *chandef)
 {
 	memset(chandef, 0, sizeof(*chandef));
-	switch (scan_width) {
-	case NL80211_BSS_CHAN_WIDTH_5:
-		chandef->width = NL80211_CHAN_WIDTH_5;
-		break;
-	case NL80211_BSS_CHAN_WIDTH_10:
-		chandef->width = NL80211_CHAN_WIDTH_10;
-		break;
-	default:
+
 		chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
-		break;
-	}
 }
 
 /* return false if no more work */
@@ -361,7 +327,7 @@
 	u32 flags = 0;
 
 	req = rcu_dereference_protected(local->scan_req,
-					lockdep_is_held(&local->mtx));
+					lockdep_is_held(&local->hw.wiphy->mtx));
 
 	if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
 		return false;
@@ -395,7 +361,7 @@
 	}
 
 	local->hw_scan_req->req.n_channels = n_chans;
-	ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
+	ieee80211_prepare_scan_chandef(&chandef);
 
 	if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
 		flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT;
@@ -426,7 +392,7 @@
 	struct ieee80211_sub_if_data *scan_sdata;
 	struct ieee80211_sub_if_data *sdata;
 
-	lockdep_assert_held(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	/*
 	 * It's ok to abort a not-yet-running scan (that
@@ -441,7 +407,7 @@
 		return;
 
 	scan_sdata = rcu_dereference_protected(local->scan_sdata,
-					       lockdep_is_held(&local->mtx));
+					       lockdep_is_held(&local->hw.wiphy->mtx));
 
 	if (hw_scan && !aborted &&
 	    !ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS) &&
@@ -450,7 +416,7 @@
 
 		rc = drv_hw_scan(local,
 			rcu_dereference_protected(local->scan_sdata,
-						  lockdep_is_held(&local->mtx)),
+						  lockdep_is_held(&local->hw.wiphy->mtx)),
 			local->hw_scan_req);
 
 		if (rc == 0)
@@ -467,7 +433,7 @@
 	local->hw_scan_req = NULL;
 
 	scan_req = rcu_dereference_protected(local->scan_req,
-					     lockdep_is_held(&local->mtx));
+					     lockdep_is_held(&local->hw.wiphy->mtx));
 
 	RCU_INIT_POINTER(local->scan_req, NULL);
 	RCU_INIT_POINTER(local->scan_sdata, NULL);
@@ -502,7 +468,7 @@
 	 */
 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
 		if (ieee80211_sdata_running(sdata))
-			ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+			wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
 	}
 
 	if (was_scanning)
@@ -522,7 +488,7 @@
 
 	memcpy(&local->scan_info, info, sizeof(*info));
 
-	ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
+	wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
 }
 EXPORT_SYMBOL(ieee80211_scan_completed);
 
@@ -562,8 +528,7 @@
 	/* We need to set power level at maximum rate for scanning. */
 	ieee80211_hw_config(local, 0);
 
-	ieee80211_queue_delayed_work(&local->hw,
-				     &local->scan_work, 0);
+	wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
 
 	return 0;
 }
@@ -573,20 +538,18 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_sub_if_data *sdata_iter;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!ieee80211_is_radar_required(local))
 		return true;
 
 	if (!regulatory_pre_cac_allowed(local->hw.wiphy))
 		return false;
 
-	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry(sdata_iter, &local->interfaces, list) {
-		if (sdata_iter->wdev.cac_started) {
-			mutex_unlock(&local->iflist_mtx);
+		if (sdata_iter->wdev.cac_started)
 			return false;
 		}
-	}
-	mutex_unlock(&local->iflist_mtx);
 
 	return true;
 }
@@ -609,7 +572,7 @@
 
 void ieee80211_run_deferred_scan(struct ieee80211_local *local)
 {
-	lockdep_assert_held(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!local->scan_req || local->scanning)
 		return;
@@ -617,10 +580,10 @@
 	if (!ieee80211_can_scan(local,
 				rcu_dereference_protected(
 					local->scan_sdata,
-					lockdep_is_held(&local->mtx))))
+					lockdep_is_held(&local->hw.wiphy->mtx))))
 		return;
 
-	ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
+	wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
 				     round_jiffies_relative(0));
 }
 
@@ -662,7 +625,7 @@
 	u32 flags = 0, tx_flags;
 
 	scan_req = rcu_dereference_protected(local->scan_req,
-					     lockdep_is_held(&local->mtx));
+					     lockdep_is_held(&local->hw.wiphy->mtx));
 
 	tx_flags = IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
 	if (scan_req->no_cck)
@@ -673,7 +636,7 @@
 		flags |= IEEE80211_PROBE_FLAG_RANDOM_SN;
 
 	sdata = rcu_dereference_protected(local->scan_sdata,
-					  lockdep_is_held(&local->mtx));
+					  lockdep_is_held(&local->hw.wiphy->mtx));
 
 	for (i = 0; i < scan_req->n_ssids; i++)
 		ieee80211_send_scan_probe_req(
@@ -698,7 +661,7 @@
 	bool hw_scan = local->ops->hw_scan;
 	int rc;
 
-	lockdep_assert_held(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (local->scan_req)
 		return -EBUSY;
@@ -812,7 +775,7 @@
 		}
 
 		/* Now, just wait a bit and we are all done! */
-		ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
+		wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
 					     next_delay);
 		return 0;
 	} else {
@@ -878,12 +841,13 @@
 	enum mac80211_scan_state next_scan_state;
 	struct cfg80211_scan_request *scan_req;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	/*
 	 * check if at least one STA interface is associated,
 	 * check if at least one STA interface has pending tx frames
 	 * and grab the lowest used beacon interval
 	 */
-	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		if (!ieee80211_sdata_running(sdata))
 			continue;
@@ -899,10 +863,9 @@
 			}
 		}
 	}
-	mutex_unlock(&local->iflist_mtx);
 
 	scan_req = rcu_dereference_protected(local->scan_req,
-					     lockdep_is_held(&local->mtx));
+					     lockdep_is_held(&local->hw.wiphy->mtx));
 
 	next_chan = scan_req->channels[local->scan_channel_idx];
 
@@ -939,11 +902,10 @@
 {
 	int skip;
 	struct ieee80211_channel *chan;
-	enum nl80211_bss_scan_width oper_scan_width;
 	struct cfg80211_scan_request *scan_req;
 
 	scan_req = rcu_dereference_protected(local->scan_req,
-					     lockdep_is_held(&local->mtx));
+					     lockdep_is_held(&local->hw.wiphy->mtx));
 
 	skip = 0;
 	chan = scan_req->channels[local->scan_channel_idx];
@@ -953,42 +915,21 @@
 	local->scan_chandef.freq1_offset = chan->freq_offset;
 	local->scan_chandef.center_freq2 = 0;
 
-	/* For scanning on the S1G band, ignore scan_width (which is constant
-	 * across all channels) for now since channel width is specific to each
-	 * channel. Detect the required channel width here and likely revisit
-	 * later. Maybe scan_width could be used to build the channel scan list?
+	/* For scanning on the S1G band, detect the channel width according to
+	 * the channel being scanned.
 	 */
 	if (chan->band == NL80211_BAND_S1GHZ) {
 		local->scan_chandef.width = ieee80211_s1g_channel_width(chan);
 		goto set_channel;
 	}
 
-	switch (scan_req->scan_width) {
-	case NL80211_BSS_CHAN_WIDTH_5:
-		local->scan_chandef.width = NL80211_CHAN_WIDTH_5;
-		break;
-	case NL80211_BSS_CHAN_WIDTH_10:
-		local->scan_chandef.width = NL80211_CHAN_WIDTH_10;
-		break;
-	default:
-	case NL80211_BSS_CHAN_WIDTH_20:
 		/* If scanning on oper channel, use whatever channel-type
 		 * is currently in use.
 		 */
-		oper_scan_width = cfg80211_chandef_to_scan_width(
-					&local->_oper_chandef);
-		if (chan == local->_oper_chandef.chan &&
-		    oper_scan_width == scan_req->scan_width)
+	if (chan == local->_oper_chandef.chan)
 			local->scan_chandef = local->_oper_chandef;
 		else
 			local->scan_chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
-		break;
-	case NL80211_BSS_CHAN_WIDTH_1:
-	case NL80211_BSS_CHAN_WIDTH_2:
-		/* shouldn't get here, S1G handled above */
-		WARN_ON(1);
-		break;
-	}
 
 set_channel:
 	if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
@@ -1060,7 +1001,7 @@
 	local->next_scan_state = SCAN_SET_CHANNEL;
 }
 
-void ieee80211_scan_work(struct work_struct *work)
+void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work)
 {
 	struct ieee80211_local *local =
 		container_of(work, struct ieee80211_local, scan_work.work);
@@ -1069,7 +1010,7 @@
 	unsigned long next_delay = 0;
 	bool aborted;
 
-	mutex_lock(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!ieee80211_can_run_worker(local)) {
 		aborted = true;
@@ -1077,9 +1018,9 @@
 	}
 
 	sdata = rcu_dereference_protected(local->scan_sdata,
-					  lockdep_is_held(&local->mtx));
+					  lockdep_is_held(&local->hw.wiphy->mtx));
 	scan_req = rcu_dereference_protected(local->scan_req,
-					     lockdep_is_held(&local->mtx));
+					     lockdep_is_held(&local->hw.wiphy->mtx));
 
 	/* When scanning on-channel, the first-callback means completed. */
 	if (test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) {
@@ -1093,7 +1034,7 @@
 	}
 
 	if (!sdata || !scan_req)
-		goto out;
+		return;
 
 	if (!local->scanning) {
 		int rc;
@@ -1102,13 +1043,12 @@
 		RCU_INIT_POINTER(local->scan_sdata, NULL);
 
 		rc = __ieee80211_start_scan(sdata, scan_req);
-		if (rc) {
+		if (!rc)
+			return;
 			/* need to complete scan in cfg80211 */
 			rcu_assign_pointer(local->scan_req, scan_req);
 			aborted = true;
 			goto out_complete;
-		} else
-			goto out;
 	}
 
 	clear_bit(SCAN_BEACON_WAIT, &local->scanning);
@@ -1154,38 +1094,32 @@
 		}
 	} while (next_delay == 0);
 
-	ieee80211_queue_delayed_work(&local->hw, &local->scan_work, next_delay);
-	goto out;
+	wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
+				 next_delay);
+	return;
 
 out_complete:
 	__ieee80211_scan_completed(&local->hw, aborted);
-out:
-	mutex_unlock(&local->mtx);
 }
 
 int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
 			   struct cfg80211_scan_request *req)
 {
-	int res;
-
-	mutex_lock(&sdata->local->mtx);
-	res = __ieee80211_start_scan(sdata, req);
-	mutex_unlock(&sdata->local->mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
-	return res;
+	return __ieee80211_start_scan(sdata, req);
 }
 
 int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
 				const u8 *ssid, u8 ssid_len,
 				struct ieee80211_channel **channels,
-				unsigned int n_channels,
-				enum nl80211_bss_scan_width scan_width)
+				unsigned int n_channels)
 {
 	struct ieee80211_local *local = sdata->local;
 	int ret = -EBUSY, i, n_ch = 0;
 	enum nl80211_band band;
 
-	mutex_lock(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	/* busy scanning */
 	if (local->scan_req)
@@ -1236,13 +1170,11 @@
 
 	local->int_scan_req->ssids = &local->scan_ssid;
 	local->int_scan_req->n_ssids = 1;
-	local->int_scan_req->scan_width = scan_width;
 	memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN);
 	local->int_scan_req->ssids[0].ssid_len = ssid_len;
 
 	ret = __ieee80211_start_scan(sdata, sdata->local->int_scan_req);
  unlock:
-	mutex_unlock(&local->mtx);
 	return ret;
 }
 
@@ -1269,9 +1201,8 @@
 	 * after the scan was completed/aborted.
 	 */
 
-	mutex_lock(&local->mtx);
 	if (!local->scan_req)
-		goto out;
+		return;
 
 	/*
 	 * We have a scan running and the driver already reported completion,
@@ -1281,7 +1212,7 @@
 	if (test_bit(SCAN_HW_SCANNING, &local->scanning) &&
 	    test_bit(SCAN_COMPLETED, &local->scanning)) {
 		set_bit(SCAN_HW_CANCELLED, &local->scanning);
-		goto out;
+		return;
 	}
 
 	if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
@@ -1293,21 +1224,14 @@
 		if (local->ops->cancel_hw_scan)
 			drv_cancel_hw_scan(local,
 				rcu_dereference_protected(local->scan_sdata,
-						lockdep_is_held(&local->mtx)));
-		goto out;
+						lockdep_is_held(&local->hw.wiphy->mtx)));
+		return;
 	}
 
-	/*
-	 * If the work is currently running, it must be blocked on
-	 * the mutex, but we'll set scan_sdata = NULL and it'll
-	 * simply exit once it acquires the mutex.
-	 */
-	cancel_delayed_work(&local->scan_work);
+	wiphy_delayed_work_cancel(local->hw.wiphy, &local->scan_work);
 	/* and clean up */
 	memset(&local->scan_info, 0, sizeof(local->scan_info));
 	__ieee80211_scan_completed(&local->hw, true);
-out:
-	mutex_unlock(&local->mtx);
 }
 
 int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
@@ -1322,9 +1246,9 @@
 	u8 *ie;
 	u32 flags = 0;
 
-	iebufsz = local->scan_ies_len + req->ie_len;
+	lockdep_assert_wiphy(local->hw.wiphy);
 
-	lockdep_assert_held(&local->mtx);
+	iebufsz = local->scan_ies_len + req->ie_len;
 
 	if (!local->ops->sched_scan_start)
 		return -ENOTSUPP;
@@ -1346,7 +1270,7 @@
 		goto out;
 	}
 
-	ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
+	ieee80211_prepare_scan_chandef(&chandef);
 
 	ieee80211_build_preq_ies(sdata, ie, num_bands * iebufsz,
 				 &sched_scan_ies, req->ie,
@@ -1375,19 +1299,13 @@
 				       struct cfg80211_sched_scan_request *req)
 {
 	struct ieee80211_local *local = sdata->local;
-	int ret;
 
-	mutex_lock(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
-	if (rcu_access_pointer(local->sched_scan_sdata)) {
-		mutex_unlock(&local->mtx);
+	if (rcu_access_pointer(local->sched_scan_sdata))
 		return -EBUSY;
-	}
-
-	ret = __ieee80211_request_sched_scan_start(sdata, req);
 
-	mutex_unlock(&local->mtx);
-	return ret;
+	return __ieee80211_request_sched_scan_start(sdata, req);
 }
 
 int ieee80211_request_sched_scan_stop(struct ieee80211_local *local)
@@ -1395,25 +1313,21 @@
 	struct ieee80211_sub_if_data *sched_scan_sdata;
 	int ret = -ENOENT;
 
-	mutex_lock(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
-	if (!local->ops->sched_scan_stop) {
-		ret = -ENOTSUPP;
-		goto out;
-	}
+	if (!local->ops->sched_scan_stop)
+		return -ENOTSUPP;
 
 	/* We don't want to restart sched scan anymore. */
 	RCU_INIT_POINTER(local->sched_scan_req, NULL);
 
 	sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata,
-						lockdep_is_held(&local->mtx));
+						lockdep_is_held(&local->hw.wiphy->mtx));
 	if (sched_scan_sdata) {
 		ret = drv_sched_scan_stop(local, sched_scan_sdata);
 		if (!ret)
 			RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
 	}
-out:
-	mutex_unlock(&local->mtx);
 
 	return ret;
 }
@@ -1430,24 +1344,21 @@
 
 void ieee80211_sched_scan_end(struct ieee80211_local *local)
 {
-	mutex_lock(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
-	if (!rcu_access_pointer(local->sched_scan_sdata)) {
-		mutex_unlock(&local->mtx);
+	if (!rcu_access_pointer(local->sched_scan_sdata))
 		return;
-	}
 
 	RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
 
 	/* If sched scan was aborted by the driver. */
 	RCU_INIT_POINTER(local->sched_scan_req, NULL);
 
-	mutex_unlock(&local->mtx);
-
-	cfg80211_sched_scan_stopped(local->hw.wiphy, 0);
+	cfg80211_sched_scan_stopped_locked(local->hw.wiphy, 0);
 }
 
-void ieee80211_sched_scan_stopped_work(struct work_struct *work)
+void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy,
+				       struct wiphy_work *work)
 {
 	struct ieee80211_local *local =
 		container_of(work, struct ieee80211_local,
@@ -1470,6 +1381,6 @@
 	if (local->in_reconfig)
 		return;
 
-	schedule_work(&local->sched_scan_stopped_work);
+	wiphy_work_queue(hw->wiphy, &local->sched_scan_stopped_work);
 }
 EXPORT_SYMBOL(ieee80211_sched_scan_stopped);
diff -ruw linux-6.4/net/mac80211/spectmgmt.c linux-6.4-fbx/net/mac80211/spectmgmt.c
--- linux-6.4/net/mac80211/spectmgmt.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/spectmgmt.c	2023-11-07 13:38:44.086257347 +0100
@@ -9,7 +9,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2008, Intel Corporation
  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2018, 2020, 2022 Intel Corporation
+ * Copyright (C) 2018, 2020, 2022-2023 Intel Corporation
  */
 
 #include <linux/ieee80211.h>
@@ -33,12 +33,14 @@
 	struct cfg80211_chan_def new_vht_chandef = {};
 	const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
 	const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
+	const struct ieee80211_bandwidth_indication *bwi;
 	int secondary_channel_offset = -1;
 
 	memset(csa_ie, 0, sizeof(*csa_ie));
 
 	sec_chan_offs = elems->sec_chan_offs;
 	wide_bw_chansw_ie = elems->wide_bw_chansw_ie;
+	bwi = elems->bandwidth_indication;
 
 	if (conn_flags & (IEEE80211_CONN_DISABLE_HT |
 			  IEEE80211_CONN_DISABLE_40MHZ)) {
@@ -132,7 +134,14 @@
 		break;
 	}
 
-	if (wide_bw_chansw_ie) {
+	if (bwi) {
+		/* start with the CSA one */
+		new_vht_chandef = csa_ie->chandef;
+		/* and update the width accordingly */
+		/* FIXME: support 160/320 */
+		ieee80211_chandef_eht_oper(&bwi->info, true, true,
+					   &new_vht_chandef);
+	} else if (wide_bw_chansw_ie) {
 		u8 new_seg1 = wide_bw_chansw_ie->new_center_freq_seg1;
 		struct ieee80211_vht_operation vht_oper = {
 			.chan_width =
diff -ruw linux-6.4/net/mac80211/sta_info.c linux-6.4-fbx/net/mac80211/sta_info.c
--- linux-6.4/net/mac80211/sta_info.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/sta_info.c	2023-12-12 17:24:34.179627645 +0100
@@ -4,7 +4,7 @@
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
  */
 
 #include <linux/module.h>
@@ -88,7 +88,6 @@
 	.max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
 };
 
-/* Caller must hold local->sta_mtx */
 static int sta_info_hash_del(struct ieee80211_local *local,
 			     struct sta_info *sta)
 {
@@ -99,19 +98,36 @@
 static int link_sta_info_hash_add(struct ieee80211_local *local,
 				  struct link_sta_info *link_sta)
 {
-	lockdep_assert_held(&local->sta_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	return rhltable_insert(&local->link_sta_hash,
-			       &link_sta->link_hash_node,
-			       link_sta_rht_params);
+			       &link_sta->link_hash_node, link_sta_rht_params);
 }
 
 static int link_sta_info_hash_del(struct ieee80211_local *local,
 				  struct link_sta_info *link_sta)
 {
-	lockdep_assert_held(&local->sta_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	return rhltable_remove(&local->link_sta_hash,
-			       &link_sta->link_hash_node,
-			       link_sta_rht_params);
+			       &link_sta->link_hash_node, link_sta_rht_params);
+}
+
+void ieee80211_purge_sta_txqs(struct sta_info *sta)
+{
+	struct ieee80211_local *local = sta->sdata->local;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
+		struct txq_info *txqi;
+
+		if (!sta->sta.txq[i])
+			continue;
+
+		txqi = to_txq_info(sta->sta.txq[i]);
+
+		ieee80211_txq_purge(local, txqi);
+	}
 }
 
 static void __cleanup_single_sta(struct sta_info *sta)
@@ -140,16 +156,7 @@
 		atomic_dec(&ps->num_sta_ps);
 	}
 
-	for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
-		struct txq_info *txqi;
-
-		if (!sta->sta.txq[i])
-			continue;
-
-		txqi = to_txq_info(sta->sta.txq[i]);
-
-		ieee80211_txq_purge(local, txqi);
-	}
+	ieee80211_purge_sta_txqs(sta);
 
 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
 		local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
@@ -331,7 +338,7 @@
 	int i = 0;
 
 	list_for_each_entry_rcu(sta, &local->sta_list, list,
-				lockdep_is_held(&local->sta_mtx)) {
+				lockdep_is_held(&local->hw.wiphy->mtx)) {
 		if (sdata != sta->sdata)
 			continue;
 		if (i < idx) {
@@ -355,9 +362,9 @@
 	struct sta_link_alloc *alloc = NULL;
 	struct link_sta_info *link_sta;
 
-	link_sta = rcu_dereference_protected(sta->link[link_id],
-					     lockdep_is_held(&sta->local->sta_mtx));
+	lockdep_assert_wiphy(sta->local->hw.wiphy);
 
+	link_sta = rcu_access_pointer(sta->link[link_id]);
 	if (WARN_ON(!link_sta))
 		return;
 
@@ -436,7 +443,6 @@
 	kfree(sta);
 }
 
-/* Caller must hold local->sta_mtx */
 static int sta_info_hash_add(struct ieee80211_local *local,
 			     struct sta_info *sta)
 {
@@ -454,9 +460,10 @@
 		return;
 
 	local_bh_disable();
-	if (!test_sta_flag(sta, WLAN_STA_PS_STA))
+	if (!test_sta_flag(sta, WLAN_STA_PS_STA)) {
+		if (test_sta_flag(sta, WLAN_STA_PS_DELIVER))
 		ieee80211_sta_ps_deliver_wakeup(sta);
-	else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL))
+	} else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL))
 		ieee80211_sta_ps_deliver_poll_response(sta);
 	else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD))
 		ieee80211_sta_ps_deliver_uapsd(sta);
@@ -555,8 +562,7 @@
 	spin_lock_init(&sta->lock);
 	spin_lock_init(&sta->ps_lock);
 	INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames);
-	INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
-	mutex_init(&sta->ampdu_mlme.mtx);
+	wiphy_work_init(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
 #ifdef CONFIG_MAC80211_MESH
 	if (ieee80211_vif_is_mesh(&sdata->vif)) {
 		sta->mesh = kzalloc(sizeof(*sta->mesh), gfp);
@@ -716,6 +722,8 @@
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
 	/*
 	 * Can't be a WARN_ON because it can be triggered through a race:
 	 * something inserts a STA (on one CPU) without holding the RTNL
@@ -733,7 +741,6 @@
 	 * for correctness.
 	 */
 	rcu_read_lock();
-	lockdep_assert_held(&sdata->local->sta_mtx);
 	if (ieee80211_hw_check(&sdata->local->hw, NEEDS_UNIQUE_STA_ADDR) &&
 	    ieee80211_find_sta_by_ifaddr(&sdata->local->hw, sta->addr, NULL)) {
 		rcu_read_unlock();
@@ -807,11 +814,6 @@
 	}
 }
 
-/*
- * should be called with sta_mtx locked
- * this function replaces the mutex lock
- * with a RCU lock
- */
 static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
 {
 	struct ieee80211_local *local = sta->local;
@@ -819,7 +821,7 @@
 	struct station_info *sinfo = NULL;
 	int err = 0;
 
-	lockdep_assert_held(&local->sta_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	/* check if STA exists already */
 	if (sta_info_get_bss(sdata, sta->sta.addr)) {
@@ -883,7 +885,7 @@
 			struct link_sta_info *link_sta;
 
 			link_sta = rcu_dereference_protected(sta->link[i],
-							     lockdep_is_held(&local->sta_mtx));
+							     lockdep_is_held(&local->hw.wiphy->mtx));
 
 			if (!link_sta)
 				continue;
@@ -905,7 +907,6 @@
 
 	/* move reference to rcu-protected */
 	rcu_read_lock();
-	mutex_unlock(&local->sta_mtx);
 
 	if (ieee80211_vif_is_mesh(&sdata->vif))
 		mesh_accept_plinks_update(sdata);
@@ -921,7 +922,6 @@
 	synchronize_net();
  out_cleanup:
 	cleanup_single_sta(sta);
-	mutex_unlock(&local->sta_mtx);
 	kfree(sinfo);
 	rcu_read_lock();
 	return err;
@@ -933,13 +933,11 @@
 	int err;
 
 	might_sleep();
-
-	mutex_lock(&local->sta_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	err = sta_info_insert_check(sta);
 	if (err) {
 		sta_info_free(local, sta);
-		mutex_unlock(&local->sta_mtx);
 		rcu_read_lock();
 		return err;
 	}
@@ -1218,7 +1216,7 @@
 	local = sta->local;
 	sdata = sta->sdata;
 
-	lockdep_assert_held(&local->sta_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	/*
 	 * Before removing the station from the driver and
@@ -1243,7 +1241,7 @@
 			continue;
 
 		link_sta = rcu_dereference_protected(sta->link[i],
-						     lockdep_is_held(&local->sta_mtx));
+						     lockdep_is_held(&local->hw.wiphy->mtx));
 
 		link_sta_info_hash_del(local, link_sta);
 	}
@@ -1274,7 +1272,137 @@
 	return 0;
 }
 
-static void __sta_info_destroy_part2(struct sta_info *sta)
+static int _sta_info_move_state(struct sta_info *sta,
+				enum ieee80211_sta_state new_state,
+				bool recalc)
+{
+	struct ieee80211_local *local = sta->local;
+
+	might_sleep();
+
+	if (sta->sta_state == new_state)
+		return 0;
+
+	/* check allowed transitions first */
+
+	switch (new_state) {
+	case IEEE80211_STA_NONE:
+		if (sta->sta_state != IEEE80211_STA_AUTH)
+			return -EINVAL;
+		break;
+	case IEEE80211_STA_AUTH:
+		if (sta->sta_state != IEEE80211_STA_NONE &&
+		    sta->sta_state != IEEE80211_STA_ASSOC)
+			return -EINVAL;
+		break;
+	case IEEE80211_STA_ASSOC:
+		if (sta->sta_state != IEEE80211_STA_AUTH &&
+		    sta->sta_state != IEEE80211_STA_AUTHORIZED)
+			return -EINVAL;
+		break;
+	case IEEE80211_STA_AUTHORIZED:
+		if (sta->sta_state != IEEE80211_STA_ASSOC)
+			return -EINVAL;
+		break;
+	default:
+		WARN(1, "invalid state %d", new_state);
+		return -EINVAL;
+	}
+
+	sta_dbg(sta->sdata, "moving STA %pM to state %d\n",
+		sta->sta.addr, new_state);
+
+	/* notify the driver before the actual changes so it can
+	 * fail the transition
+	 */
+	if (test_sta_flag(sta, WLAN_STA_INSERTED)) {
+		int err = drv_sta_state(sta->local, sta->sdata, sta,
+					sta->sta_state, new_state);
+		if (err)
+			return err;
+	}
+
+	/* reflect the change in all state variables */
+
+	switch (new_state) {
+	case IEEE80211_STA_NONE:
+		if (sta->sta_state == IEEE80211_STA_AUTH)
+			clear_bit(WLAN_STA_AUTH, &sta->_flags);
+		break;
+	case IEEE80211_STA_AUTH:
+		if (sta->sta_state == IEEE80211_STA_NONE) {
+			set_bit(WLAN_STA_AUTH, &sta->_flags);
+		} else if (sta->sta_state == IEEE80211_STA_ASSOC) {
+			clear_bit(WLAN_STA_ASSOC, &sta->_flags);
+			if (recalc) {
+				ieee80211_recalc_min_chandef(sta->sdata, -1);
+				if (!sta->sta.support_p2p_ps)
+					ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
+			}
+		}
+		break;
+	case IEEE80211_STA_ASSOC:
+		if (sta->sta_state == IEEE80211_STA_AUTH) {
+			set_bit(WLAN_STA_ASSOC, &sta->_flags);
+			sta->assoc_at = ktime_get_boottime_ns();
+			if (recalc) {
+				ieee80211_recalc_min_chandef(sta->sdata, -1);
+				if (!sta->sta.support_p2p_ps)
+					ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
+			}
+		} else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
+			ieee80211_vif_dec_num_mcast(sta->sdata);
+			clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
+
+			/*
+			 * If we have encryption offload, flush (station) queues
+			 * (after ensuring concurrent TX completed) so we won't
+			 * transmit anything later unencrypted if/when keys are
+			 * also removed, which might otherwise happen depending
+			 * on how the hardware offload works.
+			 */
+			if (local->ops->set_key) {
+				synchronize_net();
+				if (local->ops->flush_sta)
+					drv_flush_sta(local, sta->sdata, sta);
+				else
+					ieee80211_flush_queues(local,
+							       sta->sdata,
+							       false);
+			}
+
+			ieee80211_clear_fast_xmit(sta);
+			ieee80211_clear_fast_rx(sta);
+		}
+		break;
+	case IEEE80211_STA_AUTHORIZED:
+		if (sta->sta_state == IEEE80211_STA_ASSOC) {
+			ieee80211_vif_inc_num_mcast(sta->sdata);
+			set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
+			ieee80211_check_fast_xmit(sta);
+			ieee80211_check_fast_rx(sta);
+		}
+		if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+		    sta->sdata->vif.type == NL80211_IFTYPE_AP)
+			cfg80211_send_layer2_update(sta->sdata->dev,
+						    sta->sta.addr);
+		break;
+	default:
+		break;
+	}
+
+	sta->sta_state = new_state;
+
+	return 0;
+}
+
+int sta_info_move_state(struct sta_info *sta,
+			enum ieee80211_sta_state new_state)
+{
+	return _sta_info_move_state(sta, new_state, true);
+}
+
+static void __sta_info_destroy_part2(struct sta_info *sta, bool recalc)
 {
 	struct ieee80211_local *local = sta->local;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -1286,26 +1414,28 @@
 	 *	 after _part1 and before _part2!
 	 */
 
+	/*
+	 * There's a potential race in _part1 where we set WLAN_STA_BLOCK_BA
+	 * but someone might have just gotten past a check, and not yet into
+	 * queuing the work/creating the data/etc.
+	 *
+	 * Do another round of destruction so that the worker is certainly
+	 * canceled before we later free the station.
+	 *
+	 * Since this is after synchronize_rcu()/synchronize_net() we're now
+	 * certain that nobody can actually hold a reference to the STA and
+	 * be calling e.g. ieee80211_start_tx_ba_session().
+	 */
+	ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA);
+
 	might_sleep();
-	lockdep_assert_held(&local->sta_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
-		ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+		ret = _sta_info_move_state(sta, IEEE80211_STA_ASSOC, recalc);
 		WARN_ON_ONCE(ret);
 	}
 
-	/* Flush queues before removing keys, as that might remove them
-	 * from hardware, and then depending on the offload method, any
-	 * frames sitting on hardware queues might be sent out without
-	 * any encryption at all.
-	 */
-	if (local->ops->set_key) {
-		if (local->ops->flush_sta)
-			drv_flush_sta(local, sta->sdata, sta);
-		else
-			ieee80211_flush_queues(local, sta->sdata, false);
-	}
-
 	/* now keys can no longer be reached */
 	ieee80211_free_sta_keys(local, sta);
 
@@ -1318,7 +1448,7 @@
 	local->sta_generation++;
 
 	while (sta->sta_state > IEEE80211_STA_NONE) {
-		ret = sta_info_move_state(sta, sta->sta_state - 1);
+		ret = _sta_info_move_state(sta, sta->sta_state - 1, recalc);
 		if (ret) {
 			WARN_ON_ONCE(1);
 			break;
@@ -1355,7 +1485,7 @@
 
 	synchronize_net();
 
-	__sta_info_destroy_part2(sta);
+	__sta_info_destroy_part2(sta, true);
 
 	return 0;
 }
@@ -1363,28 +1493,22 @@
 int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
 {
 	struct sta_info *sta;
-	int ret;
 
-	mutex_lock(&sdata->local->sta_mtx);
-	sta = sta_info_get(sdata, addr);
-	ret = __sta_info_destroy(sta);
-	mutex_unlock(&sdata->local->sta_mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
-	return ret;
+	sta = sta_info_get(sdata, addr);
+	return __sta_info_destroy(sta);
 }
 
 int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
 			      const u8 *addr)
 {
 	struct sta_info *sta;
-	int ret;
 
-	mutex_lock(&sdata->local->sta_mtx);
-	sta = sta_info_get_bss(sdata, addr);
-	ret = __sta_info_destroy(sta);
-	mutex_unlock(&sdata->local->sta_mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
-	return ret;
+	sta = sta_info_get_bss(sdata, addr);
+	return __sta_info_destroy(sta);
 }
 
 static void sta_info_cleanup(struct timer_list *t)
@@ -1424,7 +1548,6 @@
 	}
 
 	spin_lock_init(&local->tim_lock);
-	mutex_init(&local->sta_mtx);
 	INIT_LIST_HEAD(&local->sta_list);
 
 	timer_setup(&local->sta_cleanup, sta_info_cleanup, 0);
@@ -1447,11 +1570,11 @@
 	int ret = 0;
 
 	might_sleep();
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	WARN_ON(vlans && sdata->vif.type != NL80211_IFTYPE_AP);
 	WARN_ON(vlans && !sdata->bss);
 
-	mutex_lock(&local->sta_mtx);
 	list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
 		if (sdata == sta->sdata ||
 		    (vlans && sdata->bss == sta->sdata->bss)) {
@@ -1462,11 +1585,19 @@
 	}
 
 	if (!list_empty(&free_list)) {
+		bool support_p2p_ps = true;
+
 		synchronize_net();
-		list_for_each_entry_safe(sta, tmp, &free_list, free_list)
-			__sta_info_destroy_part2(sta);
+		list_for_each_entry_safe(sta, tmp, &free_list, free_list) {
+			if (!sta->sta.support_p2p_ps)
+				support_p2p_ps = false;
+			__sta_info_destroy_part2(sta, false);
+		}
+
+		ieee80211_recalc_min_chandef(sdata, -1);
+		if (!support_p2p_ps)
+			ieee80211_recalc_p2p_go_ps_allowed(sdata);
 	}
-	mutex_unlock(&local->sta_mtx);
 
 	return ret;
 }
@@ -1477,7 +1608,7 @@
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta, *tmp;
 
-	mutex_lock(&local->sta_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
 		unsigned long last_active = ieee80211_sta_last_active(sta);
@@ -1496,8 +1627,6 @@
 			WARN_ON(__sta_info_destroy(sta));
 		}
 	}
-
-	mutex_unlock(&local->sta_mtx);
 }
 
 struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
@@ -2252,106 +2381,6 @@
 	}
 }
 
-int sta_info_move_state(struct sta_info *sta,
-			enum ieee80211_sta_state new_state)
-{
-	might_sleep();
-
-	if (sta->sta_state == new_state)
-		return 0;
-
-	/* check allowed transitions first */
-
-	switch (new_state) {
-	case IEEE80211_STA_NONE:
-		if (sta->sta_state != IEEE80211_STA_AUTH)
-			return -EINVAL;
-		break;
-	case IEEE80211_STA_AUTH:
-		if (sta->sta_state != IEEE80211_STA_NONE &&
-		    sta->sta_state != IEEE80211_STA_ASSOC)
-			return -EINVAL;
-		break;
-	case IEEE80211_STA_ASSOC:
-		if (sta->sta_state != IEEE80211_STA_AUTH &&
-		    sta->sta_state != IEEE80211_STA_AUTHORIZED)
-			return -EINVAL;
-		break;
-	case IEEE80211_STA_AUTHORIZED:
-		if (sta->sta_state != IEEE80211_STA_ASSOC)
-			return -EINVAL;
-		break;
-	default:
-		WARN(1, "invalid state %d", new_state);
-		return -EINVAL;
-	}
-
-	sta_dbg(sta->sdata, "moving STA %pM to state %d\n",
-		sta->sta.addr, new_state);
-
-	/*
-	 * notify the driver before the actual changes so it can
-	 * fail the transition
-	 */
-	if (test_sta_flag(sta, WLAN_STA_INSERTED)) {
-		int err = drv_sta_state(sta->local, sta->sdata, sta,
-					sta->sta_state, new_state);
-		if (err)
-			return err;
-	}
-
-	/* reflect the change in all state variables */
-
-	switch (new_state) {
-	case IEEE80211_STA_NONE:
-		if (sta->sta_state == IEEE80211_STA_AUTH)
-			clear_bit(WLAN_STA_AUTH, &sta->_flags);
-		break;
-	case IEEE80211_STA_AUTH:
-		if (sta->sta_state == IEEE80211_STA_NONE) {
-			set_bit(WLAN_STA_AUTH, &sta->_flags);
-		} else if (sta->sta_state == IEEE80211_STA_ASSOC) {
-			clear_bit(WLAN_STA_ASSOC, &sta->_flags);
-			ieee80211_recalc_min_chandef(sta->sdata, -1);
-			if (!sta->sta.support_p2p_ps)
-				ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
-		}
-		break;
-	case IEEE80211_STA_ASSOC:
-		if (sta->sta_state == IEEE80211_STA_AUTH) {
-			set_bit(WLAN_STA_ASSOC, &sta->_flags);
-			sta->assoc_at = ktime_get_boottime_ns();
-			ieee80211_recalc_min_chandef(sta->sdata, -1);
-			if (!sta->sta.support_p2p_ps)
-				ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
-		} else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
-			ieee80211_vif_dec_num_mcast(sta->sdata);
-			clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
-			ieee80211_clear_fast_xmit(sta);
-			ieee80211_clear_fast_rx(sta);
-		}
-		break;
-	case IEEE80211_STA_AUTHORIZED:
-		if (sta->sta_state == IEEE80211_STA_ASSOC) {
-			ieee80211_vif_inc_num_mcast(sta->sdata);
-			set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
-			ieee80211_check_fast_xmit(sta);
-			ieee80211_check_fast_rx(sta);
-		}
-		if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
-		    sta->sdata->vif.type == NL80211_IFTYPE_AP)
-			cfg80211_send_layer2_update(sta->sdata->dev,
-						    sta->sta.addr);
-		break;
-	default:
-		break;
-	}
-
-	sta->sta_state = new_state;
-
-	return 0;
-}
-
 static struct ieee80211_sta_rx_stats *
 sta_get_last_rx_stats(struct sta_info *sta)
 {
@@ -2691,7 +2720,8 @@
 	}
 
 	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) &&
-	    !sta->sta.valid_links) {
+	    !sta->sta.valid_links &&
+	    ieee80211_rate_valid(&sta->deflink.tx_stats.last_rate)) {
 		sta_set_rate_info_tx(sta, &sta->deflink.tx_stats.last_rate,
 				     &sinfo->txrate);
 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
@@ -2803,6 +2833,10 @@
 	struct rate_control_ref *ref = NULL;
 	u32 thr = 0;
 
+	/* first check for override */
+	if (sta->deflink.pub->tp_override)
+		return sta->deflink.pub->tp_override;
+
 	if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
 		ref = local->rate_ctrl;
 
@@ -2852,7 +2886,9 @@
 	struct sta_link_alloc *alloc;
 	int ret;
 
-	lockdep_assert_held(&sdata->local->sta_mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
+	WARN_ON(!test_sta_flag(sta, WLAN_STA_INSERTED));
 
 	/* must represent an MLD from the start */
 	if (WARN_ON(!sta->sta.valid_links))
@@ -2881,7 +2917,9 @@
 
 void ieee80211_sta_free_link(struct sta_info *sta, unsigned int link_id)
 {
-	lockdep_assert_held(&sta->sdata->local->sta_mtx);
+	lockdep_assert_wiphy(sta->sdata->local->hw.wiphy);
+
+	WARN_ON(!test_sta_flag(sta, WLAN_STA_INSERTED));
 
 	sta_remove_link(sta, link_id, false);
 }
@@ -2895,7 +2933,7 @@
 	int ret;
 
 	link_sta = rcu_dereference_protected(sta->link[link_id],
-					     lockdep_is_held(&sdata->local->sta_mtx));
+					     lockdep_is_held(&sdata->local->hw.wiphy->mtx));
 
 	if (WARN_ON(old_links == new_links || !link_sta))
 		return -EINVAL;
@@ -2910,9 +2948,11 @@
 
 	sta->sta.valid_links = new_links;
 
-	if (!test_sta_flag(sta, WLAN_STA_INSERTED))
+	if (WARN_ON(!test_sta_flag(sta, WLAN_STA_INSERTED)))
 		goto hash;
 
+	ieee80211_recalc_min_chandef(sdata, link_id);
+
 	/* Ensure the values are updated for the driver,
 	 * redone by sta_remove_link on failure.
 	 */
@@ -2937,11 +2977,11 @@
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	u16 old_links = sta->sta.valid_links;
 
-	lockdep_assert_held(&sdata->local->sta_mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	sta->sta.valid_links &= ~BIT(link_id);
 
-	if (test_sta_flag(sta, WLAN_STA_INSERTED))
+	if (!WARN_ON(!test_sta_flag(sta, WLAN_STA_INSERTED)))
 		drv_change_sta_links(sdata->local, sdata, &sta->sta,
 				     old_links, sta->sta.valid_links);
 
@@ -2968,7 +3008,7 @@
 				   WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB) << 1;
 
 	if (val)
-		sta->sta.max_amsdu_subframes = 4 << val;
+		sta->sta.max_amsdu_subframes = 4 << (4 - val);
 }
 
 #ifdef CONFIG_LOCKDEP
@@ -2976,7 +3016,7 @@
 {
 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
 
-	return lockdep_is_held(&sta->local->sta_mtx);
+	return lockdep_is_held(&sta->local->hw.wiphy->mtx);
 }
 EXPORT_SYMBOL(lockdep_sta_mutex_held);
 #endif
diff -ruw linux-6.4/net/mac80211/sta_info.h linux-6.4-fbx/net/mac80211/sta_info.h
--- linux-6.4/net/mac80211/sta_info.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/sta_info.h	2023-12-12 17:24:34.179627645 +0100
@@ -3,7 +3,7 @@
  * Copyright 2002-2005, Devicescape Software, Inc.
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright(c) 2020-2022 Intel Corporation
+ * Copyright(c) 2020-2023 Intel Corporation
  */
 
 #ifndef STA_INFO_H
@@ -259,9 +259,6 @@
 /**
  * struct sta_ampdu_mlme - STA aggregation information.
  *
- * @mtx: mutex to protect all TX data (except non-NULL assignments
- *	to tid_tx[idx], which are protected by the sta spinlock)
- *	tid_start_tx is also protected by sta->lock.
  * @tid_rx: aggregation info for Rx per TID -- RCU protected
  * @tid_rx_token: dialog tokens for valid aggregation sessions
  * @tid_rx_timer_expired: bitmap indicating on which TIDs the
@@ -275,13 +272,13 @@
  *	unexpected aggregation related frames outside a session
  * @work: work struct for starting/stopping aggregation
  * @tid_tx: aggregation info for Tx per TID
- * @tid_start_tx: sessions where start was requested
+ * @tid_start_tx: sessions where start was requested, not just protected
+ *	by wiphy mutex but also sta->lock
  * @last_addba_req_time: timestamp of the last addBA request.
  * @addba_req_num: number of times addBA request has been sent.
  * @dialog_token_allocator: dialog token enumerator for each new session;
  */
 struct sta_ampdu_mlme {
-	struct mutex mtx;
 	/* rx */
 	struct tid_ampdu_rx __rcu *tid_rx[IEEE80211_NUM_TIDS];
 	u8 tid_rx_token[IEEE80211_NUM_TIDS];
@@ -291,7 +288,7 @@
 	unsigned long agg_session_valid[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
 	unsigned long unexpected_agg[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
 	/* tx */
-	struct work_struct work;
+	struct wiphy_work work;
 	struct tid_ampdu_tx __rcu *tid_tx[IEEE80211_NUM_TIDS];
 	struct tid_ampdu_tx *tid_start_tx[IEEE80211_NUM_TIDS];
 	unsigned long last_addba_req_time[IEEE80211_NUM_TIDS];
@@ -618,8 +615,6 @@
  * @sta: station information we share with the driver
  * @sta_state: duplicates information about station state (for debug)
  * @rcu_head: RCU head used for freeing this station struct
- * @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
- *	taken from HT/VHT capabilities or VHT operating mode notification
  * @cparams: CoDel parameters for this station.
  * @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED)
  * @amsdu_mesh_control: track the mesh A-MSDU format used by the peer:
@@ -702,6 +697,8 @@
 	struct airtime_info airtime[IEEE80211_NUM_ACS];
 	u16 airtime_weight;
 
+	u32 tp_override;
+
 	/*
 	 * Aggregation information, locked with lock.
 	 */
@@ -796,13 +793,10 @@
 void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
 			     struct tid_ampdu_tx *tid_tx);
 
-static inline struct tid_ampdu_tx *
-rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
-{
-	return rcu_dereference_protected(sta->ampdu_mlme.tid_tx[tid],
-					 lockdep_is_held(&sta->lock) ||
-					 lockdep_is_held(&sta->ampdu_mlme.mtx));
-}
+#define rcu_dereference_protected_tid_tx(sta, tid)			\
+	rcu_dereference_protected((sta)->ampdu_mlme.tid_tx[tid],	\
+				  lockdep_is_held(&(sta)->lock) ||	\
+				  lockdep_is_held(&(sta)->local->hw.wiphy->mtx));
 
 /* Maximum number of frames to buffer per power saving station per AC */
 #define STA_MAX_TX_BUFFER	64
@@ -827,7 +821,7 @@
 struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
 				  const u8 *addr);
 
-/* user must hold sta_mtx or be in RCU critical section */
+/* user must hold wiphy mutex or be in RCU critical section */
 struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local,
 				       const u8 *sta_addr, const u8 *vif_addr);
 
diff -ruw linux-6.4/net/mac80211/status.c linux-6.4-fbx/net/mac80211/status.c
--- linux-6.4/net/mac80211/status.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/status.c	2024-01-19 17:01:19.909848232 +0100
@@ -5,7 +5,7 @@
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2008-2010	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright 2021-2022  Intel Corporation
+ * Copyright 2021-2023  Intel Corporation
  */
 
 #include <linux/export.h>
@@ -17,6 +17,7 @@
 #include "mesh.h"
 #include "led.h"
 #include "wme.h"
+#include "fbx_scum.h"
 
 
 void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
@@ -184,8 +185,6 @@
 static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
 {
 	struct ieee80211_mgmt *mgmt = (void *) skb->data;
-	struct ieee80211_local *local = sta->local;
-	struct ieee80211_sub_if_data *sdata = sta->sdata;
 
 	if (ieee80211_is_data_qos(mgmt->frame_control)) {
 		struct ieee80211_hdr *hdr = (void *) skb->data;
@@ -194,39 +193,6 @@
 
 		ieee80211_check_pending_bar(sta, hdr->addr1, tid);
 	}
-
-	if (ieee80211_is_action(mgmt->frame_control) &&
-	    !ieee80211_has_protected(mgmt->frame_control) &&
-	    mgmt->u.action.category == WLAN_CATEGORY_HT &&
-	    mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS &&
-	    ieee80211_sdata_running(sdata)) {
-		enum ieee80211_smps_mode smps_mode;
-
-		switch (mgmt->u.action.u.ht_smps.smps_control) {
-		case WLAN_HT_SMPS_CONTROL_DYNAMIC:
-			smps_mode = IEEE80211_SMPS_DYNAMIC;
-			break;
-		case WLAN_HT_SMPS_CONTROL_STATIC:
-			smps_mode = IEEE80211_SMPS_STATIC;
-			break;
-		case WLAN_HT_SMPS_CONTROL_DISABLED:
-		default: /* shouldn't happen since we don't send that */
-			smps_mode = IEEE80211_SMPS_OFF;
-			break;
-		}
-
-		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
-			/*
-			 * This update looks racy, but isn't -- if we come
-			 * here we've definitely got a station that we're
-			 * talking to, and on a managed interface that can
-			 * only be the AP. And the only other place updating
-			 * this variable in managed mode is before association.
-			 */
-			sdata->deflink.smps_mode = smps_mode;
-			ieee80211_queue_work(&local->hw, &sdata->recalc_smps);
-		}
-	}
 }
 
 static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn)
@@ -291,7 +257,7 @@
 static void
 ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
 				 struct sk_buff *skb, int retry_count,
-				 int rtap_len, int shift,
+				 int rtap_len,
 				 struct ieee80211_tx_status *status)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -342,7 +308,7 @@
 
 	if (legacy_rate) {
 		rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_RATE));
-		*pos = DIV_ROUND_UP(legacy_rate, 5 * (1 << shift));
+		*pos = DIV_ROUND_UP(legacy_rate, 5);
 		/* padding for tx flags */
 		pos += 2;
 	}
@@ -633,7 +599,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&local->ack_status_lock, flags);
-	skb = idr_remove(&local->ack_status_frames, info->ack_frame_id);
+	skb = idr_remove(&local->ack_status_frames, info->status_data);
 	spin_unlock_irqrestore(&local->ack_status_lock, flags);
 
 	if (!skb)
@@ -695,6 +661,42 @@
 	}
 }
 
+static void ieee80211_handle_smps_status(struct ieee80211_sub_if_data *sdata,
+					 bool acked, u16 status_data)
+{
+	u16 sub_data = u16_get_bits(status_data, IEEE80211_STATUS_SUBDATA_MASK);
+	enum ieee80211_smps_mode smps_mode = sub_data & 3;
+	int link_id = (sub_data >> 2);
+	struct ieee80211_link_data *link;
+
+	if (!sdata || !ieee80211_sdata_running(sdata))
+		return;
+
+	if (!acked)
+		return;
+
+	if (sdata->vif.type != NL80211_IFTYPE_STATION)
+		return;
+
+	if (WARN(link_id >= ARRAY_SIZE(sdata->link),
+		 "bad SMPS status link: %d\n", link_id))
+		return;
+
+	link = rcu_dereference(sdata->link[link_id]);
+	if (!link)
+		return;
+
+	/*
+	 * This update looks racy, but isn't, the only other place
+	 * updating this variable is in managed mode before assoc,
+	 * and we have to be associated to have a status from the
+	 * action frame TX, since we cannot send it while we're not
+	 * associated yet.
+	 */
+	link->smps_mode = smps_mode;
+	wiphy_work_queue(sdata->local->hw.wiphy, &link->u.mgd.recalc_smps);
+}
+
 static void ieee80211_report_used_skb(struct ieee80211_local *local,
 				      struct sk_buff *skb, bool dropped,
 				      ktime_t ack_hwtstamp)
@@ -730,12 +732,9 @@
 		if (!sdata) {
 			skb->dev = NULL;
 		} else if (!dropped) {
-			unsigned int hdr_size =
-				ieee80211_hdrlen(hdr->frame_control);
-
 			/* Check to see if packet is a TDLS teardown packet */
 			if (ieee80211_is_data(hdr->frame_control) &&
-			    (ieee80211_get_tdls_action(skb, hdr_size) ==
+			    (ieee80211_get_tdls_action(skb) ==
 			     WLAN_TDLS_TEARDOWN)) {
 				ieee80211_tdls_td_tx_handle(local, sdata, skb,
 							    info->flags);
@@ -747,7 +746,7 @@
 					if (qskb) {
 						skb_queue_tail(&sdata->status_queue,
 							       qskb);
-						ieee80211_queue_work(&local->hw,
+						wiphy_work_queue(local->hw.wiphy,
 								     &sdata->work);
 					}
 				}
@@ -759,9 +758,24 @@
 		}
 
 		rcu_read_unlock();
-	} else if (info->ack_frame_id) {
+	} else if (info->status_data_idr) {
 		ieee80211_report_ack_skb(local, skb, acked, dropped,
 					 ack_hwtstamp);
+	} else if (info->status_data) {
+		struct ieee80211_sub_if_data *sdata;
+
+		rcu_read_lock();
+
+		sdata = ieee80211_sdata_from_skb(local, skb);
+
+		switch (u16_get_bits(info->status_data,
+				     IEEE80211_STATUS_TYPE_MASK)) {
+		case IEEE80211_STATUS_TYPE_SMPS:
+			ieee80211_handle_smps_status(sdata, acked,
+						     info->status_data);
+			break;
+		}
+		rcu_read_unlock();
 	}
 
 	if (!dropped && skb->destructor) {
@@ -862,7 +876,7 @@
 }
 
 void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
-			  int retry_count, int shift, bool send_to_cooked,
+			  int retry_count, bool send_to_cooked,
 			  struct ieee80211_tx_status *status)
 {
 	struct sk_buff *skb2;
@@ -879,7 +893,7 @@
 		return;
 	}
 	ieee80211_add_tx_radiotap_header(local, skb, retry_count,
-					 rtap_len, shift, status);
+					 rtap_len, status);
 
 	/* XXX: is this sufficient for BPF? */
 	skb_reset_mac_header(skb);
@@ -894,6 +908,9 @@
 			if (!ieee80211_sdata_running(sdata))
 				continue;
 
+			if (fbx80211_skip_mon(sdata))
+				continue;
+
 			if ((sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) &&
 			    !send_to_cooked)
 				continue;
@@ -932,14 +949,14 @@
 	bool acked;
 	bool noack_success;
 	struct ieee80211_bar *bar;
-	int shift = 0;
 	int tid = IEEE80211_NUM_TIDS;
+	bool ack_requested;
 
+	ack_requested = !(info->flags & IEEE80211_TX_CTL_NO_ACK);
 	fc = hdr->frame_control;
 
-	if (status->sta) {
+	if (status->sta && ack_requested) {
 		sta = container_of(status->sta, struct sta_info, sta);
-		shift = ieee80211_vif_get_shift(&sta->sdata->vif);
 
 		if (info->flags & IEEE80211_TX_STATUS_EOSP)
 			clear_sta_flag(sta, WLAN_STA_SP);
@@ -1077,7 +1094,7 @@
 	}
 
 	/* send to monitor interfaces */
-	ieee80211_tx_monitor(local, skb, retry_count, shift,
+	ieee80211_tx_monitor(local, skb, retry_count,
 			     send_to_cooked, status);
 }
 
@@ -1102,6 +1119,30 @@
 }
 EXPORT_SYMBOL(ieee80211_tx_status);
 
+void ieee80211_tx_status_8023(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif,
+                              struct sk_buff *skb)
+{
+        struct ieee80211_sub_if_data *sdata;
+        struct ieee80211_tx_status status = {
+                .skb = skb,
+                .info = IEEE80211_SKB_CB(skb),
+        };
+        struct sta_info *sta;
+
+        sdata = vif_to_sdata(vif);
+
+        rcu_read_lock();
+
+        if (!ieee80211_lookup_ra_sta(sdata, skb, &sta) && !IS_ERR(sta))
+                status.sta = &sta->sta;
+
+        ieee80211_tx_status_ext(hw, &status);
+
+        rcu_read_unlock();
+}
+EXPORT_SYMBOL(ieee80211_tx_status_8023);
+
 void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
 			     struct ieee80211_tx_status *status)
 {
@@ -1113,6 +1154,7 @@
 	int rates_idx, retry_count;
 	bool acked, noack_success, ack_signal_valid;
 	u16 tx_time_est;
+	bool ack_requested;
 
 	if (pubsta) {
 		sta = container_of(pubsta, struct sta_info, sta);
@@ -1139,12 +1181,13 @@
 
 	rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
 
+	ack_requested = !(info->flags & IEEE80211_TX_CTL_NO_ACK);
 	acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
 	noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED);
 	ack_signal_valid =
 		!!(info->status.flags & IEEE80211_TX_STATUS_ACK_SIGNAL_VALID);
 
-	if (pubsta) {
+	if (pubsta && ack_requested) {
 		struct ieee80211_sub_if_data *sdata = sta->sdata;
 
 		if (!acked && !noack_success)
diff -ruw linux-6.4/net/mac80211/tdls.c linux-6.4-fbx/net/mac80211/tdls.c
--- linux-6.4/net/mac80211/tdls.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/tdls.c	2023-11-07 13:38:44.090257456 +0100
@@ -6,7 +6,7 @@
  * Copyright 2014, Intel Corporation
  * Copyright 2014  Intel Mobile Communications GmbH
  * Copyright 2015 - 2016 Intel Deutschland GmbH
- * Copyright (C) 2019, 2021-2022 Intel Corporation
+ * Copyright (C) 2019, 2021-2023 Intel Corporation
  */
 
 #include <linux/ieee80211.h>
@@ -21,7 +21,7 @@
 /* give usermode some time for retries in setting up the TDLS session */
 #define TDLS_PEER_SETUP_TIMEOUT	(15 * HZ)
 
-void ieee80211_tdls_peer_del_work(struct work_struct *wk)
+void ieee80211_tdls_peer_del_work(struct wiphy *wiphy, struct wiphy_work *wk)
 {
 	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_local *local;
@@ -30,18 +30,19 @@
 			     u.mgd.tdls_peer_del_work.work);
 	local = sdata->local;
 
-	mutex_lock(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!is_zero_ether_addr(sdata->u.mgd.tdls_peer)) {
 		tdls_dbg(sdata, "TDLS del peer %pM\n", sdata->u.mgd.tdls_peer);
 		sta_info_destroy_addr(sdata, sdata->u.mgd.tdls_peer);
 		eth_zero_addr(sdata->u.mgd.tdls_peer);
 	}
-	mutex_unlock(&local->mtx);
 }
 
-static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata,
+static void ieee80211_tdls_add_ext_capab(struct ieee80211_link_data *link,
 					 struct sk_buff *skb)
 {
+	struct ieee80211_sub_if_data *sdata = link->sdata;
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	bool chan_switch = local->hw.wiphy->features &
@@ -50,7 +51,7 @@
 			  !ifmgd->tdls_wider_bw_prohibited;
 	bool buffer_sta = ieee80211_hw_check(&local->hw,
 					     SUPPORTS_TDLS_BUFFER_STA);
-	struct ieee80211_supported_band *sband = ieee80211_get_sband(sdata);
+	struct ieee80211_supported_band *sband = ieee80211_get_link_sband(link);
 	bool vht = sband && sband->vht_cap.vht_supported;
 	u8 *pos = skb_put(skb, 10);
 
@@ -152,13 +153,13 @@
 	*pos = 2 * subband_cnt;
 }
 
-static void ieee80211_tdls_add_oper_classes(struct ieee80211_sub_if_data *sdata,
+static void ieee80211_tdls_add_oper_classes(struct ieee80211_link_data *link,
 					    struct sk_buff *skb)
 {
 	u8 *pos;
 	u8 op_class;
 
-	if (!ieee80211_chandef_to_operating_class(&sdata->vif.bss_conf.chandef,
+	if (!ieee80211_chandef_to_operating_class(&link->conf->chandef,
 						  &op_class))
 		return;
 
@@ -180,7 +181,7 @@
 	*pos++ = WLAN_BSS_COEX_INFORMATION_REQUEST;
 }
 
-static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata,
+static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_link_data *link,
 					u16 status_code)
 {
 	struct ieee80211_supported_band *sband;
@@ -189,7 +190,8 @@
 	if (status_code != 0)
 		return 0;
 
-	sband = ieee80211_get_sband(sdata);
+	sband = ieee80211_get_link_sband(link);
+
 	if (sband && sband->band == NL80211_BAND_2GHZ) {
 		return WLAN_CAPABILITY_SHORT_SLOT_TIME |
 		       WLAN_CAPABILITY_SHORT_PREAMBLE;
@@ -198,10 +200,11 @@
 	return 0;
 }
 
-static void ieee80211_tdls_add_link_ie(struct ieee80211_sub_if_data *sdata,
+static void ieee80211_tdls_add_link_ie(struct ieee80211_link_data *link,
 				       struct sk_buff *skb, const u8 *peer,
 				       bool initiator)
 {
+	struct ieee80211_sub_if_data *sdata = link->sdata;
 	struct ieee80211_tdls_lnkie *lnkid;
 	const u8 *init_addr, *rsp_addr;
 
@@ -218,7 +221,7 @@
 	lnkid->ie_type = WLAN_EID_LINK_ID;
 	lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2;
 
-	memcpy(lnkid->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
+	memcpy(lnkid->bssid, link->u.mgd.bssid, ETH_ALEN);
 	memcpy(lnkid->init_sta, init_addr, ETH_ALEN);
 	memcpy(lnkid->resp_sta, rsp_addr, ETH_ALEN);
 }
@@ -306,7 +309,7 @@
 				   struct sta_info *sta)
 {
 	/* IEEE802.11ac-2013 Table E-4 */
-	u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
+	static const u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
 	struct cfg80211_chan_def uc = sta->tdls_chandef;
 	enum nl80211_chan_width max_width =
 		ieee80211_sta_cap_chan_bw(&sta->deflink);
@@ -359,21 +362,24 @@
 }
 
 static void
-ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
+ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
 				   struct sk_buff *skb, const u8 *peer,
 				   u8 action_code, bool initiator,
 				   const u8 *extra_ies, size_t extra_ies_len)
 {
+	struct ieee80211_sub_if_data *sdata = link->sdata;
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_sta_ht_cap ht_cap;
 	struct ieee80211_sta_vht_cap vht_cap;
+	const struct ieee80211_sta_he_cap *he_cap;
+	const struct ieee80211_sta_eht_cap *eht_cap;
 	struct sta_info *sta = NULL;
 	size_t offset = 0, noffset;
 	u8 *pos;
 
-	sband = ieee80211_get_sband(sdata);
-	if (!sband)
+	sband = ieee80211_get_link_sband(link);
+	if (WARN_ON_ONCE(!sband))
 		return;
 
 	ieee80211_add_srates_ie(sdata, skb, false, sband->band);
@@ -397,7 +403,7 @@
 		offset = noffset;
 	}
 
-	ieee80211_tdls_add_ext_capab(sdata, skb);
+	ieee80211_tdls_add_ext_capab(link, skb);
 
 	/* add the QoS element if we support it */
 	if (local->hw.queues >= IEEE80211_NUM_ACS &&
@@ -426,20 +432,16 @@
 		offset = noffset;
 	}
 
-	mutex_lock(&local->sta_mtx);
-
 	/* we should have the peer STA if we're already responding */
 	if (action_code == WLAN_TDLS_SETUP_RESPONSE) {
 		sta = sta_info_get(sdata, peer);
-		if (WARN_ON_ONCE(!sta)) {
-			mutex_unlock(&local->sta_mtx);
+		if (WARN_ON_ONCE(!sta))
 			return;
-		}
 
-		sta->tdls_chandef = sdata->vif.bss_conf.chandef;
+		sta->tdls_chandef = link->conf->chandef;
 	}
 
-	ieee80211_tdls_add_oper_classes(sdata, skb);
+	ieee80211_tdls_add_oper_classes(link, skb);
 
 	/*
 	 * with TDLS we can switch channels, and HT-caps are not necessarily
@@ -472,7 +474,7 @@
 	    (ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
 		ieee80211_tdls_add_bss_coex_ie(skb);
 
-	ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
+	ieee80211_tdls_add_link_ie(link, skb, peer, initiator);
 
 	/* add any custom IEs that go before VHT capabilities */
 	if (extra_ies_len) {
@@ -497,17 +499,21 @@
 		offset = noffset;
 	}
 
-	/* build the VHT-cap similarly to the HT-cap */
+	/* add AID if VHT, HE or EHT capabilities supported */
 	memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
+	he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+	eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
+	if ((vht_cap.vht_supported || he_cap || eht_cap) &&
+	    (action_code == WLAN_TDLS_SETUP_REQUEST ||
+	     action_code == WLAN_TDLS_SETUP_RESPONSE))
+		ieee80211_tdls_add_aid(sdata, skb);
+
+	/* build the VHT-cap similarly to the HT-cap */
 	if ((action_code == WLAN_TDLS_SETUP_REQUEST ||
 	     action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) &&
 	    vht_cap.vht_supported) {
 		ieee80211_apply_vhtcap_overrides(sdata, &vht_cap);
 
-		/* the AID is present only when VHT is implemented */
-		if (action_code == WLAN_TDLS_SETUP_REQUEST)
-			ieee80211_tdls_add_aid(sdata, skb);
-
 		pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
 		ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap);
 	} else if (action_code == WLAN_TDLS_SETUP_RESPONSE &&
@@ -515,9 +521,6 @@
 		/* the peer caps are already intersected with our own */
 		memcpy(&vht_cap, &sta->sta.deflink.vht_cap, sizeof(vht_cap));
 
-		/* the AID is present only when VHT is implemented */
-		ieee80211_tdls_add_aid(sdata, skb);
-
 		pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
 		ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap);
 
@@ -529,7 +532,80 @@
 			ieee80211_tdls_chandef_vht_upgrade(sdata, sta);
 	}
 
-	mutex_unlock(&local->sta_mtx);
+	/* add any custom IEs that go before HE capabilities */
+	if (extra_ies_len) {
+		static const u8 before_he_cap[] = {
+			WLAN_EID_EXTENSION,
+			WLAN_EID_EXT_FILS_REQ_PARAMS,
+			WLAN_EID_AP_CSN,
+		};
+		noffset = ieee80211_ie_split(extra_ies, extra_ies_len,
+					     before_he_cap,
+					     ARRAY_SIZE(before_he_cap),
+					     offset);
+		skb_put_data(skb, extra_ies + offset, noffset - offset);
+		offset = noffset;
+	}
+
+	/* build the HE-cap from sband */
+	if (he_cap &&
+	    (action_code == WLAN_TDLS_SETUP_REQUEST ||
+	     action_code == WLAN_TDLS_SETUP_RESPONSE ||
+	     action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) {
+		__le16 he_6ghz_capa;
+		u8 cap_size;
+
+		cap_size =
+			2 + 1 + sizeof(he_cap->he_cap_elem) +
+			ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) +
+			ieee80211_he_ppe_size(he_cap->ppe_thres[0],
+					      he_cap->he_cap_elem.phy_cap_info);
+		pos = skb_put(skb, cap_size);
+		pos = ieee80211_ie_build_he_cap(0, pos, he_cap, pos + cap_size);
+
+		/* Build HE 6Ghz capa IE from sband */
+		if (sband->band == NL80211_BAND_6GHZ) {
+			cap_size = 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa);
+			pos = skb_put(skb, cap_size);
+			he_6ghz_capa =
+				ieee80211_get_he_6ghz_capa_vif(sband, &sdata->vif);
+			pos = ieee80211_write_he_6ghz_cap(pos, he_6ghz_capa,
+							  pos + cap_size);
+		}
+	}
+
+	/* add any custom IEs that go before EHT capabilities */
+	if (extra_ies_len) {
+		static const u8 before_he_cap[] = {
+			WLAN_EID_EXTENSION,
+			WLAN_EID_EXT_FILS_REQ_PARAMS,
+			WLAN_EID_AP_CSN,
+		};
+
+		noffset = ieee80211_ie_split(extra_ies, extra_ies_len,
+					     before_he_cap,
+					     ARRAY_SIZE(before_he_cap),
+					     offset);
+		skb_put_data(skb, extra_ies + offset, noffset - offset);
+		offset = noffset;
+	}
+
+	/* build the EHT-cap from sband */
+	if (he_cap && eht_cap &&
+	    (action_code == WLAN_TDLS_SETUP_REQUEST ||
+	     action_code == WLAN_TDLS_SETUP_RESPONSE ||
+	     action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) {
+		u8 cap_size;
+
+		cap_size =
+			2 + 1 + sizeof(eht_cap->eht_cap_elem) +
+			ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
+						   &eht_cap->eht_cap_elem, false) +
+			ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
+					       eht_cap->eht_cap_elem.phy_cap_info);
+		pos = skb_put(skb, cap_size);
+		ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + cap_size, false);
+	}
 
 	/* add any remaining IEs */
 	if (extra_ies_len) {
@@ -540,31 +616,29 @@
 }
 
 static void
-ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
+ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_link_data *link,
 				 struct sk_buff *skb, const u8 *peer,
 				 bool initiator, const u8 *extra_ies,
 				 size_t extra_ies_len)
 {
+	struct ieee80211_sub_if_data *sdata = link->sdata;
 	struct ieee80211_local *local = sdata->local;
 	size_t offset = 0, noffset;
 	struct sta_info *sta, *ap_sta;
 	struct ieee80211_supported_band *sband;
 	u8 *pos;
 
-	sband = ieee80211_get_sband(sdata);
-	if (!sband)
+	sband = ieee80211_get_link_sband(link);
+	if (WARN_ON_ONCE(!sband))
 		return;
 
-	mutex_lock(&local->sta_mtx);
-
 	sta = sta_info_get(sdata, peer);
-	ap_sta = sta_info_get(sdata, sdata->deflink.u.mgd.bssid);
-	if (WARN_ON_ONCE(!sta || !ap_sta)) {
-		mutex_unlock(&local->sta_mtx);
+	ap_sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
+
+	if (WARN_ON_ONCE(!sta || !ap_sta))
 		return;
-	}
 
-	sta->tdls_chandef = sdata->vif.bss_conf.chandef;
+	sta->tdls_chandef = link->conf->chandef;
 
 	/* add any custom IEs that go before the QoS IE */
 	if (extra_ies_len) {
@@ -610,11 +684,11 @@
 
 		pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
 		ieee80211_ie_build_ht_oper(pos, &sta->sta.deflink.ht_cap,
-					   &sdata->vif.bss_conf.chandef, prot,
+					   &link->conf->chandef, prot,
 					   true);
 	}
 
-	ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
+	ieee80211_tdls_add_link_ie(link, skb, peer, initiator);
 
 	/* only include VHT-operation if not on the 2.4GHz band */
 	if (sband->band != NL80211_BAND_2GHZ &&
@@ -631,8 +705,6 @@
 					    &sta->tdls_chandef);
 	}
 
-	mutex_unlock(&local->sta_mtx);
-
 	/* add any remaining IEs */
 	if (extra_ies_len) {
 		noffset = extra_ies_len;
@@ -641,7 +713,7 @@
 }
 
 static void
-ieee80211_tdls_add_chan_switch_req_ies(struct ieee80211_sub_if_data *sdata,
+ieee80211_tdls_add_chan_switch_req_ies(struct ieee80211_link_data *link,
 				       struct sk_buff *skb, const u8 *peer,
 				       bool initiator, const u8 *extra_ies,
 				       size_t extra_ies_len, u8 oper_class,
@@ -670,7 +742,7 @@
 		offset = noffset;
 	}
 
-	ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
+	ieee80211_tdls_add_link_ie(link, skb, peer, initiator);
 
 	/* add any remaining IEs */
 	if (extra_ies_len) {
@@ -680,20 +752,20 @@
 }
 
 static void
-ieee80211_tdls_add_chan_switch_resp_ies(struct ieee80211_sub_if_data *sdata,
+ieee80211_tdls_add_chan_switch_resp_ies(struct ieee80211_link_data *link,
 					struct sk_buff *skb, const u8 *peer,
 					u16 status_code, bool initiator,
 					const u8 *extra_ies,
 					size_t extra_ies_len)
 {
 	if (status_code == 0)
-		ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
+		ieee80211_tdls_add_link_ie(link, skb, peer, initiator);
 
 	if (extra_ies_len)
 		skb_put_data(skb, extra_ies, extra_ies_len);
 }
 
-static void ieee80211_tdls_add_ies(struct ieee80211_sub_if_data *sdata,
+static void ieee80211_tdls_add_ies(struct ieee80211_link_data *link,
 				   struct sk_buff *skb, const u8 *peer,
 				   u8 action_code, u16 status_code,
 				   bool initiator, const u8 *extra_ies,
@@ -705,7 +777,8 @@
 	case WLAN_TDLS_SETUP_RESPONSE:
 	case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
 		if (status_code == 0)
-			ieee80211_tdls_add_setup_start_ies(sdata, skb, peer,
+			ieee80211_tdls_add_setup_start_ies(link,
+							   skb, peer,
 							   action_code,
 							   initiator,
 							   extra_ies,
@@ -713,7 +786,7 @@
 		break;
 	case WLAN_TDLS_SETUP_CONFIRM:
 		if (status_code == 0)
-			ieee80211_tdls_add_setup_cfm_ies(sdata, skb, peer,
+			ieee80211_tdls_add_setup_cfm_ies(link, skb, peer,
 							 initiator, extra_ies,
 							 extra_ies_len);
 		break;
@@ -722,16 +795,17 @@
 		if (extra_ies_len)
 			skb_put_data(skb, extra_ies, extra_ies_len);
 		if (status_code == 0 || action_code == WLAN_TDLS_TEARDOWN)
-			ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
+			ieee80211_tdls_add_link_ie(link, skb,
+						   peer, initiator);
 		break;
 	case WLAN_TDLS_CHANNEL_SWITCH_REQUEST:
-		ieee80211_tdls_add_chan_switch_req_ies(sdata, skb, peer,
+		ieee80211_tdls_add_chan_switch_req_ies(link, skb, peer,
 						       initiator, extra_ies,
 						       extra_ies_len,
 						       oper_class, chandef);
 		break;
 	case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE:
-		ieee80211_tdls_add_chan_switch_resp_ies(sdata, skb, peer,
+		ieee80211_tdls_add_chan_switch_resp_ies(link, skb, peer,
 							status_code,
 							initiator, extra_ies,
 							extra_ies_len);
@@ -742,6 +816,7 @@
 
 static int
 ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
+			       struct ieee80211_link_data *link,
 			       const u8 *peer, u8 action_code, u8 dialog_token,
 			       u16 status_code, struct sk_buff *skb)
 {
@@ -766,7 +841,7 @@
 		skb_put(skb, sizeof(tf->u.setup_req));
 		tf->u.setup_req.dialog_token = dialog_token;
 		tf->u.setup_req.capability =
-			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata,
+			cpu_to_le16(ieee80211_get_tdls_sta_capab(link,
 								 status_code));
 		break;
 	case WLAN_TDLS_SETUP_RESPONSE:
@@ -777,7 +852,7 @@
 		tf->u.setup_resp.status_code = cpu_to_le16(status_code);
 		tf->u.setup_resp.dialog_token = dialog_token;
 		tf->u.setup_resp.capability =
-			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata,
+			cpu_to_le16(ieee80211_get_tdls_sta_capab(link,
 								 status_code));
 		break;
 	case WLAN_TDLS_SETUP_CONFIRM:
@@ -824,7 +899,8 @@
 
 static int
 ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
-			   const u8 *peer, u8 action_code, u8 dialog_token,
+			   const u8 *peer, struct ieee80211_link_data *link,
+			   u8 action_code, u8 dialog_token,
 			   u16 status_code, struct sk_buff *skb)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -833,8 +909,7 @@
 	mgmt = skb_put_zero(skb, 24);
 	memcpy(mgmt->da, peer, ETH_ALEN);
 	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
-	memcpy(mgmt->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
-
+	memcpy(mgmt->bssid, link->u.mgd.bssid, ETH_ALEN);
 	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 					  IEEE80211_STYPE_ACTION);
 
@@ -847,7 +922,7 @@
 		mgmt->u.action.u.tdls_discover_resp.dialog_token =
 			dialog_token;
 		mgmt->u.action.u.tdls_discover_resp.capability =
-			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata,
+			cpu_to_le16(ieee80211_get_tdls_sta_capab(link,
 								 status_code));
 		break;
 	default:
@@ -859,15 +934,23 @@
 
 static struct sk_buff *
 ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata,
-				      const u8 *peer, u8 action_code,
-				      u8 dialog_token, u16 status_code,
-				      bool initiator, const u8 *extra_ies,
-				      size_t extra_ies_len, u8 oper_class,
+				      const u8 *peer, int link_id,
+				      u8 action_code, u8 dialog_token,
+				      u16 status_code, bool initiator,
+				      const u8 *extra_ies, size_t extra_ies_len,
+				      u8 oper_class,
 				      struct cfg80211_chan_def *chandef)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb;
 	int ret;
+	struct ieee80211_link_data *link;
+
+	link_id = link_id >= 0 ? link_id : 0;
+	rcu_read_lock();
+	link = rcu_dereference(sdata->link[link_id]);
+	if (WARN_ON(!link))
+		goto unlock;
 
 	skb = netdev_alloc_skb(sdata->dev,
 			       local->hw.extra_tx_headroom +
@@ -880,6 +963,13 @@
 				       sizeof(struct ieee80211_ht_operation)) +
 			       2 + max(sizeof(struct ieee80211_vht_cap),
 				       sizeof(struct ieee80211_vht_operation)) +
+			       2 + 1 + sizeof(struct ieee80211_he_cap_elem) +
+				       sizeof(struct ieee80211_he_mcs_nss_supp) +
+				       IEEE80211_HE_PPE_THRES_MAX_LEN +
+			       2 + 1 + sizeof(struct ieee80211_he_6ghz_capa) +
+			       2 + 1 + sizeof(struct ieee80211_eht_cap_elem) +
+				       sizeof(struct ieee80211_eht_mcs_nss_supp) +
+				       IEEE80211_EHT_PPE_THRES_MAX_LEN +
 			       50 + /* supported channels */
 			       3 + /* 40/20 BSS coex */
 			       4 + /* AID */
@@ -887,7 +977,7 @@
 			       extra_ies_len +
 			       sizeof(struct ieee80211_tdls_lnkie));
 	if (!skb)
-		return NULL;
+		goto unlock;
 
 	skb_reserve(skb, local->hw.extra_tx_headroom);
 
@@ -900,13 +990,13 @@
 	case WLAN_TDLS_CHANNEL_SWITCH_REQUEST:
 	case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE:
 		ret = ieee80211_prep_tdls_encap_data(local->hw.wiphy,
-						     sdata->dev, peer,
+						     sdata->dev, link, peer,
 						     action_code, dialog_token,
 						     status_code, skb);
 		break;
 	case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
 		ret = ieee80211_prep_tdls_direct(local->hw.wiphy, sdata->dev,
-						 peer, action_code,
+						 peer, link, action_code,
 						 dialog_token, status_code,
 						 skb);
 		break;
@@ -918,19 +1008,23 @@
 	if (ret < 0)
 		goto fail;
 
-	ieee80211_tdls_add_ies(sdata, skb, peer, action_code, status_code,
+	ieee80211_tdls_add_ies(link, skb, peer, action_code, status_code,
 			       initiator, extra_ies, extra_ies_len, oper_class,
 			       chandef);
+	rcu_read_unlock();
 	return skb;
 
 fail:
 	dev_kfree_skb(skb);
+unlock:
+	rcu_read_unlock();
 	return NULL;
 }
 
 static int
 ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
-				const u8 *peer, u8 action_code, u8 dialog_token,
+				const u8 *peer, int link_id,
+				u8 action_code, u8 dialog_token,
 				u16 status_code, u32 peer_capability,
 				bool initiator, const u8 *extra_ies,
 				size_t extra_ies_len, u8 oper_class,
@@ -988,7 +1082,8 @@
 	if (ret < 0)
 		goto fail;
 
-	skb = ieee80211_tdls_build_mgmt_packet_data(sdata, peer, action_code,
+	skb = ieee80211_tdls_build_mgmt_packet_data(sdata, peer,
+						    link_id, action_code,
 						    dialog_token, status_code,
 						    initiator, extra_ies,
 						    extra_ies_len, oper_class,
@@ -999,7 +1094,7 @@
 	}
 
 	if (action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) {
-		ieee80211_tx_skb(sdata, skb);
+		ieee80211_tx_skb_tid(sdata, skb, 7, link_id);
 		return 0;
 	}
 
@@ -1066,7 +1161,8 @@
 
 static int
 ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev,
-			  const u8 *peer, u8 action_code, u8 dialog_token,
+			  const u8 *peer, int link_id,
+			  u8 action_code, u8 dialog_token,
 			  u16 status_code, u32 peer_capability, bool initiator,
 			  const u8 *extra_ies, size_t extra_ies_len)
 {
@@ -1084,7 +1180,7 @@
 		return -ENOTSUPP;
 	}
 
-	mutex_lock(&local->mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	/* we don't support concurrent TDLS peer setups */
 	if (!is_zero_ether_addr(sdata->u.mgd.tdls_peer) &&
@@ -1112,34 +1208,32 @@
 
 	ieee80211_flush_queues(local, sdata, false);
 	memcpy(sdata->u.mgd.tdls_peer, peer, ETH_ALEN);
-	mutex_unlock(&local->mtx);
 
 	/* we cannot take the mutex while preparing the setup packet */
-	ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code,
+	ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer,
+					      link_id, action_code,
 					      dialog_token, status_code,
 					      peer_capability, initiator,
 					      extra_ies, extra_ies_len, 0,
 					      NULL);
 	if (ret < 0) {
-		mutex_lock(&local->mtx);
 		eth_zero_addr(sdata->u.mgd.tdls_peer);
-		mutex_unlock(&local->mtx);
 		return ret;
 	}
 
-	ieee80211_queue_delayed_work(&sdata->local->hw,
+	wiphy_delayed_work_queue(sdata->local->hw.wiphy,
 				     &sdata->u.mgd.tdls_peer_del_work,
 				     TDLS_PEER_SETUP_TIMEOUT);
 	return 0;
 
 out_unlock:
-	mutex_unlock(&local->mtx);
 	return ret;
 }
 
 static int
 ieee80211_tdls_mgmt_teardown(struct wiphy *wiphy, struct net_device *dev,
-			     const u8 *peer, u8 action_code, u8 dialog_token,
+			     const u8 *peer, int link_id,
+			     u8 action_code, u8 dialog_token,
 			     u16 status_code, u32 peer_capability,
 			     bool initiator, const u8 *extra_ies,
 			     size_t extra_ies_len)
@@ -1159,7 +1253,8 @@
 				  IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN);
 	ieee80211_flush_queues(local, sdata, false);
 
-	ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code,
+	ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer,
+					      link_id, action_code,
 					      dialog_token, status_code,
 					      peer_capability, initiator,
 					      extra_ies, extra_ies_len, 0,
@@ -1185,10 +1280,10 @@
 }
 
 int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
-			const u8 *peer, u8 action_code, u8 dialog_token,
-			u16 status_code, u32 peer_capability,
-			bool initiator, const u8 *extra_ies,
-			size_t extra_ies_len)
+			const u8 *peer, int link_id,
+			u8 action_code, u8 dialog_token, u16 status_code,
+			u32 peer_capability, bool initiator,
+			const u8 *extra_ies, size_t extra_ies_len)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	int ret;
@@ -1204,13 +1299,14 @@
 	switch (action_code) {
 	case WLAN_TDLS_SETUP_REQUEST:
 	case WLAN_TDLS_SETUP_RESPONSE:
-		ret = ieee80211_tdls_mgmt_setup(wiphy, dev, peer, action_code,
+		ret = ieee80211_tdls_mgmt_setup(wiphy, dev, peer,
+						link_id, action_code,
 						dialog_token, status_code,
 						peer_capability, initiator,
 						extra_ies, extra_ies_len);
 		break;
 	case WLAN_TDLS_TEARDOWN:
-		ret = ieee80211_tdls_mgmt_teardown(wiphy, dev, peer,
+		ret = ieee80211_tdls_mgmt_teardown(wiphy, dev, peer, link_id,
 						   action_code, dialog_token,
 						   status_code,
 						   peer_capability, initiator,
@@ -1222,13 +1318,13 @@
 		 * response frame. It is transmitted directly and not buffered
 		 * by the AP.
 		 */
-		drv_mgd_protect_tdls_discover(sdata->local, sdata);
+		drv_mgd_protect_tdls_discover(sdata->local, sdata, link_id);
 		fallthrough;
 	case WLAN_TDLS_SETUP_CONFIRM:
 	case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
 		/* no special handling */
 		ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer,
-						      action_code,
+						      link_id, action_code,
 						      dialog_token,
 						      status_code,
 						      peer_capability,
@@ -1240,8 +1336,8 @@
 		break;
 	}
 
-	tdls_dbg(sdata, "TDLS mgmt action %d peer %pM status %d\n",
-		 action_code, peer, ret);
+	tdls_dbg(sdata, "TDLS mgmt action %d peer %pM link_id %d status %d\n",
+		 action_code, peer, link_id, ret);
 	return ret;
 }
 
@@ -1254,9 +1350,10 @@
 	enum nl80211_chan_width width;
 	struct ieee80211_supported_band *sband;
 
-	mutex_lock(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	conf = rcu_dereference_protected(sdata->vif.bss_conf.chanctx_conf,
-					 lockdep_is_held(&local->chanctx_mtx));
+					 lockdep_is_held(&local->hw.wiphy->mtx));
 	if (conf) {
 		width = conf->def.width;
 		sband = local->hw.wiphy->bands[conf->def.chan->band];
@@ -1284,7 +1381,6 @@
 		}
 
 	}
-	mutex_unlock(&local->chanctx_mtx);
 }
 
 static int iee80211_tdls_have_ht_peers(struct ieee80211_sub_if_data *sdata)
@@ -1347,6 +1443,8 @@
 	struct ieee80211_local *local = sdata->local;
 	int ret;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
 		return -ENOTSUPP;
 
@@ -1367,35 +1465,26 @@
 	/* protect possible bss_conf changes and avoid concurrency in
 	 * ieee80211_bss_info_change_notify()
 	 */
-	sdata_lock(sdata);
-	mutex_lock(&local->mtx);
 	tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
 
 	switch (oper) {
 	case NL80211_TDLS_ENABLE_LINK:
 		if (sdata->vif.bss_conf.csa_active) {
 			tdls_dbg(sdata, "TDLS: disallow link during CSA\n");
-			ret = -EBUSY;
-			break;
+			return -EBUSY;
 		}
 
-		mutex_lock(&local->sta_mtx);
 		sta = sta_info_get(sdata, peer);
-		if (!sta) {
-			mutex_unlock(&local->sta_mtx);
-			ret = -ENOLINK;
-			break;
-		}
+		if (!sta)
+			return -ENOLINK;
 
 		iee80211_tdls_recalc_chanctx(sdata, sta);
 		iee80211_tdls_recalc_ht_protection(sdata, sta);
 
 		set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
-		mutex_unlock(&local->sta_mtx);
 
 		WARN_ON_ONCE(is_zero_ether_addr(sdata->u.mgd.tdls_peer) ||
 			     !ether_addr_equal(sdata->u.mgd.tdls_peer, peer));
-		ret = 0;
 		break;
 	case NL80211_TDLS_DISABLE_LINK:
 		/*
@@ -1414,29 +1503,26 @@
 
 		ret = sta_info_destroy_addr(sdata, peer);
 
-		mutex_lock(&local->sta_mtx);
 		iee80211_tdls_recalc_ht_protection(sdata, NULL);
-		mutex_unlock(&local->sta_mtx);
 
 		iee80211_tdls_recalc_chanctx(sdata, NULL);
+		if (ret)
+			return ret;
 		break;
 	default:
-		ret = -ENOTSUPP;
-		break;
+		return -ENOTSUPP;
 	}
 
-	if (ret == 0 && ether_addr_equal(sdata->u.mgd.tdls_peer, peer)) {
-		cancel_delayed_work(&sdata->u.mgd.tdls_peer_del_work);
+	if (ether_addr_equal(sdata->u.mgd.tdls_peer, peer)) {
+		wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+					  &sdata->u.mgd.tdls_peer_del_work);
 		eth_zero_addr(sdata->u.mgd.tdls_peer);
 	}
 
-	if (ret == 0)
-		ieee80211_queue_work(&sdata->local->hw,
+	wiphy_work_queue(sdata->local->hw.wiphy,
 				     &sdata->deflink.u.mgd.request_smps_work);
 
-	mutex_unlock(&local->mtx);
-	sdata_unlock(sdata);
-	return ret;
+	return 0;
 }
 
 void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer,
@@ -1497,6 +1583,7 @@
 	int extra_ies_len = 2 + sizeof(struct ieee80211_ch_switch_timing);
 	u8 *pos = extra_ies;
 	struct sk_buff *skb;
+	int link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0;
 
 	/*
 	 * if chandef points to a wide channel add a Secondary-Channel
@@ -1524,6 +1611,7 @@
 	iee80211_tdls_add_ch_switch_timing(pos, 0, 0);
 
 	skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr,
+					      link_id,
 					      WLAN_TDLS_CHANNEL_SWITCH_REQUEST,
 					      0, 0, !sta->sta.tdls_initiator,
 					      extra_ies, extra_ies_len,
@@ -1567,11 +1655,12 @@
 	u32 ch_sw_tm_ie;
 	int ret;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (chandef->chan->freq_offset)
 		/* this may work, but is untested */
 		return -EOPNOTSUPP;
 
-	mutex_lock(&local->sta_mtx);
 	sta = sta_info_get(sdata, addr);
 	if (!sta) {
 		tdls_dbg(sdata,
@@ -1601,7 +1690,6 @@
 		set_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL);
 
 out:
-	mutex_unlock(&local->sta_mtx);
 	dev_kfree_skb_any(skb);
 	return ret;
 }
@@ -1615,26 +1703,24 @@
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
 
-	mutex_lock(&local->sta_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	sta = sta_info_get(sdata, addr);
 	if (!sta) {
 		tdls_dbg(sdata,
 			 "Invalid TDLS peer %pM for channel switch cancel\n",
 			 addr);
-		goto out;
+		return;
 	}
 
 	if (!test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) {
 		tdls_dbg(sdata, "TDLS channel switch not initiated by %pM\n",
 			 addr);
-		goto out;
+		return;
 	}
 
 	drv_tdls_cancel_channel_switch(local, sdata, &sta->sta);
 	clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL);
-
-out:
-	mutex_unlock(&local->sta_mtx);
 }
 
 static struct sk_buff *
@@ -1644,11 +1730,13 @@
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct sk_buff *skb;
 	u8 extra_ies[2 + sizeof(struct ieee80211_ch_switch_timing)];
+	int link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0;
 
 	/* initial timing are always zero in the template */
 	iee80211_tdls_add_ch_switch_timing(extra_ies, 0, 0);
 
 	skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr,
+					link_id,
 					WLAN_TDLS_CHANNEL_SWITCH_RESPONSE,
 					0, 0, !sta->sta.tdls_initiator,
 					extra_ies, sizeof(extra_ies), 0, NULL);
@@ -1694,6 +1782,8 @@
 	struct ieee80211_tdls_ch_sw_params params = {};
 	int ret;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	params.action_code = WLAN_TDLS_CHANNEL_SWITCH_RESPONSE;
 	params.timestamp = rx_status->device_timestamp;
 
@@ -1703,7 +1793,6 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&local->sta_mtx);
 	sta = sta_info_get(sdata, tf->sa);
 	if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) {
 		tdls_dbg(sdata, "TDLS chan switch from non-peer sta %pM\n",
@@ -1766,7 +1855,6 @@
 		 tf->sa, params.status);
 
 out:
-	mutex_unlock(&local->sta_mtx);
 	dev_kfree_skb_any(params.tmpl_skb);
 	kfree(elems);
 	return ret;
@@ -1792,6 +1880,8 @@
 	struct ieee80211_tdls_ch_sw_params params = {};
 	int ret = 0;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	params.action_code = WLAN_TDLS_CHANNEL_SWITCH_REQUEST;
 	params.timestamp = rx_status->device_timestamp;
 
@@ -1880,7 +1970,6 @@
 		goto free;
 	}
 
-	mutex_lock(&local->sta_mtx);
 	sta = sta_info_get(sdata, tf->sa);
 	if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) {
 		tdls_dbg(sdata, "TDLS chan switch from non-peer sta %pM\n",
@@ -1927,7 +2016,6 @@
 		 tf->sa, params.chandef->chan->center_freq,
 		 params.chandef->width);
 out:
-	mutex_unlock(&local->sta_mtx);
 	dev_kfree_skb_any(params.tmpl_skb);
 free:
 	kfree(elems);
diff -ruw linux-6.4/net/mac80211/trace.h linux-6.4-fbx/net/mac80211/trace.h
--- linux-6.4/net/mac80211/trace.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/trace.h	2023-11-07 13:38:44.090257456 +0100
@@ -2,7 +2,7 @@
 /*
  * Portions of this file
  * Copyright(c) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2022 Intel Corporation
+ * Copyright (C) 2018 - 2023 Intel Corporation
  */
 
 #if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -17,7 +17,7 @@
 
 #define MAXNAME		32
 #define LOCAL_ENTRY	__array(char, wiphy_name, 32)
-#define LOCAL_ASSIGN	strlcpy(__entry->wiphy_name, wiphy_name(local->hw.wiphy), MAXNAME)
+#define LOCAL_ASSIGN	strscpy(__entry->wiphy_name, wiphy_name(local->hw.wiphy), MAXNAME)
 #define LOCAL_PR_FMT	"%s"
 #define LOCAL_PR_ARG	__entry->wiphy_name
 
@@ -634,6 +634,7 @@
 		LOCAL_ENTRY
 		VIF_ENTRY
 		STA_ENTRY
+		__field(u32, cmd)
 		KEY_ENTRY
 	),
 
@@ -641,12 +642,13 @@
 		LOCAL_ASSIGN;
 		VIF_ASSIGN;
 		STA_ASSIGN;
+		__entry->cmd = cmd;
 		KEY_ASSIGN(key);
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT  VIF_PR_FMT  STA_PR_FMT KEY_PR_FMT,
-		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, KEY_PR_ARG
+		LOCAL_PR_FMT  VIF_PR_FMT  STA_PR_FMT " cmd: %d" KEY_PR_FMT,
+		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->cmd, KEY_PR_ARG
 	)
 );
 
@@ -2837,23 +2839,26 @@
 );
 
 TRACE_EVENT(api_chswitch_done,
-	TP_PROTO(struct ieee80211_sub_if_data *sdata, bool success),
+	TP_PROTO(struct ieee80211_sub_if_data *sdata, bool success,
+		 unsigned int link_id),
 
-	TP_ARGS(sdata, success),
+	TP_ARGS(sdata, success, link_id),
 
 	TP_STRUCT__entry(
 		VIF_ENTRY
 		__field(bool, success)
+		__field(unsigned int, link_id)
 	),
 
 	TP_fast_assign(
 		VIF_ASSIGN;
 		__entry->success = success;
+		__entry->link_id = link_id;
 	),
 
 	TP_printk(
-		VIF_PR_FMT " success=%d",
-		VIF_PR_ARG, __entry->success
+		VIF_PR_FMT " success=%d link_id=%d",
+		VIF_PR_ARG, __entry->success, __entry->link_id
 	)
 );
 
diff -ruw linux-6.4/net/mac80211/tx.c linux-6.4-fbx/net/mac80211/tx.c
--- linux-6.4/net/mac80211/tx.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/tx.c	2024-03-18 14:40:14.879742098 +0100
@@ -26,6 +26,7 @@
 #include <net/codel_impl.h>
 #include <asm/unaligned.h>
 #include <net/fq_impl.h>
+#include <net/gso.h>
 
 #include "ieee80211_i.h"
 #include "driver-ops.h"
@@ -42,7 +43,7 @@
 				 struct sk_buff *skb, int group_addr,
 				 int next_frag_len)
 {
-	int rate, mrate, erp, dur, i, shift = 0;
+	int rate, mrate, erp, dur, i;
 	struct ieee80211_rate *txrate;
 	struct ieee80211_local *local = tx->local;
 	struct ieee80211_supported_band *sband;
@@ -57,10 +58,8 @@
 
 	rcu_read_lock();
 	chanctx_conf = rcu_dereference(tx->sdata->vif.bss_conf.chanctx_conf);
-	if (chanctx_conf) {
-		shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
+	if (chanctx_conf)
 		rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
-	}
 	rcu_read_unlock();
 
 	/* uh huh? */
@@ -142,7 +141,7 @@
 			continue;
 
 		if (tx->sdata->vif.bss_conf.basic_rates & BIT(i))
-			rate = DIV_ROUND_UP(r->bitrate, 1 << shift);
+			rate = r->bitrate;
 
 		switch (sband->band) {
 		case NL80211_BAND_2GHZ:
@@ -172,7 +171,7 @@
 	if (rate == -1) {
 		/* No matching basic rate found; use highest suitable mandatory
 		 * PHY rate */
-		rate = DIV_ROUND_UP(mrate, 1 << shift);
+		rate = mrate;
 	}
 
 	/* Don't calculate ACKs for QoS Frames with NoAck Policy set */
@@ -184,8 +183,7 @@
 		 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
 		 * to closest integer */
 		dur = ieee80211_frame_duration(sband->band, 10, rate, erp,
-				tx->sdata->vif.bss_conf.use_short_preamble,
-				shift);
+				tx->sdata->vif.bss_conf.use_short_preamble);
 
 	if (next_frag_len) {
 		/* Frame is fragmented: duration increases with time needed to
@@ -194,8 +192,7 @@
 		/* next fragment */
 		dur += ieee80211_frame_duration(sband->band, next_frag_len,
 				txrate->bitrate, erp,
-				tx->sdata->vif.bss_conf.use_short_preamble,
-				shift);
+				tx->sdata->vif.bss_conf.use_short_preamble);
 	}
 
 	return cpu_to_le16(dur);
@@ -265,7 +262,7 @@
 						IEEE80211_QUEUE_STOP_REASON_PS,
 						false);
 		ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
-		ieee80211_queue_work(&local->hw,
+		wiphy_work_queue(local->hw.wiphy,
 				     &local->dynamic_ps_disable_work);
 	}
 
@@ -581,25 +578,9 @@
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-	enum {
-		USE_NONE,
-		USE_MGMT_KEY,
-		USE_MCAST_KEY,
-	} which_key = USE_NONE;
 	struct ieee80211_link_data *link;
 	unsigned int link_id;
 
-	if (ieee80211_is_group_privacy_action(tx->skb))
-		which_key = USE_MCAST_KEY;
-	else if (ieee80211_is_mgmt(hdr->frame_control) &&
-		 is_multicast_ether_addr(hdr->addr1) &&
-		 ieee80211_is_robust_mgmt_frame(tx->skb))
-		which_key = USE_MGMT_KEY;
-	else if (is_multicast_ether_addr(hdr->addr1))
-		which_key = USE_MCAST_KEY;
-	else
-		return NULL;
-
 	link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK);
 	if (link_id == IEEE80211_LINK_UNSPECIFIED) {
 		link = &tx->sdata->deflink;
@@ -609,14 +590,14 @@
 			return NULL;
 	}
 
-	switch (which_key) {
-	case USE_NONE:
-		break;
-	case USE_MGMT_KEY:
+	if (ieee80211_is_group_privacy_action(tx->skb))
+		return rcu_dereference(link->default_multicast_key);
+	else if (ieee80211_is_mgmt(hdr->frame_control) &&
+		 is_multicast_ether_addr(hdr->addr1) &&
+		 ieee80211_is_robust_mgmt_frame(tx->skb))
 		return rcu_dereference(link->default_mgmt_key);
-	case USE_MCAST_KEY:
+	else if (is_multicast_ether_addr(hdr->addr1))
 		return rcu_dereference(link->default_multicast_key);
-	}
 
 	return NULL;
 }
@@ -680,7 +661,8 @@
 		}
 
 		if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED &&
-			     !ieee80211_is_deauth(hdr->frame_control)))
+			     !ieee80211_is_deauth(hdr->frame_control)) &&
+			     tx->skb->protocol != tx->sdata->control_port_protocol)
 			return TX_DROP;
 
 		if (!skip_hw && tx->key &&
@@ -860,7 +842,7 @@
 
 	/* SNS11 from 802.11be 10.3.2.14 */
 	if (unlikely(is_multicast_ether_addr(hdr->addr1) &&
-		     info->control.vif->valid_links &&
+		     ieee80211_vif_is_mld(info->control.vif) &&
 		     info->control.vif->type == NL80211_IFTYPE_AP)) {
 		if (info->control.flags & IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX)
 			tx->sdata->mld_mcast_seq += 0x10;
@@ -1320,6 +1302,9 @@
 	    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
 		return NULL;
 
+	if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
+		return NULL;
+
 	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
 	    unlikely(!ieee80211_is_data_present(hdr->frame_control))) {
 		if ((!ieee80211_is_mgmt(hdr->frame_control) ||
@@ -2181,6 +2166,11 @@
 			rate_found = true;
 			break;
 
+		case IEEE80211_RADIOTAP_ANTENNA:
+			/* this can appear multiple times, keep a bitmap */
+			info->control.antennas |= BIT(*iterator.this_arg);
+			break;
+
 		case IEEE80211_RADIOTAP_DATA_RETRIES:
 			rate_retries = *iterator.this_arg;
 			break;
@@ -2275,8 +2265,17 @@
 		}
 
 		if (rate_flags & IEEE80211_TX_RC_MCS) {
+			/* reset antennas if not enough */
+			if (IEEE80211_HT_MCS_CHAINS(rate) >
+					hweight8(info->control.antennas))
+				info->control.antennas = 0;
+
 			info->control.rates[0].idx = rate;
 		} else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
+			/* reset antennas if not enough */
+			if (vht_nss > hweight8(info->control.antennas))
+				info->control.antennas = 0;
+
 			ieee80211_rate_set_vht(info->control.rates, vht_mcs,
 					       vht_nss);
 		} else if (sband) {
@@ -2626,7 +2625,7 @@
 	ethertype = (skb->data[12] << 8) | skb->data[13];
 	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
 
-	if (!sdata->vif.valid_links)
+	if (!ieee80211_vif_is_mld(&sdata->vif))
 		chanctx_conf =
 			rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
 
@@ -2643,7 +2642,7 @@
 			authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
 			wme_sta = sta->sta.wme;
 		}
-		if (!sdata->vif.valid_links) {
+		if (!ieee80211_vif_is_mld(&sdata->vif)) {
 			struct ieee80211_sub_if_data *ap_sdata;
 
 			/* override chanctx_conf from AP (we don't have one) */
@@ -2661,7 +2660,7 @@
 		/* DA BSSID SA */
 		memcpy(hdr.addr1, skb->data, ETH_ALEN);
 
-		if (sdata->vif.valid_links && sta && !sta->sta.mlo) {
+		if (ieee80211_vif_is_mld(&sdata->vif) && sta && !sta->sta.mlo) {
 			struct ieee80211_link_data *link;
 
 			link_id = sta->deflink.link_id;
@@ -2769,10 +2768,20 @@
 		tdls_peer = test_sta_flag(sta, WLAN_STA_TDLS_PEER);
 
 		if (tdls_peer) {
+			/* For TDLS only one link can be valid with peer STA */
+			int tdls_link_id = sta->sta.valid_links ?
+					   __ffs(sta->sta.valid_links) : 0;
+			struct ieee80211_link_data *link;
+
 			/* DA SA BSSID */
 			memcpy(hdr.addr1, skb->data, ETH_ALEN);
 			memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
-			memcpy(hdr.addr3, sdata->deflink.u.mgd.bssid, ETH_ALEN);
+			link = rcu_dereference(sdata->link[tdls_link_id]);
+			if (WARN_ON_ONCE(!link)) {
+				ret = -EINVAL;
+				goto free;
+			}
+			memcpy(hdr.addr3, link->u.mgd.bssid, ETH_ALEN);
 			hdrlen = 24;
 		}  else if (sdata->u.mgd.use_4addr &&
 			    cpu_to_be16(ethertype) != sdata->control_port_protocol) {
@@ -2813,7 +2822,7 @@
 	}
 
 	if (!chanctx_conf) {
-		if (!sdata->vif.valid_links) {
+		if (!ieee80211_vif_is_mld(&sdata->vif)) {
 			ret = -ENOTCONN;
 			goto free;
 		}
@@ -2860,7 +2869,8 @@
 		goto free;
 	}
 
-	if (unlikely(!multicast && ((skb->sk &&
+	if (unlikely(!multicast &&
+		     ((skb->sk &&
 		     skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS) ||
 		     ctrl_flags & IEEE80211_TX_CTL_REQ_TX_STATUS)))
 		info_id = ieee80211_store_ack_skb(local, skb, &info_flags,
@@ -2946,7 +2956,10 @@
 	memset(info, 0, sizeof(*info));
 
 	info->flags = info_flags;
-	info->ack_frame_id = info_id;
+	if (info_id) {
+		info->status_data = info_id;
+		info->status_data_idr = 1;
+	}
 	info->band = band;
 
 	if (likely(!cookie)) {
@@ -3055,7 +3068,7 @@
 	    !ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG))
 		goto out;
 
-	if (!sdata->vif.valid_links) {
+	if (!ieee80211_vif_is_mld(&sdata->vif)) {
 		rcu_read_lock();
 		chanctx_conf =
 			rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
@@ -3082,10 +3095,18 @@
 		break;
 	case NL80211_IFTYPE_STATION:
 		if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+			/* For TDLS only one link can be valid with peer STA */
+			int tdls_link_id = sta->sta.valid_links ?
+					   __ffs(sta->sta.valid_links) : 0;
+			struct ieee80211_link_data *link;
+
 			/* DA SA BSSID */
 			build.da_offs = offsetof(struct ieee80211_hdr, addr1);
 			build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
-			memcpy(hdr->addr3, sdata->deflink.u.mgd.bssid, ETH_ALEN);
+			link = rcu_dereference(sdata->link[tdls_link_id]);
+			if (WARN_ON_ONCE(!link))
+				break;
+			memcpy(hdr->addr3, link->u.mgd.bssid, ETH_ALEN);
 			build.hdr_len = 24;
 			break;
 		}
@@ -3126,7 +3147,7 @@
 		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
 		/* DA BSSID SA */
 		build.da_offs = offsetof(struct ieee80211_hdr, addr1);
-		if (sta->sta.mlo || !sdata->vif.valid_links) {
+		if (sta->sta.mlo || !ieee80211_vif_is_mld(&sdata->vif)) {
 			memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
 		} else {
 			unsigned int link_id = sta->deflink.link_id;
@@ -4350,6 +4371,9 @@
 			return false;
 		if (sdata->wdev.use_4addr)
 			return false;
+		if (ieee80211_hw_check(&sdata->local->hw,
+				       APVLAN_NEED_MCAST_TO_UCAST))
+			break;
 		fallthrough;
 	case NL80211_IFTYPE_AP:
 		/* check runtime toggle for this bss */
@@ -4471,6 +4495,8 @@
  * @dev: incoming interface
  *
  * On failure skb will be freed.
+ *
+ * Returns: the netdev TX status (but really only %NETDEV_TX_OK)
  */
 netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 				       struct net_device *dev)
@@ -4495,7 +4521,7 @@
 			__ieee80211_subif_start_xmit(skb, dev, 0,
 						     IEEE80211_TX_CTRL_MLO_LINK_UNSPEC,
 						     NULL);
-	} else if (sdata->vif.valid_links &&
+	} else if (ieee80211_vif_is_mld(&sdata->vif) &&
 		   sdata->vif.type == NL80211_IFTYPE_AP &&
 		   !ieee80211_hw_check(&sdata->local->hw, MLO_MCAST_MULTI_LINK_TX)) {
 		ieee80211_mlo_multicast_tx(dev, skb);
@@ -4570,19 +4596,25 @@
 
 static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
 				struct net_device *dev, struct sta_info *sta,
-				struct ieee80211_key *key, struct sk_buff *skb)
+				struct ieee80211_key *key, struct sk_buff *skb,
+				u32 info_flags, u32 ctrl_flags, u64 *cookie)
 {
 	struct ieee80211_tx_info *info;
 	struct ieee80211_local *local = sdata->local;
 	struct tid_ampdu_tx *tid_tx;
 	struct sk_buff *seg, *next;
+	struct ethhdr *ehdr = (struct ethhdr *)skb->data;
+	unsigned char *ra = ehdr->h_dest;
 	unsigned int skbs = 0, len = 0;
 	u16 queue;
+	bool multicast;
 	u8 tid;
 
 	queue = ieee80211_select_queue(sdata, sta, skb);
 	skb_set_queue_mapping(skb, queue);
 
+	multicast = is_multicast_ether_addr(ra);
+
 	if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)) &&
 	    test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
 		goto out_free;
@@ -4615,6 +4647,7 @@
 	info = IEEE80211_SKB_CB(skb);
 	memset(info, 0, sizeof(*info));
 
+	info->flags |= info_flags;
 	info->hw_queue = sdata->vif.hw_queue[queue];
 
 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -4634,10 +4667,14 @@
 			memcpy(IEEE80211_SKB_CB(seg), info, sizeof(*info));
 	}
 
-	if (unlikely(skb->sk &&
-		     skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS))
-		info->ack_frame_id = ieee80211_store_ack_skb(local, skb,
-							     &info->flags, NULL);
+	if (unlikely((skb->sk &&
+		      skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS) ||
+		     ((ctrl_flags & IEEE80211_TX_CTL_REQ_TX_STATUS) && !multicast))) {
+		info->status_data = ieee80211_store_ack_skb(local, skb,
+							    &info->flags, cookie);
+		if (info->status_data)
+			info->status_data_idr = 1;
+	}
 
 	dev_sw_netstats_tx_add(dev, skbs, len);
 	sta->deflink.tx_stats.packets[queue] += skbs;
@@ -4653,13 +4690,87 @@
 	kfree_skb(skb);
 }
 
-netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
-					    struct net_device *dev)
+static
+void ieee80211_8023_xmit_ap(struct ieee80211_sub_if_data *sdata,
+			    struct net_device *dev, struct sta_info *sta,
+			    struct ieee80211_key *key, struct sk_buff *skb,
+			    u32 info_flags, u32 ctrl_flags, u64 *cookie)
+{
+	struct ieee80211_tx_info *info;
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_sta *pubsta = NULL;
+	struct ieee80211_tx_control control = {};
+	struct ethhdr *ehdr = (struct ethhdr *)skb->data;
+        unsigned char *ra = ehdr->h_dest;
+        bool multicast = is_multicast_ether_addr(ra);
+	unsigned long flags;
+	int q;
+	u16 q_map;
+
+	/*
+	 * If the skb is shared we need to obtain our own copy.
+	 */
+	skb = skb_share_check(skb, GFP_ATOMIC);
+
+	if (unlikely(!skb))
+		return;
+
+	info = IEEE80211_SKB_CB(skb);
+	memset(info, 0, sizeof(*info));
+	info->flags |= info_flags;
+
+	if (unlikely((skb->sk &&
+		      skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS) ||
+                     ((ctrl_flags & IEEE80211_TX_CTL_REQ_TX_STATUS) && !multicast))) {
+		info->status_data = ieee80211_store_ack_skb(local, skb,
+							    &info->flags, cookie);
+		if (info->status_data)
+			info->status_data_idr = 1;
+	}
+
+	info->flags |= IEEE80211_TX_CTL_HW_80211_ENCAP;
+	info->control.vif = &sdata->vif;
+
+	if (key)
+		info->control.hw_key = &key->conf;
+
+	q_map = skb_get_queue_mapping(skb);
+	q = sdata->vif.hw_queue[q_map];
+
+	if (sta) {
+		sta->deflink.tx_stats.bytes[q_map] += skb->len;
+		sta->deflink.tx_stats.packets[q_map]++;
+	}
+
+	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+
+	if (local->queue_stop_reasons[q] || !skb_queue_empty(&local->pending[q])) {
+		skb_queue_tail(&local->pending[q], skb);
+		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+		return;
+	}
+
+	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
+	if (sta && sta->uploaded)
+		pubsta = &sta->sta;
+
+	control.sta = pubsta;
+
+	drv_tx(local, &control, skb);
+}
+
+static netdev_tx_t __ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
+						     struct net_device *dev,
+						     u32 info_flags,
+						     u32 ctrl_flags,
+						     u64 *cookie)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ethhdr *ehdr = (struct ethhdr *)skb->data;
 	struct ieee80211_key *key;
 	struct sta_info *sta;
+	bool is_eapol;
 
 	if (unlikely(!ieee80211_sdata_running(sdata) || skb->len < ETH_HLEN)) {
 		kfree_skb(skb);
@@ -4672,10 +4783,11 @@
 		kfree_skb(skb);
 		goto out;
 	}
+	is_eapol = (sdata->control_port_protocol == ehdr->h_proto);
 
 	if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded ||
-	    !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
-	    sdata->control_port_protocol == ehdr->h_proto))
+	    (!test_sta_flag(sta, WLAN_STA_AUTHORIZED) && !is_eapol) ||
+	    (is_eapol && !(sdata->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED))))
 		goto skip_offload;
 
 	key = rcu_dereference(sta->ptk[sta->ptk_idx]);
@@ -4687,7 +4799,14 @@
 		goto skip_offload;
 
 	sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
-	ieee80211_8023_xmit(sdata, dev, sta, key, skb);
+
+	if (sdata->vif.type == NL80211_IFTYPE_AP) {
+		ieee80211_8023_xmit_ap(sdata, dev, sta, key, skb, info_flags, ctrl_flags, cookie);
+		goto out;
+	}
+
+	ieee80211_8023_xmit(sdata, dev, sta, key, skb, info_flags,
+			    ctrl_flags, cookie);
 	goto out;
 
 skip_offload:
@@ -4698,6 +4817,60 @@
 	return NETDEV_TX_OK;
 }
 
+netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
+					    struct net_device *dev)
+{
+#if defined(CONFIG_IP_FFN) || defined(CONFIG_IPV6_FFN)
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_control control = {};
+	struct sta_info *sta;
+	struct ieee80211_sta *pubsta = NULL;
+
+	info->control.vif = &sdata->vif;
+
+	if (skb->ffn_ff_done) {
+		info->control.flags = u32_encode_bits(IEEE80211_LINK_UNSPECIFIED,
+						      IEEE80211_TX_CTRL_MLO_LINK);
+		info->flags = IEEE80211_TX_CTL_HW_80211_ENCAP;
+
+		if (hweight16(sdata->vif.valid_links) > 1) {
+			rcu_read_lock();
+
+			if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
+				kfree_skb(skb);
+				goto out;
+			}
+
+			if (!IS_ERR_OR_NULL(sta) && sta->uploaded)
+				pubsta = &sta->sta;
+
+			control.sta = pubsta;
+			drv_tx(sdata->local, &control,  skb);
+out:
+			rcu_read_unlock();
+		} else {
+			control.sta = NULL;
+
+			rcu_read_lock();
+			if (!ieee80211_lookup_ra_sta(sdata, skb, &sta) &&
+			    !IS_ERR_OR_NULL(sta)) {
+				sta->deflink.tx_stats.packets[0]++;
+				sta->deflink.tx_stats.bytes[0] += skb->len;
+			}
+			rcu_read_unlock();
+			dev_sw_netstats_tx_add(dev, 1, skb->len);
+
+			drv_tx(sdata->local, &control,  skb);
+		}
+
+		return NETDEV_TX_OK;
+	}
+#endif
+
+	return __ieee80211_subif_start_xmit_8023(skb, dev, 0, 0, NULL);
+}
+
 struct sk_buff *
 ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
 			      struct sk_buff *skb, u32 info_flags)
@@ -4771,7 +4944,7 @@
 
 	if (info->control.flags & IEEE80211_TX_INTCFL_NEED_TXPROCESSING) {
 		/* update band only for non-MLD */
-		if (!sdata->vif.valid_links) {
+		if (!ieee80211_vif_is_mld(&sdata->vif)) {
 			chanctx_conf =
 				rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
 			if (unlikely(!chanctx_conf)) {
@@ -5546,7 +5719,6 @@
 						     IEEE80211_INCLUDE_ALL_MBSSID_ELEMS,
 						     NULL);
 	struct sk_buff *copy;
-	int shift;
 
 	if (!bcn)
 		return bcn;
@@ -5566,8 +5738,7 @@
 	if (!copy)
 		return bcn;
 
-	shift = ieee80211_vif_get_shift(vif);
-	ieee80211_tx_monitor(hw_to_local(hw), copy, 1, shift, false, NULL);
+	ieee80211_tx_monitor(hw_to_local(hw), copy, 1, false, NULL);
 
 	return bcn;
 }
@@ -5917,7 +6088,7 @@
 	int ret;
 	u32 queues;
 
-	lockdep_assert_held(&local->sta_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	/* only some cases are supported right now */
 	switch (sdata->vif.type) {
@@ -5978,7 +6149,7 @@
 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 
-	lockdep_assert_held(&sdata->local->sta_mtx);
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	/* only some cases are supported right now */
 	switch (sdata->vif.type) {
@@ -6018,7 +6189,7 @@
 	BUILD_BUG_ON(!FIELD_FIT(IEEE80211_TX_CTRL_MLO_LINK,
 				IEEE80211_LINK_UNSPECIFIED));
 
-	if (!sdata->vif.valid_links) {
+	if (!ieee80211_vif_is_mld(&sdata->vif)) {
 		link = 0;
 	} else if (link_id >= 0) {
 		link = link_id;
@@ -6064,7 +6235,7 @@
 	enum nl80211_band band;
 
 	rcu_read_lock();
-	if (!sdata->vif.valid_links) {
+	if (!ieee80211_vif_is_mld(&sdata->vif)) {
 		WARN_ON(link_id >= 0);
 		chanctx_conf =
 			rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
@@ -6099,6 +6270,9 @@
 	u32 flags = 0;
 	int err;
 
+	/* mutex lock is only needed for incrementing the cookie counter */
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	/* Only accept CONTROL_PORT_PROTOCOL configured in CONNECT/ASSOCIATE
 	 * or Pre-Authentication
 	 */
@@ -6189,15 +6363,15 @@
 	rcu_read_unlock();
 
 start_xmit:
-	/* mutex lock is only needed for incrementing the cookie counter */
-	mutex_lock(&local->mtx);
-
 	local_bh_disable();
-	__ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags, cookie);
+	if (sdata->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
+		__ieee80211_subif_start_xmit_8023(skb, skb->dev, flags,
+						  ctrl_flags, cookie);
+	else
+		__ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags,
+					     cookie);
 	local_bh_enable();
 
-	mutex_unlock(&local->mtx);
-
 	return 0;
 }
 
diff -ruw linux-6.4/net/mac80211/util.c linux-6.4-fbx/net/mac80211/util.c
--- linux-6.4/net/mac80211/util.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/util.c	2024-04-19 16:04:28.973736213 +0200
@@ -6,7 +6,7 @@
  * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017	Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
  *
  * utilities for mac80211
  */
@@ -24,6 +24,7 @@
 #include <net/net_namespace.h>
 #include <net/cfg80211.h>
 #include <net/rtnetlink.h>
+#include <kunit/visibility.h>
 
 #include "ieee80211_i.h"
 #include "driver-ops.h"
@@ -109,8 +110,7 @@
 }
 
 int ieee80211_frame_duration(enum nl80211_band band, size_t len,
-			     int rate, int erp, int short_preamble,
-			     int shift)
+			     int rate, int erp, int short_preamble)
 {
 	int dur;
 
@@ -121,9 +121,6 @@
 	 *
 	 * rate is in 100 kbps, so divident is multiplied by 10 in the
 	 * DIV_ROUND_UP() operations.
-	 *
-	 * shift may be 2 for 5 MHz channels or 1 for 10 MHz channels, and
-	 * is assumed to be 0 otherwise.
 	 */
 
 	if (band == NL80211_BAND_5GHZ || erp) {
@@ -144,12 +141,6 @@
 		dur += 16; /* IEEE 802.11-2012 18.3.2.4: T_PREAMBLE = 16 usec */
 		dur += 4; /* IEEE 802.11-2012 18.3.2.4: T_SIGNAL = 4 usec */
 
-		/* IEEE 802.11-2012 18.3.2.4: all values above are:
-		 *  * times 4 for 5 MHz
-		 *  * times 2 for 10 MHz
-		 */
-		dur *= 1 << shift;
-
 		/* rates should already consider the channel bandwidth,
 		 * don't apply divisor again.
 		 */
@@ -184,7 +175,7 @@
 {
 	struct ieee80211_sub_if_data *sdata;
 	u16 dur;
-	int erp, shift = 0;
+	int erp;
 	bool short_preamble = false;
 
 	erp = 0;
@@ -193,11 +184,10 @@
 		short_preamble = sdata->vif.bss_conf.use_short_preamble;
 		if (sdata->deflink.operating_11g_mode)
 			erp = rate->flags & IEEE80211_RATE_ERP_G;
-		shift = ieee80211_vif_get_shift(vif);
 	}
 
 	dur = ieee80211_frame_duration(band, frame_len, rate->bitrate, erp,
-				       short_preamble, shift);
+				       short_preamble);
 
 	return cpu_to_le16(dur);
 }
@@ -211,7 +201,7 @@
 	struct ieee80211_rate *rate;
 	struct ieee80211_sub_if_data *sdata;
 	bool short_preamble;
-	int erp, shift = 0, bitrate;
+	int erp, bitrate;
 	u16 dur;
 	struct ieee80211_supported_band *sband;
 
@@ -227,20 +217,19 @@
 		short_preamble = sdata->vif.bss_conf.use_short_preamble;
 		if (sdata->deflink.operating_11g_mode)
 			erp = rate->flags & IEEE80211_RATE_ERP_G;
-		shift = ieee80211_vif_get_shift(vif);
 	}
 
-	bitrate = DIV_ROUND_UP(rate->bitrate, 1 << shift);
+	bitrate = rate->bitrate;
 
 	/* CTS duration */
 	dur = ieee80211_frame_duration(sband->band, 10, bitrate,
-				       erp, short_preamble, shift);
+				       erp, short_preamble);
 	/* Data frame duration */
 	dur += ieee80211_frame_duration(sband->band, frame_len, bitrate,
-					erp, short_preamble, shift);
+					erp, short_preamble);
 	/* ACK duration */
 	dur += ieee80211_frame_duration(sband->band, 10, bitrate,
-					erp, short_preamble, shift);
+					erp, short_preamble);
 
 	return cpu_to_le16(dur);
 }
@@ -255,7 +244,7 @@
 	struct ieee80211_rate *rate;
 	struct ieee80211_sub_if_data *sdata;
 	bool short_preamble;
-	int erp, shift = 0, bitrate;
+	int erp, bitrate;
 	u16 dur;
 	struct ieee80211_supported_band *sband;
 
@@ -270,18 +259,17 @@
 		short_preamble = sdata->vif.bss_conf.use_short_preamble;
 		if (sdata->deflink.operating_11g_mode)
 			erp = rate->flags & IEEE80211_RATE_ERP_G;
-		shift = ieee80211_vif_get_shift(vif);
 	}
 
-	bitrate = DIV_ROUND_UP(rate->bitrate, 1 << shift);
+	bitrate = rate->bitrate;
 
 	/* Data frame duration */
 	dur = ieee80211_frame_duration(sband->band, frame_len, bitrate,
-				       erp, short_preamble, shift);
+				       erp, short_preamble);
 	if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) {
 		/* ACK duration */
 		dur += ieee80211_frame_duration(sband->band, 10, bitrate,
-						erp, short_preamble, shift);
+						erp, short_preamble);
 	}
 
 	return cpu_to_le16(dur);
@@ -705,6 +693,19 @@
 					IEEE80211_QUEUE_STOP_REASON_FLUSH,
 					false);
 
+	if (drop) {
+		struct sta_info *sta;
+
+		/* Purge the queues, so the frames on them won't be
+		 * sent during __ieee80211_wake_queue()
+		 */
+		list_for_each_entry(sta, &local->sta_list, list) {
+			if (sdata != sta->sdata)
+				continue;
+			ieee80211_purge_sta_txqs(sta);
+		}
+	}
+
 	drv_flush(local, sdata, queues, drop);
 
 	ieee80211_wake_queues_by_reason(&local->hw, queues,
@@ -918,6 +919,7 @@
 				  struct ieee80211_elems_parse_params *params)
 {
 	const void *data = elem->data + 1;
+	bool calc_crc = false;
 	u8 len;
 
 	if (!elem->datalen)
@@ -927,12 +929,9 @@
 
 	switch (elem->data[0]) {
 	case WLAN_EID_EXT_HE_MU_EDCA:
-		if (len >= sizeof(*elems->mu_edca_param_set)) {
+		calc_crc = true;
+		if (len >= sizeof(*elems->mu_edca_param_set))
 			elems->mu_edca_param_set = data;
-			if (crc)
-				*crc = crc32_be(*crc, (void *)elem,
-						elem->datalen + 2);
-		}
 		break;
 	case WLAN_EID_EXT_HE_CAPABILITY:
 		if (ieee80211_he_capa_size_ok(data, len)) {
@@ -941,13 +940,10 @@
 		}
 		break;
 	case WLAN_EID_EXT_HE_OPERATION:
+		calc_crc = true;
 		if (len >= sizeof(*elems->he_operation) &&
-		    len >= ieee80211_he_oper_size(data) - 1) {
-			if (crc)
-				*crc = crc32_be(*crc, (void *)elem,
-						elem->datalen + 2);
+		    len >= ieee80211_he_oper_size(data) - 1)
 			elems->he_operation = data;
-		}
 		break;
 	case WLAN_EID_EXT_UORA:
 		if (len >= 1)
@@ -981,14 +977,49 @@
 	case WLAN_EID_EXT_EHT_OPERATION:
 		if (ieee80211_eht_oper_size_ok(data, len))
 			elems->eht_operation = data;
+		calc_crc = true;
 		break;
 	case WLAN_EID_EXT_EHT_MULTI_LINK:
+		calc_crc = true;
+
 		if (ieee80211_mle_size_ok(data, len)) {
-			elems->multi_link = (void *)data;
-			elems->multi_link_len = len;
+			const struct ieee80211_multi_link_elem *mle =
+				(void *)data;
+
+			switch (le16_get_bits(mle->control,
+					      IEEE80211_ML_CONTROL_TYPE)) {
+			case IEEE80211_ML_CONTROL_TYPE_BASIC:
+				elems->ml_basic_elem = (void *)elem;
+				elems->ml_basic = data;
+				elems->ml_basic_len = len;
+				break;
+			case IEEE80211_ML_CONTROL_TYPE_RECONF:
+				elems->ml_reconf_elem = (void *)elem;
+				elems->ml_reconf = data;
+				elems->ml_reconf_len = len;
+				break;
+			default:
+				break;
+			}
 		}
 		break;
+	case WLAN_EID_EXT_BANDWIDTH_INDICATION:
+		if (ieee80211_bandwidth_indication_size_ok(data, len))
+			elems->bandwidth_indication = data;
+		calc_crc = true;
+		break;
+	case WLAN_EID_EXT_TID_TO_LINK_MAPPING:
+		calc_crc = true;
+		if (ieee80211_tid_to_link_map_size_ok(data, len) &&
+		    elems->ttlm_num < ARRAY_SIZE(elems->ttlm)) {
+			elems->ttlm[elems->ttlm_num] = (void *)data;
+			elems->ttlm_num++;
 	}
+		break;
+	}
+
+	if (crc && calc_crc)
+		*crc = crc32_be(*crc, (void *)elem, elem->datalen + 2);
 }
 
 static u32
@@ -1000,11 +1031,11 @@
 	bool calc_crc = params->filter != 0;
 	DECLARE_BITMAP(seen_elems, 256);
 	u32 crc = params->crc;
-	const u8 *ie;
 
 	bitmap_zero(seen_elems, 256);
 
 	for_each_element(elem, params->start, params->len) {
+		const struct element *subelem;
 		bool elem_parse_failed;
 		u8 id = elem->id;
 		u8 elen = elem->datalen;
@@ -1262,15 +1293,27 @@
 			}
 			/*
 			 * This is a bit tricky, but as we only care about
-			 * the wide bandwidth channel switch element, so
-			 * just parse it out manually.
+			 * a few elements, parse them out manually.
 			 */
-			ie = cfg80211_find_ie(WLAN_EID_WIDE_BW_CHANNEL_SWITCH,
+			subelem = cfg80211_find_elem(WLAN_EID_WIDE_BW_CHANNEL_SWITCH,
 					      pos, elen);
-			if (ie) {
-				if (ie[1] >= sizeof(*elems->wide_bw_chansw_ie))
+			if (subelem) {
+				if (subelem->datalen >= sizeof(*elems->wide_bw_chansw_ie))
 					elems->wide_bw_chansw_ie =
-						(void *)(ie + 2);
+						(void *)subelem->data;
+				else
+					elem_parse_failed = true;
+			}
+
+			subelem = cfg80211_find_ext_elem(WLAN_EID_EXT_BANDWIDTH_INDICATION,
+							 pos, elen);
+			if (subelem) {
+				const void *edata = subelem->data + 1;
+				u8 edatalen = subelem->datalen - 1;
+
+				if (ieee80211_bandwidth_indication_size_ok(edata,
+									   edatalen))
+					elems->bandwidth_indication = edata;
 				else
 					elem_parse_failed = true;
 			}
@@ -1458,56 +1501,11 @@
 	return found ? profile_len : 0;
 }
 
-static void ieee80211_defragment_element(struct ieee802_11_elems *elems,
-					 void **elem_ptr, size_t *len,
-					 size_t total_len, u8 frag_id)
-{
-	u8 *data = *elem_ptr, *pos, *start;
-	const struct element *elem;
-
-	/*
-	 * Since 'data' points to the data of the element, not the element
-	 * itself, allow 254 in case it was an extended element where the
-	 * extended ID isn't part of the data we see here and thus not part of
-	 * 'len' either.
-	 */
-	if (!data || (*len != 254 && *len != 255))
-		return;
-
-	start = elems->scratch_pos;
-
-	if (WARN_ON(*len > (elems->scratch + elems->scratch_len -
-			    elems->scratch_pos)))
-		return;
-
-	memcpy(elems->scratch_pos, data, *len);
-	elems->scratch_pos += *len;
-
-	pos = data + *len;
-	total_len -= *len;
-	for_each_element(elem, pos, total_len) {
-		if (elem->id != frag_id)
-			break;
-
-		if (WARN_ON(elem->datalen >
-			    (elems->scratch + elems->scratch_len -
-			     elems->scratch_pos)))
-			return;
-
-		memcpy(elems->scratch_pos, elem->data, elem->datalen);
-		elems->scratch_pos += elem->datalen;
-
-		*len += elem->datalen;
-	}
-
-	*elem_ptr = start;
-}
-
 static void ieee80211_mle_get_sta_prof(struct ieee802_11_elems *elems,
 				       u8 link_id)
 {
-	const struct ieee80211_multi_link_elem *ml = elems->multi_link;
-	size_t ml_len = elems->multi_link_len;
+	const struct ieee80211_multi_link_elem *ml = elems->ml_basic;
+	ssize_t ml_len = elems->ml_basic_len;
 	const struct element *sub;
 
 	if (!ml || !ml_len)
@@ -1519,12 +1517,14 @@
 
 	for_each_mle_subelement(sub, (u8 *)ml, ml_len) {
 		struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data;
+		ssize_t sta_prof_len;
 		u16 control;
 
 		if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE)
 			continue;
 
-		if (!ieee80211_mle_sta_prof_size_ok(sub->data, sub->datalen))
+		if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data,
+							  sub->datalen))
 			return;
 
 		control = le16_to_cpu(prof->control);
@@ -1536,14 +1536,23 @@
 		if (!(control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE))
 			return;
 
-		elems->prof = prof;
-		elems->sta_prof_len = sub->datalen;
-
 		/* the sub element can be fragmented */
-		ieee80211_defragment_element(elems, (void **)&elems->prof,
-					     &elems->sta_prof_len,
-					     ml_len - (sub->data - (u8 *)ml),
+		sta_prof_len =
+			cfg80211_defragment_element(sub,
+						    (u8 *)ml, ml_len,
+						    elems->scratch_pos,
+						    elems->scratch +
+							elems->scratch_len -
+							elems->scratch_pos,
 					     IEEE80211_MLE_SUBELEM_FRAGMENT);
+
+		if (sta_prof_len < 0)
+			return;
+
+		elems->prof = (void *)elems->scratch_pos;
+		elems->sta_prof_len = sta_prof_len;
+		elems->scratch_pos += sta_prof_len;
+
 		return;
 	}
 }
@@ -1557,18 +1566,28 @@
 		.from_ap = params->from_ap,
 		.link_id = -1,
 	};
+	ssize_t ml_len = elems->ml_basic_len;
 	const struct element *non_inherit = NULL;
 	const u8 *end;
 
 	if (params->link_id == -1)
 		return;
 
-	ieee80211_defragment_element(elems, (void **)&elems->multi_link,
-				     &elems->multi_link_len,
-				     elems->total_len - ((u8 *)elems->multi_link -
-							 elems->ie_start),
+	ml_len = cfg80211_defragment_element(elems->ml_basic_elem,
+					     elems->ie_start,
+					     elems->total_len,
+					     elems->scratch_pos,
+					     elems->scratch +
+						elems->scratch_len -
+						elems->scratch_pos,
 				     WLAN_EID_FRAGMENT);
 
+	if (ml_len < 0)
+		return;
+
+	elems->ml_basic = (const void *)elems->scratch_pos;
+	elems->ml_basic_len = ml_len;
+
 	ieee80211_mle_get_sta_prof(elems, params->link_id);
 	prof = elems->prof;
 
@@ -1597,6 +1616,59 @@
 	_ieee802_11_parse_elems_full(&sub, elems, non_inherit);
 }
 
+u32 ieee802_11_parse_mesh_vendor_elems(const u8 *start, size_t len, bool action,
+				       struct ieee802_11_mesh_vendor_specific_elems *elems,
+				       u64 filter, u32 crc, u8 type)
+{
+	size_t left = len;
+	const u8 *pos = start;
+	bool calc_crc = filter != 0;
+
+	memset(elems, 0, sizeof(*elems));
+	elems->parse_error = true;
+
+	while (left >= 2) {
+		u8 id, elen;
+
+		id = *pos++;
+		elen = *pos++;
+		left -= 2;
+
+		if (elen > left)
+			break;
+
+		if (calc_crc && id < 64 && (filter & (1ULL << id)))
+			crc = crc32_be(crc, pos - 2, elen + 2);
+
+		switch (id) {
+		case WLAN_EID_VENDOR_SPECIFIC:
+			if (elen >= 4 && pos[0] == 0xC0 && pos[1] == 0xFF &&
+			    pos[2] == 0xEE && pos[3] == type) {
+				/* Qubercomm OUI (C0:FF:EE) */
+
+				if (calc_crc)
+					crc = crc32_be(crc, pos - 2, elen + 2);
+
+				elems->ie_start = pos;
+				elems->ie_len = elen;
+				elems->parse_error = false;
+			}
+			break;
+		default:
+			break;
+		}
+
+		if (elems->parse_error == false)
+			break;
+
+		left -= elen;
+		pos += elen;
+	}
+
+	return crc;
+}
+EXPORT_SYMBOL(ieee802_11_parse_mesh_vendor_elems);
+
 struct ieee802_11_elems *
 ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
 {
@@ -1604,9 +1676,9 @@
 	const struct element *non_inherit = NULL;
 	u8 *nontransmitted_profile;
 	int nontransmitted_profile_len = 0;
-	size_t scratch_len = params->scratch_len ?: 3 * params->len;
+	size_t scratch_len = 3 * params->len;
 
-	elems = kzalloc(sizeof(*elems) + scratch_len, GFP_ATOMIC);
+	elems = kzalloc(struct_size(elems, scratch, scratch_len), GFP_ATOMIC);
 	if (!elems)
 		return NULL;
 	elems->ie_start = params->start;
@@ -1661,6 +1733,7 @@
 
 	return elems;
 }
+EXPORT_SYMBOL_IF_KUNIT(ieee802_11_parse_elems_full);
 
 void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
 					   struct ieee80211_tx_queue_params
@@ -1824,7 +1897,7 @@
 	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb;
 	struct ieee80211_mgmt *mgmt;
-	bool multi_link = sdata->vif.valid_links;
+	bool multi_link = ieee80211_vif_is_mld(&sdata->vif);
 	struct {
 		u8 id;
 		u8 len;
@@ -1918,7 +1991,7 @@
 	}
 }
 
-static u8 *ieee80211_write_he_6ghz_cap(u8 *pos, __le16 cap, u8 *end)
+u8 *ieee80211_write_he_6ghz_cap(u8 *pos, __le16 cap, u8 *end)
 {
 	if ((end - pos) < 5)
 		return pos;
@@ -1949,7 +2022,6 @@
 	u8 rates[32];
 	int num_rates;
 	int ext_rates_len;
-	int shift;
 	u32 rate_flags;
 	bool have_80mhz = false;
 
@@ -1960,7 +2032,6 @@
 		return 0;
 
 	rate_flags = ieee80211_chandef_rate_flags(chandef);
-	shift = ieee80211_chandef_get_shift(chandef);
 
 	/* For direct scan add S1G IE and consider its override bits */
 	if (band == NL80211_BAND_S1GHZ) {
@@ -1978,8 +2049,7 @@
 			continue;
 
 		rates[num_rates++] =
-			(u8) DIV_ROUND_UP(sband->bitrates[i].bitrate,
-					  (1 << shift) * 5);
+			(u8) DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
 	}
 
 	supp_rates_len = min_t(int, num_rates, 8);
@@ -2121,8 +2191,7 @@
 		*offset = noffset;
 	}
 
-	he_cap = ieee80211_get_he_iftype_cap(sband,
-					     ieee80211_vif_type_p2p(&sdata->vif));
+	he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
 	if (he_cap &&
 	    cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
 					 IEEE80211_CHAN_NO_HE)) {
@@ -2131,8 +2200,7 @@
 			goto out_err;
 	}
 
-	eht_cap = ieee80211_get_eht_iftype_cap(sband,
-					       ieee80211_vif_type_p2p(&sdata->vif));
+	eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
 
 	if (eht_cap &&
 	    cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
@@ -2150,8 +2218,7 @@
 		struct ieee80211_supported_band *sband6;
 
 		sband6 = local->hw.wiphy->bands[NL80211_BAND_6GHZ];
-		he_cap = ieee80211_get_he_iftype_cap(sband6,
-				ieee80211_vif_type_p2p(&sdata->vif));
+		he_cap = ieee80211_get_he_iftype_cap_vif(sband6, &sdata->vif);
 
 		if (he_cap) {
 			enum nl80211_iftype iftype =
@@ -2275,14 +2342,13 @@
 	struct ieee80211_supported_band *sband;
 	size_t num_rates;
 	u32 supp_rates, rate_flags;
-	int i, j, shift;
+	int i, j;
 
 	sband = sdata->local->hw.wiphy->bands[band];
 	if (WARN_ON(!sband))
 		return 1;
 
 	rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
-	shift = ieee80211_vif_get_shift(&sdata->vif);
 
 	num_rates = sband->n_bitrates;
 	supp_rates = 0;
@@ -2308,8 +2374,7 @@
 			    != rate_flags)
 				continue;
 
-			brate = DIV_ROUND_UP(sband->bitrates[j].bitrate,
-					     1 << shift);
+			brate = sband->bitrates[j].bitrate;
 
 			if (brate == own_rate) {
 				supp_rates |= BIT(j);
@@ -2326,9 +2391,10 @@
 	ieee80211_led_radio(local, false);
 	ieee80211_mod_tpt_led_trig(local, 0, IEEE80211_TPT_LEDTRIG_FL_RADIO);
 
-	cancel_work_sync(&local->reconfig_filter);
+	wiphy_work_cancel(local->hw.wiphy, &local->reconfig_filter);
 
 	flush_workqueue(local->workqueue);
+	wiphy_work_flush(local->hw.wiphy, NULL);
 	drv_stop(local);
 }
 
@@ -2350,8 +2416,8 @@
 		 */
 		if (aborted)
 			set_bit(SCAN_ABORTED, &local->scanning);
-		ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
-		flush_delayed_work(&local->scan_work);
+		wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
+		wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work);
 	}
 }
 
@@ -2360,6 +2426,8 @@
 	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_chanctx *ctx;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	/*
 	 * We get here if during resume the device can't be restarted properly.
 	 * We might also get here if this happens during HW reset, which is a
@@ -2373,6 +2441,7 @@
 	local->resuming = false;
 	local->suspended = false;
 	local->in_reconfig = false;
+	local->reconfig_failure = true;
 
 	ieee80211_flush_completed_scan(local, true);
 
@@ -2387,10 +2456,8 @@
 	/* Mark channel contexts as not being in the driver any more to avoid
 	 * removing them from the driver during the shutdown process...
 	 */
-	mutex_lock(&local->chanctx_mtx);
 	list_for_each_entry(ctx, &local->chanctx_list, list)
 		ctx->driver_present = false;
-	mutex_unlock(&local->chanctx_mtx);
 }
 
 static void ieee80211_assign_chanctx(struct ieee80211_local *local,
@@ -2400,17 +2467,17 @@
 	struct ieee80211_chanctx_conf *conf;
 	struct ieee80211_chanctx *ctx;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (!local->use_chanctx)
 		return;
 
-	mutex_lock(&local->chanctx_mtx);
 	conf = rcu_dereference_protected(link->conf->chanctx_conf,
-					 lockdep_is_held(&local->chanctx_mtx));
+					 lockdep_is_held(&local->hw.wiphy->mtx));
 	if (conf) {
 		ctx = container_of(conf, struct ieee80211_chanctx, conf);
 		drv_assign_vif_chanctx(local, sdata, link->conf, ctx);
 	}
-	mutex_unlock(&local->chanctx_mtx);
 }
 
 static void ieee80211_reconfig_stations(struct ieee80211_sub_if_data *sdata)
@@ -2418,8 +2485,9 @@
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	/* add STAs back */
-	mutex_lock(&local->sta_mtx);
 	list_for_each_entry(sta, &local->sta_list, list) {
 		enum ieee80211_sta_state state;
 
@@ -2431,7 +2499,6 @@
 			WARN_ON(drv_sta_state(local, sta->sdata, sta, state,
 					      state + 1));
 	}
-	mutex_unlock(&local->sta_mtx);
 }
 
 static int ieee80211_reconfig_nan(struct ieee80211_sub_if_data *sdata)
@@ -2475,6 +2542,35 @@
 	return 0;
 }
 
+static void ieee80211_reconfig_ap_links(struct ieee80211_local *local,
+					struct ieee80211_sub_if_data *sdata,
+					u64 changed)
+{
+	int link_id;
+
+	for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
+		struct ieee80211_link_data *link;
+
+		if (!(sdata->vif.active_links & BIT(link_id)))
+			continue;
+
+		link = sdata_dereference(sdata->link[link_id], sdata);
+		if (!link)
+			continue;
+
+		if (rcu_access_pointer(link->u.ap.beacon))
+			drv_start_ap(local, sdata, link->conf);
+
+		if (!link->conf->enable_beacon)
+			continue;
+
+		changed |= BSS_CHANGED_BEACON |
+			   BSS_CHANGED_BEACON_ENABLED;
+
+		ieee80211_link_info_change_notify(sdata, link, changed);
+	}
+}
+
 int ieee80211_reconfig(struct ieee80211_local *local)
 {
 	struct ieee80211_hw *hw = &local->hw;
@@ -2489,6 +2585,8 @@
 	bool suspended = local->suspended;
 	bool in_reconfig = false;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	/* nothing to do if HW shouldn't run */
 	if (!local->open_count)
 		goto wake_up;
@@ -2604,12 +2702,10 @@
 
 	/* add channel contexts */
 	if (local->use_chanctx) {
-		mutex_lock(&local->chanctx_mtx);
 		list_for_each_entry(ctx, &local->chanctx_list, list)
 			if (ctx->replace_state !=
 			    IEEE80211_CHANCTX_REPLACES_OTHER)
 				WARN_ON(drv_add_chanctx(local, ctx));
-		mutex_unlock(&local->chanctx_mtx);
 
 		sdata = wiphy_dereference(local->hw.wiphy,
 					  local->monitor_sdata);
@@ -2624,20 +2720,53 @@
 
 	/* Finally also reconfigure all the BSS information */
 	list_for_each_entry(sdata, &local->interfaces, list) {
+		/* common change flags for all interface types - link only */
+		u64 changed = BSS_CHANGED_ERP_CTS_PROT |
+			      BSS_CHANGED_ERP_PREAMBLE |
+			      BSS_CHANGED_ERP_SLOT |
+			      BSS_CHANGED_HT |
+			      BSS_CHANGED_BASIC_RATES |
+			      BSS_CHANGED_BEACON_INT |
+			      BSS_CHANGED_BSSID |
+			      BSS_CHANGED_CQM |
+			      BSS_CHANGED_QOS |
+			      BSS_CHANGED_TXPOWER |
+			      BSS_CHANGED_MCAST_RATE;
+		struct ieee80211_link_data *link = NULL;
 		unsigned int link_id;
-		u32 changed;
+		u32 active_links = 0;
 
 		if (!ieee80211_sdata_running(sdata))
 			continue;
 
-		sdata_lock(sdata);
+		if (ieee80211_vif_is_mld(&sdata->vif)) {
+			struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS] = {
+				[0] = &sdata->vif.bss_conf,
+			};
+
+			if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+				/* start with a single active link */
+				active_links = sdata->vif.active_links;
+				link_id = ffs(active_links) - 1;
+				sdata->vif.active_links = BIT(link_id);
+			}
+
+			drv_change_vif_links(local, sdata, 0,
+					     sdata->vif.active_links,
+					     old);
+		}
+
 		for (link_id = 0;
 		     link_id < ARRAY_SIZE(sdata->vif.link_conf);
 		     link_id++) {
-			struct ieee80211_link_data *link;
+			if (ieee80211_vif_is_mld(&sdata->vif) &&
+			    !(sdata->vif.active_links & BIT(link_id)))
+				continue;
 
 			link = sdata_dereference(sdata->link[link_id], sdata);
-			if (link)
+			if (!link)
+				continue;
+
 				ieee80211_assign_chanctx(local, sdata, link);
 		}
 
@@ -2658,27 +2787,16 @@
 					    &sdata->deflink.tx_conf[i]);
 			break;
 		}
-		sdata_unlock(sdata);
-
-		/* common change flags for all interface types */
-		changed = BSS_CHANGED_ERP_CTS_PROT |
-			  BSS_CHANGED_ERP_PREAMBLE |
-			  BSS_CHANGED_ERP_SLOT |
-			  BSS_CHANGED_HT |
-			  BSS_CHANGED_BASIC_RATES |
-			  BSS_CHANGED_BEACON_INT |
-			  BSS_CHANGED_BSSID |
-			  BSS_CHANGED_CQM |
-			  BSS_CHANGED_QOS |
-			  BSS_CHANGED_IDLE |
-			  BSS_CHANGED_TXPOWER |
-			  BSS_CHANGED_MCAST_RATE;
 
 		if (sdata->vif.bss_conf.mu_mimo_owner)
 			changed |= BSS_CHANGED_MU_GROUPS;
 
+		if (!ieee80211_vif_is_mld(&sdata->vif))
+			changed |= BSS_CHANGED_IDLE;
+
 		switch (sdata->vif.type) {
 		case NL80211_IFTYPE_STATION:
+			if (!ieee80211_vif_is_mld(&sdata->vif)) {
 			changed |= BSS_CHANGED_ASSOC |
 				   BSS_CHANGED_ARP_FILTER |
 				   BSS_CHANGED_PS;
@@ -2691,9 +2809,20 @@
 			    sdata->vif.bss_conf.protected_keep_alive)
 				changed |= BSS_CHANGED_KEEP_ALIVE;
 
-			sdata_lock(sdata);
-			ieee80211_bss_info_change_notify(sdata, changed);
-			sdata_unlock(sdata);
+				if (sdata->vif.bss_conf.eht_puncturing)
+					changed |= BSS_CHANGED_EHT_PUNCTURING;
+
+				ieee80211_bss_info_change_notify(sdata,
+								 changed);
+			} else if (!WARN_ON(!link)) {
+				ieee80211_link_info_change_notify(sdata, link,
+								  changed);
+				changed = BSS_CHANGED_ASSOC |
+					  BSS_CHANGED_IDLE |
+					  BSS_CHANGED_PS |
+					  BSS_CHANGED_ARP_FILTER;
+				ieee80211_vif_cfg_change_notify(sdata, changed);
+			}
 			break;
 		case NL80211_IFTYPE_OCB:
 			changed |= BSS_CHANGED_OCB;
@@ -2703,7 +2832,13 @@
 			changed |= BSS_CHANGED_IBSS;
 			fallthrough;
 		case NL80211_IFTYPE_AP:
-			changed |= BSS_CHANGED_SSID | BSS_CHANGED_P2P_PS;
+			changed |= BSS_CHANGED_P2P_PS;
+
+			if (ieee80211_vif_is_mld(&sdata->vif))
+				ieee80211_vif_cfg_change_notify(sdata,
+								BSS_CHANGED_SSID);
+			else
+				changed |= BSS_CHANGED_SSID;
 
 			if (sdata->vif.bss_conf.ftm_responder == 1 &&
 			    wiphy_ext_feature_isset(sdata->local->hw.wiphy,
@@ -2713,6 +2848,13 @@
 			if (sdata->vif.type == NL80211_IFTYPE_AP) {
 				changed |= BSS_CHANGED_AP_PROBE_RESP;
 
+				if (ieee80211_vif_is_mld(&sdata->vif)) {
+					ieee80211_reconfig_ap_links(local,
+								    sdata,
+								    changed);
+					break;
+				}
+
 				if (rcu_access_pointer(sdata->deflink.u.ap.beacon))
 					drv_start_ap(local, sdata,
 						     sdata->deflink.conf);
@@ -2745,6 +2887,9 @@
 			WARN_ON(1);
 			break;
 		}
+
+		if (active_links)
+			ieee80211_set_active_links(&sdata->vif, active_links);
 	}
 
 	ieee80211_recalc_ps(local);
@@ -2771,7 +2916,6 @@
 		if (!ieee80211_sdata_running(sdata))
 			continue;
 
-		sdata_lock(sdata);
 		switch (sdata->vif.type) {
 		case NL80211_IFTYPE_AP_VLAN:
 		case NL80211_IFTYPE_AP:
@@ -2780,7 +2924,6 @@
 		default:
 			break;
 		}
-		sdata_unlock(sdata);
 	}
 
 	/* add back keys */
@@ -2788,11 +2931,10 @@
 		ieee80211_reenable_keys(sdata);
 
 	/* Reconfigure sched scan if it was interrupted by FW restart */
-	mutex_lock(&local->mtx);
 	sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata,
-						lockdep_is_held(&local->mtx));
+						lockdep_is_held(&local->hw.wiphy->mtx));
 	sched_scan_req = rcu_dereference_protected(local->sched_scan_req,
-						lockdep_is_held(&local->mtx));
+						lockdep_is_held(&local->hw.wiphy->mtx));
 	if (sched_scan_sdata && sched_scan_req)
 		/*
 		 * Sched scan stopped, but we don't want to report it. Instead,
@@ -2808,7 +2950,6 @@
 			RCU_INIT_POINTER(local->sched_scan_req, NULL);
 			sched_scan_stopped = true;
 		}
-	mutex_unlock(&local->mtx);
 
 	if (sched_scan_stopped)
 		cfg80211_sched_scan_stopped_locked(local->hw.wiphy, 0);
@@ -2829,16 +2970,12 @@
 	 * are active. This is really a workaround though.
 	 */
 	if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) {
-		mutex_lock(&local->sta_mtx);
-
 		list_for_each_entry(sta, &local->sta_list, list) {
 			if (!local->resuming)
 				ieee80211_sta_tear_down_BA_sessions(
 						sta, AGG_STOP_LOCAL_REQUEST);
 			clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
 		}
-
-		mutex_unlock(&local->sta_mtx);
 	}
 
 	/*
@@ -2854,13 +2991,11 @@
 		barrier();
 
 		/* Restart deferred ROCs */
-		mutex_lock(&local->mtx);
 		ieee80211_start_next_roc(local);
-		mutex_unlock(&local->mtx);
 
 		/* Requeue all works */
 		list_for_each_entry(sdata, &local->interfaces, list)
-			ieee80211_queue_work(&local->hw, &sdata->work);
+			wiphy_work_queue(local->hw.wiphy, &sdata->work);
 	}
 
 	ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
@@ -2917,6 +3052,8 @@
 	sdata = vif_to_sdata(vif);
 	local = sdata->local;
 
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	if (WARN_ON(flag & IEEE80211_SDATA_DISCONNECT_RESUME &&
 		    !local->resuming))
 		return;
@@ -2930,10 +3067,8 @@
 
 	sdata->flags |= flag;
 
-	mutex_lock(&local->key_mtx);
 	list_for_each_entry(key, &sdata->key_list, list)
 		key->flags |= KEY_FLAG_TAINTED;
-	mutex_unlock(&local->key_mtx);
 }
 
 void ieee80211_hw_restart_disconnect(struct ieee80211_vif *vif)
@@ -2955,10 +3090,10 @@
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	struct ieee80211_chanctx *chanctx;
 
-	mutex_lock(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	chanctx_conf = rcu_dereference_protected(link->conf->chanctx_conf,
-						 lockdep_is_held(&local->chanctx_mtx));
+						 lockdep_is_held(&local->hw.wiphy->mtx));
 
 	/*
 	 * This function can be called from a work, thus it may be possible
@@ -2967,12 +3102,10 @@
 	 * So nothing should be done in such case.
 	 */
 	if (!chanctx_conf)
-		goto unlock;
+		return;
 
 	chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
 	ieee80211_recalc_smps_chanctx(local, chanctx);
- unlock:
-	mutex_unlock(&local->chanctx_mtx);
 }
 
 void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata,
@@ -2983,7 +3116,7 @@
 	struct ieee80211_chanctx *chanctx;
 	int i;
 
-	mutex_lock(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	for (i = 0; i < ARRAY_SIZE(sdata->vif.link_conf); i++) {
 		struct ieee80211_bss_conf *bss_conf;
@@ -2999,9 +3132,9 @@
 		}
 
 		chanctx_conf = rcu_dereference_protected(bss_conf->chanctx_conf,
-							 lockdep_is_held(&local->chanctx_mtx));
+							 lockdep_is_held(&local->hw.wiphy->mtx));
 		/*
-		 * Since we hold the chanctx_mtx (checked above)
+		 * Since we hold the wiphy mutex (checked above)
 		 * we can take the chanctx_conf pointer out of the
 		 * RCU critical section, it cannot go away without
 		 * the mutex. Just the way we reached it could - in
@@ -3011,14 +3144,12 @@
 		rcu_read_unlock();
 
 		if (!chanctx_conf)
-			goto unlock;
+			return;
 
 		chanctx = container_of(chanctx_conf, struct ieee80211_chanctx,
 				       conf);
 		ieee80211_recalc_chanctx_min_def(local, chanctx, NULL);
 	}
- unlock:
-	mutex_unlock(&local->chanctx_mtx);
 }
 
 size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset)
@@ -3706,12 +3837,10 @@
 	return true;
 }
 
-void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation *eht_oper,
+void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation_info *info,
 				bool support_160, bool support_320,
 				struct cfg80211_chan_def *chandef)
 {
-	struct ieee80211_eht_operation_info *info = (void *)eht_oper->optional;
-
 	chandef->center_freq1 =
 		ieee80211_channel_to_frequency(info->ccfs0,
 					       chandef->chan->band);
@@ -3801,10 +3930,8 @@
 	}
 
 	eht_cap = ieee80211_get_eht_iftype_cap(sband, iftype);
-	if (!eht_cap) {
-		sdata_info(sdata, "Missing iftype sband data/EHT cap");
+	if (!eht_cap)
 		eht_oper = NULL;
-	}
 
 	he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
 
@@ -3882,8 +4009,9 @@
 		support_320 =
 			eht_phy_cap & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
 
-		ieee80211_chandef_eht_oper(eht_oper, support_160,
-					   support_320, &he_chandef);
+		ieee80211_chandef_eht_oper((const void *)eht_oper->optional,
+					   support_160, support_320,
+					   &he_chandef);
 	}
 
 	if (!cfg80211_chandef_valid(&he_chandef)) {
@@ -3942,7 +4070,6 @@
 			     const u8 *srates, int srates_len, u32 *rates)
 {
 	u32 rate_flags = ieee80211_chanwidth_rate_flags(width);
-	int shift = ieee80211_chanwidth_get_shift(width);
 	struct ieee80211_rate *br;
 	int brate, rate, i, j, count = 0;
 
@@ -3956,7 +4083,7 @@
 			if ((rate_flags & br->flags) != rate_flags)
 				continue;
 
-			brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5);
+			brate = DIV_ROUND_UP(br->bitrate, 5);
 			if (brate == rate) {
 				*rates |= BIT(j);
 				count++;
@@ -3973,12 +4100,11 @@
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
-	int rate, shift;
+	int rate;
 	u8 i, rates, *pos;
 	u32 basic_rates = sdata->vif.bss_conf.basic_rates;
 	u32 rate_flags;
 
-	shift = ieee80211_vif_get_shift(&sdata->vif);
 	rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
 	sband = local->hw.wiphy->bands[band];
 	rates = 0;
@@ -4003,8 +4129,7 @@
 
 		if (need_basic && basic_rates & BIT(i))
 			basic = 0x80;
-		rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
-				    5 * (1 << shift));
+		rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
 		*pos++ = basic | (u8) rate;
 	}
 
@@ -4017,13 +4142,12 @@
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
-	int rate, shift;
+	int rate;
 	u8 i, exrates, *pos;
 	u32 basic_rates = sdata->vif.bss_conf.basic_rates;
 	u32 rate_flags;
 
 	rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
-	shift = ieee80211_vif_get_shift(&sdata->vif);
 
 	sband = local->hw.wiphy->bands[band];
 	exrates = 0;
@@ -4052,8 +4176,7 @@
 				continue;
 			if (need_basic && basic_rates & BIT(i))
 				basic = 0x80;
-			rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
-					    5 * (1 << shift));
+			rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
 			*pos++ = basic | (u8) rate;
 		}
 	}
@@ -4097,6 +4220,8 @@
  * This function calculates the RX timestamp at the given MPDU offset, taking
  * into account what the RX timestamp was. An offset of 0 will just normalize
  * the timestamp to TSF at beginning of MPDU reception.
+ *
+ * Returns: the calculated timestamp
  */
 u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
 				     struct ieee80211_rx_status *status,
@@ -4212,25 +4337,13 @@
 		fallthrough;
 	case RX_ENC_LEGACY: {
 		struct ieee80211_supported_band *sband;
-		int shift = 0;
-		int bitrate;
-
-		switch (status->bw) {
-		case RATE_INFO_BW_10:
-			shift = 1;
-			break;
-		case RATE_INFO_BW_5:
-			shift = 2;
-			break;
-		}
 
 		sband = local->hw.wiphy->bands[status->band];
-		bitrate = sband->bitrates[status->rate_idx].bitrate;
-		ri.legacy = DIV_ROUND_UP(bitrate, (1 << shift));
+		ri.legacy = sband->bitrates[status->rate_idx].bitrate;
 
 		if (status->flag & RX_FLAG_MACTIME_PLCP_START) {
 			if (status->band == NL80211_BAND_5GHZ) {
-				ts += 20 << shift;
+				ts += 20;
 				mpdu_offset += 2;
 			} else if (status->enc_flags & RX_ENC_FLAG_SHORTPRE) {
 				ts += 96;
@@ -4263,16 +4376,15 @@
 	struct ieee80211_sub_if_data *sdata;
 	struct cfg80211_chan_def chandef;
 
-	/* for interface list, to avoid linking iflist_mtx and chanctx_mtx */
 	lockdep_assert_wiphy(local->hw.wiphy);
 
-	mutex_lock(&local->mtx);
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		/* it might be waiting for the local->mtx, but then
 		 * by the time it gets it, sdata->wdev.cac_started
 		 * will no longer be true
 		 */
-		cancel_delayed_work(&sdata->deflink.dfs_cac_timer_work);
+		wiphy_delayed_work_cancel(local->hw.wiphy,
+					  &sdata->deflink.dfs_cac_timer_work);
 
 		if (sdata->wdev.cac_started) {
 			chandef = sdata->vif.bss_conf.chandef;
@@ -4283,10 +4395,10 @@
 					   GFP_KERNEL);
 		}
 	}
-	mutex_unlock(&local->mtx);
 }
 
-void ieee80211_dfs_radar_detected_work(struct work_struct *work)
+void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
+				       struct wiphy_work *work)
 {
 	struct ieee80211_local *local =
 		container_of(work, struct ieee80211_local, radar_detected_work);
@@ -4294,7 +4406,8 @@
 	struct ieee80211_chanctx *ctx;
 	int num_chanctx = 0;
 
-	mutex_lock(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
+
 	list_for_each_entry(ctx, &local->chanctx_list, list) {
 		if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER)
 			continue;
@@ -4302,11 +4415,8 @@
 		num_chanctx++;
 		chandef = ctx->conf.def;
 	}
-	mutex_unlock(&local->chanctx_mtx);
 
-	wiphy_lock(local->hw.wiphy);
 	ieee80211_dfs_cac_cancel(local);
-	wiphy_unlock(local->hw.wiphy);
 
 	if (num_chanctx > 1)
 		/* XXX: multi-channel is not supported yet */
@@ -4321,7 +4431,7 @@
 
 	trace_api_radar_detected(local);
 
-	schedule_work(&local->radar_detected_work);
+	wiphy_work_queue(hw->wiphy, &local->radar_detected_work);
 }
 EXPORT_SYMBOL(ieee80211_radar_detected);
 
@@ -4699,13 +4809,39 @@
 	ps->dtim_count = dtim_count;
 }
 
+void ieee80211_force_dtim(struct ieee80211_vif *vif,
+			  unsigned int dtim_count)
+{
+	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	u8 dtim_period = sdata->vif.bss_conf.dtim_period;
+	struct ps_data *ps;
+
+	if (sdata->vif.type == NL80211_IFTYPE_AP ||
+	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+		if (!sdata->bss)
+			return;
+
+		ps = &sdata->bss->ps;
+	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
+		ps = &sdata->u.mesh.ps;
+	} else {
+		return;
+	}
+
+	if (WARN_ON_ONCE(dtim_count >= dtim_period))
+		return;
+
+	ps->dtim_count = dtim_count;
+}
+EXPORT_SYMBOL(ieee80211_force_dtim);
+
 static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
 					 struct ieee80211_chanctx *ctx)
 {
 	struct ieee80211_link_data *link;
 	u8 radar_detect = 0;
 
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (WARN_ON(ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED))
 		return 0;
@@ -4746,7 +4882,7 @@
 		.radar_detect = radar_detect,
 	};
 
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (WARN_ON(hweight32(radar_detect) > 1))
 		return -EINVAL;
@@ -4836,7 +4972,7 @@
 	int err;
 	struct iface_combination_params params = {0};
 
-	lockdep_assert_held(&local->chanctx_mtx);
+	lockdep_assert_wiphy(local->hw.wiphy);
 
 	list_for_each_entry(ctx, &local->chanctx_list, list) {
 		if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
@@ -5048,31 +5184,3 @@
 
 	return pos;
 }
-
-void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos, u8 frag_id)
-{
-	unsigned int elem_len;
-
-	if (!len_pos)
-		return;
-
-	elem_len = skb->data + skb->len - len_pos - 1;
-
-	while (elem_len > 255) {
-		/* this one is 255 */
-		*len_pos = 255;
-		/* remaining data gets smaller */
-		elem_len -= 255;
-		/* make space for the fragment ID/len in SKB */
-		skb_put(skb, 2);
-		/* shift back the remaining data to place fragment ID/len */
-		memmove(len_pos + 255 + 3, len_pos + 255 + 1, elem_len);
-		/* place the fragment ID */
-		len_pos += 255 + 1;
-		*len_pos = frag_id;
-		/* and point to fragment length to update later */
-		len_pos++;
-	}
-
-	*len_pos = elem_len;
-}
diff -ruw linux-6.4/net/mac80211/vht.c linux-6.4-fbx/net/mac80211/vht.c
--- linux-6.4/net/mac80211/vht.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/vht.c	2024-01-19 17:01:19.913848341 +0100
@@ -4,7 +4,7 @@
  *
  * Portions of this file
  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2022 Intel Corporation
+ * Copyright (C) 2018 - 2023 Intel Corporation
  */
 
 #include <linux/ieee80211.h>
@@ -116,12 +116,14 @@
 ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
 				    struct ieee80211_supported_band *sband,
 				    const struct ieee80211_vht_cap *vht_cap_ie,
+				    const struct ieee80211_vht_cap *vht_cap_ie2,
 				    struct link_sta_info *link_sta)
 {
 	struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap;
 	struct ieee80211_sta_vht_cap own_cap;
 	u32 cap_info, i;
 	bool have_80mhz;
+	u32 mpdu_len;
 
 	memset(vht_cap, 0, sizeof(*vht_cap));
 
@@ -230,9 +232,11 @@
 	       sizeof(struct ieee80211_vht_mcs_info));
 
 	/* copy EXT_NSS_BW Support value or remove the capability */
-	if (ieee80211_hw_check(&sdata->local->hw, SUPPORTS_VHT_EXT_NSS_BW))
+	if (ieee80211_hw_check(&sdata->local->hw, SUPPORTS_VHT_EXT_NSS_BW)) {
+		vht_cap->cap |= cap_info &
+			IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
 		vht_cap->cap |= (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK);
-	else
+	} else
 		vht_cap->vht_mcs.tx_highest &=
 			~cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
 
@@ -316,12 +320,23 @@
 	}
 
 	link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta);
+	link_sta->pub->sta_max_bandwidth = link_sta->cur_max_bandwidth;
+
+	/*
+	 * Work around the Cisco 9115 FW 17.3 bug by taking the min of
+	 * both reported MPDU lengths.
+	 */
+	mpdu_len = vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK;
+	if (vht_cap_ie2)
+		mpdu_len = min_t(u32, mpdu_len,
+				 le32_get_bits(vht_cap_ie2->vht_cap_info,
+					       IEEE80211_VHT_CAP_MAX_MPDU_MASK));
 
 	/*
 	 * FIXME - should the amsdu len be per link? store per link
 	 * and maintain a minimum?
 	 */
-	switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
+	switch (mpdu_len) {
 	case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
 		link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454;
 		break;
@@ -678,6 +693,7 @@
 		break;
 	}
 
+	link_sta->pub->sta_max_bandwidth = link_sta->cur_max_bandwidth;
 	new_bw = ieee80211_sta_cur_vht_bw(link_sta);
 	if (new_bw != link_sta->pub->bandwidth) {
 		link_sta->pub->bandwidth = new_bw;
diff -ruw linux-6.4/net/mac80211/wep.c linux-6.4-fbx/net/mac80211/wep.c
--- linux-6.4/net/mac80211/wep.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/wep.c	2023-11-07 13:38:44.094257566 +0100
@@ -3,6 +3,7 @@
  * Software WEP encryption implementation
  * Copyright 2002, Jouni Malinen <jkmaline@cc.hut.fi>
  * Copyright 2003, Instant802 Networks, Inc.
+ * Copyright (C) 2023 Intel Corporation
  */
 
 #include <linux/netdevice.h>
@@ -250,18 +251,18 @@
 
 	if (!(status->flag & RX_FLAG_DECRYPTED)) {
 		if (skb_linearize(rx->skb))
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_OOM;
 		if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key))
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_WEP_DEC_FAIL;
 	} else if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
 		if (!pskb_may_pull(rx->skb, ieee80211_hdrlen(fc) +
 					    IEEE80211_WEP_IV_LEN))
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_NO_IV;
 		ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
 		/* remove ICV */
 		if (!(status->flag & RX_FLAG_ICV_STRIPPED) &&
 		    pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN))
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_NO_ICV;
 	}
 
 	return RX_CONTINUE;
diff -ruw linux-6.4/net/mac80211/wpa.c linux-6.4-fbx/net/mac80211/wpa.c
--- linux-6.4/net/mac80211/wpa.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/mac80211/wpa.c	2023-11-07 13:38:44.094257566 +0100
@@ -3,7 +3,7 @@
  * Copyright 2002-2004, Instant802 Networks, Inc.
  * Copyright 2008, Jouni Malinen <j@w1.fi>
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2020-2022 Intel Corporation
+ * Copyright (C) 2020-2023 Intel Corporation
  */
 
 #include <linux/netdevice.h>
@@ -15,7 +15,7 @@
 #include <asm/unaligned.h>
 #include <net/mac80211.h>
 #include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/utils.h>
 
 #include "ieee80211_i.h"
 #include "michael.h"
@@ -142,7 +142,7 @@
 		 * group keys and only the AP is sending real multicast
 		 * frames in the BSS.
 		 */
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_AP_RX_GROUPCAST;
 	}
 
 	if (status->flag & RX_FLAG_MMIC_ERROR)
@@ -150,10 +150,10 @@
 
 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
 	if (skb->len < hdrlen + MICHAEL_MIC_LEN)
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_SHORT_MMIC;
 
 	if (skb_linearize(rx->skb))
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_OOM;
 	hdr = (void *)skb->data;
 
 	data = skb->data + hdrlen;
@@ -188,7 +188,7 @@
 				     NL80211_KEYTYPE_PAIRWISE,
 				     rx->key ? rx->key->conf.keyidx : -1,
 				     NULL, GFP_ATOMIC);
-	return RX_DROP_UNUSABLE;
+	return RX_DROP_U_MMIC_FAIL;
 }
 
 static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
@@ -276,11 +276,11 @@
 		return RX_CONTINUE;
 
 	if (!rx->sta || skb->len - hdrlen < 12)
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_SHORT_TKIP;
 
 	/* it may be possible to optimize this a bit more */
 	if (skb_linearize(rx->skb))
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_OOM;
 	hdr = (void *)skb->data;
 
 	/*
@@ -298,7 +298,7 @@
 					  &rx->tkip.iv32,
 					  &rx->tkip.iv16);
 	if (res != TKIP_DECRYPT_OK)
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_TKIP_FAIL;
 
 	/* Trim ICV */
 	if (!(status->flag & RX_FLAG_ICV_STRIPPED))
@@ -523,12 +523,12 @@
 
 	if (status->flag & RX_FLAG_DECRYPTED) {
 		if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_CCMP_HDR_LEN))
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_SHORT_CCMP;
 		if (status->flag & RX_FLAG_MIC_STRIPPED)
 			mic_len = 0;
 	} else {
 		if (skb_linearize(rx->skb))
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_OOM;
 	}
 
 	/* reload hdr - skb might have been reallocated */
@@ -536,7 +536,7 @@
 
 	data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
 	if (!rx->sta || data_len < 0)
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_SHORT_CCMP;
 
 	if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
 		int res;
@@ -574,7 +574,7 @@
 
 	/* Remove CCMP header and MIC */
 	if (pskb_trim(skb, skb->len - mic_len))
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_SHORT_CCMP_MIC;
 	memmove(skb->data + IEEE80211_CCMP_HDR_LEN, skb->data, hdrlen);
 	skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
 
@@ -719,12 +719,12 @@
 
 	if (status->flag & RX_FLAG_DECRYPTED) {
 		if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_GCMP_HDR_LEN))
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_SHORT_GCMP;
 		if (status->flag & RX_FLAG_MIC_STRIPPED)
 			mic_len = 0;
 	} else {
 		if (skb_linearize(rx->skb))
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_OOM;
 	}
 
 	/* reload hdr - skb might have been reallocated */
@@ -732,7 +732,7 @@
 
 	data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len;
 	if (!rx->sta || data_len < 0)
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_SHORT_GCMP;
 
 	if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
 		int res;
@@ -771,7 +771,7 @@
 
 	/* Remove GCMP header and MIC */
 	if (pskb_trim(skb, skb->len - mic_len))
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_SHORT_GCMP_MIC;
 	memmove(skb->data + IEEE80211_GCMP_HDR_LEN, skb->data, hdrlen);
 	skb_pull(skb, IEEE80211_GCMP_HDR_LEN);
 
@@ -924,7 +924,7 @@
 	/* management frames are already linear */
 
 	if (skb->len < 24 + sizeof(*mmie))
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_SHORT_CMAC;
 
 	mmie = (struct ieee80211_mmie *)
 		(skb->data + skb->len - sizeof(*mmie));
@@ -974,13 +974,13 @@
 	/* management frames are already linear */
 
 	if (skb->len < 24 + sizeof(*mmie))
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_SHORT_CMAC256;
 
 	mmie = (struct ieee80211_mmie_16 *)
 		(skb->data + skb->len - sizeof(*mmie));
 	if (mmie->element_id != WLAN_EID_MMIE ||
 	    mmie->length != sizeof(*mmie) - 2)
-		return RX_DROP_UNUSABLE; /* Invalid MMIE */
+		return RX_DROP_U_BAD_MMIE; /* Invalid MMIE */
 
 	bip_ipn_swap(ipn, mmie->sequence_number);
 
@@ -1073,7 +1073,7 @@
 	/* management frames are already linear */
 
 	if (skb->len < 24 + sizeof(*mmie))
-		return RX_DROP_UNUSABLE;
+		return RX_DROP_U_SHORT_GMAC;
 
 	mmie = (struct ieee80211_mmie_16 *)
 		(skb->data + skb->len - sizeof(*mmie));
@@ -1097,7 +1097,7 @@
 
 		mic = kmalloc(GMAC_MIC_LEN, GFP_ATOMIC);
 		if (!mic)
-			return RX_DROP_UNUSABLE;
+			return RX_DROP_U_OOM;
 		if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
 				       skb->data + 24, skb->len - 24,
 				       mic) < 0 ||
diff -ruw linux-6.4/net/netfilter/Kconfig linux-6.4-fbx/net/netfilter/Kconfig
--- linux-6.4/net/netfilter/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/netfilter/Kconfig	2023-05-22 20:06:45.643891041 +0200
@@ -364,6 +364,7 @@
 config NF_CONNTRACK_SIP
 	tristate "SIP protocol support"
 	default m if NETFILTER_ADVANCED=n
+	select CRYPTO_LIB_SHA256
 	help
 	  SIP is an application-layer control protocol that can establish,
 	  modify, and terminate multimedia sessions (conferences) such as
diff -ruw linux-6.4/net/netfilter/nf_conntrack_core.c linux-6.4-fbx/net/netfilter/nf_conntrack_core.c
--- linux-6.4/net/netfilter/nf_conntrack_core.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/netfilter/nf_conntrack_core.c	2023-06-27 11:47:16.167869662 +0200
@@ -577,12 +577,29 @@
 #endif
 }
 
+#ifdef CONFIG_IP_FFN
+extern void ip_ffn_ct_destroy(struct nf_conn *ct);
+#endif
+
+#ifdef CONFIG_IPV6_FFN
+extern void ipv6_ffn_ct_destroy(struct nf_conn *ct);
+#endif
+
 void nf_ct_destroy(struct nf_conntrack *nfct)
 {
 	struct nf_conn *ct = (struct nf_conn *)nfct;
 
 	WARN_ON(refcount_read(&nfct->use) != 0);
 
+#ifdef CONFIG_IP_FFN
+	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num == AF_INET)
+		ip_ffn_ct_destroy(ct);
+#endif
+#ifdef CONFIG_IPV6_FFN
+	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num == AF_INET6)
+		ipv6_ffn_ct_destroy(ct);
+#endif
+
 	if (unlikely(nf_ct_is_template(ct))) {
 		nf_ct_tmpl_free(ct);
 		return;
@@ -1784,7 +1801,7 @@
 		}
 		spin_unlock_bh(&nf_conntrack_expect_lock);
 	}
-	if (!exp && tmpl)
+	if (!exp)
 		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
 
 	/* Other CPU might have obtained a pointer to this object before it was
@@ -2063,6 +2080,10 @@
 	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
 	if (ct->master || (help && !hlist_empty(&help->expectations)))
 		return;
+
+	rcu_read_lock();
+	__nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
 
@@ -2804,6 +2825,7 @@
 	nf_conntrack_acct_pernet_init(net);
 	nf_conntrack_tstamp_pernet_init(net);
 	nf_conntrack_ecache_pernet_init(net);
+	nf_conntrack_helper_pernet_init(net);
 	nf_conntrack_proto_pernet_init(net);
 
 	return 0;
diff -ruw linux-6.4/net/netfilter/nf_conntrack_ftp.c linux-6.4-fbx/net/netfilter/nf_conntrack_ftp.c
--- linux-6.4/net/netfilter/nf_conntrack_ftp.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/netfilter/nf_conntrack_ftp.c	2023-05-22 20:06:45.671891785 +0200
@@ -27,6 +27,10 @@
 #include <linux/netfilter/nf_conntrack_ftp.h>
 
 #define HELPER_NAME "ftp"
+#if defined(CONFIG_FREEBOX_BRIDGE) || defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/fbxbridge.h>
+#endif
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
@@ -397,6 +401,17 @@
 	if (unlikely(skb_linearize(skb)))
 		return NF_DROP;
 
+#if defined(CONFIG_FREEBOX_BRIDGE) || defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+	if (!ct_ftp_info->is_fbxbridge && skb->dev->fbx_bridge) {
+		struct fbxbridge *fbxbr;
+
+		fbxbr = skb->dev->fbx_bridge;
+		ct_ftp_info->is_fbxbridge = 1;
+		ct_ftp_info->fbxbridge_remote = ntohl(fbxbr->br_remote_ipaddr);
+		ct_ftp_info->fbxbridge_wan = fbxbr->wan_ipaddr;
+	}
+#endif
+
 	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
 	if (th == NULL)
 		return NF_ACCEPT;
@@ -483,6 +498,50 @@
 	 * Doesn't matter unless NAT is happening.  */
 	daddr = &ct->tuplehash[!dir].tuple.dst.u3;
 
+#if defined(CONFIG_FREEBOX_BRIDGE) || defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+	if (ct_ftp_info->is_fbxbridge &&
+	    search[dir][i].ftptype == NF_CT_FTP_PORT) {
+		unsigned long orig_ip_addr;
+		unsigned short orig_port;
+		char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")];
+		unsigned int len;
+		__be32 addr;
+
+		/* kludge: if  we are here,  then this is a  local pkt
+		 * that has  gone through internal  fbxbridge snat.
+		 *
+		 * If we see a port  command, then we mangle packet to
+		 * change  ip  address  given  to  the  remote  bridge
+		 * address */
+
+	/* check that the address in the packet is the one fbxbridge
+	 * changed */
+		orig_ip_addr = cmd.u3.ip;
+		if (orig_ip_addr != ct_ftp_info->fbxbridge_wan)
+			goto donttouch;
+
+		/* now mangle the remote address */
+		orig_port = cmd.u.tcp.port;
+		addr = ct_ftp_info->fbxbridge_remote;
+		len = sprintf(buffer, "%u,%u,%u,%u,%u,%u",
+			      ((unsigned char *)&addr)[0],
+			      ((unsigned char *)&addr)[1],
+			      ((unsigned char *)&addr)[2],
+			      ((unsigned char *)&addr)[3],
+			      orig_port >> 8 , orig_port & 0xFF);
+
+		nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
+					 matchlen, buffer, len);
+
+		/* then adjust as if nothing happened */
+		matchlen = len;
+		cmd.u3.ip = ct_ftp_info->fbxbridge_remote;
+	}
+donttouch:
+
+#endif
+
+
 	/* Update the ftp info */
 	if ((cmd.l3num == nf_ct_l3num(ct)) &&
 	    memcmp(&cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
diff -ruw linux-6.4/net/netfilter/nf_conntrack_helper.c linux-6.4-fbx/net/netfilter/nf_conntrack_helper.c
--- linux-6.4/net/netfilter/nf_conntrack_helper.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/netfilter/nf_conntrack_helper.c	2023-05-22 20:06:45.671891785 +0200
@@ -37,6 +37,11 @@
 EXPORT_SYMBOL_GPL(nf_ct_helper_hsize);
 static unsigned int nf_ct_helper_count __read_mostly;
 
+static bool nf_ct_auto_assign_helper __read_mostly = true;
+module_param_named(nf_conntrack_helper, nf_ct_auto_assign_helper, bool, 0644);
+MODULE_PARM_DESC(nf_conntrack_helper,
+		 "Enable automatic conntrack helper assignment (default 1)");
+
 static DEFINE_MUTEX(nf_ct_nat_helpers_mutex);
 static struct list_head nf_ct_nat_helpers __read_mostly;
 
@@ -48,6 +53,24 @@
 		(__force __u16)tuple->src.u.all) % nf_ct_helper_hsize;
 }
 
+static struct nf_conntrack_helper *
+__nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
+{
+	struct nf_conntrack_helper *helper;
+	struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
+	unsigned int h;
+
+	if (!nf_ct_helper_count)
+		return NULL;
+
+	h = helper_hash(tuple);
+	hlist_for_each_entry_rcu(helper, &nf_ct_helper_hash[h], hnode) {
+		if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask))
+			return helper;
+	}
+	return NULL;
+}
+
 struct nf_conntrack_helper *
 __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
 {
@@ -188,11 +211,33 @@
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add);
 
+static struct nf_conntrack_helper *
+nf_ct_lookup_helper(struct nf_conn *ct, struct net *net)
+{
+	struct nf_conntrack_net *cnet = nf_ct_pernet(net);
+
+	if (!cnet->sysctl_auto_assign_helper) {
+		if (cnet->auto_assign_helper_warned)
+			return NULL;
+		if (!__nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple))
+			return NULL;
+		pr_info("nf_conntrack: default automatic helper assignment "
+			"has been turned off for security reasons and CT-based "
+			"firewall rule not found. Use the iptables CT target "
+			"to attach helpers instead.\n");
+		cnet->auto_assign_helper_warned = true;
+		return NULL;
+	}
+
+	return __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+}
+
 int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
 			      gfp_t flags)
 {
 	struct nf_conntrack_helper *helper = NULL;
 	struct nf_conn_help *help;
+	struct net *net = nf_ct_net(ct);
 
 	/* We already got a helper explicitly attached. The function
 	 * nf_conntrack_alter_reply - in case NAT is in use - asks for looking
@@ -203,22 +248,24 @@
 	if (test_bit(IPS_HELPER_BIT, &ct->status))
 		return 0;
 
-	if (WARN_ON_ONCE(!tmpl))
-		return 0;
-
+	if (tmpl != NULL) {
 	help = nfct_help(tmpl);
 	if (help != NULL) {
 		helper = rcu_dereference(help->helper);
 		set_bit(IPS_HELPER_BIT, &ct->status);
 	}
+	}
 
 	help = nfct_help(ct);
 
 	if (helper == NULL) {
+		helper = nf_ct_lookup_helper(ct, net);
+		if (helper == NULL) {
 		if (help)
 			RCU_INIT_POINTER(help->helper, NULL);
 		return 0;
 	}
+	}
 
 	if (help == NULL) {
 		help = nf_ct_helper_ext_add(ct, flags);
@@ -500,6 +547,19 @@
 }
 EXPORT_SYMBOL_GPL(nf_nat_helper_unregister);
 
+void nf_ct_set_auto_assign_helper_warned(struct net *net)
+{
+	nf_ct_pernet(net)->auto_assign_helper_warned = true;
+}
+EXPORT_SYMBOL_GPL(nf_ct_set_auto_assign_helper_warned);
+
+void nf_conntrack_helper_pernet_init(struct net *net)
+{
+	struct nf_conntrack_net *cnet = nf_ct_pernet(net);
+
+	cnet->sysctl_auto_assign_helper = nf_ct_auto_assign_helper;
+}
+
 int nf_conntrack_helper_init(void)
 {
 	nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
diff -ruw linux-6.4/net/netfilter/nf_conntrack_netlink.c linux-6.4-fbx/net/netfilter/nf_conntrack_netlink.c
--- linux-6.4/net/netfilter/nf_conntrack_netlink.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/netfilter/nf_conntrack_netlink.c	2023-05-22 20:06:45.675891892 +0200
@@ -2297,6 +2297,11 @@
 			ct->status |= IPS_HELPER;
 			RCU_INIT_POINTER(help->helper, helper);
 		}
+	} else {
+		/* try an implicit helper assignation */
+		err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
+		if (err < 0)
+			goto err2;
 	}
 
 	err = ctnetlink_setup_nat(ct, cda);
diff -ruw linux-6.4/net/netfilter/nf_conntrack_proto_tcp.c linux-6.4-fbx/net/netfilter/nf_conntrack_proto_tcp.c
--- linux-6.4/net/netfilter/nf_conntrack_proto_tcp.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/netfilter/nf_conntrack_proto_tcp.c	2023-05-22 20:06:45.675891892 +0200
@@ -1247,6 +1247,7 @@
 		break;
 	}
 
+	if (!ct->proto.tcp.no_window_track) {
 	res = tcp_in_window(ct, dir, index,
 			    skb, dataoff, th, state);
 	switch (res) {
@@ -1260,6 +1261,7 @@
 	case NFCT_TCP_ACCEPT:
 		break;
 	}
+	}
      in_window:
 	/* From now on we have got in-window packets */
 	ct->proto.tcp.last_index = index;
@@ -1333,6 +1335,38 @@
 	return NF_ACCEPT;
 }
 
+#ifdef CONFIG_IP_FFN
+int external_tcpv4_packet(struct nf_conn *ct,
+			  struct sk_buff *skb,
+			  unsigned int dataoff,
+			  enum ip_conntrack_info ctinfo)
+{
+	/* fixme: is it always PRE_ROUTING? */
+	struct nf_hook_state state = {
+		.hook = NF_INET_PRE_ROUTING,
+		.pf = AF_INET,
+		.net = nf_ct_net(ct),
+	};
+	return nf_conntrack_tcp_packet(ct, skb, dataoff, ctinfo, &state);
+}
+#endif
+
+#ifdef CONFIG_IPV6_FFN
+int external_tcpv6_packet(struct nf_conn *ct,
+			  struct sk_buff *skb,
+			  unsigned int dataoff,
+			  enum ip_conntrack_info ctinfo)
+{
+	/* fixme: is it always PRE_ROUTING? */
+	struct nf_hook_state state = {
+		.hook = NF_INET_PRE_ROUTING,
+		.pf = AF_INET6,
+		.net = nf_ct_net(ct),
+	};
+	return nf_conntrack_tcp_packet(ct, skb, dataoff, ctinfo, &state);
+}
+#endif
+
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
diff -ruw linux-6.4/net/netfilter/nf_conntrack_proto_udp.c linux-6.4-fbx/net/netfilter/nf_conntrack_proto_udp.c
--- linux-6.4/net/netfilter/nf_conntrack_proto_udp.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/netfilter/nf_conntrack_proto_udp.c	2023-05-22 20:06:45.675891892 +0200
@@ -129,6 +129,38 @@
 	return NF_ACCEPT;
 }
 
+#ifdef CONFIG_IP_FFN
+int external_udpv4_packet(struct nf_conn *ct,
+			  struct sk_buff *skb,
+			  unsigned int dataoff,
+			  enum ip_conntrack_info ctinfo)
+{
+	/* fixme: is it always PRE_ROUTING? */
+	struct nf_hook_state state = {
+		.hook = NF_INET_PRE_ROUTING,
+		.pf = AF_INET,
+		.net = nf_ct_net(ct),
+	};
+	return nf_conntrack_udp_packet(ct, skb, dataoff, ctinfo, &state);
+}
+#endif
+
+#ifdef CONFIG_IPV6_FFN
+int external_udpv6_packet(struct nf_conn *ct,
+			  struct sk_buff *skb,
+			  unsigned int dataoff,
+			  enum ip_conntrack_info ctinfo)
+{
+	/* fixme: is it always PRE_ROUTING? */
+	struct nf_hook_state state = {
+		.hook = NF_INET_PRE_ROUTING,
+		.pf = AF_INET6,
+		.net = nf_ct_net(ct),
+	};
+	return nf_conntrack_udp_packet(ct, skb, dataoff, ctinfo, &state);
+}
+#endif
+
 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
 static void udplite_error_log(const struct sk_buff *skb,
 			      const struct nf_hook_state *state,
diff -ruw linux-6.4/net/netfilter/nf_conntrack_sip.c linux-6.4-fbx/net/netfilter/nf_conntrack_sip.c
--- linux-6.4/net/netfilter/nf_conntrack_sip.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/netfilter/nf_conntrack_sip.c	2023-05-22 20:06:45.675891892 +0200
@@ -35,6 +35,8 @@
 MODULE_ALIAS("ip_conntrack_sip");
 MODULE_ALIAS_NFCT_HELPER(HELPER_NAME);
 
+#define MAX_CALLS	8
+
 #define MAX_PORTS	8
 static unsigned short ports[MAX_PORTS];
 static unsigned int ports_c;
@@ -825,7 +827,8 @@
 	return found;
 }
 
-static void flush_expectations(struct nf_conn *ct, bool media)
+static void __flush_expectations(struct nf_conn *ct, bool media,
+				 const u8 *cid_hash)
 {
 	struct nf_conn_help *help = nfct_help(ct);
 	struct nf_conntrack_expect *exp;
@@ -835,6 +838,15 @@
 	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
 		if ((exp->class != SIP_EXPECT_SIGNALLING) ^ media)
 			continue;
+		if (media && cid_hash) {
+			const struct nf_ct_sip_expect *exp_sip_info;
+			exp_sip_info = nf_ct_exp_data(exp);
+
+			if (memcmp(exp_sip_info->cid_hash, cid_hash,
+				   sizeof (exp_sip_info->cid_hash)))
+				continue;
+		}
+
 		if (!nf_ct_remove_expect(exp))
 			continue;
 		if (!media)
@@ -843,12 +855,36 @@
 	spin_unlock_bh(&nf_conntrack_expect_lock);
 }
 
+static void flush_sig_expectations(struct nf_conn *ct)
+{
+	return __flush_expectations(ct, false, NULL);
+}
+
+static void flush_media_expectations(struct nf_conn *ct,
+				     const char *msg_data,
+				     unsigned int msg_len)
+{
+	unsigned int matchoff, matchlen;
+	u8 cid_hash[SHA256_DIGEST_SIZE];
+	struct sha256_state s;
+
+	sha256_init(&s);
+	if (ct_sip_get_header(ct, msg_data, 0, msg_len,
+			      SIP_HDR_CALL_ID,
+			      &matchoff, &matchlen) > 0)
+		sha256_update(&s, msg_data + matchoff, matchlen);
+	sha256_final(&s, cid_hash);
+
+	__flush_expectations(ct, true, cid_hash);
+}
+
 static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
 				 unsigned int dataoff,
 				 const char **dptr, unsigned int *datalen,
 				 union nf_inet_addr *daddr, __be16 port,
 				 enum sip_expectation_classes class,
-				 unsigned int mediaoff, unsigned int medialen)
+				 unsigned int mediaoff, unsigned int medialen,
+				 const u8 *cid_hash)
 {
 	struct nf_conntrack_expect *exp, *rtp_exp, *rtcp_exp;
 	enum ip_conntrack_info ctinfo;
@@ -861,6 +897,7 @@
 	u_int16_t base_port;
 	__be16 rtp_port, rtcp_port;
 	const struct nf_nat_sip_hooks *hooks;
+	struct nf_ct_sip_expect *exp_sip_info;
 
 	saddr = NULL;
 	if (sip_direct_media) {
@@ -953,18 +990,29 @@
 			goto err1;
 	}
 
-	if (skip_expect)
+	if (skip_expect) {
+		exp_sip_info = nf_ct_exp_data(exp);
+		memcpy(exp_sip_info->cid_hash, cid_hash,
+		       sizeof (exp_sip_info->cid_hash));
 		return NF_ACCEPT;
+	}
 
 	rtp_exp = nf_ct_expect_alloc(ct);
 	if (rtp_exp == NULL)
 		goto err1;
+	exp_sip_info = nf_ct_exp_data(rtp_exp);
+	memcpy(exp_sip_info->cid_hash, cid_hash,
+	       sizeof (exp_sip_info->cid_hash));
 	nf_ct_expect_init(rtp_exp, class, nf_ct_l3num(ct), saddr, daddr,
 			  IPPROTO_UDP, NULL, &rtp_port);
 
+
 	rtcp_exp = nf_ct_expect_alloc(ct);
 	if (rtcp_exp == NULL)
 		goto err2;
+	exp_sip_info = nf_ct_exp_data(rtcp_exp);
+	memcpy(exp_sip_info->cid_hash, cid_hash,
+	       sizeof (exp_sip_info->cid_hash));
 	nf_ct_expect_init(rtcp_exp, class, nf_ct_l3num(ct), saddr, daddr,
 			  IPPROTO_UDP, NULL, &rtcp_port);
 
@@ -1039,10 +1087,20 @@
 	const struct nf_nat_sip_hooks *hooks;
 	unsigned int port;
 	const struct sdp_media_type *t;
+	struct sha256_state s;
+	u8 cid_hash[SHA256_DIGEST_SIZE];
 	int ret = NF_ACCEPT;
 
 	hooks = rcu_dereference(nf_nat_sip_hooks);
 
+	/* extract caller id if any */
+	sha256_init(&s);
+	if (ct_sip_get_header(ct, *dptr, 0, *datalen,
+			      SIP_HDR_CALL_ID,
+			      &matchoff, &matchlen) > 0)
+		sha256_update(&s, *dptr + matchoff, matchlen);
+	sha256_final(&s, cid_hash);
+
 	/* Find beginning of session description */
 	if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen,
 				  SDP_HDR_VERSION, SDP_HDR_UNSPEC,
@@ -1101,7 +1159,7 @@
 		ret = set_expected_rtp_rtcp(skb, protoff, dataoff,
 					    dptr, datalen,
 					    &rtp_addr, htons(port), t->class,
-					    mediaoff, medialen);
+					    mediaoff, medialen, cid_hash);
 		if (ret != NF_ACCEPT) {
 			nf_ct_helper_log(skb, ct,
 					 "cannot add expectation for voice");
@@ -1145,7 +1203,7 @@
 	    (code >= 200 && code <= 299))
 		return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
 	else if (ct_sip_info->invite_cseq == cseq)
-		flush_expectations(ct, true);
+		flush_media_expectations(ct, *dptr, *datalen);
 	return NF_ACCEPT;
 }
 
@@ -1162,7 +1220,7 @@
 	    (code >= 200 && code <= 299))
 		return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
 	else if (ct_sip_info->invite_cseq == cseq)
-		flush_expectations(ct, true);
+		flush_media_expectations(ct, *dptr, *datalen);
 	return NF_ACCEPT;
 }
 
@@ -1179,7 +1237,7 @@
 	    (code >= 200 && code <= 299))
 		return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
 	else if (ct_sip_info->invite_cseq == cseq)
-		flush_expectations(ct, true);
+		flush_media_expectations(ct, *dptr, *datalen);
 	return NF_ACCEPT;
 }
 
@@ -1193,7 +1251,7 @@
 	struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
 	unsigned int ret;
 
-	flush_expectations(ct, true);
+	flush_media_expectations(ct, *dptr, *datalen);
 	ret = process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
 	if (ret == NF_ACCEPT)
 		ct_sip_info->invite_cseq = cseq;
@@ -1208,7 +1266,7 @@
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
-	flush_expectations(ct, true);
+	flush_media_expectations(ct, *dptr, *datalen);
 	return NF_ACCEPT;
 }
 
@@ -1392,7 +1450,7 @@
 	}
 
 flush:
-	flush_expectations(ct, false);
+	flush_sig_expectations(ct);
 	return NF_ACCEPT;
 }
 
@@ -1468,12 +1526,13 @@
 	 * Via: header so that nf_nat_sip can redirect the responses to
 	 * the correct port.
 	 */
-	if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
+	if (nf_ct_protonum(ct) == IPPROTO_UDP &&
+	    ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
 				    SIP_HDR_VIA_UDP, NULL, &matchoff,
 				    &matchlen, &addr, &port) > 0 &&
 	    port != ct->tuplehash[dir].tuple.src.u.udp.port &&
 	    nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.src.u3))
-		ct_sip_info->forced_dport = port;
+		ct_sip_info->forced_dport[!dir] = port;
 
 	for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
 		const struct sip_handler *handler;
@@ -1647,17 +1706,17 @@
 	},
 	[SIP_EXPECT_AUDIO] = {
 		.name		= "audio",
-		.max_expected	= 2 * IP_CT_DIR_MAX,
+		.max_expected	= MAX_CALLS * 2 * IP_CT_DIR_MAX,
 		.timeout	= 3 * 60,
 	},
 	[SIP_EXPECT_VIDEO] = {
 		.name		= "video",
-		.max_expected	= 2 * IP_CT_DIR_MAX,
+		.max_expected	= MAX_CALLS * 2 * IP_CT_DIR_MAX,
 		.timeout	= 3 * 60,
 	},
 	[SIP_EXPECT_IMAGE] = {
 		.name		= "image",
-		.max_expected	= IP_CT_DIR_MAX,
+		.max_expected	= MAX_CALLS * IP_CT_DIR_MAX,
 		.timeout	= 3 * 60,
 	},
 };
@@ -1672,6 +1731,7 @@
 	int i, ret;
 
 	NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_sip_master));
+	NF_CT_EXPECT_BUILD_BUG_ON(sizeof(struct nf_ct_sip_expect));
 
 	if (ports_c == 0)
 		ports[ports_c++] = SIP_PORT;
diff -ruw linux-6.4/net/netfilter/nf_conntrack_standalone.c linux-6.4-fbx/net/netfilter/nf_conntrack_standalone.c
--- linux-6.4/net/netfilter/nf_conntrack_standalone.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/netfilter/nf_conntrack_standalone.c	2023-05-22 20:06:45.675891892 +0200
@@ -557,6 +557,7 @@
 	NF_SYSCTL_CT_LOG_INVALID,
 	NF_SYSCTL_CT_EXPECT_MAX,
 	NF_SYSCTL_CT_ACCT,
+	NF_SYSCTL_CT_HELPER,
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
 	NF_SYSCTL_CT_EVENTS,
 #endif
@@ -674,6 +675,14 @@
 		.extra1 	= SYSCTL_ZERO,
 		.extra2 	= SYSCTL_ONE,
 	},
+	[NF_SYSCTL_CT_HELPER] = {
+		.procname	= "nf_conntrack_helper",
+		.maxlen		= sizeof(u8),
+		.mode		= 0644,
+		.proc_handler	= proc_dou8vec_minmax,
+		.extra1 	= SYSCTL_ZERO,
+		.extra2 	= SYSCTL_ONE,
+	},
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
 	[NF_SYSCTL_CT_EVENTS] = {
 		.procname	= "nf_conntrack_events",
@@ -1079,6 +1088,7 @@
 	table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
 	table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid;
 	table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct;
+	table[NF_SYSCTL_CT_HELPER].data = &cnet->sysctl_auto_assign_helper;
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
 	table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
 #endif
diff -ruw linux-6.4/net/netfilter/nf_nat_core.c linux-6.4-fbx/net/netfilter/nf_nat_core.c
--- linux-6.4/net/netfilter/nf_nat_core.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/netfilter/nf_nat_core.c	2023-05-22 20:06:45.679891998 +0200
@@ -641,6 +641,11 @@
 	else
 		ct->status |= IPS_SRC_NAT_DONE;
 
+	if (maniptype == NF_NAT_MANIP_SRC) {
+		ct->nat_src_proto_min = range->min_proto;
+		ct->nat_src_proto_max = range->max_proto;
+	}
+
 	return NF_ACCEPT;
 }
 EXPORT_SYMBOL(nf_nat_setup_info);
diff -ruw linux-6.4/net/netfilter/nf_nat_ftp.c linux-6.4-fbx/net/netfilter/nf_nat_ftp.c
--- linux-6.4/net/netfilter/nf_nat_ftp.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/netfilter/nf_nat_ftp.c	2023-05-22 20:06:45.679891998 +0200
@@ -69,7 +69,8 @@
 			       struct nf_conntrack_expect *exp)
 {
 	union nf_inet_addr newaddr;
-	u_int16_t port;
+	u_int16_t port, sport, eport;
+	unsigned int i;
 	int dir = CTINFO2DIR(ctinfo);
 	struct nf_conn *ct = exp->master;
 	char buffer[sizeof("|1||65535|") + INET6_ADDRSTRLEN];
@@ -86,7 +87,42 @@
 	 * this one. */
 	exp->expectfn = nf_nat_follow_master;
 
-	port = nf_nat_exp_find_port(exp, ntohs(exp->saved_proto.tcp.port));
+	if (dir == IP_CT_DIR_ORIGINAL &&
+	    (ct->status & IPS_SRC_NAT) &&
+	    ct->nat_src_proto_min.all &&
+	    ct->nat_src_proto_max.all) {
+		sport = ntohs(ct->nat_src_proto_min.all);
+		eport = ntohs(ct->nat_src_proto_max.all);
+	} else {
+		sport = 1024;
+		eport = 65535;
+	}
+
+	port = ntohs(exp->saved_proto.tcp.port);
+	if (port < sport || port > eport) {
+		get_random_bytes(&port, sizeof (port));
+		port %= eport - sport;
+		port += sport;
+	}
+
+	/* Try to get same port: if not, try to change it. */
+	for (i = 0; i < eport - sport + 1; i++) {
+		int ret;
+
+		exp->tuple.dst.u.tcp.port = htons(port);
+		ret = nf_ct_expect_related(exp, 0);
+		if (ret == 0)
+			break;
+		else if (ret != -EBUSY) {
+			port = 0;
+			break;
+		}
+
+		port++;
+		if (port > eport)
+			port = sport;
+	}
+
 	if (port == 0) {
 		nf_ct_helper_log(skb, exp->master, "all ports in use");
 		return NF_DROP;
diff -ruw linux-6.4/net/netfilter/nf_nat_helper.c linux-6.4-fbx/net/netfilter/nf_nat_helper.c
--- linux-6.4/net/netfilter/nf_nat_helper.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/netfilter/nf_nat_helper.c	2023-05-22 20:06:45.679891998 +0200
@@ -188,6 +188,14 @@
 	range.flags = NF_NAT_RANGE_MAP_IPS;
 	range.min_addr = range.max_addr
 		= ct->master->tuplehash[!exp->dir].tuple.dst.u3;
+
+	if (ct->master->nat_src_proto_min.all &&
+	    ct->master->nat_src_proto_max.all) {
+		range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+		range.min_proto = ct->master->nat_src_proto_min;
+		range.max_proto = ct->master->nat_src_proto_max;
+	}
+
 	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 
 	/* For DST manip, map port here to where it's expected. */
diff -ruw linux-6.4/net/netfilter/nf_nat_proto.c linux-6.4-fbx/net/netfilter/nf_nat_proto.c
--- linux-6.4/net/netfilter/nf_nat_proto.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/netfilter/nf_nat_proto.c	2023-02-27 13:42:32.630771810 +0100
@@ -385,6 +385,8 @@
 {
 #if IS_ENABLED(CONFIG_IPV6)
 	struct ipv6hdr *ipv6h;
+	const __be32 *to;
+	__be32 *from;
 	__be16 frag_off;
 	int hdroff;
 	u8 nexthdr;
@@ -407,10 +409,24 @@
 	ipv6h = (void *)skb->data + iphdroff;
 
 manip_addr:
-	if (maniptype == NF_NAT_MANIP_SRC)
-		ipv6h->saddr = target->src.u3.in6;
-	else
-		ipv6h->daddr = target->dst.u3.in6;
+	if (maniptype == NF_NAT_MANIP_SRC) {
+		from = ipv6h->saddr.s6_addr32;
+		to = target->src.u3.in6.s6_addr32;
+	} else {
+		from = ipv6h->daddr.s6_addr32;
+		to = target->dst.u3.in6.s6_addr32;
+	}
+
+	if (skb->ip_summed == CHECKSUM_COMPLETE) {
+		__be32 diff[] = {
+			~from[0], ~from[1], ~from[2], ~from[3],
+			to[0], to[1], to[2], to[3],
+		};
+
+		skb->csum = ~csum_partial(diff, sizeof(diff), ~skb->csum);
+	}
+
+	memcpy(from, to, sizeof (struct in6_addr));
 
 #endif
 	return true;
diff -ruw linux-6.4/net/netfilter/nf_nat_sip.c linux-6.4-fbx/net/netfilter/nf_nat_sip.c
--- linux-6.4/net/netfilter/nf_nat_sip.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/netfilter/nf_nat_sip.c	2023-05-22 20:06:45.679891998 +0200
@@ -111,8 +111,11 @@
 	} else if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, addr) &&
 		   ct->tuplehash[dir].tuple.dst.u.udp.port == port) {
 		newaddr = ct->tuplehash[!dir].tuple.src.u3;
-		newport = ct_sip_info->forced_dport ? :
+		if (nf_ct_protonum(ct) == IPPROTO_UDP)
+			newport = ct_sip_info->forced_dport[dir] ? :
 			  ct->tuplehash[!dir].tuple.src.u.udp.port;
+		else
+			newport = ct->tuplehash[!dir].tuple.src.u.udp.port;
 	} else
 		return 1;
 
@@ -279,7 +282,8 @@
 	}
 
 	/* Mangle destination port for Cisco phones, then fix up checksums */
-	if (dir == IP_CT_DIR_REPLY && ct_sip_info->forced_dport) {
+	if (nf_ct_protonum(ct) == IPPROTO_UDP &&
+	    dir == IP_CT_DIR_REPLY && ct_sip_info->forced_dport[dir]) {
 		struct udphdr *uh;
 
 		if (skb_ensure_writable(skb, skb->len)) {
@@ -288,7 +292,7 @@
 		}
 
 		uh = (void *)skb->data + protoff;
-		uh->dest = ct_sip_info->forced_dport;
+		uh->dest = ct_sip_info->forced_dport[dir];
 
 		if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, protoff,
 					      0, 0, NULL, 0)) {
@@ -397,7 +401,7 @@
 	/* If the signalling port matches the connection's source port in the
 	 * original direction, try to use the destination port in the opposite
 	 * direction. */
-	srcport = ct_sip_info->forced_dport ? :
+	srcport = ct_sip_info->forced_dport[dir] ? :
 		  ct->tuplehash[dir].tuple.src.u.udp.port;
 	if (exp->tuple.dst.u.udp.port == srcport)
 		port = ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port);
diff -ruw linux-6.4/net/netfilter/nfnetlink.c linux-6.4-fbx/net/netfilter/nfnetlink.c
--- linux-6.4/net/netfilter/nfnetlink.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/netfilter/nfnetlink.c	2023-06-27 11:47:16.171869771 +0200
@@ -648,7 +648,10 @@
 	    skb->len < nlh->nlmsg_len)
 		return;
 
-	if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
+	if (NFNL_SUBSYS_ID(nlh->nlmsg_type) == NFNL_SUBSYS_CTNETLINK &&
+	    NFNL_MSG_TYPE(nlh->nlmsg_type) == 1 /* IPCTNL_MSG_CT_GET */) {
+		pr_debug("Carving out access exception for conntrack get; does not work for batch queries\n");
+	} else if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
 		netlink_ack(skb, nlh, -EPERM, NULL);
 		return;
 	}
diff -ruw linux-6.4/net/sched/act_police.c linux-6.4-fbx/net/sched/act_police.c
--- linux-6.4/net/sched/act_police.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/sched/act_police.c	2023-11-07 13:38:44.098257675 +0100
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <net/act_api.h>
+#include <net/gso.h>
 #include <net/netlink.h>
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_police.h>
diff -ruw linux-6.4/net/sched/sch_drr.c linux-6.4-fbx/net/sched/sch_drr.c
--- linux-6.4/net/sched/sch_drr.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/sched/sch_drr.c	2023-05-22 20:06:45.951899233 +0200
@@ -325,7 +325,9 @@
 			cl = drr_find_class(sch, res.classid);
 		return cl;
 	}
-	return NULL;
+
+	/* default to first minor if it exists, or drop */
+	return drr_find_class(sch, TC_H_MAKE(TC_H_MAJ(sch->handle), 1));
 }
 
 static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
diff -ruw linux-6.4/net/socket.c linux-6.4-fbx/net/socket.c
--- linux-6.4/net/socket.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/socket.c	2023-05-22 20:06:46.211906149 +0200
@@ -1220,6 +1220,29 @@
 	return err;
 }
 
+static DEFINE_MUTEX(fbxbridge_ioctl_mutex);
+static int (*fbxbridge_ioctl_hook)(struct net *, unsigned int cmd, void __user *arg) = NULL;
+
+void fbxbridge_set(int (*hook)(struct net *, unsigned int, void __user *))
+{
+	mutex_lock(&fbxbridge_ioctl_mutex);
+	fbxbridge_ioctl_hook = hook;
+	mutex_unlock(&fbxbridge_ioctl_mutex);
+}
+
+static DEFINE_MUTEX(fbxdiverter_ioctl_mutex);
+static int (*fbxdiverter_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg) = NULL;
+
+void fbxdiverter_ioctl_set(int (*hook) (struct net *, unsigned int,
+					void __user *))
+{
+	mutex_lock(&fbxdiverter_ioctl_mutex);
+	fbxdiverter_ioctl_hook = hook;
+	mutex_unlock(&fbxdiverter_ioctl_mutex);
+}
+
+EXPORT_SYMBOL(fbxdiverter_ioctl_set);
+
 /*
  *	With an ioctl, arg may well be a user mode pointer, but we don't know
  *	what to do with it - that's up to the protocol still.
@@ -1289,6 +1312,17 @@
 
 			err = open_related_ns(&net->ns, get_net_ns);
 			break;
+		case SIOCGFBXDIVERT:
+		case SIOCSFBXDIVERT:
+			err = -ENOPKG;
+			if (!fbxdiverter_ioctl_hook)
+				request_module("fbxdiverter");
+
+			mutex_lock(&fbxdiverter_ioctl_mutex);
+			if (fbxdiverter_ioctl_hook)
+				err = fbxdiverter_ioctl_hook(net, cmd, argp);
+			mutex_unlock(&fbxdiverter_ioctl_mutex);
+			break;
 		case SIOCGSTAMP_OLD:
 		case SIOCGSTAMPNS_OLD:
 			if (!sock->ops->gettstamp) {
@@ -1314,6 +1348,17 @@
 			err = dev_ifconf(net, argp);
 			break;
 
+		case SIOCGFBXBRIDGE:
+		case SIOCSFBXBRIDGE:
+			err = -ENOPKG;
+			if (!fbxbridge_ioctl_hook)
+				request_module("fbxbridge");
+
+			mutex_lock(&fbxbridge_ioctl_mutex);
+			if (fbxbridge_ioctl_hook)
+				err = fbxbridge_ioctl_hook(net, cmd, argp);
+			mutex_unlock(&fbxbridge_ioctl_mutex);
+			break;
 		default:
 			err = sock_do_ioctl(net, sock, cmd, arg);
 			break;
diff -ruw linux-6.4/net/unix/Kconfig linux-6.4-fbx/net/unix/Kconfig
--- linux-6.4/net/unix/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/unix/Kconfig	2023-04-05 14:58:59.067863051 +0200
@@ -30,6 +30,9 @@
 	depends on UNIX
 	default y
 
+config UNIX_ABSTRACT_IGNORE_NETNS
+	bool "make abstract namespace global to all network namespaces"
+
 config UNIX_DIAG
 	tristate "UNIX: socket monitoring interface"
 	depends on UNIX
diff -ruw linux-6.4/net/unix/af_unix.c linux-6.4-fbx/net/unix/af_unix.c
--- linux-6.4/net/unix/af_unix.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/unix/af_unix.c	2023-06-27 11:47:16.191870315 +0200
@@ -1116,8 +1116,19 @@
 
 	if (sunaddr->sun_path[0])
 		sk = unix_find_bsd(sunaddr, addr_len, type);
-	else
+	else {
+#ifdef CONFIG_UNIX_ABSTRACT_IGNORE_NETNS
+		down_read(&net_rwsem);
+		for_each_net(net) {
+#endif
 		sk = unix_find_abstract(net, sunaddr, addr_len, type);
+#ifdef CONFIG_UNIX_ABSTRACT_IGNORE_NETNS
+			if (!IS_ERR(sk))
+				break;
+		}
+		up_read(&net_rwsem);
+#endif
+	}
 
 	return sk;
 }
diff -ruw linux-6.4/net/wireless/Kconfig linux-6.4-fbx/net/wireless/Kconfig
--- linux-6.4/net/wireless/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/Kconfig	2024-02-14 17:43:53.023127909 +0100
@@ -177,6 +177,9 @@
 
 	  If unsure, say N.
 
+config CFG80211_DFS_CACHE
+	bool "keep each channel dfs state in global cache"
+
 config CFG80211_CRDA_SUPPORT
 	bool "support CRDA" if EXPERT
 	default y
@@ -201,6 +204,22 @@
 	  Drivers should select this option if they require cfg80211's
 	  wext compatibility symbols to be exported.
 
+config CFG80211_KUNIT_TEST
+	tristate "KUnit tests for cfg80211" if !KUNIT_ALL_TESTS
+	depends on KUNIT
+	depends on CFG80211
+	default KUNIT_ALL_TESTS
+	depends on !KERNEL_6_2
+	help
+	  Enable this option to test cfg80211 functions with kunit.
+
+	  If unsure, say N.
+
+config FBX80211
+	bool "fbx genl family"
+	help
+	  Support for freebox specific genl family
+
 endif # CFG80211
 
 config LIB80211
diff -ruw linux-6.4/net/wireless/Makefile linux-6.4-fbx/net/wireless/Makefile
--- linux-6.4/net/wireless/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/Makefile	2023-12-12 17:24:34.179627645 +0100
@@ -4,6 +4,7 @@
 obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o
 obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o
 obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o
+obj-y += tests/
 
 obj-$(CONFIG_WEXT_CORE) += wext-core.o
 obj-$(CONFIG_WEXT_PROC) += wext-proc.o
@@ -24,6 +25,8 @@
 cfg80211-y += extra-certs.o
 endif
 
+cfg80211-$(CONFIG_FBX80211) += nlfbx.o
+
 $(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
 	@$(kecho) "  GEN     $@"
 	$(Q)(echo '#include "reg.h"'; \
diff -ruw linux-6.4/net/wireless/ap.c linux-6.4-fbx/net/wireless/ap.c
--- linux-6.4/net/wireless/ap.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/ap.c	2023-11-07 13:38:44.098257675 +0100
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Parts of this file are
- * Copyright (C) 2022 Intel Corporation
+ * Copyright (C) 2022-2023 Intel Corporation
  */
 #include <linux/ieee80211.h>
 #include <linux/export.h>
@@ -18,7 +18,7 @@
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	int err;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (!rdev->ops->stop_ap)
 		return -EOPNOTSUPP;
@@ -52,7 +52,7 @@
 	return err;
 }
 
-int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
+int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
 		       struct net_device *dev, int link_id,
 		       bool notify)
 {
@@ -72,17 +72,3 @@
 
 	return ret;
 }
-
-int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
-		     struct net_device *dev, int link_id,
-		     bool notify)
-{
-	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	int err;
-
-	wdev_lock(wdev);
-	err = __cfg80211_stop_ap(rdev, dev, link_id, notify);
-	wdev_unlock(wdev);
-
-	return err;
-}
diff -ruw linux-6.4/net/wireless/chan.c linux-6.4-fbx/net/wireless/chan.c
--- linux-6.4/net/wireless/chan.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/chan.c	2024-02-14 17:43:53.027128018 +0100
@@ -32,6 +32,8 @@
 	chandef->center_freq2 = 0;
 	chandef->edmg.bw_config = 0;
 	chandef->edmg.channels = 0;
+	chandef->ru_punct_bitmap = 0;
+        chandef->ru_punct_bitmap_supp_he = 0;
 
 	switch (chan_type) {
 	case NL80211_CHAN_NO_HT:
@@ -445,6 +447,77 @@
 }
 EXPORT_SYMBOL(cfg80211_chandef_compatible);
 
+static inline u32
+dfs_cache_channel_to_khz(const struct cfg80211_chan_dfs_cache *cd)
+{
+	return MHZ_TO_KHZ(cd->center_freq) + cd->freq_offset;
+}
+
+static struct cfg80211_chan_dfs_cache *
+__get_dfs_chan_cache(struct ieee80211_channel *c)
+{
+	struct cfg80211_chan_dfs_cache *cd;
+	u32 freq;
+
+	freq = ieee80211_channel_to_khz(c);
+	list_for_each_entry(cd, &cfg80211_dfs_cache.bands[c->band], next) {
+		if (dfs_cache_channel_to_khz(cd) == freq)
+			return cd;
+	}
+	return NULL;
+}
+
+struct cfg80211_chan_dfs_cache *
+cfg80211_get_dfs_chan_cache(struct ieee80211_channel *c)
+{
+	/*
+	 * Exported variant of __get_dfs_chan_cache(); reuse the
+	 * helper's band-list walk instead of duplicating it here.
+	 *
+	 * NOTE(review): the list is read without holding
+	 * cfg80211_dfs_cache.mtx -- verify all callers serialize
+	 * against set_dfs_cache_state()/cfg80211_flush_dfs_cache().
+	 */
+	return __get_dfs_chan_cache(c);
+}
+
+void cfg80211_flush_dfs_cache(void)
+{
+	struct cfg80211_chan_dfs_cache *cd, *tmp;
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(cfg80211_dfs_cache.bands); i++) {
+		list_for_each_entry_safe(cd, tmp,
+					 &cfg80211_dfs_cache.bands[i], next) {
+			list_del(&cd->next);
+			kfree(cd);
+		}
+	}
+}
+
+static void set_dfs_cache_state(struct ieee80211_channel *c,
+				enum nl80211_dfs_state dfs_state)
+{
+	struct cfg80211_chan_dfs_cache *cd;
+
+	mutex_lock(&cfg80211_dfs_cache.mtx);
+
+	cd = __get_dfs_chan_cache(c);
+	if (!cd) {
+		cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+		if (!cd)
+			goto out; /* don't return with mtx held */
+		cd->center_freq = c->center_freq;
+		cd->freq_offset = c->freq_offset;
+		list_add(&cd->next, &cfg80211_dfs_cache.bands[c->band]);
+	}
+
+	cd->dfs_state = dfs_state;
+	cd->dfs_state_entered = jiffies;
+out:
+	mutex_unlock(&cfg80211_dfs_cache.mtx);
+}
+
 static void cfg80211_set_chans_dfs_state(struct wiphy *wiphy, u32 center_freq,
 					 u32 bandwidth,
 					 enum nl80211_dfs_state dfs_state)
@@ -459,6 +532,8 @@
 		if (!c || !(c->flags & IEEE80211_CHAN_RADAR))
 			continue;
 
+		if (IS_ENABLED(CONFIG_CFG80211_DFS_CACHE))
+			set_dfs_cache_state(c, dfs_state);
 		c->dfs_state = dfs_state;
 		c->dfs_state_entered = jiffies;
 	}
@@ -666,6 +741,7 @@
 
 	return (r1 + r2 > 0);
 }
+EXPORT_SYMBOL(cfg80211_chandef_dfs_usable);
 
 /*
  * Checks if center frequency of chan falls with in the bandwidth
@@ -713,7 +789,7 @@
 {
 	unsigned int link;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_AP:
@@ -782,19 +858,15 @@
 {
 	struct wireless_dev *wdev;
 
+	lockdep_assert_wiphy(wiphy);
+
 	list_for_each_entry(wdev, &wiphy->wdev_list, list) {
-		wdev_lock(wdev);
-		if (!cfg80211_beaconing_iface_active(wdev)) {
-			wdev_unlock(wdev);
+		if (!cfg80211_beaconing_iface_active(wdev))
 			continue;
-		}
 
-		if (cfg80211_wdev_on_sub_chan(wdev, chan, false)) {
-			wdev_unlock(wdev);
+		if (cfg80211_wdev_on_sub_chan(wdev, chan, false))
 			return true;
-		}
-		wdev_unlock(wdev);
 	}
 
 	return false;
 }
@@ -823,14 +895,18 @@
 	if (!(chan->flags & IEEE80211_CHAN_RADAR))
 		return false;
 
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+	for_each_rdev(rdev) {
+		bool found;
+
 		if (!reg_dfs_domain_same(wiphy, &rdev->wiphy))
 			continue;
 
-		if (cfg80211_is_wiphy_oper_chan(&rdev->wiphy, chan))
-			return true;
+		wiphy_lock(&rdev->wiphy);
+		found = cfg80211_is_wiphy_oper_chan(&rdev->wiphy, chan) ||
+			cfg80211_offchan_chain_is_active(rdev, chan);
+		wiphy_unlock(&rdev->wiphy);
 
-		if (cfg80211_offchan_chain_is_active(rdev, chan))
+		if (found)
 			return true;
 	}
 
@@ -965,6 +1041,7 @@
 
 	return max(t1, t2);
 }
+EXPORT_SYMBOL(cfg80211_chandef_dfs_cac_time);
 
 static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
 					u32 center_freq, u32 bandwidth,
@@ -1321,10 +1398,7 @@
 	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 		bool ret;
 
-		wdev_lock(wdev);
 		ret = cfg80211_ir_permissive_check_wdev(iftype, wdev, chan);
-		wdev_unlock(wdev);
-
 		if (ret)
 			return ret;
 	}
@@ -1433,17 +1507,10 @@
 struct cfg80211_chan_def *wdev_chandef(struct wireless_dev *wdev,
 				       unsigned int link_id)
 {
-	/*
-	 * We need to sort out the locking here - in some cases
-	 * where we get here we really just don't care (yet)
-	 * about the valid links, but in others we do. But we
-	 * get here with various driver cases, so we cannot
-	 * easily require the wdev mutex.
-	 */
-	if (link_id || wdev->valid_links & BIT(0)) {
-		ASSERT_WDEV_LOCK(wdev);
-		WARN_ON(!(wdev->valid_links & BIT(link_id)));
-	}
+	lockdep_assert_wiphy(wdev->wiphy);
+
+	WARN_ON(wdev->valid_links && !(wdev->valid_links & BIT(link_id)));
+	WARN_ON(!wdev->valid_links && link_id > 0);
 
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_MESH_POINT:
diff -ruw linux-6.4/net/wireless/core.c linux-6.4-fbx/net/wireless/core.c
--- linux-6.4/net/wireless/core.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/core.c	2024-02-14 17:43:53.027128018 +0100
@@ -25,6 +25,7 @@
 #include <net/genetlink.h>
 #include <net/cfg80211.h>
 #include "nl80211.h"
+#include "nlfbx.h"
 #include "core.h"
 #include "sysfs.h"
 #include "debugfs.h"
@@ -54,13 +55,17 @@
 MODULE_PARM_DESC(cfg80211_disable_40mhz_24ghz,
 		 "Disable 40MHz support in the 2.4GHz band");
 
+/* global dfs cache */
+struct cfg80211_dfs_cache cfg80211_dfs_cache;
+static struct dentry *cfg80211_dfs_cache_debugfs;
+
 struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx)
 {
 	struct cfg80211_registered_device *result = NULL, *rdev;
 
 	ASSERT_RTNL();
 
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+	for_each_rdev(rdev) {
 		if (rdev->wiphy_idx == wiphy_idx) {
 			result = rdev;
 			break;
@@ -116,7 +121,7 @@
 	}
 
 	/* Ensure another device does not already have this name. */
-	list_for_each_entry(rdev2, &cfg80211_rdev_list, list)
+	for_each_rdev(rdev2)
 		if (strcmp(newname, wiphy_name(&rdev2->wiphy)) == 0)
 			return -EINVAL;
 
@@ -129,6 +134,7 @@
 	int result;
 
 	ASSERT_RTNL();
+	lockdep_assert_wiphy(&rdev->wiphy);
 
 	/* Ignore nop renames */
 	if (strcmp(newname, wiphy_name(&rdev->wiphy)) == 0)
@@ -195,6 +201,8 @@
 			continue;
 		nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE);
 	}
+
+	wiphy_lock(&rdev->wiphy);
 	nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY);
 
 	wiphy_net_set(&rdev->wiphy, net);
@@ -203,6 +211,8 @@
 	WARN_ON(err);
 
 	nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
+	wiphy_unlock(&rdev->wiphy);
+
 	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 		if (!wdev->netdev)
 			continue;
@@ -360,7 +370,8 @@
 	rtnl_unlock();
 }
 
-static void cfg80211_sched_scan_stop_wk(struct work_struct *work)
+static void cfg80211_sched_scan_stop_wk(struct wiphy *wiphy,
+					struct wiphy_work *work)
 {
 	struct cfg80211_registered_device *rdev;
 	struct cfg80211_sched_scan_request *req, *tmp;
@@ -368,12 +379,10 @@
 	rdev = container_of(work, struct cfg80211_registered_device,
 			   sched_scan_stop_wk);
 
-	wiphy_lock(&rdev->wiphy);
 	list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) {
 		if (req->nl_owner_dead)
 			cfg80211_stop_sched_scan_req(rdev, req, false);
 	}
-	wiphy_unlock(&rdev->wiphy);
 }
 
 static void cfg80211_propagate_radar_detect_wk(struct work_struct *work)
@@ -408,6 +417,34 @@
 	rtnl_unlock();
 }
 
+static void cfg80211_wiphy_work(struct work_struct *work)
+{
+	struct cfg80211_registered_device *rdev;
+	struct wiphy_work *wk;
+
+	rdev = container_of(work, struct cfg80211_registered_device, wiphy_work);
+
+	wiphy_lock(&rdev->wiphy);
+	if (rdev->suspended)
+		goto out;
+
+	spin_lock_irq(&rdev->wiphy_work_lock);
+	wk = list_first_entry_or_null(&rdev->wiphy_work_list,
+				      struct wiphy_work, entry);
+	if (wk) {
+		list_del_init(&wk->entry);
+		if (!list_empty(&rdev->wiphy_work_list))
+			schedule_work(work);
+		spin_unlock_irq(&rdev->wiphy_work_lock);
+
+		wk->func(&rdev->wiphy, wk);
+	} else {
+		spin_unlock_irq(&rdev->wiphy_work_lock);
+	}
+out:
+	wiphy_unlock(&rdev->wiphy);
+}
+
 /* exported functions */
 
 struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
@@ -495,7 +532,7 @@
 	spin_lock_init(&rdev->bss_lock);
 	INIT_LIST_HEAD(&rdev->bss_list);
 	INIT_LIST_HEAD(&rdev->sched_scan_req_list);
-	INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
+	wiphy_work_init(&rdev->scan_done_wk, __cfg80211_scan_done);
 	INIT_DELAYED_WORK(&rdev->dfs_update_channels_wk,
 			  cfg80211_dfs_channels_update_work);
 #ifdef CONFIG_CFG80211_WEXT
@@ -508,7 +545,7 @@
 	device_enable_async_suspend(&rdev->wiphy.dev);
 
 	INIT_WORK(&rdev->destroy_work, cfg80211_destroy_iface_wk);
-	INIT_WORK(&rdev->sched_scan_stop_wk, cfg80211_sched_scan_stop_wk);
+	wiphy_work_init(&rdev->sched_scan_stop_wk, cfg80211_sched_scan_stop_wk);
 	INIT_WORK(&rdev->sched_scan_res_wk, cfg80211_sched_scan_results_wk);
 	INIT_WORK(&rdev->propagate_radar_detect_wk,
 		  cfg80211_propagate_radar_detect_wk);
@@ -533,6 +570,9 @@
 		return NULL;
 	}
 
+	INIT_WORK(&rdev->wiphy_work, cfg80211_wiphy_work);
+	INIT_LIST_HEAD(&rdev->wiphy_work_list);
+	spin_lock_init(&rdev->wiphy_work_lock);
 	INIT_WORK(&rdev->rfkill_block, cfg80211_rfkill_block_work);
 	INIT_WORK(&rdev->conn_work, cfg80211_conn_work);
 	INIT_WORK(&rdev->event_work, cfg80211_event_work);
@@ -558,6 +598,7 @@
 
 	rdev->wiphy.max_sched_scan_plans = 1;
 	rdev->wiphy.max_sched_scan_plan_interval = U32_MAX;
+	rdev->wiphy.dev_port = -1;
 
 	return &rdev->wiphy;
 }
@@ -721,22 +762,6 @@
 			return -EINVAL;
 	}
 
-	/*
-	 * if a wiphy has unsupported modes for regulatory channel enforcement,
-	 * opt-out of enforcement checking
-	 */
-	if (wiphy->interface_modes & ~(BIT(NL80211_IFTYPE_STATION) |
-				       BIT(NL80211_IFTYPE_P2P_CLIENT) |
-				       BIT(NL80211_IFTYPE_AP) |
-				       BIT(NL80211_IFTYPE_MESH_POINT) |
-				       BIT(NL80211_IFTYPE_P2P_GO) |
-				       BIT(NL80211_IFTYPE_ADHOC) |
-				       BIT(NL80211_IFTYPE_P2P_DEVICE) |
-				       BIT(NL80211_IFTYPE_NAN) |
-				       BIT(NL80211_IFTYPE_AP_VLAN) |
-				       BIT(NL80211_IFTYPE_MONITOR)))
-		wiphy->regulatory_flags |= REGULATORY_IGNORE_STALE_KICKOFF;
-
 	if (WARN_ON((wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) &&
 		    (wiphy->regulatory_flags &
 					(REGULATORY_CUSTOM_REG |
@@ -941,8 +966,10 @@
 	rdev->wiphy.features |= NL80211_FEATURE_SCAN_FLUSH;
 
 	rtnl_lock();
+	wiphy_lock(&rdev->wiphy);
 	res = device_add(&rdev->wiphy.dev);
 	if (res) {
+		wiphy_unlock(&rdev->wiphy);
 		rtnl_unlock();
 		return res;
 	}
@@ -956,6 +983,7 @@
 
 	cfg80211_debugfs_rdev_add(rdev);
 	nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
+	wiphy_unlock(&rdev->wiphy);
 
 	/* set up regulatory info */
 	wiphy_regulatory_register(wiphy);
@@ -1027,6 +1055,36 @@
 }
 EXPORT_SYMBOL(wiphy_rfkill_start_polling);
 
+void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev,
+				  struct wiphy_work *end)
+{
+	unsigned int runaway_limit = 100;
+	unsigned long flags;
+
+	lockdep_assert_held(&rdev->wiphy.mtx);
+
+	spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
+	while (!list_empty(&rdev->wiphy_work_list)) {
+		struct wiphy_work *wk;
+
+		wk = list_first_entry(&rdev->wiphy_work_list,
+				      struct wiphy_work, entry);
+		list_del_init(&wk->entry);
+		spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
+
+		wk->func(&rdev->wiphy, wk);
+
+		spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
+
+		if (wk == end)
+			break;
+
+		if (WARN_ON(--runaway_limit == 0))
+			INIT_LIST_HEAD(&rdev->wiphy_work_list);
+	}
+	spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
+}
+
 void wiphy_unregister(struct wiphy *wiphy)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
@@ -1065,25 +1123,29 @@
 	cfg80211_rdev_list_generation++;
 	device_del(&rdev->wiphy.dev);
 
+#ifdef CONFIG_PM
+	if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
+		rdev_set_wakeup(rdev, false);
+#endif
+
+	/* surely nothing is reachable now, clean up work */
+	cfg80211_process_wiphy_works(rdev, NULL);
 	wiphy_unlock(&rdev->wiphy);
 	rtnl_unlock();
 
-	flush_work(&rdev->scan_done_wk);
+	/* this has nothing to do now but make sure it's gone */
+	cancel_work_sync(&rdev->wiphy_work);
+
 	cancel_work_sync(&rdev->conn_work);
 	flush_work(&rdev->event_work);
 	cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
 	cancel_delayed_work_sync(&rdev->background_cac_done_wk);
 	flush_work(&rdev->destroy_work);
-	flush_work(&rdev->sched_scan_stop_wk);
 	flush_work(&rdev->propagate_radar_detect_wk);
 	flush_work(&rdev->propagate_cac_done_wk);
 	flush_work(&rdev->mgmt_registrations_update_wk);
 	flush_work(&rdev->background_cac_abort_wk);
 
-#ifdef CONFIG_PM
-	if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
-		rdev_set_wakeup(rdev, false);
-#endif
 	cfg80211_rdev_free_wowlan(rdev);
 	cfg80211_rdev_free_coalesce(rdev);
 }
@@ -1130,23 +1192,16 @@
 }
 EXPORT_SYMBOL(wiphy_rfkill_set_hw_state_reason);
 
-void cfg80211_cqm_config_free(struct wireless_dev *wdev)
-{
-	kfree(wdev->cqm_config);
-	wdev->cqm_config = NULL;
-}
-
 static void _cfg80211_unregister_wdev(struct wireless_dev *wdev,
 				      bool unregister_netdev)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+	struct cfg80211_cqm_config *cqm_config;
 	unsigned int link_id;
 
 	ASSERT_RTNL();
 	lockdep_assert_held(&rdev->wiphy.mtx);
 
-	flush_work(&wdev->pmsr_free_wk);
-
 	nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE);
 
 	wdev->registered = false;
@@ -1178,11 +1233,10 @@
 	kfree_sensitive(wdev->wext.keys);
 	wdev->wext.keys = NULL;
 #endif
-	/* only initialized if we have a netdev */
-	if (wdev->netdev)
-		flush_work(&wdev->disconnect_wk);
-
-	cfg80211_cqm_config_free(wdev);
+	wiphy_work_cancel(wdev->wiphy, &wdev->cqm_rssi_work);
+	/* deleted from the list, so can't be found from nl80211 any more */
+	cqm_config = rcu_access_pointer(wdev->cqm_config);
+	kfree_rcu(cqm_config, rcu_head);
 
 	/*
 	 * Ensure that all events have been processed and
@@ -1228,14 +1282,13 @@
 		rdev->num_running_monitor_ifaces += num;
 }
 
-void __cfg80211_leave(struct cfg80211_registered_device *rdev,
+void cfg80211_leave(struct cfg80211_registered_device *rdev,
 		      struct wireless_dev *wdev)
 {
 	struct net_device *dev = wdev->netdev;
 	struct cfg80211_sched_scan_request *pos, *tmp;
 
 	lockdep_assert_held(&rdev->wiphy.mtx);
-	ASSERT_WDEV_LOCK(wdev);
 
 	cfg80211_pmsr_wdev_down(wdev);
 
@@ -1243,7 +1296,7 @@
 
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_ADHOC:
-		__cfg80211_leave_ibss(rdev, dev, true);
+		cfg80211_leave_ibss(rdev, dev, true);
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
 	case NL80211_IFTYPE_STATION:
@@ -1263,14 +1316,14 @@
 				    WLAN_REASON_DEAUTH_LEAVING, true);
 		break;
 	case NL80211_IFTYPE_MESH_POINT:
-		__cfg80211_leave_mesh(rdev, dev);
+		cfg80211_leave_mesh(rdev, dev);
 		break;
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_P2P_GO:
-		__cfg80211_stop_ap(rdev, dev, -1, true);
+		cfg80211_stop_ap(rdev, dev, -1, true);
 		break;
 	case NL80211_IFTYPE_OCB:
-		__cfg80211_leave_ocb(rdev, dev);
+		cfg80211_leave_ocb(rdev, dev);
 		break;
 	case NL80211_IFTYPE_P2P_DEVICE:
 	case NL80211_IFTYPE_NAN:
@@ -1288,14 +1341,6 @@
 	}
 }
 
-void cfg80211_leave(struct cfg80211_registered_device *rdev,
-		    struct wireless_dev *wdev)
-{
-	wdev_lock(wdev);
-	__cfg80211_leave(rdev, wdev);
-	wdev_unlock(wdev);
-}
-
 void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
 			 gfp_t gfp)
 {
@@ -1320,7 +1365,6 @@
 
 void cfg80211_init_wdev(struct wireless_dev *wdev)
 {
-	mutex_init(&wdev->mtx);
 	INIT_LIST_HEAD(&wdev->event_list);
 	spin_lock_init(&wdev->event_lock);
 	INIT_LIST_HEAD(&wdev->mgmt_registrations);
@@ -1334,6 +1378,8 @@
 	wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
 #endif
 
+	wiphy_work_init(&wdev->cqm_rssi_work, cfg80211_cqm_rssi_notify_work);
+
 	if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT)
 		wdev->ps = true;
 	else
@@ -1392,6 +1438,7 @@
 	lockdep_assert_held(&rdev->wiphy.mtx);
 
 	/* we'll take care of this */
+	dev->dev_port = rdev->wiphy.dev_port + 1;
 	wdev->registered = true;
 	wdev->registering = true;
 	ret = register_netdevice(dev);
@@ -1455,6 +1502,9 @@
 		cfg80211_leave(rdev, wdev);
 		cfg80211_remove_links(wdev);
 		wiphy_unlock(&rdev->wiphy);
+		/* since we just did cfg80211_leave() nothing to do there */
+		cancel_work_sync(&wdev->disconnect_wk);
+		cancel_work_sync(&wdev->pmsr_free_wk);
 		break;
 	case NETDEV_DOWN:
 		wiphy_lock(&rdev->wiphy);
@@ -1480,7 +1530,6 @@
 	case NETDEV_UP:
 		wiphy_lock(&rdev->wiphy);
 		cfg80211_update_iface_num(rdev, wdev->iftype, 1);
-		wdev_lock(wdev);
 		switch (wdev->iftype) {
 #ifdef CONFIG_CFG80211_WEXT
 		case NL80211_IFTYPE_ADHOC:
@@ -1510,7 +1559,6 @@
 		default:
 			break;
 		}
-		wdev_unlock(wdev);
 		rdev->opencount++;
 
 		/*
@@ -1553,7 +1601,7 @@
 	struct cfg80211_registered_device *rdev;
 
 	rtnl_lock();
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+	for_each_rdev(rdev) {
 		if (net_eq(wiphy_net(&rdev->wiphy), net))
 			WARN_ON(cfg80211_switch_netns(rdev, &init_net));
 	}
@@ -1564,6 +1612,125 @@
 	.exit = cfg80211_pernet_exit,
 };
 
+void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work)
+{
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
+	if (list_empty(&work->entry))
+		list_add_tail(&work->entry, &rdev->wiphy_work_list);
+	spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
+
+	queue_work(system_unbound_wq, &rdev->wiphy_work);
+}
+EXPORT_SYMBOL_GPL(wiphy_work_queue);
+
+void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work)
+{
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+	unsigned long flags;
+
+	lockdep_assert_held(&wiphy->mtx);
+
+	spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
+	if (!list_empty(&work->entry))
+		list_del_init(&work->entry);
+	spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
+}
+EXPORT_SYMBOL_GPL(wiphy_work_cancel);
+
+void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work)
+{
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+	unsigned long flags;
+	bool run;
+
+	spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
+	run = !work || !list_empty(&work->entry);
+	spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
+
+	if (run)
+		cfg80211_process_wiphy_works(rdev, work);
+}
+EXPORT_SYMBOL_GPL(wiphy_work_flush);
+
+void wiphy_delayed_work_timer(struct timer_list *t)
+{
+	struct wiphy_delayed_work *dwork = from_timer(dwork, t, timer);
+
+	wiphy_work_queue(dwork->wiphy, &dwork->work);
+}
+EXPORT_SYMBOL(wiphy_delayed_work_timer);
+
+void wiphy_delayed_work_queue(struct wiphy *wiphy,
+			      struct wiphy_delayed_work *dwork,
+			      unsigned long delay)
+{
+	if (!delay) {
+		wiphy_work_queue(wiphy, &dwork->work);
+		return;
+	}
+
+	dwork->wiphy = wiphy;
+	mod_timer(&dwork->timer, jiffies + delay);
+}
+EXPORT_SYMBOL_GPL(wiphy_delayed_work_queue);
+
+void wiphy_delayed_work_cancel(struct wiphy *wiphy,
+			       struct wiphy_delayed_work *dwork)
+{
+	lockdep_assert_held(&wiphy->mtx);
+
+	del_timer_sync(&dwork->timer);
+	wiphy_work_cancel(wiphy, &dwork->work);
+}
+EXPORT_SYMBOL_GPL(wiphy_delayed_work_cancel);
+
+void wiphy_delayed_work_flush(struct wiphy *wiphy,
+			      struct wiphy_delayed_work *dwork)
+{
+	lockdep_assert_held(&wiphy->mtx);
+
+	del_timer_sync(&dwork->timer);
+	wiphy_work_flush(wiphy, &dwork->work);
+}
+EXPORT_SYMBOL_GPL(wiphy_delayed_work_flush);
+
+/*
+ * debugfs hook: any write to "dfs_cache_flush" empties the DFS cache.
+ */
+static int dfs_cache_flush_set(void *data, u64 val)
+{
+	cfg80211_flush_dfs_cache();
+	pr_info("DFS cache flushed\n"); /* printk() had no KERN_ level */
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_dfs_cache_flush, NULL,
+			 dfs_cache_flush_set, "%llu\n");
+
+static void __init dfs_cache_init(void)
+{
+	size_t i;
+
+	mutex_init(&cfg80211_dfs_cache.mtx);
+
+	for (i = 0; i < ARRAY_SIZE(cfg80211_dfs_cache.bands); i++)
+		INIT_LIST_HEAD(&cfg80211_dfs_cache.bands[i]);
+
+	cfg80211_dfs_cache_debugfs =
+		debugfs_create_file_unsafe("dfs_cache_flush", 0200,
+					   ieee80211_debugfs_dir, NULL,
+					   &fops_dfs_cache_flush);
+}
+
+static void __exit dfs_cache_exit(void)
+{
+	cfg80211_flush_dfs_cache();
+	debugfs_remove(cfg80211_dfs_cache_debugfs);
+}
+
 static int __init cfg80211_init(void)
 {
 	int err;
@@ -1584,8 +1751,14 @@
 	if (err)
 		goto out_fail_nl80211;
 
+	err = nlfbx_init();
+	if (err)
+		goto out_fail_nlfbx;
+
 	ieee80211_debugfs_dir = debugfs_create_dir("ieee80211", NULL);
 
+	dfs_cache_init();
+
 	err = regulatory_init();
 	if (err)
 		goto out_fail_reg;
@@ -1601,6 +1774,8 @@
 out_fail_wq:
 	regulatory_exit();
 out_fail_reg:
+	nlfbx_exit();
+out_fail_nlfbx:
 	debugfs_remove(ieee80211_debugfs_dir);
 	nl80211_exit();
 out_fail_nl80211:
@@ -1616,6 +1791,7 @@
 
 static void __exit cfg80211_exit(void)
 {
+	dfs_cache_exit();
 	debugfs_remove(ieee80211_debugfs_dir);
 	nl80211_exit();
 	unregister_netdevice_notifier(&cfg80211_netdev_notifier);
diff -ruw linux-6.4/net/wireless/core.h linux-6.4-fbx/net/wireless/core.h
--- linux-6.4/net/wireless/core.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/core.h	2024-02-14 17:43:53.027128018 +0100
@@ -75,7 +75,7 @@
 	struct sk_buff *scan_msg;
 	struct list_head sched_scan_req_list;
 	time64_t suspend_at;
-	struct work_struct scan_done_wk;
+	struct wiphy_work scan_done_wk;
 
 	struct genl_info *cur_cmd_info;
 
@@ -95,7 +95,7 @@
 	struct cfg80211_coalesce *coalesce;
 
 	struct work_struct destroy_work;
-	struct work_struct sched_scan_stop_wk;
+	struct wiphy_work sched_scan_stop_wk;
 	struct work_struct sched_scan_res_wk;
 
 	struct cfg80211_chan_def radar_chandef;
@@ -108,6 +108,12 @@
 	/* lock for all wdev lists */
 	spinlock_t mgmt_registrations_lock;
 
+	struct work_struct wiphy_work;
+	struct list_head wiphy_work_list;
+	/* protects the list above */
+	spinlock_t wiphy_work_lock;
+	bool suspended;
+
 	/* must be last because of the way we do wiphy_priv(),
 	 * and it should at least be aligned to NETDEV_ALIGN */
 	struct wiphy wiphy __aligned(NETDEV_ALIGN);
@@ -154,6 +160,40 @@
 extern struct list_head cfg80211_rdev_list;
 extern int cfg80211_rdev_list_generation;
 
+
+/*
+ * DFS cache
+ */
+struct cfg80211_chan_dfs_cache {
+	u32 center_freq;
+	u16 freq_offset;
+
+	enum nl80211_dfs_state dfs_state;
+	unsigned long dfs_state_entered;
+	struct list_head next;
+};
+
+struct cfg80211_dfs_cache {
+	struct list_head bands[NUM_NL80211_BANDS];
+	struct mutex mtx;
+};
+
+extern struct cfg80211_dfs_cache cfg80211_dfs_cache;
+
+struct cfg80211_chan_dfs_cache *
+cfg80211_get_dfs_chan_cache(struct ieee80211_channel *c);
+void cfg80211_flush_dfs_cache(void);
+
+/* This is constructed like this so it can be used in if/else */
+static inline int for_each_rdev_check_rtnl(void)
+{
+	ASSERT_RTNL();
+	return 0;
+}
+#define for_each_rdev(rdev)						\
+	if (for_each_rdev_check_rtnl()) {} else				\
+		list_for_each_entry(rdev, &cfg80211_rdev_list, list)
+
 struct cfg80211_internal_bss {
 	struct list_head list;
 	struct list_head hidden_list;
@@ -219,22 +259,6 @@
 void cfg80211_register_wdev(struct cfg80211_registered_device *rdev,
 			    struct wireless_dev *wdev);
 
-static inline void wdev_lock(struct wireless_dev *wdev)
-	__acquires(wdev)
-{
-	mutex_lock(&wdev->mtx);
-	__acquire(wdev->mtx);
-}
-
-static inline void wdev_unlock(struct wireless_dev *wdev)
-	__releases(wdev)
-{
-	__release(wdev->mtx);
-	mutex_unlock(&wdev->mtx);
-}
-
-#define ASSERT_WDEV_LOCK(wdev) lockdep_assert_held(&(wdev)->mtx)
-
 static inline bool cfg80211_has_monitors_only(struct cfg80211_registered_device *rdev)
 {
 	lockdep_assert_held(&rdev->wiphy.mtx);
@@ -270,7 +294,7 @@
 			struct ieee80211_channel *channel;
 		} ij;
 		struct {
-			u8 bssid[ETH_ALEN];
+			u8 peer_addr[ETH_ALEN];
 			const u8 *td_bitmap;
 			u8 td_bitmap_len;
 		} pa;
@@ -289,12 +313,17 @@
 };
 
 struct cfg80211_cqm_config {
+	struct rcu_head rcu_head;
 	u32 rssi_hyst;
 	s32 last_rssi_event_value;
+	enum nl80211_cqm_rssi_threshold_event last_rssi_event_type;
 	int n_rssi_thresholds;
-	s32 rssi_thresholds[];
+	s32 rssi_thresholds[] __counted_by(n_rssi_thresholds);
 };
 
+void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy,
+				   struct wiphy_work *work);
+
 void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev);
 
 /* free object */
@@ -318,8 +347,6 @@
 			 struct cfg80211_ibss_params *params,
 			 struct cfg80211_cached_keys *connkeys);
 void cfg80211_clear_ibss(struct net_device *dev, bool nowext);
-int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
-			  struct net_device *dev, bool nowext);
 int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
 			struct net_device *dev, bool nowext);
 void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
@@ -334,8 +361,6 @@
 			 struct net_device *dev,
 			 struct mesh_setup *setup,
 			 const struct mesh_config *conf);
-int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
-			  struct net_device *dev);
 int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
 			struct net_device *dev);
 int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
@@ -343,21 +368,13 @@
 			      struct cfg80211_chan_def *chandef);
 
 /* OCB */
-int __cfg80211_join_ocb(struct cfg80211_registered_device *rdev,
-			struct net_device *dev,
-			struct ocb_setup *setup);
 int cfg80211_join_ocb(struct cfg80211_registered_device *rdev,
 		      struct net_device *dev,
 		      struct ocb_setup *setup);
-int __cfg80211_leave_ocb(struct cfg80211_registered_device *rdev,
-			 struct net_device *dev);
 int cfg80211_leave_ocb(struct cfg80211_registered_device *rdev,
 		       struct net_device *dev);
 
 /* AP */
-int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
-		       struct net_device *dev, int link,
-		       bool notify);
 int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
 		     struct net_device *dev, int link,
 		     bool notify);
@@ -411,7 +428,7 @@
 			bool wextev);
 void __cfg80211_roamed(struct wireless_dev *wdev,
 		       struct cfg80211_roam_info *info);
-void __cfg80211_port_authorized(struct wireless_dev *wdev, const u8 *bssid,
+void __cfg80211_port_authorized(struct wireless_dev *wdev, const u8 *peer_addr,
 				const u8 *td_bitmap, u8 td_bitmap_len);
 int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
 			      struct wireless_dev *wdev);
@@ -435,7 +452,7 @@
 int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
 				   struct key_params *params, int key_idx,
 				   bool pairwise, const u8 *mac_addr);
-void __cfg80211_scan_done(struct work_struct *wk);
+void __cfg80211_scan_done(struct wiphy *wiphy, struct wiphy_work *wk);
 void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
 			   bool send_message);
 void cfg80211_add_sched_scan_req(struct cfg80211_registered_device *rdev,
@@ -453,6 +470,8 @@
 			  struct net_device *dev, enum nl80211_iftype ntype,
 			  struct vif_params *params);
 void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
+void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev,
+				  struct wiphy_work *end);
 void cfg80211_process_wdev_events(struct wireless_dev *wdev);
 
 bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
@@ -462,29 +481,12 @@
 
 extern struct work_struct cfg80211_disconnect_work;
 
-/**
- * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable
- * @wiphy: the wiphy to validate against
- * @chandef: the channel definition to check
- *
- * Checks if chandef is usable and we can/need start CAC on such channel.
- *
- * Return: true if all channels available and at least
- *	   one channel requires CAC (NL80211_DFS_USABLE)
- */
-bool cfg80211_chandef_dfs_usable(struct wiphy *wiphy,
-				 const struct cfg80211_chan_def *chandef);
-
 void cfg80211_set_dfs_state(struct wiphy *wiphy,
 			    const struct cfg80211_chan_def *chandef,
 			    enum nl80211_dfs_state dfs_state);
 
 void cfg80211_dfs_channels_update_work(struct work_struct *work);
 
-unsigned int
-cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
-			      const struct cfg80211_chan_def *chandef);
-
 void cfg80211_sched_dfs_chan_update(struct cfg80211_registered_device *rdev);
 
 int
@@ -533,8 +535,6 @@
 void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
 			       enum nl80211_iftype iftype, int num);
 
-void __cfg80211_leave(struct cfg80211_registered_device *rdev,
-		      struct wireless_dev *wdev);
 void cfg80211_leave(struct cfg80211_registered_device *rdev,
 		    struct wireless_dev *wdev);
 
@@ -559,8 +559,6 @@
 #define CFG80211_DEV_WARN_ON(cond)	({bool __r = (cond); __r; })
 #endif
 
-void cfg80211_cqm_config_free(struct wireless_dev *wdev);
-
 void cfg80211_release_pmsr(struct wireless_dev *wdev, u32 portid);
 void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev);
 void cfg80211_pmsr_free_wk(struct work_struct *work);
@@ -569,5 +567,6 @@
 void cfg80211_remove_links(struct wireless_dev *wdev);
 int cfg80211_remove_virtual_intf(struct cfg80211_registered_device *rdev,
 				 struct wireless_dev *wdev);
+void cfg80211_wdev_release_link_bsses(struct wireless_dev *wdev, u16 link_mask);
 
 #endif /* __NET_WIRELESS_CORE_H */
diff -ruw linux-6.4/net/wireless/ibss.c linux-6.4-fbx/net/wireless/ibss.c
--- linux-6.4/net/wireless/ibss.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/ibss.c	2023-11-07 13:38:44.102257784 +0100
@@ -3,7 +3,7 @@
  * Some IBSS support code for cfg80211.
  *
  * Copyright 2009	Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2020-2022 Intel Corporation
+ * Copyright (C) 2020-2023 Intel Corporation
  */
 
 #include <linux/etherdevice.h>
@@ -93,7 +93,6 @@
 	int err;
 
 	lockdep_assert_held(&rdev->wiphy.mtx);
-	ASSERT_WDEV_LOCK(wdev);
 
 	if (wdev->u.ibss.ssid_len)
 		return -EALREADY;
@@ -151,13 +150,13 @@
 	return 0;
 }
 
-static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
+void cfg80211_clear_ibss(struct net_device *dev, bool nowext)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	int i;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	kfree_sensitive(wdev->connect_keys);
 	wdev->connect_keys = NULL;
@@ -187,22 +186,13 @@
 	cfg80211_sched_dfs_chan_update(rdev);
 }
 
-void cfg80211_clear_ibss(struct net_device *dev, bool nowext)
-{
-	struct wireless_dev *wdev = dev->ieee80211_ptr;
-
-	wdev_lock(wdev);
-	__cfg80211_clear_ibss(dev, nowext);
-	wdev_unlock(wdev);
-}
-
-int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
+int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
 			  struct net_device *dev, bool nowext)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	int err;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (!wdev->u.ibss.ssid_len)
 		return -ENOLINK;
@@ -213,24 +203,11 @@
 		return err;
 
 	wdev->conn_owner_nlportid = 0;
-	__cfg80211_clear_ibss(dev, nowext);
+	cfg80211_clear_ibss(dev, nowext);
 
 	return 0;
 }
 
-int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
-			struct net_device *dev, bool nowext)
-{
-	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	int err;
-
-	wdev_lock(wdev);
-	err = __cfg80211_leave_ibss(rdev, dev, nowext);
-	wdev_unlock(wdev);
-
-	return err;
-}
-
 #ifdef CONFIG_CFG80211_WEXT
 int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
 			    struct wireless_dev *wdev)
@@ -239,7 +216,7 @@
 	enum nl80211_band band;
 	int i, err;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (!wdev->wext.ibss.beacon_interval)
 		wdev->wext.ibss.beacon_interval = 100;
@@ -336,11 +313,9 @@
 	if (wdev->wext.ibss.chandef.chan == chan)
 		return 0;
 
-	wdev_lock(wdev);
 	err = 0;
 	if (wdev->u.ibss.ssid_len)
-		err = __cfg80211_leave_ibss(rdev, dev, true);
-	wdev_unlock(wdev);
+		err = cfg80211_leave_ibss(rdev, dev, true);
 
 	if (err)
 		return err;
@@ -354,11 +329,7 @@
 		wdev->wext.ibss.channel_fixed = false;
 	}
 
-	wdev_lock(wdev);
-	err = cfg80211_ibss_wext_join(rdev, wdev);
-	wdev_unlock(wdev);
-
-	return err;
+	return cfg80211_ibss_wext_join(rdev, wdev);
 }
 
 int cfg80211_ibss_wext_giwfreq(struct net_device *dev,
@@ -372,12 +343,10 @@
 	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
 		return -EINVAL;
 
-	wdev_lock(wdev);
 	if (wdev->u.ibss.current_bss)
 		chan = wdev->u.ibss.current_bss->pub.channel;
 	else if (wdev->wext.ibss.chandef.chan)
 		chan = wdev->wext.ibss.chandef.chan;
-	wdev_unlock(wdev);
 
 	if (chan) {
 		freq->m = chan->center_freq;
@@ -405,11 +374,9 @@
 	if (!rdev->ops->join_ibss)
 		return -EOPNOTSUPP;
 
-	wdev_lock(wdev);
 	err = 0;
 	if (wdev->u.ibss.ssid_len)
-		err = __cfg80211_leave_ibss(rdev, dev, true);
-	wdev_unlock(wdev);
+		err = cfg80211_leave_ibss(rdev, dev, true);
 
 	if (err)
 		return err;
@@ -422,11 +389,7 @@
 	wdev->wext.ibss.ssid = wdev->u.ibss.ssid;
 	wdev->wext.ibss.ssid_len = len;
 
-	wdev_lock(wdev);
-	err = cfg80211_ibss_wext_join(rdev, wdev);
-	wdev_unlock(wdev);
-
-	return err;
+	return cfg80211_ibss_wext_join(rdev, wdev);
 }
 
 int cfg80211_ibss_wext_giwessid(struct net_device *dev,
@@ -441,7 +404,6 @@
 
 	data->flags = 0;
 
-	wdev_lock(wdev);
 	if (wdev->u.ibss.ssid_len) {
 		data->flags = 1;
 		data->length = wdev->u.ibss.ssid_len;
@@ -451,7 +413,6 @@
 		data->length = wdev->wext.ibss.ssid_len;
 		memcpy(ssid, wdev->wext.ibss.ssid, data->length);
 	}
-	wdev_unlock(wdev);
 
 	return 0;
 }
@@ -491,11 +452,9 @@
 	    ether_addr_equal(bssid, wdev->wext.ibss.bssid))
 		return 0;
 
-	wdev_lock(wdev);
 	err = 0;
 	if (wdev->u.ibss.ssid_len)
-		err = __cfg80211_leave_ibss(rdev, dev, true);
-	wdev_unlock(wdev);
+		err = cfg80211_leave_ibss(rdev, dev, true);
 
 	if (err)
 		return err;
@@ -506,11 +465,7 @@
 	} else
 		wdev->wext.ibss.bssid = NULL;
 
-	wdev_lock(wdev);
-	err = cfg80211_ibss_wext_join(rdev, wdev);
-	wdev_unlock(wdev);
-
-	return err;
+	return cfg80211_ibss_wext_join(rdev, wdev);
 }
 
 int cfg80211_ibss_wext_giwap(struct net_device *dev,
@@ -525,7 +480,6 @@
 
 	ap_addr->sa_family = ARPHRD_ETHER;
 
-	wdev_lock(wdev);
 	if (wdev->u.ibss.current_bss)
 		memcpy(ap_addr->sa_data, wdev->u.ibss.current_bss->pub.bssid,
 		       ETH_ALEN);
@@ -534,8 +488,6 @@
 	else
 		eth_zero_addr(ap_addr->sa_data);
 
-	wdev_unlock(wdev);
-
 	return 0;
 }
 #endif
diff -ruw linux-6.4/net/wireless/mesh.c linux-6.4-fbx/net/wireless/mesh.c
--- linux-6.4/net/wireless/mesh.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/mesh.c	2023-11-07 13:38:44.102257784 +0100
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Portions
- * Copyright (C) 2022 Intel Corporation
+ * Copyright (C) 2022-2023 Intel Corporation
  */
 #include <linux/ieee80211.h>
 #include <linux/export.h>
@@ -109,7 +109,7 @@
 
 	BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN != IEEE80211_MAX_MESH_ID_LEN);
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
 		return -EOPNOTSUPP;
@@ -172,7 +172,6 @@
 	 * basic rates
 	 */
 	if (!setup->basic_rates) {
-		enum nl80211_bss_scan_width scan_width;
 		struct ieee80211_supported_band *sband =
 				rdev->wiphy.bands[setup->chandef.chan->band];
 
@@ -193,9 +192,7 @@
 				}
 			}
 		} else {
-			scan_width = cfg80211_chandef_to_scan_width(&setup->chandef);
-			setup->basic_rates = ieee80211_mandatory_rates(sband,
-								       scan_width);
+			setup->basic_rates = ieee80211_mandatory_rates(sband);
 		}
 	}
 
@@ -257,13 +254,13 @@
 	return 0;
 }
 
-int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
+int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
 			  struct net_device *dev)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	int err;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
 		return -EOPNOTSUPP;
@@ -287,16 +284,3 @@
 
 	return err;
 }
-
-int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
-			struct net_device *dev)
-{
-	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	int err;
-
-	wdev_lock(wdev);
-	err = __cfg80211_leave_mesh(rdev, dev);
-	wdev_unlock(wdev);
-
-	return err;
-}
diff -ruw linux-6.4/net/wireless/mlme.c linux-6.4-fbx/net/wireless/mlme.c
--- linux-6.4/net/wireless/mlme.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/mlme.c	2023-11-07 13:38:44.102257784 +0100
@@ -4,7 +4,7 @@
  *
  * Copyright (c) 2009, Jouni Malinen <j@w1.fi>
  * Copyright (c) 2015		Intel Deutschland GmbH
- * Copyright (C) 2019-2020, 2022 Intel Corporation
+ * Copyright (C) 2019-2020, 2022-2023 Intel Corporation
  */
 
 #include <linux/kernel.h>
@@ -22,7 +22,7 @@
 
 
 void cfg80211_rx_assoc_resp(struct net_device *dev,
-			    struct cfg80211_rx_assoc_resp *data)
+			    struct cfg80211_rx_assoc_resp_data *data)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct wiphy *wiphy = wdev->wiphy;
@@ -43,16 +43,18 @@
 
 	for (link_id = 0; link_id < ARRAY_SIZE(data->links); link_id++) {
 		cr.links[link_id].status = data->links[link_id].status;
+		cr.links[link_id].bss = data->links[link_id].bss;
+
 		WARN_ON_ONCE(cr.links[link_id].status != WLAN_STATUS_SUCCESS &&
 			     (!cr.ap_mld_addr || !cr.links[link_id].bss));
 
-		cr.links[link_id].bss = data->links[link_id].bss;
 		if (!cr.links[link_id].bss)
 			continue;
 		cr.links[link_id].bssid = data->links[link_id].bss->bssid;
 		cr.links[link_id].addr = data->links[link_id].addr;
 		/* need to have local link addresses for MLO connections */
-		WARN_ON(cr.ap_mld_addr && !cr.links[link_id].addr);
+		WARN_ON(cr.ap_mld_addr &&
+			!is_valid_ether_addr(cr.links[link_id].addr));
 
 		BUG_ON(!cr.links[link_id].bss->channel);
 
@@ -149,7 +151,7 @@
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct ieee80211_mgmt *mgmt = (void *)buf;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	trace_cfg80211_rx_mlme_mgmt(dev, buf, len);
 
@@ -214,7 +216,7 @@
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct ieee80211_mgmt *mgmt = (void *)buf;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	trace_cfg80211_tx_mlme_mgmt(dev, buf, len, reconnect);
 
@@ -262,7 +264,7 @@
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (!req->bss)
 		return -ENOENT;
@@ -281,6 +283,11 @@
 	    ether_addr_equal(req->bss->bssid, wdev->u.client.connected_addr))
 		return -EALREADY;
 
+	if (ether_addr_equal(req->bss->bssid, dev->dev_addr) ||
+	    (req->link_id >= 0 &&
+	     ether_addr_equal(req->ap_mld_addr, dev->dev_addr)))
+		return -EINVAL;
+
 	return rdev_auth(rdev, dev, req);
 }
 
@@ -326,7 +333,7 @@
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	int err, i, j;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	for (i = 1; i < ARRAY_SIZE(req->links); i++) {
 		if (!req->links[i].bss)
@@ -335,6 +342,9 @@
 			if (req->links[i].bss == req->links[j].bss)
 				return -EINVAL;
 		}
+
+		if (ether_addr_equal(req->links[i].bss->bssid, dev->dev_addr))
+			return -EINVAL;
 	}
 
 	if (wdev->connected &&
@@ -342,6 +352,11 @@
 	     !ether_addr_equal(wdev->u.client.connected_addr, req->prev_bssid)))
 		return -EALREADY;
 
+	if ((req->bss && ether_addr_equal(req->bss->bssid, dev->dev_addr)) ||
+	    (req->link_id >= 0 &&
+	     ether_addr_equal(req->ap_mld_addr, dev->dev_addr)))
+		return -EINVAL;
+
 	cfg80211_oper_and_ht_capa(&req->ht_capa_mask,
 				  rdev->wiphy.ht_capa_mod_mask);
 	cfg80211_oper_and_vht_capa(&req->vht_capa_mask,
@@ -380,7 +395,7 @@
 		.local_state_change = local_state_change,
 	};
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (local_state_change &&
 	    (!wdev->connected ||
@@ -410,7 +425,7 @@
 	};
 	int err;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (!wdev->connected)
 		return -ENOTCONN;
@@ -433,7 +448,7 @@
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	u8 bssid[ETH_ALEN];
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (!rdev->ops->deauth)
 		return;
@@ -713,6 +728,8 @@
 	const struct ieee80211_mgmt *mgmt;
 	u16 stype;
 
+	lockdep_assert_wiphy(&rdev->wiphy);
+
 	if (!wdev->wiphy->mgmt_stypes)
 		return -EOPNOTSUPP;
 
@@ -735,8 +752,6 @@
 	    mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) {
 		int err = 0;
 
-		wdev_lock(wdev);
-
 		switch (wdev->iftype) {
 		case NL80211_IFTYPE_ADHOC:
 			/*
@@ -801,7 +816,6 @@
 			err = -EOPNOTSUPP;
 			break;
 		}
-		wdev_unlock(wdev);
 
 		if (err)
 			return err;
diff -ruw linux-6.4/net/wireless/nl80211.c linux-6.4-fbx/net/wireless/nl80211.c
--- linux-6.4/net/wireless/nl80211.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/nl80211.c	2024-01-19 17:01:19.917848451 +0100
@@ -5,7 +5,7 @@
  * Copyright 2006-2010	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright 2015-2017	Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
  */
 
 #include <linux/if.h>
@@ -106,7 +106,7 @@
 
 	ASSERT_RTNL();
 
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+	for_each_rdev(rdev) {
 		struct wireless_dev *wdev;
 
 		if (wiphy_net(&rdev->wiphy) != netns)
@@ -323,6 +323,7 @@
 	[NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED] = { .type = NLA_FLAG },
 	[NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED] = { .type = NLA_FLAG },
 	[NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK] = { .type = NLA_FLAG },
+	[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR] = { .type = NLA_U8 },
 };
 
 static const struct nla_policy
@@ -816,6 +817,7 @@
 	[NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS] = { .type = NLA_U16 },
 	[NL80211_ATTR_HW_TIMESTAMP_ENABLED] = { .type = NLA_FLAG },
 	[NL80211_ATTR_EMA_RNR_ELEMS] = { .type = NLA_NESTED },
+	[NL80211_ATTR_MLO_LINK_DISABLED] = { .type = NLA_FLAG },
 };
 
 /* policy for the key attributes */
@@ -1113,6 +1115,10 @@
 	if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_OFFSET, chan->freq_offset))
 		goto nla_put_failure;
 
+	if ((chan->flags & IEEE80211_CHAN_PSD) &&
+	    nla_put_s8(msg, NL80211_FREQUENCY_ATTR_PSD, chan->psd))
+		goto nla_put_failure;
+
 	if ((chan->flags & IEEE80211_CHAN_DISABLED) &&
 	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_DISABLED))
 		goto nla_put_failure;
@@ -1542,7 +1548,7 @@
 
 static int nl80211_key_allowed(struct wireless_dev *wdev)
 {
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_AP:
@@ -3073,7 +3079,7 @@
 		cb->args[0] = (long)state;
 	}
 
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+	for_each_rdev(rdev) {
 		if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
 			continue;
 		if (++idx <= state->start)
@@ -3081,6 +3087,7 @@
 		if (state->filter_wiphy != -1 &&
 		    state->filter_wiphy != rdev->wiphy_idx)
 			continue;
+		wiphy_lock(&rdev->wiphy);
 		/* attempt to fit multiple wiphy data chunks into the skb */
 		do {
 			ret = nl80211_send_wiphy(rdev, NL80211_CMD_NEW_WIPHY,
@@ -3107,6 +3114,7 @@
 				    cb->min_dump_alloc < 4096) {
 					cb->min_dump_alloc = 4096;
 					state->split_start = 0;
+					wiphy_unlock(&rdev->wiphy);
 					rtnl_unlock();
 					return 1;
 				}
@@ -3114,6 +3122,7 @@
 				break;
 			}
 		} while (state->split_start > 0);
+		wiphy_unlock(&rdev->wiphy);
 		break;
 	}
 	rtnl_unlock();
@@ -3418,13 +3427,8 @@
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	int link_id = nl80211_link_id_or_invalid(info->attrs);
 	struct net_device *netdev = info->user_ptr[1];
-	int ret;
-
-	wdev_lock(netdev->ieee80211_ptr);
-	ret = __nl80211_set_channel(rdev, netdev, info, link_id);
-	wdev_unlock(netdev->ieee80211_ptr);
 
-	return ret;
+	return __nl80211_set_channel(rdev, netdev, info, link_id);
 }
 
 static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
@@ -3504,6 +3508,7 @@
 		}
 
 		if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+		    netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_MONITOR &&
 		    netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) {
 			result = -EINVAL;
 			goto out;
@@ -3531,7 +3536,6 @@
 			txq_params.link_id =
 				nl80211_link_id_or_invalid(info->attrs);
 
-			wdev_lock(netdev->ieee80211_ptr);
 			if (txq_params.link_id >= 0 &&
 			    !(netdev->ieee80211_ptr->valid_links &
 			      BIT(txq_params.link_id)))
@@ -3542,7 +3546,6 @@
 			else
 				result = rdev_set_txq_params(rdev, netdev,
 							     &txq_params);
-			wdev_unlock(netdev->ieee80211_ptr);
 			if (result)
 				goto out;
 		}
@@ -3552,12 +3555,10 @@
 		int link_id = nl80211_link_id_or_invalid(info->attrs);
 
 		if (wdev) {
-			wdev_lock(wdev);
 			result = __nl80211_set_channel(
 				rdev,
 				nl80211_can_set_dev_channel(wdev) ? netdev : NULL,
 				info, link_id);
-			wdev_unlock(wdev);
 		} else {
 			result = __nl80211_set_channel(rdev, netdev, info, link_id);
 		}
@@ -3865,33 +3866,31 @@
 			goto nla_put_failure;
 	}
 
-	wdev_lock(wdev);
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_P2P_GO:
 		if (wdev->u.ap.ssid_len &&
 		    nla_put(msg, NL80211_ATTR_SSID, wdev->u.ap.ssid_len,
 			    wdev->u.ap.ssid))
-			goto nla_put_failure_locked;
+			goto nla_put_failure;
 		break;
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_P2P_CLIENT:
 		if (wdev->u.client.ssid_len &&
 		    nla_put(msg, NL80211_ATTR_SSID, wdev->u.client.ssid_len,
 			    wdev->u.client.ssid))
-			goto nla_put_failure_locked;
+			goto nla_put_failure;
 		break;
 	case NL80211_IFTYPE_ADHOC:
 		if (wdev->u.ibss.ssid_len &&
 		    nla_put(msg, NL80211_ATTR_SSID, wdev->u.ibss.ssid_len,
 			    wdev->u.ibss.ssid))
-			goto nla_put_failure_locked;
+			goto nla_put_failure;
 		break;
 	default:
 		/* nothing */
 		break;
 	}
-	wdev_unlock(wdev);
 
 	if (rdev->ops->get_txq_stats) {
 		struct cfg80211_txq_stats txqstats = {};
@@ -3938,8 +3937,6 @@
 	genlmsg_end(msg, hdr);
 	return 0;
 
- nla_put_failure_locked:
-	wdev_unlock(wdev);
  nla_put_failure:
 	genlmsg_cancel(msg, hdr);
 	return -EMSGSIZE;
@@ -3980,7 +3977,7 @@
 		filter_wiphy = cb->args[2] - 1;
 	}
 
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+	for_each_rdev(rdev) {
 		if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
 			continue;
 		if (wp_idx < wp_start) {
@@ -4186,7 +4183,6 @@
 		if (netif_running(dev))
 			return -EBUSY;
 
-		wdev_lock(wdev);
 		BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
 			     IEEE80211_MAX_MESH_ID_LEN);
 		wdev->u.mesh.id_up_len =
@@ -4194,7 +4190,6 @@
 		memcpy(wdev->u.mesh.id,
 		       nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
 		       wdev->u.mesh.id_up_len);
-		wdev_unlock(wdev);
 	}
 
 	if (info->attrs[NL80211_ATTR_4ADDR]) {
@@ -4295,7 +4290,6 @@
 	case NL80211_IFTYPE_MESH_POINT:
 		if (!info->attrs[NL80211_ATTR_MESH_ID])
 			break;
-		wdev_lock(wdev);
 		BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
 			     IEEE80211_MAX_MESH_ID_LEN);
 		wdev->u.mesh.id_up_len =
@@ -4303,7 +4297,6 @@
 		memcpy(wdev->u.mesh.id,
 		       nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
 		       wdev->u.mesh.id_up_len);
-		wdev_unlock(wdev);
 		break;
 	case NL80211_IFTYPE_NAN:
 	case NL80211_IFTYPE_P2P_DEVICE:
@@ -4594,79 +4587,67 @@
 	    !(key.p.mode == NL80211_KEY_SET_TX))
 		return -EINVAL;
 
-	wdev_lock(wdev);
-
 	if (key.def) {
-		if (!rdev->ops->set_default_key) {
-			err = -EOPNOTSUPP;
-			goto out;
-		}
+		if (!rdev->ops->set_default_key)
+			return -EOPNOTSUPP;
 
 		err = nl80211_key_allowed(wdev);
 		if (err)
-			goto out;
+			return err;
 
 		err = nl80211_validate_key_link_id(info, wdev, link_id, false);
 		if (err)
-			goto out;
+			return err;
 
 		err = rdev_set_default_key(rdev, dev, link_id, key.idx,
 					   key.def_uni, key.def_multi);
 
 		if (err)
-			goto out;
+			return err;
 
 #ifdef CONFIG_CFG80211_WEXT
 		wdev->wext.default_key = key.idx;
 #endif
+		return 0;
 	} else if (key.defmgmt) {
-		if (key.def_uni || !key.def_multi) {
-			err = -EINVAL;
-			goto out;
-		}
+		if (key.def_uni || !key.def_multi)
+			return -EINVAL;
 
-		if (!rdev->ops->set_default_mgmt_key) {
-			err = -EOPNOTSUPP;
-			goto out;
-		}
+		if (!rdev->ops->set_default_mgmt_key)
+			return -EOPNOTSUPP;
 
 		err = nl80211_key_allowed(wdev);
 		if (err)
-			goto out;
+			return err;
 
 		err = nl80211_validate_key_link_id(info, wdev, link_id, false);
 		if (err)
-			goto out;
+			return err;
 
 		err = rdev_set_default_mgmt_key(rdev, dev, link_id, key.idx);
 		if (err)
-			goto out;
+			return err;
 
 #ifdef CONFIG_CFG80211_WEXT
 		wdev->wext.default_mgmt_key = key.idx;
 #endif
+		return 0;
 	} else if (key.defbeacon) {
-		if (key.def_uni || !key.def_multi) {
-			err = -EINVAL;
-			goto out;
-		}
+		if (key.def_uni || !key.def_multi)
+			return -EINVAL;
 
-		if (!rdev->ops->set_default_beacon_key) {
-			err = -EOPNOTSUPP;
-			goto out;
-		}
+		if (!rdev->ops->set_default_beacon_key)
+			return -EOPNOTSUPP;
 
 		err = nl80211_key_allowed(wdev);
 		if (err)
-			goto out;
+			return err;
 
 		err = nl80211_validate_key_link_id(info, wdev, link_id, false);
 		if (err)
-			goto out;
+			return err;
 
-		err = rdev_set_default_beacon_key(rdev, dev, link_id, key.idx);
-		if (err)
-			goto out;
+		return rdev_set_default_beacon_key(rdev, dev, link_id, key.idx);
 	} else if (key.p.mode == NL80211_KEY_SET_TX &&
 		   wiphy_ext_feature_isset(&rdev->wiphy,
 					   NL80211_EXT_FEATURE_EXT_KEY_ID)) {
@@ -4675,25 +4656,19 @@
 		if (info->attrs[NL80211_ATTR_MAC])
 			mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
 
-		if (!mac_addr || key.idx < 0 || key.idx > 1) {
-			err = -EINVAL;
-			goto out;
-		}
+		if (!mac_addr || key.idx < 0 || key.idx > 1)
+			return -EINVAL;
 
 		err = nl80211_validate_key_link_id(info, wdev, link_id, true);
 		if (err)
-			goto out;
+			return err;
 
-		err = rdev_add_key(rdev, dev, link_id, key.idx,
+		return rdev_add_key(rdev, dev, link_id, key.idx,
 				   NL80211_KEYTYPE_PAIRWISE,
 				   mac_addr, &key.p);
-	} else {
-		err = -EINVAL;
 	}
- out:
-	wdev_unlock(wdev);
 
-	return err;
+	return -EINVAL;
 }
 
 static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
@@ -4746,7 +4721,6 @@
 		return -EINVAL;
 	}
 
-	wdev_lock(wdev);
 	err = nl80211_key_allowed(wdev);
 	if (err)
 		GENL_SET_ERR_MSG(info, "key not allowed");
@@ -4762,7 +4736,6 @@
 		if (err)
 			GENL_SET_ERR_MSG(info, "key addition failed");
 	}
-	wdev_unlock(wdev);
 
 	return err;
 }
@@ -4803,7 +4776,6 @@
 	if (!rdev->ops->del_key)
 		return -EOPNOTSUPP;
 
-	wdev_lock(wdev);
 	err = nl80211_key_allowed(wdev);
 
 	if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
@@ -4827,7 +4799,6 @@
 			wdev->wext.default_mgmt_key = -1;
 	}
 #endif
-	wdev_unlock(wdev);
 
 	return err;
 }
@@ -4885,13 +4856,12 @@
 	acl = kzalloc(struct_size(acl, mac_addrs, n_entries), GFP_KERNEL);
 	if (!acl)
 		return ERR_PTR(-ENOMEM);
+	acl->n_acl_entries = n_entries;
 
 	nla_for_each_nested(attr, info->attrs[NL80211_ATTR_MAC_ADDRS], tmp) {
 		memcpy(acl->mac_addrs[i].addr, nla_data(attr), ETH_ALEN);
 		i++;
 	}
-
-	acl->n_acl_entries = n_entries;
 	acl->acl_policy = acl_policy;
 
 	return acl;
@@ -5129,6 +5099,106 @@
 	return true;
 }
 
+static int eht_build_mcs_mask(struct genl_info *info,
+			      const struct ieee80211_sta_he_cap *he_cap,
+			      const struct ieee80211_sta_eht_cap *eht_cap,
+			      u16 *mcs_mask)
+{
+	struct net_device *dev = info->user_ptr[1];
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	u8 mcs_nss_len, nss, mcs_7 = 0, mcs_9 = 0, mcs_11 = 0, mcs_13 = 0;
+	bool mcs_14 = false, mcs_15 = false;
+
+	mcs_nss_len = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
+						 &eht_cap->eht_cap_elem,
+						wdev->iftype ==
+						NL80211_IFTYPE_STATION);
+
+	if (eht_cap->eht_cap_elem.phy_cap_info[6] &
+	    IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP)
+		mcs_14 = true;
+
+	if (eht_cap->eht_cap_elem.phy_cap_info[6] &
+	    IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK)
+		mcs_15 = true;
+
+	if (mcs_nss_len == 4) {
+		const struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs =
+					&eht_cap->eht_mcs_nss_supp.only_20mhz;
+
+		mcs_7 = mcs->rx_tx_mcs7_max_nss;
+		mcs_9 = mcs->rx_tx_mcs9_max_nss;
+		mcs_11 = mcs->rx_tx_mcs11_max_nss;
+		mcs_13 = mcs->rx_tx_mcs13_max_nss;
+	} else {
+		const struct ieee80211_eht_mcs_nss_supp_bw *mcs;
+		enum nl80211_chan_width width;
+
+		switch (wdev->iftype) {
+		case NL80211_IFTYPE_AP:
+			width = wdev->u.ap.preset_chandef.width;
+			break;
+		case NL80211_IFTYPE_MESH_POINT:
+			width = wdev->u.mesh.chandef.width;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		switch (width) {
+		case NL80211_CHAN_WIDTH_320:
+			mcs = &eht_cap->eht_mcs_nss_supp.bw._320;
+			break;
+		case NL80211_CHAN_WIDTH_160:
+			mcs = &eht_cap->eht_mcs_nss_supp.bw._160;
+			break;
+		case NL80211_CHAN_WIDTH_80:
+		case NL80211_CHAN_WIDTH_40:
+		case NL80211_CHAN_WIDTH_20:
+			mcs = &eht_cap->eht_mcs_nss_supp.bw._80;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		mcs_7 = mcs->rx_tx_mcs9_max_nss;
+		mcs_9 = mcs->rx_tx_mcs9_max_nss;
+		mcs_11 = mcs->rx_tx_mcs11_max_nss;
+		mcs_13 = mcs->rx_tx_mcs13_max_nss;
+	}
+
+	for (nss = 0; nss < NL80211_EHT_NSS_MAX; nss++) {
+		if (nss == 0) {
+			if (mcs_14)
+				mcs_mask[nss] |= 0x4000;
+			if (mcs_15)
+				mcs_mask[nss] |= 0x8000;
+		}
+
+		if (!mcs_7)
+			continue;
+		mcs_mask[nss] |= 0x00FF;
+		mcs_7--;
+
+		if (!mcs_9)
+			continue;
+		mcs_mask[nss] |= 0x0300;
+		mcs_9--;
+
+		if (!mcs_11)
+			continue;
+		mcs_mask[nss] |= 0x0C00;
+		mcs_11--;
+
+		if (!mcs_13)
+			continue;
+		mcs_mask[nss] |= 0x3000;
+		mcs_13--;
+	}
+
+	return 0;
+}
+
 static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
 					 struct nlattr *attrs[],
 					 enum nl80211_attrs attr,
@@ -5149,6 +5219,7 @@
 	/* Default to all rates enabled */
 	for (i = 0; i < NUM_NL80211_BANDS; i++) {
 		const struct ieee80211_sta_he_cap *he_cap;
+		const struct ieee80211_sta_eht_cap *eht_cap;
 
 		if (!default_all_enabled)
 			break;
@@ -5175,6 +5246,13 @@
 		he_tx_mcs_map = he_get_txmcsmap(info, link_id, he_cap);
 		he_build_mcs_mask(he_tx_mcs_map, mask->control[i].he_mcs);
 
+		eht_cap = ieee80211_get_eht_iftype_cap(sband, wdev->iftype);
+		if (!eht_cap)
+			continue;
+
+		eht_build_mcs_mask(info, he_cap, eht_cap,
+				   mask->control[i].eht_mcs);
+
 		mask->control[i].he_gi = 0xFF;
 		mask->control[i].he_ltf = 0xFF;
 	}
@@ -5254,7 +5332,8 @@
 			 */
 			if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported ||
 			      rdev->wiphy.bands[band]->vht_cap.vht_supported ||
-			      ieee80211_get_he_iftype_cap(sband, wdev->iftype)))
+			      ieee80211_get_he_iftype_cap(sband, wdev->iftype) ||
+			      ieee80211_get_eht_iftype_cap(sband, wdev->iftype)))
 				return -EINVAL;
 
 			for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
@@ -5269,6 +5348,10 @@
 				if (mask->control[band].he_mcs[i])
 					goto out;
 
+			for (i = 0; i < NL80211_EHT_NSS_MAX; i++)
+				if (mask->control[band].eht_mcs[i])
+					goto out;
+
 			/* legacy and mcs rates may not be both empty */
 			return -EINVAL;
 		}
@@ -5426,19 +5509,22 @@
 	if (!wiphy->mbssid_max_interfaces)
 		return ERR_PTR(-EINVAL);
 
-	nla_for_each_nested(nl_elems, attrs, rem_elems)
+	nla_for_each_nested(nl_elems, attrs, rem_elems) {
+		if (num_elems >= 255)
+			return ERR_PTR(-EINVAL);
 		num_elems++;
+	}
 
 	elems = kzalloc(struct_size(elems, elem, num_elems), GFP_KERNEL);
 	if (!elems)
 		return ERR_PTR(-ENOMEM);
+	elems->cnt = num_elems;
 
 	nla_for_each_nested(nl_elems, attrs, rem_elems) {
 		elems->elem[i].data = nla_data(nl_elems);
 		elems->elem[i].len = nla_len(nl_elems);
 		i++;
 	}
-	elems->cnt = num_elems;
 	return elems;
 }
 
@@ -5464,13 +5550,13 @@
 	elems = kzalloc(struct_size(elems, elem, num_elems), GFP_KERNEL);
 	if (!elems)
 		return ERR_PTR(-ENOMEM);
+	elems->cnt = num_elems;
 
 	nla_for_each_nested(nl_elems, attrs, rem_elems) {
 		elems->elem[i].data = nla_data(nl_elems);
 		elems->elem[i].len = nla_len(nl_elems);
 		i++;
 	}
-	elems->cnt = num_elems;
 	return elems;
 }
 
@@ -5494,6 +5580,7 @@
 		!nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_DISABLED]);
 	he_bss_color->partial =
 		nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_PARTIAL]);
+	he_bss_color->collision_detection_enabled = true;
 
 	return 0;
 }
@@ -5664,11 +5751,10 @@
 
 static int nl80211_parse_fils_discovery(struct cfg80211_registered_device *rdev,
 					struct nlattr *attrs,
-					struct cfg80211_ap_settings *params)
+					struct cfg80211_fils_discovery *fd)
 {
 	struct nlattr *tb[NL80211_FILS_DISCOVERY_ATTR_MAX + 1];
 	int ret;
-	struct cfg80211_fils_discovery *fd = &params->fils_discovery;
 
 	if (!wiphy_ext_feature_isset(&rdev->wiphy,
 				     NL80211_EXT_FEATURE_FILS_DISCOVERY))
@@ -5679,6 +5765,13 @@
 	if (ret)
 		return ret;
 
+	if (!tb[NL80211_FILS_DISCOVERY_ATTR_INT_MIN] &&
+	    !tb[NL80211_FILS_DISCOVERY_ATTR_INT_MAX] &&
+	    !tb[NL80211_FILS_DISCOVERY_ATTR_TMPL]) {
+		fd->update = true;
+		return 0;
+	}
+
 	if (!tb[NL80211_FILS_DISCOVERY_ATTR_INT_MIN] ||
 	    !tb[NL80211_FILS_DISCOVERY_ATTR_INT_MAX] ||
 	    !tb[NL80211_FILS_DISCOVERY_ATTR_TMPL])
@@ -5688,19 +5781,17 @@
 	fd->tmpl = nla_data(tb[NL80211_FILS_DISCOVERY_ATTR_TMPL]);
 	fd->min_interval = nla_get_u32(tb[NL80211_FILS_DISCOVERY_ATTR_INT_MIN]);
 	fd->max_interval = nla_get_u32(tb[NL80211_FILS_DISCOVERY_ATTR_INT_MAX]);
-
+	fd->update = true;
 	return 0;
 }
 
 static int
 nl80211_parse_unsol_bcast_probe_resp(struct cfg80211_registered_device *rdev,
 				     struct nlattr *attrs,
-				     struct cfg80211_ap_settings *params)
+				     struct cfg80211_unsol_bcast_probe_resp *presp)
 {
 	struct nlattr *tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX + 1];
 	int ret;
-	struct cfg80211_unsol_bcast_probe_resp *presp =
-					&params->unsol_bcast_probe_resp;
 
 	if (!wiphy_ext_feature_isset(&rdev->wiphy,
 				     NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP))
@@ -5711,6 +5802,12 @@
 	if (ret)
 		return ret;
 
+	if (!tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT] &&
+	    !tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL]) {
+		presp->update = true;
+		return 0;
+	}
+
 	if (!tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT] ||
 	    !tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL])
 		return -EINVAL;
@@ -5718,6 +5815,7 @@
 	presp->tmpl = nla_data(tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL]);
 	presp->tmpl_len = nla_len(tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL]);
 	presp->interval = nla_get_u32(tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT]);
+	presp->update = true;
 	return 0;
 }
 
@@ -5902,6 +6000,21 @@
 	nlmsg_free(msg);
 }
 
+static int nl80211_validate_ap_phy_operation(struct cfg80211_ap_settings *params)
+{
+	struct ieee80211_channel *channel = params->chandef.chan;
+
+	if ((params->he_cap ||  params->he_oper) &&
+	    (channel->flags & IEEE80211_CHAN_NO_HE))
+		return -EOPNOTSUPP;
+
+	if ((params->eht_cap || params->eht_oper) &&
+	    (channel->flags & IEEE80211_CHAN_NO_EHT))
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
 static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -6065,20 +6178,18 @@
 		goto out;
 	}
 
-	wdev_lock(wdev);
-
 	if (info->attrs[NL80211_ATTR_TX_RATES]) {
 		err = nl80211_parse_tx_bitrate_mask(info, info->attrs,
 						    NL80211_ATTR_TX_RATES,
 						    &params->beacon_rate,
 						    dev, false, link_id);
 		if (err)
-			goto out_unlock;
+			goto out;
 
 		err = validate_beacon_tx_rate(rdev, params->chandef.chan->band,
 					      &params->beacon_rate);
 		if (err)
-			goto out_unlock;
+			goto out;
 	}
 
 	if (info->attrs[NL80211_ATTR_SMPS_MODE]) {
@@ -6091,19 +6202,19 @@
 			if (!(rdev->wiphy.features &
 			      NL80211_FEATURE_STATIC_SMPS)) {
 				err = -EINVAL;
-				goto out_unlock;
+				goto out;
 			}
 			break;
 		case NL80211_SMPS_DYNAMIC:
 			if (!(rdev->wiphy.features &
 			      NL80211_FEATURE_DYNAMIC_SMPS)) {
 				err = -EINVAL;
-				goto out_unlock;
+				goto out;
 			}
 			break;
 		default:
 			err = -EINVAL;
-			goto out_unlock;
+			goto out;
 		}
 	} else {
 		params->smps_mode = NL80211_SMPS_OFF;
@@ -6112,7 +6223,7 @@
 	params->pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
 	if (params->pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) {
 		err = -EOPNOTSUPP;
-		goto out_unlock;
+		goto out;
 	}
 
 	if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
@@ -6120,7 +6231,7 @@
 		if (IS_ERR(params->acl)) {
 			err = PTR_ERR(params->acl);
 			params->acl = NULL;
-			goto out_unlock;
+			goto out;
 		}
 	}
 
@@ -6132,23 +6243,23 @@
 					info->attrs[NL80211_ATTR_HE_OBSS_PD],
 					&params->he_obss_pd);
 		if (err)
-			goto out_unlock;
+			goto out;
 	}
 
 	if (info->attrs[NL80211_ATTR_FILS_DISCOVERY]) {
 		err = nl80211_parse_fils_discovery(rdev,
 						   info->attrs[NL80211_ATTR_FILS_DISCOVERY],
-						   params);
+						   &params->fils_discovery);
 		if (err)
-			goto out_unlock;
+			goto out;
 	}
 
 	if (info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP]) {
 		err = nl80211_parse_unsol_bcast_probe_resp(
 			rdev, info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP],
-			params);
+			&params->unsol_bcast_probe_resp);
 		if (err)
-			goto out_unlock;
+			goto out;
 	}
 
 	if (info->attrs[NL80211_ATTR_MBSSID_CONFIG]) {
@@ -6159,17 +6270,21 @@
 							params->beacon.mbssid_ies->cnt :
 							0);
 		if (err)
-			goto out_unlock;
+			goto out;
 	}
 
 	if (!params->mbssid_config.ema && params->beacon.rnr_ies) {
 		err = -EINVAL;
-		goto out_unlock;
+		goto out;
 	}
 
 	err = nl80211_calculate_ap_params(params);
 	if (err)
-		goto out_unlock;
+		goto out;
+
+	err = nl80211_validate_ap_phy_operation(params);
+	if (err)
+		goto out;
 
 	if (info->attrs[NL80211_ATTR_AP_SETTINGS_FLAGS])
 		params->flags = nla_get_u32(
@@ -6181,7 +6296,7 @@
 	    info->attrs[NL80211_ATTR_SOCKET_OWNER] &&
 	    wdev->conn_owner_nlportid != info->snd_portid) {
 		err = -EINVAL;
-		goto out_unlock;
+		goto out;
 	}
 
 	/* FIXME: validate MLO/link-id against driver capabilities */
@@ -6199,8 +6314,6 @@
 
 		nl80211_send_ap_started(wdev, link_id);
 	}
-out_unlock:
-	wdev_unlock(wdev);
 out:
 	kfree(params->acl);
 	kfree(params->beacon.mbssid_ies);
@@ -6220,7 +6333,8 @@
 	unsigned int link_id = nl80211_link_id(info->attrs);
 	struct net_device *dev = info->user_ptr[1];
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_beacon_data params;
+	struct cfg80211_ap_update *params;
+	struct nlattr *attr;
 	int err;
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
@@ -6233,17 +6347,37 @@
 	if (!wdev->links[link_id].ap.beacon_interval)
 		return -EINVAL;
 
-	err = nl80211_parse_beacon(rdev, info->attrs, &params, info->extack);
+	params = kzalloc(sizeof(*params), GFP_KERNEL);
+	if (!params)
+		return -ENOMEM;
+
+	err = nl80211_parse_beacon(rdev, info->attrs, &params->beacon,
+				   info->extack);
 	if (err)
 		goto out;
 
-	wdev_lock(wdev);
-	err = rdev_change_beacon(rdev, dev, &params);
-	wdev_unlock(wdev);
+	attr = info->attrs[NL80211_ATTR_FILS_DISCOVERY];
+	if (attr) {
+		err = nl80211_parse_fils_discovery(rdev, attr,
+						   &params->fils_discovery);
+		if (err)
+			goto out;
+	}
+
+	attr = info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP];
+	if (attr) {
+		err = nl80211_parse_unsol_bcast_probe_resp(rdev, attr,
+							   &params->unsol_bcast_probe_resp);
+		if (err)
+			goto out;
+	}
+
+	err = rdev_change_beacon(rdev, dev, params);
 
 out:
-	kfree(params.mbssid_ies);
-	kfree(params.rnr_ies);
+	kfree(params->beacon.mbssid_ies);
+	kfree(params->beacon.rnr_ies);
+	kfree(params);
 	return err;
 }
 
@@ -6365,12 +6499,27 @@
 		return false;
 
 	switch (info->bw) {
+	case RATE_INFO_BW_1:
+		rate_flg = NL80211_RATE_INFO_1_MHZ_WIDTH;
+		break;
+	case RATE_INFO_BW_2:
+		rate_flg = NL80211_RATE_INFO_2_MHZ_WIDTH;
+		break;
+	case RATE_INFO_BW_4:
+		rate_flg = NL80211_RATE_INFO_4_MHZ_WIDTH;
+		break;
 	case RATE_INFO_BW_5:
 		rate_flg = NL80211_RATE_INFO_5_MHZ_WIDTH;
 		break;
+	case RATE_INFO_BW_8:
+		rate_flg = NL80211_RATE_INFO_8_MHZ_WIDTH;
+		break;
 	case RATE_INFO_BW_10:
 		rate_flg = NL80211_RATE_INFO_10_MHZ_WIDTH;
 		break;
+	case RATE_INFO_BW_16:
+		rate_flg = NL80211_RATE_INFO_16_MHZ_WIDTH;
+		break;
 	default:
 		WARN_ON(1);
 		fallthrough;
@@ -6429,6 +6578,14 @@
 		    nla_put_u8(msg, NL80211_RATE_INFO_HE_RU_ALLOC,
 			       info->he_ru_alloc))
 			return false;
+	} else if (info->flags & RATE_INFO_FLAGS_S1G_MCS) {
+		if (nla_put_u8(msg, NL80211_RATE_INFO_S1G_MCS, info->mcs))
+			return false;
+		if (nla_put_u8(msg, NL80211_RATE_INFO_S1G_NSS, info->nss))
+			return false;
+		if (info->flags & RATE_INFO_FLAGS_SHORT_GI &&
+		    nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI))
+			return false;
 	} else if (info->flags & RATE_INFO_FLAGS_EHT_MCS) {
 		if (nla_put_u8(msg, NL80211_RATE_INFO_EHT_MCS, info->mcs))
 			return false;
@@ -7124,6 +7281,34 @@
 	return 0;
 }
 
+static const
+struct nla_policy nl80211_sta_info_policy[NL80211_STA_INFO_MAX+1] = {
+	[NL80211_STA_INFO_EXPECTED_THROUGHPUT] = { .type = NLA_U32 },
+};
+
+static int nl80211_parse_sta_info(struct genl_info *info,
+				  struct station_parameters *params)
+{
+	struct nlattr *tb[NL80211_STA_INFO_MAX + 1];
+
+	if (!info->attrs[NL80211_ATTR_STA_INFO])
+		return 0;
+	if (nla_parse_nested_deprecated(tb,
+					NL80211_STA_INFO_MAX,
+					info->attrs[NL80211_ATTR_STA_INFO],
+					nl80211_sta_info_policy,
+					info->extack))
+		return -EINVAL;
+
+	if (tb[NL80211_STA_INFO_EXPECTED_THROUGHPUT]) {
+		params->link_sta_params.tp_overridden = true;
+		params->link_sta_params.tp_override =
+			nla_get_u32(tb[NL80211_STA_INFO_EXPECTED_THROUGHPUT]);
+	}
+
+	return 0;
+}
+
 static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -7256,6 +7441,10 @@
 	if (err)
 		return err;
 
+	err = nl80211_parse_sta_info(info, &params);
+	if (err)
+		return err;
+
 	params.vlan = get_vlan(info, rdev);
 	if (IS_ERR(params.vlan))
 		return PTR_ERR(params.vlan);
@@ -7275,9 +7464,7 @@
 	}
 
 	/* driver will call cfg80211_check_station_change() */
-	wdev_lock(dev->ieee80211_ptr);
 	err = rdev_change_station(rdev, dev, mac_addr, &params);
-	wdev_unlock(dev->ieee80211_ptr);
 
  out_put_vlan:
 	dev_put(params.vlan);
@@ -7457,6 +7644,10 @@
 	    (params.link_sta_params.ht_capa || params.link_sta_params.vht_capa))
 		return -EINVAL;
 
+	/* Ensure that HE capabilities are set along with EHT */
+	if (params.link_sta_params.eht_capa && !params.link_sta_params.he_capa)
+		return -EINVAL;
+
 	/* When you run into this, adjust the code below for the new flag */
 	BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
 
@@ -7545,7 +7736,6 @@
 
 	/* be aware of params.vlan when changing code here */
 
-	wdev_lock(dev->ieee80211_ptr);
 	if (wdev->valid_links) {
 		if (params.link_sta_params.link_id < 0) {
 			err = -EINVAL;
@@ -7563,7 +7753,6 @@
 	}
 	err = rdev_add_station(rdev, dev, mac_addr, &params);
 out:
-	wdev_unlock(dev->ieee80211_ptr);
 	dev_put(params.vlan);
 	return err;
 }
@@ -7573,7 +7762,6 @@
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
 	struct station_del_parameters params;
-	int ret;
 
 	memset(&params, 0, sizeof(params));
 
@@ -7621,11 +7809,7 @@
 		params.reason_code = WLAN_REASON_PREV_AUTH_NOT_VALID;
 	}
 
-	wdev_lock(dev->ieee80211_ptr);
-	ret = rdev_del_station(rdev, dev, &params);
-	wdev_unlock(dev->ieee80211_ptr);
-
-	return ret;
+	return rdev_del_station(rdev, dev, &params);
 }
 
 static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq,
@@ -7944,9 +8128,7 @@
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
-	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct bss_parameters params;
-	int err;
 
 	memset(&params, 0, sizeof(params));
 	params.link_id = nl80211_link_id_or_invalid(info->attrs);
@@ -8009,11 +8191,7 @@
 	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
 		return -EOPNOTSUPP;
 
-	wdev_lock(wdev);
-	err = rdev_change_bss(rdev, dev, &params);
-	wdev_unlock(wdev);
-
-	return err;
+	return rdev_change_bss(rdev, dev, &params);
 }
 
 static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
@@ -8084,13 +8262,11 @@
 	if (!rdev->ops->get_mesh_config)
 		return -EOPNOTSUPP;
 
-	wdev_lock(wdev);
 	/* If not connected, get default parameters */
 	if (!wdev->u.mesh.id_len)
 		memcpy(&cur_params, &default_mesh_config, sizeof(cur_params));
 	else
 		err = rdev_get_mesh_config(rdev, dev, &cur_params);
-	wdev_unlock(wdev);
 
 	if (err)
 		return err;
@@ -8452,7 +8628,7 @@
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct mesh_config cfg;
+	struct mesh_config cfg = {};
 	u32 mask;
 	int err;
 
@@ -8466,15 +8642,12 @@
 	if (err)
 		return err;
 
-	wdev_lock(wdev);
 	if (!wdev->u.mesh.id_len)
 		err = -ENOLINK;
 
 	if (!err)
 		err = rdev_update_mesh_config(rdev, dev, mask, &cfg);
 
-	wdev_unlock(wdev);
-
 	return err;
 }
 
@@ -8529,6 +8702,11 @@
 				reg_rule->dfs_cac_ms))
 			goto nla_put_failure;
 
+		if ((reg_rule->flags & NL80211_RRF_PSD) &&
+		    nla_put_s8(msg, NL80211_ATTR_POWER_RULE_PSD,
+			       reg_rule->psd))
+			goto nla_put_failure;
+
 		nla_nest_end(msg, nl_reg_rule);
 	}
 
@@ -8965,7 +9143,7 @@
 	unsigned int link_id;
 	bool all_ok = true;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (!cfg80211_beaconing_iface_active(wdev))
 		return true;
@@ -9215,7 +9393,6 @@
 
 	request->n_channels = i;
 
-	wdev_lock(wdev);
 	for (i = 0; i < request->n_channels; i++) {
 		struct ieee80211_channel *chan = request->channels[i];
 
@@ -9224,12 +9401,10 @@
 			continue;
 
 		if (!cfg80211_wdev_on_sub_chan(wdev, chan, true)) {
-			wdev_unlock(wdev);
 			err = -EBUSY;
 			goto out_free;
 		}
 	}
-	wdev_unlock(wdev);
 
 	i = 0;
 	if (n_ssids) {
@@ -10235,9 +10410,7 @@
 			goto free;
 	}
 
-	wdev_lock(wdev);
 	err = rdev_channel_switch(rdev, dev, &params);
-	wdev_unlock(wdev);
 
 free:
 	kfree(params.beacon_after.mbssid_ies);
@@ -10260,7 +10433,7 @@
 	void *hdr;
 	struct nlattr *bss;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	hdr = nl80211hdr_put(msg, NETLINK_CB(cb->skb).portid, seq, flags,
 			     NL80211_CMD_NEW_SCAN_RESULTS);
@@ -10323,7 +10496,6 @@
 	    nla_put_u32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq) ||
 	    nla_put_u32(msg, NL80211_BSS_FREQUENCY_OFFSET,
 			res->channel->freq_offset) ||
-	    nla_put_u32(msg, NL80211_BSS_CHAN_WIDTH, res->scan_width) ||
 	    nla_put_u32(msg, NL80211_BSS_SEEN_MS_AGO,
 			jiffies_to_msecs(jiffies - intbss->ts)))
 		goto nla_put_failure;
@@ -10409,7 +10581,6 @@
 	/* nl80211_prepare_wdev_dump acquired it in the successful case */
 	__acquire(&rdev->wiphy.mtx);
 
-	wdev_lock(wdev);
 	spin_lock_bh(&rdev->bss_lock);
 
 	/*
@@ -10435,7 +10606,6 @@
 	}
 
 	spin_unlock_bh(&rdev->bss_lock);
-	wdev_unlock(wdev);
 
 	cb->args[2] = idx;
 	wiphy_unlock(&rdev->wiphy);
@@ -10558,9 +10728,7 @@
 	}
 
 	while (1) {
-		wdev_lock(wdev);
 		res = rdev_dump_survey(rdev, wdev->netdev, survey_idx, &survey);
-		wdev_unlock(wdev);
 		if (res == -ENOENT)
 			break;
 		if (res)
@@ -10733,9 +10901,7 @@
 	if (!req.bss)
 		return -ENOENT;
 
-	wdev_lock(dev->ieee80211_ptr);
 	err = cfg80211_mlme_auth(rdev, dev, &req);
-	wdev_unlock(dev->ieee80211_ptr);
 
 	cfg80211_put_bss(&rdev->wiphy, req.bss);
 
@@ -10945,7 +11111,8 @@
 
 		if (cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
 					   req.ie, req.ie_len)) {
-			GENL_SET_ERR_MSG(info,
+			NL_SET_ERR_MSG_ATTR(info->extack,
+					    info->attrs[NL80211_ATTR_IE],
 					 "non-inheritance makes no sense");
 			return -EINVAL;
 		}
@@ -11071,6 +11238,7 @@
 
 			if (!attrs[NL80211_ATTR_MLO_LINK_ID]) {
 				err = -EINVAL;
+				NL_SET_BAD_ATTR(info->extack, link);
 				goto free;
 			}
 
@@ -11078,6 +11246,7 @@
 			/* cannot use the same link ID again */
 			if (req.links[link_id].bss) {
 				err = -EINVAL;
+				NL_SET_BAD_ATTR(info->extack, link);
 				goto free;
 			}
 			req.links[link_id].bss =
@@ -11085,6 +11254,8 @@
 			if (IS_ERR(req.links[link_id].bss)) {
 				err = PTR_ERR(req.links[link_id].bss);
 				req.links[link_id].bss = NULL;
+				NL_SET_ERR_MSG_ATTR(info->extack,
+						    link, "Error fetching BSS for link");
 				goto free;
 			}
 
@@ -11097,7 +11268,8 @@
 				if (cfg80211_find_elem(WLAN_EID_FRAGMENT,
 						       req.links[link_id].elems,
 						       req.links[link_id].elems_len)) {
-					GENL_SET_ERR_MSG(info,
+					NL_SET_ERR_MSG_ATTR(info->extack,
+							    attrs[NL80211_ATTR_IE],
 							 "cannot deal with fragmentation");
 					err = -EINVAL;
 					goto free;
@@ -11106,12 +11278,16 @@
 				if (cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
 							   req.links[link_id].elems,
 							   req.links[link_id].elems_len)) {
-					GENL_SET_ERR_MSG(info,
+					NL_SET_ERR_MSG_ATTR(info->extack,
+							    attrs[NL80211_ATTR_IE],
 							 "cannot deal with non-inheritance");
 					err = -EINVAL;
 					goto free;
 				}
 			}
+
+			req.links[link_id].disabled =
+				nla_get_flag(attrs[NL80211_ATTR_MLO_LINK_DISABLED]);
 		}
 
 		if (!req.links[req.link_id].bss) {
@@ -11126,6 +11302,13 @@
 			goto free;
 		}
 
+		if (req.links[req.link_id].disabled) {
+			GENL_SET_ERR_MSG(info,
+					 "cannot have assoc link disabled");
+			err = -EINVAL;
+			goto free;
+		}
+
 		kfree(attrs);
 		attrs = NULL;
 	} else {
@@ -11140,7 +11323,8 @@
 
 	err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
 	if (!err) {
-		wdev_lock(dev->ieee80211_ptr);
+		struct nlattr *link;
+		int rem = 0;
 
 		err = cfg80211_mlme_assoc(rdev, dev, &req);
 
@@ -11151,7 +11335,33 @@
 			       ap_addr, ETH_ALEN);
 		}
 
-		wdev_unlock(dev->ieee80211_ptr);
+		/* Report error from first problematic link */
+		if (info->attrs[NL80211_ATTR_MLO_LINKS]) {
+			nla_for_each_nested(link,
+					    info->attrs[NL80211_ATTR_MLO_LINKS],
+					    rem) {
+				struct nlattr *link_id_attr =
+					nla_find_nested(link, NL80211_ATTR_MLO_LINK_ID);
+
+				if (!link_id_attr)
+					continue;
+
+				link_id = nla_get_u8(link_id_attr);
+
+				if (link_id == req.link_id)
+					continue;
+
+				if (!req.links[link_id].error ||
+				    WARN_ON(req.links[link_id].error > 0))
+					continue;
+
+				WARN_ON(err >= 0);
+
+				NL_SET_BAD_ATTR(info->extack, link);
+				err = req.links[link_id].error;
+				break;
+			}
+		}
 	}
 
 free:
@@ -11168,7 +11378,7 @@
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
 	const u8 *ie = NULL, *bssid;
-	int ie_len = 0, err;
+	int ie_len = 0;
 	u16 reason_code;
 	bool local_state_change;
 
@@ -11204,11 +11414,8 @@
 
 	local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
 
-	wdev_lock(dev->ieee80211_ptr);
-	err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code,
+	return cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code,
 				   local_state_change);
-	wdev_unlock(dev->ieee80211_ptr);
-	return err;
 }
 
 static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
@@ -11216,7 +11423,7 @@
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
 	const u8 *ie = NULL, *bssid;
-	int ie_len = 0, err;
+	int ie_len = 0;
 	u16 reason_code;
 	bool local_state_change;
 
@@ -11252,11 +11459,8 @@
 
 	local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
 
-	wdev_lock(dev->ieee80211_ptr);
-	err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code,
+	return cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code,
 				     local_state_change);
-	wdev_unlock(dev->ieee80211_ptr);
-	return err;
 }
 
 static bool
@@ -11434,13 +11638,11 @@
 	ibss.userspace_handles_dfs =
 		nla_get_flag(info->attrs[NL80211_ATTR_HANDLE_DFS]);
 
-	wdev_lock(dev->ieee80211_ptr);
 	err = __cfg80211_join_ibss(rdev, dev, &ibss, connkeys);
 	if (err)
 		kfree_sensitive(connkeys);
 	else if (info->attrs[NL80211_ATTR_SOCKET_OWNER])
 		dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid;
-	wdev_unlock(dev->ieee80211_ptr);
 
 	return err;
 }
@@ -11973,8 +12175,6 @@
 	if (nla_get_flag(info->attrs[NL80211_ATTR_MLO_SUPPORT]))
 		connect.flags |= CONNECT_REQ_MLO_SUPPORT;
 
-	wdev_lock(dev->ieee80211_ptr);
-
 	err = cfg80211_connect(rdev, dev, &connect, connkeys,
 			       connect.prev_bssid);
 	if (err)
@@ -11989,8 +12189,6 @@
 			eth_zero_addr(dev->ieee80211_ptr->disconnect_bssid);
 	}
 
-	wdev_unlock(dev->ieee80211_ptr);
-
 	return err;
 }
 
@@ -12004,7 +12202,6 @@
 	bool fils_sk_offload;
 	u32 auth_type;
 	u32 changed = 0;
-	int ret;
 
 	if (!rdev->ops->update_connect_params)
 		return -EOPNOTSUPP;
@@ -12065,14 +12262,10 @@
 		changed |= UPDATE_AUTH_TYPE;
 	}
 
-	wdev_lock(dev->ieee80211_ptr);
 	if (!wdev->connected)
-		ret = -ENOLINK;
-	else
-		ret = rdev_update_connect_params(rdev, dev, &connect, changed);
-	wdev_unlock(dev->ieee80211_ptr);
+		return -ENOLINK;
 
-	return ret;
+	return rdev_update_connect_params(rdev, dev, &connect, changed);
 }
 
 static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info)
@@ -12080,7 +12273,6 @@
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
 	u16 reason;
-	int ret;
 
 	if (dev->ieee80211_ptr->conn_owner_nlportid &&
 	    dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid)
@@ -12098,10 +12290,7 @@
 	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
 		return -EOPNOTSUPP;
 
-	wdev_lock(dev->ieee80211_ptr);
-	ret = cfg80211_disconnect(rdev, dev, reason, true);
-	wdev_unlock(dev->ieee80211_ptr);
-	return ret;
+	return cfg80211_disconnect(rdev, dev, reason, true);
 }
 
 static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info)
@@ -12225,6 +12414,7 @@
 	u32 peer_capability = 0;
 	u16 status_code;
 	u8 *peer;
+	int link_id;
 	bool initiator;
 
 	if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) ||
@@ -12246,8 +12436,9 @@
 	if (info->attrs[NL80211_ATTR_TDLS_PEER_CAPABILITY])
 		peer_capability =
 			nla_get_u32(info->attrs[NL80211_ATTR_TDLS_PEER_CAPABILITY]);
+	link_id = nl80211_link_id_or_invalid(info->attrs);
 
-	return rdev_tdls_mgmt(rdev, dev, peer, action_code,
+	return rdev_tdls_mgmt(rdev, dev, peer, link_id, action_code,
 			      dialog_token, status_code, peer_capability,
 			      initiator,
 			      nla_data(info->attrs[NL80211_ATTR_IE]),
@@ -12310,7 +12501,6 @@
 	if (err)
 		return err;
 
-	wdev_lock(wdev);
 	if (!cfg80211_off_channel_oper_allowed(wdev, chandef.chan)) {
 		const struct cfg80211_chan_def *oper_chandef, *compat_chandef;
 
@@ -12319,7 +12509,6 @@
 		if (WARN_ON(!oper_chandef)) {
 			/* cannot happen since we must beacon to get here */
 			WARN_ON(1);
-			wdev_unlock(wdev);
 			return -EBUSY;
 		}
 
@@ -12327,12 +12516,9 @@
 		compat_chandef = cfg80211_chandef_compatible(&chandef,
 							     oper_chandef);
 
-		if (compat_chandef != &chandef) {
-			wdev_unlock(wdev);
+		if (compat_chandef != &chandef)
 			return -EBUSY;
 		}
-	}
-	wdev_unlock(wdev);
 
 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!msg)
@@ -12391,23 +12577,18 @@
 	unsigned int link_id = nl80211_link_id(info->attrs);
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
-	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	int err;
 
 	if (!rdev->ops->set_bitrate_mask)
 		return -EOPNOTSUPP;
 
-	wdev_lock(wdev);
 	err = nl80211_parse_tx_bitrate_mask(info, info->attrs,
 					    NL80211_ATTR_TX_RATES, &mask,
 					    dev, true, link_id);
 	if (err)
-		goto out;
-
-	err = rdev_set_bitrate_mask(rdev, dev, link_id, NULL, &mask);
-out:
-	wdev_unlock(wdev);
 	return err;
+
+	return rdev_set_bitrate_mask(rdev, dev, link_id, NULL, &mask);
 }
 
 static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
@@ -12536,12 +12717,9 @@
 	if (!chandef.chan && params.offchan)
 		return -EINVAL;
 
-	wdev_lock(wdev);
 	if (params.offchan &&
-	    !cfg80211_off_channel_oper_allowed(wdev, chandef.chan)) {
-		wdev_unlock(wdev);
+	    !cfg80211_off_channel_oper_allowed(wdev, chandef.chan))
 		return -EBUSY;
-	}
 
 	params.link_id = nl80211_link_id_or_invalid(info->attrs);
 	/*
@@ -12550,11 +12728,8 @@
 	 * to the driver.
 	 */
 	if (params.link_id >= 0 &&
-	    !(wdev->valid_links & BIT(params.link_id))) {
-		wdev_unlock(wdev);
+	    !(wdev->valid_links & BIT(params.link_id)))
 		return -EINVAL;
-	}
-	wdev_unlock(wdev);
 
 	params.buf = nla_data(info->attrs[NL80211_ATTR_FRAME]);
 	params.len = nla_len(info->attrs[NL80211_ATTR_FRAME]);
@@ -12754,7 +12929,8 @@
 }
 
 static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
-				    struct net_device *dev)
+				    struct net_device *dev,
+				    struct cfg80211_cqm_config *cqm_config)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	s32 last, low, high;
@@ -12763,7 +12939,7 @@
 	int err;
 
 	/* RSSI reporting disabled? */
-	if (!wdev->cqm_config)
+	if (!cqm_config)
 		return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
 
 	/*
@@ -12772,7 +12948,7 @@
 	 * connection is established and enough beacons received to calculate
 	 * the average.
 	 */
-	if (!wdev->cqm_config->last_rssi_event_value &&
+	if (!cqm_config->last_rssi_event_value &&
 	    wdev->links[0].client.current_bss &&
 	    rdev->ops->get_station) {
 		struct station_info sinfo = {};
@@ -12786,30 +12962,30 @@
 
 		cfg80211_sinfo_release_content(&sinfo);
 		if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG))
-			wdev->cqm_config->last_rssi_event_value =
+			cqm_config->last_rssi_event_value =
 				(s8) sinfo.rx_beacon_signal_avg;
 	}
 
-	last = wdev->cqm_config->last_rssi_event_value;
-	hyst = wdev->cqm_config->rssi_hyst;
-	n = wdev->cqm_config->n_rssi_thresholds;
+	last = cqm_config->last_rssi_event_value;
+	hyst = cqm_config->rssi_hyst;
+	n = cqm_config->n_rssi_thresholds;
 
 	for (i = 0; i < n; i++) {
 		i = array_index_nospec(i, n);
-		if (last < wdev->cqm_config->rssi_thresholds[i])
+		if (last < cqm_config->rssi_thresholds[i])
 			break;
 	}
 
 	low_index = i - 1;
 	if (low_index >= 0) {
 		low_index = array_index_nospec(low_index, n);
-		low = wdev->cqm_config->rssi_thresholds[low_index] - hyst;
+		low = cqm_config->rssi_thresholds[low_index] - hyst;
 	} else {
 		low = S32_MIN;
 	}
 	if (i < n) {
 		i = array_index_nospec(i, n);
-		high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1;
+		high = cqm_config->rssi_thresholds[i] + hyst - 1;
 	} else {
 		high = S32_MAX;
 	}
@@ -12822,10 +12998,11 @@
 				u32 hysteresis)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct cfg80211_cqm_config *cqm_config = NULL, *old;
 	struct net_device *dev = info->user_ptr[1];
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	int i, err;
 	s32 prev = S32_MIN;
+	int i, err;
 
 	/* Check all values negative and sorted */
 	for (i = 0; i < n_thresholds; i++) {
@@ -12839,10 +13016,6 @@
 	    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
 		return -EOPNOTSUPP;
 
-	wdev_lock(wdev);
-	cfg80211_cqm_config_free(wdev);
-	wdev_unlock(wdev);
-
 	if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) {
 		if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */
 			return rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
@@ -12858,17 +13031,14 @@
 	if (n_thresholds == 1 && thresholds[0] == 0) /* Disabling */
 		n_thresholds = 0;
 
-	wdev_lock(wdev);
-	if (n_thresholds) {
-		struct cfg80211_cqm_config *cqm_config;
+	old = wiphy_dereference(wdev->wiphy, wdev->cqm_config);
 
+	if (n_thresholds) {
 		cqm_config = kzalloc(struct_size(cqm_config, rssi_thresholds,
 						 n_thresholds),
 				     GFP_KERNEL);
-		if (!cqm_config) {
-			err = -ENOMEM;
-			goto unlock;
-		}
+		if (!cqm_config)
+			return -ENOMEM;
 
 		cqm_config->rssi_hyst = hysteresis;
 		cqm_config->n_rssi_thresholds = n_thresholds;
@@ -12876,13 +13046,18 @@
 		       flex_array_size(cqm_config, rssi_thresholds,
 				       n_thresholds));
 
-		wdev->cqm_config = cqm_config;
+		rcu_assign_pointer(wdev->cqm_config, cqm_config);
+	} else {
+		RCU_INIT_POINTER(wdev->cqm_config, NULL);
 	}
 
-	err = cfg80211_cqm_rssi_update(rdev, dev);
-
-unlock:
-	wdev_unlock(wdev);
+	err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
+	if (err) {
+		rcu_assign_pointer(wdev->cqm_config, old);
+		kfree_rcu(cqm_config, rcu_head);
+	} else {
+		kfree_rcu(old, rcu_head);
+	}
 
 	return err;
 }
@@ -13066,11 +13241,9 @@
 		setup.control_port_over_nl80211 = true;
 	}
 
-	wdev_lock(dev->ieee80211_ptr);
 	err = __cfg80211_join_mesh(rdev, dev, &setup, &cfg);
 	if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER])
 		dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid;
-	wdev_unlock(dev->ieee80211_ptr);
 
 	return err;
 }
@@ -14014,21 +14187,13 @@
 	if (tb[NL80211_REKEY_DATA_AKM])
 		rekey_data.akm = nla_get_u32(tb[NL80211_REKEY_DATA_AKM]);
 
-	wdev_lock(wdev);
-	if (!wdev->connected) {
-		err = -ENOTCONN;
-		goto out;
-	}
+	if (!wdev->connected)
+		return -ENOTCONN;
 
-	if (!rdev->ops->set_rekey_data) {
-		err = -EOPNOTSUPP;
-		goto out;
-	}
+	if (!rdev->ops->set_rekey_data)
+		return -EOPNOTSUPP;
 
-	err = rdev_set_rekey_data(rdev, dev, &rekey_data);
- out:
-	wdev_unlock(wdev);
-	return err;
+	return rdev_set_rekey_data(rdev, dev, &rekey_data);
 }
 
 static int nl80211_register_unexpected_frame(struct sk_buff *skb,
@@ -15232,11 +15397,9 @@
 		memcpy(qos_map->up, pos, IEEE80211_QOS_MAP_LEN_MIN);
 	}
 
-	wdev_lock(dev->ieee80211_ptr);
 	ret = nl80211_key_allowed(dev->ieee80211_ptr);
 	if (!ret)
 		ret = rdev_set_qos_map(rdev, dev, qos_map);
-	wdev_unlock(dev->ieee80211_ptr);
 
 	kfree(qos_map);
 	return ret;
@@ -15250,7 +15413,6 @@
 	const u8 *peer;
 	u8 tsid, up;
 	u16 admitted_time = 0;
-	int err;
 
 	if (!(rdev->wiphy.features & NL80211_FEATURE_SUPPORTS_WMM_ADMISSION))
 		return -EOPNOTSUPP;
@@ -15280,34 +15442,25 @@
 			return -EINVAL;
 	}
 
-	wdev_lock(wdev);
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_P2P_CLIENT:
 		if (wdev->connected)
 			break;
-		err = -ENOTCONN;
-		goto out;
+		return -ENOTCONN;
 	default:
-		err = -EOPNOTSUPP;
-		goto out;
+		return -EOPNOTSUPP;
 	}
 
-	err = rdev_add_tx_ts(rdev, dev, tsid, peer, up, admitted_time);
-
- out:
-	wdev_unlock(wdev);
-	return err;
+	return rdev_add_tx_ts(rdev, dev, tsid, peer, up, admitted_time);
 }
 
 static int nl80211_del_tx_ts(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
-	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	const u8 *peer;
 	u8 tsid;
-	int err;
 
 	if (!info->attrs[NL80211_ATTR_TSID] || !info->attrs[NL80211_ATTR_MAC])
 		return -EINVAL;
@@ -15315,11 +15468,7 @@
 	tsid = nla_get_u8(info->attrs[NL80211_ATTR_TSID]);
 	peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
 
-	wdev_lock(wdev);
-	err = rdev_del_tx_ts(rdev, dev, tsid, peer);
-	wdev_unlock(wdev);
-
-	return err;
+	return rdev_del_tx_ts(rdev, dev, tsid, peer);
 }
 
 static int nl80211_tdls_channel_switch(struct sk_buff *skb,
@@ -15375,11 +15524,7 @@
 	addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
 	oper_class = nla_get_u8(info->attrs[NL80211_ATTR_OPER_CLASS]);
 
-	wdev_lock(wdev);
-	err = rdev_tdls_channel_switch(rdev, dev, addr, oper_class, &chandef);
-	wdev_unlock(wdev);
-
-	return err;
+	return rdev_tdls_channel_switch(rdev, dev, addr, oper_class, &chandef);
 }
 
 static int nl80211_tdls_cancel_channel_switch(struct sk_buff *skb,
@@ -15387,7 +15532,6 @@
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
-	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	const u8 *addr;
 
 	if (!rdev->ops->tdls_channel_switch ||
@@ -15408,9 +15552,7 @@
 
 	addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
 
-	wdev_lock(wdev);
 	rdev_tdls_cancel_channel_switch(rdev, dev, addr);
-	wdev_unlock(wdev);
 
 	return 0;
 }
@@ -15443,7 +15585,6 @@
 	struct net_device *dev = info->user_ptr[1];
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_pmk_conf pmk_conf = {};
-	int ret;
 
 	if (wdev->iftype != NL80211_IFTYPE_STATION &&
 	    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
@@ -15456,34 +15597,24 @@
 	if (!info->attrs[NL80211_ATTR_MAC] || !info->attrs[NL80211_ATTR_PMK])
 		return -EINVAL;
 
-	wdev_lock(wdev);
-	if (!wdev->connected) {
-		ret = -ENOTCONN;
-		goto out;
-	}
+	if (!wdev->connected)
+		return -ENOTCONN;
 
 	pmk_conf.aa = nla_data(info->attrs[NL80211_ATTR_MAC]);
-	if (memcmp(pmk_conf.aa, wdev->u.client.connected_addr, ETH_ALEN)) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (memcmp(pmk_conf.aa, wdev->u.client.connected_addr, ETH_ALEN))
+		return -EINVAL;
 
 	pmk_conf.pmk = nla_data(info->attrs[NL80211_ATTR_PMK]);
 	pmk_conf.pmk_len = nla_len(info->attrs[NL80211_ATTR_PMK]);
 	if (pmk_conf.pmk_len != WLAN_PMK_LEN &&
-	    pmk_conf.pmk_len != WLAN_PMK_LEN_SUITE_B_192) {
-		ret = -EINVAL;
-		goto out;
-	}
+	    pmk_conf.pmk_len != WLAN_PMK_LEN_SUITE_B_192)
+		return -EINVAL;
 
 	if (info->attrs[NL80211_ATTR_PMKR0_NAME])
 		pmk_conf.pmk_r0_name =
 			nla_data(info->attrs[NL80211_ATTR_PMKR0_NAME]);
 
-	ret = rdev_set_pmk(rdev, dev, &pmk_conf);
-out:
-	wdev_unlock(wdev);
-	return ret;
+	return rdev_set_pmk(rdev, dev, &pmk_conf);
 }
 
 static int nl80211_del_pmk(struct sk_buff *skb, struct genl_info *info)
@@ -15492,7 +15623,6 @@
 	struct net_device *dev = info->user_ptr[1];
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	const u8 *aa;
-	int ret;
 
 	if (wdev->iftype != NL80211_IFTYPE_STATION &&
 	    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
@@ -15505,12 +15635,8 @@
 	if (!info->attrs[NL80211_ATTR_MAC])
 		return -EINVAL;
 
-	wdev_lock(wdev);
 	aa = nla_data(info->attrs[NL80211_ATTR_MAC]);
-	ret = rdev_del_pmk(rdev, dev, aa);
-	wdev_unlock(wdev);
-
-	return ret;
+	return rdev_del_pmk(rdev, dev, aa);
 }
 
 static int nl80211_external_auth(struct sk_buff *skb, struct genl_info *info)
@@ -15584,8 +15710,6 @@
 		return -EINVAL;
 	}
 
-	wdev_lock(wdev);
-
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_P2P_GO:
@@ -15594,21 +15718,16 @@
 	case NL80211_IFTYPE_ADHOC:
 		if (wdev->u.ibss.current_bss)
 			break;
-		err = -ENOTCONN;
-		goto out;
+		return -ENOTCONN;
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_P2P_CLIENT:
 		if (wdev->connected)
 			break;
-		err = -ENOTCONN;
-		goto out;
+		return -ENOTCONN;
 	default:
-		err = -EOPNOTSUPP;
-		goto out;
+		return -EOPNOTSUPP;
 	}
 
-	wdev_unlock(wdev);
-
 	buf = nla_data(info->attrs[NL80211_ATTR_FRAME]);
 	len = nla_len(info->attrs[NL80211_ATTR_FRAME]);
 	dest = nla_data(info->attrs[NL80211_ATTR_MAC]);
@@ -15624,9 +15743,6 @@
 	if (!err && !dont_wait_for_ack)
 		nl_set_extack_cookie_u64(info->extack, cookie);
 	return err;
- out:
-	wdev_unlock(wdev);
-	return err;
 }
 
 static int nl80211_get_ftm_responder_stats(struct sk_buff *skb,
@@ -15904,8 +16020,6 @@
 	if (info->attrs[NL80211_ATTR_MAC])
 		tid_config->peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
 
-	wdev_lock(dev->ieee80211_ptr);
-
 	nla_for_each_nested(tid, info->attrs[NL80211_ATTR_TID_CONFIG],
 			    rem_conf) {
 		ret = nla_parse_nested(attrs, NL80211_TID_CONFIG_ATTR_MAX,
@@ -15927,7 +16041,6 @@
 
 bad_tid_conf:
 	kfree(tid_config);
-	wdev_unlock(dev->ieee80211_ptr);
 	return ret;
 }
 
@@ -16024,9 +16137,7 @@
 		params.counter_offset_presp = offset;
 	}
 
-	wdev_lock(wdev);
 	err = rdev_color_change(rdev, dev, &params);
-	wdev_unlock(wdev);
 
 out:
 	kfree(params.beacon_next.mbssid_ies);
@@ -16082,7 +16193,6 @@
 	    !is_valid_ether_addr(nla_data(info->attrs[NL80211_ATTR_MAC])))
 		return -EINVAL;
 
-	wdev_lock(wdev);
 	wdev->valid_links |= BIT(link_id);
 	ether_addr_copy(wdev->links[link_id].addr,
 			nla_data(info->attrs[NL80211_ATTR_MAC]));
@@ -16092,7 +16202,6 @@
 		wdev->valid_links &= ~BIT(link_id);
 		eth_zero_addr(wdev->links[link_id].addr);
 	}
-	wdev_unlock(wdev);
 
 	return ret;
 }
@@ -16114,9 +16223,7 @@
 		return -EINVAL;
 	}
 
-	wdev_lock(wdev);
 	cfg80211_remove_link(wdev, link_id);
-	wdev_unlock(wdev);
 
 	return 0;
 }
@@ -16206,14 +16313,10 @@
 	if (err)
 		return err;
 
-	wdev_lock(dev->ieee80211_ptr);
 	if (add)
-		err = rdev_add_link_station(rdev, dev, &params);
-	else
-		err = rdev_mod_link_station(rdev, dev, &params);
-	wdev_unlock(dev->ieee80211_ptr);
+		return rdev_add_link_station(rdev, dev, &params);
 
-	return err;
+	return rdev_mod_link_station(rdev, dev, &params);
 }
 
 static int
@@ -16234,7 +16337,6 @@
 	struct link_station_del_parameters params = {};
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
-	int ret;
 
 	if (!rdev->ops->del_link_station)
 		return -EOPNOTSUPP;
@@ -16246,11 +16348,7 @@
 	params.mld_mac = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
 	params.link_id = nla_get_u8(info->attrs[NL80211_ATTR_MLO_LINK_ID]);
 
-	wdev_lock(dev->ieee80211_ptr);
-	ret = rdev_del_link_station(rdev, dev, &params);
-	wdev_unlock(dev->ieee80211_ptr);
-
-	return ret;
+	return rdev_del_link_station(rdev, dev, &params);
 }
 
 static int nl80211_set_hw_timestamp(struct sk_buff *skb,
@@ -17114,7 +17212,8 @@
 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 		.doit = nl80211_tdls_mgmt,
 		.flags = GENL_UNS_ADMIN_PERM,
-		.internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
+		.internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+					 NL80211_FLAG_MLO_VALID_LINK_ID),
 	},
 	{
 		.cmd = NL80211_CMD_TDLS_OPER,
@@ -17851,7 +17950,7 @@
 
 void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
 			   struct net_device *netdev,
-			   struct cfg80211_rx_assoc_resp *data)
+			   struct cfg80211_rx_assoc_resp_data *data)
 {
 	nl80211_send_mlme_event(rdev, netdev, data->buf, data->len,
 				NL80211_CMD_ASSOCIATE, GFP_KERNEL,
@@ -18176,7 +18275,7 @@
 }
 
 void nl80211_send_port_authorized(struct cfg80211_registered_device *rdev,
-				  struct net_device *netdev, const u8 *bssid,
+				  struct net_device *netdev, const u8 *peer_addr,
 				  const u8 *td_bitmap, u8 td_bitmap_len)
 {
 	struct sk_buff *msg;
@@ -18194,7 +18293,7 @@
 
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
-	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid))
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer_addr))
 		goto nla_put_failure;
 
 	if ((td_bitmap_len > 0) && td_bitmap)
@@ -18248,6 +18347,76 @@
 	nlmsg_free(msg);
 }
 
+void cfg80211_links_removed(struct net_device *dev, u16 link_mask)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct wiphy *wiphy = wdev->wiphy;
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+	struct sk_buff *msg;
+	struct nlattr *links;
+	void *hdr;
+
+	lockdep_assert_wiphy(wdev->wiphy);
+	trace_cfg80211_links_removed(dev, link_mask);
+
+	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
+		    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
+		return;
+
+	if (WARN_ON(!wdev->valid_links || !link_mask ||
+		    (wdev->valid_links & link_mask) != link_mask ||
+		    wdev->valid_links == link_mask))
+		return;
+
+	cfg80211_wdev_release_link_bsses(wdev, link_mask);
+	wdev->valid_links &= ~link_mask;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return;
+
+	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_LINKS_REMOVED);
+	if (!hdr) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
+		goto nla_put_failure;
+
+	links = nla_nest_start(msg, NL80211_ATTR_MLO_LINKS);
+	if (!links)
+		goto nla_put_failure;
+
+	while (link_mask) {
+		struct nlattr *link;
+		int link_id = __ffs(link_mask);
+
+		link = nla_nest_start(msg, link_id + 1);
+		if (!link)
+			goto nla_put_failure;
+
+		if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id))
+			goto nla_put_failure;
+
+		nla_nest_end(msg, link);
+		link_mask &= ~(1 << link_id);
+	}
+
+	nla_nest_end(msg, links);
+
+	genlmsg_end(msg, hdr);
+
+	genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
+				NL80211_MCGRP_MLME, GFP_KERNEL);
+	return;
+
+ nla_put_failure:
+	nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_links_removed);
+
 void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
 			     struct net_device *netdev, const u8 *bssid,
 			     gfp_t gfp)
@@ -18960,9 +19129,8 @@
 			      enum nl80211_cqm_rssi_threshold_event rssi_event,
 			      s32 rssi_level, gfp_t gfp)
 {
-	struct sk_buff *msg;
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+	struct cfg80211_cqm_config *cqm_config;
 
 	trace_cfg80211_cqm_rssi_notify(dev, rssi_event, rssi_level);
 
@@ -18970,16 +19138,37 @@
 		    rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH))
 		return;
 
-	if (wdev->cqm_config) {
-		wdev->cqm_config->last_rssi_event_value = rssi_level;
+	rcu_read_lock();
+	cqm_config = rcu_dereference(wdev->cqm_config);
+	if (cqm_config) {
+		cqm_config->last_rssi_event_value = rssi_level;
+		cqm_config->last_rssi_event_type = rssi_event;
+		wiphy_work_queue(wdev->wiphy, &wdev->cqm_rssi_work);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
 
-		cfg80211_cqm_rssi_update(rdev, dev);
+void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+	struct wireless_dev *wdev = container_of(work, struct wireless_dev,
+						 cqm_rssi_work);
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+	enum nl80211_cqm_rssi_threshold_event rssi_event;
+	struct cfg80211_cqm_config *cqm_config;
+	struct sk_buff *msg;
+	s32 rssi_level;
 
-		if (rssi_level == 0)
-			rssi_level = wdev->cqm_config->last_rssi_event_value;
-	}
+	cqm_config = wiphy_dereference(wdev->wiphy, wdev->cqm_config);
+	if (!wdev->cqm_config)
+		return;
 
-	msg = cfg80211_prepare_cqm(dev, NULL, gfp);
+	cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
+
+	rssi_level = cqm_config->last_rssi_event_value;
+	rssi_event = cqm_config->last_rssi_event_type;
+
+	msg = cfg80211_prepare_cqm(wdev->netdev, NULL, GFP_KERNEL);
 	if (!msg)
 		return;
 
@@ -18991,14 +19180,13 @@
 				      rssi_level))
 		goto nla_put_failure;
 
-	cfg80211_send_cqm(msg, gfp);
+	cfg80211_send_cqm(msg, GFP_KERNEL);
 
 	return;
 
  nla_put_failure:
 	nlmsg_free(msg);
 }
-EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
 
 void cfg80211_cqm_txe_notify(struct net_device *dev,
 			     const u8 *peer, u32 num_packets,
@@ -19241,7 +19429,7 @@
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 	WARN_INVALID_LINK_ID(wdev, link_id);
 
 	trace_cfg80211_ch_switch_notify(dev, chandef, link_id, punct_bitmap);
@@ -19286,7 +19474,7 @@
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 	WARN_INVALID_LINK_ID(wdev, link_id);
 
 	trace_cfg80211_ch_switch_started_notify(dev, chandef, link_id,
@@ -19309,7 +19497,7 @@
 	struct sk_buff *msg;
 	void *hdr;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	trace_cfg80211_bss_color_notify(dev, cmd, count, color_bitmap);
 
@@ -19774,7 +19962,8 @@
 					list) {
 			if (sched_scan_req->owner_nlportid == notify->portid) {
 				sched_scan_req->nl_owner_dead = true;
-				schedule_work(&rdev->sched_scan_stop_wk);
+				wiphy_work_queue(&rdev->wiphy,
+						 &rdev->sched_scan_stop_wk);
 			}
 		}
 
diff -ruw linux-6.4/net/wireless/nl80211.h linux-6.4-fbx/net/wireless/nl80211.h
--- linux-6.4/net/wireless/nl80211.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/nl80211.h	2023-11-07 13:38:44.110258003 +0100
@@ -60,7 +60,7 @@
 			  const u8 *buf, size_t len, gfp_t gfp);
 void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
 			   struct net_device *netdev,
-			   struct cfg80211_rx_assoc_resp *data);
+			   struct cfg80211_rx_assoc_resp_data *data);
 void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
 			 struct net_device *netdev,
 			 const u8 *buf, size_t len,
@@ -82,8 +82,11 @@
 void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
 			 struct net_device *netdev,
 			 struct cfg80211_roam_info *info, gfp_t gfp);
+/* For STA/GC, indicate port authorized with AP/GO bssid.
+ * For GO/AP, use peer GC/STA mac_addr.
+ */
 void nl80211_send_port_authorized(struct cfg80211_registered_device *rdev,
-				  struct net_device *netdev, const u8 *bssid,
+				  struct net_device *netdev, const u8 *peer_addr,
 				  const u8 *td_bitmap, u8 td_bitmap_len);
 void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
 			       struct net_device *netdev, u16 reason,
@@ -120,6 +123,5 @@
 
 /* peer measurement */
 int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info);
-int nl80211_pmsr_dump_results(struct sk_buff *skb, struct netlink_callback *cb);
 
 #endif /* __NET_WIRELESS_NL80211_H */
diff -ruw linux-6.4/net/wireless/ocb.c linux-6.4-fbx/net/wireless/ocb.c
--- linux-6.4/net/wireless/ocb.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/ocb.c	2023-11-07 13:38:44.110258003 +0100
@@ -4,7 +4,7 @@
  *
  * Copyright: (c) 2014 Czech Technical University in Prague
  *            (c) 2014 Volkswagen Group Research
- * Copyright (C) 2022 Intel Corporation
+ * Copyright (C) 2022-2023 Intel Corporation
  * Author:    Rostislav Lisovy <rostislav.lisovy@fel.cvut.cz>
  * Funded by: Volkswagen Group Research
  */
@@ -15,14 +15,14 @@
 #include "core.h"
 #include "rdev-ops.h"
 
-int __cfg80211_join_ocb(struct cfg80211_registered_device *rdev,
+int cfg80211_join_ocb(struct cfg80211_registered_device *rdev,
 			struct net_device *dev,
 			struct ocb_setup *setup)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	int err;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB)
 		return -EOPNOTSUPP;
@@ -40,27 +40,13 @@
 	return err;
 }
 
-int cfg80211_join_ocb(struct cfg80211_registered_device *rdev,
-		      struct net_device *dev,
-		      struct ocb_setup *setup)
-{
-	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	int err;
-
-	wdev_lock(wdev);
-	err = __cfg80211_join_ocb(rdev, dev, setup);
-	wdev_unlock(wdev);
-
-	return err;
-}
-
-int __cfg80211_leave_ocb(struct cfg80211_registered_device *rdev,
+int cfg80211_leave_ocb(struct cfg80211_registered_device *rdev,
 			 struct net_device *dev)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	int err;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB)
 		return -EOPNOTSUPP;
@@ -68,22 +54,12 @@
 	if (!rdev->ops->leave_ocb)
 		return -EOPNOTSUPP;
 
+	if (!wdev->u.ocb.chandef.chan)
+		return -ENOTCONN;
+
 	err = rdev_leave_ocb(rdev, dev);
 	if (!err)
 		memset(&wdev->u.ocb.chandef, 0, sizeof(wdev->u.ocb.chandef));
 
 	return err;
 }
-
-int cfg80211_leave_ocb(struct cfg80211_registered_device *rdev,
-		       struct net_device *dev)
-{
-	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	int err;
-
-	wdev_lock(wdev);
-	err = __cfg80211_leave_ocb(rdev, dev);
-	wdev_unlock(wdev);
-
-	return err;
-}
diff -ruw linux-6.4/net/wireless/pmsr.c linux-6.4-fbx/net/wireless/pmsr.c
--- linux-6.4/net/wireless/pmsr.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/pmsr.c	2023-11-07 13:38:44.110258003 +0100
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (C) 2018 - 2021 Intel Corporation
+ * Copyright (C) 2018 - 2021, 2023 Intel Corporation
  */
 #include <net/cfg80211.h>
 #include "core.h"
@@ -291,6 +291,7 @@
 	req = kzalloc(struct_size(req, peers, count), GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
+	req->n_peers = count;
 
 	if (info->attrs[NL80211_ATTR_TIMEOUT])
 		req->timeout = nla_get_u32(info->attrs[NL80211_ATTR_TIMEOUT]);
@@ -321,8 +322,6 @@
 			goto out_err;
 		idx++;
 	}
-
-	req->n_peers = count;
 	req->cookie = cfg80211_assign_cookie(rdev);
 	req->nl_portid = info->snd_portid;
 
@@ -601,7 +600,7 @@
 	struct cfg80211_pmsr_request *req, *tmp;
 	LIST_HEAD(free_list);
 
-	lockdep_assert_held(&wdev->mtx);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	spin_lock_bh(&wdev->pmsr_lock);
 	list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) {
@@ -623,9 +622,9 @@
 	struct wireless_dev *wdev = container_of(work, struct wireless_dev,
 						 pmsr_free_wk);
 
-	wdev_lock(wdev);
+	wiphy_lock(wdev->wiphy);
 	cfg80211_pmsr_process_abort(wdev);
-	wdev_unlock(wdev);
+	wiphy_unlock(wdev->wiphy);
 }
 
 void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
diff -ruw linux-6.4/net/wireless/rdev-ops.h linux-6.4-fbx/net/wireless/rdev-ops.h
--- linux-6.4/net/wireless/rdev-ops.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/rdev-ops.h	2023-11-07 13:38:44.110258003 +0100
@@ -173,7 +173,7 @@
 
 static inline int rdev_change_beacon(struct cfg80211_registered_device *rdev,
 				     struct net_device *dev,
-				     struct cfg80211_beacon_data *info)
+				     struct cfg80211_ap_update *info)
 {
 	int ret;
 	trace_rdev_change_beacon(&rdev->wiphy, dev, info);
@@ -407,6 +407,18 @@
 	return ret;
 }
 
+static inline void rdev_inform_bss(struct cfg80211_registered_device *rdev,
+				   struct cfg80211_bss *bss,
+				   const struct cfg80211_bss_ies *ies,
+				   void *drv_data)
+
+{
+	trace_rdev_inform_bss(&rdev->wiphy, bss);
+	if (rdev->ops->inform_bss)
+		rdev->ops->inform_bss(&rdev->wiphy, bss, ies, drv_data);
+	trace_rdev_return_void(&rdev->wiphy);
+}
+
 static inline int rdev_set_txq_params(struct cfg80211_registered_device *rdev,
 				      struct net_device *dev,
 				      struct ieee80211_txq_params *params)
@@ -899,17 +911,18 @@
 
 static inline int rdev_tdls_mgmt(struct cfg80211_registered_device *rdev,
 				 struct net_device *dev, u8 *peer,
-				 u8 action_code, u8 dialog_token,
-				 u16 status_code, u32 peer_capability,
-				 bool initiator, const u8 *buf, size_t len)
+				 int link_id, u8 action_code,
+				 u8 dialog_token, u16 status_code,
+				 u32 peer_capability, bool initiator,
+				 const u8 *buf, size_t len)
 {
 	int ret;
-	trace_rdev_tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
-			     dialog_token, status_code, peer_capability,
-			     initiator, buf, len);
-	ret = rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
+	trace_rdev_tdls_mgmt(&rdev->wiphy, dev, peer, link_id, action_code,
 				   dialog_token, status_code, peer_capability,
 				   initiator, buf, len);
+	ret = rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, link_id,
+				   action_code, dialog_token, status_code,
+				   peer_capability, initiator, buf, len);
 	trace_rdev_return_int(&rdev->wiphy, ret);
 	return ret;
 }
diff -ruw linux-6.4/net/wireless/reg.c linux-6.4-fbx/net/wireless/reg.c
--- linux-6.4/net/wireless/reg.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/reg.c	2024-02-14 17:43:53.027128018 +0100
@@ -5,7 +5,7 @@
  * Copyright 2008-2011	Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright      2017  Intel Deutschland GmbH
- * Copyright (C) 2018 - 2022 Intel Corporation
+ * Copyright (C) 2018 - 2023 Intel Corporation
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -1283,7 +1283,9 @@
  * 60 GHz band.
  * This resolution can be lowered and should be considered as we add
  * regulatory rule support for other "bands".
- **/
+ *
+ * Returns: whether or not the frequency is in the range
+ */
 static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
 			      u32 freq_khz)
 {
@@ -1492,6 +1494,8 @@
  * Returns a pointer to the regulatory domain structure which will hold the
  * resulting intersection of rules between rd1 and rd2. We will
  * kzalloc() this structure for you.
+ *
+ * Returns: the intersected regdomain
  */
 static struct ieee80211_regdomain *
 regdom_intersect(const struct ieee80211_regdomain *rd1,
@@ -1587,6 +1591,10 @@
 		channel_flags |= IEEE80211_CHAN_NO_HE;
 	if (rd_flags & NL80211_RRF_NO_320MHZ)
 		channel_flags |= IEEE80211_CHAN_NO_320MHZ;
+	if (rd_flags & NL80211_RRF_NO_EHT)
+		channel_flags |= IEEE80211_CHAN_NO_EHT;
+	if (rd_flags & NL80211_RRF_PSD)
+		channel_flags |= IEEE80211_CHAN_PSD;
 	return channel_flags;
 }
 
@@ -1755,6 +1763,40 @@
 	return bw_flags;
 }
 
+static void restore_channel_dfs_cached_state(struct wiphy *wiphy,
+					     struct ieee80211_channel *c)
+{
+	struct cfg80211_chan_dfs_cache *cd;
+	unsigned long timeout;
+
+	if (!IS_ENABLED(CONFIG_CFG80211_DFS_CACHE))
+		return;
+
+	cd = cfg80211_get_dfs_chan_cache(c);
+	if (!cd)
+		return;
+
+	if (cd->dfs_state == NL80211_DFS_USABLE)
+		return;
+
+	if (cd->dfs_state == NL80211_DFS_UNAVAILABLE) {
+		struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+
+		timeout = cd->dfs_state_entered +
+			msecs_to_jiffies(IEEE80211_DFS_MIN_NOP_TIME_MS);
+
+		if (time_after_eq(jiffies, timeout))
+			return;
+
+		cfg80211_sched_dfs_chan_update(rdev);
+	}
+
+	wiphy_info(wiphy, "restoring channel %u DFS state from cache\n",
+		   cd->center_freq);
+	c->dfs_state = cd->dfs_state;
+	c->dfs_state_entered = cd->dfs_state_entered;
+}
+
 static void handle_channel_single_rule(struct wiphy *wiphy,
 				       enum nl80211_reg_initiator initiator,
 				       struct ieee80211_channel *chan,
@@ -1789,15 +1831,22 @@
 
 		if (chan->flags & IEEE80211_CHAN_RADAR) {
 			chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
+			if (chan->center_freq >= 5600 &&
+			    chan->center_freq <= 5650)
+				chan->dfs_cac_ms = IEEE80211_DFS_WEATHER_MIN_CAC_TIME_MS;
 			if (reg_rule->dfs_cac_ms)
 				chan->dfs_cac_ms = reg_rule->dfs_cac_ms;
 		}
 
+		if (chan->flags & IEEE80211_CHAN_PSD)
+			chan->psd = reg_rule->psd;
+
 		return;
 	}
 
 	chan->dfs_state = NL80211_DFS_USABLE;
 	chan->dfs_state_entered = jiffies;
+	restore_channel_dfs_cached_state(wiphy, chan);
 
 	chan->beacon_found = false;
 	chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
@@ -1809,9 +1858,17 @@
 	if (chan->flags & IEEE80211_CHAN_RADAR) {
 		if (reg_rule->dfs_cac_ms)
 			chan->dfs_cac_ms = reg_rule->dfs_cac_ms;
+		else {
+			if (chan->center_freq >= 5600 &&
+			    chan->center_freq <= 5650)
+				chan->dfs_cac_ms = IEEE80211_DFS_WEATHER_MIN_CAC_TIME_MS;
 		else
 			chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
 	}
+	}
+
+	if (chan->flags & IEEE80211_CHAN_PSD)
+		chan->psd = reg_rule->psd;
 
 	if (chan->orig_mpwr) {
 		/*
@@ -1882,11 +1939,18 @@
 							 rrule2->dfs_cac_ms);
 		}
 
+		if ((rrule1->flags & NL80211_RRF_PSD) &&
+		    (rrule2->flags & NL80211_RRF_PSD))
+			chan->psd = min_t(s8, rrule1->psd, rrule2->psd);
+		else
+			chan->flags &= ~IEEE80211_CHAN_PSD;
+
 		return;
 	}
 
 	chan->dfs_state = NL80211_DFS_USABLE;
 	chan->dfs_state_entered = jiffies;
+	restore_channel_dfs_cached_state(wiphy, chan);
 
 	chan->beacon_found = false;
 	chan->flags = flags | bw_flags1 | bw_flags2 |
@@ -2149,6 +2213,13 @@
 	return false;
 }
 
+static void reg_call_notifier(struct wiphy *wiphy,
+			      struct regulatory_request *request)
+{
+	if (wiphy->reg_notifier)
+		wiphy->reg_notifier(wiphy, request);
+}
+
 static void handle_reg_beacon(struct wiphy *wiphy, unsigned int chan_idx,
 			      struct reg_beacon *reg_beacon)
 {
@@ -2156,6 +2227,7 @@
 	struct ieee80211_channel *chan;
 	bool channel_changed = false;
 	struct ieee80211_channel chan_before;
+	struct regulatory_request *lr = get_last_request();
 
 	sband = wiphy->bands[reg_beacon->chan.band];
 	chan = &sband->channels[chan_idx];
@@ -2181,8 +2253,11 @@
 		channel_changed = true;
 	}
 
-	if (channel_changed)
+	if (channel_changed) {
 		nl80211_send_beacon_hint_event(wiphy, &chan_before, chan);
+		if (wiphy->flags & WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON)
+			reg_call_notifier(wiphy, lr);
+	}
 }
 
 /*
@@ -2325,13 +2400,6 @@
 		reg_process_ht_flags_band(wiphy, wiphy->bands[band]);
 }
 
-static void reg_call_notifier(struct wiphy *wiphy,
-			      struct regulatory_request *request)
-{
-	if (wiphy->reg_notifier)
-		wiphy->reg_notifier(wiphy, request);
-}
-
 static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
 {
 	struct cfg80211_chan_def chandef = {};
@@ -2340,19 +2408,18 @@
 	bool ret;
 	int link;
 
-	wdev_lock(wdev);
 	iftype = wdev->iftype;
 
 	/* make sure the interface is active */
 	if (!wdev->netdev || !netif_running(wdev->netdev))
-		goto wdev_inactive_unlock;
+		return true;
 
 	for (link = 0; link < ARRAY_SIZE(wdev->links); link++) {
 		struct ieee80211_channel *chan;
 
 		if (!wdev->valid_links && link > 0)
 			break;
-		if (!(wdev->valid_links & BIT(link)))
+		if (wdev->valid_links && !(wdev->valid_links & BIT(link)))
 			continue;
 		switch (iftype) {
 		case NL80211_IFTYPE_AP:
@@ -2391,14 +2458,20 @@
 		case NL80211_IFTYPE_P2P_DEVICE:
 			/* no enforcement required */
 			break;
+		case NL80211_IFTYPE_OCB:
+			if (!wdev->u.ocb.chandef.chan)
+				continue;
+			chandef = wdev->u.ocb.chandef;
+			break;
+		case NL80211_IFTYPE_NAN:
+			/* we have no info, but NAN is also pretty universal */
+			continue;
 		default:
 			/* others not implemented for now */
-			WARN_ON(1);
+			WARN_ON_ONCE(1);
 			break;
 		}
 
-		wdev_unlock(wdev);
-
 		switch (iftype) {
 		case NL80211_IFTYPE_AP:
 		case NL80211_IFTYPE_P2P_GO:
@@ -2419,16 +2492,8 @@
 		default:
 			break;
 		}
-
-		wdev_lock(wdev);
 	}
 
-	wdev_unlock(wdev);
-
-	return true;
-
-wdev_inactive_unlock:
-	wdev_unlock(wdev);
 	return true;
 }
 
@@ -2451,9 +2516,7 @@
 	pr_debug("Verifying active interfaces after reg change\n");
 	rtnl_lock();
 
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list)
-		if (!(rdev->wiphy.regulatory_flags &
-		      REGULATORY_IGNORE_STALE_KICKOFF))
+	for_each_rdev(rdev)
 			reg_leave_invalid_chans(&rdev->wiphy);
 
 	rtnl_unlock();
@@ -2507,7 +2570,7 @@
 
 	ASSERT_RTNL();
 
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+	for_each_rdev(rdev) {
 		wiphy = &rdev->wiphy;
 		wiphy_update_regulatory(wiphy, initiator);
 	}
@@ -2549,6 +2612,7 @@
 
 	chan->dfs_state_entered = jiffies;
 	chan->dfs_state = NL80211_DFS_USABLE;
+	restore_channel_dfs_cached_state(wiphy, chan);
 
 	chan->beacon_found = false;
 
@@ -2569,6 +2633,9 @@
 			chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
 	}
 
+	if (chan->flags & IEEE80211_CHAN_PSD)
+		chan->psd = reg_rule->psd;
+
 	chan->max_power = chan->max_reg_power;
 }
 
@@ -2655,6 +2722,9 @@
  *
  * The wireless subsystem can use this function to process
  * a regulatory request issued by the regulatory core.
+ *
+ * Returns: %REG_REQ_OK or %REG_REQ_IGNORE, indicating if the
+ *	hint was processed or ignored
  */
 static enum reg_request_treatment
 reg_process_hint_core(struct regulatory_request *core_request)
@@ -2711,6 +2781,9 @@
  *
  * The wireless subsystem can use this function to process
  * a regulatory request initiated by userspace.
+ *
+ * Returns: %REG_REQ_OK or %REG_REQ_IGNORE, indicating if the
+ *	hint was processed or ignored
  */
 static enum reg_request_treatment
 reg_process_hint_user(struct regulatory_request *user_request)
@@ -2766,7 +2839,7 @@
  * The wireless subsystem can use this function to process
  * a regulatory request issued by an 802.11 driver.
  *
- * Returns one of the different reg request treatment values.
+ * Returns: one of the different reg request treatment values.
  */
 static enum reg_request_treatment
 reg_process_hint_driver(struct wiphy *wiphy,
@@ -2870,7 +2943,7 @@
  * The wireless subsystem can use this function to process
  * a regulatory request issued by a country Information Element.
  *
- * Returns one of the different reg request treatment values.
+ * Returns: one of the different reg request treatment values.
  */
 static enum reg_request_treatment
 reg_process_hint_country_ie(struct wiphy *wiphy,
@@ -2983,7 +3056,7 @@
 
 	ASSERT_RTNL();
 
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+	for_each_rdev(rdev) {
 		if (wiphy == &rdev->wiphy)
 			continue;
 		wiphy_share_dfs_chan_state(wiphy, &rdev->wiphy);
@@ -3049,7 +3122,7 @@
 	struct cfg80211_registered_device *rdev;
 	struct wiphy *wiphy;
 
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+	for_each_rdev(rdev) {
 		wiphy = &rdev->wiphy;
 		if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED &&
 		    request->initiator == NL80211_REGDOM_SET_BY_USER)
@@ -3114,7 +3187,7 @@
 		list_del_init(&pending_beacon->list);
 
 		/* Applies the beacon hint to current wiphys */
-		list_for_each_entry(rdev, &cfg80211_rdev_list, list)
+		for_each_rdev(rdev)
 			wiphy_update_new_beacon(&rdev->wiphy, pending_beacon);
 
 		/* Remembers the beacon hint for new wiphys or reg changes */
@@ -3169,7 +3242,7 @@
 
 	ASSERT_RTNL();
 
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+	for_each_rdev(rdev) {
 		wiphy_lock(&rdev->wiphy);
 		reg_process_self_managed_hint(&rdev->wiphy);
 		wiphy_unlock(&rdev->wiphy);
@@ -3509,7 +3582,7 @@
 	world_alpha2[0] = cfg80211_world_regdom->alpha2[0];
 	world_alpha2[1] = cfg80211_world_regdom->alpha2[1];
 
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+	for_each_rdev(rdev) {
 		if (rdev->wiphy.regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
 			continue;
 		if (rdev->wiphy.regulatory_flags & REGULATORY_CUSTOM_REG)
@@ -3566,15 +3639,15 @@
 	struct cfg80211_registered_device *rdev;
 	struct wireless_dev *wdev;
 
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+	for_each_rdev(rdev) {
+		wiphy_lock(&rdev->wiphy);
 		list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
-			wdev_lock(wdev);
 			if (!(wdev->wiphy->regulatory_flags & flag)) {
-				wdev_unlock(wdev);
+				wiphy_unlock(&rdev->wiphy);
 				return false;
 			}
-			wdev_unlock(wdev);
 		}
+		wiphy_unlock(&rdev->wiphy);
 	}
 
 	return true;
@@ -3830,7 +3903,7 @@
 {
 	const struct ieee80211_regdomain *regd;
 	const struct ieee80211_regdomain *intersected_rd = NULL;
-	const struct ieee80211_regdomain *tmp;
+	const struct ieee80211_regdomain *tmp = NULL;
 	struct wiphy *request_wiphy;
 
 	if (is_world_regdom(rd->alpha2))
@@ -3853,10 +3926,8 @@
 	if (!driver_request->intersect) {
 		ASSERT_RTNL();
 		wiphy_lock(request_wiphy);
-		if (request_wiphy->regd) {
-			wiphy_unlock(request_wiphy);
-			return -EALREADY;
-		}
+		if (request_wiphy->regd)
+			tmp = get_wiphy_regdom(request_wiphy);
 
 		regd = reg_copy_regd(rd);
 		if (IS_ERR(regd)) {
@@ -3865,6 +3936,7 @@
 		}
 
 		rcu_assign_pointer(request_wiphy->regd, regd);
+		rcu_free_regdom(tmp);
 		wiphy_unlock(request_wiphy);
 		reset_regdomains(false, rd);
 		return 0;
@@ -4236,7 +4308,7 @@
 	if (WARN_ON(!cfg80211_chandef_valid(chandef)))
 		return;
 
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+	for_each_rdev(rdev) {
 		if (wiphy == &rdev->wiphy)
 			continue;
 
diff -ruw linux-6.4/net/wireless/reg.h linux-6.4-fbx/net/wireless/reg.h
--- linux-6.4/net/wireless/reg.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/reg.h	2023-11-07 13:38:44.110258003 +0100
@@ -5,7 +5,7 @@
 
 /*
  * Copyright 2008-2011	Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019, 2023 Intel Corporation
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -133,7 +133,7 @@
 /**
  * cfg80211_get_unii - get the U-NII band for the frequency
  * @freq: the frequency for which we want to get the UNII band.
-
+ *
  * Get a value specifying the U-NII band frequency belongs to.
  * U-NII bands are defined by the FCC in C.F.R 47 part 15.
  *
@@ -156,11 +156,11 @@
 
 /**
  * regulatory_propagate_dfs_state - Propagate DFS channel state to other wiphys
- * @wiphy - wiphy on which radar is detected and the event will be propagated
+ * @wiphy: wiphy on which radar is detected and the event will be propagated
  *	to other available wiphys having the same DFS domain
- * @chandef - Channel definition of radar detected channel
- * @dfs_state - DFS channel state to be set
- * @event - Type of radar event which triggered this DFS state change
+ * @chandef: Channel definition of radar detected channel
+ * @dfs_state: DFS channel state to be set
+ * @event: Type of radar event which triggered this DFS state change
  *
  * This function should be called with rtnl lock held.
  */
@@ -171,8 +171,8 @@
 
 /**
  * reg_dfs_domain_same - Checks if both wiphy have same DFS domain configured
- * @wiphy1 - wiphy it's dfs_region to be checked against that of wiphy2
- * @wiphy2 - wiphy it's dfs_region to be checked against that of wiphy1
+ * @wiphy1: wiphy it's dfs_region to be checked against that of wiphy2
+ * @wiphy2: wiphy it's dfs_region to be checked against that of wiphy1
  */
 bool reg_dfs_domain_same(struct wiphy *wiphy1, struct wiphy *wiphy2);
 
diff -ruw linux-6.4/net/wireless/scan.c linux-6.4-fbx/net/wireless/scan.c
--- linux-6.4/net/wireless/scan.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/scan.c	2024-02-14 17:43:53.027128018 +0100
@@ -96,6 +96,7 @@
  *	colocated and can be discovered via legacy bands.
  * @short_ssid_valid: short_ssid is valid and can be used
  * @short_ssid: the short SSID for this SSID
+ * @psd_20: The 20MHz PSD EIRP of the primary 20MHz channel for the reported AP
  */
 struct cfg80211_colocated_ap {
 	struct list_head list;
@@ -111,6 +112,7 @@
 	   transmitted_bssid:1,
 	   colocated_ess:1,
 	   short_ssid_valid:1;
+	s8 psd_20;
 };
 
 static void bss_free(struct cfg80211_internal_bss *bss)
@@ -218,6 +220,10 @@
 	if (elem->id == WLAN_EID_MULTIPLE_BSSID)
 		return false;
 
+	if (elem->id == WLAN_EID_EXTENSION && elem->datalen > 1 &&
+	    elem->data[0] == WLAN_EID_EXT_EHT_MULTI_LINK)
+		return false;
+
 	if (!non_inherit_elem || non_inherit_elem->datalen < 2)
 		return true;
 
@@ -259,117 +265,152 @@
 }
 EXPORT_SYMBOL(cfg80211_is_element_inherited);
 
-static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
-				  const u8 *subelement, size_t subie_len,
-				  u8 *new_ie, gfp_t gfp)
+static size_t cfg80211_copy_elem_with_frags(const struct element *elem,
+					    const u8 *ie, size_t ie_len,
+					    u8 **pos, u8 *buf, size_t buf_len)
 {
-	u8 *pos, *tmp;
-	const u8 *tmp_old, *tmp_new;
-	const struct element *non_inherit_elem;
-	u8 *sub_copy;
-
-	/* copy subelement as we need to change its content to
-	 * mark an ie after it is processed.
-	 */
-	sub_copy = kmemdup(subelement, subie_len, gfp);
-	if (!sub_copy)
+	if (WARN_ON((u8 *)elem < ie || elem->data > ie + ie_len ||
+		    elem->data + elem->datalen > ie + ie_len))
 		return 0;
 
-	pos = &new_ie[0];
+	if (elem->datalen + 2 > buf + buf_len - *pos)
+		return 0;
 
-	/* set new ssid */
-	tmp_new = cfg80211_find_ie(WLAN_EID_SSID, sub_copy, subie_len);
-	if (tmp_new) {
-		memcpy(pos, tmp_new, tmp_new[1] + 2);
-		pos += (tmp_new[1] + 2);
-	}
+	memcpy(*pos, elem, elem->datalen + 2);
+	*pos += elem->datalen + 2;
 
-	/* get non inheritance list if exists */
-	non_inherit_elem =
-		cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
-				       sub_copy, subie_len);
+	/* Finish if it is not fragmented  */
+	if (elem->datalen != 255)
+		return *pos - buf;
 
-	/* go through IEs in ie (skip SSID) and subelement,
-	 * merge them into new_ie
-	 */
-	tmp_old = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen);
-	tmp_old = (tmp_old) ? tmp_old + tmp_old[1] + 2 : ie;
+	ie_len = ie + ie_len - elem->data - elem->datalen;
+	ie = (const u8 *)elem->data + elem->datalen;
 
-	while (tmp_old + 2 - ie <= ielen &&
-	       tmp_old + tmp_old[1] + 2 - ie <= ielen) {
-		if (tmp_old[0] == 0) {
-			tmp_old++;
-			continue;
-		}
+	for_each_element(elem, ie, ie_len) {
+		if (elem->id != WLAN_EID_FRAGMENT)
+			break;
 
-		if (tmp_old[0] == WLAN_EID_EXTENSION)
-			tmp = (u8 *)cfg80211_find_ext_ie(tmp_old[2], sub_copy,
-							 subie_len);
-		else
-			tmp = (u8 *)cfg80211_find_ie(tmp_old[0], sub_copy,
-						     subie_len);
+		if (elem->datalen + 2 > buf + buf_len - *pos)
+			return 0;
 
-		if (!tmp) {
-			const struct element *old_elem = (void *)tmp_old;
+		memcpy(*pos, elem, elem->datalen + 2);
+		*pos += elem->datalen + 2;
 
-			/* ie in old ie but not in subelement */
-			if (cfg80211_is_element_inherited(old_elem,
-							  non_inherit_elem)) {
-				memcpy(pos, tmp_old, tmp_old[1] + 2);
-				pos += tmp_old[1] + 2;
+		if (elem->datalen != 255)
+			break;
 			}
-		} else {
-			/* ie in transmitting ie also in subelement,
-			 * copy from subelement and flag the ie in subelement
-			 * as copied (by setting eid field to WLAN_EID_SSID,
-			 * which is skipped anyway).
-			 * For vendor ie, compare OUI + type + subType to
-			 * determine if they are the same ie.
-			 */
-			if (tmp_old[0] == WLAN_EID_VENDOR_SPECIFIC) {
-				if (tmp_old[1] >= 5 && tmp[1] >= 5 &&
-				    !memcmp(tmp_old + 2, tmp + 2, 5)) {
-					/* same vendor ie, copy from
-					 * subelement
-					 */
-					memcpy(pos, tmp, tmp[1] + 2);
-					pos += tmp[1] + 2;
-					tmp[0] = WLAN_EID_SSID;
-				} else {
-					memcpy(pos, tmp_old, tmp_old[1] + 2);
-					pos += tmp_old[1] + 2;
+
+	return *pos - buf;
 				}
+
+static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
+				  const u8 *subie, size_t subie_len,
+				  u8 *new_ie, size_t new_ie_len)
+{
+	const struct element *non_inherit_elem, *parent, *sub;
+	u8 *pos = new_ie;
+	u8 id, ext_id;
+	unsigned int match_len;
+
+	non_inherit_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+						  subie, subie_len);
+
+	/* We copy the elements one by one from the parent to the generated
+	 * elements.
+	 * If they are not inherited (included in subie or in the non
+	 * inheritance element), then we copy all occurrences the first time
+	 * we see this element type.
+	 */
+	for_each_element(parent, ie, ielen) {
+		if (parent->id == WLAN_EID_FRAGMENT)
+			continue;
+
+		if (parent->id == WLAN_EID_EXTENSION) {
+			if (parent->datalen < 1)
+				continue;
+
+			id = WLAN_EID_EXTENSION;
+			ext_id = parent->data[0];
+			match_len = 1;
 			} else {
-				/* copy ie from subelement into new ie */
-				memcpy(pos, tmp, tmp[1] + 2);
-				pos += tmp[1] + 2;
-				tmp[0] = WLAN_EID_SSID;
+			id = parent->id;
+			match_len = 0;
 			}
+
+		/* Find first occurrence in subie */
+		sub = cfg80211_find_elem_match(id, subie, subie_len,
+					       &ext_id, match_len, 0);
+
+		/* Copy from parent if not in subie and inherited */
+		if (!sub &&
+		    cfg80211_is_element_inherited(parent, non_inherit_elem)) {
+			if (!cfg80211_copy_elem_with_frags(parent,
+							   ie, ielen,
+							   &pos, new_ie,
+							   new_ie_len))
+				return 0;
+
+			continue;
 		}
 
-		if (tmp_old + tmp_old[1] + 2 - ie == ielen)
-			break;
+		/* Already copied if an earlier element had the same type */
+		if (cfg80211_find_elem_match(id, ie, (u8 *)parent - ie,
+					     &ext_id, match_len, 0))
+			continue;
+
+		/* Not inheriting, copy all similar elements from subie */
+		while (sub) {
+			if (!cfg80211_copy_elem_with_frags(sub,
+							   subie, subie_len,
+							   &pos, new_ie,
+							   new_ie_len))
+				return 0;
 
-		tmp_old += tmp_old[1] + 2;
+			sub = cfg80211_find_elem_match(id,
+						       sub->data + sub->datalen,
+						       subie_len + subie -
+						       (sub->data +
+							sub->datalen),
+						       &ext_id, match_len, 0);
+		}
 	}
 
-	/* go through subelement again to check if there is any ie not
-	 * copied to new ie, skip ssid, capability, bssid-index ie
+	/* The above misses elements that are included in subie but not in the
+	 * parent, so do a pass over subie and append those.
+	 * Skip the non-tx BSSID caps and non-inheritance element.
 	 */
-	tmp_new = sub_copy;
-	while (tmp_new + 2 - sub_copy <= subie_len &&
-	       tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) {
-		if (!(tmp_new[0] == WLAN_EID_NON_TX_BSSID_CAP ||
-		      tmp_new[0] == WLAN_EID_SSID)) {
-			memcpy(pos, tmp_new, tmp_new[1] + 2);
-			pos += tmp_new[1] + 2;
+	for_each_element(sub, subie, subie_len) {
+		if (sub->id == WLAN_EID_NON_TX_BSSID_CAP)
+			continue;
+
+		if (sub->id == WLAN_EID_FRAGMENT)
+			continue;
+
+		if (sub->id == WLAN_EID_EXTENSION) {
+			if (sub->datalen < 1)
+				continue;
+
+			id = WLAN_EID_EXTENSION;
+			ext_id = sub->data[0];
+			match_len = 1;
+
+			if (ext_id == WLAN_EID_EXT_NON_INHERITANCE)
+				continue;
+		} else {
+			id = sub->id;
+			match_len = 0;
 		}
-		if (tmp_new + tmp_new[1] + 2 - sub_copy == subie_len)
-			break;
-		tmp_new += tmp_new[1] + 2;
+
+		/* Processed if one was included in the parent */
+		if (cfg80211_find_elem_match(id, ie, ielen,
+					     &ext_id, match_len, 0))
+			continue;
+
+		if (!cfg80211_copy_elem_with_frags(sub, subie, subie_len,
+						   &pos, new_ie, new_ie_len))
+			return 0;
 	}
 
-	kfree(sub_copy);
 	return pos - new_ie;
 }
 
@@ -535,39 +576,58 @@
 static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry,
 				  const u8 *pos, u8 length,
 				  const struct element *ssid_elem,
-				  int s_ssid_tmp)
+				  u32 s_ssid_tmp)
 {
-	/* skip the TBTT offset */
-	pos++;
+	u8 bss_params;
 
-	/* ignore entries with invalid BSSID */
-	if (!is_valid_ether_addr(pos))
-		return -EINVAL;
+	entry->psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
 
-	memcpy(entry->bssid, pos, ETH_ALEN);
-	pos += ETH_ALEN;
+	/* The length is already verified by the caller to contain bss_params */
+	if (length > sizeof(struct ieee80211_tbtt_info_7_8_9)) {
+		struct ieee80211_tbtt_info_ge_11 *tbtt_info = (void *)pos;
 
-	if (length >= IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM) {
-		memcpy(&entry->short_ssid, pos,
-		       sizeof(entry->short_ssid));
+		memcpy(entry->bssid, tbtt_info->bssid, ETH_ALEN);
+		entry->short_ssid = le32_to_cpu(tbtt_info->short_ssid);
 		entry->short_ssid_valid = true;
-		pos += 4;
+
+		bss_params = tbtt_info->bss_params;
+
+		/* Ignore disabled links */
+		if (length >= offsetofend(typeof(*tbtt_info), mld_params)) {
+			if (le16_get_bits(tbtt_info->mld_params.params,
+					  IEEE80211_RNR_MLD_PARAMS_DISABLED_LINK))
+				return -EINVAL;
+		}
+
+		if (length >= offsetofend(struct ieee80211_tbtt_info_ge_11,
+					  psd_20))
+			entry->psd_20 = tbtt_info->psd_20;
+	} else {
+		struct ieee80211_tbtt_info_7_8_9 *tbtt_info = (void *)pos;
+
+		memcpy(entry->bssid, tbtt_info->bssid, ETH_ALEN);
+
+		bss_params = tbtt_info->bss_params;
+
+		if (length == offsetofend(struct ieee80211_tbtt_info_7_8_9,
+					  psd_20))
+			entry->psd_20 = tbtt_info->psd_20;
 	}
 
+	/* ignore entries with invalid BSSID */
+	if (!is_valid_ether_addr(entry->bssid))
+		return -EINVAL;
+
 	/* skip non colocated APs */
-	if (!cfg80211_parse_bss_param(*pos, entry))
+	if (!cfg80211_parse_bss_param(bss_params, entry))
 		return -EINVAL;
-	pos++;
 
-	if (length == IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM) {
-		/*
-		 * no information about the short ssid. Consider the entry valid
+	/* no information about the short ssid. Consider the entry valid
 		 * for now. It would later be dropped in case there are explicit
 		 * SSIDs that need to be matched
 		 */
-		if (!entry->same_ssid)
+	if (!entry->same_ssid && !entry->short_ssid_valid)
 			return 0;
-	}
 
 	if (entry->same_ssid) {
 		entry->short_ssid = s_ssid_tmp;
@@ -578,10 +638,10 @@
 		 * cfg80211_parse_colocated_ap(), before calling this
 		 * function.
 		 */
-		memcpy(&entry->ssid, &ssid_elem->data,
-		       ssid_elem->datalen);
+		memcpy(&entry->ssid, &ssid_elem->data, ssid_elem->datalen);
 		entry->ssid_len = ssid_elem->datalen;
 	}
+
 	return 0;
 }
 
@@ -595,17 +655,14 @@
 	int n_coloc = 0, ret;
 	LIST_HEAD(ap_list);
 
-	elem = cfg80211_find_elem(WLAN_EID_REDUCED_NEIGHBOR_REPORT, ies->data,
-				  ies->len);
-	if (!elem)
+	ret = cfg80211_calc_short_ssid(ies, &ssid_elem, &s_ssid_tmp);
+	if (ret)
 		return 0;
 
+	for_each_element_id(elem, WLAN_EID_REDUCED_NEIGHBOR_REPORT,
+			    ies->data, ies->len) {
 	pos = elem->data;
-	end = pos + elem->datalen;
-
-	ret = cfg80211_calc_short_ssid(ies, &ssid_elem, &s_ssid_tmp);
-	if (ret)
-		return ret;
+		end = elem->data + elem->datalen;
 
 	/* RNR IE may contain more than one NEIGHBOR_AP_INFO */
 	while (pos + sizeof(*ap_info) <= end) {
@@ -624,20 +681,30 @@
 						       &band))
 			break;
 
-		freq = ieee80211_channel_to_frequency(ap_info->channel, band);
+			freq = ieee80211_channel_to_frequency(ap_info->channel,
+							      band);
 
 		if (end - pos < count * length)
 			break;
 
-		/*
-		 * TBTT info must include bss param + BSSID +
+			if (u8_get_bits(ap_info->tbtt_info_hdr,
+					IEEE80211_AP_INFO_TBTT_HDR_TYPE) !=
+			    IEEE80211_TBTT_INFO_TYPE_TBTT) {
+				pos += count * length;
+				continue;
+			}
+
+			/* TBTT info must include bss param + BSSID +
 		 * (short SSID or same_ssid bit to be set).
 		 * ignore other options, and move to the
 		 * next AP info
 		 */
 		if (band != NL80211_BAND_6GHZ ||
-		    (length != IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM &&
-		     length < IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM)) {
+			    !(length == offsetofend(struct ieee80211_tbtt_info_7_8_9,
+						    bss_params) ||
+			      length == sizeof(struct ieee80211_tbtt_info_7_8_9) ||
+			      length >= offsetofend(struct ieee80211_tbtt_info_ge_11,
+						    bss_params))) {
 			pos += count * length;
 			continue;
 		}
@@ -649,12 +716,13 @@
 					GFP_ATOMIC);
 
 			if (!entry)
-				break;
+					goto error;
 
 			entry->center_freq = freq;
 
 			if (!cfg80211_parse_ap_info(entry, pos, length,
-						    ssid_elem, s_ssid_tmp)) {
+							    ssid_elem,
+							    s_ssid_tmp)) {
 				n_coloc++;
 				list_add_tail(&entry->list, &ap_list);
 			} else {
@@ -665,10 +733,12 @@
 		}
 	}
 
+error:
 	if (pos != end) {
 		cfg80211_free_coloc_ap_list(&ap_list);
 		return 0;
 	}
+	}
 
 	list_splice_tail(&ap_list, list);
 	return n_coloc;
@@ -760,10 +830,47 @@
 		list_for_each_entry(intbss, &rdev->bss_list, list) {
 			struct cfg80211_bss *res = &intbss->pub;
 			const struct cfg80211_bss_ies *ies;
+			const struct element *ssid_elem;
+			struct cfg80211_colocated_ap *entry;
+			u32 s_ssid_tmp;
+			int ret;
 
 			ies = rcu_access_pointer(res->ies);
 			count += cfg80211_parse_colocated_ap(ies,
 							     &coloc_ap_list);
+
+			/* In case the scan request specified a specific BSSID
+			 * and the BSS is found and operating on 6GHz band then
+			 * add this AP to the colocated APs list.
+			 * This is relevant for ML probe requests when the lower
+			 * band APs have not been discovered.
+			 */
+			if (is_broadcast_ether_addr(rdev_req->bssid) ||
+			    !ether_addr_equal(rdev_req->bssid, res->bssid) ||
+			    res->channel->band != NL80211_BAND_6GHZ)
+				continue;
+
+			ret = cfg80211_calc_short_ssid(ies, &ssid_elem,
+						       &s_ssid_tmp);
+			if (ret)
+				continue;
+
+			entry = kzalloc(sizeof(*entry) + IEEE80211_MAX_SSID_LEN,
+					GFP_ATOMIC);
+
+			if (!entry)
+				continue;
+
+			memcpy(entry->bssid, res->bssid, ETH_ALEN);
+			entry->short_ssid = s_ssid_tmp;
+			memcpy(entry->ssid, ssid_elem->data,
+			       ssid_elem->datalen);
+			entry->ssid_len = ssid_elem->datalen;
+			entry->short_ssid_valid = true;
+			entry->center_freq = res->channel->center_freq;
+
+			list_add_tail(&entry->list, &coloc_ap_list);
+			count++;
 		}
 		spin_unlock_bh(&rdev->bss_lock);
 	}
@@ -838,6 +945,10 @@
 		    !cfg80211_find_ssid_match(ap, request))
 			continue;
 
+		if (!is_broadcast_ether_addr(request->bssid) &&
+		    !ether_addr_equal(request->bssid, ap->bssid))
+			continue;
+
 		if (!request->n_ssids && ap->multi_bss && !ap->transmitted_bssid)
 			continue;
 
@@ -846,6 +957,7 @@
 		scan_6ghz_params->short_ssid = ap->short_ssid;
 		scan_6ghz_params->short_ssid_valid = ap->short_ssid_valid;
 		scan_6ghz_params->unsolicited_probe = ap->unsolicited_probe;
+		scan_6ghz_params->psd_20 = ap->psd_20;
 
 		/*
 		 * If a PSC channel is added to the scan and 'need_scan_psc' is
@@ -1004,16 +1116,9 @@
 		nl80211_send_scan_msg(rdev, msg);
 }
 
-void __cfg80211_scan_done(struct work_struct *wk)
+void __cfg80211_scan_done(struct wiphy *wiphy, struct wiphy_work *wk)
 {
-	struct cfg80211_registered_device *rdev;
-
-	rdev = container_of(wk, struct cfg80211_registered_device,
-			    scan_done_wk);
-
-	wiphy_lock(&rdev->wiphy);
-	___cfg80211_scan_done(rdev, true);
-	wiphy_unlock(&rdev->wiphy);
+	___cfg80211_scan_done(wiphy_to_rdev(wiphy), true);
 }
 
 void cfg80211_scan_done(struct cfg80211_scan_request *request,
@@ -1039,7 +1144,8 @@
 	}
 
 	request->notified = true;
-	queue_work(cfg80211_wq, &wiphy_to_rdev(request->wiphy)->scan_done_wk);
+	wiphy_work_queue(request->wiphy,
+			 &wiphy_to_rdev(request->wiphy)->scan_done_wk);
 }
 EXPORT_SYMBOL(cfg80211_scan_done);
 
@@ -1573,8 +1679,6 @@
 			continue;
 		if (bss->pub.channel != new->pub.channel)
 			continue;
-		if (bss->pub.scan_width != new->pub.scan_width)
-			continue;
 		if (rcu_access_pointer(bss->pub.beacon_ies))
 			continue;
 		ies = rcu_access_pointer(bss->pub.ies);
@@ -1604,12 +1708,6 @@
 	return true;
 }
 
-struct cfg80211_non_tx_bss {
-	struct cfg80211_bss *tx_bss;
-	u8 max_bssid_indicator;
-	u8 bssid_index;
-};
-
 static void cfg80211_update_hidden_bsses(struct cfg80211_internal_bss *known,
 					 const struct cfg80211_bss_ies *new_ies,
 					 const struct cfg80211_bss_ies *old_ies)
@@ -1706,9 +1804,168 @@
 	return true;
 }
 
+static const struct channel_5ghz_desc {
+	unsigned int freq;
+	unsigned int aligned_start_freq_40;
+	unsigned int aligned_start_freq_80;
+	unsigned int aligned_start_freq_160;
+} channel_5ghz_descs[] = {
+	{ 5180, 5170, 5170, 5170 }, /* 36 */
+	{ 5200, 5170, 5170, 5170 }, /* 40 */
+	{ 5220, 5210, 5170, 5170 }, /* 44 */
+	{ 5240, 5210, 5170, 5170 }, /* 48 */
+	{ 5260, 5250, 5250, 5170 }, /* 52 */
+	{ 5280, 5250, 5250, 5170 }, /* 56 */
+	{ 5300, 5290, 5250, 5170 }, /* 60 */
+	{ 5320, 5290, 5250, 5170 }, /* 64 */
+	{ 5340, 5330, 5330, 5330 }, /* 68 */
+	{ 5360, 5330, 5330, 5330 }, /* 72 */
+	{ 5380, 5370, 5330, 5330 }, /* 76 */
+	{ 5400, 5370, 5330, 5330 }, /* 80 */
+	{ 5420, 5410, 5410, 5330 }, /* 84 */
+	{ 5440, 5410, 5410, 5330 }, /* 88 */
+	{ 5460, 5450, 5410, 5330 }, /* 92 */
+	{ 5480, 5450, 5410, 5330 }, /* 96 */
+	{ 5500, 5490, 5490, 5490 }, /* 100 */
+	{ 5520, 5490, 5490, 5490 }, /* 104 */
+	{ 5540, 5530, 5490, 5490 }, /* 108 */
+	{ 5560, 5530, 5490, 5490 }, /* 112 */
+	{ 5580, 5570, 5570, 5490 }, /* 116 */
+	{ 5600, 5570, 5570, 5490 }, /* 120 */
+	{ 5620, 5610, 5570, 5490 }, /* 124 */
+	{ 5640, 5610, 5570, 5490 }, /* 128 */
+	{ 5660, 5650, 5650, 5650 }, /* 132 */
+	{ 5680, 5650, 5650, 5650 }, /* 136 */
+	{ 5700, 5690, 5650, 5650 }, /* 140 */
+	{ 5720, 5690, 5650, 5650 }, /* 144 */
+	{ 5745, 5735, 5735, 5735 }, /* 148 */
+	{ 5765, 5735, 5735, 5735 }, /* 152 */
+	{ 5785, 5775, 5735, 5735 }, /* 156 */
+	{ 5805, 5775, 5735, 5735 }, /* 160 */
+	{ 5825, 5815, 5815, 5735 }, /* 164 */
+	{ 5845, 5815, 5815, 5735 }, /* 168 */
+	{ 5865, 5855, 5815, 5735 }, /* 172 */
+	{ 5885, 5855, 5815, 5735 }, /* 176 */
+	{ 5905, 5895, 5895, 5895 }, /* 180 */
+};
+
+static void bss_update_rdev_dfs_state(struct cfg80211_registered_device *rdev,
+				      struct cfg80211_internal_bss *bss)
+{
+	struct ieee80211_channel *chan = bss->pub.channel;
+	const struct cfg80211_bss_ies *ies = bss->pub.ies;
+	const struct channel_5ghz_desc *cdesc;
+	const struct element *elem;
+	enum nl80211_band band;
+	unsigned int width, start_freq, freq;
+	u8 oper_class;
+	size_t i;
+
+	/* Extract some IEs to check whether the AP really appears to be
+	 * doing DFS; expect country code & power constraint to be present */
+	if (!cfg80211_find_elem(WLAN_EID_PWR_CONSTRAINT, ies->data,
+				ies->len) ||
+	    !cfg80211_find_elem(WLAN_EID_COUNTRY, ies->data,
+				ies->len))
+		return;
+
+	/* guess operating bandwidth, use only operating class for
+	 * now */
+	elem = cfg80211_find_elem(WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+				  ies->data, ies->len);
+	if (!elem || elem->datalen < 1)
+		return;
+
+	oper_class = elem->data[0];
+	if (!ieee80211_operating_class_to_band(oper_class, &band) ||
+	    band != NL80211_BAND_5GHZ)
+		return;
+
+	switch (oper_class) {
+	case 115:
+	case 118:
+	case 121:
+	case 124:
+	case 125:
+		width = 20;
+		break;
+	case 116:
+	case 117:
+	case 119:
+	case 120:
+	case 122:
+	case 123:
+	case 126:
+	case 127:
+		width = 40;
+		break;
+	case 128:
+		width = 80;
+		break;
+	case 129:
+		width = 160;
+		break;
+	case 130:
+		/* ignore 80+80 */
+		return;
+	default:
+		return;
+	}
+
+	/* compute spanned channels according to primary channel &
+	 * width */
+	cdesc = NULL;
+	for (i = 0; i < ARRAY_SIZE(channel_5ghz_descs); i++) {
+		if (channel_5ghz_descs[i].freq == chan->center_freq) {
+			cdesc = &channel_5ghz_descs[i];
+			break;
+		}
+	}
+
+	if (!cdesc)
+		return;
+
+	switch (width) {
+	case 20:
+		start_freq = cdesc->freq - 10;
+		break;
+	case 40:
+		start_freq = cdesc->aligned_start_freq_40;
+		break;
+	case 80:
+		start_freq = cdesc->aligned_start_freq_80;
+		break;
+	case 160:
+		start_freq = cdesc->aligned_start_freq_160;
+		break;
+	}
+
+	for (freq = start_freq + 10;
+	     freq <= start_freq + width - 10;
+	     freq += 20) {
+		struct ieee80211_channel *c;
+
+		c = ieee80211_get_channel(&rdev->wiphy, freq);
+		if (!c ||
+		    (c->flags & IEEE80211_CHAN_DISABLED) ||
+		    !(c->flags & IEEE80211_CHAN_RADAR))
+			continue;
+
+		if (c->dfs_state != NL80211_DFS_USABLE)
+			continue;
+
+		wiphy_info(&rdev->wiphy,
+			   "setting freq %u MHz DFS state from scan result\n",
+			   freq);
+
+		c->dfs_state = NL80211_DFS_AVAILABLE;
+		c->dfs_state_entered = jiffies;
+	}
+}
+
 /* Returned bss is reference counted and must be cleaned up appropriately. */
-struct cfg80211_internal_bss *
-cfg80211_bss_update(struct cfg80211_registered_device *rdev,
+static struct cfg80211_internal_bss *
+__cfg80211_bss_update(struct cfg80211_registered_device *rdev,
 		    struct cfg80211_internal_bss *tmp,
 		    bool signal_valid, unsigned long ts)
 {
@@ -1719,10 +1976,7 @@
 
 	tmp->ts = ts;
 
-	spin_lock_bh(&rdev->bss_lock);
-
 	if (WARN_ON(!rcu_access_pointer(tmp->pub.ies))) {
-		spin_unlock_bh(&rdev->bss_lock);
 		return NULL;
 	}
 
@@ -1730,7 +1984,11 @@
 
 	if (found) {
 		if (!cfg80211_update_known_bss(rdev, found, tmp, signal_valid))
-			goto drop;
+			return NULL;
+
+		if (rdev->scan_req &&
+		    rdev->scan_req->flags & NL80211_SCAN_FLAG_UPDATE_DFS)
+			bss_update_rdev_dfs_state(rdev, found);
 	} else {
 		struct cfg80211_internal_bss *new;
 		struct cfg80211_internal_bss *hidden;
@@ -1750,7 +2008,7 @@
 			ies = (void *)rcu_dereference(tmp->pub.proberesp_ies);
 			if (ies)
 				kfree_rcu(ies, rcu_head);
-			goto drop;
+			return NULL;
 		}
 		memcpy(new, tmp, sizeof(*new));
 		new->refcount = 1;
@@ -1781,14 +2039,14 @@
 			 */
 			if (!cfg80211_combine_bsses(rdev, new)) {
 				bss_ref_put(rdev, new);
-				goto drop;
+				return NULL;
 			}
 		}
 
 		if (rdev->bss_entries >= bss_entries_limit &&
 		    !cfg80211_bss_expire_oldest(rdev)) {
 			bss_ref_put(rdev, new);
-			goto drop;
+			return NULL;
 		}
 
 		/* This must be before the call to bss_ref_get */
@@ -1797,6 +2055,10 @@
 			bss_ref_get(rdev, bss_from_pub(tmp->pub.transmitted_bss));
 		}
 
+		if (rdev->scan_req &&
+		    rdev->scan_req->flags & NL80211_SCAN_FLAG_UPDATE_DFS)
+			bss_update_rdev_dfs_state(rdev, new);
+
 		list_add_tail(&new->list, &rdev->bss_list);
 		rdev->bss_entries++;
 		rb_insert_bss(rdev, new);
@@ -1805,12 +2067,22 @@
 
 	rdev->bss_generation++;
 	bss_ref_get(rdev, found);
-	spin_unlock_bh(&rdev->bss_lock);
 
 	return found;
- drop:
+}
+
+struct cfg80211_internal_bss *
+cfg80211_bss_update(struct cfg80211_registered_device *rdev,
+		    struct cfg80211_internal_bss *tmp,
+		    bool signal_valid, unsigned long ts)
+{
+	struct cfg80211_internal_bss *res;
+
+	spin_lock_bh(&rdev->bss_lock);
+	res = __cfg80211_bss_update(rdev, tmp, signal_valid, ts);
 	spin_unlock_bh(&rdev->bss_lock);
-	return NULL;
+
+	return res;
 }
 
 int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
@@ -1870,8 +2142,7 @@
  */
 static struct ieee80211_channel *
 cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
-			 struct ieee80211_channel *channel,
-			 enum nl80211_bss_scan_width scan_width)
+			 struct ieee80211_channel *channel)
 {
 	u32 freq;
 	int channel_number;
@@ -1911,16 +2182,6 @@
 		return channel;
 	}
 
-	if (scan_width == NL80211_BSS_CHAN_WIDTH_10 ||
-	    scan_width == NL80211_BSS_CHAN_WIDTH_5) {
-		/*
-		 * Ignore channel number in 5 and 10 MHz channels where there
-		 * may not be an n:1 or 1:n mapping between frequencies and
-		 * channel numbers.
-		 */
-		return channel;
-	}
-
 	/*
 	 * Use the channel determined through the payload channel number
 	 * instead of the RX channel reported by the driver.
@@ -1930,17 +2191,36 @@
 	return alt_channel;
 }
 
+struct cfg80211_inform_single_bss_data {
+	struct cfg80211_inform_bss *drv_data;
+	enum cfg80211_bss_frame_type ftype;
+	struct ieee80211_channel *channel;
+	u8 bssid[ETH_ALEN];
+	u64 tsf;
+	u16 capability;
+	u16 beacon_interval;
+	const u8 *ie;
+	size_t ielen;
+
+	enum {
+		BSS_SOURCE_DIRECT = 0,
+		BSS_SOURCE_MBSSID,
+		BSS_SOURCE_STA_PROFILE,
+	} bss_source;
+	/* Set if reporting bss_source != BSS_SOURCE_DIRECT */
+	struct cfg80211_bss *source_bss;
+	u8 max_bssid_indicator;
+	u8 bssid_index;
+};
+
 /* Returned bss is reference counted and must be cleaned up appropriately. */
 static struct cfg80211_bss *
 cfg80211_inform_single_bss_data(struct wiphy *wiphy,
-				struct cfg80211_inform_bss *data,
-				enum cfg80211_bss_frame_type ftype,
-				const u8 *bssid, u64 tsf, u16 capability,
-				u16 beacon_interval, const u8 *ie, size_t ielen,
-				struct cfg80211_non_tx_bss *non_tx_data,
+				struct cfg80211_inform_single_bss_data *data,
 				gfp_t gfp)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+	struct cfg80211_inform_bss *drv_data = data->drv_data;
 	struct cfg80211_bss_ies *ies;
 	struct ieee80211_channel *channel;
 	struct cfg80211_internal_bss tmp = {}, *res;
@@ -1952,31 +2232,51 @@
 		return NULL;
 
 	if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
-		    (data->signal < 0 || data->signal > 100)))
+		    (drv_data->signal < 0 || drv_data->signal > 100)))
 		return NULL;
 
-	channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan,
-					   data->scan_width);
+	if (WARN_ON(data->bss_source != BSS_SOURCE_DIRECT && !data->source_bss))
+		return NULL;
+
+	channel = data->channel;
+	if (!channel)
+		channel = cfg80211_get_bss_channel(wiphy, data->ie, data->ielen,
+						   drv_data->chan);
 	if (!channel)
 		return NULL;
 
-	memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
+	memcpy(tmp.pub.bssid, data->bssid, ETH_ALEN);
 	tmp.pub.channel = channel;
-	tmp.pub.scan_width = data->scan_width;
-	tmp.pub.signal = data->signal;
-	tmp.pub.beacon_interval = beacon_interval;
-	tmp.pub.capability = capability;
-	tmp.ts_boottime = data->boottime_ns;
-	tmp.parent_tsf = data->parent_tsf;
-	ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
-
-	if (non_tx_data) {
-		tmp.pub.transmitted_bss = non_tx_data->tx_bss;
-		ts = bss_from_pub(non_tx_data->tx_bss)->ts;
-		tmp.pub.bssid_index = non_tx_data->bssid_index;
-		tmp.pub.max_bssid_indicator = non_tx_data->max_bssid_indicator;
+	if (data->bss_source != BSS_SOURCE_STA_PROFILE)
+		tmp.pub.signal = drv_data->signal;
+	else
+		tmp.pub.signal = 0;
+	tmp.pub.beacon_interval = data->beacon_interval;
+	tmp.pub.capability = data->capability;
+	tmp.ts_boottime = drv_data->boottime_ns;
+	tmp.parent_tsf = drv_data->parent_tsf;
+	ether_addr_copy(tmp.parent_bssid, drv_data->parent_bssid);
+
+	if (data->bss_source != BSS_SOURCE_DIRECT) {
+		tmp.pub.transmitted_bss = data->source_bss;
+		ts = bss_from_pub(data->source_bss)->ts;
+		tmp.pub.bssid_index = data->bssid_index;
+		tmp.pub.max_bssid_indicator = data->max_bssid_indicator;
 	} else {
 		ts = jiffies;
+
+		if (channel->band == NL80211_BAND_60GHZ) {
+			bss_type = data->capability &
+				   WLAN_CAPABILITY_DMG_TYPE_MASK;
+			if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP ||
+			    bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS)
+				regulatory_hint_found_beacon(wiphy, channel,
+							     gfp);
+		} else {
+			if (data->capability & WLAN_CAPABILITY_ESS)
+				regulatory_hint_found_beacon(wiphy, channel,
+							     gfp);
+		}
 	}
 
 	/*
@@ -1987,15 +2287,15 @@
 	 * override the IEs pointer should we have received an earlier
 	 * indication of Probe Response data.
 	 */
-	ies = kzalloc(sizeof(*ies) + ielen, gfp);
+	ies = kzalloc(sizeof(*ies) + data->ielen, gfp);
 	if (!ies)
 		return NULL;
-	ies->len = ielen;
-	ies->tsf = tsf;
+	ies->len = data->ielen;
+	ies->tsf = data->tsf;
 	ies->from_beacon = false;
-	memcpy(ies->data, ie, ielen);
+	memcpy(ies->data, data->ie, data->ielen);
 
-	switch (ftype) {
+	switch (data->ftype) {
 	case CFG80211_BSS_FTYPE_BEACON:
 		ies->from_beacon = true;
 		fallthrough;
@@ -2008,42 +2308,37 @@
 	}
 	rcu_assign_pointer(tmp.pub.ies, ies);
 
-	signal_valid = data->chan == channel;
-	res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid, ts);
+	signal_valid = drv_data->chan == channel;
+	spin_lock_bh(&rdev->bss_lock);
+	res = __cfg80211_bss_update(rdev, &tmp, signal_valid, ts);
 	if (!res)
-		return NULL;
+		goto drop;
 
-	if (channel->band == NL80211_BAND_60GHZ) {
-		bss_type = res->pub.capability & WLAN_CAPABILITY_DMG_TYPE_MASK;
-		if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP ||
-		    bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS)
-			regulatory_hint_found_beacon(wiphy, channel, gfp);
-	} else {
-		if (res->pub.capability & WLAN_CAPABILITY_ESS)
-			regulatory_hint_found_beacon(wiphy, channel, gfp);
-	}
+	rdev_inform_bss(rdev, &res->pub, ies, drv_data->drv_data);
 
-	if (non_tx_data) {
+	if (data->bss_source == BSS_SOURCE_MBSSID) {
 		/* this is a nontransmitting bss, we need to add it to
 		 * transmitting bss' list if it is not there
 		 */
-		spin_lock_bh(&rdev->bss_lock);
-		if (cfg80211_add_nontrans_list(non_tx_data->tx_bss,
-					       &res->pub)) {
+		if (cfg80211_add_nontrans_list(data->source_bss, &res->pub)) {
 			if (__cfg80211_unlink_bss(rdev, res)) {
 				rdev->bss_generation++;
 				res = NULL;
 			}
 		}
-		spin_unlock_bh(&rdev->bss_lock);
 
 		if (!res)
-			return NULL;
+			goto drop;
 	}
+	spin_unlock_bh(&rdev->bss_lock);
 
 	trace_cfg80211_return_bss(&res->pub);
-	/* cfg80211_bss_update gives us a referenced result */
+	/* __cfg80211_bss_update gives us a referenced result */
 	return &res->pub;
+
+drop:
+	spin_unlock_bh(&rdev->bss_lock);
+	return NULL;
 }
 
 static const struct element
@@ -2118,43 +2413,48 @@
 }
 EXPORT_SYMBOL(cfg80211_merge_profile);
 
-static void cfg80211_parse_mbssid_data(struct wiphy *wiphy,
-				       struct cfg80211_inform_bss *data,
-				       enum cfg80211_bss_frame_type ftype,
-				       const u8 *bssid, u64 tsf,
-				       u16 beacon_interval, const u8 *ie,
-				       size_t ielen,
-				       struct cfg80211_non_tx_bss *non_tx_data,
+static void
+cfg80211_parse_mbssid_data(struct wiphy *wiphy,
+			   struct cfg80211_inform_single_bss_data *tx_data,
+			   struct cfg80211_bss *source_bss,
 				       gfp_t gfp)
 {
+	struct cfg80211_inform_single_bss_data data = {
+		.drv_data = tx_data->drv_data,
+		.ftype = tx_data->ftype,
+		.tsf = tx_data->tsf,
+		.beacon_interval = tx_data->beacon_interval,
+		.source_bss = source_bss,
+		.bss_source = BSS_SOURCE_MBSSID,
+	};
 	const u8 *mbssid_index_ie;
 	const struct element *elem, *sub;
-	size_t new_ie_len;
-	u8 new_bssid[ETH_ALEN];
 	u8 *new_ie, *profile;
 	u64 seen_indices = 0;
-	u16 capability;
 	struct cfg80211_bss *bss;
 
-	if (!non_tx_data)
+	if (!source_bss)
 		return;
-	if (!cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+	if (!cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID,
+				tx_data->ie, tx_data->ielen))
 		return;
 	if (!wiphy->support_mbssid)
 		return;
 	if (wiphy->support_only_he_mbssid &&
-	    !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+	    !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY,
+				    tx_data->ie, tx_data->ielen))
 		return;
 
 	new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp);
 	if (!new_ie)
 		return;
 
-	profile = kmalloc(ielen, gfp);
+	profile = kmalloc(tx_data->ielen, gfp);
 	if (!profile)
 		goto out;
 
-	for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, ie, ielen) {
+	for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID,
+			    tx_data->ie, tx_data->ielen) {
 		if (elem->datalen < 4)
 			continue;
 		if (elem->data[0] < 1 || (int)elem->data[0] > 8)
@@ -2176,12 +2476,13 @@
 				continue;
 			}
 
-			memset(profile, 0, ielen);
-			profile_len = cfg80211_merge_profile(ie, ielen,
+			memset(profile, 0, tx_data->ielen);
+			profile_len = cfg80211_merge_profile(tx_data->ie,
+							     tx_data->ielen,
 							     elem,
 							     sub,
 							     profile,
-							     ielen);
+							     tx_data->ielen);
 
 			/* found a Nontransmitted BSSID Profile */
 			mbssid_index_ie = cfg80211_find_ie
@@ -2201,31 +2502,27 @@
 
 			seen_indices |= BIT_ULL(mbssid_index_ie[2]);
 
-			non_tx_data->bssid_index = mbssid_index_ie[2];
-			non_tx_data->max_bssid_indicator = elem->data[0];
+			data.bssid_index = mbssid_index_ie[2];
+			data.max_bssid_indicator = elem->data[0];
+
+			cfg80211_gen_new_bssid(tx_data->bssid,
+					       data.max_bssid_indicator,
+					       data.bssid_index,
+					       data.bssid);
 
-			cfg80211_gen_new_bssid(bssid,
-					       non_tx_data->max_bssid_indicator,
-					       non_tx_data->bssid_index,
-					       new_bssid);
 			memset(new_ie, 0, IEEE80211_MAX_DATA_LEN);
-			new_ie_len = cfg80211_gen_new_ie(ie, ielen,
+			data.ie = new_ie;
+			data.ielen = cfg80211_gen_new_ie(tx_data->ie,
+							 tx_data->ielen,
 							 profile,
-							 profile_len, new_ie,
-							 gfp);
-			if (!new_ie_len)
+							 profile_len,
+							 new_ie,
+							 IEEE80211_MAX_DATA_LEN);
+			if (!data.ielen)
 				continue;
 
-			capability = get_unaligned_le16(profile + 2);
-			bss = cfg80211_inform_single_bss_data(wiphy, data,
-							      ftype,
-							      new_bssid, tsf,
-							      capability,
-							      beacon_interval,
-							      new_ie,
-							      new_ie_len,
-							      non_tx_data,
-							      gfp);
+			data.capability = get_unaligned_le16(profile + 2);
+			bss = cfg80211_inform_single_bss_data(wiphy, &data, gfp);
 			if (!bss)
 				break;
 			cfg80211_put_bss(wiphy, bss);
@@ -2237,142 +2534,425 @@
 	kfree(profile);
 }
 
-struct cfg80211_bss *
-cfg80211_inform_bss_data(struct wiphy *wiphy,
-			 struct cfg80211_inform_bss *data,
-			 enum cfg80211_bss_frame_type ftype,
-			 const u8 *bssid, u64 tsf, u16 capability,
-			 u16 beacon_interval, const u8 *ie, size_t ielen,
+ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
+				    size_t ieslen, u8 *data, size_t data_len,
+				    u8 frag_id)
+{
+	const struct element *next;
+	ssize_t copied;
+	u8 elem_datalen;
+
+	if (!elem)
+		return -EINVAL;
+
+	/* elem might be invalid after the memmove */
+	next = (void *)(elem->data + elem->datalen);
+	elem_datalen = elem->datalen;
+
+	if (elem->id == WLAN_EID_EXTENSION) {
+		copied = elem->datalen - 1;
+		if (copied > data_len)
+			return -ENOSPC;
+
+		memmove(data, elem->data + 1, copied);
+	} else {
+		copied = elem->datalen;
+		if (copied > data_len)
+			return -ENOSPC;
+
+		memmove(data, elem->data, copied);
+	}
+
+	/* Fragmented elements must have 255 bytes */
+	if (elem_datalen < 255)
+		return copied;
+
+	for (elem = next;
+	     elem->data < ies + ieslen &&
+		elem->data + elem->datalen <= ies + ieslen;
+	     elem = next) {
+		/* elem might be invalid after the memmove */
+		next = (void *)(elem->data + elem->datalen);
+
+		if (elem->id != frag_id)
+			break;
+
+		elem_datalen = elem->datalen;
+
+		if (copied + elem_datalen > data_len)
+			return -ENOSPC;
+
+		memmove(data + copied, elem->data, elem_datalen);
+		copied += elem_datalen;
+
+		/* Only the last fragment may be short */
+		if (elem_datalen != 255)
+			break;
+	}
+
+	return copied;
+}
+EXPORT_SYMBOL(cfg80211_defragment_element);
+
+struct cfg80211_mle {
+	struct ieee80211_multi_link_elem *mle;
+	struct ieee80211_mle_per_sta_profile
+		*sta_prof[IEEE80211_MLD_MAX_NUM_LINKS];
+	ssize_t sta_prof_len[IEEE80211_MLD_MAX_NUM_LINKS];
+
+	u8 data[];
+};
+
+static struct cfg80211_mle *
+cfg80211_defrag_mle(const struct element *mle, const u8 *ie, size_t ielen,
 			 gfp_t gfp)
 {
-	struct cfg80211_bss *res;
-	struct cfg80211_non_tx_bss non_tx_data;
+	const struct element *elem;
+	struct cfg80211_mle *res;
+	size_t buf_len;
+	ssize_t mle_len;
+	u8 common_size, idx;
+
+	if (!mle || !ieee80211_mle_size_ok(mle->data + 1, mle->datalen - 1))
+		return NULL;
+
+	/* Required length for first defragmentation */
+	buf_len = mle->datalen - 1;
+	for_each_element(elem, mle->data + mle->datalen,
+			 ielen - sizeof(*mle) + mle->datalen) {
+		if (elem->id != WLAN_EID_FRAGMENT)
+			break;
 
-	res = cfg80211_inform_single_bss_data(wiphy, data, ftype, bssid, tsf,
-					      capability, beacon_interval, ie,
-					      ielen, NULL, gfp);
+		buf_len += elem->datalen;
+	}
+
+	res = kzalloc(struct_size(res, data, buf_len), gfp);
 	if (!res)
 		return NULL;
-	non_tx_data.tx_bss = res;
-	cfg80211_parse_mbssid_data(wiphy, data, ftype, bssid, tsf,
-				   beacon_interval, ie, ielen, &non_tx_data,
-				   gfp);
+
+	mle_len = cfg80211_defragment_element(mle, ie, ielen,
+					      res->data, buf_len,
+					      WLAN_EID_FRAGMENT);
+	if (mle_len < 0)
+		goto error;
+
+	res->mle = (void *)res->data;
+
+	/* Find the sub-element area in the buffer */
+	common_size = ieee80211_mle_common_size((u8 *)res->mle);
+	ie = res->data + common_size;
+	ielen = mle_len - common_size;
+
+	idx = 0;
+	for_each_element_id(elem, IEEE80211_MLE_SUBELEM_PER_STA_PROFILE,
+			    ie, ielen) {
+		res->sta_prof[idx] = (void *)elem->data;
+		res->sta_prof_len[idx] = elem->datalen;
+
+		idx++;
+		if (idx >= IEEE80211_MLD_MAX_NUM_LINKS)
+			break;
+	}
+	if (!for_each_element_completed(elem, ie, ielen))
+		goto error;
+
+	/* Defragment sta_info in-place */
+	for (idx = 0; idx < IEEE80211_MLD_MAX_NUM_LINKS && res->sta_prof[idx];
+	     idx++) {
+		if (res->sta_prof_len[idx] < 255)
+			continue;
+
+		elem = (void *)res->sta_prof[idx] - 2;
+
+		if (idx + 1 < ARRAY_SIZE(res->sta_prof) &&
+		    res->sta_prof[idx + 1])
+			buf_len = (u8 *)res->sta_prof[idx + 1] -
+				  (u8 *)res->sta_prof[idx];
+		else
+			buf_len = ielen + ie - (u8 *)elem;
+
+		res->sta_prof_len[idx] =
+			cfg80211_defragment_element(elem,
+						    (u8 *)elem, buf_len,
+						    (u8 *)res->sta_prof[idx],
+						    buf_len,
+						    IEEE80211_MLE_SUBELEM_FRAGMENT);
+		if (res->sta_prof_len[idx] < 0)
+			goto error;
+	}
+
 	return res;
+
+error:
+	kfree(res);
+	return NULL;
 }
-EXPORT_SYMBOL(cfg80211_inform_bss_data);
 
-static void
-cfg80211_parse_mbssid_frame_data(struct wiphy *wiphy,
-				 struct cfg80211_inform_bss *data,
-				 struct ieee80211_mgmt *mgmt, size_t len,
-				 struct cfg80211_non_tx_bss *non_tx_data,
-				 gfp_t gfp)
+static bool
+cfg80211_tbtt_info_for_mld_ap(const u8 *ie, size_t ielen, u8 mld_id, u8 link_id,
+			      const struct ieee80211_neighbor_ap_info **ap_info,
+			      const u8 **tbtt_info)
 {
-	enum cfg80211_bss_frame_type ftype;
-	const u8 *ie = mgmt->u.probe_resp.variable;
-	size_t ielen = len - offsetof(struct ieee80211_mgmt,
-				      u.probe_resp.variable);
+	const struct ieee80211_neighbor_ap_info *info;
+	const struct element *rnr;
+	const u8 *pos, *end;
 
-	ftype = ieee80211_is_beacon(mgmt->frame_control) ?
-		CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP;
+	for_each_element_id(rnr, WLAN_EID_REDUCED_NEIGHBOR_REPORT, ie, ielen) {
+		pos = rnr->data;
+		end = rnr->data + rnr->datalen;
 
-	cfg80211_parse_mbssid_data(wiphy, data, ftype, mgmt->bssid,
-				   le64_to_cpu(mgmt->u.probe_resp.timestamp),
-				   le16_to_cpu(mgmt->u.probe_resp.beacon_int),
-				   ie, ielen, non_tx_data, gfp);
+		/* RNR IE may contain more than one NEIGHBOR_AP_INFO */
+		while (sizeof(*info) <= end - pos) {
+			const struct ieee80211_rnr_mld_params *mld_params;
+			u16 params;
+			u8 length, i, count, mld_params_offset;
+			u8 type, lid;
+
+			info = (void *)pos;
+			count = u8_get_bits(info->tbtt_info_hdr,
+					    IEEE80211_AP_INFO_TBTT_HDR_COUNT) + 1;
+			length = info->tbtt_info_len;
+
+			pos += sizeof(*info);
+
+			if (count * length > end - pos)
+				return false;
+
+			type = u8_get_bits(info->tbtt_info_hdr,
+					   IEEE80211_AP_INFO_TBTT_HDR_TYPE);
+
+			/* Only accept full TBTT information. NSTR mobile APs
+			 * use the shortened version, but we ignore them here.
+			 */
+			if (type == IEEE80211_TBTT_INFO_TYPE_TBTT &&
+			    length >=
+			    offsetofend(struct ieee80211_tbtt_info_ge_11,
+					mld_params)) {
+				mld_params_offset =
+					offsetof(struct ieee80211_tbtt_info_ge_11, mld_params);
+			} else {
+				pos += count * length;
+				continue;
 }
 
-static void
-cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
-				   struct cfg80211_bss *nontrans_bss,
-				   struct ieee80211_mgmt *mgmt, size_t len)
-{
-	u8 *ie, *new_ie, *pos;
-	const struct element *nontrans_ssid;
-	const u8 *trans_ssid, *mbssid;
-	size_t ielen = len - offsetof(struct ieee80211_mgmt,
-				      u.probe_resp.variable);
-	size_t new_ie_len;
-	struct cfg80211_bss_ies *new_ies;
-	const struct cfg80211_bss_ies *old;
-	size_t cpy_len;
+			for (i = 0; i < count; i++) {
+				mld_params = (void *)pos + mld_params_offset;
+				params = le16_to_cpu(mld_params->params);
+
+				lid = u16_get_bits(params,
+						   IEEE80211_RNR_MLD_PARAMS_LINK_ID);
+
+				if (mld_id == mld_params->mld_id &&
+				    link_id == lid) {
+					*ap_info = info;
+					*tbtt_info = pos;
+
+					return true;
+				}
+
+				pos += length;
+			}
+		}
+	}
 
-	lockdep_assert_held(&wiphy_to_rdev(wiphy)->bss_lock);
+	return false;
+}
 
-	ie = mgmt->u.probe_resp.variable;
+static void cfg80211_parse_ml_sta_data(struct wiphy *wiphy,
+				       struct cfg80211_inform_single_bss_data *tx_data,
+				       struct cfg80211_bss *source_bss,
+				       gfp_t gfp)
+{
+	struct cfg80211_inform_single_bss_data data = {
+		.drv_data = tx_data->drv_data,
+		.ftype = tx_data->ftype,
+		.source_bss = source_bss,
+		.bss_source = BSS_SOURCE_STA_PROFILE,
+	};
+	struct ieee80211_multi_link_elem *ml_elem;
+	const struct element *elem;
+	struct cfg80211_mle *mle;
+	u16 control;
+	u8 *new_ie;
+	struct cfg80211_bss *bss;
+	int mld_id;
+	u16 seen_links = 0;
+	const u8 *pos;
+	u8 i;
 
-	new_ie_len = ielen;
-	trans_ssid = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen);
-	if (!trans_ssid)
+	if (!source_bss)
 		return;
-	new_ie_len -= trans_ssid[1];
-	mbssid = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen);
-	/*
-	 * It's not valid to have the MBSSID element before SSID
-	 * ignore if that happens - the code below assumes it is
-	 * after (while copying things inbetween).
-	 */
-	if (!mbssid || mbssid < trans_ssid)
+
+	if (tx_data->ftype != CFG80211_BSS_FTYPE_PRESP)
 		return;
-	new_ie_len -= mbssid[1];
 
-	nontrans_ssid = ieee80211_bss_get_elem(nontrans_bss, WLAN_EID_SSID);
-	if (!nontrans_ssid)
+	elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK,
+				      tx_data->ie, tx_data->ielen);
+	if (!elem || !ieee80211_mle_size_ok(elem->data + 1, elem->datalen - 1))
 		return;
 
-	new_ie_len += nontrans_ssid->datalen;
+	ml_elem = (void *)elem->data + 1;
+	control = le16_to_cpu(ml_elem->control);
+	if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) !=
+	    IEEE80211_ML_CONTROL_TYPE_BASIC)
+		return;
 
-	/* generate new ie for nontrans BSS
-	 * 1. replace SSID with nontrans BSS' SSID
-	 * 2. skip MBSSID IE
-	 */
-	new_ie = kzalloc(new_ie_len, GFP_ATOMIC);
-	if (!new_ie)
+	/* Must be present when transmitted by an AP (in a probe response) */
+	if (!(control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT) ||
+	    !(control & IEEE80211_MLC_BASIC_PRES_LINK_ID) ||
+	    !(control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP))
 		return;
 
-	new_ies = kzalloc(sizeof(*new_ies) + new_ie_len, GFP_ATOMIC);
-	if (!new_ies)
-		goto out_free;
-
-	pos = new_ie;
-
-	/* copy the nontransmitted SSID */
-	cpy_len = nontrans_ssid->datalen + 2;
-	memcpy(pos, nontrans_ssid, cpy_len);
-	pos += cpy_len;
-	/* copy the IEs between SSID and MBSSID */
-	cpy_len = trans_ssid[1] + 2;
-	memcpy(pos, (trans_ssid + cpy_len), (mbssid - (trans_ssid + cpy_len)));
-	pos += (mbssid - (trans_ssid + cpy_len));
-	/* copy the IEs after MBSSID */
-	cpy_len = mbssid[1] + 2;
-	memcpy(pos, mbssid + cpy_len, ((ie + ielen) - (mbssid + cpy_len)));
-
-	/* update ie */
-	new_ies->len = new_ie_len;
-	new_ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
-	new_ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control);
-	memcpy(new_ies->data, new_ie, new_ie_len);
-	if (ieee80211_is_probe_resp(mgmt->frame_control)) {
-		old = rcu_access_pointer(nontrans_bss->proberesp_ies);
-		rcu_assign_pointer(nontrans_bss->proberesp_ies, new_ies);
-		rcu_assign_pointer(nontrans_bss->ies, new_ies);
-		if (old)
-			kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
+	/* length + MLD MAC address + link ID info + BSS Params Change Count */
+	pos = ml_elem->variable + 1 + 6 + 1 + 1;
+
+	if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY))
+		pos += 2;
+	if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_EML_CAPA))
+		pos += 2;
+
+	/* MLD capabilities and operations */
+	pos += 2;
+
+	/* Not included when the (nontransmitted) AP is responding itself,
+	 * but defined to zero then (Draft P802.11be_D3.0, 9.4.2.170.2)
+	 */
+	if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_MLD_ID)) {
+		mld_id = *pos;
+		pos += 1;
 	} else {
-		old = rcu_access_pointer(nontrans_bss->beacon_ies);
-		rcu_assign_pointer(nontrans_bss->beacon_ies, new_ies);
-		cfg80211_update_hidden_bsses(bss_from_pub(nontrans_bss),
-					     new_ies, old);
-		rcu_assign_pointer(nontrans_bss->ies, new_ies);
-		if (old)
-			kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
+		mld_id = 0;
+	}
+
+	/* Extended MLD capabilities and operations */
+	pos += 2;
+
+	/* Fully defrag the ML element for sta information/profile iteration */
+	mle = cfg80211_defrag_mle(elem, tx_data->ie, tx_data->ielen, gfp);
+	if (!mle)
+		return;
+
+	new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp);
+	if (!new_ie)
+		goto out;
+
+	for (i = 0; i < ARRAY_SIZE(mle->sta_prof) && mle->sta_prof[i]; i++) {
+		const struct ieee80211_neighbor_ap_info *ap_info;
+		enum nl80211_band band;
+		u32 freq;
+		const u8 *profile;
+		const u8 *tbtt_info;
+		ssize_t profile_len;
+		u8 link_id;
+
+		if (!ieee80211_mle_basic_sta_prof_size_ok((u8 *)mle->sta_prof[i],
+							  mle->sta_prof_len[i]))
+			continue;
+
+		control = le16_to_cpu(mle->sta_prof[i]->control);
+
+		if (!(control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE))
+			continue;
+
+		link_id = u16_get_bits(control,
+				       IEEE80211_MLE_STA_CONTROL_LINK_ID);
+		if (seen_links & BIT(link_id))
+			break;
+		seen_links |= BIT(link_id);
+
+		if (!(control & IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT) ||
+		    !(control & IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT) ||
+		    !(control & IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT))
+			continue;
+
+		memcpy(data.bssid, mle->sta_prof[i]->variable, ETH_ALEN);
+		data.beacon_interval =
+			get_unaligned_le16(mle->sta_prof[i]->variable + 6);
+		data.tsf = tx_data->tsf +
+			   get_unaligned_le64(mle->sta_prof[i]->variable + 8);
+
+		/* sta_info_len counts itself */
+		profile = mle->sta_prof[i]->variable +
+			  mle->sta_prof[i]->sta_info_len - 1;
+		profile_len = (u8 *)mle->sta_prof[i] + mle->sta_prof_len[i] -
+			      profile;
+
+		if (profile_len < 2)
+			continue;
+
+		data.capability = get_unaligned_le16(profile);
+		profile += 2;
+		profile_len -= 2;
+
+		/* Find in RNR to look up channel information */
+		if (!cfg80211_tbtt_info_for_mld_ap(tx_data->ie, tx_data->ielen,
+						   mld_id, link_id,
+						   &ap_info, &tbtt_info))
+			continue;
+
+		/* We could sanity check the BSSID is included */
+
+		if (!ieee80211_operating_class_to_band(ap_info->op_class,
+						       &band))
+			continue;
+
+		freq = ieee80211_channel_to_freq_khz(ap_info->channel, band);
+		data.channel = ieee80211_get_channel_khz(wiphy, freq);
+
+		/* Generate new elements */
+		memset(new_ie, 0, IEEE80211_MAX_DATA_LEN);
+		data.ie = new_ie;
+		data.ielen = cfg80211_gen_new_ie(tx_data->ie, tx_data->ielen,
+						 profile, profile_len,
+						 new_ie,
+						 IEEE80211_MAX_DATA_LEN);
+		if (!data.ielen)
+			continue;
+
+		bss = cfg80211_inform_single_bss_data(wiphy, &data, gfp);
+		if (!bss)
+			break;
+		cfg80211_put_bss(wiphy, bss);
 	}
 
-out_free:
+out:
 	kfree(new_ie);
+	kfree(mle);
 }
 
+struct cfg80211_bss *
+cfg80211_inform_bss_data(struct wiphy *wiphy,
+			 struct cfg80211_inform_bss *data,
+			 enum cfg80211_bss_frame_type ftype,
+			 const u8 *bssid, u64 tsf, u16 capability,
+			 u16 beacon_interval, const u8 *ie, size_t ielen,
+			 gfp_t gfp)
+{
+	struct cfg80211_inform_single_bss_data inform_data = {
+		.drv_data = data,
+		.ftype = ftype,
+		.tsf = tsf,
+		.capability = capability,
+		.beacon_interval = beacon_interval,
+		.ie = ie,
+		.ielen = ielen,
+	};
+	struct cfg80211_bss *res;
+
+	memcpy(inform_data.bssid, bssid, ETH_ALEN);
+
+	res = cfg80211_inform_single_bss_data(wiphy, &inform_data, gfp);
+	if (!res)
+		return NULL;
+
+	cfg80211_parse_mbssid_data(wiphy, &inform_data, res, gfp);
+
+	cfg80211_parse_ml_sta_data(wiphy, &inform_data, res, gfp);
+
+	return res;
+}
+EXPORT_SYMBOL(cfg80211_inform_bss_data);
+
 /* cfg80211_inform_bss_width_frame helper */
 static struct cfg80211_bss *
 cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
@@ -2380,6 +2960,7 @@
 				      struct ieee80211_mgmt *mgmt, size_t len,
 				      gfp_t gfp)
 {
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 	struct cfg80211_internal_bss tmp = {}, *res;
 	struct cfg80211_bss_ies *ies;
 	struct ieee80211_channel *channel;
@@ -2426,8 +3007,7 @@
 			variable = ext->u.s1g_beacon.variable;
 	}
 
-	channel = cfg80211_get_bss_channel(wiphy, variable,
-					   ielen, data->chan, data->scan_width);
+	channel = cfg80211_get_bss_channel(wiphy, variable, ielen, data->chan);
 	if (!channel)
 		return NULL;
 
@@ -2451,6 +3031,16 @@
 		capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
 	}
 
+	if (channel->band == NL80211_BAND_60GHZ) {
+		bss_type = capability & WLAN_CAPABILITY_DMG_TYPE_MASK;
+		if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP ||
+		    bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS)
+			regulatory_hint_found_beacon(wiphy, channel, gfp);
+	} else {
+		if (capability & WLAN_CAPABILITY_ESS)
+			regulatory_hint_found_beacon(wiphy, channel, gfp);
+	}
+
 	ies = kzalloc(sizeof(*ies) + ielen, gfp);
 	if (!ies)
 		return NULL;
@@ -2470,7 +3060,6 @@
 	tmp.pub.beacon_interval = beacon_int;
 	tmp.pub.capability = capability;
 	tmp.pub.channel = channel;
-	tmp.pub.scan_width = data->scan_width;
 	tmp.pub.signal = data->signal;
 	tmp.ts_boottime = data->boottime_ns;
 	tmp.parent_tsf = data->parent_tsf;
@@ -2479,24 +3068,22 @@
 	ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
 
 	signal_valid = data->chan == channel;
-	res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid,
-				  jiffies);
+	spin_lock_bh(&rdev->bss_lock);
+	res = __cfg80211_bss_update(rdev, &tmp, signal_valid, jiffies);
 	if (!res)
-		return NULL;
+		goto drop;
 
-	if (channel->band == NL80211_BAND_60GHZ) {
-		bss_type = res->pub.capability & WLAN_CAPABILITY_DMG_TYPE_MASK;
-		if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP ||
-		    bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS)
-			regulatory_hint_found_beacon(wiphy, channel, gfp);
-	} else {
-		if (res->pub.capability & WLAN_CAPABILITY_ESS)
-			regulatory_hint_found_beacon(wiphy, channel, gfp);
-	}
+	rdev_inform_bss(rdev, &res->pub, ies, data->drv_data);
+
+	spin_unlock_bh(&rdev->bss_lock);
 
 	trace_cfg80211_return_bss(&res->pub);
-	/* cfg80211_bss_update gives us a referenced result */
+	/* __cfg80211_bss_update gives us a referenced result */
 	return &res->pub;
+
+drop:
+	spin_unlock_bh(&rdev->bss_lock);
+	return NULL;
 }
 
 struct cfg80211_bss *
@@ -2505,51 +3092,34 @@
 			       struct ieee80211_mgmt *mgmt, size_t len,
 			       gfp_t gfp)
 {
-	struct cfg80211_bss *res, *tmp_bss;
-	const u8 *ie = mgmt->u.probe_resp.variable;
-	const struct cfg80211_bss_ies *ies1, *ies2;
-	size_t ielen = len - offsetof(struct ieee80211_mgmt,
-				      u.probe_resp.variable);
-	struct cfg80211_non_tx_bss non_tx_data = {};
+	struct cfg80211_inform_single_bss_data inform_data = {
+		.drv_data = data,
+		.ie = mgmt->u.probe_resp.variable,
+		.ielen = len - offsetof(struct ieee80211_mgmt,
+					u.probe_resp.variable),
+	};
+	struct cfg80211_bss *res;
 
 	res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt,
 						    len, gfp);
+	if (!res)
+		return NULL;
 
-	/* don't do any further MBSSID handling for S1G */
+	/* don't do any further MBSSID/ML handling for S1G */
 	if (ieee80211_is_s1g_beacon(mgmt->frame_control))
 		return res;
 
-	if (!res || !wiphy->support_mbssid ||
-	    !cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
-		return res;
-	if (wiphy->support_only_he_mbssid &&
-	    !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
-		return res;
+	inform_data.ftype = ieee80211_is_beacon(mgmt->frame_control) ?
+		CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP;
+	memcpy(inform_data.bssid, mgmt->bssid, ETH_ALEN);
+	inform_data.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
+	inform_data.beacon_interval =
+		le16_to_cpu(mgmt->u.probe_resp.beacon_int);
 
-	non_tx_data.tx_bss = res;
 	/* process each non-transmitting bss */
-	cfg80211_parse_mbssid_frame_data(wiphy, data, mgmt, len,
-					 &non_tx_data, gfp);
-
-	spin_lock_bh(&wiphy_to_rdev(wiphy)->bss_lock);
-
-	/* check if the res has other nontransmitting bss which is not
-	 * in MBSSID IE
-	 */
-	ies1 = rcu_access_pointer(res->ies);
+	cfg80211_parse_mbssid_data(wiphy, &inform_data, res, gfp);
 
-	/* go through nontrans_list, if the timestamp of the BSS is
-	 * earlier than the timestamp of the transmitting BSS then
-	 * update it
-	 */
-	list_for_each_entry(tmp_bss, &res->nontrans_list,
-			    nontrans_list) {
-		ies2 = rcu_access_pointer(tmp_bss->ies);
-		if (ies2->tsf < ies1->tsf)
-			cfg80211_update_notlisted_nontrans(wiphy, tmp_bss,
-							   mgmt, len);
-	}
-	spin_unlock_bh(&wiphy_to_rdev(wiphy)->bss_lock);
+	cfg80211_parse_ml_sta_data(wiphy, &inform_data, res, gfp);
 
 	return res;
 }
@@ -3043,59 +3613,63 @@
 			cfg = (u8 *)ie + 2;
 			memset(&iwe, 0, sizeof(iwe));
 			iwe.cmd = IWEVCUSTOM;
-			sprintf(buf, "Mesh Network Path Selection Protocol ID: "
-				"0x%02X", cfg[0]);
-			iwe.u.data.length = strlen(buf);
+			iwe.u.data.length = sprintf(buf,
+						    "Mesh Network Path Selection Protocol ID: 0x%02X",
+						    cfg[0]);
 			current_ev = iwe_stream_add_point_check(info,
 								current_ev,
 								end_buf,
 								&iwe, buf);
 			if (IS_ERR(current_ev))
 				goto unlock;
-			sprintf(buf, "Path Selection Metric ID: 0x%02X",
+			iwe.u.data.length = sprintf(buf,
+						    "Path Selection Metric ID: 0x%02X",
 				cfg[1]);
-			iwe.u.data.length = strlen(buf);
 			current_ev = iwe_stream_add_point_check(info,
 								current_ev,
 								end_buf,
 								&iwe, buf);
 			if (IS_ERR(current_ev))
 				goto unlock;
-			sprintf(buf, "Congestion Control Mode ID: 0x%02X",
+			iwe.u.data.length = sprintf(buf,
+						    "Congestion Control Mode ID: 0x%02X",
 				cfg[2]);
-			iwe.u.data.length = strlen(buf);
 			current_ev = iwe_stream_add_point_check(info,
 								current_ev,
 								end_buf,
 								&iwe, buf);
 			if (IS_ERR(current_ev))
 				goto unlock;
-			sprintf(buf, "Synchronization ID: 0x%02X", cfg[3]);
-			iwe.u.data.length = strlen(buf);
+			iwe.u.data.length = sprintf(buf,
+						    "Synchronization ID: 0x%02X",
+						    cfg[3]);
 			current_ev = iwe_stream_add_point_check(info,
 								current_ev,
 								end_buf,
 								&iwe, buf);
 			if (IS_ERR(current_ev))
 				goto unlock;
-			sprintf(buf, "Authentication ID: 0x%02X", cfg[4]);
-			iwe.u.data.length = strlen(buf);
+			iwe.u.data.length = sprintf(buf,
+						    "Authentication ID: 0x%02X",
+						    cfg[4]);
 			current_ev = iwe_stream_add_point_check(info,
 								current_ev,
 								end_buf,
 								&iwe, buf);
 			if (IS_ERR(current_ev))
 				goto unlock;
-			sprintf(buf, "Formation Info: 0x%02X", cfg[5]);
-			iwe.u.data.length = strlen(buf);
+			iwe.u.data.length = sprintf(buf,
+						    "Formation Info: 0x%02X",
+						    cfg[5]);
 			current_ev = iwe_stream_add_point_check(info,
 								current_ev,
 								end_buf,
 								&iwe, buf);
 			if (IS_ERR(current_ev))
 				goto unlock;
-			sprintf(buf, "Capabilities: 0x%02X", cfg[6]);
-			iwe.u.data.length = strlen(buf);
+			iwe.u.data.length = sprintf(buf,
+						    "Capabilities: 0x%02X",
+						    cfg[6]);
 			current_ev = iwe_stream_add_point_check(info,
 								current_ev,
 								end_buf,
@@ -3151,17 +3725,16 @@
 
 	memset(&iwe, 0, sizeof(iwe));
 	iwe.cmd = IWEVCUSTOM;
-	sprintf(buf, "tsf=%016llx", (unsigned long long)(ies->tsf));
-	iwe.u.data.length = strlen(buf);
+	iwe.u.data.length = sprintf(buf, "tsf=%016llx",
+				    (unsigned long long)(ies->tsf));
 	current_ev = iwe_stream_add_point_check(info, current_ev, end_buf,
 						&iwe, buf);
 	if (IS_ERR(current_ev))
 		goto unlock;
 	memset(&iwe, 0, sizeof(iwe));
 	iwe.cmd = IWEVCUSTOM;
-	sprintf(buf, " Last beacon: %ums ago",
+	iwe.u.data.length = sprintf(buf, " Last beacon: %ums ago",
 		elapsed_jiffies_msecs(bss->ts));
-	iwe.u.data.length = strlen(buf);
 	current_ev = iwe_stream_add_point_check(info, current_ev,
 						end_buf, &iwe, buf);
 	if (IS_ERR(current_ev))
diff -ruw linux-6.4/net/wireless/sme.c linux-6.4-fbx/net/wireless/sme.c
--- linux-6.4/net/wireless/sme.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/sme.c	2023-11-07 13:38:44.114258112 +0100
@@ -5,7 +5,7 @@
  * (for nl80211's connect() and wext)
  *
  * Copyright 2009	Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2009, 2020, 2022 Intel Corporation. All rights reserved.
+ * Copyright (C) 2009, 2020, 2022-2023 Intel Corporation. All rights reserved.
  * Copyright 2017	Intel Deutschland GmbH
  */
 
@@ -67,7 +67,7 @@
 	struct cfg80211_scan_request *request;
 	int n_channels, err;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (rdev->scan_req || rdev->scan_msg)
 		return -EBUSY;
@@ -151,7 +151,7 @@
 	struct cfg80211_assoc_request req = {};
 	int err;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (!wdev->conn)
 		return 0;
@@ -255,16 +255,13 @@
 		if (!wdev->netdev)
 			continue;
 
-		wdev_lock(wdev);
-		if (!netif_running(wdev->netdev)) {
-			wdev_unlock(wdev);
+		if (!netif_running(wdev->netdev))
 			continue;
-		}
+
 		if (!wdev->conn ||
-		    wdev->conn->state == CFG80211_CONN_CONNECTED) {
-			wdev_unlock(wdev);
+		    wdev->conn->state == CFG80211_CONN_CONNECTED)
 			continue;
-		}
+
 		if (wdev->conn->params.bssid) {
 			memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN);
 			bssid = bssid_buf;
@@ -279,7 +276,6 @@
 			cr.timeout_reason = treason;
 			__cfg80211_connect_result(wdev->netdev, &cr, false);
 		}
-		wdev_unlock(wdev);
 	}
 
 	wiphy_unlock(&rdev->wiphy);
@@ -300,7 +296,7 @@
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_bss *bss;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel,
 			       wdev->conn->params.bssid,
@@ -317,13 +313,13 @@
 	return bss;
 }
 
-static void __cfg80211_sme_scan_done(struct net_device *dev)
+void cfg80211_sme_scan_done(struct net_device *dev)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_bss *bss;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (!wdev->conn)
 		return;
@@ -339,15 +335,6 @@
 		schedule_work(&rdev->conn_work);
 }
 
-void cfg80211_sme_scan_done(struct net_device *dev)
-{
-	struct wireless_dev *wdev = dev->ieee80211_ptr;
-
-	wdev_lock(wdev);
-	__cfg80211_sme_scan_done(dev);
-	wdev_unlock(wdev);
-}
-
 void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len)
 {
 	struct wiphy *wiphy = wdev->wiphy;
@@ -355,7 +342,7 @@
 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
 	u16 status_code = le16_to_cpu(mgmt->u.auth.status_code);
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (!wdev->conn || wdev->conn->state == CFG80211_CONN_CONNECTED)
 		return;
@@ -491,6 +478,21 @@
 	}
 }
 
+void cfg80211_wdev_release_link_bsses(struct wireless_dev *wdev, u16 link_mask)
+{
+	unsigned int link;
+
+	for_each_valid_link(wdev, link) {
+		if (!wdev->links[link].client.current_bss ||
+		    !(link_mask & BIT(link)))
+			continue;
+		cfg80211_unhold_bss(wdev->links[link].client.current_bss);
+		cfg80211_put_bss(wdev->wiphy,
+				 &wdev->links[link].client.current_bss->pub);
+		wdev->links[link].client.current_bss = NULL;
+	}
+}
+
 static int cfg80211_sme_get_conn_ies(struct wireless_dev *wdev,
 				     const u8 *ies, size_t ies_len,
 				     const u8 **out_ies, size_t *out_ies_len)
@@ -687,14 +689,14 @@
 	 * need not issue a disconnect hint and reset any info such
 	 * as chan dfs state, etc.
 	 */
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+	for_each_rdev(rdev) {
+		wiphy_lock(&rdev->wiphy);
 		list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
-			wdev_lock(wdev);
 			if (wdev->conn || wdev->connected ||
 			    cfg80211_beaconing_iface_active(wdev))
 				is_all_idle = false;
-			wdev_unlock(wdev);
 		}
+		wiphy_unlock(&rdev->wiphy);
 	}
 
 	return is_all_idle;
@@ -746,7 +748,7 @@
 	const u8 *connected_addr;
 	bool bss_not_found = false;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
 		    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
@@ -1078,7 +1080,7 @@
 	unsigned int link;
 	const u8 *connected_addr;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
 		    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
@@ -1279,24 +1281,29 @@
 }
 EXPORT_SYMBOL(cfg80211_roamed);
 
-void __cfg80211_port_authorized(struct wireless_dev *wdev, const u8 *bssid,
+void __cfg80211_port_authorized(struct wireless_dev *wdev, const u8 *peer_addr,
 					const u8 *td_bitmap, u8 td_bitmap_len)
 {
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
-		    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
+		wdev->iftype != NL80211_IFTYPE_P2P_CLIENT &&
+		wdev->iftype != NL80211_IFTYPE_AP &&
+		wdev->iftype != NL80211_IFTYPE_P2P_GO))
 		return;
 
+	if (wdev->iftype == NL80211_IFTYPE_STATION ||
+		wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) {
 	if (WARN_ON(!wdev->connected) ||
-	    WARN_ON(!ether_addr_equal(wdev->u.client.connected_addr, bssid)))
+			WARN_ON(!ether_addr_equal(wdev->u.client.connected_addr, peer_addr)))
 		return;
+	}
 
 	nl80211_send_port_authorized(wiphy_to_rdev(wdev->wiphy), wdev->netdev,
-				     bssid, td_bitmap, td_bitmap_len);
+				     peer_addr, td_bitmap, td_bitmap_len);
 }
 
-void cfg80211_port_authorized(struct net_device *dev, const u8 *bssid,
+void cfg80211_port_authorized(struct net_device *dev, const u8 *peer_addr,
 			      const u8 *td_bitmap, u8 td_bitmap_len, gfp_t gfp)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -1304,7 +1311,7 @@
 	struct cfg80211_event *ev;
 	unsigned long flags;
 
-	if (WARN_ON(!bssid))
+	if (WARN_ON(!peer_addr))
 		return;
 
 	ev = kzalloc(sizeof(*ev) + td_bitmap_len, gfp);
@@ -1312,7 +1319,7 @@
 		return;
 
 	ev->type = EVENT_PORT_AUTHORIZED;
-	memcpy(ev->pa.bssid, bssid, ETH_ALEN);
+	memcpy(ev->pa.peer_addr, peer_addr, ETH_ALEN);
 	ev->pa.td_bitmap = ((u8 *)ev) + sizeof(*ev);
 	ev->pa.td_bitmap_len = td_bitmap_len;
 	memcpy((void *)ev->pa.td_bitmap, td_bitmap, td_bitmap_len);
@@ -1338,7 +1345,7 @@
 	union iwreq_data wrqu;
 #endif
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
 		    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
@@ -1428,7 +1435,7 @@
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	int err;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	/*
 	 * If we have an ssid_len, we're trying to connect or are
@@ -1534,7 +1541,7 @@
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	int err = 0;
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	kfree_sensitive(wdev->connect_keys);
 	wdev->connect_keys = NULL;
@@ -1569,19 +1576,19 @@
 		container_of(work, struct wireless_dev, disconnect_wk);
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
-	wdev_lock(wdev);
+	wiphy_lock(wdev->wiphy);
 
 	if (wdev->conn_owner_nlportid) {
 		switch (wdev->iftype) {
 		case NL80211_IFTYPE_ADHOC:
-			__cfg80211_leave_ibss(rdev, wdev->netdev, false);
+			cfg80211_leave_ibss(rdev, wdev->netdev, false);
 			break;
 		case NL80211_IFTYPE_AP:
 		case NL80211_IFTYPE_P2P_GO:
-			__cfg80211_stop_ap(rdev, wdev->netdev, -1, false);
+			cfg80211_stop_ap(rdev, wdev->netdev, -1, false);
 			break;
 		case NL80211_IFTYPE_MESH_POINT:
-			__cfg80211_leave_mesh(rdev, wdev->netdev);
+			cfg80211_leave_mesh(rdev, wdev->netdev);
 			break;
 		case NL80211_IFTYPE_STATION:
 		case NL80211_IFTYPE_P2P_CLIENT:
@@ -1606,5 +1613,5 @@
 		}
 	}
 
-	wdev_unlock(wdev);
+	wiphy_unlock(wdev->wiphy);
 }
diff -ruw linux-6.4/net/wireless/sysfs.c linux-6.4-fbx/net/wireless/sysfs.c
--- linux-6.4/net/wireless/sysfs.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/sysfs.c	2023-11-07 13:38:44.114258112 +0100
@@ -5,7 +5,7 @@
  *
  * Copyright 2005-2006	Jiri Benc <jbenc@suse.cz>
  * Copyright 2006	Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2021, 2023 Intel Corporation
  */
 
 #include <linux/device.h>
@@ -36,6 +36,7 @@
 SHOW_FMT(index, "%d", wiphy_idx);
 SHOW_FMT(macaddress, "%pM", wiphy.perm_addr);
 SHOW_FMT(address_mask, "%pM", wiphy.addr_mask);
+SHOW_FMT(dev_port, "%d", wiphy.dev_port);
 
 static ssize_t name_show(struct device *dev,
 			 struct device_attribute *attr,
@@ -70,6 +71,7 @@
 	&dev_attr_macaddress.attr,
 	&dev_attr_address_mask.attr,
 	&dev_attr_addresses.attr,
+	&dev_attr_dev_port.attr,
 	&dev_attr_name.attr,
 	NULL,
 };
@@ -105,14 +107,18 @@
 			cfg80211_leave_all(rdev);
 			cfg80211_process_rdev_events(rdev);
 		}
+		cfg80211_process_wiphy_works(rdev, NULL);
 		if (rdev->ops->suspend)
 			ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config);
 		if (ret == 1) {
 			/* Driver refuse to configure wowlan */
 			cfg80211_leave_all(rdev);
 			cfg80211_process_rdev_events(rdev);
+			cfg80211_process_wiphy_works(rdev, NULL);
 			ret = rdev_suspend(rdev, NULL);
 		}
+		if (ret == 0)
+			rdev->suspended = true;
 	}
 	wiphy_unlock(&rdev->wiphy);
 	rtnl_unlock();
@@ -132,6 +138,8 @@
 	wiphy_lock(&rdev->wiphy);
 	if (rdev->wiphy.registered && rdev->ops->resume)
 		ret = rdev_resume(rdev);
+	rdev->suspended = false;
+	schedule_work(&rdev->wiphy_work);
 	wiphy_unlock(&rdev->wiphy);
 
 	if (ret)
diff -ruw linux-6.4/net/wireless/trace.h linux-6.4-fbx/net/wireless/trace.h
--- linux-6.4/net/wireless/trace.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/trace.h	2023-11-07 13:38:44.114258112 +0100
@@ -22,7 +22,7 @@
 
 #define MAXNAME		32
 #define WIPHY_ENTRY	__array(char, wiphy_name, 32)
-#define WIPHY_ASSIGN	strlcpy(__entry->wiphy_name, wiphy_name(wiphy), MAXNAME)
+#define WIPHY_ASSIGN	strscpy(__entry->wiphy_name, wiphy_name(wiphy), MAXNAME)
 #define WIPHY_PR_FMT	"%s"
 #define WIPHY_PR_ARG	__entry->wiphy_name
 
@@ -615,49 +615,47 @@
 
 TRACE_EVENT(rdev_change_beacon,
 	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
-		 struct cfg80211_beacon_data *info),
+		 struct cfg80211_ap_update *info),
 	TP_ARGS(wiphy, netdev, info),
 	TP_STRUCT__entry(
 		WIPHY_ENTRY
 		NETDEV_ENTRY
 		__field(int, link_id)
-		__dynamic_array(u8, head, info ? info->head_len : 0)
-		__dynamic_array(u8, tail, info ? info->tail_len : 0)
-		__dynamic_array(u8, beacon_ies, info ? info->beacon_ies_len : 0)
-		__dynamic_array(u8, proberesp_ies,
-				info ? info->proberesp_ies_len : 0)
-		__dynamic_array(u8, assocresp_ies,
-				info ? info->assocresp_ies_len : 0)
-		__dynamic_array(u8, probe_resp, info ? info->probe_resp_len : 0)
+		__dynamic_array(u8, head, info->beacon.head_len)
+		__dynamic_array(u8, tail, info->beacon.tail_len)
+		__dynamic_array(u8, beacon_ies, info->beacon.beacon_ies_len)
+		__dynamic_array(u8, proberesp_ies, info->beacon.proberesp_ies_len)
+		__dynamic_array(u8, assocresp_ies, info->beacon.assocresp_ies_len)
+		__dynamic_array(u8, probe_resp, info->beacon.probe_resp_len)
 	),
 	TP_fast_assign(
 		WIPHY_ASSIGN;
 		NETDEV_ASSIGN;
-		if (info) {
-			__entry->link_id = info->link_id;
-			if (info->head)
-				memcpy(__get_dynamic_array(head), info->head,
-				       info->head_len);
-			if (info->tail)
-				memcpy(__get_dynamic_array(tail), info->tail,
-				       info->tail_len);
-			if (info->beacon_ies)
+		__entry->link_id = info->beacon.link_id;
+		if (info->beacon.head)
+			memcpy(__get_dynamic_array(head),
+			       info->beacon.head,
+			       info->beacon.head_len);
+		if (info->beacon.tail)
+			memcpy(__get_dynamic_array(tail),
+			       info->beacon.tail,
+			       info->beacon.tail_len);
+		if (info->beacon.beacon_ies)
 				memcpy(__get_dynamic_array(beacon_ies),
-				       info->beacon_ies, info->beacon_ies_len);
-			if (info->proberesp_ies)
+			       info->beacon.beacon_ies,
+			       info->beacon.beacon_ies_len);
+		if (info->beacon.proberesp_ies)
 				memcpy(__get_dynamic_array(proberesp_ies),
-				       info->proberesp_ies,
-				       info->proberesp_ies_len);
-			if (info->assocresp_ies)
+			       info->beacon.proberesp_ies,
+			       info->beacon.proberesp_ies_len);
+		if (info->beacon.assocresp_ies)
 				memcpy(__get_dynamic_array(assocresp_ies),
-				       info->assocresp_ies,
-				       info->assocresp_ies_len);
-			if (info->probe_resp)
+			       info->beacon.assocresp_ies,
+			       info->beacon.assocresp_ies_len);
+		if (info->beacon.probe_resp)
 				memcpy(__get_dynamic_array(probe_resp),
-				       info->probe_resp, info->probe_resp_len);
-		} else {
-			__entry->link_id = -1;
-		}
+			       info->beacon.probe_resp,
+			       info->beacon.probe_resp_len);
 	),
 	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id:%d",
 		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id)
@@ -1159,6 +1157,23 @@
 		  __entry->ap_isolate, __entry->ht_opmode)
 );
 
+TRACE_EVENT(rdev_inform_bss,
+	TP_PROTO(struct wiphy *wiphy, struct cfg80211_bss *bss),
+	TP_ARGS(wiphy, bss),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		MAC_ENTRY(bssid)
+		CHAN_ENTRY
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		MAC_ASSIGN(bssid, bss->bssid);
+		CHAN_ASSIGN(bss->channel);
+	),
+	TP_printk(WIPHY_PR_FMT ", %pM, " CHAN_PR_FMT,
+		  WIPHY_PR_ARG, __entry->bssid, CHAN_PR_ARG)
+);
+
 TRACE_EVENT(rdev_set_txq_params,
 	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
 		 struct ieee80211_txq_params *params),
@@ -1306,16 +1321,18 @@
 		NETDEV_ENTRY
 		MAC_ENTRY(bssid)
 		__field(u16, reason_code)
+		__field(bool, local_state_change)
 	),
 	TP_fast_assign(
 		WIPHY_ASSIGN;
 		NETDEV_ASSIGN;
 		MAC_ASSIGN(bssid, req->bssid);
 		__entry->reason_code = req->reason_code;
+		__entry->local_state_change = req->local_state_change;
 	),
-	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM, reason: %u",
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM, reason: %u, local_state_change:%d",
 		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid,
-		  __entry->reason_code)
+		  __entry->reason_code, __entry->local_state_change)
 );
 
 TRACE_EVENT(rdev_disassoc,
@@ -1779,15 +1796,16 @@
 
 TRACE_EVENT(rdev_tdls_mgmt,
 	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
-		 u8 *peer, u8 action_code, u8 dialog_token,
+		 u8 *peer, int link_id, u8 action_code, u8 dialog_token,
 		 u16 status_code, u32 peer_capability,
 		 bool initiator, const u8 *buf, size_t len),
-	TP_ARGS(wiphy, netdev, peer, action_code, dialog_token, status_code,
-		peer_capability, initiator, buf, len),
+	TP_ARGS(wiphy, netdev, peer, link_id, action_code, dialog_token,
+		status_code, peer_capability, initiator, buf, len),
 	TP_STRUCT__entry(
 		WIPHY_ENTRY
 		NETDEV_ENTRY
 		MAC_ENTRY(peer)
+		__field(int, link_id)
 		__field(u8, action_code)
 		__field(u8, dialog_token)
 		__field(u16, status_code)
@@ -1799,6 +1817,7 @@
 		WIPHY_ASSIGN;
 		NETDEV_ASSIGN;
 		MAC_ASSIGN(peer, peer);
+		__entry->link_id = link_id;
 		__entry->action_code = action_code;
 		__entry->dialog_token = dialog_token;
 		__entry->status_code = status_code;
@@ -1806,11 +1825,12 @@
 		__entry->initiator = initiator;
 		memcpy(__get_dynamic_array(buf), buf, len);
 	),
-	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM, action_code: %u, "
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM"
+		  ", link_id: %d, action_code: %u "
 		  "dialog_token: %u, status_code: %u, peer_capability: %u "
 		  "initiator: %s buf: %#.2x ",
 		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer,
-		  __entry->action_code, __entry->dialog_token,
+		  __entry->link_id, __entry->action_code, __entry->dialog_token,
 		  __entry->status_code, __entry->peer_capability,
 		  BOOL_TO_STR(__entry->initiator),
 		  ((u8 *)__get_dynamic_array(buf))[0])
@@ -2908,7 +2928,7 @@
 
 TRACE_EVENT(cfg80211_send_rx_assoc,
 	TP_PROTO(struct net_device *netdev,
-		 struct cfg80211_rx_assoc_resp *data),
+		 struct cfg80211_rx_assoc_resp_data *data),
 	TP_ARGS(netdev, data),
 	TP_STRUCT__entry(
 		NETDEV_ENTRY
@@ -3570,7 +3590,6 @@
 	TP_STRUCT__entry(
 		WIPHY_ENTRY
 		CHAN_ENTRY
-		__field(enum nl80211_bss_scan_width, scan_width)
 		__dynamic_array(u8, mgmt, len)
 		__field(s32, signal)
 		__field(u64, ts_boottime)
@@ -3580,7 +3599,6 @@
 	TP_fast_assign(
 		WIPHY_ASSIGN;
 		CHAN_ASSIGN(data->chan);
-		__entry->scan_width = data->scan_width;
 		if (mgmt)
 			memcpy(__get_dynamic_array(mgmt), mgmt, len);
 		__entry->signal = data->signal;
@@ -3589,8 +3607,8 @@
 		MAC_ASSIGN(parent_bssid, data->parent_bssid);
 	),
 	TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT
-		  "(scan_width: %d) signal: %d, tsb:%llu, detect_tsf:%llu, tsf_bssid: %pM",
-		  WIPHY_PR_ARG, CHAN_PR_ARG, __entry->scan_width,
+		  "signal: %d, tsb:%llu, detect_tsf:%llu, tsf_bssid: %pM",
+		  WIPHY_PR_ARG, CHAN_PR_ARG,
 		  __entry->signal, (unsigned long long)__entry->ts_boottime,
 		  (unsigned long long)__entry->parent_tsf,
 		  __entry->parent_bssid)
@@ -3946,6 +3964,21 @@
 		  __entry->enable)
 );
 
+TRACE_EVENT(cfg80211_links_removed,
+	TP_PROTO(struct net_device *netdev, u16 link_mask),
+	TP_ARGS(netdev, link_mask),
+	TP_STRUCT__entry(
+		NETDEV_ENTRY
+		__field(u16, link_mask)
+	),
+	TP_fast_assign(
+		NETDEV_ASSIGN;
+		__entry->link_mask = link_mask;
+	),
+	TP_printk(NETDEV_PR_FMT ", link_mask:%u", NETDEV_PR_ARG,
+		  __entry->link_mask)
+);
+
 #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
 
 #undef TRACE_INCLUDE_PATH
diff -ruw linux-6.4/net/wireless/util.c linux-6.4-fbx/net/wireless/util.c
--- linux-6.4/net/wireless/util.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/wireless/util.c	2023-11-07 13:38:44.114258112 +0100
@@ -43,8 +43,7 @@
 }
 EXPORT_SYMBOL(ieee80211_get_response_rate);
 
-u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband,
-			      enum nl80211_bss_scan_width scan_width)
+u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband)
 {
 	struct ieee80211_rate *bitrates;
 	u32 mandatory_rates = 0;
@@ -54,15 +53,10 @@
 	if (WARN_ON(!sband))
 		return 1;
 
-	if (sband->band == NL80211_BAND_2GHZ) {
-		if (scan_width == NL80211_BSS_CHAN_WIDTH_5 ||
-		    scan_width == NL80211_BSS_CHAN_WIDTH_10)
-			mandatory_flag = IEEE80211_RATE_MANDATORY_G;
-		else
+	if (sband->band == NL80211_BAND_2GHZ)
 			mandatory_flag = IEEE80211_RATE_MANDATORY_B;
-	} else {
+	else
 		mandatory_flag = IEEE80211_RATE_MANDATORY_A;
-	}
 
 	bitrates = sband->bitrates;
 	for (i = 0; i < sband->n_bitrates; i++)
@@ -580,6 +574,8 @@
 		hdrlen += ETH_ALEN + 2;
 	else if (!pskb_may_pull(skb, hdrlen))
 		return -EINVAL;
+	else
+		payload.eth.h_proto = htons(skb->len - hdrlen);
 
 	mesh_addr = skb->data + sizeof(payload.eth) + ETH_ALEN;
 	switch (payload.flags & MESH_FLAGS_AE) {
@@ -1042,7 +1038,6 @@
 		list_del(&ev->list);
 		spin_unlock_irqrestore(&wdev->event_lock, flags);
 
-		wdev_lock(wdev);
 		switch (ev->type) {
 		case EVENT_CONNECT_RESULT:
 			__cfg80211_connect_result(
@@ -1064,15 +1059,14 @@
 					       ev->ij.channel);
 			break;
 		case EVENT_STOPPED:
-			__cfg80211_leave(wiphy_to_rdev(wdev->wiphy), wdev);
+			cfg80211_leave(wiphy_to_rdev(wdev->wiphy), wdev);
 			break;
 		case EVENT_PORT_AUTHORIZED:
-			__cfg80211_port_authorized(wdev, ev->pa.bssid,
+			__cfg80211_port_authorized(wdev, ev->pa.peer_addr,
 						   ev->pa.td_bitmap,
 						   ev->pa.td_bitmap_len);
 			break;
 		}
-		wdev_unlock(wdev);
 
 		kfree(ev);
 
@@ -1122,9 +1116,7 @@
 			return -EBUSY;
 
 		dev->ieee80211_ptr->use_4addr = false;
-		wdev_lock(dev->ieee80211_ptr);
 		rdev_set_qos_map(rdev, dev, NULL);
-		wdev_unlock(dev->ieee80211_ptr);
 
 		switch (otype) {
 		case NL80211_IFTYPE_AP:
@@ -1136,10 +1128,8 @@
 			break;
 		case NL80211_IFTYPE_STATION:
 		case NL80211_IFTYPE_P2P_CLIENT:
-			wdev_lock(dev->ieee80211_ptr);
 			cfg80211_disconnect(rdev, dev,
 					    WLAN_REASON_DEAUTH_LEAVING, true);
-			wdev_unlock(dev->ieee80211_ptr);
 			break;
 		case NL80211_IFTYPE_MESH_POINT:
 			/* mesh should be handled? */
@@ -1646,6 +1636,114 @@
 	return result / 10000;
 }
 
+static u32 cfg80211_calculate_bitrate_s1g(struct rate_info *rate)
+{
+	/* For 1, 2, 4, 8 and 16 MHz channels */
+	static const u32 base[5][11] = {
+		{  300000,
+		   600000,
+		   900000,
+		  1200000,
+		  1800000,
+		  2400000,
+		  2700000,
+		  3000000,
+		  3600000,
+		  4000000,
+		  /* MCS 10 supported in 1 MHz only */
+		  150000,
+		},
+		{  650000,
+		  1300000,
+		  1950000,
+		  2600000,
+		  3900000,
+		  5200000,
+		  5850000,
+		  6500000,
+		  7800000,
+		  /* MCS 9 not valid */
+		},
+		{  1350000,
+		   2700000,
+		   4050000,
+		   5400000,
+		   8100000,
+		  10800000,
+		  12150000,
+		  13500000,
+		  16200000,
+		  18000000,
+		},
+		{  2925000,
+		   5850000,
+		   8775000,
+		  11700000,
+		  17550000,
+		  23400000,
+		  26325000,
+		  29250000,
+		  35100000,
+		  39000000,
+		},
+		{  8580000,
+		  11700000,
+		  17550000,
+		  23400000,
+		  35100000,
+		  46800000,
+		  52650000,
+		  58500000,
+		  70200000,
+		  78000000,
+		},
+	};
+	u32 bitrate;
+	/* default is 1 MHz index */
+	int idx = 0;
+
+	if (rate->mcs >= 11)
+		goto warn;
+
+	switch (rate->bw) {
+	case RATE_INFO_BW_16:
+		idx = 4;
+		break;
+	case RATE_INFO_BW_8:
+		idx = 3;
+		break;
+	case RATE_INFO_BW_4:
+		idx = 2;
+		break;
+	case RATE_INFO_BW_2:
+		idx = 1;
+		break;
+	case RATE_INFO_BW_1:
+		idx = 0;
+		break;
+	case RATE_INFO_BW_5:
+	case RATE_INFO_BW_10:
+	case RATE_INFO_BW_20:
+	case RATE_INFO_BW_40:
+	case RATE_INFO_BW_80:
+	case RATE_INFO_BW_160:
+	default:
+		goto warn;
+	}
+
+	bitrate = base[idx][rate->mcs];
+	bitrate *= rate->nss;
+
+	if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
+		bitrate = (bitrate / 9) * 10;
+	/* do NOT round down here */
+	return (bitrate + 50000) / 100000;
+warn:
+	WARN_ONCE(1, "invalid rate bw=%d, mcs=%d, nss=%d\n",
+		  rate->bw, rate->mcs, rate->nss);
+	return 0;
+}
+
 u32 cfg80211_calculate_bitrate(struct rate_info *rate)
 {
 	if (rate->flags & RATE_INFO_FLAGS_MCS)
@@ -1662,6 +1760,8 @@
 		return cfg80211_calculate_bitrate_he(rate);
 	if (rate->flags & RATE_INFO_FLAGS_EHT_MCS)
 		return cfg80211_calculate_bitrate_eht(rate);
+	if (rate->flags & RATE_INFO_FLAGS_S1G_MCS)
+		return cfg80211_calculate_bitrate_s1g(rate);
 
 	return rate->legacy;
 }
@@ -1860,6 +1960,35 @@
 }
 EXPORT_SYMBOL(ieee80211_ie_split_ric);
 
+void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos, u8 frag_id)
+{
+	unsigned int elem_len;
+
+	if (!len_pos)
+		return;
+
+	elem_len = skb->data + skb->len - len_pos - 1;
+
+	while (elem_len > 255) {
+		/* this one is 255 */
+		*len_pos = 255;
+		/* remaining data gets smaller */
+		elem_len -= 255;
+		/* make space for the fragment ID/len in SKB */
+		skb_put(skb, 2);
+		/* shift back the remaining data to place fragment ID/len */
+		memmove(len_pos + 255 + 3, len_pos + 255 + 1, elem_len);
+		/* place the fragment ID */
+		len_pos += 255 + 1;
+		*len_pos = frag_id;
+		/* and point to fragment length to update later */
+		len_pos++;
+	}
+
+	*len_pos = elem_len;
+}
+EXPORT_SYMBOL(ieee80211_fragment_element);
+
 bool ieee80211_operating_class_to_band(u8 operating_class,
 				       enum nl80211_band *band)
 {
@@ -1870,6 +1999,7 @@
 		*band = NL80211_BAND_5GHZ;
 		return true;
 	case 131 ... 135:
+	case 137:
 		*band = NL80211_BAND_6GHZ;
 		return true;
 	case 81:
@@ -2535,12 +2665,12 @@
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
-	ASSERT_WDEV_LOCK(wdev);
+	lockdep_assert_wiphy(wdev->wiphy);
 
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_P2P_GO:
-		__cfg80211_stop_ap(rdev, wdev->netdev, link_id, true);
+		cfg80211_stop_ap(rdev, wdev->netdev, link_id, true);
 		break;
 	default:
 		/* per-link not relevant */
@@ -2565,12 +2695,10 @@
 	if (wdev->iftype != NL80211_IFTYPE_AP)
 		return;
 
-	wdev_lock(wdev);
 	if (wdev->valid_links) {
 		for_each_valid_link(wdev, link_id)
 			cfg80211_remove_link(wdev, link_id);
 	}
-	wdev_unlock(wdev);
 }
 
 int cfg80211_remove_virtual_intf(struct cfg80211_registered_device *rdev,
diff -ruw linux-6.4/net/xfrm/xfrm_device.c linux-6.4-fbx/net/xfrm/xfrm_device.c
--- linux-6.4/net/xfrm/xfrm_device.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/xfrm/xfrm_device.c	2023-11-07 13:38:44.114258112 +0100
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <net/dst.h>
+#include <net/gso.h>
 #include <net/xfrm.h>
 #include <linux/notifier.h>
 
diff -ruw linux-6.4/net/xfrm/xfrm_interface_core.c linux-6.4-fbx/net/xfrm/xfrm_interface_core.c
--- linux-6.4/net/xfrm/xfrm_interface_core.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/xfrm/xfrm_interface_core.c	2023-11-07 13:38:44.114258112 +0100
@@ -33,6 +33,7 @@
 #include <linux/uaccess.h>
 #include <linux/atomic.h>
 
+#include <net/gso.h>
 #include <net/icmp.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
diff -ruw linux-6.4/net/xfrm/xfrm_output.c linux-6.4-fbx/net/xfrm/xfrm_output.c
--- linux-6.4/net/xfrm/xfrm_output.c	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/net/xfrm/xfrm_output.c	2023-11-07 13:38:44.114258112 +0100
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <net/dst.h>
+#include <net/gso.h>
 #include <net/icmp.h>
 #include <net/inet_ecn.h>
 #include <net/xfrm.h>
diff -ruw linux-6.4/scripts/Makefile.lib linux-6.4-fbx/scripts/Makefile.lib
--- linux-6.4/scripts/Makefile.lib	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/scripts/Makefile.lib	2023-05-22 20:06:46.611916789 +0200
@@ -372,7 +372,7 @@
       cmd_wrap_S_dtb = {								\
 		symbase=__$(patsubst .%,%,$(suffix $<))_$(subst -,_,$(notdir $*));	\
 		echo '\#include <asm-generic/vmlinux.lds.h>';				\
-		echo '.section .dtb.init.rodata,"a"';					\
+		echo '.section .dtb.rodata,"a"';					\
 		echo '.balign STRUCT_ALIGNMENT';					\
 		echo ".global $${symbase}_begin";					\
 		echo "$${symbase}_begin:";						\
diff -ruw linux-6.4/scripts/dtc/include-prefixes/arm64/amlogic/Makefile linux-6.4-fbx/scripts/dtc/include-prefixes/arm64/amlogic/Makefile
--- linux-6.4/scripts/dtc/include-prefixes/arm64/amlogic/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/scripts/dtc/include-prefixes/arm64/amlogic/Makefile	2023-11-27 19:13:52.958343154 +0100
@@ -1,8 +1,15 @@
 # SPDX-License-Identifier: GPL-2.0
+fbx-boards += \
+	fbxwmr.dtb \
+	fbxwmr-r1.dtb fbxwmr-r2.dtb \
+	fbxwmr-r3.dtb fbxwmr-r4.dtb
+
 dtb-$(CONFIG_ARCH_MESON) += meson-a1-ad401.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-axg-jethome-jethub-j100.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-axg-jethome-jethub-j110-rev-2.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-axg-jethome-jethub-j110-rev-3.dtb
+DTC_FLAGS += -@
+dtb-$(CONFIG_ARCH_MESON) += $(fbx-boards)
 dtb-$(CONFIG_ARCH_MESON) += meson-axg-s400.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12a-radxa-zero.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12a-sei510.dtb
diff -ruw linux-6.4/scripts/dtc/include-prefixes/arm64/broadcom/Makefile linux-6.4-fbx/scripts/dtc/include-prefixes/arm64/broadcom/Makefile
--- linux-6.4/scripts/dtc/include-prefixes/arm64/broadcom/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/scripts/dtc/include-prefixes/arm64/broadcom/Makefile	2023-05-22 20:06:36.535648775 +0200
@@ -11,3 +11,4 @@
 subdir-y	+= bcmbca
 subdir-y	+= northstar2
 subdir-y	+= stingray
+subdir-y	+= bcm63xx
diff -ruw linux-6.4/scripts/dtc/include-prefixes/arm64/marvell/Makefile linux-6.4-fbx/scripts/dtc/include-prefixes/arm64/marvell/Makefile
--- linux-6.4/scripts/dtc/include-prefixes/arm64/marvell/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/scripts/dtc/include-prefixes/arm64/marvell/Makefile	2023-05-22 20:06:36.575649839 +0200
@@ -27,3 +27,19 @@
 dtb-$(CONFIG_ARCH_MVEBU) += cn9130-crb-A.dtb
 dtb-$(CONFIG_ARCH_MVEBU) += cn9130-crb-B.dtb
 dtb-$(CONFIG_ARCH_MVEBU) += ac5-98dx35xx-rd.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp1_dsl_lte.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp1_ftth_p2p.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp2_ftth_p2p.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp2_ftth_pon.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp1_test_module.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp2_test_module.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_pcie_pine_pericom.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_pcie_pine_asmedia.dtb
+
+dtb-$(CONFIG_ARCH_MVEBU) += jbxgw7r.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += jbxgw7r_exp1_ftth_p2p.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += jbxgw7r_exp2_ftth_p2p.dtb
+
+# export symbols in DTBs file to allow overlay usage
+DTC_FLAGS	+= -@
diff -ruw linux-6.4/scripts/dtc/include-prefixes/arm64/qcom/Makefile linux-6.4-fbx/scripts/dtc/include-prefixes/arm64/qcom/Makefile
--- linux-6.4/scripts/dtc/include-prefixes/arm64/qcom/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/scripts/dtc/include-prefixes/arm64/qcom/Makefile	2023-11-24 18:39:54.353042707 +0100
@@ -1,204 +1,208 @@
 # SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8016-sbc.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8094-sony-xperia-kitakami-karin_windy.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8096-db820c.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8096-ifc6640.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq5332-mi01.2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq5332-rdp468.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq6018-cp01-c1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq8074-hk01.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq8074-hk10-c1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq8074-hk10-c2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq9574-al02-c7.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-acer-a1-724.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-alcatel-idol347.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-asus-z00l.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-gplus-fl8005a.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-huawei-g7.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-longcheer-l8150.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-longcheer-l8910.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-a3u-eur.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-a5u-eur.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-e5.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-e7.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-grandmax.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-gt510.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-gt58.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-j5.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-j5x.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-serranove.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-thwc-uf896.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-thwc-ufi001c.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-wingtech-wt88047.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-yiming-uz801v3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-motorola-potter.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-xiaomi-daisy.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-xiaomi-mido.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-xiaomi-tissot.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-xiaomi-vince.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8956-sony-xperia-loire-kugo.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8956-sony-xperia-loire-suzu.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-lg-bullhead-rev-10.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-lg-bullhead-rev-101.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-msft-lumia-octagon-talkman.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-xiaomi-libra.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-huawei-angler-rev-101.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-msft-lumia-octagon-cityman.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-ivy.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-karin.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-satsuki.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-sumire.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-suzuran.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-oneplus3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-oneplus3t.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-sony-xperia-tone-dora.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-sony-xperia-tone-kagura.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-sony-xperia-tone-keyaki.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-xiaomi-gemini.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996pro-xiaomi-natrium.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996pro-xiaomi-scorpio.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-asus-novago-tp370ql.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-fxtec-pro1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-hp-envy-x2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-lenovo-miix-630.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-oneplus-cheeseburger.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-oneplus-dumpling.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-sony-xperia-yoshino-lilac.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-sony-xperia-yoshino-maple.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-sony-xperia-yoshino-poplar.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-xiaomi-sagit.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcs404-evb-1000.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcs404-evb-4000.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qdu1000-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qrb2210-rb1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qrb4210-rb2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qrb5165-rb5.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qrb5165-rb5-vision-mezzanine.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qru1000-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8155p-adp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8295p-adp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8540p-ride.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8775p-ride.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-coachz-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-coachz-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-coachz-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-coachz-r3-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-homestar-r2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-homestar-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-homestar-r4.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-kingoftown.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r1-kb.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r3-kb.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r3-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r9.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r9-kb.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r9-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-r4.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-r9.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-nots-r4.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-nots-r5.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-nots-r9.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel-lte-parade.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel-lte-ti.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel-parade.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel-ti.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel360-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel360-wifi.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r2-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r3-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-quackingstick-r0.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-quackingstick-r0-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-wormdingler-rev1-boe.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-wormdingler-rev1-inx.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-wormdingler-rev1-inx-rt5682s.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-wormdingler-rev1-boe-rt5682s.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-crd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-crd-pro.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-evoker.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-evoker-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-herobrine-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-villager-r0.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-villager-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-villager-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-zombie.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-zombie-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-zombie-nvme.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-zombie-nvme-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-idp2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-crd-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc8280xp-crd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc8280xp-lenovo-thinkpad-x13s.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sda660-inforce-ifc6560.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm450-motorola-ali.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm630-sony-xperia-ganges-kirin.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm630-sony-xperia-nile-discovery.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm630-sony-xperia-nile-pioneer.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm630-sony-xperia-nile-voyager.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm632-fairphone-fp3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm632-motorola-ocean.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm636-sony-xperia-ganges-mermaid.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm660-xiaomi-lavender.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm670-google-sargo.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-db845c.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-db845c-navigation-mezzanine.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-lg-judyln.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-lg-judyp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-oneplus-enchilada.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-oneplus-fajita.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-samsung-starqltechn.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-sony-xperia-tama-akari.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-sony-xperia-tama-akatsuki.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-sony-xperia-tama-apollo.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-xiaomi-beryllium-ebbg.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-xiaomi-beryllium-tianma.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-xiaomi-polaris.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-shift-axolotl.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm850-lenovo-yoga-c630.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm850-samsung-w737.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm4250-oneplus-billie2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6115p-lenovo-j606f.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6125-sony-xperia-seine-pdx201.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6125-xiaomi-laurel-sprout.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6350-sony-xperia-lena-pdx213.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6375-sony-xperia-murray-pdx225.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm7225-fairphone-fp4.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-microsoft-surface-duo.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-sony-xperia-kumano-bahamut.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-sony-xperia-kumano-griffin.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-sony-xperia-edo-pdx203.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-sony-xperia-edo-pdx206.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-xiaomi-elish-boe.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-xiaomi-elish-csot.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-microsoft-surface-duo2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-sony-xperia-sagami-pdx214.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-sony-xperia-sagami-pdx215.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8450-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8450-qrd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8450-sony-xperia-nagara-pdx223.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8450-sony-xperia-nagara-pdx224.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8550-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8550-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8016-sbc.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8094-sony-xperia-kitakami-karin_windy.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8096-db820c.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8096-ifc6640.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq5332-mi01.2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq5332-rdp468.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq6018-cp01-c1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq8074-hk01.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq8074-hk10-c1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq8074-hk10-c2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp418.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp433.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp449.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp454.dtb
+dtb-$(CONFIG_ARCH_QCOM_FBX_DTB)	+= fbxgw9r.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-acer-a1-724.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-alcatel-idol347.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-asus-z00l.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-gplus-fl8005a.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-huawei-g7.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-longcheer-l8150.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-longcheer-l8910.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-a3u-eur.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-a5u-eur.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-e5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-e7.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-grandmax.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-gt510.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-gt58.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-j5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-j5x.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-serranove.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-thwc-uf896.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-thwc-ufi001c.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-wingtech-wt88047.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-yiming-uz801v3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-motorola-potter.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-xiaomi-daisy.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-xiaomi-mido.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-xiaomi-tissot.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-xiaomi-vince.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8956-sony-xperia-loire-kugo.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8956-sony-xperia-loire-suzu.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-lg-bullhead-rev-10.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-lg-bullhead-rev-101.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-msft-lumia-octagon-talkman.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-xiaomi-libra.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-huawei-angler-rev-101.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-msft-lumia-octagon-cityman.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-ivy.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-karin.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-satsuki.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-sumire.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-suzuran.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-oneplus3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-oneplus3t.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-sony-xperia-tone-dora.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-sony-xperia-tone-kagura.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-sony-xperia-tone-keyaki.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-xiaomi-gemini.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996pro-xiaomi-natrium.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996pro-xiaomi-scorpio.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-asus-novago-tp370ql.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-fxtec-pro1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-hp-envy-x2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-lenovo-miix-630.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-oneplus-cheeseburger.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-oneplus-dumpling.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-sony-xperia-yoshino-lilac.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-sony-xperia-yoshino-maple.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-sony-xperia-yoshino-poplar.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-xiaomi-sagit.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcs404-evb-1000.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcs404-evb-4000.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qdu1000-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qrb2210-rb1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qrb4210-rb2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qrb5165-rb5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qrb5165-rb5-vision-mezzanine.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qru1000-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8155p-adp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8295p-adp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8540p-ride.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8775p-ride.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-coachz-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-coachz-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-coachz-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-coachz-r3-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-homestar-r2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-homestar-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-homestar-r4.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-kingoftown.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r1-kb.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r3-kb.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r3-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r9.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r9-kb.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r9-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-r4.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-r9.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-nots-r4.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-nots-r5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-nots-r9.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel-lte-parade.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel-lte-ti.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel-parade.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel-ti.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel360-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel360-wifi.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r2-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r3-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-quackingstick-r0.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-quackingstick-r0-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-wormdingler-rev1-boe.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-wormdingler-rev1-inx.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-wormdingler-rev1-inx-rt5682s.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-wormdingler-rev1-boe-rt5682s.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-crd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-crd-pro.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-evoker.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-evoker-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-herobrine-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-villager-r0.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-villager-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-villager-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-zombie.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-zombie-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-zombie-nvme.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-zombie-nvme-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-idp2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-crd-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc8280xp-crd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc8280xp-lenovo-thinkpad-x13s.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sda660-inforce-ifc6560.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm450-motorola-ali.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm630-sony-xperia-ganges-kirin.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm630-sony-xperia-nile-discovery.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm630-sony-xperia-nile-pioneer.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm630-sony-xperia-nile-voyager.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm632-fairphone-fp3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm632-motorola-ocean.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm636-sony-xperia-ganges-mermaid.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm660-xiaomi-lavender.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm670-google-sargo.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-cheza-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-cheza-r2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-cheza-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-db845c.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-db845c-navigation-mezzanine.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-lg-judyln.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-lg-judyp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-oneplus-enchilada.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-oneplus-fajita.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-samsung-starqltechn.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-sony-xperia-tama-akari.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-sony-xperia-tama-akatsuki.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-sony-xperia-tama-apollo.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-xiaomi-beryllium-ebbg.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-xiaomi-beryllium-tianma.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-xiaomi-polaris.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-shift-axolotl.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm850-lenovo-yoga-c630.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm850-samsung-w737.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm4250-oneplus-billie2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6115p-lenovo-j606f.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6125-sony-xperia-seine-pdx201.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6125-xiaomi-laurel-sprout.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6350-sony-xperia-lena-pdx213.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6375-sony-xperia-murray-pdx225.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm7225-fairphone-fp4.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-microsoft-surface-duo.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-sony-xperia-kumano-bahamut.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-sony-xperia-kumano-griffin.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-sony-xperia-edo-pdx203.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-sony-xperia-edo-pdx206.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-xiaomi-elish-boe.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-xiaomi-elish-csot.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-microsoft-surface-duo2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-sony-xperia-sagami-pdx214.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-sony-xperia-sagami-pdx215.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8450-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8450-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8450-sony-xperia-nagara-pdx223.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8450-sony-xperia-nagara-pdx224.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8550-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8550-qrd.dtb
diff -ruw linux-6.4/scripts/dtc/include-prefixes/arm64/qcom/ipq9574.dtsi linux-6.4-fbx/scripts/dtc/include-prefixes/arm64/qcom/ipq9574.dtsi
--- linux-6.4/scripts/dtc/include-prefixes/arm64/qcom/ipq9574.dtsi	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/scripts/dtc/include-prefixes/arm64/qcom/ipq9574.dtsi	2024-04-19 15:59:31.193600561 +0200
@@ -6,9 +6,13 @@
  * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
-#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,apss-ipq.h>
 #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/reset/qcom,ipq9574-gcc.h>
+#include <dt-bindings/clock/qcom,nsscc-ipq9574.h>
+#include <dt-bindings/reset/qcom,nsscc-ipq9574.h>
+#include <dt-bindings/clock/qcom,uniphycc-ipq9574.h>
 
 / {
 	interrupt-parent = <&intc>;
@@ -16,23 +20,50 @@
 	#size-cells = <2>;
 
 	clocks {
-		bias_pll_ubi_nc_clk: bias-pll-ubi-nc-clk {
+		sleep_clk: sleep-clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+		};
+
+		xo_board_clk: xo-board-clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+		};
+
+		bias_pll_ubi_nc_clk: bias_pll_ubi_nc_clk {
 			compatible = "fixed-clock";
 			clock-frequency = <353000000>;
 			#clock-cells = <0>;
 		};
 
-		sleep_clk: sleep-clk {
+		bias_pll_cc_clk: bias_pll_cc_clk {
 			compatible = "fixed-clock";
+			clock-frequency = <1200000000>;
 			#clock-cells = <0>;
 		};
 
-		xo_board_clk: xo-board-clk {
+		bias_pll_nss_noc_clk: bias_pll_nss_noc_clk {
+			compatible = "fixed-clock";
+			clock-frequency = <461500000>;
+			#clock-cells = <0>;
+		};
+
+		gcc_gpll0_out_aux: gcc_gpll0_out_aux {
 			compatible = "fixed-clock";
+			clock-frequency = <800000000>;
 			#clock-cells = <0>;
 		};
 	};
 
+	imem_reset_reason: imem-reset-reason {
+		status = "disabled";
+		compatible = "qcom,imem-reset-reason-ipq9574",
+			"qcom-imem-reset-reason";
+
+		reg = <0x0 0x086006bc 0x0 0x78>;
+		reg-names = "imem";
+	};
+
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -43,6 +74,10 @@
 			reg = <0x0>;
 			enable-method = "psci";
 			next-level-cache = <&L2_0>;
+			clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>;
+			clock-names = "cpu";
+			operating-points-v2 = <&cpu_opp_table>;
+			cpu-supply = <&ipq9574_s1>;
 		};
 
 		CPU1: cpu@1 {
@@ -51,6 +86,10 @@
 			reg = <0x1>;
 			enable-method = "psci";
 			next-level-cache = <&L2_0>;
+			clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>;
+			clock-names = "cpu";
+			operating-points-v2 = <&cpu_opp_table>;
+			cpu-supply = <&ipq9574_s1>;
 		};
 
 		CPU2: cpu@2 {
@@ -59,6 +98,10 @@
 			reg = <0x2>;
 			enable-method = "psci";
 			next-level-cache = <&L2_0>;
+			clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>;
+			clock-names = "cpu";
+			operating-points-v2 = <&cpu_opp_table>;
+			cpu-supply = <&ipq9574_s1>;
 		};
 
 		CPU3: cpu@3 {
@@ -67,6 +110,10 @@
 			reg = <0x3>;
 			enable-method = "psci";
 			next-level-cache = <&L2_0>;
+			clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>;
+			clock-names = "cpu";
+			operating-points-v2 = <&cpu_opp_table>;
+			cpu-supply = <&ipq9574_s1>;
 		};
 
 		L2_0: l2-cache {
@@ -82,6 +129,58 @@
 		reg = <0x0 0x40000000 0x0 0x0>;
 	};
 
+	cpu_opp_table: opp-table-cpu {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp-936000000 {
+			opp-hz = /bits/ 64 <936000000>;
+			opp-microvolt = <725000>;
+			clock-latency-ns = <200000>;
+		};
+
+		opp-1104000000 {
+			opp-hz = /bits/ 64 <1104000000>;
+			opp-microvolt = <787500>;
+			clock-latency-ns = <200000>;
+		};
+
+		opp-1416000000 {
+			opp-hz = /bits/ 64 <1416000000>;
+			opp-microvolt = <862500>;
+			clock-latency-ns = <200000>;
+		};
+
+		opp-1488000000 {
+			opp-hz = /bits/ 64 <1488000000>;
+			opp-microvolt = <925000>;
+			clock-latency-ns = <200000>;
+		};
+
+		opp-1800000000 {
+			opp-hz = /bits/ 64 <1800000000>;
+			opp-microvolt = <987500>;
+			clock-latency-ns = <200000>;
+		};
+
+		opp-2208000000 {
+			opp-hz = /bits/ 64 <2208000000>;
+			opp-microvolt = <1062500>;
+			clock-latency-ns = <200000>;
+		};
+	};
+
+	firmware {
+		scm {
+			compatible = "qcom,scm-ipq9574", "qcom,scm";
+			qcom,dload-mode = <&tcsr 0x6100>;
+		};
+
+		qfprom {
+			compatible = "qcom,qfprom-sec";
+		};
+	};
+
 	pmu {
 		compatible = "arm,cortex-a73-pmu";
 		interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
@@ -92,6 +191,33 @@
 		method = "smc";
 	};
 
+	reg_usb_3p3: s3300 {
+		compatible = "regulator-fixed";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+		regulator-always-on;
+		regulator-name = "usb-phy-vdd-dummy";
+	};
+
+	reg_usb_1p8: s1800 {
+		compatible = "regulator-fixed";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-boot-on;
+		regulator-always-on;
+		regulator-name = "usb-phy-pll-dummy";
+	};
+
+	reg_usb_0p925: s0925 {
+		compatible = "regulator-fixed";
+		regulator-min-microvolt = <925000>;
+		regulator-max-microvolt = <925000>;
+		regulator-boot-on;
+		regulator-always-on;
+		regulator-name = "usb-phy-dummy";
+	};
+
 	reserved-memory {
 		#address-cells = <2>;
 		#size-cells = <2>;
@@ -101,6 +227,25 @@
 			reg = <0x0 0x4a600000 0x0 0x400000>;
 			no-map;
 		};
+
+		smem@4aa00000 {
+			compatible = "qcom,smem";
+			reg = <0x0 0x4aa00000 0x0 0x00100000>;
+			hwlocks = <&tcsr_mutex 0>;
+			no-map;
+		};
+	};
+
+	rpm-glink {
+		compatible = "qcom,glink-rpm";
+		interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+		qcom,rpm-msg-ram = <&rpm_msg_ram>;
+		mboxes = <&apcs_glb 0>;
+
+		rpm_requests: rpm-requests {
+			compatible = "qcom,rpm-ipq9574";
+			qcom,glink-channels = "rpm_requests";
+		};
 	};
 
 	soc: soc@0 {
@@ -109,6 +254,232 @@
 		#size-cells = <1>;
 		ranges = <0 0 0 0xffffffff>;
 
+		rpm_msg_ram: sram@60000 {
+			compatible = "qcom,rpm-msg-ram";
+			reg = <0x00060000 0x6000>;
+		};
+
+		usb_0_qusbphy: phy@7b000 {
+			compatible = "qcom,ipq9574-qusb2-phy";
+			reg = <0x0007b000 0x180>;
+			#phy-cells = <0>;
+
+			clocks = <&gcc GCC_USB0_PHY_CFG_AHB_CLK>,
+				 <&xo_board_clk>;
+			clock-names = "cfg_ahb",
+				      "ref";
+
+			vdd-supply = <&reg_usb_0p925>;
+			vdda-pll-supply = <&reg_usb_1p8>;
+			vdda-phy-dpdm-supply = <&reg_usb_3p3>;
+
+			resets = <&gcc GCC_QUSB2_0_PHY_BCR>;
+			status = "disabled";
+		};
+
+		usb_0_qmpphy: phy@7d000 {
+			compatible = "qcom,ipq9574-qmp-usb3-phy";
+			reg = <0x0007d000 0xa00>;
+			#phy-cells = <0>;
+
+			clocks = <&gcc GCC_USB0_AUX_CLK>,
+				 <&xo_board_clk>,
+				 <&gcc GCC_USB0_PHY_CFG_AHB_CLK>,
+				 <&gcc GCC_USB0_PIPE_CLK>;
+			clock-names = "aux",
+				      "ref",
+				      "cfg_ahb",
+				      "pipe";
+
+			resets = <&gcc GCC_USB0_PHY_BCR>,
+				 <&gcc GCC_USB3PHY_0_PHY_BCR>;
+			reset-names = "phy",
+				      "phy_phy";
+
+			vdda-pll-supply = <&reg_usb_1p8>;
+			vdda-phy-supply = <&reg_usb_0p925>;
+
+			status = "disabled";
+
+			#clock-cells = <0>;
+			clock-output-names = "usb0_pipe_clk";
+		};
+
+		pcie0_phy: phy@84000 {
+			compatible = "qcom,ipq9574-qmp-gen3x1-pcie-phy";
+			reg = <0x00084000 0x1000>;
+
+			clocks = <&gcc GCC_PCIE0_AUX_CLK>,
+				 <&gcc GCC_PCIE0_AHB_CLK>,
+				 <&gcc GCC_ANOC_PCIE0_1LANE_M_CLK>,
+				 <&gcc GCC_SNOC_PCIE0_1LANE_S_CLK>,
+				 <&gcc GCC_PCIE0_PIPE_CLK>;
+			clock-names = "aux", "cfg_ahb", "anoc_lane", "snoc_lane", "pipe";
+
+			assigned-clocks = <&gcc GCC_PCIE0_AUX_CLK>;
+			assigned-clock-rates = <20000000>;
+
+			resets = <&gcc GCC_PCIE0_PHY_BCR>,
+				 <&gcc GCC_PCIE0PHY_PHY_BCR>;
+			reset-names = "phy", "common";
+
+			#clock-cells = <0>;
+			clock-output-names = "gcc_pcie0_pipe_clk_src";
+
+			#phy-cells = <0>;
+			status = "disabled";
+
+		};
+
+		pcie2_phy: phy@8c000 {
+			compatible = "qcom,ipq9574-qmp-gen3x2-pcie-phy";
+			reg = <0x0008c000 0x2000>;
+
+			clocks = <&gcc GCC_PCIE2_AUX_CLK>,
+				 <&gcc GCC_PCIE2_AHB_CLK>,
+				 <&gcc GCC_ANOC_PCIE2_2LANE_M_CLK>,
+				 <&gcc GCC_SNOC_PCIE2_2LANE_S_CLK>,
+				 <&gcc GCC_PCIE2_PIPE_CLK>;
+			clock-names = "aux", "cfg_ahb", "anoc_lane", "snoc_lane", "pipe";
+
+			assigned-clocks = <&gcc GCC_PCIE2_AUX_CLK>;
+			assigned-clock-rates = <20000000>;
+
+			resets = <&gcc GCC_PCIE2_PHY_BCR>,
+				 <&gcc GCC_PCIE2PHY_PHY_BCR>;
+			reset-names = "phy", "common";
+
+			#clock-cells = <0>;
+			clock-output-names = "gcc_pcie2_pipe_clk_src";
+
+			#phy-cells = <0>;
+			status = "disabled";
+
+		};
+
+		mdio: mdio@90000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "qcom,ipq9574-mdio";
+			reg = <0x90000 0x64>;
+			clocks = <&gcc GCC_MDIO_AHB_CLK>;
+			clock-names = "gcc_mdio_ahb_clk";
+			status = "disabled";
+		};
+
+		rng: rng@e3000 {
+			compatible = "qcom,prng-ee";
+			reg = <0x000e3000 0x1000>;
+			clocks = <&gcc GCC_PRNG_AHB_CLK>;
+			clock-names = "core";
+		};
+
+		pcie3_phy: phy@f4000 {
+			compatible = "qcom,ipq9574-qmp-gen3x2-pcie-phy";
+			reg = <0x000f4000 0x2000>;
+
+			clocks = <&gcc GCC_PCIE3_AUX_CLK>,
+				 <&gcc GCC_PCIE3_AHB_CLK>,
+				 <&gcc GCC_ANOC_PCIE3_2LANE_M_CLK>,
+				 <&gcc GCC_SNOC_PCIE3_2LANE_S_CLK>,
+				 <&gcc GCC_PCIE3_PIPE_CLK>;
+			clock-names = "aux", "cfg_ahb", "anoc_lane", "snoc_lane", "pipe";
+
+			assigned-clocks = <&gcc GCC_PCIE3_AUX_CLK>;
+			assigned-clock-rates = <20000000>;
+
+			resets = <&gcc GCC_PCIE3_PHY_BCR>,
+				 <&gcc GCC_PCIE3PHY_PHY_BCR>;
+			reset-names = "phy", "common";
+
+			#clock-cells = <0>;
+			clock-output-names = "gcc_pcie3_pipe_clk_src";
+
+			#phy-cells = <0>;
+			status = "disabled";
+
+		};
+
+		pcie1_phy: phy@fc000 {
+			compatible = "qcom,ipq9574-qmp-gen3x1-pcie-phy";
+			reg = <0x000fc000 0x1000>;
+
+			clocks = <&gcc GCC_PCIE1_AUX_CLK>,
+				 <&gcc GCC_PCIE1_AHB_CLK>,
+				 <&gcc GCC_ANOC_PCIE1_1LANE_M_CLK>,
+				 <&gcc GCC_SNOC_PCIE1_1LANE_S_CLK>,
+				 <&gcc GCC_PCIE1_PIPE_CLK>;
+			clock-names = "aux", "cfg_ahb", "anoc_lane", "snoc_lane", "pipe";
+
+			assigned-clocks = <&gcc GCC_PCIE1_AUX_CLK>;
+			assigned-clock-rates = <20000000>;
+
+			resets = <&gcc GCC_PCIE1_PHY_BCR>,
+				 <&gcc GCC_PCIE1PHY_PHY_BCR>;
+			reset-names = "phy", "common";
+
+			#clock-cells = <0>;
+			clock-output-names = "gcc_pcie1_pipe_clk_src";
+
+			#phy-cells = <0>;
+			status = "disabled";
+
+		};
+
+		lpass: lpass@0a000000 {
+			compatible = "qca,lpass-ipq9574";
+			reg =  <0xa000000 0x3bffff>;
+			clocks = <&gcc GCC_LPASS_SWAY_CLK>,
+				<&gcc GCC_LPASS_CORE_AXIM_CLK>,
+				<&gcc GCC_SNOC_LPASS_CFG_CLK>,
+				<&gcc GCC_PCNOC_LPASS_CLK>;
+			clock-names = "sway", "axim", "snoc_cfg", "pcnoc";
+			resets = <&gcc GCC_LPASS_BCR>;
+			reset-names = "lpass";
+			status = "disabled";
+                };
+
+		lpass_pcm: lpass-pcm@0a3c0000 {
+			compatible = "qca,ipq9574-lpass-pcm";
+			interrupts = <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "if0";
+			reg =  <0xa3c0000 0x23014>;
+			reg-names = "if0", "lpm";
+			status = "disabled";
+                };
+
+		nsscc: clock-controller@39b00000 {
+			compatible = "qcom,nsscc-ipq9574";
+			reg = <0x39b00000 0x80000>;
+			clocks = <&gcc GCC_NSSNOC_SNOC_CLK>,
+				<&gcc GCC_NSSNOC_SNOC_1_CLK>,
+				<&gcc GCC_NSSNOC_NSSCC_CLK>,
+				<&gcc GCC_NSSCC_CLK>,
+				<&xo_board_clk>,
+				<&bias_pll_cc_clk>,
+				<&bias_pll_nss_noc_clk>,
+				<&bias_pll_ubi_nc_clk>,
+				<&gcc_gpll0_out_aux>,
+				<&ess UNIPHY0_GCC_RX_CLK>,
+				<&ess UNIPHY0_GCC_TX_CLK>,
+				<&ess UNIPHY1_GCC_RX_CLK>,
+				<&ess UNIPHY1_GCC_TX_CLK>,
+				<&ess UNIPHY2_GCC_RX_CLK>,
+				<&ess UNIPHY2_GCC_TX_CLK>;
+
+			/*
+			 * those clocks are needed for the clock-controler
+			 * itself, regmap access will freeze if they are
+			 * not enabled
+			 */
+			clock-names = "noc_snoc",
+				"noc_snoc1",
+				"noc_nsscc",
+				"nsscc";
+			#clock-cells = <0x1>;
+			#reset-cells = <0x1>;
+		};
+
 		tlmm: pinctrl@1000000 {
 			compatible = "qcom,ipq9574-tlmm";
 			reg = <0x01000000 0x300000>;
@@ -119,6 +490,51 @@
 			interrupt-controller;
 			#interrupt-cells = <2>;
 
+			mdio_pins: mdio_pinmux {
+				mux_0 {
+					pins = "gpio38";
+					function = "mdc";
+					drive-strength = <8>;
+					bias-disable;
+				};
+				mux_1 {
+					pins = "gpio39";
+					function = "mdio";
+					drive-strength = <8>;
+					bias-pull-up;
+				};
+			};
+
+			audio_pins_pri: audio_pinmux_pri {
+				mux_1 {
+					pins = "gpio41";
+					function = "audio_pri";
+					drive-strength = <8>;
+					bias-pull-down;
+				};
+
+				mux_2 {
+					pins = "gpio40";
+					function = "audio_pri";
+					drive-strength = <8>;
+					bias-pull-down;
+				};
+
+				mux_3 {
+					pins = "gpio42";
+					function = "audio_pri";
+					drive-strength = <8>;
+					bias-pull-down;
+				};
+
+				mux_4 {
+					pins = "gpio43";
+					function = "audio_pri";
+					drive-strength = <16>;
+					bias-pull-down;
+				};
+			};
+
 			uart2_pins: uart2-state {
 				pins = "gpio34", "gpio35";
 				function = "blsp2_uart";
@@ -127,22 +543,501 @@
 			};
 		};
 
+		ess: ess@3a000000 {
+			compatible = "qcom,ipq9574-ess";
+			reg =   <0x3a000000 0xa00000>,
+				<0x3ab00000 0xef800>,
+				<0x07a00000 0x100000>;
+			reg-names = "ppe", "edma", "uniphy";
+
+			interrupts =
+				/* rx desc start */
+				<GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>,
+				/* rx fill start */
+				<GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 362 IRQ_TYPE_LEVEL_HIGH>,
+				/* tx compl start */
+				<GIC_SPI 363 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 364 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 365 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 366 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 368 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 380 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 498 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 499 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 509 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 508 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 507 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 505 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 504 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 503 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 502 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 501 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 500 IRQ_TYPE_LEVEL_HIGH>;
+
+			interrupt-names =
+				"edma_rx_desc_ring_0",
+				"edma_rx_desc_ring_1",
+				"edma_rx_desc_ring_2",
+				"edma_rx_desc_ring_3",
+				"edma_rx_desc_ring_4",
+				"edma_rx_desc_ring_5",
+				"edma_rx_desc_ring_6",
+				"edma_rx_desc_ring_7",
+				"edma_rx_desc_ring_8",
+				"edma_rx_desc_ring_9",
+				"edma_rx_desc_ring_10",
+				"edma_rx_desc_ring_11",
+				"edma_rx_desc_ring_12",
+				"edma_rx_desc_ring_13",
+				"edma_rx_desc_ring_14",
+				"edma_rx_desc_ring_15",
+				"edma_rx_desc_ring_16",
+				"edma_rx_desc_ring_17",
+				"edma_rx_desc_ring_18",
+				"edma_rx_desc_ring_19",
+				"edma_rx_desc_ring_20",
+				"edma_rx_desc_ring_21",
+				"edma_rx_desc_ring_22",
+				"edma_rx_desc_ring_23",
+				"edma_rx_fill_ring_0",
+				"edma_rx_fill_ring_1",
+				"edma_rx_fill_ring_2",
+				"edma_rx_fill_ring_3",
+				"edma_rx_fill_ring_4",
+				"edma_rx_fill_ring_5",
+				"edma_rx_fill_ring_6",
+				"edma_rx_fill_ring_7",
+				"edma_tx_compl_ring_0",
+				"edma_tx_compl_ring_1",
+				"edma_tx_compl_ring_2",
+				"edma_tx_compl_ring_3",
+				"edma_tx_compl_ring_4",
+				"edma_tx_compl_ring_5",
+				"edma_tx_compl_ring_6",
+				"edma_tx_compl_ring_7",
+				"edma_tx_compl_ring_8",
+				"edma_tx_compl_ring_9",
+				"edma_tx_compl_ring_10",
+				"edma_tx_compl_ring_11",
+				"edma_tx_compl_ring_12",
+				"edma_tx_compl_ring_13",
+				"edma_tx_compl_ring_14",
+				"edma_tx_compl_ring_15",
+				"edma_tx_compl_ring_16",
+				"edma_tx_compl_ring_17",
+				"edma_tx_compl_ring_18",
+				"edma_tx_compl_ring_19",
+				"edma_tx_compl_ring_20",
+				"edma_tx_compl_ring_21",
+				"switch_misc_intr",
+				"edma_misc",
+				"edma_tx_compl_ring_22",
+				"edma_tx_compl_ring_23",
+				"edma_tx_compl_ring_24",
+				"edma_tx_compl_ring_25",
+				"edma_tx_compl_ring_26",
+				"edma_tx_compl_ring_27",
+				"edma_tx_compl_ring_28",
+				"edma_tx_compl_ring_29",
+				"edma_tx_compl_ring_30",
+				"edma_tx_compl_ring_31";
+
+			clocks = <&gcc GCC_CMN_12GPLL_AHB_CLK>,
+				<&gcc GCC_CMN_12GPLL_SYS_CLK>,
+				<&gcc GCC_UNIPHY0_AHB_CLK>,
+				<&gcc GCC_UNIPHY0_SYS_CLK>,
+				<&gcc GCC_UNIPHY1_AHB_CLK>,
+				<&gcc GCC_UNIPHY1_SYS_CLK>,
+				<&gcc GCC_UNIPHY2_AHB_CLK>,
+				<&gcc GCC_UNIPHY2_SYS_CLK>,
+				<&nsscc NSS_CC_PORT1_MAC_CLK>,
+				<&nsscc NSS_CC_PORT2_MAC_CLK>,
+				<&nsscc NSS_CC_PORT3_MAC_CLK>,
+				<&nsscc NSS_CC_PORT4_MAC_CLK>,
+				<&nsscc NSS_CC_PORT5_MAC_CLK>,
+				<&nsscc NSS_CC_PORT6_MAC_CLK>,
+				<&nsscc NSS_CC_PPE_SWITCH_CLK>,
+				<&nsscc NSS_CC_PPE_SWITCH_CFG_CLK>,
+				<&nsscc NSS_CC_NSSNOC_PPE_CLK>,
+				<&nsscc NSS_CC_NSSNOC_PPE_CFG_CLK>,
+				<&nsscc NSS_CC_PPE_EDMA_CLK>,
+				<&nsscc NSS_CC_PPE_EDMA_CFG_CLK>,
+				<&nsscc NSS_CC_PPE_SWITCH_IPE_CLK>,
+				<&nsscc NSS_CC_PPE_SWITCH_BTQ_CLK>,
+				<&nsscc NSS_CC_PORT1_RX_CLK>,
+				<&nsscc NSS_CC_PORT1_TX_CLK>,
+				<&nsscc NSS_CC_PORT2_RX_CLK>,
+				<&nsscc NSS_CC_PORT2_TX_CLK>,
+				<&nsscc NSS_CC_PORT3_RX_CLK>,
+				<&nsscc NSS_CC_PORT3_TX_CLK>,
+				<&nsscc NSS_CC_PORT4_RX_CLK>,
+				<&nsscc NSS_CC_PORT4_TX_CLK>,
+				<&nsscc NSS_CC_PORT5_RX_CLK>,
+				<&nsscc NSS_CC_PORT5_TX_CLK>,
+				<&nsscc NSS_CC_PORT6_RX_CLK>,
+				<&nsscc NSS_CC_PORT6_TX_CLK>,
+				<&nsscc NSS_CC_PORT1_RX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT1_TX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT2_RX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT2_TX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT3_RX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT3_TX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT4_RX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT4_TX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT5_RX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT5_TX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT6_RX_CLK_SRC>,
+				<&nsscc NSS_CC_PORT6_TX_CLK_SRC>,
+				<&nsscc NSS_CC_UNIPHY_PORT1_RX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT1_TX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT2_RX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT2_TX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT3_RX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT3_TX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT4_RX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT4_TX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT5_RX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT5_TX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT6_RX_CLK>,
+				<&nsscc NSS_CC_UNIPHY_PORT6_TX_CLK>,
+
+				/* EDMA clocks */
+				<&nsscc NSS_CC_NSS_CSR_CLK>,
+				<&nsscc NSS_CC_NSSNOC_NSS_CSR_CLK>,
+				<&nsscc NSS_CC_IMEM_QSB_CLK>,
+				<&nsscc NSS_CC_NSSNOC_IMEM_QSB_CLK>,
+				<&nsscc NSS_CC_IMEM_AHB_CLK>,
+				<&nsscc NSS_CC_NSSNOC_IMEM_AHB_CLK>,
+				<&gcc GCC_MEM_NOC_NSSNOC_CLK>,
+				<&gcc GCC_NSS_TBU_CLK>,
+				<&gcc GCC_NSS_TS_CLK>,
+				<&gcc GCC_NSSCC_CLK>,
+				<&gcc GCC_NSSCFG_CLK>,
+				<&gcc GCC_NSSNOC_ATB_CLK>,
+				<&gcc GCC_NSSNOC_MEM_NOC_1_CLK>,
+				<&gcc GCC_NSSNOC_MEMNOC_CLK>,
+				<&gcc GCC_NSSNOC_NSSCC_CLK>,
+				<&gcc GCC_NSSNOC_PCNOC_1_CLK>,
+				<&gcc GCC_NSSNOC_QOSGEN_REF_CLK>,
+				<&gcc GCC_NSSNOC_SNOC_1_CLK>,
+				<&gcc GCC_NSSNOC_SNOC_CLK>,
+				<&gcc GCC_NSSNOC_TIMEOUT_REF_CLK>,
+				<&gcc GCC_NSSNOC_XO_DCD_CLK>;
+
+			clock-names = "cmn_ahb_clk",
+				"cmn_sys_clk",
+				"uniphy0_ahb_clk",
+				"uniphy0_sys_clk",
+				"uniphy1_ahb_clk",
+				"uniphy1_sys_clk",
+				"uniphy2_ahb_clk",
+				"uniphy2_sys_clk",
+				"port1_mac_clk",
+				"port2_mac_clk",
+				"port3_mac_clk",
+				"port4_mac_clk",
+				"port5_mac_clk",
+				"port6_mac_clk",
+				"nss_ppe_switch_clk",
+				"nss_ppe_cfg_clk",
+				"nssnoc_ppe_clk",
+				"nssnoc_ppe_cfg_clk",
+				"nss_edma_clk",
+				"nss_edma_cfg_clk",
+				"nss_ppe_ipe_clk",
+				"nss_ppe_btq_clk",
+				"nss_port1_rx_clk", "nss_port1_tx_clk",
+				"nss_port2_rx_clk", "nss_port2_tx_clk",
+				"nss_port3_rx_clk", "nss_port3_tx_clk",
+				"nss_port4_rx_clk", "nss_port4_tx_clk",
+				"nss_port5_rx_clk", "nss_port5_tx_clk",
+				"nss_port6_rx_clk", "nss_port6_tx_clk",
+				"nss_port1_rx_clk_src",
+				"nss_port1_tx_clk_src",
+				"nss_port2_rx_clk_src",
+				"nss_port2_tx_clk_src",
+				"nss_port3_rx_clk_src",
+				"nss_port3_tx_clk_src",
+				"nss_port4_rx_clk_src",
+				"nss_port4_tx_clk_src",
+				"nss_port5_rx_clk_src",
+				"nss_port5_tx_clk_src",
+				"nss_port6_rx_clk_src",
+				"nss_port6_tx_clk_src",
+				"uniphy_port1_rx_clk",
+				"uniphy_port1_tx_clk",
+				"uniphy_port2_rx_clk",
+				"uniphy_port2_tx_clk",
+				"uniphy_port3_rx_clk",
+				"uniphy_port3_tx_clk",
+				"uniphy_port4_rx_clk",
+				"uniphy_port4_tx_clk",
+				"uniphy_port5_rx_clk",
+				"uniphy_port5_tx_clk",
+				"uniphy_port6_rx_clk",
+				"uniphy_port6_tx_clk",
+				/* EDMA clocks */
+				"nss_cc_nss_csr_clk",
+				"nss_cc_nssnoc_nss_csr_clk",
+				"nss_cc_imem_qsb_clk",
+				"nss_cc_nssnoc_imem_qsb_clk",
+				"nss_cc_imem_ahb_clk",
+				"nss_cc_nssnoc_imem_ahb_clk",
+				"gcc_mem_noc_nssnoc_clk",
+				"gcc_nss_tbu_clk",
+				"gcc_nss_ts_clk",
+				"gcc_nsscc_clk",
+				"gcc_nsscfg_clk",
+				"gcc_nssnoc_atb_clk",
+				"gcc_nssnoc_mem_noc_1_clk",
+				"gcc_nssnoc_memnoc_clk",
+				"gcc_nssnoc_nsscc_clk",
+				"gcc_nssnoc_pcnoc_1_clk",
+				"gcc_nssnoc_qosgen_ref_clk",
+				"gcc_nssnoc_snoc_1_clk",
+				"gcc_nssnoc_snoc_clk",
+				"gcc_nssnoc_timeout_ref_clk",
+				"gcc_nssnoc_xo_dcd_clk";
+
+			#clock-cells = <1>;
+			clock-output-names = "uniphy0_gcc_rx_clk",
+				"uniphy0_gcc_tx_clk",
+				"uniphy1_gcc_rx_clk",
+				"uniphy1_gcc_tx_clk",
+				"uniphy2_gcc_rx_clk",
+				"uniphy2_gcc_tx_clk";
+
+			resets = <&nsscc PPE_FULL_RESET>,
+				<&nsscc EDMA_HW_RESET>,
+				<&nsscc UNIPHY0_SOFT_RESET>,
+				<&gcc GCC_UNIPHY0_XPCS_RESET>,
+				<&nsscc UNIPHY_PORT5_ARES>,
+				<&gcc GCC_UNIPHY1_XPCS_RESET>,
+				<&nsscc UNIPHY_PORT6_ARES>,
+				<&gcc GCC_UNIPHY2_XPCS_RESET>,
+				<&nsscc UNIPHY_PORT1_ARES>,
+				<&nsscc UNIPHY_PORT2_ARES>,
+				<&nsscc UNIPHY_PORT3_ARES>,
+				<&nsscc UNIPHY_PORT4_ARES>,
+				<&gcc GCC_UNIPHY0_SYS_RESET>,
+				<&gcc GCC_UNIPHY1_SYS_RESET>,
+				<&gcc GCC_UNIPHY2_SYS_RESET>,
+				<&nsscc NSSPORT1_RESET>,
+				<&nsscc NSSPORT2_RESET>,
+				<&nsscc NSSPORT3_RESET>,
+				<&nsscc NSSPORT4_RESET>,
+				<&nsscc NSSPORT5_RESET>,
+				<&nsscc NSSPORT6_RESET>,
+				<&nsscc PORT1_MAC_ARES>,
+				<&nsscc PORT2_MAC_ARES>,
+				<&nsscc PORT3_MAC_ARES>,
+				<&nsscc PORT4_MAC_ARES>,
+				<&nsscc PORT5_MAC_ARES>,
+				<&nsscc PORT6_MAC_ARES>;
+			reset-names = "ppe_rst",
+				"edma_rst",
+				"uniphy0_soft_rst",
+				"uniphy0_xpcs_rst",
+				"uniphy1_soft_rst",
+				"uniphy1_xpcs_rst",
+				"uniphy2_soft_rst",
+				"uniphy2_xpcs_rst",
+				"uniphy0_port1_dis",
+				"uniphy0_port2_dis",
+				"uniphy0_port3_dis",
+				"uniphy0_port4_dis",
+				"uniphy0_sys_rst",
+				"uniphy1_sys_rst",
+				"uniphy2_sys_rst",
+				"nss_port1_rst",
+				"nss_port2_rst",
+				"nss_port3_rst",
+				"nss_port4_rst",
+				"nss_port5_rst",
+				"nss_port6_rst",
+				"nss_port1_mac_rst",
+				"nss_port2_mac_rst",
+				"nss_port3_mac_rst",
+				"nss_port4_mac_rst",
+				"nss_port5_mac_rst",
+				"nss_port6_mac_rst";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				/*
+				 * NB: the id are *NOT* the physical
+				 * PPE port id, only the phyiscal
+				 * interfaces
+				*/
+				ess_phys_port0: port@0 {
+					reg = <0>;
+					ess,ppe-port-id = <1>;
+					status = "disabled";
+
+					/*
+					 * PCS is UNIPHY0
+					 *
+					 * possible phy-mode:
+					 * - QSGMII
+					 * - PSGMII
+					 * - QUSGMII
+					 */
+				};
+
+				ess_phys_port1: port@1 {
+					reg = <1>;
+					ess,ppe-port-id = <2>;
+					status = "disabled";
+					/*
+					 * PCS is UNIPHY0
+					 *
+					 * possible phy-mode:
+					 * - QSGMII
+					 * - PSGMII
+					 * - QUSGMII
+					 */
+				};
+
+				ess_phys_port2: port@2 {
+					reg = <2>;
+					ess,ppe-port-id = <3>;
+					status = "disabled";
+					/*
+					 * PCS is UNIPHY0
+					 *
+					 * possible phy-mode:
+					 * - QSGMII
+					 * - PSGMII
+					 * - QUSGMII
+					 */
+				};
+
+				ess_phys_port3: port@3 {
+					reg = <3>;
+					ess,ppe-port-id = <4>;
+					status = "disabled";
+					/*
+					 * PCS is UNIPHY0
+					 *
+					 * possible phy-mode:
+					 * - QSGMII
+					 * - PSGMII
+					 * - QUSGMII
+					 * - SGMII (port[0-2] disabled)
+					 * - 10GBASER (port[0-2] disabled)
+					 * - USXGMII (port[0-2] disabled)
+					 */
+				};
+
+				ess_phys_port4: port@4 {
+					reg = <4>;
+					ess,ppe-port-id = <5>;
+					status = "disabled";
+					/*
+					 * PCS can be muxed on either:
+					 *  - UNIPHY0
+					 *  - UNIPHY1
+					 *
+					 * possible phy-mode:
+					 * - PSGMII (UNIPHY0)
+					 * - SGMII (UNIPHY1)
+					 * - 10GBASER (UNIPHY1)
+					 * - USXGMII (UNIPHY1)
+					 */
+				};
+
+				ess_phys_port5: port@5 {
+					reg = <5>;
+					ess,ppe-port-id = <6>;
+					status = "disabled";
+					/*
+					 * PCS is UNIPHY2
+					 *
+					 * possible phy-mode:
+					 * - SGMII
+					 * - 10GBASER
+					 * - USXGMII
+					 */
+				};
+			};
+		};
+
 		gcc: clock-controller@1800000 {
 			compatible = "qcom,ipq9574-gcc";
 			reg = <0x01800000 0x80000>;
 			clocks = <&xo_board_clk>,
 				 <&sleep_clk>,
-				 <&bias_pll_ubi_nc_clk>,
-				 <0>,
-				 <0>,
-				 <0>,
 				 <0>,
+				 <&pcie0_phy>,
+				 <&pcie1_phy>,
+				 <&pcie2_phy>,
+				 <&pcie3_phy>,
 				 <0>;
 			#clock-cells = <1>;
 			#reset-cells = <1>;
 			#power-domain-cells = <1>;
 		};
 
+		tcsr_mutex: hwlock@1905000 {
+			compatible = "qcom,tcsr-mutex";
+			reg = <0x01905000 0x20000>;
+			#hwlock-cells = <1>;
+		};
+
+		tcsr: syscon@1937000 {
+			compatible = "qcom,tcsr-ipq9574", "syscon";
+			reg = <0x01937000 0x21000>;
+		};
+
 		sdhc_1: mmc@7804000 {
 			compatible = "qcom,ipq9574-sdhci", "qcom,sdhci-msm-v5";
 			reg = <0x07804000 0x1000>, <0x07805000 0x1000>;
@@ -160,6 +1055,36 @@
 			status = "disabled";
 		};
 
+		blsp_dma: dma-controller@7884000 {
+			compatible = "qcom,bam-v1.7.0";
+			reg = <0x07884000 0x2b000>;
+			interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "bam_clk";
+			#dma-cells = <1>;
+			qcom,ee = <0>;
+		};
+
+		blsp1_uart0: serial@78af000 {
+			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+			reg = <0x078af000 0x200>;
+			interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_UART1_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			status = "disabled";
+		};
+
+		blsp1_uart1: serial@78b0000 {
+			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+			reg = <0x078b0000 0x200>;
+			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			status = "disabled";
+		};
+
 		blsp1_uart2: serial@78b1000 {
 			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
 			reg = <0x078b1000 0x200>;
@@ -170,17 +1095,222 @@
 			status = "disabled";
 		};
 
+		blsp1_uart3: serial@78b2000 {
+			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+			reg = <0x078b2000 0x200>;
+			interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_UART4_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			status = "disabled";
+		};
+
+		blsp1_uart4: serial@78b3000 {
+			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+			reg = <0x078b3000 0x200>;
+			interrupts = <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_UART5_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			status = "disabled";
+		};
+
+		blsp1_uart5: serial@78b4000 {
+			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+			reg = <0x078b4000 0x200>;
+			interrupts = <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_UART6_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			status = "disabled";
+		};
+
+		blsp1_spi0: spi@78b5000 {
+			compatible = "qcom,spi-qup-v2.2.1";
+			reg = <0x078b5000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 12>, <&blsp_dma 13>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		blsp1_i2c1: i2c@78b6000 {
+			compatible = "qcom,i2c-qup-v2.2.1";
+			reg = <0x078b6000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 14>, <&blsp_dma 15>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		blsp1_spi1: spi@78b6000 {
+			compatible = "qcom,spi-qup-v2.2.1";
+			reg = <0x078b6000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 14>, <&blsp_dma 15>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		blsp1_i2c2: i2c@78b7000 {
+			compatible = "qcom,i2c-qup-v2.2.1";
+			reg = <0x078b7000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 16>, <&blsp_dma 17>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		blsp1_spi2: spi@78b7000 {
+			compatible = "qcom,spi-qup-v2.2.1";
+			reg = <0x078b7000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP3_SPI_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 16>, <&blsp_dma 17>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		blsp1_i2c3: i2c@78b8000 {
+			compatible = "qcom,i2c-qup-v2.2.1";
+			reg = <0x078b8000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP4_I2C_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 18>, <&blsp_dma 19>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		blsp1_spi3: spi@78b8000 {
+			compatible = "qcom,spi-qup-v2.2.1";
+			reg = <0x078b8000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+			spi-max-frequency = <50000000>;
+			clocks = <&gcc GCC_BLSP1_QUP4_SPI_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 18>, <&blsp_dma 19>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		blsp1_i2c4: i2c@78b9000 {
+			compatible = "qcom,i2c-qup-v2.2.1";
+			reg = <0x078b9000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 299 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP5_I2C_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 20>, <&blsp_dma 21>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		blsp1_spi4: spi@78b9000 {
+			compatible = "qcom,spi-qup-v2.2.1";
+			reg = <0x078b9000 0x600>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 299 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP5_SPI_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 20>, <&blsp_dma 21>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		usb3: usb@8a00000 {
+			compatible = "qcom,ipq9574-dwc3", "qcom,dwc3";
+			reg = <0x08af8800 0x400>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			clocks = <&gcc GCC_SNOC_USB_CLK>,
+				 <&gcc GCC_USB0_MASTER_CLK>,
+				 <&gcc GCC_ANOC_USB_AXI_CLK>,
+				 <&gcc GCC_USB0_SLEEP_CLK>,
+				 <&gcc GCC_USB0_MOCK_UTMI_CLK>;
+
+			clock-names = "cfg_noc",
+				      "core",
+				      "iface",
+				      "sleep",
+				      "mock_utmi";
+
+			assigned-clocks = <&gcc GCC_USB0_MASTER_CLK>,
+					  <&gcc GCC_USB0_MOCK_UTMI_CLK>;
+			assigned-clock-rates = <200000000>,
+					       <24000000>;
+
+			interrupts-extended = <&intc GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "pwr_event";
+
+			resets = <&gcc GCC_USB_BCR>;
+			status = "disabled";
+
+			dwc_0: usb@8a00000 {
+				dev_id = <0>;
+				compatible = "snps,dwc3";
+				reg = <0x8a00000 0xcd00>;
+				clocks = <&gcc GCC_USB0_MOCK_UTMI_CLK>;
+				clock-names = "ref";
+				interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+				phys = <&usb_0_qusbphy>, <&usb_0_qmpphy>;
+				phy-names = "usb2-phy", "usb3-phy";
+				tx-fifo-resize;
+				snps,is-utmi-l1-suspend;
+				snps,hird-threshold = /bits/ 8 <0x0>;
+				snps,dis_u2_susphy_quirk;
+				snps,dis_u3_susphy_quirk;
+				dr_mode = "host";
+			};
+		};
+
 		intc: interrupt-controller@b000000 {
 			compatible = "qcom,msm-qgic2";
 			reg = <0x0b000000 0x1000>,  /* GICD */
-			      <0x0b002000 0x1000>,  /* GICC */
+			      <0x0b002000 0x2000>,  /* GICC */
 			      <0x0b001000 0x1000>,  /* GICH */
-			      <0x0b004000 0x1000>;  /* GICV */
+			      <0x0b004000 0x2000>;  /* GICV */
 			#address-cells = <1>;
 			#size-cells = <1>;
 			interrupt-controller;
 			#interrupt-cells = <3>;
-			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
 			ranges = <0 0x0b00c000 0x3000>;
 
 			v2m0: v2m@0 {
@@ -202,6 +1332,32 @@
 			};
 		};
 
+		watchdog: watchdog@b017000 {
+			compatible = "qcom,apss-wdt-ipq9574", "qcom,kpss-wdt";
+			reg = <0x0b017000 0x1000>;
+			interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
+			clocks = <&sleep_clk>;
+			timeout-sec = <30>;
+		};
+
+		apcs_glb: mailbox@b111000 {
+			compatible = "qcom,ipq9574-apcs-apps-global",
+				     "qcom,ipq6018-apcs-apps-global";
+			reg = <0x0b111000 0x1000>;
+			#clock-cells = <1>;
+			clocks = <&a73pll>, <&xo_board_clk>;
+			clock-names = "pll", "xo";
+			#mbox-cells = <1>;
+		};
+
+		a73pll: clock@b116000 {
+			compatible = "qcom,ipq9574-a73pll";
+			reg = <0x0b116000 0x40>;
+			#clock-cells = <0>;
+			clocks = <&xo_board_clk>;
+			clock-names = "xo";
+		};
+
 		timer@b120000 {
 			compatible = "arm,armv7-timer-mem";
 			reg = <0x0b120000 0x1000>;
@@ -259,6 +1415,484 @@
 				status = "disabled";
 			};
 		};
+
+		pcie1: pci@10000000 {
+			compatible = "qcom,pcie-ipq9574";
+			reg =  <0x10000000 0xf1d>,
+			       <0x10000F20 0xa8>,
+			       <0x10001000 0x1000>,
+			       <0x000F8000 0x4000>,
+			       <0x10100000 0x1000>;
+			reg-names = "dbi", "elbi", "atu", "parf", "config";
+			device_type = "pci";
+			linux,pci-domain = <2>;
+			bus-range = <0x00 0xff>;
+			num-lanes = <1>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+			ranges = <0x01000000 0x0 0x00000000 0x10200000 0x0 0x100000>,  /* I/O */
+				 <0x02000000 0x0 0x10300000 0x10300000 0x0 0x7d00000>; /* MEM */
+
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 0x7>;
+			interrupt-map = <0 0 0 1 &intc 0 35 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+					<0 0 0 2 &intc 0 49 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+					<0 0 0 3 &intc 0 84 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+					<0 0 0 4 &intc 0 85 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+			interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "global_irq";
+
+			/* clocks and clock-names are used to enable the clock in CBCR */
+			clocks = <&gcc GCC_PCIE1_AHB_CLK>,
+				 <&gcc GCC_PCIE1_AUX_CLK>,
+				 <&gcc GCC_PCIE1_AXI_M_CLK>,
+				 <&gcc GCC_PCIE1_AXI_S_CLK>,
+				 <&gcc GCC_PCIE1_AXI_S_BRIDGE_CLK>,
+				 <&gcc GCC_PCIE1_RCHNG_CLK>;
+			clock-names = "ahb",
+				      "aux",
+				      "axi_m",
+				      "axi_s",
+				      "axi_bridge",
+				      "rchng";
+
+			resets = <&gcc GCC_PCIE1_PIPE_ARES>,
+				 <&gcc GCC_PCIE1_CORE_STICKY_ARES>,
+				 <&gcc GCC_PCIE1_AXI_S_STICKY_ARES>,
+				 <&gcc GCC_PCIE1_AXI_S_ARES>,
+				 <&gcc GCC_PCIE1_AXI_M_STICKY_ARES>,
+				 <&gcc GCC_PCIE1_AXI_M_ARES>,
+				 <&gcc GCC_PCIE1_AUX_ARES>,
+				 <&gcc GCC_PCIE1_AHB_ARES>;
+			reset-names = "pipe",
+				      "sticky",
+				      "axi_s_sticky",
+				      "axi_s",
+				      "axi_m_sticky",
+				      "axi_m",
+				      "aux",
+				      "ahb";
+
+			phys = <&pcie1_phy>;
+			phy-names = "pciephy";
+			msi-parent = <&v2m0>;
+			status = "disabled";
+		};
+
+		pcie3: pci@18000000 {
+			compatible = "qcom,pcie-ipq9574";
+			reg =  <0x18000000 0xf1d>,
+			       <0x18000F20 0xa8>,
+			       <0x18001000 0x1000>,
+			       <0x000F0000 0x4000>,
+			       <0x18100000 0x1000>;
+			reg-names = "dbi", "elbi", "atu", "parf", "config";
+			device_type = "pci";
+			linux,pci-domain = <4>;
+			bus-range = <0x00 0xff>;
+			num-lanes = <2>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+			ranges = <0x01000000 0x0 0x00000000 0x18200000 0x0 0x100000>,  /* I/O */
+				 <0x02000000 0x0 0x18300000 0x18300000 0x0 0x7d00000>; /* MEM */
+
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 0x7>;
+			interrupt-map = <0 0 0 1 &intc 0 189 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+					<0 0 0 2 &intc 0 190 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+					<0 0 0 3 &intc 0 191 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+					<0 0 0 4 &intc 0 192 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+			interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "global_irq";
+
+			/* clocks and clock-names are used to enable the clock in CBCR */
+			clocks = <&gcc GCC_PCIE3_AHB_CLK>,
+				 <&gcc GCC_PCIE3_AUX_CLK>,
+				 <&gcc GCC_PCIE3_AXI_M_CLK>,
+				 <&gcc GCC_PCIE3_AXI_S_CLK>,
+				 <&gcc GCC_PCIE3_AXI_S_BRIDGE_CLK>,
+				 <&gcc GCC_PCIE3_RCHNG_CLK>;
+			clock-names = "ahb",
+				      "aux",
+				      "axi_m",
+				      "axi_s",
+				      "axi_bridge",
+				      "rchng";
+
+			resets = <&gcc GCC_PCIE3_PIPE_ARES>,
+				 <&gcc GCC_PCIE3_CORE_STICKY_ARES>,
+				 <&gcc GCC_PCIE3_AXI_S_STICKY_ARES>,
+				 <&gcc GCC_PCIE3_AXI_S_ARES>,
+				 <&gcc GCC_PCIE3_AXI_M_STICKY_ARES>,
+				 <&gcc GCC_PCIE3_AXI_M_ARES>,
+				 <&gcc GCC_PCIE3_AUX_ARES>,
+				 <&gcc GCC_PCIE3_AHB_ARES>;
+			reset-names = "pipe",
+				      "sticky",
+				      "axi_s_sticky",
+				      "axi_s",
+				      "axi_m_sticky",
+				      "axi_m",
+				      "aux",
+				      "ahb";
+
+			phys = <&pcie3_phy>;
+			phy-names = "pciephy";
+			msi-parent = <&v2m0>;
+			status = "disabled";
+		};
+
+		pcie2: pci@20000000 {
+			compatible = "qcom,pcie-ipq9574";
+			reg =  <0x20000000 0xf1d>,
+			       <0x20000F20 0xa8>,
+			       <0x20001000 0x1000>,
+			       <0x00088000 0x4000>,
+			       <0x20100000 0x1000>;
+			reg-names = "dbi", "elbi", "atu", "parf", "config";
+			device_type = "pci";
+			linux,pci-domain = <3>;
+			bus-range = <0x00 0xff>;
+			num-lanes = <2>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+			ranges = <0x01000000 0x0 0x00000000 0x20200000 0x0 0x100000>,  /* I/O */
+				 <0x02000000 0x0 0x20300000 0x20300000 0x0 0x7d00000>; /* MEM */
+
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 0x7>;
+			interrupt-map = <0 0 0 1 &intc 0 164 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+					<0 0 0 2 &intc 0 165 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+					<0 0 0 3 &intc 0 186 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+					<0 0 0 4 &intc 0 187 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+			interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "global_irq";
+
+			/* clocks and clock-names are used to enable the clock in CBCR */
+			clocks = <&gcc GCC_PCIE2_AHB_CLK>,
+				 <&gcc GCC_PCIE2_AUX_CLK>,
+				 <&gcc GCC_PCIE2_AXI_M_CLK>,
+				 <&gcc GCC_PCIE2_AXI_S_CLK>,
+				 <&gcc GCC_PCIE2_AXI_S_BRIDGE_CLK>,
+				 <&gcc GCC_PCIE2_RCHNG_CLK>;
+			clock-names = "ahb",
+				      "aux",
+				      "axi_m",
+				      "axi_s",
+				      "axi_bridge",
+				      "rchng";
+
+			resets = <&gcc GCC_PCIE2_PIPE_ARES>,
+				 <&gcc GCC_PCIE2_CORE_STICKY_ARES>,
+				 <&gcc GCC_PCIE2_AXI_S_STICKY_ARES>,
+				 <&gcc GCC_PCIE2_AXI_S_ARES>,
+				 <&gcc GCC_PCIE2_AXI_M_STICKY_ARES>,
+				 <&gcc GCC_PCIE2_AXI_M_ARES>,
+				 <&gcc GCC_PCIE2_AUX_ARES>,
+				 <&gcc GCC_PCIE2_AHB_ARES>;
+			reset-names = "pipe",
+				      "sticky",
+				      "axi_s_sticky",
+				      "axi_s",
+				      "axi_m_sticky",
+				      "axi_m",
+				      "aux",
+				      "ahb";
+
+			phys = <&pcie2_phy>;
+			phy-names = "pciephy";
+			msi-parent = <&v2m0>;
+			status = "disabled";
+		};
+
+		pcie0: pci@28000000 {
+			compatible = "qcom,pcie-ipq9574";
+			reg =  <0x28000000 0xf1d>,
+			       <0x28000F20 0xa8>,
+			       <0x28001000 0x1000>,
+			       <0x00080000 0x4000>,
+			       <0x28100000 0x1000>;
+			reg-names = "dbi", "elbi", "atu", "parf", "config";
+			device_type = "pci";
+			linux,pci-domain = <1>;
+			bus-range = <0x00 0xff>;
+			num-lanes = <1>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+			ranges = <0x01000000 0x0 0x00000000 0x28200000 0x0 0x100000>,  /* I/O */
+				 <0x02000000 0x0 0x28300000 0x28300000 0x0 0x7d00000>; /* MEM */
+
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 0x7>;
+			interrupt-map = <0 0 0 1 &intc 0 75 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+					<0 0 0 2 &intc 0 78 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+					<0 0 0 3 &intc 0 79 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+					<0 0 0 4 &intc 0 83 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+			interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "global_irq";
+
+			/* clocks and clock-names are used to enable the clock in CBCR */
+			clocks = <&gcc GCC_PCIE0_AHB_CLK>,
+				 <&gcc GCC_PCIE0_AUX_CLK>,
+				 <&gcc GCC_PCIE0_AXI_M_CLK>,
+				 <&gcc GCC_PCIE0_AXI_S_CLK>,
+				 <&gcc GCC_PCIE0_AXI_S_BRIDGE_CLK>,
+				 <&gcc GCC_PCIE0_RCHNG_CLK>;
+			clock-names = "ahb",
+				      "aux",
+				      "axi_m",
+				      "axi_s",
+				      "axi_bridge",
+				      "rchng";
+
+			resets = <&gcc GCC_PCIE0_PIPE_ARES>,
+				 <&gcc GCC_PCIE0_CORE_STICKY_ARES>,
+				 <&gcc GCC_PCIE0_AXI_S_STICKY_ARES>,
+				 <&gcc GCC_PCIE0_AXI_S_ARES>,
+				 <&gcc GCC_PCIE0_AXI_M_STICKY_ARES>,
+				 <&gcc GCC_PCIE0_AXI_M_ARES>,
+				 <&gcc GCC_PCIE0_AUX_ARES>,
+				 <&gcc GCC_PCIE0_AHB_ARES>;
+			reset-names = "pipe",
+				      "sticky",
+				      "axi_s_sticky",
+				      "axi_s",
+				      "axi_m_sticky",
+				      "axi_m",
+				      "aux",
+				      "ahb";
+
+			phys = <&pcie0_phy>;
+			phy-names = "pciephy";
+			msi-parent = <&v2m0>;
+			status = "disabled";
+		};
+
+		tsens: thermal-sensor@4a9000 {
+			compatible = "qcom,ipq9574-tsens";
+			reg = <0x4a9000 0x1000>, /* TM */
+			      <0x4a8000 0x1000>; /* SROT */
+			interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "combined";
+			#qcom,sensors = <16>;
+			#thermal-sensor-cells = <1>;
+		};
+	};
+
+	thermal_zones: thermal-zones {
+		nss_top {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 3>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		misc0 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 4>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		misc1 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 5>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		misc2 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 6>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		misc3 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 7>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		a73ss0 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 8>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		a73ss1 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 9>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		a73_cpu0 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 10>;
+
+			trips {
+				cpu-critical {
+					temperature = <120000>;
+					hysteresis = <10000>;
+					type = "critical";
+				};
+
+				cpu-passive {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		a73_cpu1 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 11>;
+
+			trips {
+				cpu-critical {
+					temperature = <120000>;
+					hysteresis = <10000>;
+					type = "critical";
+				};
+
+				cpu-passive {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		a73_cpu2 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 12>;
+
+			trips {
+				cpu-critical {
+					temperature = <120000>;
+					hysteresis = <10000>;
+					type = "critical";
+				};
+
+				cpu-passive {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		a73_cpu3 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 13>;
+
+			trips {
+				cpu-critical {
+					temperature = <120000>;
+					hysteresis = <10000>;
+					type = "critical";
+				};
+
+				cpu-passive {
+					temperature = <110000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		wcss_phyb_tile3 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 14>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
+
+		top_glue_logic {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens 15>;
+
+			trips {
+				cpu-critical {
+					temperature = <125000>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
+		};
 	};
 
 	timer {
diff -ruw linux-6.4/scripts/dtc/include-prefixes/dt-bindings/clock/qcom,ipq9574-gcc.h linux-6.4-fbx/scripts/dtc/include-prefixes/dt-bindings/clock/qcom,ipq9574-gcc.h
--- linux-6.4/scripts/dtc/include-prefixes/dt-bindings/clock/qcom,ipq9574-gcc.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/scripts/dtc/include-prefixes/dt-bindings/clock/qcom,ipq9574-gcc.h	2023-06-09 19:10:10.096815520 +0200
@@ -210,4 +210,15 @@
 #define GCC_SNOC_PCIE1_1LANE_S_CLK			201
 #define GCC_SNOC_PCIE2_2LANE_S_CLK			202
 #define GCC_SNOC_PCIE3_2LANE_S_CLK			203
+#define GCC_PCIE0_PIPE_CLK				204
+#define GCC_PCIE1_PIPE_CLK				205
+#define GCC_PCIE2_PIPE_CLK				206
+#define GCC_PCIE3_PIPE_CLK				207
+#define GCC_USB0_PIPE_CLK				208
+#define GCC_USB0_SLEEP_CLK				209
+#define GCC_LPASS_CORE_AXIM_CLK				210
+#define GCC_PCNOC_LPASS_CLK				211
+#define GCC_LPASS_SWAY_CLK				212
+#define GCC_SNOC_LPASS_CFG_CLK				213
 #endif
+
diff -ruw linux-6.4/scripts/dtc/include-prefixes/dt-bindings/input/linux-event-codes.h linux-6.4-fbx/scripts/dtc/include-prefixes/dt-bindings/input/linux-event-codes.h
--- linux-6.4/scripts/dtc/include-prefixes/dt-bindings/input/linux-event-codes.h	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/scripts/dtc/include-prefixes/dt-bindings/input/linux-event-codes.h	2023-05-22 20:06:44.879870719 +0200
@@ -803,6 +803,18 @@
 #define BTN_TRIGGER_HAPPY39		0x2e6
 #define BTN_TRIGGER_HAPPY40		0x2e7
 
+#define KEY_APP_TV			0x2f1
+#define KEY_APP_REPLAY			0x2f2
+#define KEY_APP_VIDEOCLUB		0x2f3
+#define KEY_APP_WHATSON			0x2f4
+#define KEY_APP_RECORDS			0x2f5
+#define KEY_APP_MEDIA			0x2f6
+#define KEY_APP_YOUTUBE			0x2f7
+#define KEY_APP_RADIOS			0x2f8
+#define KEY_APP_CANALVOD		0x2f9
+#define KEY_APP_PIP			0x2fa
+#define KEY_APP_NETFLIX			0x2fb
+
 /* We avoid low common keys in module aliases so they don't get huge. */
 #define KEY_MIN_INTERESTING	KEY_MUTE
 #define KEY_MAX			0x2ff
diff -ruw linux-6.4/scripts/gen_autoksyms.sh linux-6.4-fbx/scripts/gen_autoksyms.sh
--- linux-6.4/scripts/gen_autoksyms.sh	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/scripts/gen_autoksyms.sh	2023-05-22 20:06:46.643917640 +0200
@@ -22,7 +22,7 @@
 
 output_file="$1"
 
-needed_symbols=
+needed_symbols=$(sed -n 's/^CONFIG_UNUSED_KSYMS_WHITELIST_SYMS=\(.*\)$/\1/p' include/config/auto.conf)
 
 # Special case for modversions (see modpost.c)
 if grep -q "^CONFIG_MODVERSIONS=y$" include/config/auto.conf; then
diff -ruw linux-6.4/sound/soc/kirkwood/Kconfig linux-6.4-fbx/sound/soc/kirkwood/Kconfig
--- linux-6.4/sound/soc/kirkwood/Kconfig	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/sound/soc/kirkwood/Kconfig	2023-02-27 19:50:24.596313402 +0100
@@ -16,3 +16,8 @@
 	  Say Y if you want to add support for SoC audio on
 	  the Armada 370 Development Board.
 
+config SND_KIRKWOOD_SOC_FBXGW2R
+	tristate "Soc Audio support for fbxgw2r"
+	depends on SND_KIRKWOOD_SOC && MACH_FBXGW2R && I2C
+	select SND_KIRKWOOD_SOC_I2S
+	select SND_SOC_CS42L52
diff -ruw linux-6.4/sound/soc/kirkwood/Makefile linux-6.4-fbx/sound/soc/kirkwood/Makefile
--- linux-6.4/sound/soc/kirkwood/Makefile	2023-06-26 01:29:58.000000000 +0200
+++ linux-6.4-fbx/sound/soc/kirkwood/Makefile	2023-02-27 19:50:24.596313402 +0100
@@ -6,3 +6,6 @@
 snd-soc-armada-370-db-objs := armada-370-db.o
 
 obj-$(CONFIG_SND_KIRKWOOD_SOC_ARMADA370_DB) += snd-soc-armada-370-db.o
+
+snd-soc-fbxgw2r-objs := kirkwood-fbxgw2r.o
+obj-$(CONFIG_SND_KIRKWOOD_SOC_FBXGW2R) += snd-soc-fbxgw2r.o
diff -Nruw linux-6.4-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./Makefile linux-6.4-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/Makefile
--- linux-6.4-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/Makefile	2024-03-18 14:40:14.835740896 +0100
@@ -0,0 +1,26 @@
+board-dtbs = \
+	fbxgw8r-board-00.dtb \
+	fbxgw8r-board-01.dtb \
+	fbxgw8r-board-02.dtb \
+	fbxgw8r-board-03.dtb \
+	fbxgw8r-board-04.dtb
+
+dtb-$(CONFIG_ARCH_BCMBCA) += bcm963158ref1d.dtb fbxgw8r.dtb $(board-dtbs)
+
+always-y	:= $(dtb-y)
+always-$(CONFIG_ARCH_BCMBCA) += fbxgw8r_dtbs
+
+subdir-y	:= $(dts-dirs)
+clean-files	:= *.dtb fbxgw8r_dtbs
+
+cmd_dtbs               = ./scripts/dtbs.sh $@ $^
+quiet_cmd_dtbs         = DTBS    $@
+
+$(obj)/fbxgw8r_dtbs: $(addprefix $(obj)/,$(board-dtbs))
+	$(call cmd,dtbs)
+
+# export symbols in DTBs file to allow overlay usage
+DTC_FLAGS	+= -@
+
+dtb-$(CONFIG_ARCH_BCMBCA) += fbxgw8r_pcie_pine_asmedia.dtb
+dtb-$(CONFIG_ARCH_BCMBCA) += fbxgw8r_pcie_pine_dualband_noswitch.dtb
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/arch/arm64/boot/dts/qcom/fbxgw9r.dts	2024-04-19 15:59:31.193600561 +0200
@@ -0,0 +1,983 @@
+/*
+ * Freebox FBXGW9R Board DTS
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "ipq9574.dtsi"
+#include "ipq9574-firmware-optee.dtsi"
+
+#undef USE_PHYLINK_SFP
+
+/ {
+	compatible = "freebox,fbxgw9r-board-00", "freebox,fbxgw9r-board-01",
+		   "freebox,fbxgw9r", "qcom,ipq9574";
+	model = "Freebox FBXGW9R";
+
+	// for diagchar module
+	qcom,diag@0 {
+		compatible = "qcom,diag";
+		status = "ok";
+	};
+
+	aliases {
+		serial0 = &blsp1_uart2;
+		serial1 = &blsp1_uart0;
+		i2c0 = &blsp1_i2c4;
+		i2c1 = &blsp1_i2c1;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	reserved-memory {
+		ramoops@ffff0000 {
+		       compatible = "ramoops";
+		       /* RAM top - 64k */
+		       reg = <0x0 0xffff0000 0x0 (64 * 1024)>;
+		       record-size = <(64 * 1024)>;
+		       ecc-size = <16>;
+		       no-dump-oops;
+	       };
+	};
+
+#ifdef USE_PHYLINK_SFP
+	sfp_lan: sfp-lan {
+		compatible = "sff,sfp";
+		i2c-bus = <&blsp1_i2c1>;
+		maximum-power-milliwatt = <3000>;
+		mod-def0-gpios = <&fbxpmu_gpio_expander 1 GPIO_ACTIVE_LOW>;
+		pwr-enable-gpios = <&fbxpmu_gpio_expander 8 GPIO_ACTIVE_HIGH>;
+	};
+#endif
+
+	keypad {
+		compatible = "gpio-keys";
+		autorepeat = <1>;
+
+		keyup {
+			label = "key up";
+			linux,code = <KEY_UP>;
+			gpios = <&fbxpmu_gpio_expander 10 GPIO_ACTIVE_HIGH>;
+			debounce-interval = <50>;
+			linux,can-disable;
+		};
+		keydown {
+			label = "key down";
+			linux,code = <KEY_DOWN>;
+			gpios = <&fbxpmu_gpio_expander 9 GPIO_ACTIVE_HIGH>;
+			debounce-interval = <50>;
+			linux,can-disable;
+		};
+		keyright {
+			label = "key right";
+			linux,code = <KEY_RIGHT>;
+			gpios = <&fbxpmu_gpio_expander 12 GPIO_ACTIVE_HIGH>;
+			debounce-interval = <50>;
+			linux,can-disable;
+		};
+		keyleft {
+			label = "key left";
+			linux,code = <KEY_LEFT>;
+			gpios = <&fbxpmu_gpio_expander 11 GPIO_ACTIVE_HIGH>;
+			debounce-interval = <50>;
+			linux,can-disable;
+		};
+	};
+
+	powerbtn {
+		compatible = "gpio-keys";
+		autorepeat = <0>;
+
+		powerbtn {
+			label = "power";
+			linux,code = <KEY_POWER>;
+			gpios = <&fbxpmu_gpio_expander 13 GPIO_ACTIVE_HIGH>;
+			debounce-interval = <50>;
+			linux,can-disable;
+		};
+	};
+
+	fbxgw9r-gpio {
+		compatible = "fbx,fbxgpio";
+
+		lan-sfp-presence {
+			gpio = <&fbxpmu_gpio_expander 1 GPIO_ACTIVE_HIGH>;
+			input;
+#ifdef USE_PHYLINK_SFP
+			no-claim;
+#endif
+		};
+
+		lan-sfp-pwrgood {
+			gpio = <&fbxpmu_gpio_expander 5 GPIO_ACTIVE_HIGH>;
+			input;
+#ifdef USE_PHYLINK_SFP
+			no-claim;
+#endif
+		};
+
+		lan-sfp-pwren {
+			gpio = <&fbxpmu_gpio_expander 8 GPIO_ACTIVE_HIGH>;
+			output-low;
+#ifdef USE_PHYLINK_SFP
+			no-claim;
+#endif
+		};
+
+		usb3-pwren {
+			gpio = <&fbxpmu_gpio_expander 4 GPIO_ACTIVE_HIGH>;
+			output-high;
+		};
+
+		usb3-pwrgood {
+			gpio = <&fbxpmu_gpio_expander 14 GPIO_ACTIVE_HIGH>;
+			input;
+		};
+
+		poe-status {
+			gpio = <&fbxpmu_gpio_expander 6 GPIO_ACTIVE_HIGH>;
+			input;
+		};
+
+		poe-disable {
+			gpio = <&fbxpmu_gpio_expander 3 GPIO_ACTIVE_HIGH>;
+			output-high;
+		};
+
+		pon-rst {
+			gpio = <&fbxpmu_gpio_expander 7 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+
+		test-mode {
+			gpio = <&fbxpmu_gpio_expander 15 GPIO_ACTIVE_LOW>;
+			input;
+		};
+
+		iot-rst {
+			gpio = <&tlmm 12 GPIO_ACTIVE_HIGH>;
+			output-low;
+		};
+
+		iot-swd-data {
+			gpio = <&tlmm 63 GPIO_ACTIVE_HIGH>;
+			output-low;
+		};
+
+		iot-swd-clk {
+			gpio = <&tlmm 64 GPIO_ACTIVE_HIGH>;
+			output-low;
+		};
+
+		soc-rst-inhibit {
+			gpio = <&tlmm 44 GPIO_ACTIVE_HIGH>;
+			output-low;
+		};
+
+		nvme-pwrgood {
+			gpio = <&tlmm 52 GPIO_ACTIVE_HIGH>;
+			input;
+		};
+
+		nvme-is-pcie {
+			gpio = <&tlmm 57 GPIO_ACTIVE_HIGH>;
+			input;
+		};
+
+		nvme-door-opened {
+			gpio = <&tlmm 61 GPIO_ACTIVE_HIGH>;
+			input;
+			no-claim;
+		};
+
+		pcie0-wdisable {
+			gpio = <&tlmm 54 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+
+		pcie1-wdisable {
+			gpio = <&tlmm 55 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+
+		pcie2-wdisable {
+			gpio = <&tlmm 56 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+	};
+
+	nvme_regulator: nvme-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "nvme-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&tlmm 58 GPIO_ACTIVE_HIGH>;
+		startup-delay-us = <10000>;
+		enable-active-high;
+		fault-sense-gpio = <&tlmm 61 GPIO_ACTIVE_HIGH>;
+	};
+};
+
+&blsp1_uart0 {
+	// PON uart
+	pinctrl-0 = <&uart_0_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&blsp1_uart2 {
+	// main uart
+	pinctrl-0 = <&uart2_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&blsp1_i2c1 {
+	// used for SFP lan
+	pinctrl-0 = <&i2c_1_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&blsp1_spi3 {
+	// used for OLED + cortina SPI + FXS + IOT
+	pinctrl-0 = <&spi_3_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+
+	spi-nand@0 {
+		compatible = "spi-nand";
+                reg = <0>;
+                spi-max-frequency = <(50 * 1000 * 1000)>;
+
+		partitions {
+			compatible = "fixed-partitions";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			uboot@0 {
+				label = "cortina-uboot";
+				reg = <0x0 0x400000>;
+			};
+
+			env@400000 {
+				label = "cortina-uboot-env";
+				reg = <0x400000 0x200000>;
+			};
+
+			dtb@600000 {
+				label = "cortina-dtb";
+				reg = <0x600000 0x80000>;
+			};
+
+			uimage@700000 {
+				label = "cortina-uimage";
+				reg = <0x700000 0x600000>;
+			};
+
+			rootfs@d00000 {
+				label = "cortina-rootfs";
+				reg = <0xd00000 0x2800000>;
+			};
+
+			ubi@6400000 {
+				label = "cortina-ubi";
+				reg = <0x6400000 0x1400000>;
+			};
+		};
+	};
+
+	spi-slac@1 {
+		compatible = "microsemi,le9641";
+		reg = <1>;
+		spi-max-frequency = <(500 * 1000)>;
+	};
+
+	ssd1320@2 {
+		compatible = "chipwealth,ch1120";
+		reg = <2>;
+		spi-max-frequency = <(14 * 1000 * 1000)>;
+
+		/*
+		* display mapping info (when viewing the panel with the
+		* keypad on the right):
+		*
+		* SEG used on x-axis
+		* COM used on y-axis
+		*
+		* top-left: COM0/SEG159
+		* bottom-right: COM159/SEG0
+		*
+		* visible area (160x128)
+		*  top-left: COM16/SEG159
+		*  bottom-right: COM143/SEG0
+		*
+		* SEG are mapped in alternate: SEG0, SEG80, SEG1, ...
+		*/
+		ssd1320,com-range = <16 143>;
+		ssd1320,seg-range = <0 159>;
+		ssd1320,seg-reverse-dir;
+
+		ssd1320,clk-divide-ratio = <0x0>;
+		ssd1320,precharge-period = <0x1f>;
+		ssd1320,vcom-deselect-level = <0x3f>;
+		ssd1320,iref = <0x02>;
+		ssd1320,discharge-period = <0x02>;
+
+		ssd1320,display-enh-a = <0x02>;
+
+		ssd1320,default-brightness = <0xff>;
+		ssd1320,max-brightness = <0xff>;
+
+		ssd1320,watchdog = <300>;
+		ssd1320,data-select-gpio = <&tlmm 53 GPIO_ACTIVE_HIGH>;
+		ssd1320,reset-gpio = <&fbxpmu_gpio_expander 2 GPIO_ACTIVE_LOW>;
+		ssd1320,vcc-gpio = <&fbxpmu_gpio_expander 16 GPIO_ACTIVE_HIGH>;
+	};
+};
+
+&blsp1_i2c4 {
+	// used for PMU
+	pinctrl-0 = <&i2c_4_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+
+	fbxpmu@3c {
+		compatible = "freebox,fbxgwr-pmu";
+		reg = <0x3c>;
+
+		fbxpmu_gpio_expander: fbxpmu@3c {
+			compatible = "freebox,fbxgwr-pmu-gpio";
+			interrupt-parent = <&tlmm>;
+			interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+			gpio-controller;
+			ngpios = <24>;
+			#gpio-cells = <2>;
+			gpio-line-names = "", /* 0 */
+					  "lan-sfp-presence", /* 1 */
+					  "oled-rst", /* 2 */
+					  "poe-dis", /* 3 */
+					  "usb3-pwren", /* 4 */
+					  "lan-sfp-pwrgood", /* 5 */
+					  "poe-status", /* 6 */
+					  "pon-rst", /* 7 */
+					  "lan-sfp-pwren", /* 8 */
+					  "keypad-down", /* 9 */
+					  "keypad-up", /* 10 */
+					  "keypad-cancel", /* 11 */
+					  "keypad-ok", /* 12 */
+					  "power-button", /* 13 */
+					  "usb-pwr-fault", /* 14 */
+					  "test-mode", /* 15 */
+					  "oled-vcc-en"; /* 16 */
+		};
+
+		led-controller {
+			compatible = "freebox,fbxgwr-pmu-led";
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			nleds = <3>;
+
+			led0@0 {
+				label = "green";
+				reg = <0x00>;
+			};
+
+			led1@1 {
+				label = "red";
+				reg = <0x01>;
+			};
+
+			led2@2 {
+				label = "blue";
+				reg = <0x02>;
+			};
+		};
+
+		watchdog {
+			compatible = "freebox,fbxgwr-pmu-watchdog";
+
+			interrupt-parent = <&tlmm>;
+			interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+
+		};
+	};
+};
+
+&lpass {
+	status = "okay";
+};
+
+&lpass_pcm {
+	status = "okay";
+	pinctrl-0 = <&audio_pins_pri>;
+        pinctrl-names = "default";
+};
+
+&tlmm {
+	gpio-line-names = "", /* 0 */
+			  "", /* 1 */
+			  "", /* 2 */
+			  "", /* 3 */
+			  "", /* 4 */
+			  "", /* 5 */
+			  "", /* 6 */
+			  "", /* 7 */
+			  "", /* 8 */
+			  "", /* 9 */
+			  "", /* 10 */
+			  "pmu_int", /* 11 */
+			  "iot_rst", /* 12 */
+			  "", /* 13 */
+			  "", /* 14 */
+			  "", /* 15 */
+			  "", /* 16 */
+			  "", /* 17 */
+			  "", /* 18 */
+			  "", /* 19 */
+			  "", /* 20 */
+			  "", /* 21 */
+			  "", /* 22 */
+			  "pcie0_rst", /* 23 */
+			  "", /* 24 */
+			  "", /* 25 */
+			  "pcie1_rst", /* 26 */
+			  "", /* 27 */
+			  "", /* 28 */
+			  "pcie2_rst", /* 29 */
+			  "", /* 30 */
+			  "", /* 31 */
+			  "pcie3_rst", /* 32 */
+			  "", /* 33 */
+			  "", /* 34 */
+			  "", /* 35 */
+			  "", /* 36 */
+			  "", /* 37 */
+			  "", /* 38 */
+			  "", /* 39 */
+			  "", /* 40 */
+			  "", /* 41 */
+			  "", /* 42 */
+			  "", /* 43 */
+			  "soc_rst_inhibit", /* 44 */
+			  "fxs_int", /* 45 */
+			  "", /* 46 */
+			  "lanqphy_int", /* 47 */
+			  "", /* 48 */
+			  "", /* 49 */
+			  "", /* 50 */
+			  "", /* 51 */
+			  "nvme_pwrfault", /* 52 */
+			  "oled_data_cmd", /* 53 */
+			  "pcie0_wdisable", /* 54 */
+			  "pcie1_wdisable", /* 55 */
+			  "pcie2_wdisable", /* 56 */
+			  "nvme_pcie_sata", /* 57 */
+			  "nvme_pwren", /* 58 */
+			  "", /* 59 */
+			  "lanqphy_rst", /* 60 */
+			  "nvme_door_open", /* 61 */
+			  "iot_int", /* 62 */
+			  "iot_swd_data", /* 63 */
+			  "iot_swd_clk"; /* 64 */
+};
+
+&rpm_requests {
+	regulators {
+		compatible = "qcom,rpm-mp5496-regulators";
+
+		ipq9574_s1: s1 {
+		/*
+		 * During kernel bootup, the SoC runs at 800MHz with 875mV set by the bootloaders.
+		 * During regulator registration the kernel does not know the initial
+		 * voltage, assumes it is zero, and brings the regulators up at the minimum supported voltage.
+		 * Update the regulator-min-microvolt with SVS voltage of 725mV so that
+		 * the regulators are brought up with 725mV which is sufficient for all the
+		 * corner parts to operate at 800MHz
+		 */
+			regulator-min-microvolt = <725000>;
+			regulator-max-microvolt = <1075000>;
+		};
+	};
+};
+
+&dwc_0 {
+	dr_mode = "host";
+};
+
+&pcie0_phy {
+	status = "okay";
+};
+
+&pcie0 {
+	// wifi low, PCI x1
+	pinctrl-names = "default";
+	pinctrl-0 = <&pcie_0_pin>;
+
+	// FIXME: not supported on kernel 6.4, check
+	max-payload-size = <1>; // 1-256 TLP bytes for WKK
+
+	perst-gpios = <&tlmm 23 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 24 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
+&pcie2_phy {
+	status = "okay";
+};
+
+&pcie2 {
+	// wifi high, PCI x2
+	pinctrl-names = "default";
+	pinctrl-0 = <&pcie_2_pin>;
+
+	assigned-clocks = <&gcc GCC_PCIE2_AXI_M_CLK>,
+			<&gcc GCC_PCIE2_RCHNG_CLK>;
+	assigned-clock-rates = <342857143>,
+			<100000000>;
+
+	perst-gpios = <&tlmm 29 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 30 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
+&pcie3_phy {
+	status = "okay";
+};
+
+&pcie3 {
+	// NVME, PCI x2
+	pinctrl-names = "default";
+	pinctrl-0 = <&pcie_3_pin>;
+
+	assigned-clocks = <&gcc GCC_PCIE3_AXI_M_CLK>,
+			<&gcc GCC_PCIE3_RCHNG_CLK>;
+	assigned-clock-rates = <342857143>,
+			<100000000>;
+
+	vddpe-3v3-supply = <&nvme_regulator>;
+	reset-names = "powerctl";
+	perst-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 33 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
+&sdhc_1 {
+	pinctrl-0 = <&sdc_default_state>;
+	pinctrl-names = "default";
+	mmc-ddr-1_8v;
+	mmc-hs200-1_8v;
+	mmc-hs400-1_8v;
+	mmc-hs400-enhanced-strobe;
+	max-frequency = <384000000>;
+	bus-width = <8>;
+	status = "okay";
+
+	partitions-boot0 {
+                compatible = "fixed-partitions";
+                #address-cells = <2>;
+                #size-cells = <2>;
+                disk-name = "mmcblk%dboot0";
+
+                qickstart@0 {
+                        label = "qickstart0";
+                        reg = /bits/64 <0 (2 * 1024 * 1024)>;
+                        read-only;
+                };
+
+                serial@0 {
+                        label = "fbxserial";
+                        reg = /bits/64 <(-1) (8 * 1024)>;
+                        read-only;
+                };
+
+		fbxboot@0 {
+                        label = "fbxboot";
+                        reg = /bits/64 <(-1) (8 * 1024)>;
+                        read-only;
+                };
+
+		lang@0 {
+			label = "lang";
+			reg = /bits/64 <(-1) (128 * 1024)>;
+			read-only;
+		};
+
+		calibration@0 {
+			label = "calibration";
+			reg = /bits/64 <(-1) (64 * 1024)>;
+			read-only;
+		};
+	};
+
+	partitions-boot1 {
+                compatible = "fixed-partitions";
+                #address-cells = <2>;
+                #size-cells = <2>;
+                disk-name = "mmcblk%dboot1";
+
+                qickstart@0 {
+                        label = "qickstart1";
+                        reg = /bits/64 <0 (2 * 1024 * 1024)>;
+                        read-only;
+                };
+	};
+
+	partitions-main {
+		compatible = "fixed-partitions";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		disk-name = "mmcblk%d";
+
+		bank0@0 {
+			label = "bank0";
+			reg = /bits/64 <(-1) (32 * 1024 * 1024)>;
+			read-only;
+		};
+
+		bank1@0 {
+			label= "bank1";
+			reg = /bits/64 <(-1) (256 * 1024 * 1024)>;
+		};
+
+		nvram@0 {
+			label= "nvram";
+			reg = /bits/64 <(-1) (4 * 1024 * 1024)>;
+		};
+
+		config@0 {
+			label= "config";
+			reg = /bits/64 <(-1) (32 * 1024 * 1024)>;
+		};
+
+		newbank0@0 {
+			label= "new_bank0";
+			reg = /bits/64 <(-1) (32 * 1024 * 1024)>;
+		};
+
+		newboot@0 {
+			label= "newboot";
+			reg = /bits/64 <(-1) (2 * 1024 * 1024)>;
+		};
+
+                fbxmbr@0 {
+			label = "fbxmbr";
+			reg = /bits/64 <(-1) (4096)>;
+                };
+
+		fortknox@0 {
+			label = "fortknox";
+			reg = /bits/64 <(-1) (128 * 1024 * 1024)>;
+                };
+
+		userdata@0 {
+			label = "userdata";
+			reg = /bits/64 <(-1) (-1)>;
+                };
+	};
+};
+
+&sleep_clk {
+	clock-frequency = <32000>;
+};
+
+&usb_0_qmpphy {
+	status = "okay";
+};
+
+&usb_0_qusbphy {
+	status = "okay";
+};
+
+&tlmm {
+	pcie_0_pin: pcie-0-state {
+		clkreq-n-pins {
+			pins = "gpio22";
+			function = "pcie0_clk";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+
+		perst-n-pins {
+			pins = "gpio23";
+			function = "gpio";
+			drive-strength = <8>;
+			bias-pull-down;
+			output-low;
+		};
+
+		wake-n-pins {
+			pins = "gpio24";
+			function = "pcie0_wake";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+	};
+
+	pcie_2_pin: pcie-2-state {
+		clkreq-n-pins {
+			pins = "gpio28";
+			function = "pcie2_clk";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+
+		perst-n-pins {
+			pins = "gpio29";
+			function = "gpio";
+			drive-strength = <8>;
+			bias-pull-down;
+			output-low;
+		};
+
+		wake-n-pins {
+			pins = "gpio30";
+			function = "pcie2_wake";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+	};
+
+	pcie_3_pin: pcie-3-state {
+		clkreq-n-pins {
+			pins = "gpio31";
+			function = "pcie3_clk";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+
+		perst-n-pins {
+			pins = "gpio32";
+			function = "gpio";
+			drive-strength = <8>;
+			bias-pull-up;
+			output-low;
+		};
+
+		wake-n-pins {
+			pins = "gpio33";
+			function = "pcie3_wake";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+	};
+
+	sdc_default_state: sdc-default-state {
+		clk-pins {
+			pins = "gpio5";
+			function = "sdc_clk";
+			drive-strength = <8>;
+			bias-disable;
+		};
+
+		cmd-pins {
+			pins = "gpio4";
+			function = "sdc_cmd";
+			drive-strength = <8>;
+			bias-pull-up;
+		};
+
+		data-pins {
+			pins = "gpio0", "gpio1", "gpio2",
+			       "gpio3", "gpio6", "gpio7",
+			       "gpio8", "gpio9";
+			function = "sdc_data";
+			drive-strength = <8>;
+			bias-pull-up;
+		};
+
+		rclk-pins {
+			pins = "gpio10";
+			function = "sdc_rclk";
+			drive-strength = <8>;
+			bias-pull-down;
+		};
+	};
+
+	i2c_1_pins: i2c-1-state {
+		pins = "gpio36", "gpio37";
+		function = "blsp1_i2c";
+		drive-strength = <8>;
+		bias-disable;
+	};
+
+	spi_3_pins: spi-3-state {
+		pins = "gpio15", "gpio16",
+			"gpio17", "gpio18", "gpio19", "gpio20", "gpio21";
+		function = "blsp3_spi";
+		drive-strength = <8>;
+		bias-disable;
+	};
+
+	i2c_4_pins: i2c-4-state {
+		pins = "gpio50", "gpio51";
+		function = "blsp4_i2c";
+		drive-strength = <8>;
+		bias-disable;
+	};
+
+	uart_0_pins: uart-0-state {
+		pins = "gpio13", "gpio14";
+		function = "blsp0_uart";
+		drive-strength = <8>;
+		bias-disable;
+	};
+};
+
+&usb3 {
+	status = "okay";
+};
+
+&xo_board_clk {
+	clock-frequency = <24000000>;
+};
+
+&mdio {
+	status = "okay";
+
+	clock-frequency = <6250000>;
+
+	reset-gpio = <&tlmm 60 GPIO_ACTIVE_LOW>;
+	reset-delay-us = <1000>;
+	reset-post-delay-us = <1000>;
+
+	pinctrl-0 = <&mdio_pins>;
+	pinctrl-names = "default";
+
+	ess_qphy0: qca8084-qphy0@1 {
+		reg = <1>;
+		/*
+		* phy address can be remapped, so we need to
+		* designate which port we are actually talking to
+		*/
+		qca,phy-type = <0>;
+		qca,phy-physid = <0>;
+		qca,led-tlmm-pin = <16>;
+		qca,led-act-blink;
+		qca,led-link-speed-any;
+		compatible = "ethernet-phy-id004d.d180", "ethernet-phy-ieee802.3-c22";
+		status = "okay";
+	};
+
+	ess_qphy1: qca8084-qphy1@2 {
+		reg = <2>;
+		qca,phy-type = <0>;
+		qca,phy-physid = <1>;
+		qca,led-tlmm-pin = <17>;
+		qca,led-act-blink;
+		qca,led-link-speed-any;
+		compatible = "ethernet-phy-id004d.d180", "ethernet-phy-ieee802.3-c22";
+		status = "okay";
+	};
+
+	ess_qphy2: qca8084-qphy2@3 {
+		reg = <3>;
+		qca,phy-type = <0>;
+		qca,phy-physid = <2>;
+		qca,led-tlmm-pin = <18>;
+		qca,led-act-blink;
+		qca,led-link-speed-any;
+		compatible = "ethernet-phy-id004d.d180", "ethernet-phy-ieee802.3-c22";
+		status = "okay";
+	};
+
+	ess_qphy3: qca8084-qphy3@4 {
+		reg = <4>;
+		qca,phy-type = <0>;
+		qca,phy-physid = <3>;
+		qca,led-tlmm-pin = <19>;
+		qca,led-act-blink;
+		qca,led-link-speed-any;
+		compatible = "ethernet-phy-id004d.d180", "ethernet-phy-ieee802.3-c22";
+		status = "okay";
+	};
+
+	qca8084-uniphy1@5 {
+		/*
+		 * actual serdes connected to CPU, it's mandatory to allocate
+		 * an MDIO address for this port
+		 */
+		reg = <5>;
+		qca,phy-type = <1>;
+		qca,phy-physid = <1>;
+		compatible = "ethernet-phy-id004d.d180";
+		status = "okay";
+	};
+
+	qca8084-xpcs@6 {
+		/*
+		 * actual serdes connected to CPU, it's mandatory to allocate
+		 * an MDIO address for this port
+		 */
+		reg = <6>;
+		qca,phy-type = <1>;
+		qca,phy-physid = <2>;
+		compatible = "ethernet-phy-id004d.d180";
+		status = "okay";
+	};
+};
+
+&ess_phys_port0 {
+	mdio-bus = <&mdio>;
+	status = "okay";
+	label = "swp1";
+	phy-handle = <&ess_qphy0>;
+	phy-mode = "10g-qxgmii";
+	fbxserial-mac-address = <0>;
+};
+
+&ess_phys_port1 {
+	mdio-bus = <&mdio>;
+	status = "okay";
+	label = "swp2";
+	phy-handle = <&ess_qphy1>;
+	phy-mode = "10g-qxgmii";
+	fbxserial-mac-address = <0>;
+};
+
+&ess_phys_port2 {
+	mdio-bus = <&mdio>;
+	status = "okay";
+	label = "swp3";
+	phy-handle = <&ess_qphy2>;
+	phy-mode = "10g-qxgmii";
+	fbxserial-mac-address = <0>;
+};
+
+&ess_phys_port3 {
+	mdio-bus = <&mdio>;
+	status = "okay";
+	label = "swp4";
+	phy-handle = <&ess_qphy3>;
+	phy-mode = "10g-qxgmii";
+	fbxserial-mac-address = <0>;
+};
+
+&ess_phys_port4 {
+	status = "okay";
+	label = "ftth0";
+	phy-mode = "10gbase-r";
+	fbxserial-mac-address = <0>;
+	managed = "in-band-status";
+};
+
+&ess_phys_port5 {
+	status = "okay";
+	label = "sfplan0";
+	phy-mode = "1000base-x";
+	fbxserial-mac-address = <0>;
+	managed = "in-band-status";
+#ifdef USE_PHYLINK_SFP
+	sfp = <&sfp_lan>;
+#endif
+};
+
+&imem_reset_reason {
+	status = "okay";
+	qcom-fbx,scm-el3-reasons;
+};
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/arch/arm64/boot/dts/qcom/ipq9574-firmware-optee.dtsi	2023-07-20 17:19:14.714368621 +0200
@@ -0,0 +1,23 @@
+
+/ {
+	reserved-memory {
+		optee-shared-memory@4ae00000 {
+			no-map;
+			reg = <0x0 0x4ae00000 0x0 0x00100000>;
+		};
+
+		tz@4a600000 {
+			// for OP-TEE: secure RAM size raised to 8MiB.
+			reg = <0x0 0x4a600000 0x0 0x800000>;
+			no-map;
+		};
+	};
+
+	firmware {
+		optee {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+			skip-enumeration;
+		};
+	};
+};
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/block/partitions/dt.c	2023-02-24 19:10:44.415570202 +0100
@@ -0,0 +1,204 @@
+#define PREFIX "dtparts"
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/ctype.h>
+#include "check.h"
+
+/**
+ * match_one: - Determines if a string matches a simple pattern
+ * @s: the string to examine for presence of the pattern
+ * @p: the string containing the pattern
+ */
+static int match_one(char *s, const char *p)
+{
+	char *meta, *from, *to;
+
+	while (1) {
+		int len = -1;
+
+		meta = strchr(p, '%');
+		if (!meta)
+			return strcmp(p, s) == 0;
+
+		if (strncmp(p, s, meta-p))
+			return 0;
+
+		s += meta - p;
+		p = meta + 1;
+
+		if (isdigit(*p))
+			len = simple_strtoul(p, (char **) &p, 10);
+		else if (*p == '%') {
+			if (*s++ != '%')
+				return 0;
+			p++;
+			continue;
+		}
+
+		from = s;
+		switch (*p++) {
+		case 's': {
+			size_t str_len = strlen(s);
+
+			if (str_len == 0)
+				return 0;
+			if (len == -1 || len > str_len)
+				len = str_len;
+			to = s + len;
+			break;
+		}
+		case 'd':
+			simple_strtol(s, &to, 0);
+			goto num;
+		case 'u':
+			simple_strtoul(s, &to, 0);
+			goto num;
+		case 'o':
+			simple_strtoul(s, &to, 8);
+			goto num;
+		case 'x':
+			simple_strtoul(s, &to, 16);
+
+		num:
+			if (to == from)
+				return 0;
+			break;
+		default:
+			return 0;
+		}
+		s = to;
+	}
+}
+
+/*
+ * Walk up the device hierarchy and return the first non-NULL of_node, if any.
+ */
+static struct device_node *find_first_parent_node(const struct device *ddev)
+{
+	while (ddev && !ddev->of_node)
+		ddev = ddev->parent;
+
+	if (!ddev)
+		return NULL;
+	return ddev->of_node;
+}
+
+/*
+ * Parse "fixed-partitions" subnodes of the disk's DT node into a partition table.
+ */
+int dt_partition(struct parsed_partitions *state)
+{
+	struct device *ddev = disk_to_dev(state->disk);
+	struct device_node *np, *part_node, *pp;
+	u64 disk_size, last_end;
+	int nr_parts, i;
+
+	/* find first parent device with a non null device tree
+	 * node */
+	np = find_first_parent_node(ddev);
+	if (!np)
+		return -1;
+
+	part_node = NULL;
+	for_each_child_of_node(np, pp) {
+		char diskname[BDEVNAME_SIZE];
+		const char *pattern;
+
+		if (!of_device_is_compatible(pp, "fixed-partitions"))
+			continue;
+
+		/* check device name match pattern */
+		strlcpy(diskname, state->disk->disk_name, sizeof (diskname));
+
+		if (of_property_read_string(pp, "disk-name", &pattern)) {
+			part_node = pp;
+			break;
+		}
+
+		if (match_one(diskname, pattern)) {
+			part_node = pp;
+			break;
+		}
+	}
+
+	if (!part_node)
+		return -1;
+
+	/* First count the subnodes */
+	nr_parts = 0;
+	for_each_child_of_node(part_node,  pp)
+		nr_parts++;
+
+	if (nr_parts == 0)
+		return 0;
+
+	disk_size = get_capacity(state->disk) << 9;
+
+	last_end = 0;
+	i = 1;
+	for_each_child_of_node(part_node,  pp) {
+		struct partition_meta_info *info;
+		char tmp[sizeof (info->volname) + 4];
+		const __be32 *reg;
+		const char *partname;
+		int a_cells, s_cells;
+		u64 size, offset;
+		int len;
+
+		reg = of_get_property(pp, "reg", &len);
+		if (!reg) {
+			pr_err("part %pOF (%pOF) missing reg property.\n",
+			       pp, np);
+			return -1;
+		}
+
+		a_cells = of_n_addr_cells(pp);
+		s_cells = of_n_size_cells(pp);
+		if (len / 4 != a_cells + s_cells) {
+			pr_err("ofpart partition %pOF (%pOF) "
+			       "error parsing reg property.\n",
+			       pp, np);
+			return -1;
+		}
+
+		partname = of_get_property(pp, "label", &len);
+		if (!partname)
+			partname = of_get_property(pp, "name", &len);
+
+		if (i >= state->limit) {
+			pr_err("too many partitions\n");
+			return -1;
+		}
+
+		offset = of_read_number(reg, a_cells);
+		if (offset == (u64)-1) {
+			offset = last_end;
+		}
+
+		size = of_read_number(reg + a_cells, s_cells);
+		if (size == (u64)-1)
+			size = disk_size - offset;
+
+		last_end = offset + size;
+		put_partition(state, i, offset >> 9, size >> 9);
+
+		info = &state->parts[i].info;
+		strlcpy(info->volname, partname, sizeof (info->volname));
+		state->parts[i].has_info = true;
+
+		if (!IS_ENABLED(CONFIG_OF_PARTITION_IGNORE_RO) &&
+		    of_get_property(pp, "read-only", &len))
+			state->parts[i].flags |= ADDPART_FLAG_RO;
+
+		snprintf(tmp, sizeof(tmp), "(%s/%s)",
+			 info->volname,
+			 state->parts[i].flags ? "ro" : "rw");
+		strlcat(state->pp_buf, tmp, PAGE_SIZE);
+
+		i++;
+	}
+
+	strlcat(state->pp_buf, "\n", PAGE_SIZE);
+	return 1;
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/config	2024-04-19 16:23:22.324700497 +0200
@@ -0,0 +1,5224 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/arm64 6.4.0 Kernel Configuration
+#
+CONFIG_CC_VERSION_TEXT="aarch64-linux-musl-gcc (freebox) 10.4.0"
+CONFIG_CC_IS_GCC=y
+CONFIG_GCC_VERSION=100400
+CONFIG_CLANG_VERSION=0
+CONFIG_AS_IS_GNU=y
+CONFIG_AS_VERSION=23800
+CONFIG_LD_IS_BFD=y
+CONFIG_LD_VERSION=23800
+CONFIG_LLD_VERSION=0
+CONFIG_CC_CAN_LINK=y
+CONFIG_CC_CAN_LINK_STATIC=y
+CONFIG_CC_HAS_ASM_INLINE=y
+CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y
+CONFIG_PAHOLE_VERSION=0
+CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_TABLE_SORT=y
+CONFIG_THREAD_INFO_IN_TASK=y
+
+#
+# General setup
+#
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE="/opt/toolchains/aarch64-musl-1.2.2-gcc-10.4.0-binutils-2.38-gdb-11.2-1/bin/aarch64-linux-musl-"
+# CONFIG_COMPILE_TEST is not set
+# CONFIG_WERROR is not set
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_BUILD_SALT=""
+CONFIG_DEFAULT_INIT=""
+CONFIG_DEFAULT_HOSTNAME="(none)"
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_WATCH_QUEUE is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+CONFIG_HAVE_ARCH_AUDITSYSCALL=y
+CONFIG_AUDITSYSCALL=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
+CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_DOMAIN_HIERARCHY=y
+CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y
+CONFIG_GENERIC_IRQ_IPI=y
+CONFIG_GENERIC_MSI_IRQ=y
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_SPARSE_IRQ=y
+# CONFIG_GENERIC_IRQ_DEBUGFS is not set
+# end of IRQ subsystem
+
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_ARCH_HAS_TICK_BROADCAST=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y
+CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y
+CONFIG_CONTEXT_TRACKING=y
+CONFIG_CONTEXT_TRACKING_IDLE=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_HZ_PERIODIC=y
+# CONFIG_NO_HZ_IDLE is not set
+# CONFIG_NO_HZ_FULL is not set
+# CONFIG_NO_HZ is not set
+CONFIG_HIGH_RES_TIMERS=y
+# end of Timers subsystem
+
+CONFIG_BPF=y
+CONFIG_HAVE_EBPF_JIT=y
+CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
+
+#
+# BPF subsystem
+#
+# CONFIG_BPF_SYSCALL is not set
+CONFIG_BPF_JIT=y
+CONFIG_BPF_JIT_DEFAULT_ON=y
+# end of BPF subsystem
+
+CONFIG_PREEMPT_NONE_BUILD=y
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_PREEMPT_COUNT=y
+# CONFIG_PREEMPT_DYNAMIC is not set
+
+#
+# CPU/Task time and stats accounting
+#
+CONFIG_TICK_CPU_ACCOUNTING=y
+# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_SCHED_AVG_IRQ=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_PSI is not set
+# end of CPU/Task time and stats accounting
+
+# CONFIG_CPU_ISOLATION is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_RCU_EXPERT is not set
+CONFIG_TREE_SRCU=y
+CONFIG_RCU_STALL_COMMON=y
+CONFIG_RCU_NEED_SEGCBLIST=y
+# end of RCU Subsystem
+
+CONFIG_IKCONFIG=y
+# CONFIG_IKCONFIG_PROC is not set
+# CONFIG_IKHEADERS is not set
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
+# CONFIG_PRINTK_INDEX is not set
+# CONFIG_FBX_DECRYPT_INITRD is not set
+CONFIG_GENERIC_SCHED_CLOCK=y
+
+#
+# Scheduler features
+#
+# end of Scheduler features
+
+CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
+CONFIG_CC_HAS_INT128=y
+CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
+CONFIG_GCC11_NO_ARRAY_BOUNDS=y
+CONFIG_ARCH_SUPPORTS_INT128=y
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_FAVOR_DYNMODS is not set
+# CONFIG_MEMCG is not set
+# CONFIG_BLK_CGROUP is not set
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_CGROUP_PIDS is not set
+# CONFIG_CGROUP_RDMA is not set
+# CONFIG_CGROUP_FREEZER is not set
+# CONFIG_CPUSETS is not set
+# CONFIG_CGROUP_DEVICE is not set
+# CONFIG_CGROUP_CPUACCT is not set
+# CONFIG_CGROUP_PERF is not set
+# CONFIG_CGROUP_MISC is not set
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+# CONFIG_TIME_NS is not set
+CONFIG_IPC_NS=y
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_NET_NS=y
+# CONFIG_CHECKPOINT_RESTORE is not set
+# CONFIG_SCHED_AUTOGROUP is not set
+# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_INITRAMFS_FORCE is not set
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+# CONFIG_RD_ZSTD is not set
+# CONFIG_BOOT_CONFIG is not set
+# CONFIG_INITRAMFS_PRESERVE_MTIME is not set
+CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_LD_ORPHAN_WARN=y
+CONFIG_LD_ORPHAN_WARN_LEVEL="warn"
+CONFIG_SYSCTL=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_EXPERT=y
+CONFIG_MULTIUSER=y
+# CONFIG_SGETMASK_SYSCALL is not set
+# CONFIG_SYSFS_SYSCALL is not set
+# CONFIG_FHANDLE is not set
+CONFIG_POSIX_TIMERS=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_FUTEX_PI=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+# CONFIG_IO_URING is not set
+CONFIG_ADVISE_SYSCALLS=y
+CONFIG_MEMBARRIER=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_SELFTEST is not set
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_BASE_RELATIVE=y
+CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y
+# CONFIG_KCMP is not set
+# CONFIG_RSEQ is not set
+CONFIG_EMBEDDED=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_GUEST_PERF_EVENTS=y
+# CONFIG_PC104 is not set
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+# end of Kernel Performance Events And Counters
+
+# CONFIG_PROFILING is not set
+# end of General setup
+
+CONFIG_ARM64=y
+CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y
+CONFIG_64BIT=y
+CONFIG_MMU=y
+CONFIG_ARM64_PAGE_SHIFT=12
+CONFIG_ARM64_CONT_PTE_SHIFT=4
+CONFIG_ARM64_CONT_PMD_SHIFT=4
+CONFIG_ARCH_MMAP_RND_BITS_MIN=18
+CONFIG_ARCH_MMAP_RND_BITS_MAX=24
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CSUM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y
+CONFIG_SMP=y
+CONFIG_KERNEL_MODE_NEON=y
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_PGTABLE_LEVELS=3
+CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_ARCH_PROC_KCORE_TEXT=y
+CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y
+
+#
+# Platform selection
+#
+# CONFIG_ARCH_ACTIONS is not set
+# CONFIG_ARCH_SUNXI is not set
+# CONFIG_ARCH_ALPINE is not set
+# CONFIG_ARCH_APPLE is not set
+# CONFIG_ARCH_BCM is not set
+# CONFIG_ARCH_BERLIN is not set
+# CONFIG_ARCH_BITMAIN is not set
+# CONFIG_ARCH_EXYNOS is not set
+# CONFIG_ARCH_SPARX5 is not set
+# CONFIG_ARCH_K3 is not set
+# CONFIG_ARCH_LG1K is not set
+# CONFIG_ARCH_HISI is not set
+# CONFIG_ARCH_KEEMBAY is not set
+# CONFIG_ARCH_MEDIATEK is not set
+# CONFIG_ARCH_MESON is not set
+# CONFIG_ARCH_MVEBU is not set
+# CONFIG_ARCH_NXP is not set
+# CONFIG_ARCH_NPCM is not set
+CONFIG_ARCH_QCOM=y
+# CONFIG_ARCH_QCOM_DTB is not set
+CONFIG_ARCH_QCOM_FBX_DTB=y
+# CONFIG_ARCH_REALTEK is not set
+# CONFIG_ARCH_RENESAS is not set
+# CONFIG_ARCH_ROCKCHIP is not set
+# CONFIG_ARCH_SEATTLE is not set
+# CONFIG_ARCH_INTEL_SOCFPGA is not set
+# CONFIG_ARCH_SYNQUACER is not set
+# CONFIG_ARCH_TEGRA is not set
+# CONFIG_ARCH_SPRD is not set
+# CONFIG_ARCH_THUNDER is not set
+# CONFIG_ARCH_THUNDER2 is not set
+# CONFIG_ARCH_UNIPHIER is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_VISCONTI is not set
+# CONFIG_ARCH_XGENE is not set
+# CONFIG_ARCH_ZYNQMP is not set
+# end of Platform selection
+
+#
+# Kernel Features
+#
+
+#
+# ARM errata workarounds via the alternatives framework
+#
+# CONFIG_ARM64_ERRATUM_826319 is not set
+# CONFIG_ARM64_ERRATUM_827319 is not set
+# CONFIG_ARM64_ERRATUM_824069 is not set
+# CONFIG_ARM64_ERRATUM_819472 is not set
+# CONFIG_ARM64_ERRATUM_832075 is not set
+CONFIG_ARM64_ERRATUM_834220=y
+# CONFIG_ARM64_ERRATUM_843419 is not set
+CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y
+CONFIG_ARM64_ERRATUM_1024718=y
+# CONFIG_ARM64_ERRATUM_1165522 is not set
+# CONFIG_ARM64_ERRATUM_1319367 is not set
+# CONFIG_ARM64_ERRATUM_1530923 is not set
+# CONFIG_ARM64_ERRATUM_2441007 is not set
+# CONFIG_ARM64_ERRATUM_1286807 is not set
+# CONFIG_ARM64_ERRATUM_1463225 is not set
+# CONFIG_ARM64_ERRATUM_1542419 is not set
+# CONFIG_ARM64_ERRATUM_1508412 is not set
+# CONFIG_ARM64_ERRATUM_2051678 is not set
+# CONFIG_ARM64_ERRATUM_2077057 is not set
+# CONFIG_ARM64_ERRATUM_2658417 is not set
+# CONFIG_ARM64_ERRATUM_2054223 is not set
+# CONFIG_ARM64_ERRATUM_2067961 is not set
+# CONFIG_ARM64_ERRATUM_2441009 is not set
+# CONFIG_ARM64_ERRATUM_2645198 is not set
+# CONFIG_CAVIUM_ERRATUM_22375 is not set
+# CONFIG_CAVIUM_ERRATUM_23154 is not set
+# CONFIG_CAVIUM_ERRATUM_27456 is not set
+# CONFIG_CAVIUM_ERRATUM_30115 is not set
+# CONFIG_CAVIUM_TX2_ERRATUM_219 is not set
+# CONFIG_FUJITSU_ERRATUM_010001 is not set
+# CONFIG_HISILICON_ERRATUM_161600802 is not set
+# CONFIG_QCOM_FALKOR_ERRATUM_1003 is not set
+# CONFIG_QCOM_FALKOR_ERRATUM_1009 is not set
+# CONFIG_QCOM_QDF2400_ERRATUM_0065 is not set
+# CONFIG_QCOM_FALKOR_ERRATUM_E1041 is not set
+# CONFIG_NVIDIA_CARMEL_CNP_ERRATUM is not set
+# CONFIG_ROCKCHIP_ERRATUM_3588001 is not set
+# CONFIG_SOCIONEXT_SYNQUACER_PREITS is not set
+# end of ARM errata workarounds via the alternatives framework
+
+CONFIG_ARM64_4K_PAGES=y
+# CONFIG_ARM64_16K_PAGES is not set
+# CONFIG_ARM64_64K_PAGES is not set
+CONFIG_ARM64_VA_BITS_39=y
+# CONFIG_ARM64_VA_BITS_48 is not set
+CONFIG_ARM64_VA_BITS=39
+CONFIG_ARM64_PA_BITS_48=y
+CONFIG_ARM64_PA_BITS=48
+# CONFIG_CPU_BIG_ENDIAN is not set
+CONFIG_CPU_LITTLE_ENDIAN=y
+# CONFIG_SCHED_MC is not set
+# CONFIG_SCHED_CLUSTER is not set
+# CONFIG_SCHED_SMT is not set
+CONFIG_NR_CPUS=4
+# CONFIG_HOTPLUG_CPU is not set
+# CONFIG_NUMA is not set
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+CONFIG_SCHED_HRTICK=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_HW_PERF_EVENTS=y
+# CONFIG_PARAVIRT is not set
+# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set
+# CONFIG_KEXEC_FILE is not set
+# CONFIG_CRASH_DUMP is not set
+# CONFIG_XEN is not set
+CONFIG_ARCH_FORCE_MAX_ORDER=10
+# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
+# CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY is not set
+# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set
+# CONFIG_ARM64_SW_TTBR0_PAN is not set
+CONFIG_ARM64_TAGGED_ADDR_ABI=y
+# CONFIG_COMPAT is not set
+
+#
+# ARMv8.1 architectural features
+#
+CONFIG_ARM64_HW_AFDBM=y
+CONFIG_ARM64_PAN=y
+CONFIG_AS_HAS_LDAPR=y
+CONFIG_AS_HAS_LSE_ATOMICS=y
+CONFIG_ARM64_LSE_ATOMICS=y
+CONFIG_ARM64_USE_LSE_ATOMICS=y
+# end of ARMv8.1 architectural features
+
+#
+# ARMv8.2 architectural features
+#
+CONFIG_AS_HAS_ARMV8_2=y
+CONFIG_AS_HAS_SHA3=y
+# CONFIG_ARM64_PMEM is not set
+# CONFIG_ARM64_RAS_EXTN is not set
+# CONFIG_ARM64_CNP is not set
+# end of ARMv8.2 architectural features
+
+#
+# ARMv8.3 architectural features
+#
+# CONFIG_ARM64_PTR_AUTH is not set
+CONFIG_CC_HAS_BRANCH_PROT_PAC_RET=y
+CONFIG_CC_HAS_SIGN_RETURN_ADDRESS=y
+CONFIG_AS_HAS_ARMV8_3=y
+CONFIG_AS_HAS_CFI_NEGATE_RA_STATE=y
+# end of ARMv8.3 architectural features
+
+#
+# ARMv8.4 architectural features
+#
+# CONFIG_ARM64_AMU_EXTN is not set
+CONFIG_AS_HAS_ARMV8_4=y
+# CONFIG_ARM64_TLB_RANGE is not set
+# end of ARMv8.4 architectural features
+
+#
+# ARMv8.5 architectural features
+#
+CONFIG_AS_HAS_ARMV8_5=y
+# CONFIG_ARM64_BTI is not set
+CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI=y
+# CONFIG_ARM64_E0PD is not set
+CONFIG_ARM64_AS_HAS_MTE=y
+# CONFIG_ARM64_MTE is not set
+# end of ARMv8.5 architectural features
+
+#
+# ARMv8.7 architectural features
+#
+# CONFIG_ARM64_EPAN is not set
+# end of ARMv8.7 architectural features
+
+# CONFIG_ARM64_SVE is not set
+CONFIG_ARM64_MODULE_PLTS=y
+# CONFIG_ARM64_PSEUDO_NMI is not set
+# CONFIG_RELOCATABLE is not set
+# CONFIG_RANDOMIZE_BASE is not set
+CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y
+# end of Kernel Features
+
+#
+# Boot options
+#
+CONFIG_CMDLINE="console=ttyMSM0,115200 earlycon=msm_serial_dm,0x78b1000 maxcpus=4 ip=:::::swp1:dhcp root=/dev/nfs rcupdate.rcu_cpu_stall_timeout=120"
+# CONFIG_CMDLINE_FROM_BOOTLOADER is not set
+CONFIG_CMDLINE_FORCE=y
+# CONFIG_EFI is not set
+# end of Boot options
+
+#
+# Power management options
+#
+# CONFIG_SUSPEND is not set
+# CONFIG_PM is not set
+# CONFIG_ENERGY_MODEL is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# end of Power management options
+
+#
+# CPU Power Management
+#
+
+#
+# CPU Idle
+#
+# CONFIG_CPU_IDLE is not set
+# end of CPU Idle
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set
+
+#
+# CPU frequency scaling drivers
+#
+CONFIG_CPUFREQ_DT=y
+CONFIG_CPUFREQ_DT_PLATDEV=y
+# CONFIG_ARM_QCOM_CPUFREQ_HW is not set
+# end of CPU Frequency scaling
+# end of CPU Power Management
+
+CONFIG_IRQ_BYPASS_MANAGER=y
+CONFIG_HAVE_KVM=y
+CONFIG_HAVE_KVM_IRQCHIP=y
+CONFIG_HAVE_KVM_IRQFD=y
+CONFIG_HAVE_KVM_IRQ_ROUTING=y
+CONFIG_HAVE_KVM_DIRTY_RING=y
+CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y
+CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP=y
+CONFIG_HAVE_KVM_EVENTFD=y
+CONFIG_KVM_MMIO=y
+CONFIG_HAVE_KVM_MSI=y
+CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
+CONFIG_KVM_VFIO=y
+CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y
+CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y
+CONFIG_HAVE_KVM_IRQ_BYPASS=y
+CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y
+CONFIG_KVM_XFER_TO_GUEST_WORK=y
+CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+# CONFIG_NVHE_EL2_DEBUG is not set
+
+#
+# General architecture-dependent options
+#
+# CONFIG_KPROBES is not set
+# CONFIG_JUMP_LABEL is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y
+CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y
+CONFIG_HAVE_NMI=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_GENERIC_IDLE_POLL_SETUP=y
+CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
+CONFIG_ARCH_HAS_KEEPINITRD=y
+CONFIG_ARCH_HAS_SET_MEMORY=y
+CONFIG_ARCH_HAS_SET_DIRECT_MAP=y
+CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y
+CONFIG_ARCH_WANTS_NO_INSTR=y
+CONFIG_HAVE_ASM_MODVERSIONS=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_RSEQ=y
+CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y
+CONFIG_MMU_GATHER_TABLE_FREE=y
+CONFIG_MMU_GATHER_RCU_TABLE_FREE=y
+CONFIG_MMU_LAZY_TLB_REFCOUNT=y
+CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
+CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y
+CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_HAVE_ARCH_SECCOMP=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_SECCOMP=y
+CONFIG_SECCOMP_FILTER=y
+# CONFIG_SECCOMP_CACHE_DEBUG is not set
+CONFIG_HAVE_ARCH_STACKLEAK=y
+CONFIG_HAVE_STACKPROTECTOR=y
+# CONFIG_STACKPROTECTOR is not set
+CONFIG_ARCH_SUPPORTS_LTO_CLANG=y
+CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y
+CONFIG_LTO_NONE=y
+CONFIG_ARCH_SUPPORTS_CFI_CLANG=y
+CONFIG_HAVE_CONTEXT_TRACKING_USER=y
+CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_MOVE_PUD=y
+CONFIG_HAVE_MOVE_PMD=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_HAVE_ARCH_HUGE_VMAP=y
+CONFIG_HAVE_ARCH_HUGE_VMALLOC=y
+CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
+CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
+CONFIG_MODULES_USE_ELF_RELA=y
+CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y
+CONFIG_SOFTIRQ_ON_OWN_STACK=y
+CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
+CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
+CONFIG_ARCH_MMAP_RND_BITS=18
+CONFIG_PAGE_SIZE_LESS_THAN_64KB=y
+CONFIG_PAGE_SIZE_LESS_THAN_256KB=y
+CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y
+CONFIG_CLONE_BACKWARDS=y
+# CONFIG_COMPAT_32BIT_TIME is not set
+CONFIG_HAVE_ARCH_VMAP_STACK=y
+CONFIG_VMAP_STACK=y
+CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y
+# CONFIG_RANDOMIZE_KSTACK_OFFSET is not set
+CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
+CONFIG_STRICT_KERNEL_RWX=y
+CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
+CONFIG_STRICT_MODULE_RWX=y
+CONFIG_HAVE_ARCH_COMPILER_H=y
+CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y
+# CONFIG_LOCK_EVENT_COUNTS is not set
+CONFIG_HAVE_PREEMPT_DYNAMIC=y
+CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y
+CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y
+CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
+# end of GCOV-based kernel profiling
+
+CONFIG_HAVE_GCC_PLUGINS=y
+# CONFIG_GCC_PLUGINS is not set
+CONFIG_FUNCTION_ALIGNMENT_4B=y
+CONFIG_FUNCTION_ALIGNMENT=4
+# end of General architecture-dependent options
+
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_DEBUG is not set
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+# CONFIG_MODULE_SIG is not set
+CONFIG_MODULE_COMPRESS_NONE=y
+# CONFIG_MODULE_COMPRESS_GZIP is not set
+# CONFIG_MODULE_COMPRESS_XZ is not set
+# CONFIG_MODULE_COMPRESS_ZSTD is not set
+# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set
+CONFIG_MODPROBE_PATH="/sbin/modprobe"
+CONFIG_TRIM_UNUSED_KSYMS=y
+CONFIG_UNUSED_KSYMS_WHITELIST=""
+CONFIG_UNUSED_KSYMS_WHITELIST_SYMS="dib7000p_attach"
+CONFIG_MODULES_TREE_LOOKUP=y
+CONFIG_BLOCK=y
+# CONFIG_BLOCK_LEGACY_AUTOLOAD is not set
+CONFIG_BLK_DEV_BSG_COMMON=y
+# CONFIG_BLK_DEV_BSGLIB is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+# CONFIG_BLK_DEV_ZONED is not set
+# CONFIG_BLK_WBT is not set
+CONFIG_BLK_DEBUG_FS=y
+# CONFIG_BLK_SED_OPAL is not set
+# CONFIG_BLK_INLINE_ENCRYPTION is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_AIX_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+# CONFIG_CMDLINE_PARTITION is not set
+CONFIG_OF_PARTITION=y
+# CONFIG_OF_PARTITION_IGNORE_RO is not set
+# end of Partition Types
+
+CONFIG_BLK_MQ_PCI=y
+CONFIG_BLOCK_HOLDER_DEPRECATED=y
+CONFIG_BLK_MQ_STACKING=y
+
+#
+# IO Schedulers
+#
+CONFIG_MQ_IOSCHED_DEADLINE=y
+CONFIG_MQ_IOSCHED_KYBER=y
+# CONFIG_IOSCHED_BFQ is not set
+# end of IO Schedulers
+
+CONFIG_PREEMPT_NOTIFIERS=y
+CONFIG_ASN1=y
+CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y
+CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_LOCK=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_INLINE_READ_LOCK=y
+CONFIG_ARCH_INLINE_READ_LOCK_BH=y
+CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_READ_UNLOCK=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_INLINE_WRITE_LOCK=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y
+CONFIG_UNINLINE_SPIN_UNLOCK=y
+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+CONFIG_LOCK_SPIN_ON_OWNER=y
+CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
+CONFIG_QUEUED_SPINLOCKS=y
+CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
+CONFIG_QUEUED_RWLOCKS=y
+CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y
+CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_ARCH_BINFMT_ELF_STATE=y
+CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y
+CONFIG_ARCH_HAVE_ELF_PROT=y
+CONFIG_ARCH_USE_GNU_PROPERTY=y
+CONFIG_ELFCORE=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_BINFMT_SCRIPT=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_COREDUMP=y
+# end of Executable file formats
+
+#
+# Memory Management options
+#
+# CONFIG_SWAP is not set
+
+#
+# SLAB allocator options
+#
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLUB_TINY is not set
+CONFIG_SLAB_MERGE_DEFAULT=y
+# CONFIG_SLAB_FREELIST_RANDOM is not set
+# CONFIG_SLAB_FREELIST_HARDENED is not set
+# CONFIG_SLUB_STATS is not set
+CONFIG_SLUB_CPU_PARTIAL=y
+# end of SLAB allocator options
+
+# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set
+CONFIG_COMPAT_BRK=y
+CONFIG_SPARSEMEM=y
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_HAVE_FAST_GUP=y
+CONFIG_ARCH_KEEP_MEMBLOCK=y
+CONFIG_EXCLUSIVE_SYSTEM_RAM=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+# CONFIG_MEMORY_HOTPLUG is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
+# CONFIG_COMPACTION is not set
+# CONFIG_PAGE_REPORTING is not set
+# CONFIG_MIGRATION is not set
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_MMU_NOTIFIER=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
+CONFIG_PAGE_FRAG_CACHE_ORDER=1
+# CONFIG_MEMORY_FAILURE is not set
+CONFIG_ARCH_WANTS_THP_SWAP=y
+# CONFIG_TRANSPARENT_HUGEPAGE is not set
+# CONFIG_CMA is not set
+CONFIG_GENERIC_EARLY_IOREMAP=y
+# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
+# CONFIG_IDLE_PAGE_TRACKING is not set
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y
+CONFIG_ARCH_HAS_PTE_DEVMAP=y
+CONFIG_ARCH_HAS_ZONE_DMA_SET=y
+# CONFIG_ZONE_DMA is not set
+# CONFIG_ZONE_DMA32 is not set
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_PERCPU_STATS is not set
+# CONFIG_GUP_TEST is not set
+# CONFIG_DMAPOOL_TEST is not set
+CONFIG_ARCH_HAS_PTE_SPECIAL=y
+# CONFIG_SECRETMEM is not set
+# CONFIG_ANON_VMA_NAME is not set
+# CONFIG_USERFAULTFD is not set
+# CONFIG_LRU_GEN is not set
+CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y
+CONFIG_PER_VMA_LOCK=y
+
+#
+# Data Access Monitoring
+#
+# CONFIG_DAMON is not set
+# end of Data Access Monitoring
+# end of Memory Management options
+
+CONFIG_NET=y
+# CONFIG_NET_PROMISC_MESSAGES is not set
+CONFIG_NET_INGRESS=y
+CONFIG_NET_EGRESS=y
+CONFIG_SKB_EXTENSIONS=y
+
+#
+# Networking options
+#
+CONFIG_NETSKBPAD=64
+CONFIG_PACKET=y
+# CONFIG_PACKET_DIAG is not set
+CONFIG_UNIX=y
+CONFIG_UNIX_SCM=y
+CONFIG_AF_UNIX_OOB=y
+CONFIG_UNIX_ABSTRACT_IGNORE_NETNS=y
+# CONFIG_UNIX_DIAG is not set
+# CONFIG_TLS is not set
+CONFIG_XFRM=y
+CONFIG_XFRM_OFFLOAD=y
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_AH=y
+CONFIG_XFRM_ESP=y
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_NET_HANDSHAKE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_IP_FIB_TRIE_STATS is not set
+CONFIG_IP_MULTIPLE_TABLES=y
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IP_TUNNEL=y
+CONFIG_NET_IPGRE=y
+# CONFIG_NET_IPGRE_BROADCAST is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_NET_IPVTI is not set
+CONFIG_NET_UDP_TUNNEL=y
+# CONFIG_NET_FOU is not set
+# CONFIG_NET_FOU_IP_TUNNELS is not set
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_ESP_OFFLOAD is not set
+# CONFIG_INET_ESPINTCP is not set
+# CONFIG_INET_IPCOMP is not set
+CONFIG_INET_TABLE_PERTURB_ORDER=16
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_INET_UDP_DIAG is not set
+# CONFIG_INET_RAW_DIAG is not set
+# CONFIG_INET_DIAG_DESTROY is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=y
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_ESP_OFFLOAD=y
+# CONFIG_INET6_ESPINTCP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_IPV6_ILA is not set
+CONFIG_INET6_TUNNEL=y
+# CONFIG_IPV6_VTI is not set
+CONFIG_IPV6_SIT=y
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=y
+# CONFIG_IPV6_GRE is not set
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_IPV6_SEG6_LWTUNNEL is not set
+# CONFIG_IPV6_SEG6_HMAC is not set
+# CONFIG_IPV6_RPL_LWTUNNEL is not set
+# CONFIG_IPV6_IOAM6_LWTUNNEL is not set
+# CONFIG_MPTCP is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_ADVANCED=y
+# CONFIG_BRIDGE_NETFILTER is not set
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_INGRESS is not set
+# CONFIG_NETFILTER_EGRESS is not set
+CONFIG_NETFILTER_NETLINK=y
+# CONFIG_NETFILTER_NETLINK_ACCT is not set
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+# CONFIG_NETFILTER_NETLINK_LOG is not set
+# CONFIG_NETFILTER_NETLINK_OSF is not set
+CONFIG_NF_CONNTRACK=y
+# CONFIG_NF_LOG_SYSLOG is not set
+# CONFIG_NF_CONNTRACK_MARK is not set
+# CONFIG_NF_CONNTRACK_ZONES is not set
+CONFIG_NF_CONNTRACK_PROCFS=y
+# CONFIG_NF_CONNTRACK_EVENTS is not set
+# CONFIG_NF_CONNTRACK_TIMEOUT is not set
+# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
+# CONFIG_NF_CONNTRACK_LABELS is not set
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_GRE=y
+CONFIG_NF_CT_PROTO_SCTP=y
+# CONFIG_NF_CT_PROTO_UDPLITE is not set
+# CONFIG_NF_CONNTRACK_AMANDA is not set
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
+# CONFIG_NF_CONNTRACK_SNMP is not set
+CONFIG_NF_CONNTRACK_PPTP=m
+# CONFIG_NF_CONNTRACK_SANE is not set
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NF_NAT=y
+CONFIG_NF_NAT_FTP=y
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_SIP=m
+CONFIG_NF_NAT_TFTP=y
+CONFIG_NF_NAT_REDIRECT=y
+CONFIG_NF_NAT_MASQUERADE=y
+# CONFIG_NF_TABLES is not set
+CONFIG_NETFILTER_XTABLES=y
+
+#
+# Xtables combined modules
+#
+CONFIG_NETFILTER_XT_MARK=y
+# CONFIG_NETFILTER_XT_CONNMARK is not set
+
+#
+# Xtables targets
+#
+# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set
+# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
+CONFIG_NETFILTER_XT_TARGET_DSCP=y
+# CONFIG_NETFILTER_XT_TARGET_HL is not set
+# CONFIG_NETFILTER_XT_TARGET_HMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
+# CONFIG_NETFILTER_XT_TARGET_LOG is not set
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_NAT=y
+# CONFIG_NETFILTER_XT_TARGET_NETMAP is not set
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=y
+CONFIG_NETFILTER_XT_TARGET_MASQUERADE=y
+# CONFIG_NETFILTER_XT_TARGET_TEE is not set
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+
+#
+# Xtables matches
+#
+# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_BPF is not set
+# CONFIG_NETFILTER_XT_MATCH_CGROUP is not set
+# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNMARK is not set
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+# CONFIG_NETFILTER_XT_MATCH_CPU is not set
+CONFIG_NETFILTER_XT_MATCH_DCCP=y
+# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+# CONFIG_NETFILTER_XT_MATCH_ECN is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_HELPER is not set
+# CONFIG_NETFILTER_XT_MATCH_HL is not set
+# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set
+# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
+CONFIG_NETFILTER_XT_MATCH_SCTP=y
+# CONFIG_NETFILTER_XT_MATCH_SOCKET is not set
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# end of Core Netfilter Configuration
+
+# CONFIG_IP_SET is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_FFN=y
+CONFIG_IP_FFN_PROCFS=y
+CONFIG_NF_DEFRAG_IPV4=y
+# CONFIG_NF_SOCKET_IPV4 is not set
+CONFIG_NF_TPROXY_IPV4=y
+# CONFIG_NF_DUP_IPV4 is not set
+# CONFIG_NF_LOG_ARP is not set
+# CONFIG_NF_LOG_IPV4 is not set
+CONFIG_NF_REJECT_IPV4=y
+CONFIG_NF_NAT_PPTP=m
+CONFIG_NF_NAT_H323=m
+CONFIG_IP_NF_IPTABLES=y
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_RPFILTER is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+# CONFIG_IP_NF_TARGET_SYNPROXY is not set
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+# CONFIG_IP_NF_TARGET_NETMAP is not set
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+# CONFIG_IP_NF_TARGET_ECN is not set
+# CONFIG_IP_NF_TARGET_TTL is not set
+# CONFIG_IP_NF_RAW is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+# end of IP: Netfilter Configuration
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_IPV6_FFN=y
+CONFIG_IPV6_FFN_PROCFS=y
+# CONFIG_NF_SOCKET_IPV6 is not set
+CONFIG_NF_TPROXY_IPV6=y
+# CONFIG_NF_DUP_IPV6 is not set
+CONFIG_NF_REJECT_IPV6=y
+# CONFIG_NF_LOG_IPV6 is not set
+CONFIG_IP6_NF_IPTABLES=y
+# CONFIG_IP6_NF_MATCH_AH is not set
+# CONFIG_IP6_NF_MATCH_EUI64 is not set
+# CONFIG_IP6_NF_MATCH_FRAG is not set
+# CONFIG_IP6_NF_MATCH_OPTS is not set
+# CONFIG_IP6_NF_MATCH_HL is not set
+# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set
+# CONFIG_IP6_NF_MATCH_MH is not set
+# CONFIG_IP6_NF_MATCH_RPFILTER is not set
+# CONFIG_IP6_NF_MATCH_RT is not set
+# CONFIG_IP6_NF_MATCH_SRH is not set
+# CONFIG_IP6_NF_TARGET_HL is not set
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+# CONFIG_IP6_NF_TARGET_SYNPROXY is not set
+CONFIG_IP6_NF_MANGLE=y
+# CONFIG_IP6_NF_RAW is not set
+CONFIG_IP6_NF_NAT=y
+CONFIG_IP6_NF_TARGET_MASQUERADE=y
+# CONFIG_IP6_NF_TARGET_NPT is not set
+# end of IPv6: Netfilter Configuration
+
+CONFIG_NF_DEFRAG_IPV6=y
+# CONFIG_NF_CONNTRACK_BRIDGE is not set
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+# CONFIG_BPFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+CONFIG_FBXATM=y
+CONFIG_FBXATM_STACK=y
+# CONFIG_FBXATM_REMOTE_STUB is not set
+# CONFIG_FBXATM_REMOTE_DRIVER is not set
+CONFIG_FBXBRIDGE=y
+CONFIG_STP=y
+CONFIG_BRIDGE=y
+# CONFIG_BRIDGE_STATE_MESSAGES is not set
+# CONFIG_BRIDGE_IGMP_SNOOPING is not set
+# CONFIG_BRIDGE_VLAN_FILTERING is not set
+# CONFIG_BRIDGE_MRP is not set
+# CONFIG_BRIDGE_CFM is not set
+CONFIG_NET_DSA=y
+# CONFIG_NET_DSA_TAG_NONE is not set
+# CONFIG_NET_DSA_TAG_AR9331 is not set
+CONFIG_NET_DSA_TAG_BRCM_COMMON=y
+CONFIG_NET_DSA_TAG_BRCM=y
+# CONFIG_NET_DSA_TAG_BRCM_LEGACY is not set
+# CONFIG_NET_DSA_TAG_BRCM_PREPEND is not set
+# CONFIG_NET_DSA_TAG_HELLCREEK is not set
+CONFIG_NET_DSA_TAG_BRCM_FBX=y
+# CONFIG_NET_DSA_TAG_GSWIP is not set
+# CONFIG_NET_DSA_TAG_DSA is not set
+# CONFIG_NET_DSA_TAG_EDSA is not set
+# CONFIG_NET_DSA_TAG_MTK is not set
+# CONFIG_NET_DSA_TAG_KSZ is not set
+# CONFIG_NET_DSA_TAG_OCELOT is not set
+# CONFIG_NET_DSA_TAG_OCELOT_8021Q is not set
+# CONFIG_NET_DSA_TAG_QCA is not set
+# CONFIG_NET_DSA_TAG_RTL4_A is not set
+# CONFIG_NET_DSA_TAG_RTL8_4 is not set
+# CONFIG_NET_DSA_TAG_RZN1_A5PSW is not set
+# CONFIG_NET_DSA_TAG_LAN9303 is not set
+# CONFIG_NET_DSA_TAG_SJA1105 is not set
+# CONFIG_NET_DSA_TAG_TRAILER is not set
+# CONFIG_NET_DSA_TAG_XRS700X is not set
+CONFIG_VLAN_8021Q=y
+# CONFIG_VLAN_8021Q_GVRP is not set
+# CONFIG_VLAN_8021Q_MVRP is not set
+CONFIG_VLAN_FBX=y
+CONFIG_LLC=y
+# CONFIG_LLC2 is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_PHONET is not set
+# CONFIG_6LOWPAN is not set
+# CONFIG_IEEE802154 is not set
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+# CONFIG_NET_SCH_HTB is not set
+# CONFIG_NET_SCH_HFSC is not set
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+# CONFIG_NET_SCH_RED is not set
+# CONFIG_NET_SCH_SFB is not set
+CONFIG_NET_SCH_SFQ=y
+# CONFIG_NET_SCH_TEQL is not set
+# CONFIG_NET_SCH_TBF is not set
+# CONFIG_NET_SCH_CBS is not set
+# CONFIG_NET_SCH_ETF is not set
+# CONFIG_NET_SCH_TAPRIO is not set
+# CONFIG_NET_SCH_GRED is not set
+# CONFIG_NET_SCH_NETEM is not set
+CONFIG_NET_SCH_DRR=y
+# CONFIG_NET_SCH_MQPRIO is not set
+# CONFIG_NET_SCH_SKBPRIO is not set
+# CONFIG_NET_SCH_CHOKE is not set
+# CONFIG_NET_SCH_QFQ is not set
+# CONFIG_NET_SCH_CODEL is not set
+CONFIG_NET_SCH_FQ_CODEL=y
+# CONFIG_NET_SCH_CAKE is not set
+# CONFIG_NET_SCH_FQ is not set
+# CONFIG_NET_SCH_HHF is not set
+# CONFIG_NET_SCH_PIE is not set
+CONFIG_NET_SCH_INGRESS=y
+# CONFIG_NET_SCH_PLUG is not set
+# CONFIG_NET_SCH_ETS is not set
+# CONFIG_NET_SCH_DEFAULT is not set
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
+# CONFIG_NET_CLS_BASIC is not set
+# CONFIG_NET_CLS_ROUTE4 is not set
+# CONFIG_NET_CLS_FW is not set
+CONFIG_NET_CLS_U32=y
+# CONFIG_CLS_U32_PERF is not set
+CONFIG_CLS_U32_MARK=y
+# CONFIG_NET_CLS_FLOW is not set
+# CONFIG_NET_CLS_CGROUP is not set
+# CONFIG_NET_CLS_BPF is not set
+# CONFIG_NET_CLS_FLOWER is not set
+# CONFIG_NET_CLS_MATCHALL is not set
+# CONFIG_NET_EMATCH is not set
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+# CONFIG_NET_ACT_GACT is not set
+# CONFIG_NET_ACT_MIRRED is not set
+# CONFIG_NET_ACT_SAMPLE is not set
+# CONFIG_NET_ACT_IPT is not set
+# CONFIG_NET_ACT_NAT is not set
+# CONFIG_NET_ACT_PEDIT is not set
+# CONFIG_NET_ACT_SIMP is not set
+CONFIG_NET_ACT_SKBEDIT=y
+# CONFIG_NET_ACT_CSUM is not set
+# CONFIG_NET_ACT_MPLS is not set
+# CONFIG_NET_ACT_VLAN is not set
+# CONFIG_NET_ACT_BPF is not set
+# CONFIG_NET_ACT_SKBMOD is not set
+# CONFIG_NET_ACT_IFE is not set
+# CONFIG_NET_ACT_TUNNEL_KEY is not set
+# CONFIG_NET_ACT_GATE is not set
+# CONFIG_NET_TC_SKB_EXT is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
+# CONFIG_DNS_RESOLVER is not set
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_BATMAN_V=y
+# CONFIG_BATMAN_ADV_BLA is not set
+# CONFIG_BATMAN_ADV_DAT is not set
+# CONFIG_BATMAN_ADV_NC is not set
+# CONFIG_BATMAN_ADV_MCAST is not set
+# CONFIG_BATMAN_ADV_DEBUG is not set
+CONFIG_BATMAN_ADV_FBX=y
+CONFIG_BATMAN_ADV_FBX_MTU=y
+CONFIG_BATMAN_ADV_FBX_SLAP=y
+# CONFIG_BATMAN_ADV_FBX_PERIF_ROUTER is not set
+# CONFIG_OPENVSWITCH is not set
+# CONFIG_VSOCKETS is not set
+# CONFIG_NETLINK_DIAG is not set
+# CONFIG_MPLS is not set
+# CONFIG_NET_NSH is not set
+# CONFIG_HSR is not set
+CONFIG_NET_SWITCHDEV=y
+# CONFIG_NET_L3_MASTER_DEV is not set
+CONFIG_QRTR=y
+# CONFIG_QRTR_SMD is not set
+# CONFIG_QRTR_TUN is not set
+CONFIG_QRTR_MHI=y
+# CONFIG_NET_NCSI is not set
+# CONFIG_PCPU_DEV_REFCNT is not set
+CONFIG_MAX_SKB_FRAGS=17
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_SOCK_RX_QUEUE_MAPPING=y
+CONFIG_XPS=y
+# CONFIG_CGROUP_NET_PRIO is not set
+# CONFIG_CGROUP_NET_CLASSID is not set
+CONFIG_NET_RX_BUSY_POLL=y
+CONFIG_BQL=y
+CONFIG_NET_FLOW_LIMIT=y
+
+#
+# Network testing
+#
+CONFIG_NET_PKTGEN=y
+# end of Network testing
+# end of Networking options
+
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_AF_KCM is not set
+# CONFIG_MCTP is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+# CONFIG_CFG80211_REQUIRE_SIGNED_REGDB is not set
+# CONFIG_CFG80211_REG_CELLULAR_HINTS is not set
+# CONFIG_CFG80211_REG_RELAX_NO_IR is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+CONFIG_CFG80211_DFS_CACHE=y
+CONFIG_CFG80211_CRDA_SUPPORT=y
+# CONFIG_CFG80211_WEXT is not set
+# CONFIG_FBX80211 is not set
+CONFIG_MAC80211=y
+CONFIG_MAC80211_HAS_RC=y
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+CONFIG_MAC80211_MESH=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_MAC80211_DEBUGFS=y
+# CONFIG_MAC80211_MESSAGE_TRACING is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+# CONFIG_NFC is not set
+# CONFIG_PSAMPLE is not set
+# CONFIG_NET_IFE is not set
+# CONFIG_LWTUNNEL is not set
+CONFIG_DST_CACHE=y
+CONFIG_GRO_CELLS=y
+CONFIG_NET_SELFTESTS=y
+CONFIG_NET_DEVLINK=y
+CONFIG_PAGE_POOL=y
+CONFIG_PAGE_POOL_STATS=y
+# CONFIG_FAILOVER is not set
+# CONFIG_ETHTOOL_NETLINK is not set
+
+#
+# Device Drivers
+#
+CONFIG_ARM_AMBA=y
+CONFIG_HAVE_PCI=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_DOMAINS_GENERIC=y
+CONFIG_PCI_SYSCALL=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIEAER=y
+# CONFIG_PCIEAER_INJECT is not set
+# CONFIG_PCIE_ECRC is not set
+# CONFIG_PCIEASPM is not set
+# CONFIG_PCIE_DPC is not set
+# CONFIG_PCIE_PTM is not set
+CONFIG_PCI_MSI=y
+CONFIG_PCI_QUIRKS=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
+# CONFIG_PCI_PRI is not set
+# CONFIG_PCI_PASID is not set
+# CONFIG_PCIE_BUS_TUNE_OFF is not set
+# CONFIG_PCIE_BUS_DEFAULT is not set
+# CONFIG_PCIE_BUS_SAFE is not set
+CONFIG_PCIE_BUS_PERFORMANCE=y
+# CONFIG_PCIE_BUS_PEER2PEER is not set
+# CONFIG_VGA_ARB is not set
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# PCI controller drivers
+#
+# CONFIG_PCIE_ALTERA is not set
+# CONFIG_PCI_HOST_THUNDER_PEM is not set
+# CONFIG_PCI_HOST_THUNDER_ECAM is not set
+# CONFIG_PCI_FTPCI100 is not set
+# CONFIG_PCI_HOST_GENERIC is not set
+# CONFIG_PCIE_MICROCHIP_HOST is not set
+# CONFIG_PCI_XGENE is not set
+# CONFIG_PCIE_XILINX is not set
+
+#
+# Cadence-based PCIe controllers
+#
+# CONFIG_PCIE_CADENCE_PLAT_HOST is not set
+# CONFIG_PCI_J721E_HOST is not set
+# end of Cadence-based PCIe controllers
+
+#
+# DesignWare-based PCIe controllers
+#
+CONFIG_PCIE_DW=y
+CONFIG_PCIE_DW_HOST=y
+# CONFIG_PCIE_AL is not set
+# CONFIG_PCI_MESON is not set
+# CONFIG_PCI_HISI is not set
+# CONFIG_PCIE_KIRIN is not set
+# CONFIG_PCIE_DW_PLAT_HOST is not set
+CONFIG_PCIE_QCOM=y
+# end of DesignWare-based PCIe controllers
+
+#
+# Mobiveil-based PCIe controllers
+#
+# end of Mobiveil-based PCIe controllers
+# end of PCI controller drivers
+
+#
+# PCI Endpoint
+#
+# CONFIG_PCI_ENDPOINT is not set
+# end of PCI Endpoint
+
+#
+# PCI switch controller drivers
+#
+# CONFIG_PCI_SW_SWITCHTEC is not set
+# end of PCI switch controller drivers
+
+# CONFIG_CXL_BUS is not set
+# CONFIG_PCCARD is not set
+# CONFIG_RAPIDIO is not set
+
+#
+# Generic Driver Options
+#
+# CONFIG_UEVENT_HELPER is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_DEVTMPFS_SAFE is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+
+#
+# Firmware loader
+#
+CONFIG_FW_LOADER=y
+CONFIG_FW_LOADER_PAGED_BUF=y
+CONFIG_FW_LOADER_SYSFS=y
+CONFIG_EXTRA_FIRMWARE=""
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+# CONFIG_FW_LOADER_COMPRESS is not set
+# CONFIG_FW_UPLOAD is not set
+# end of Firmware loader
+
+CONFIG_WANT_DEV_COREDUMP=y
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
+# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_GENERIC_CPU_VULNERABILITIES=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_MMIO=y
+CONFIG_DMA_SHARED_BUFFER=y
+# CONFIG_DMA_FENCE_TRACE is not set
+CONFIG_GENERIC_ARCH_TOPOLOGY=y
+# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set
+# end of Generic Driver Options
+
+#
+# Bus devices
+#
+# CONFIG_BRCMSTB_GISB_ARB is not set
+# CONFIG_MOXTET is not set
+# CONFIG_QCOM_EBI2 is not set
+# CONFIG_QCOM_SSC_BLOCK_BUS is not set
+# CONFIG_VEXPRESS_CONFIG is not set
+CONFIG_MHI_BUS=y
+# CONFIG_MHI_BUS_DEBUG is not set
+# CONFIG_MHI_BUS_PCI_GENERIC is not set
+# CONFIG_MHI_BUS_EP is not set
+# end of Bus devices
+
+# CONFIG_CONNECTOR is not set
+
+#
+# Firmware Drivers
+#
+
+#
+# ARM System Control and Management Interface Protocol
+#
+# CONFIG_ARM_SCMI_PROTOCOL is not set
+# end of ARM System Control and Management Interface Protocol
+
+# CONFIG_ARM_SCPI_PROTOCOL is not set
+# CONFIG_FIRMWARE_MEMMAP is not set
+# CONFIG_FW_CFG_SYSFS is not set
+CONFIG_QCOM_SCM=y
+# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set
+# CONFIG_ARM_FFA_TRANSPORT is not set
+# CONFIG_GOOGLE_FIRMWARE is not set
+CONFIG_ARM_PSCI_FW=y
+CONFIG_HAVE_ARM_SMCCC=y
+CONFIG_HAVE_ARM_SMCCC_DISCOVERY=y
+# CONFIG_ARM_SMCCC_SOC_ID is not set
+
+#
+# Tegra firmware driver
+#
+# end of Tegra firmware driver
+# end of Firmware Drivers
+
+# CONFIG_GNSS is not set
+CONFIG_FREEBOX_PROCFS=y
+CONFIG_MTD=y
+# CONFIG_MTD_TESTS is not set
+CONFIG_MTD_ERASE_PRINTK=y
+
+#
+# Partition parsers
+#
+# CONFIG_MTD_AR7_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_OF_PARTS=y
+# CONFIG_MTD_OF_PARTS_IGNORE_RO is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_FBX6HD_PARTS is not set
+# end of Partition parsers
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+
+#
+# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK.
+#
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
+# CONFIG_MTD_OOPS is not set
+# CONFIG_MTD_PARTITIONED_MASTER is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# end of RAM/ROM/Flash chip drivers
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_PLATRAM is not set
+# end of Mapping drivers for chip access
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_MCHP23K256 is not set
+# CONFIG_MTD_MCHP48L640 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOCG3 is not set
+# end of Self-contained MTD device drivers
+
+#
+# NAND
+#
+CONFIG_MTD_NAND_CORE=y
+# CONFIG_MTD_ONENAND is not set
+# CONFIG_MTD_RAW_NAND is not set
+CONFIG_MTD_SPI_NAND=y
+
+#
+# ECC engine support
+#
+CONFIG_MTD_NAND_ECC=y
+# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set
+# CONFIG_MTD_NAND_ECC_SW_BCH is not set
+# CONFIG_MTD_NAND_ECC_MXIC is not set
+# end of ECC engine support
+# end of NAND
+
+#
+# LPDDR & LPDDR2 PCM memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+# end of LPDDR & LPDDR2 PCM memory drivers
+
+# CONFIG_MTD_SPI_NOR is not set
+# CONFIG_MTD_UBI is not set
+# CONFIG_MTD_HYPERBUS is not set
+CONFIG_DTC=y
+CONFIG_OF=y
+# CONFIG_OF_UNITTEST is not set
+CONFIG_OF_DTB_BUILTIN_LIST=""
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_KOBJ=y
+CONFIG_OF_DYNAMIC=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_RESERVED_MEM=y
+CONFIG_OF_RESOLVE=y
+CONFIG_OF_OVERLAY=y
+CONFIG_OF_CONFIGFS=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_NULL_BLK is not set
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+# CONFIG_ZRAM is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+# CONFIG_BLK_DEV_DRBD is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_BLK_DEV_RAM_SIZE=65536
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_BLK_DEV_UBLK is not set
+
+#
+# NVME Support
+#
+CONFIG_NVME_CORE=y
+CONFIG_BLK_DEV_NVME=y
+# CONFIG_NVME_MULTIPATH is not set
+# CONFIG_NVME_VERBOSE_ERRORS is not set
+CONFIG_NVME_HWMON=y
+# CONFIG_NVME_FC is not set
+# CONFIG_NVME_TCP is not set
+# CONFIG_NVME_AUTH is not set
+# CONFIG_NVME_TARGET is not set
+# end of NVME Support
+
+#
+# Misc devices
+#
+# CONFIG_WINTEGRA_MMAP is not set
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_PHANTOM is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_QCOM_FASTRPC is not set
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_INTELCE_PIC16PMU is not set
+CONFIG_FBXSERIAL_OF=y
+# CONFIG_LATTICE_ECP3_CONFIG is not set
+# CONFIG_SRAM is not set
+# CONFIG_DW_XDATA_PCIE is not set
+# CONFIG_PCI_ENDPOINT_TEST is not set
+# CONFIG_XILINX_SDFEC is not set
+# CONFIG_OPEN_DICE is not set
+# CONFIG_VCPU_STALL_DETECTOR is not set
+# CONFIG_DGASP is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=m
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_EEPROM_93XX46 is not set
+# CONFIG_EEPROM_IDT_89HPESX is not set
+# CONFIG_EEPROM_EE1004 is not set
+# CONFIG_EEPROM_EE1004_RAW is not set
+# end of EEPROM support
+
+# CONFIG_CB710_CORE is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# end of Texas Instruments shared transport line discipline
+
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+# CONFIG_ALTERA_STAPL is not set
+# CONFIG_VMWARE_VMCI is not set
+# CONFIG_GENWQE is not set
+# CONFIG_ECHO is not set
+# CONFIG_BCM_VK is not set
+# CONFIG_MISC_ALCOR_PCI is not set
+# CONFIG_MISC_RTSX_PCI is not set
+# CONFIG_MISC_RTSX_USB is not set
+# CONFIG_PVPANIC is not set
+# CONFIG_GP_PCI1XXXX is not set
+
+#
+# RemoTI support
+#
+# end of RemoTI support
+
+#
+# HDMI CEC support
+#
+# CONFIG_HDMI_CEC is not set
+# end of HDMI CEC support
+# end of Misc devices
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI_COMMON=y
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_BLK_DEV_SR is not set
+CONFIG_CHR_DEV_SG=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+CONFIG_SCSI_SCAN_ASYNC=y
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# end of SCSI Transports
+
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# end of SCSI device support
+
+# CONFIG_ATA is not set
+CONFIG_MD=y
+# CONFIG_BLK_DEV_MD is not set
+# CONFIG_BCACHE is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
+CONFIG_BLK_DEV_DM=y
+# CONFIG_DM_DEBUG is not set
+# CONFIG_DM_UNSTRIPED is not set
+CONFIG_DM_CRYPT=y
+# CONFIG_DM_SNAPSHOT is not set
+# CONFIG_DM_THIN_PROVISIONING is not set
+# CONFIG_DM_CACHE is not set
+# CONFIG_DM_WRITECACHE is not set
+# CONFIG_DM_EBS is not set
+# CONFIG_DM_ERA is not set
+# CONFIG_DM_CLONE is not set
+# CONFIG_DM_MIRROR is not set
+# CONFIG_DM_RAID is not set
+# CONFIG_DM_ZERO is not set
+# CONFIG_DM_MULTIPATH is not set
+# CONFIG_DM_DELAY is not set
+# CONFIG_DM_DUST is not set
+# CONFIG_DM_INIT is not set
+# CONFIG_DM_UEVENT is not set
+# CONFIG_DM_FLAKEY is not set
+# CONFIG_DM_VERITY is not set
+# CONFIG_DM_SWITCH is not set
+# CONFIG_DM_LOG_WRITES is not set
+# CONFIG_DM_INTEGRITY is not set
+# CONFIG_DM_AUDIT is not set
+# CONFIG_TARGET_CORE is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_FIREWIRE_NOSY is not set
+# end of IEEE 1394 (FireWire) support
+
+CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_NET_CORE=y
+# CONFIG_BONDING is not set
+CONFIG_DUMMY=y
+CONFIG_WIREGUARD=y
+# CONFIG_WIREGUARD_DEBUG is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_NET_FC is not set
+# CONFIG_NET_TEAM is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_IPVLAN is not set
+# CONFIG_VXLAN is not set
+# CONFIG_GENEVE is not set
+# CONFIG_BAREUDP is not set
+# CONFIG_GTP is not set
+# CONFIG_AMT is not set
+# CONFIG_MACSEC is not set
+# CONFIG_NETCONSOLE is not set
+CONFIG_TUN=y
+# CONFIG_TUN_VNET_CROSS_LE is not set
+CONFIG_VETH=y
+# CONFIG_NLMON is not set
+# CONFIG_MHI_NET is not set
+# CONFIG_ARCNET is not set
+
+#
+# Distributed Switch Architecture drivers
+#
+# CONFIG_B53 is not set
+# CONFIG_NET_DSA_BCM_SF2 is not set
+# CONFIG_NET_DSA_LOOP is not set
+# CONFIG_NET_DSA_LANTIQ_GSWIP is not set
+# CONFIG_NET_DSA_MT7530 is not set
+# CONFIG_NET_DSA_MV88E6060 is not set
+# CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON is not set
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_AR9331 is not set
+# CONFIG_NET_DSA_QCA8K is not set
+# CONFIG_NET_DSA_SJA1105 is not set
+# CONFIG_NET_DSA_XRS700X_I2C is not set
+# CONFIG_NET_DSA_XRS700X_MDIO is not set
+# CONFIG_NET_DSA_REALTEK is not set
+# CONFIG_NET_DSA_SMSC_LAN9303_I2C is not set
+# CONFIG_NET_DSA_SMSC_LAN9303_MDIO is not set
+# CONFIG_NET_DSA_VITESSE_VSC73XX_SPI is not set
+# CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM is not set
+# end of Distributed Switch Architecture drivers
+
+CONFIG_ETHERNET=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_ALTERA_TSE is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ASIX is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DAVICOM is not set
+# CONFIG_DNET is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_ENGLEDER is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_FUNGIBLE is not set
+# CONFIG_NET_VENDOR_GOOGLE is not set
+# CONFIG_NET_VENDOR_HISILICON is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_JME is not set
+# CONFIG_NET_VENDOR_ADI is not set
+# CONFIG_NET_VENDOR_LITEX is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETERION is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_ETHOC is not set
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+# CONFIG_NET_VENDOR_PENSANDO is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+CONFIG_NET_VENDOR_QUALCOMM=y
+# CONFIG_QCA7000_SPI is not set
+# CONFIG_QCA7000_UART is not set
+# CONFIG_QCOM_EMAC is not set
+# CONFIG_RMNET is not set
+CONFIG_IPQ95XX_ESS=y
+CONFIG_IPQ95XX_FBX_FF=y
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VERTEXCOM is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WANGXUN is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+CONFIG_PHYLINK=y
+CONFIG_PHYLIB=y
+CONFIG_SWPHY=y
+# CONFIG_LED_TRIGGER_PHY is not set
+CONFIG_PHYLIB_LEDS=y
+CONFIG_FIXED_PHY=y
+CONFIG_SFP=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_AMD_PHY is not set
+# CONFIG_ADIN_PHY is not set
+# CONFIG_ADIN1100_PHY is not set
+CONFIG_AQUANTIA_PHY=y
+# CONFIG_AX88796B_PHY is not set
+CONFIG_BROADCOM_PHY=y
+# CONFIG_BCM54140_PHY is not set
+CONFIG_BCM7XXX_PHY=y
+# CONFIG_BCM84881_PHY is not set
+# CONFIG_BCM87XX_PHY is not set
+CONFIG_BCM_NET_PHYLIB=y
+# CONFIG_CICADA_PHY is not set
+# CONFIG_CORTINA_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_INTEL_XWAY_PHY is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_MARVELL_10G_PHY is not set
+# CONFIG_MARVELL_88X2222_PHY is not set
+# CONFIG_MAXLINEAR_GPHY is not set
+# CONFIG_MEDIATEK_GE_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_MICROCHIP_T1S_PHY is not set
+# CONFIG_MICROCHIP_PHY is not set
+# CONFIG_MICROCHIP_T1_PHY is not set
+# CONFIG_MICROSEMI_PHY is not set
+# CONFIG_MOTORCOMM_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_NXP_CBTX_PHY is not set
+# CONFIG_NXP_C45_TJA11XX_PHY is not set
+# CONFIG_NXP_TJA11XX_PHY is not set
+# CONFIG_NCN26000_PHY is not set
+# CONFIG_AT803X_PHY is not set
+# CONFIG_QCA807X_PHY is not set
+CONFIG_QCA8084_PHY=y
+# CONFIG_QSEMI_PHY is not set
+CONFIG_REALTEK_PHY=y
+# CONFIG_RENESAS_PHY is not set
+# CONFIG_ROCKCHIP_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_TERANETICS_PHY is not set
+# CONFIG_DP83822_PHY is not set
+# CONFIG_DP83TC811_PHY is not set
+# CONFIG_DP83848_PHY is not set
+# CONFIG_DP83867_PHY is not set
+# CONFIG_DP83869_PHY is not set
+# CONFIG_DP83TD510_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_XILINX_GMII2RGMII is not set
+# CONFIG_MICREL_KS8995MA is not set
+# CONFIG_PSE_CONTROLLER is not set
+CONFIG_MDIO_DEVICE=y
+CONFIG_MDIO_BUS=y
+CONFIG_FWNODE_MDIO=y
+CONFIG_OF_MDIO=y
+CONFIG_MDIO_DEVRES=y
+# CONFIG_MDIO_BITBANG is not set
+# CONFIG_MDIO_BCM_UNIMAC is not set
+# CONFIG_MDIO_HISI_FEMAC is not set
+CONFIG_MDIO_I2C=y
+# CONFIG_MDIO_MVUSB is not set
+# CONFIG_MDIO_MSCC_MIIM is not set
+# CONFIG_MDIO_OCTEON is not set
+CONFIG_MDIO_IPQ4019=y
+# CONFIG_MDIO_IPQ8064 is not set
+# CONFIG_MDIO_THUNDER is not set
+
+#
+# MDIO Multiplexers
+#
+# CONFIG_MDIO_BUS_MUX_GPIO is not set
+# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set
+# CONFIG_MDIO_BUS_MUX_MMIOREG is not set
+
+#
+# PCS device drivers
+#
+# end of PCS device drivers
+
+CONFIG_PPP=y
+# CONFIG_PPP_BSDCOMP is not set
+# CONFIG_PPP_DEFLATE is not set
+# CONFIG_PPP_FILTER is not set
+CONFIG_PPP_MPPE=y
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPPOE=y
+CONFIG_PPTP=y
+# CONFIG_PPP_ASYNC is not set
+# CONFIG_PPP_SYNC_TTY is not set
+CONFIG_SLIP=y
+CONFIG_SLHC=y
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+# CONFIG_SLIP_MODE_SLIP6 is not set
+# CONFIG_USB_NET_DRIVERS is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+CONFIG_ATH_COMMON=y
+CONFIG_WLAN_VENDOR_ATH=y
+CONFIG_ATH_DEBUG=y
+CONFIG_ATH_REG_IGNORE=y
+CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS=y
+CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING=y
+# CONFIG_ATH5K is not set
+# CONFIG_ATH5K_PCI is not set
+CONFIG_ATH9K_HW=m
+CONFIG_ATH9K_COMMON=m
+CONFIG_ATH9K_COMMON_DEBUG=y
+CONFIG_ATH9K_BTCOEX_SUPPORT=y
+CONFIG_ATH9K=m
+CONFIG_ATH9K_PCI=y
+# CONFIG_ATH9K_AHB is not set
+CONFIG_ATH9K_DEBUGFS=y
+CONFIG_ATH9K_STATION_STATISTICS=y
+CONFIG_ATH9K_TX99=y
+# CONFIG_ATH9K_DFS_CERTIFIED is not set
+# CONFIG_ATH9K_DYNACK is not set
+# CONFIG_ATH9K_CHANNEL_CONTEXT is not set
+# CONFIG_ATH9K_PCOEM is not set
+# CONFIG_ATH9K_PCI_NO_EEPROM is not set
+# CONFIG_ATH9K_HTC is not set
+# CONFIG_ATH9K_HWRNG is not set
+# CONFIG_ATH9K_COMMON_SPECTRAL is not set
+# CONFIG_CARL9170 is not set
+# CONFIG_ATH6KL is not set
+# CONFIG_AR5523 is not set
+# CONFIG_WIL6210 is not set
+CONFIG_ATH10K=y
+CONFIG_ATH10K_CE=y
+CONFIG_ATH10K_PCI=m
+# CONFIG_ATH10K_AHB is not set
+# CONFIG_ATH10K_SDIO is not set
+# CONFIG_ATH10K_USB is not set
+CONFIG_ATH10K_DEBUG=y
+CONFIG_ATH10K_DEBUGFS=y
+# CONFIG_ATH10K_SPECTRAL is not set
+CONFIG_ATH10K_DFS_CERTIFIED=y
+# CONFIG_WCN36XX is not set
+CONFIG_ATH11K=y
+CONFIG_ATH11K_PCI=m
+CONFIG_ATH11K_DEBUG=y
+CONFIG_ATH11K_DEBUGFS=y
+CONFIG_ATH11K_SMALL_DP_RINGS=y
+# CONFIG_ATH11K_QCN9074_FIXED_MEM_REGION is not set
+CONFIG_ATH12K=m
+CONFIG_ATH12K_DEBUG=y
+CONFIG_ATH12K_DEBUGFS=y
+# CONFIG_ATH12K_MEM_PROFILE_512M is not set
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+# CONFIG_WLAN_VENDOR_BROADCOM is not set
+# CONFIG_WLAN_VENDOR_CISCO is not set
+# CONFIG_WLAN_VENDOR_INTEL is not set
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+# CONFIG_WLAN_VENDOR_MARVELL is not set
+# CONFIG_WLAN_VENDOR_MEDIATEK is not set
+# CONFIG_WLAN_VENDOR_MICROCHIP is not set
+# CONFIG_WLAN_VENDOR_PURELIFI is not set
+# CONFIG_WLAN_VENDOR_RALINK is not set
+# CONFIG_WLAN_VENDOR_REALTEK is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+# CONFIG_WLAN_VENDOR_SILABS is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+# CONFIG_WLAN_VENDOR_QUANTENNA is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_VIRT_WIFI is not set
+# CONFIG_WAN is not set
+
+#
+# Wireless WAN
+#
+# CONFIG_WWAN is not set
+# end of Wireless WAN
+
+# CONFIG_VMXNET3 is not set
+# CONFIG_NETDEVSIM is not set
+# CONFIG_NET_FAILOVER is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_LEDS=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+# CONFIG_INPUT_MATRIXKMAP is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_QT1050 is not set
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_DLINK_DIR685 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_LM8333 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_PINEPHONE is not set
+# CONFIG_KEYBOARD_SAMSUNG is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_OMAP4 is not set
+# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_CAP11XX is not set
+# CONFIG_KEYBOARD_BCM is not set
+# CONFIG_KEYBOARD_CYPRESS_SF is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_ATMEL_CAPTOUCH is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_E3X0_BUTTON is not set
+# CONFIG_INPUT_MMA8450 is not set
+# CONFIG_INPUT_GPIO_BEEPER is not set
+# CONFIG_INPUT_GPIO_DECODER is not set
+# CONFIG_INPUT_GPIO_VIBRA is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_KXTJ9 is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+# CONFIG_INPUT_REGULATOR_HAPTIC is not set
+# CONFIG_INPUT_UINPUT is not set
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+# CONFIG_INPUT_DA7280_HAPTICS is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IMS_PCU is not set
+# CONFIG_INPUT_IQS269A is not set
+# CONFIG_INPUT_IQS626A is not set
+# CONFIG_INPUT_IQS7222 is not set
+# CONFIG_INPUT_CMA3000 is not set
+# CONFIG_INPUT_DRV260X_HAPTICS is not set
+# CONFIG_INPUT_DRV2665_HAPTICS is not set
+# CONFIG_INPUT_DRV2667_HAPTICS is not set
+# CONFIG_INPUT_SMSC_CAP1066 is not set
+# CONFIG_RMI4_CORE is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+# end of Hardware I/O ports
+# end of Input device support
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=16
+# CONFIG_LEGACY_TIOCSTI is not set
+# CONFIG_LDISC_AUTOLOAD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_EARLYCON=y
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+# CONFIG_SERIAL_EARLYCON_SEMIHOST is not set
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX310X is not set
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+# CONFIG_SERIAL_SIFIVE is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_SC16IS7XX is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+# CONFIG_SERIAL_ARC is not set
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_SERIAL_FSL_LPUART is not set
+# CONFIG_SERIAL_FSL_LINFLEXUART is not set
+# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set
+# CONFIG_SERIAL_SPRD is not set
+# end of Serial drivers
+
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_N_GSM is not set
+# CONFIG_NOZOMI is not set
+# CONFIG_NULL_TTY is not set
+# CONFIG_HVC_DCC is not set
+# CONFIG_RPMSG_TTY is not set
+CONFIG_SERIAL_DEV_BUS=y
+# CONFIG_SERIAL_DEV_CTRL_TTYPORT is not set
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_VIRTIO_CONSOLE is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_HW_RANDOM_BA431 is not set
+# CONFIG_HW_RANDOM_OPTEE is not set
+# CONFIG_HW_RANDOM_CCTRNG is not set
+# CONFIG_HW_RANDOM_XIPHERA is not set
+# CONFIG_HW_RANDOM_ARM_SMCCC_TRNG is not set
+# CONFIG_HW_RANDOM_CN10K is not set
+CONFIG_HW_RANDOM_QCOM=y
+# CONFIG_APPLICOM is not set
+CONFIG_DEVMEM=y
+# CONFIG_DEVPHYSMEM is not set
+# CONFIG_DEVPORT is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_XILLYBUS is not set
+# CONFIG_XILLYUSB is not set
+# end of Character devices
+
+#
+# Diag Support
+#
+CONFIG_DIAG_CHAR=m
+# end of Diag Support
+
+#
+# DIAG traffic over USB
+#
+# CONFIG_DIAG_OVER_USB is not set
+# end of DIAG traffic over USB
+
+#
+# DIAG traffic over QRTR
+#
+CONFIG_DIAG_OVER_QRTR=y
+# end of DIAG traffic over QRTR
+
+#
+# HSIC/SMUX support for DIAG
+#
+# CONFIG_DIAGFWD_BRIDGE_CODE is not set
+# end of HSIC/SMUX support for DIAG
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+
+#
+# Multiplexer I2C Chip support
+#
+# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set
+CONFIG_I2C_MUX_GPIO=y
+# CONFIG_I2C_MUX_GPMUX is not set
+# CONFIG_I2C_MUX_LTC4306 is not set
+# CONFIG_I2C_MUX_PCA9541 is not set
+# CONFIG_I2C_MUX_PCA954x is not set
+# CONFIG_I2C_MUX_PINCTRL is not set
+# CONFIG_I2C_MUX_REG is not set
+# CONFIG_I2C_DEMUX_PINCTRL is not set
+# CONFIG_I2C_MUX_MLXCPLD is not set
+# end of Multiplexer I2C Chip support
+
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_NVIDIA_GPU is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_CADENCE is not set
+# CONFIG_I2C_CBUS_GPIO is not set
+# CONFIG_I2C_DESIGNWARE_PLATFORM is not set
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EMEV2 is not set
+CONFIG_I2C_GPIO=y
+# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set
+# CONFIG_I2C_HISI is not set
+# CONFIG_I2C_NOMADIK is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_QCOM_CCI is not set
+CONFIG_I2C_QUP=y
+# CONFIG_I2C_RK3X is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_THUNDERX is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_CP2615 is not set
+# CONFIG_I2C_PCI1XXXX is not set
+# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_VIRTIO is not set
+# end of I2C Hardware Bus support
+
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_SLAVE is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# end of I2C support
+
+# CONFIG_I3C is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_MEM=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+# CONFIG_SPI_AXI_SPI_ENGINE is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_CADENCE is not set
+# CONFIG_SPI_CADENCE_QUADSPI is not set
+# CONFIG_SPI_CADENCE_XSPI is not set
+# CONFIG_SPI_DESIGNWARE is not set
+# CONFIG_SPI_GPIO is not set
+# CONFIG_SPI_FSL_SPI is not set
+# CONFIG_SPI_MICROCHIP_CORE is not set
+# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set
+# CONFIG_SPI_OC_TINY is not set
+# CONFIG_SPI_PCI1XXXX is not set
+# CONFIG_SPI_PL022 is not set
+# CONFIG_SPI_PXA2XX is not set
+# CONFIG_SPI_QCOM_QSPI is not set
+CONFIG_SPI_QUP=y
+# CONFIG_SPI_SC18IS602 is not set
+# CONFIG_SPI_SIFIVE is not set
+# CONFIG_SPI_SN_F_OSPI is not set
+# CONFIG_SPI_MXIC is not set
+# CONFIG_SPI_THUNDERX is not set
+# CONFIG_SPI_XCOMM is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_ZYNQMP_GQSPI is not set
+# CONFIG_SPI_AMD is not set
+
+#
+# SPI Multiplexer support
+#
+# CONFIG_SPI_MUX is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_LOOPBACK_TEST is not set
+# CONFIG_SPI_TLE62X0 is not set
+# CONFIG_SPI_SLAVE is not set
+CONFIG_SPI_DYNAMIC=y
+# CONFIG_SPMI is not set
+# CONFIG_HSI is not set
+# CONFIG_PPS is not set
+
+#
+# PTP clock support
+#
+# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_PTP_1588_CLOCK_OPTIONAL=y
+
+#
+# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
+#
+# end of PTP clock support
+
+CONFIG_PINCTRL=y
+CONFIG_PINMUX=y
+CONFIG_PINCONF=y
+CONFIG_GENERIC_PINCONF=y
+# CONFIG_DEBUG_PINCTRL is not set
+# CONFIG_PINCTRL_CY8C95X0 is not set
+# CONFIG_PINCTRL_MCP23S08 is not set
+# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set
+# CONFIG_PINCTRL_OCELOT is not set
+# CONFIG_PINCTRL_SINGLE is not set
+# CONFIG_PINCTRL_STMFX is not set
+# CONFIG_PINCTRL_SX150X is not set
+CONFIG_PINCTRL_MSM=y
+# CONFIG_PINCTRL_IPQ5332 is not set
+# CONFIG_PINCTRL_IPQ8074 is not set
+# CONFIG_PINCTRL_IPQ6018 is not set
+CONFIG_PINCTRL_IPQ9574=y
+# CONFIG_PINCTRL_MDM9607 is not set
+# CONFIG_PINCTRL_MSM8916 is not set
+# CONFIG_PINCTRL_MSM8953 is not set
+# CONFIG_PINCTRL_MSM8976 is not set
+# CONFIG_PINCTRL_MSM8994 is not set
+# CONFIG_PINCTRL_MSM8996 is not set
+# CONFIG_PINCTRL_MSM8998 is not set
+# CONFIG_PINCTRL_QCM2290 is not set
+# CONFIG_PINCTRL_QCS404 is not set
+# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set
+# CONFIG_PINCTRL_QDU1000 is not set
+# CONFIG_PINCTRL_SA8775P is not set
+# CONFIG_PINCTRL_SC7180 is not set
+# CONFIG_PINCTRL_SC7280 is not set
+# CONFIG_PINCTRL_SC8180X is not set
+# CONFIG_PINCTRL_SC8280XP is not set
+# CONFIG_PINCTRL_SDM660 is not set
+# CONFIG_PINCTRL_SDM670 is not set
+# CONFIG_PINCTRL_SDM845 is not set
+# CONFIG_PINCTRL_SM6115 is not set
+# CONFIG_PINCTRL_SM6125 is not set
+# CONFIG_PINCTRL_SM6350 is not set
+# CONFIG_PINCTRL_SM6375 is not set
+# CONFIG_PINCTRL_SM7150 is not set
+# CONFIG_PINCTRL_SM8150 is not set
+# CONFIG_PINCTRL_SM8250 is not set
+# CONFIG_PINCTRL_SM8350 is not set
+# CONFIG_PINCTRL_SM8450 is not set
+# CONFIG_PINCTRL_SM8550 is not set
+# CONFIG_PINCTRL_LPASS_LPI is not set
+
+#
+# Renesas pinctrl drivers
+#
+# end of Renesas pinctrl drivers
+
+CONFIG_GPIOLIB=y
+CONFIG_GPIOLIB_FASTPATH_LIMIT=512
+CONFIG_OF_GPIO=y
+CONFIG_GPIOLIB_IRQCHIP=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_CDEV=y
+# CONFIG_GPIO_CDEV_V1 is not set
+
+#
+# Memory mapped GPIO drivers
+#
+# CONFIG_GPIO_74XX_MMIO is not set
+# CONFIG_GPIO_ALTERA is not set
+# CONFIG_GPIO_CADENCE is not set
+# CONFIG_GPIO_DWAPB is not set
+# CONFIG_GPIO_FTGPIO010 is not set
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
+# CONFIG_GPIO_GRGPIO is not set
+# CONFIG_GPIO_HISI is not set
+# CONFIG_GPIO_HLWD is not set
+# CONFIG_GPIO_LOGICVC is not set
+# CONFIG_GPIO_MB86S7X is not set
+# CONFIG_GPIO_PL061 is not set
+# CONFIG_GPIO_SIFIVE is not set
+# CONFIG_GPIO_SYSCON is not set
+# CONFIG_GPIO_XGENE is not set
+# CONFIG_GPIO_XILINX is not set
+# CONFIG_GPIO_AMD_FCH is not set
+# end of Memory mapped GPIO drivers
+
+#
+# I2C GPIO expanders
+#
+# CONFIG_GPIO_ADNP is not set
+CONFIG_GPIO_FBXGWR_PMU=y
+# CONFIG_GPIO_FXL6408 is not set
+# CONFIG_GPIO_GW_PLD is not set
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCA9570 is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_TPIC2810 is not set
+# end of I2C GPIO expanders
+
+#
+# MFD GPIO expanders
+#
+# end of MFD GPIO expanders
+
+#
+# PCI GPIO expanders
+#
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_PCI_IDIO_16 is not set
+# CONFIG_GPIO_PCIE_IDIO_24 is not set
+# CONFIG_GPIO_RDC321X is not set
+# end of PCI GPIO expanders
+
+#
+# SPI GPIO expanders
+#
+# CONFIG_GPIO_74X164 is not set
+# CONFIG_GPIO_MAX3191X is not set
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_PISOSR is not set
+# CONFIG_GPIO_XRA1403 is not set
+# end of SPI GPIO expanders
+
+#
+# USB GPIO expanders
+#
+# end of USB GPIO expanders
+
+#
+# Virtual GPIO drivers
+#
+# CONFIG_GPIO_AGGREGATOR is not set
+# CONFIG_GPIO_LATCH is not set
+# CONFIG_GPIO_MOCKUP is not set
+# CONFIG_GPIO_SIM is not set
+# end of Virtual GPIO drivers
+
+CONFIG_FREEBOX_GPIO=y
+CONFIG_FREEBOX_GPIO_DT=y
+# CONFIG_FREEBOX_JTAG is not set
+# CONFIG_W1 is not set
+CONFIG_POWER_RESET=y
+# CONFIG_POWER_RESET_BRCMSTB is not set
+# CONFIG_POWER_RESET_GPIO is not set
+# CONFIG_POWER_RESET_GPIO_RESTART is not set
+# CONFIG_POWER_RESET_MSM is not set
+# CONFIG_POWER_RESET_LTC2952 is not set
+# CONFIG_POWER_RESET_REGULATOR is not set
+# CONFIG_POWER_RESET_RESTART is not set
+# CONFIG_POWER_RESET_XGENE is not set
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
+# CONFIG_SYSCON_REBOOT_MODE is not set
+# CONFIG_NVMEM_REBOOT_MODE is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_POWER_SUPPLY_HWMON is not set
+# CONFIG_IP5XXX_POWER is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_CHARGER_ADP5061 is not set
+# CONFIG_BATTERY_CW2015 is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SAMSUNG_SDI is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_CHARGER_SBS is not set
+# CONFIG_MANAGER_SBS is not set
+# CONFIG_BATTERY_BQ27XXX is not set
+# CONFIG_BATTERY_MAX17040 is not set
+# CONFIG_BATTERY_MAX17042 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_GPIO is not set
+# CONFIG_CHARGER_MANAGER is not set
+# CONFIG_CHARGER_LT3651 is not set
+# CONFIG_CHARGER_LTC4162L is not set
+# CONFIG_CHARGER_DETECTOR_MAX14656 is not set
+# CONFIG_CHARGER_MAX77976 is not set
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_BQ24257 is not set
+# CONFIG_CHARGER_BQ24735 is not set
+# CONFIG_CHARGER_BQ2515X is not set
+# CONFIG_CHARGER_BQ25890 is not set
+# CONFIG_CHARGER_BQ25980 is not set
+# CONFIG_CHARGER_BQ256XX is not set
+# CONFIG_CHARGER_SMB347 is not set
+# CONFIG_BATTERY_GAUGE_LTC2941 is not set
+# CONFIG_BATTERY_GOLDFISH is not set
+# CONFIG_BATTERY_RT5033 is not set
+# CONFIG_CHARGER_RT9455 is not set
+# CONFIG_CHARGER_RT9467 is not set
+# CONFIG_CHARGER_RT9471 is not set
+# CONFIG_CHARGER_UCS1002 is not set
+# CONFIG_CHARGER_BD99954 is not set
+# CONFIG_BATTERY_UG3105 is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7314 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM1177 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7310 is not set
+# CONFIG_SENSORS_ADT7410 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_AHT10 is not set
+# CONFIG_SENSORS_AS370 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_AXI_FAN_CONTROL is not set
+CONFIG_SENSORS_FBXGWR_PMU=y
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_CORSAIR_CPRO is not set
+# CONFIG_SENSORS_CORSAIR_PSU is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_G762 is not set
+# CONFIG_SENSORS_GPIO_FAN is not set
+# CONFIG_SENSORS_HIH6130 is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_POWR1220 is not set
+# CONFIG_SENSORS_LINEAGE is not set
+# CONFIG_SENSORS_LTC2945 is not set
+# CONFIG_SENSORS_LTC2947_I2C is not set
+# CONFIG_SENSORS_LTC2947_SPI is not set
+# CONFIG_SENSORS_LTC2990 is not set
+# CONFIG_SENSORS_LTC2992 is not set
+# CONFIG_SENSORS_LTC4151 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4222 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LTC4260 is not set
+# CONFIG_SENSORS_LTC4261 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX127 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX1668 is not set
+# CONFIG_SENSORS_MAX197 is not set
+# CONFIG_SENSORS_MAX31722 is not set
+# CONFIG_SENSORS_MAX31730 is not set
+# CONFIG_SENSORS_MAX31760 is not set
+# CONFIG_SENSORS_MAX6620 is not set
+# CONFIG_SENSORS_MAX6621 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6642 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_MAX6697 is not set
+# CONFIG_SENSORS_MAX31790 is not set
+# CONFIG_SENSORS_MC34VR500 is not set
+# CONFIG_SENSORS_MCP3021 is not set
+# CONFIG_SENSORS_TC654 is not set
+# CONFIG_SENSORS_TPS23861 is not set
+# CONFIG_SENSORS_MR75203 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LM95234 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_LM95245 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_NCT6683 is not set
+# CONFIG_SENSORS_NCT6775 is not set
+# CONFIG_SENSORS_NCT6775_I2C is not set
+# CONFIG_SENSORS_NCT7802 is not set
+# CONFIG_SENSORS_NPCM7XX is not set
+# CONFIG_SENSORS_OCC_P8_I2C is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_PMBUS is not set
+# CONFIG_SENSORS_SBTSI is not set
+# CONFIG_SENSORS_SBRMI is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SHT3x is not set
+# CONFIG_SENSORS_SHT4x is not set
+# CONFIG_SENSORS_SHTC1 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC2305 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_STTS751 is not set
+# CONFIG_SENSORS_SMM665 is not set
+# CONFIG_SENSORS_ADC128D818 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_INA209 is not set
+# CONFIG_SENSORS_INA2XX is not set
+# CONFIG_SENSORS_INA238 is not set
+# CONFIG_SENSORS_INA3221 is not set
+# CONFIG_SENSORS_TC74 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP103 is not set
+# CONFIG_SENSORS_TMP108 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_TMP464 is not set
+# CONFIG_SENSORS_TMP513 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83773G is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_LD6710_FBX is not set
+# CONFIG_SENSORS_AP806 is not set
+CONFIG_THERMAL=y
+# CONFIG_THERMAL_NETLINK is not set
+CONFIG_THERMAL_STATISTICS=y
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
+# CONFIG_THERMAL_HWMON is not set
+CONFIG_THERMAL_OF=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
+CONFIG_THERMAL_GOV_FAIR_SHARE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_BANG_BANG=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+# CONFIG_CPU_THERMAL is not set
+# CONFIG_THERMAL_EMULATION is not set
+# CONFIG_THERMAL_MMIO is not set
+
+#
+# Qualcomm thermal drivers
+#
+CONFIG_QCOM_TSENS=y
+# CONFIG_QCOM_LMH is not set
+# end of Qualcomm thermal drivers
+
+CONFIG_FREEBOX_WATCHDOG=y
+# CONFIG_FREEBOX_WATCHDOG_CHAR is not set
+# CONFIG_FREEBOX_WATCHDOG_BCM63XX_OF is not set
+CONFIG_FREEBOX_WATCHDOG_FBXGWR_PMU=y
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_ACT8945A is not set
+# CONFIG_MFD_AS3711 is not set
+# CONFIG_MFD_SMPRO is not set
+# CONFIG_MFD_AS3722 is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_AAT2870_CORE is not set
+# CONFIG_MFD_ATMEL_FLEXCOM is not set
+# CONFIG_MFD_ATMEL_HLCDC is not set
+# CONFIG_MFD_BCM590XX is not set
+# CONFIG_MFD_BD9571MWV is not set
+# CONFIG_MFD_AXP20X_I2C is not set
+# CONFIG_MFD_MADERA is not set
+# CONFIG_MFD_MAX597X is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_DA9052_SPI is not set
+# CONFIG_MFD_DA9052_I2C is not set
+# CONFIG_MFD_DA9055 is not set
+# CONFIG_MFD_DA9062 is not set
+# CONFIG_MFD_DA9063 is not set
+# CONFIG_MFD_DA9150 is not set
+# CONFIG_MFD_DLN2 is not set
+# CONFIG_MFD_GATEWORKS_GSC is not set
+# CONFIG_MFD_MC13XXX_SPI is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_MFD_MP2629 is not set
+# CONFIG_MFD_HI6421_PMIC is not set
+# CONFIG_LPC_ICH is not set
+# CONFIG_LPC_SCH is not set
+# CONFIG_MFD_IQS62X is not set
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_KEMPLD is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_MAX14577 is not set
+# CONFIG_MFD_MAX77620 is not set
+# CONFIG_MFD_MAX77650 is not set
+# CONFIG_MFD_MAX77686 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX77714 is not set
+# CONFIG_MFD_MAX77843 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_MFD_MT6360 is not set
+# CONFIG_MFD_MT6370 is not set
+# CONFIG_MFD_MT6397 is not set
+# CONFIG_MFD_MENF21BMC is not set
+# CONFIG_MFD_OCELOT is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_CPCAP is not set
+# CONFIG_MFD_VIPERBOARD is not set
+# CONFIG_MFD_NTXEC is not set
+# CONFIG_MFD_RETU is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_QCOM_RPM is not set
+# CONFIG_MFD_SY7636A is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RT4831 is not set
+# CONFIG_MFD_RT5033 is not set
+# CONFIG_MFD_RT5120 is not set
+# CONFIG_MFD_RC5T583 is not set
+# CONFIG_MFD_RK808 is not set
+# CONFIG_MFD_RN5T618 is not set
+# CONFIG_MFD_SEC_CORE is not set
+# CONFIG_MFD_SI476X_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_SKY81452 is not set
+# CONFIG_MFD_STMPE is not set
+CONFIG_MFD_SYSCON=y
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_LP3943 is not set
+# CONFIG_MFD_LP8788 is not set
+# CONFIG_MFD_TI_LMU is not set
+# CONFIG_MFD_PALMAS is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65086 is not set
+# CONFIG_MFD_TPS65090 is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TI_LP873X is not set
+# CONFIG_MFD_TI_LP87565 is not set
+# CONFIG_MFD_TPS65218 is not set
+# CONFIG_MFD_TPS65219 is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS65910 is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_TWL6040_CORE is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TQMX86 is not set
+# CONFIG_MFD_VX855 is not set
+# CONFIG_MFD_LOCHNAGAR is not set
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_ARIZONA_SPI is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_ROHM_BD718XX is not set
+# CONFIG_MFD_ROHM_BD71828 is not set
+# CONFIG_MFD_ROHM_BD957XMUF is not set
+# CONFIG_MFD_STPMIC1 is not set
+# CONFIG_MFD_STMFX is not set
+# CONFIG_MFD_FBXGW7R_PANEL is not set
+CONFIG_MFD_FBXGWR_PMU=y
+# CONFIG_MFD_ATC260X_I2C is not set
+# CONFIG_MFD_QCOM_PM8008 is not set
+# CONFIG_RAVE_SP_CORE is not set
+# CONFIG_MFD_INTEL_M10_BMC_SPI is not set
+# CONFIG_MFD_RSMU_I2C is not set
+# CONFIG_MFD_RSMU_SPI is not set
+# end of Multifunction device drivers
+
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+CONFIG_REGULATOR_FAULT_SENSING=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_88PG86X is not set
+# CONFIG_REGULATOR_ACT8865 is not set
+# CONFIG_REGULATOR_AD5398 is not set
+# CONFIG_REGULATOR_DA9121 is not set
+# CONFIG_REGULATOR_DA9210 is not set
+# CONFIG_REGULATOR_DA9211 is not set
+# CONFIG_REGULATOR_FAN53555 is not set
+# CONFIG_REGULATOR_FAN53880 is not set
+# CONFIG_REGULATOR_GPIO is not set
+# CONFIG_REGULATOR_ISL9305 is not set
+# CONFIG_REGULATOR_ISL6271A is not set
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_LP3972 is not set
+# CONFIG_REGULATOR_LP872X is not set
+# CONFIG_REGULATOR_LP8755 is not set
+# CONFIG_REGULATOR_LTC3589 is not set
+# CONFIG_REGULATOR_LTC3676 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+# CONFIG_REGULATOR_MAX8893 is not set
+# CONFIG_REGULATOR_MAX8952 is not set
+# CONFIG_REGULATOR_MAX8973 is not set
+# CONFIG_REGULATOR_MAX20086 is not set
+# CONFIG_REGULATOR_MAX20411 is not set
+# CONFIG_REGULATOR_MAX77826 is not set
+# CONFIG_REGULATOR_MCP16502 is not set
+# CONFIG_REGULATOR_MP5416 is not set
+# CONFIG_REGULATOR_MP8859 is not set
+# CONFIG_REGULATOR_MP886X is not set
+# CONFIG_REGULATOR_MPQ7920 is not set
+# CONFIG_REGULATOR_MT6311 is not set
+# CONFIG_REGULATOR_PCA9450 is not set
+# CONFIG_REGULATOR_PF8X00 is not set
+# CONFIG_REGULATOR_PFUZE100 is not set
+# CONFIG_REGULATOR_PV88060 is not set
+# CONFIG_REGULATOR_PV88080 is not set
+# CONFIG_REGULATOR_PV88090 is not set
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+# CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY is not set
+# CONFIG_REGULATOR_RT4801 is not set
+# CONFIG_REGULATOR_RT4803 is not set
+# CONFIG_REGULATOR_RT5190A is not set
+# CONFIG_REGULATOR_RT5739 is not set
+# CONFIG_REGULATOR_RT5759 is not set
+# CONFIG_REGULATOR_RT6160 is not set
+# CONFIG_REGULATOR_RT6190 is not set
+# CONFIG_REGULATOR_RT6245 is not set
+# CONFIG_REGULATOR_RTQ2134 is not set
+# CONFIG_REGULATOR_RTMV20 is not set
+# CONFIG_REGULATOR_RTQ6752 is not set
+# CONFIG_REGULATOR_SLG51000 is not set
+# CONFIG_REGULATOR_SY8106A is not set
+# CONFIG_REGULATOR_SY8824X is not set
+# CONFIG_REGULATOR_SY8827N is not set
+# CONFIG_REGULATOR_TPS51632 is not set
+# CONFIG_REGULATOR_TPS62360 is not set
+# CONFIG_REGULATOR_TPS6286X is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+# CONFIG_REGULATOR_TPS65132 is not set
+# CONFIG_REGULATOR_TPS6524X is not set
+# CONFIG_REGULATOR_VCTRL is not set
+# CONFIG_REGULATOR_VQMMC_IPQ4019 is not set
+CONFIG_RC_CORE=y
+# CONFIG_LIRC is not set
+# CONFIG_RC_MAP is not set
+# CONFIG_RC_DECODERS is not set
+# CONFIG_RC_DEVICES is not set
+
+#
+# CEC support
+#
+# CONFIG_MEDIA_CEC_SUPPORT is not set
+# end of CEC support
+
+CONFIG_MEDIA_SUPPORT=y
+# CONFIG_MEDIA_SUPPORT_FILTER is not set
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+
+#
+# Media device types
+#
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_RADIO_SUPPORT=y
+CONFIG_MEDIA_SDR_SUPPORT=y
+CONFIG_MEDIA_PLATFORM_SUPPORT=y
+CONFIG_MEDIA_TEST_SUPPORT=y
+# end of Media device types
+
+#
+# Media core support
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_MEDIA_CONTROLLER is not set
+CONFIG_DVB_CORE=y
+# end of Media core support
+
+#
+# Digital TV options
+#
+# CONFIG_DVB_NET is not set
+CONFIG_DVB_MAX_ADAPTERS=8
+# CONFIG_DVB_DYNAMIC_MINORS is not set
+# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set
+# CONFIG_DVB_ULE_DEBUG is not set
+# end of Digital TV options
+
+#
+# Media drivers
+#
+
+#
+# Media drivers
+#
+CONFIG_MEDIA_USB_SUPPORT=y
+
+#
+# Webcam devices
+#
+
+#
+# Analog TV USB devices
+#
+
+#
+# Analog/digital TV USB devices
+#
+
+#
+# Digital TV USB devices
+#
+# CONFIG_DVB_AS102 is not set
+# CONFIG_DVB_B2C2_FLEXCOP_USB is not set
+CONFIG_DVB_USB_V2=y
+# CONFIG_DVB_USB_AF9015 is not set
+CONFIG_DVB_USB_AF9035=m
+# CONFIG_DVB_USB_ANYSEE is not set
+# CONFIG_DVB_USB_AU6610 is not set
+# CONFIG_DVB_USB_AZ6007 is not set
+# CONFIG_DVB_USB_CE6230 is not set
+# CONFIG_DVB_USB_DVBSKY is not set
+# CONFIG_DVB_USB_EC168 is not set
+# CONFIG_DVB_USB_GL861 is not set
+# CONFIG_DVB_USB_LME2510 is not set
+# CONFIG_DVB_USB_MXL111SF is not set
+# CONFIG_DVB_USB_RTL28XXU is not set
+# CONFIG_DVB_USB_ZD1301 is not set
+CONFIG_DVB_USB=y
+# CONFIG_DVB_USB_DEBUG is not set
+# CONFIG_DVB_USB_A800 is not set
+# CONFIG_DVB_USB_AF9005 is not set
+# CONFIG_DVB_USB_AZ6027 is not set
+# CONFIG_DVB_USB_CINERGY_T2 is not set
+# CONFIG_DVB_USB_CXUSB is not set
+CONFIG_DVB_USB_DIB0700=m
+# CONFIG_DVB_USB_DIBUSB_MB is not set
+# CONFIG_DVB_USB_DIBUSB_MC is not set
+# CONFIG_DVB_USB_DIGITV is not set
+# CONFIG_DVB_USB_DTT200U is not set
+# CONFIG_DVB_USB_DTV5100 is not set
+# CONFIG_DVB_USB_DW2102 is not set
+# CONFIG_DVB_USB_GP8PSK is not set
+# CONFIG_DVB_USB_M920X is not set
+# CONFIG_DVB_USB_NOVA_T_USB2 is not set
+# CONFIG_DVB_USB_OPERA1 is not set
+# CONFIG_DVB_USB_PCTV452E is not set
+# CONFIG_DVB_USB_TECHNISAT_USB2 is not set
+# CONFIG_DVB_USB_TTUSB2 is not set
+# CONFIG_DVB_USB_UMT_010 is not set
+# CONFIG_DVB_USB_VP702X is not set
+# CONFIG_DVB_USB_VP7045 is not set
+# CONFIG_SMS_USB_DRV is not set
+# CONFIG_DVB_TTUSB_BUDGET is not set
+# CONFIG_DVB_TTUSB_DEC is not set
+
+#
+# Webcam, TV (analog/digital) USB devices
+#
+
+#
+# Software defined radio USB devices
+#
+# CONFIG_MEDIA_PCI_SUPPORT is not set
+# CONFIG_MEDIA_PLATFORM_DRIVERS is not set
+
+#
+# MMC/SDIO DVB adapters
+#
+# CONFIG_SMS_SDIO_DRV is not set
+# CONFIG_DVB_TEST_DRIVERS is not set
+CONFIG_CYPRESS_FIRMWARE=y
+# end of Media drivers
+
+#
+# Media ancillary drivers
+#
+CONFIG_MEDIA_ATTACH=y
+CONFIG_MEDIA_TUNER=y
+
+#
+# Customize TV tuners
+#
+# CONFIG_MEDIA_TUNER_FC0011 is not set
+# CONFIG_MEDIA_TUNER_FC0012 is not set
+# CONFIG_MEDIA_TUNER_FC0013 is not set
+CONFIG_MEDIA_TUNER_IT913X=m
+# CONFIG_MEDIA_TUNER_M88RS6000T is not set
+# CONFIG_MEDIA_TUNER_MAX2165 is not set
+# CONFIG_MEDIA_TUNER_MC44S803 is not set
+# CONFIG_MEDIA_TUNER_MT2060 is not set
+# CONFIG_MEDIA_TUNER_MT2063 is not set
+# CONFIG_MEDIA_TUNER_MT20XX is not set
+# CONFIG_MEDIA_TUNER_MT2131 is not set
+# CONFIG_MEDIA_TUNER_MT2266 is not set
+# CONFIG_MEDIA_TUNER_MXL301RF is not set
+# CONFIG_MEDIA_TUNER_MXL5005S is not set
+# CONFIG_MEDIA_TUNER_MXL5007T is not set
+# CONFIG_MEDIA_TUNER_QM1D1B0004 is not set
+# CONFIG_MEDIA_TUNER_QM1D1C0042 is not set
+# CONFIG_MEDIA_TUNER_QT1010 is not set
+# CONFIG_MEDIA_TUNER_R820T is not set
+# CONFIG_MEDIA_TUNER_SI2157 is not set
+# CONFIG_MEDIA_TUNER_SIMPLE is not set
+# CONFIG_MEDIA_TUNER_TDA18212 is not set
+# CONFIG_MEDIA_TUNER_TDA18218 is not set
+# CONFIG_MEDIA_TUNER_TDA18250 is not set
+# CONFIG_MEDIA_TUNER_TDA18271 is not set
+# CONFIG_MEDIA_TUNER_TDA827X is not set
+# CONFIG_MEDIA_TUNER_TDA8290 is not set
+# CONFIG_MEDIA_TUNER_TDA9887 is not set
+# CONFIG_MEDIA_TUNER_TEA5761 is not set
+# CONFIG_MEDIA_TUNER_TEA5767 is not set
+# CONFIG_MEDIA_TUNER_TUA9001 is not set
+# CONFIG_MEDIA_TUNER_XC2028 is not set
+# CONFIG_MEDIA_TUNER_XC4000 is not set
+# CONFIG_MEDIA_TUNER_XC5000 is not set
+# end of Customize TV tuners
+
+#
+# Customise DVB Frontends
+#
+
+#
+# Multistandard (satellite) frontends
+#
+CONFIG_DVB_M88DS3103=m
+# CONFIG_DVB_MXL5XX is not set
+# CONFIG_DVB_STB0899 is not set
+# CONFIG_DVB_STB6100 is not set
+# CONFIG_DVB_STV090x is not set
+# CONFIG_DVB_STV0910 is not set
+# CONFIG_DVB_STV6110x is not set
+# CONFIG_DVB_STV6111 is not set
+
+#
+# Multistandard (cable + terrestrial) frontends
+#
+# CONFIG_DVB_DRXK is not set
+# CONFIG_DVB_MN88472 is not set
+# CONFIG_DVB_MN88473 is not set
+# CONFIG_DVB_SI2165 is not set
+# CONFIG_DVB_TDA18271C2DD is not set
+
+#
+# DVB-S (satellite) frontends
+#
+# CONFIG_DVB_CX24110 is not set
+# CONFIG_DVB_CX24116 is not set
+# CONFIG_DVB_CX24117 is not set
+# CONFIG_DVB_CX24120 is not set
+# CONFIG_DVB_CX24123 is not set
+# CONFIG_DVB_DS3000 is not set
+# CONFIG_DVB_MB86A16 is not set
+# CONFIG_DVB_MT312 is not set
+# CONFIG_DVB_S5H1420 is not set
+# CONFIG_DVB_SI21XX is not set
+# CONFIG_DVB_STB6000 is not set
+# CONFIG_DVB_STV0288 is not set
+# CONFIG_DVB_STV0299 is not set
+# CONFIG_DVB_STV0900 is not set
+# CONFIG_DVB_STV6110 is not set
+# CONFIG_DVB_TDA10071 is not set
+# CONFIG_DVB_TDA10086 is not set
+# CONFIG_DVB_TDA8083 is not set
+# CONFIG_DVB_TDA8261 is not set
+# CONFIG_DVB_TDA826X is not set
+# CONFIG_DVB_TS2020 is not set
+# CONFIG_DVB_TUA6100 is not set
+# CONFIG_DVB_TUNER_CX24113 is not set
+# CONFIG_DVB_TUNER_ITD1000 is not set
+# CONFIG_DVB_VES1X93 is not set
+# CONFIG_DVB_ZL10036 is not set
+# CONFIG_DVB_ZL10039 is not set
+
+#
+# DVB-T (terrestrial) frontends
+#
+CONFIG_DVB_AF9013=m
+# CONFIG_DVB_CX22700 is not set
+# CONFIG_DVB_CX22702 is not set
+# CONFIG_DVB_CXD2820R is not set
+# CONFIG_DVB_CXD2841ER is not set
+# CONFIG_DVB_DIB3000MB is not set
+# CONFIG_DVB_DIB3000MC is not set
+CONFIG_DVB_DIB7000M=m
+CONFIG_DVB_DIB7000P=m
+# CONFIG_DVB_DIB9000 is not set
+# CONFIG_DVB_DRXD is not set
+# CONFIG_DVB_EC100 is not set
+# CONFIG_DVB_L64781 is not set
+# CONFIG_DVB_MT352 is not set
+# CONFIG_DVB_NXT6000 is not set
+CONFIG_DVB_RTL2830=m
+CONFIG_DVB_RTL2832=m
+# CONFIG_DVB_S5H1432 is not set
+CONFIG_DVB_SI2168=m
+# CONFIG_DVB_SP887X is not set
+# CONFIG_DVB_STV0367 is not set
+# CONFIG_DVB_TDA10048 is not set
+# CONFIG_DVB_TDA1004X is not set
+# CONFIG_DVB_ZD1301_DEMOD is not set
+# CONFIG_DVB_ZL10353 is not set
+# CONFIG_DVB_CXD2880 is not set
+
+#
+# DVB-C (cable) frontends
+#
+# CONFIG_DVB_STV0297 is not set
+# CONFIG_DVB_TDA10021 is not set
+# CONFIG_DVB_TDA10023 is not set
+# CONFIG_DVB_VES1820 is not set
+
+#
+# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
+#
+# CONFIG_DVB_AU8522_DTV is not set
+# CONFIG_DVB_BCM3510 is not set
+# CONFIG_DVB_LG2160 is not set
+# CONFIG_DVB_LGDT3305 is not set
+CONFIG_DVB_LGDT3306A=m
+# CONFIG_DVB_LGDT330X is not set
+# CONFIG_DVB_MXL692 is not set
+# CONFIG_DVB_NXT200X is not set
+# CONFIG_DVB_OR51132 is not set
+# CONFIG_DVB_OR51211 is not set
+# CONFIG_DVB_S5H1409 is not set
+# CONFIG_DVB_S5H1411 is not set
+
+#
+# ISDB-T (terrestrial) frontends
+#
+# CONFIG_DVB_DIB8000 is not set
+# CONFIG_DVB_MB86A20S is not set
+# CONFIG_DVB_S921 is not set
+
+#
+# ISDB-S (satellite) & ISDB-T (terrestrial) frontends
+#
+# CONFIG_DVB_MN88443X is not set
+# CONFIG_DVB_TC90522 is not set
+
+#
+# Digital terrestrial only tuners/PLL
+#
+# CONFIG_DVB_PLL is not set
+CONFIG_DVB_TUNER_DIB0070=m
+# CONFIG_DVB_TUNER_DIB0090 is not set
+
+#
+# SEC control devices for DVB-S
+#
+# CONFIG_DVB_A8293 is not set
+CONFIG_DVB_AF9033=m
+# CONFIG_DVB_ASCOT2E is not set
+# CONFIG_DVB_ATBM8830 is not set
+# CONFIG_DVB_HELENE is not set
+# CONFIG_DVB_HORUS3A is not set
+# CONFIG_DVB_ISL6405 is not set
+# CONFIG_DVB_ISL6421 is not set
+# CONFIG_DVB_ISL6423 is not set
+# CONFIG_DVB_IX2505V is not set
+# CONFIG_DVB_LGS8GL5 is not set
+# CONFIG_DVB_LGS8GXX is not set
+# CONFIG_DVB_LNBH25 is not set
+# CONFIG_DVB_LNBH29 is not set
+# CONFIG_DVB_LNBP21 is not set
+# CONFIG_DVB_LNBP22 is not set
+# CONFIG_DVB_M88RS2000 is not set
+# CONFIG_DVB_TDA665x is not set
+# CONFIG_DVB_DRX39XYJ is not set
+
+#
+# Common Interface (EN50221) controller drivers
+#
+# CONFIG_DVB_CXD2099 is not set
+# CONFIG_DVB_SP2 is not set
+# end of Customise DVB Frontends
+
+#
+# Tools to develop new frontends
+#
+# CONFIG_DVB_DUMMY_FE is not set
+# end of Media ancillary drivers
+
+#
+# Graphics support
+#
+CONFIG_VIDEO_CMDLINE=y
+# CONFIG_DRM is not set
+# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
+
+#
+# ARM devices
+#
+# end of ARM devices
+
+#
+# Frame buffer Devices
+#
+CONFIG_FB_NOTIFY=y
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+CONFIG_FB_SYS_FILLRECT=y
+CONFIG_FB_SYS_COPYAREA=y
+CONFIG_FB_SYS_IMAGEBLIT=y
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+CONFIG_FB_SYS_FOPS=y
+CONFIG_FB_BACKLIGHT=y
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_ARMCLCD is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_OPENCORES is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_SIMPLE is not set
+# CONFIG_FB_SSD1307 is not set
+# CONFIG_FB_SM712 is not set
+CONFIG_FB_SSD1320=y
+# CONFIG_FB_SSD1327 is not set
+# end of Frame buffer Devices
+
+#
+# Backlight & LCD device support
+#
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_KTD253 is not set
+# CONFIG_BACKLIGHT_KTZ8866 is not set
+# CONFIG_BACKLIGHT_QCOM_WLED is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+# CONFIG_BACKLIGHT_LM3639 is not set
+# CONFIG_BACKLIGHT_GPIO is not set
+# CONFIG_BACKLIGHT_LV5207LP is not set
+# CONFIG_BACKLIGHT_BD6107 is not set
+# CONFIG_BACKLIGHT_ARCXCNN is not set
+# CONFIG_BACKLIGHT_LED is not set
+# end of Backlight & LCD device support
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_DUMMY_CONSOLE_COLUMNS=80
+CONFIG_DUMMY_CONSOLE_ROWS=25
+# CONFIG_FRAMEBUFFER_CONSOLE is not set
+# end of Console display driver support
+
+# CONFIG_LOGO is not set
+# end of Graphics support
+
+# CONFIG_SOUND is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HID_BATTERY_STRENGTH is not set
+# CONFIG_HIDRAW is not set
+# CONFIG_UHID is not set
+# CONFIG_HID_GENERIC is not set
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_ACRUX is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_AUREAL is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_COUGAR is not set
+# CONFIG_HID_MACALLY is not set
+# CONFIG_HID_CMEDIA is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EMS_FF is not set
+# CONFIG_HID_ELECOM is not set
+# CONFIG_HID_EVISION is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_GEMBIRD is not set
+# CONFIG_HID_GFRM is not set
+# CONFIG_HID_GLORIOUS is not set
+# CONFIG_HID_VIVALDI is not set
+# CONFIG_HID_KEYTOUCH is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_WALTOP is not set
+# CONFIG_HID_VIEWSONIC is not set
+# CONFIG_HID_VRC2 is not set
+# CONFIG_HID_XIAOMI is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_ICADE is not set
+# CONFIG_HID_ITE is not set
+# CONFIG_HID_JABRA is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LCPOWER is not set
+# CONFIG_HID_LED is not set
+# CONFIG_HID_LENOVO is not set
+# CONFIG_HID_MAGICMOUSE is not set
+# CONFIG_HID_MALTRON is not set
+# CONFIG_HID_MAYFLASH is not set
+# CONFIG_HID_REDRAGON is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_NINTENDO is not set
+# CONFIG_HID_NTI is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_PLANTRONICS is not set
+# CONFIG_HID_PXRC is not set
+# CONFIG_HID_RAZER is not set
+# CONFIG_HID_PRIMAX is not set
+# CONFIG_HID_SAITEK is not set
+# CONFIG_HID_SEMITEK is not set
+# CONFIG_HID_SPEEDLINK is not set
+# CONFIG_HID_STEAM is not set
+# CONFIG_HID_STEELSERIES is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_RMI is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TIVO is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_TOPRE is not set
+# CONFIG_HID_THINGM is not set
+# CONFIG_HID_UDRAW_PS3 is not set
+# CONFIG_HID_WIIMOTE is not set
+# CONFIG_HID_XINMO is not set
+# CONFIG_HID_ZEROPLUS is not set
+# CONFIG_HID_ZYDACRON is not set
+# CONFIG_HID_SENSOR_HUB is not set
+# CONFIG_HID_ALPS is not set
+# end of Special HID drivers
+
+#
+# HID-BPF support
+#
+# end of HID-BPF support
+
+#
+# USB HID support
+#
+# CONFIG_USB_HID is not set
+# CONFIG_HID_PID is not set
+
+#
+# USB HID Boot Protocol drivers
+#
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_MOUSE is not set
+# end of USB HID Boot Protocol drivers
+# end of USB HID support
+
+# CONFIG_I2C_HID is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+# CONFIG_USB_LED_TRIG is not set
+# CONFIG_USB_ULPI_BUS is not set
+# CONFIG_USB_CONN_GPIO is not set
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB=y
+# CONFIG_USB_PCI is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_FEW_INIT_RETRIES is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG_PRODUCTLIST is not set
+# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set
+# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set
+CONFIG_USB_AUTOSUSPEND_DELAY=2
+# CONFIG_USB_MON is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_XHCI_HCD=y
+# CONFIG_USB_XHCI_DBGCAP is not set
+# CONFIG_USB_XHCI_PCI_RENESAS is not set
+CONFIG_USB_XHCI_PLATFORM=y
+# CONFIG_USB_EHCI_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_MAX3421_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HCD_TEST_MODE is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=y
+CONFIG_USB_PRINTER=y
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_REALTEK is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_STORAGE_ENE_UB6250 is not set
+# CONFIG_USB_UAS is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+# CONFIG_USBIP_CORE is not set
+
+#
+# USB dual-mode controller drivers
+#
+# CONFIG_USB_CDNS_SUPPORT is not set
+# CONFIG_USB_MUSB_HDRC is not set
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_HOST=y
+
+#
+# Platform Glue Driver Support
+#
+# CONFIG_USB_DWC3_OF_SIMPLE is not set
+CONFIG_USB_DWC3_QCOM=m
+# CONFIG_USB_DWC2 is not set
+# CONFIG_USB_ISP1760 is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_QCOM_EUD is not set
+# CONFIG_APPLE_MFI_FASTCHARGE is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_EHSET_TEST_FIXTURE is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_YUREX is not set
+# CONFIG_USB_EZUSB_FX2 is not set
+# CONFIG_USB_HUB_USB251XB is not set
+# CONFIG_USB_HSIC_USB3503 is not set
+# CONFIG_USB_HSIC_USB4604 is not set
+# CONFIG_USB_LINK_LAYER_TEST is not set
+# CONFIG_USB_CHAOSKEY is not set
+# CONFIG_USB_ONBOARD_HUB is not set
+
+#
+# USB Physical Layer drivers
+#
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ISP1301 is not set
+# CONFIG_USB_ULPI is not set
+# end of USB Physical Layer drivers
+
+# CONFIG_USB_GADGET is not set
+# CONFIG_TYPEC is not set
+# CONFIG_USB_ROLE_SWITCH is not set
+CONFIG_MMC=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=8
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_ARMMMCI is not set
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_IO_ACCESSORS=y
+# CONFIG_MMC_SDHCI_PCI is not set
+CONFIG_MMC_SDHCI_PLTFM=y
+# CONFIG_MMC_SDHCI_OF_ARASAN is not set
+# CONFIG_MMC_SDHCI_OF_AT91 is not set
+# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set
+# CONFIG_MMC_SDHCI_CADENCE is not set
+# CONFIG_MMC_SDHCI_F_SDH30 is not set
+# CONFIG_MMC_SDHCI_MILBEAUT is not set
+CONFIG_MMC_SDHCI_MSM=y
+# CONFIG_MMC_TIFM_SD is not set
+# CONFIG_MMC_SPI is not set
+# CONFIG_MMC_CB710 is not set
+# CONFIG_MMC_VIA_SDMMC is not set
+# CONFIG_MMC_DW is not set
+# CONFIG_MMC_VUB300 is not set
+# CONFIG_MMC_USHC is not set
+# CONFIG_MMC_USDHI6ROL0 is not set
+CONFIG_MMC_CQHCI=y
+# CONFIG_MMC_HSQ is not set
+# CONFIG_MMC_TOSHIBA_PCI is not set
+# CONFIG_MMC_MTK is not set
+# CONFIG_MMC_SDHCI_XENON is not set
+# CONFIG_MMC_SDHCI_OMAP is not set
+# CONFIG_MMC_SDHCI_AM654 is not set
+# CONFIG_SCSI_UFSHCD is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+# CONFIG_LEDS_CLASS_FLASH is not set
+# CONFIG_LEDS_CLASS_MULTICOLOR is not set
+# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_AN30259A is not set
+# CONFIG_LEDS_AW2013 is not set
+# CONFIG_LEDS_BCM6328 is not set
+# CONFIG_LEDS_BCM6358 is not set
+# CONFIG_LEDS_CR0014114 is not set
+# CONFIG_LEDS_EL15203000 is not set
+# CONFIG_LEDS_LM3530 is not set
+# CONFIG_LEDS_LM3532 is not set
+# CONFIG_LEDS_LM3642 is not set
+# CONFIG_LEDS_LM3692X is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
+CONFIG_LEDS_FBXGWR_PMU=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_LP3952 is not set
+# CONFIG_LEDS_LP50XX is not set
+# CONFIG_LEDS_LP55XX_COMMON is not set
+# CONFIG_LEDS_LP8860 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_PCA963X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_REGULATOR is not set
+# CONFIG_LEDS_BD2606MVV is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_LEDS_TLC591XX is not set
+# CONFIG_LEDS_LM355x is not set
+# CONFIG_LEDS_IS31FL319X is not set
+# CONFIG_LEDS_IS31FL32XX is not set
+# CONFIG_LEDS_IS31FL3299 is not set
+
+#
+# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
+#
+# CONFIG_LEDS_BLINKM is not set
+# CONFIG_LEDS_SYSCON is not set
+# CONFIG_LEDS_MLXREG is not set
+# CONFIG_LEDS_USER is not set
+# CONFIG_LEDS_SPI_BYTE is not set
+# CONFIG_LEDS_TI_LMU_COMMON is not set
+# CONFIG_LEDS_LED1202 is not set
+
+#
+# Flash and Torch LED drivers
+#
+
+#
+# RGB LED drivers
+#
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_TRIGGER_TIMER is not set
+# CONFIG_LEDS_TRIGGER_ONESHOT is not set
+# CONFIG_LEDS_TRIGGER_MTD is not set
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_CPU is not set
+# CONFIG_LEDS_TRIGGER_ACTIVITY is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_LEDS_TRIGGER_TRANSIENT is not set
+# CONFIG_LEDS_TRIGGER_CAMERA is not set
+# CONFIG_LEDS_TRIGGER_PANIC is not set
+# CONFIG_LEDS_TRIGGER_NETDEV is not set
+# CONFIG_LEDS_TRIGGER_PATTERN is not set
+# CONFIG_LEDS_TRIGGER_AUDIO is not set
+# CONFIG_LEDS_TRIGGER_TTY is not set
+
+#
+# Simple LED drivers
+#
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_EDAC_SUPPORT=y
+# CONFIG_EDAC is not set
+# CONFIG_RTC_CLASS is not set
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+CONFIG_DMA_ENGINE=y
+CONFIG_DMA_VIRTUAL_CHANNELS=y
+CONFIG_DMA_OF=y
+# CONFIG_ALTERA_MSGDMA is not set
+# CONFIG_AMBA_PL08X is not set
+# CONFIG_DW_AXI_DMAC is not set
+# CONFIG_FSL_EDMA is not set
+# CONFIG_FSL_QDMA is not set
+# CONFIG_INTEL_IDMA64 is not set
+# CONFIG_MV_XOR_V2 is not set
+# CONFIG_PL330_DMA is not set
+# CONFIG_PLX_DMA is not set
+# CONFIG_XILINX_DMA is not set
+# CONFIG_XILINX_XDMA is not set
+# CONFIG_XILINX_ZYNQMP_DMA is not set
+# CONFIG_XILINX_ZYNQMP_DPDMA is not set
+CONFIG_QCOM_BAM_DMA=y
+# CONFIG_QCOM_GPI_DMA is not set
+# CONFIG_QCOM_HIDMA_MGMT is not set
+# CONFIG_QCOM_HIDMA is not set
+# CONFIG_DW_DMAC is not set
+# CONFIG_DW_DMAC_PCI is not set
+# CONFIG_DW_EDMA is not set
+# CONFIG_SF_PDMA is not set
+
+#
+# DMA Clients
+#
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+
+#
+# DMABUF options
+#
+# CONFIG_SYNC_FILE is not set
+# CONFIG_UDMABUF is not set
+# CONFIG_DMABUF_MOVE_NOTIFY is not set
+# CONFIG_DMABUF_DEBUG is not set
+# CONFIG_DMABUF_SELFTESTS is not set
+# CONFIG_DMABUF_HEAPS is not set
+# CONFIG_DMABUF_SYSFS_STATS is not set
+# end of DMABUF options
+
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_VFIO is not set
+# CONFIG_VIRT_DRIVERS is not set
+# CONFIG_VIRTIO_MENU is not set
+# CONFIG_VDPA is not set
+# CONFIG_VHOST_MENU is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+# end of Microsoft Hyper-V guest support
+
+# CONFIG_GREYBUS is not set
+# CONFIG_COMEDI is not set
+# CONFIG_STAGING is not set
+# CONFIG_GOLDFISH is not set
+# CONFIG_CHROME_PLATFORMS is not set
+# CONFIG_MELLANOX_PLATFORM is not set
+# CONFIG_SURFACE_PLATFORMS is not set
+# CONFIG_FBXGW7R_PLATFORM is not set
+CONFIG_QCOM_IPQ_PLATFORM=y
+# CONFIG_IPQ_SEC_UPGRADE is not set
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_CLK_PREPARE=y
+CONFIG_COMMON_CLK=y
+
+#
+# Clock driver for ARM Reference designs
+#
+# CONFIG_CLK_ICST is not set
+# CONFIG_CLK_SP810 is not set
+# end of Clock driver for ARM Reference designs
+
+# CONFIG_LMK04832 is not set
+# CONFIG_COMMON_CLK_MAX9485 is not set
+# CONFIG_COMMON_CLK_SI5341 is not set
+# CONFIG_COMMON_CLK_SI5351 is not set
+# CONFIG_COMMON_CLK_SI514 is not set
+# CONFIG_COMMON_CLK_SI544 is not set
+# CONFIG_COMMON_CLK_SI570 is not set
+# CONFIG_COMMON_CLK_CDCE706 is not set
+# CONFIG_COMMON_CLK_CDCE925 is not set
+# CONFIG_COMMON_CLK_CS2000_CP is not set
+# CONFIG_COMMON_CLK_AXI_CLKGEN is not set
+# CONFIG_COMMON_CLK_XGENE is not set
+# CONFIG_COMMON_CLK_RS9_PCIE is not set
+# CONFIG_COMMON_CLK_SI521XX is not set
+# CONFIG_COMMON_CLK_VC5 is not set
+# CONFIG_COMMON_CLK_VC7 is not set
+# CONFIG_COMMON_CLK_FIXED_MMIO is not set
+CONFIG_COMMON_CLK_QCOM=y
+# CONFIG_QCOM_A53PLL is not set
+# CONFIG_QCOM_A7PLL is not set
+# CONFIG_QCOM_CLK_APCS_MSM8916 is not set
+# CONFIG_QCOM_CLK_APCC_MSM8996 is not set
+# CONFIG_QCOM_CLK_APCS_SDX55 is not set
+# CONFIG_QCOM_CLK_SMD_RPM is not set
+# CONFIG_APQ_GCC_8084 is not set
+# CONFIG_APQ_MMCC_8084 is not set
+CONFIG_IPQ_APSS_PLL=y
+CONFIG_IPQ_APSS_6018=y
+# CONFIG_IPQ_GCC_4019 is not set
+# CONFIG_IPQ_GCC_5332 is not set
+# CONFIG_IPQ_GCC_6018 is not set
+# CONFIG_IPQ_GCC_806X is not set
+# CONFIG_IPQ_LCC_806X is not set
+# CONFIG_IPQ_GCC_8074 is not set
+CONFIG_IPQ_GCC_9574=y
+CONFIG_IPQ_NSSCC_9574=y
+# CONFIG_MSM_GCC_8660 is not set
+# CONFIG_MSM_GCC_8909 is not set
+# CONFIG_MSM_GCC_8916 is not set
+# CONFIG_MSM_GCC_8917 is not set
+# CONFIG_MSM_GCC_8939 is not set
+# CONFIG_MSM_GCC_8960 is not set
+# CONFIG_MSM_LCC_8960 is not set
+# CONFIG_MDM_GCC_9607 is not set
+# CONFIG_MDM_GCC_9615 is not set
+# CONFIG_MDM_LCC_9615 is not set
+# CONFIG_MSM_MMCC_8960 is not set
+# CONFIG_MSM_GCC_8953 is not set
+# CONFIG_MSM_GCC_8974 is not set
+# CONFIG_MSM_MMCC_8974 is not set
+# CONFIG_MSM_GCC_8976 is not set
+# CONFIG_MSM_MMCC_8994 is not set
+# CONFIG_MSM_GCC_8994 is not set
+# CONFIG_MSM_GCC_8996 is not set
+# CONFIG_MSM_MMCC_8996 is not set
+# CONFIG_MSM_GCC_8998 is not set
+# CONFIG_MSM_GPUCC_8998 is not set
+# CONFIG_MSM_MMCC_8998 is not set
+# CONFIG_QCM_GCC_2290 is not set
+# CONFIG_QCM_DISPCC_2290 is not set
+# CONFIG_QCS_GCC_404 is not set
+# CONFIG_SC_CAMCC_7180 is not set
+# CONFIG_SC_CAMCC_7280 is not set
+# CONFIG_SC_DISPCC_7180 is not set
+# CONFIG_SC_DISPCC_7280 is not set
+# CONFIG_SC_DISPCC_8280XP is not set
+# CONFIG_SA_GCC_8775P is not set
+# CONFIG_SA_GPUCC_8775P is not set
+# CONFIG_SC_GCC_7180 is not set
+# CONFIG_SC_GCC_7280 is not set
+# CONFIG_SC_GCC_8180X is not set
+# CONFIG_SC_GCC_8280XP is not set
+# CONFIG_SC_GPUCC_7180 is not set
+# CONFIG_SC_GPUCC_7280 is not set
+# CONFIG_SC_GPUCC_8280XP is not set
+# CONFIG_SC_LPASSCC_7280 is not set
+# CONFIG_SC_LPASS_CORECC_7180 is not set
+# CONFIG_SC_LPASS_CORECC_7280 is not set
+# CONFIG_SC_MSS_7180 is not set
+# CONFIG_SC_VIDEOCC_7180 is not set
+# CONFIG_SC_VIDEOCC_7280 is not set
+# CONFIG_SDM_CAMCC_845 is not set
+# CONFIG_SDM_GCC_660 is not set
+# CONFIG_SDM_MMCC_660 is not set
+# CONFIG_SDM_GPUCC_660 is not set
+# CONFIG_QCS_TURING_404 is not set
+# CONFIG_QCS_Q6SSTOP_404 is not set
+# CONFIG_QDU_GCC_1000 is not set
+# CONFIG_SDM_GCC_845 is not set
+# CONFIG_SDM_GPUCC_845 is not set
+# CONFIG_SDM_VIDEOCC_845 is not set
+# CONFIG_SDM_DISPCC_845 is not set
+# CONFIG_SDM_LPASSCC_845 is not set
+# CONFIG_SDX_GCC_55 is not set
+# CONFIG_SDX_GCC_65 is not set
+# CONFIG_SM_CAMCC_6350 is not set
+# CONFIG_SM_CAMCC_8250 is not set
+# CONFIG_SM_CAMCC_8450 is not set
+# CONFIG_SM_GCC_6115 is not set
+# CONFIG_SM_GCC_6125 is not set
+# CONFIG_SM_GCC_6350 is not set
+# CONFIG_SM_GCC_6375 is not set
+# CONFIG_SM_GCC_7150 is not set
+# CONFIG_SM_GCC_8150 is not set
+# CONFIG_SM_GCC_8250 is not set
+# CONFIG_SM_GCC_8350 is not set
+# CONFIG_SM_GCC_8450 is not set
+# CONFIG_SM_GCC_8550 is not set
+# CONFIG_SM_GPUCC_6115 is not set
+# CONFIG_SM_GPUCC_6125 is not set
+# CONFIG_SM_GPUCC_6375 is not set
+# CONFIG_SM_GPUCC_6350 is not set
+# CONFIG_SM_GPUCC_8150 is not set
+# CONFIG_SM_GPUCC_8250 is not set
+# CONFIG_SM_GPUCC_8350 is not set
+# CONFIG_SM_TCSRCC_8550 is not set
+# CONFIG_SM_VIDEOCC_8150 is not set
+# CONFIG_SM_VIDEOCC_8250 is not set
+# CONFIG_QCOM_HFPLL is not set
+# CONFIG_KPSS_XCC is not set
+# CONFIG_CLK_GFM_LPASS_SM8250 is not set
+# CONFIG_XILINX_VCU is not set
+# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set
+# CONFIG_HWSPINLOCK is not set
+
+#
+# Clock Source drivers
+#
+CONFIG_TIMER_OF=y
+CONFIG_TIMER_PROBE=y
+CONFIG_ARM_ARCH_TIMER=y
+CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
+CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y
+CONFIG_FSL_ERRATUM_A008585=y
+CONFIG_HISILICON_ERRATUM_161010101=y
+CONFIG_ARM64_ERRATUM_858921=y
+# end of Clock Source drivers
+
+CONFIG_MAILBOX=y
+# CONFIG_ARM_MHU is not set
+# CONFIG_ARM_MHU_V2 is not set
+# CONFIG_PLATFORM_MHU is not set
+# CONFIG_PL320_MBOX is not set
+# CONFIG_ALTERA_MBOX is not set
+# CONFIG_MAILBOX_TEST is not set
+CONFIG_QCOM_APCS_IPC=y
+# CONFIG_QCOM_IPCC is not set
+# CONFIG_IOMMU_SUPPORT is not set
+
+#
+# Remoteproc drivers
+#
+# CONFIG_REMOTEPROC is not set
+# end of Remoteproc drivers
+
+#
+# Rpmsg drivers
+#
+CONFIG_RPMSG=y
+# CONFIG_RPMSG_CHAR is not set
+# CONFIG_RPMSG_CTRL is not set
+# CONFIG_RPMSG_NS is not set
+CONFIG_RPMSG_QCOM_GLINK=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+# CONFIG_RPMSG_VIRTIO is not set
+# end of Rpmsg drivers
+
+# CONFIG_SOUNDWIRE is not set
+
+#
+# SOC (System On Chip) specific Drivers
+#
+
+#
+# Amlogic SoC drivers
+#
+# end of Amlogic SoC drivers
+
+#
+# Broadcom SoC drivers
+#
+# CONFIG_SOC_BRCMSTB is not set
+# end of Broadcom SoC drivers
+
+#
+# NXP/Freescale QorIQ SoC drivers
+#
+# CONFIG_QUICC_ENGINE is not set
+# end of NXP/Freescale QorIQ SoC drivers
+
+#
+# fujitsu SoC drivers
+#
+# end of fujitsu SoC drivers
+
+#
+# i.MX SoC drivers
+#
+# end of i.MX SoC drivers
+
+#
+# Enable LiteX SoC Builder specific drivers
+#
+# CONFIG_LITEX_SOC_CONTROLLER is not set
+# end of Enable LiteX SoC Builder specific drivers
+
+# CONFIG_WPCM450_SOC is not set
+
+#
+# Qualcomm SoC drivers
+#
+# CONFIG_QCOM_COMMAND_DB is not set
+# CONFIG_QCOM_CPR is not set
+# CONFIG_QCOM_GENI_SE is not set
+# CONFIG_QCOM_GSBI is not set
+# CONFIG_QCOM_LLCC is not set
+# CONFIG_QCOM_OCMEM is not set
+CONFIG_QCOM_QMI_HELPERS=y
+# CONFIG_QCOM_RAMP_CTRL is not set
+# CONFIG_QCOM_RMTFS_MEM is not set
+# CONFIG_QCOM_RPMH is not set
+CONFIG_QCOM_SMD_RPM=y
+# CONFIG_QCOM_SPM is not set
+# CONFIG_QCOM_WCNSS_CTRL is not set
+# CONFIG_QCOM_APR is not set
+# CONFIG_QCOM_ICC_BWMON is not set
+CONFIG_QCOM_IMEM_RESET_REASON=y
+# end of Qualcomm SoC drivers
+
+# CONFIG_SOC_TI is not set
+
+#
+# Xilinx SoC drivers
+#
+# end of Xilinx SoC drivers
+# end of SOC (System On Chip) specific Drivers
+
+# CONFIG_PM_DEVFREQ is not set
+# CONFIG_EXTCON is not set
+# CONFIG_MEMORY is not set
+# CONFIG_IIO is not set
+# CONFIG_NTB is not set
+# CONFIG_PWM is not set
+
+#
+# IRQ chip support
+#
+CONFIG_IRQCHIP=y
+CONFIG_ARM_GIC=y
+CONFIG_ARM_GIC_MAX_NR=1
+CONFIG_ARM_GIC_V2M=y
+CONFIG_ARM_GIC_V3=y
+CONFIG_ARM_GIC_V3_ITS=y
+CONFIG_ARM_GIC_V3_ITS_PCI=y
+# CONFIG_AL_FIC is not set
+# CONFIG_XILINX_INTC is not set
+CONFIG_PARTITION_PERCPU=y
+# CONFIG_QCOM_PDC is not set
+# CONFIG_QCOM_MPM is not set
+# end of IRQ chip support
+
+# CONFIG_IPACK_BUS is not set
+CONFIG_RESET_CONTROLLER=y
+# CONFIG_RESET_QCOM_AOSS is not set
+# CONFIG_RESET_QCOM_PDC is not set
+# CONFIG_RESET_SIMPLE is not set
+# CONFIG_RESET_TI_SYSCON is not set
+# CONFIG_RESET_TI_TPS380X is not set
+
+#
+# PHY Subsystem
+#
+CONFIG_GENERIC_PHY=y
+# CONFIG_PHY_CAN_TRANSCEIVER is not set
+CONFIG_XDSL_PHY_API=m
+
+#
+# PHY drivers for Broadcom platforms
+#
+# CONFIG_BCM_KONA_USB2_PHY is not set
+# end of PHY drivers for Broadcom platforms
+
+# CONFIG_PHY_CADENCE_TORRENT is not set
+# CONFIG_PHY_CADENCE_DPHY is not set
+# CONFIG_PHY_CADENCE_DPHY_RX is not set
+# CONFIG_PHY_CADENCE_SIERRA is not set
+# CONFIG_PHY_CADENCE_SALVO is not set
+# CONFIG_PHY_PXA_28NM_HSIC is not set
+# CONFIG_PHY_PXA_28NM_USB2 is not set
+# CONFIG_PHY_LAN966X_SERDES is not set
+# CONFIG_PHY_MAPPHONE_MDM6600 is not set
+# CONFIG_PHY_OCELOT_SERDES is not set
+# CONFIG_PHY_QCOM_APQ8064_SATA is not set
+# CONFIG_PHY_QCOM_EDP is not set
+# CONFIG_PHY_QCOM_IPQ4019_USB is not set
+# CONFIG_PHY_QCOM_IPQ806X_SATA is not set
+# CONFIG_PHY_QCOM_PCIE2 is not set
+CONFIG_PHY_QCOM_QMP=y
+# CONFIG_PHY_QCOM_QMP_COMBO is not set
+CONFIG_PHY_QCOM_QMP_PCIE=y
+# CONFIG_PHY_QCOM_QMP_PCIE_8996 is not set
+# CONFIG_PHY_QCOM_QMP_UFS is not set
+CONFIG_PHY_QCOM_QMP_USB=y
+CONFIG_PHY_QCOM_QUSB2=y
+# CONFIG_PHY_QCOM_SNPS_EUSB2 is not set
+# CONFIG_PHY_QCOM_EUSB2_REPEATER is not set
+# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set
+# CONFIG_PHY_QCOM_USB_HS_28NM is not set
+# CONFIG_PHY_QCOM_USB_SS is not set
+# CONFIG_PHY_QCOM_IPQ806X_USB is not set
+# end of PHY Subsystem
+
+# CONFIG_POWERCAP is not set
+# CONFIG_MCB is not set
+
+#
+# Performance monitor support
+#
+# CONFIG_ARM_CCI_PMU is not set
+# CONFIG_ARM_CCN is not set
+# CONFIG_ARM_CMN is not set
+CONFIG_ARM_PMU=y
+CONFIG_ARM_PMUV3=y
+# CONFIG_ARM_DSU_PMU is not set
+# CONFIG_ARM_SPE_PMU is not set
+# CONFIG_HISI_PCIE_PMU is not set
+# CONFIG_HNS3_PMU is not set
+# end of Performance monitor support
+
+CONFIG_RAS=y
+# CONFIG_USB4 is not set
+
+#
+# Android
+#
+# CONFIG_ANDROID_BINDER_IPC is not set
+# end of Android
+
+# CONFIG_LIBNVDIMM is not set
+# CONFIG_DAX is not set
+CONFIG_NVMEM=y
+CONFIG_NVMEM_SYSFS=y
+
+#
+# Layout Types
+#
+# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set
+# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set
+# end of Layout Types
+
+# CONFIG_NVMEM_IGNORE_RO is not set
+CONFIG_NVMEM_QCOM_QFPROM=y
+# CONFIG_NVMEM_RMEM is not set
+# CONFIG_NVMEM_U_BOOT_ENV is not set
+
+#
+# HW tracing support
+#
+# CONFIG_STM is not set
+# CONFIG_INTEL_TH is not set
+# CONFIG_HISI_PTT is not set
+# end of HW tracing support
+
+# CONFIG_FPGA is not set
+# CONFIG_FSI is not set
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+# CONFIG_OPTEE_INSECURE_LOAD_IMAGE is not set
+CONFIG_PM_OPP=y
+# CONFIG_SIOX is not set
+# CONFIG_SLIMBUS is not set
+# CONFIG_INTERCONNECT is not set
+# CONFIG_COUNTER is not set
+# CONFIG_MOST is not set
+# CONFIG_PECI is not set
+# CONFIG_HTE is not set
+# CONFIG_CDX_BUS is not set
+# end of Device Drivers
+
+#
+# File systems
+#
+CONFIG_DCACHE_WORD_ACCESS=y
+# CONFIG_VALIDATE_FS_PARSER is not set
+CONFIG_FS_IOMAP=y
+CONFIG_LEGACY_DIRECT_IO=y
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+# CONFIG_EXT4_FS_SECURITY is not set
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_XFS_FS=y
+# CONFIG_XFS_SUPPORT_V4 is not set
+CONFIG_XFS_SUPPORT_ASCII_CI=y
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_POSIX_ACL is not set
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_ONLINE_SCRUB is not set
+# CONFIG_XFS_WARN is not set
+# CONFIG_XFS_DEBUG is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+# CONFIG_F2FS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_EXPORTFS=y
+# CONFIG_EXPORTFS_BLOCK_OPS is not set
+CONFIG_FILE_LOCKING=y
+# CONFIG_FS_ENCRYPTION is not set
+# CONFIG_FS_VERITY is not set
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_FANOTIFY=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_FUSE_FS=y
+# CONFIG_CUSE is not set
+# CONFIG_VIRTIO_FS is not set
+# CONFIG_OVERLAY_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+# end of Caches
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+# end of CD-ROM/DVD Filesystems
+
+#
+# DOS/FAT/EXFAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_FAT_DEFAULT_UTF8 is not set
+# CONFIG_EXFAT_FS is not set
+CONFIG_NTFS_FS=y
+# CONFIG_NTFS_DEBUG is not set
+# CONFIG_NTFS_RW is not set
+# CONFIG_NTFS3_FS is not set
+CONFIG_EXFAT_FS_FBX=y
+# end of DOS/FAT/EXFAT/NT Filesystems
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+# CONFIG_PROC_CHILDREN is not set
+CONFIG_KERNFS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_TMPFS_INODE64 is not set
+CONFIG_ARCH_SUPPORTS_HUGETLBFS=y
+# CONFIG_HUGETLBFS is not set
+CONFIG_MEMFD_CREATE=y
+CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
+CONFIG_CONFIGFS_FS=y
+# end of Pseudo filesystems
+
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ORANGEFS_FS is not set
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+CONFIG_HFS_FS=y
+CONFIG_HFSPLUS_FS=y
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+CONFIG_CRAMFS_BLOCKDEV=y
+CONFIG_CRAMFS_MTD=y
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_FILE_CACHE is not set
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_DECOMP_SINGLE=y
+# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set
+CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set
+# CONFIG_SQUASHFS_XATTR is not set
+# CONFIG_SQUASHFS_ZLIB is not set
+# CONFIG_SQUASHFS_LZ4 is not set
+# CONFIG_SQUASHFS_LZO is not set
+CONFIG_SQUASHFS_XZ=y
+# CONFIG_SQUASHFS_ZSTD is not set
+# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_PSTORE=y
+CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240
+# CONFIG_PSTORE_DEFLATE_COMPRESS is not set
+# CONFIG_PSTORE_LZO_COMPRESS is not set
+# CONFIG_PSTORE_LZ4_COMPRESS is not set
+# CONFIG_PSTORE_LZ4HC_COMPRESS is not set
+# CONFIG_PSTORE_842_COMPRESS is not set
+# CONFIG_PSTORE_ZSTD_COMPRESS is not set
+# CONFIG_PSTORE_CONSOLE is not set
+# CONFIG_PSTORE_PMSG is not set
+CONFIG_PSTORE_RAM=y
+# CONFIG_PSTORE_BLK is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_EROFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V2=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFS_DISABLE_UDP_SUPPORT is not set
+CONFIG_NFSD=y
+# CONFIG_NFSD_V2 is not set
+# CONFIG_NFSD_V3_ACL is not set
+CONFIG_NFSD_V4=y
+# CONFIG_NFSD_BLOCKLAYOUT is not set
+# CONFIG_NFSD_SCSILAYOUT is not set
+# CONFIG_NFSD_FLEXFILELAYOUT is not set
+CONFIG_GRACE_PERIOD=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_SUNRPC_DEBUG is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+CONFIG_SMB_SERVER=y
+CONFIG_SMB_INSECURE_SERVER=y
+CONFIG_SMB_SERVER_CHECK_CAP_NET_ADMIN=y
+# CONFIG_SMB_SERVER_KERBEROS5 is not set
+CONFIG_SMBFS=y
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=y
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_MAC_ROMAN is not set
+# CONFIG_NLS_MAC_CELTIC is not set
+# CONFIG_NLS_MAC_CENTEURO is not set
+# CONFIG_NLS_MAC_CROATIAN is not set
+# CONFIG_NLS_MAC_CYRILLIC is not set
+# CONFIG_NLS_MAC_GAELIC is not set
+# CONFIG_NLS_MAC_GREEK is not set
+# CONFIG_NLS_MAC_ICELAND is not set
+# CONFIG_NLS_MAC_INUIT is not set
+# CONFIG_NLS_MAC_ROMANIAN is not set
+# CONFIG_NLS_MAC_TURKISH is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+# CONFIG_UNICODE is not set
+# end of File systems
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_REQUEST_CACHE is not set
+# CONFIG_PERSISTENT_KEYRINGS is not set
+# CONFIG_BIG_KEYS is not set
+# CONFIG_TRUSTED_KEYS is not set
+# CONFIG_ENCRYPTED_KEYS is not set
+# CONFIG_KEY_DH_OPERATIONS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
+# CONFIG_HARDENED_USERCOPY is not set
+# CONFIG_FORTIFY_SOURCE is not set
+# CONFIG_STATIC_USERMODEHELPER is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_LSM="yama,loadpin,safesetid,integrity"
+
+#
+# Kernel hardening options
+#
+
+#
+# Memory initialization
+#
+CONFIG_INIT_STACK_NONE=y
+# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set
+# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
+# end of Memory initialization
+
+CONFIG_RANDSTRUCT_NONE=y
+# end of Kernel hardening options
+# end of Security options
+
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_SKCIPHER=y
+CONFIG_CRYPTO_SKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_RNG_DEFAULT=y
+CONFIG_CRYPTO_AKCIPHER2=y
+CONFIG_CRYPTO_AKCIPHER=y
+CONFIG_CRYPTO_KPP2=y
+CONFIG_CRYPTO_KPP=y
+CONFIG_CRYPTO_ACOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_USER is not set
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_NULL2=y
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+# end of Crypto core or helper
+
+#
+# Public-key cryptography
+#
+CONFIG_CRYPTO_RSA=y
+# CONFIG_CRYPTO_DH is not set
+CONFIG_CRYPTO_ECC=y
+CONFIG_CRYPTO_ECDH=y
+CONFIG_CRYPTO_ECDSA=y
+# CONFIG_CRYPTO_ECRDSA is not set
+# CONFIG_CRYPTO_SM2 is not set
+# CONFIG_CRYPTO_CURVE25519 is not set
+# end of Public-key cryptography
+
+#
+# Block ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_AES_TI is not set
+# CONFIG_CRYPTO_ARIA is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_SM4_GENERIC is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# end of Block ciphers
+
+#
+# Length-preserving ciphers and modes
+#
+# CONFIG_CRYPTO_ADIANTUM is not set
+CONFIG_CRYPTO_CHACHA20=y
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CFB is not set
+CONFIG_CRYPTO_CTR=y
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_HCTR2 is not set
+# CONFIG_CRYPTO_KEYWRAP is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_OFB is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+# end of Length-preserving ciphers and modes
+
+#
+# AEAD (authenticated encryption with associated data) ciphers
+#
+# CONFIG_CRYPTO_AEGIS128 is not set
+CONFIG_CRYPTO_CHACHA20POLY1305=y
+CONFIG_CRYPTO_CCM=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_ESSIV=y
+# end of AEAD (authenticated encryption with associated data) ciphers
+
+#
+# Hashes, digests, and MACs
+#
+# CONFIG_CRYPTO_BLAKE2B is not set
+CONFIG_CRYPTO_CMAC=y
+CONFIG_CRYPTO_GHASH=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+CONFIG_CRYPTO_POLY1305=y
+# CONFIG_CRYPTO_RMD160 is not set
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_SHA3=y
+# CONFIG_CRYPTO_SM3_GENERIC is not set
+# CONFIG_CRYPTO_STREEBOG is not set
+# CONFIG_CRYPTO_VMAC is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_XXHASH is not set
+# end of Hashes, digests, and MACs
+
+#
+# CRCs (cyclic redundancy checks)
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_CRC32 is not set
+# CONFIG_CRYPTO_CRCT10DIF is not set
+# end of CRCs (cyclic redundancy checks)
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+# CONFIG_CRYPTO_842 is not set
+# CONFIG_CRYPTO_LZ4 is not set
+# CONFIG_CRYPTO_LZ4HC is not set
+# CONFIG_CRYPTO_ZSTD is not set
+# end of Compression
+
+#
+# Random number generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=y
+CONFIG_CRYPTO_DRBG_HMAC=y
+# CONFIG_CRYPTO_DRBG_HASH is not set
+# CONFIG_CRYPTO_DRBG_CTR is not set
+CONFIG_CRYPTO_DRBG=y
+CONFIG_CRYPTO_JITTERENTROPY=y
+# end of Random number generation
+
+#
+# Userspace interface
+#
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+# CONFIG_CRYPTO_USER_API_RNG is not set
+# CONFIG_CRYPTO_USER_API_AEAD is not set
+# end of Userspace interface
+
+CONFIG_CRYPTO_HASH_INFO=y
+# CONFIG_CRYPTO_NHPOLY1305_NEON is not set
+CONFIG_CRYPTO_CHACHA20_NEON=y
+
+#
+# Accelerated Cryptographic Algorithms for CPU (arm64)
+#
+# CONFIG_CRYPTO_GHASH_ARM64_CE is not set
+CONFIG_CRYPTO_POLY1305_NEON=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA256_ARM64=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_SHA512_ARM64=y
+CONFIG_CRYPTO_SHA512_ARM64_CE=y
+# CONFIG_CRYPTO_SHA3_ARM64 is not set
+# CONFIG_CRYPTO_SM3_NEON is not set
+# CONFIG_CRYPTO_SM3_ARM64_CE is not set
+# CONFIG_CRYPTO_POLYVAL_ARM64_CE is not set
+CONFIG_CRYPTO_AES_ARM64=y
+CONFIG_CRYPTO_AES_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+# CONFIG_CRYPTO_AES_ARM64_BS is not set
+# CONFIG_CRYPTO_SM4_ARM64_CE is not set
+# CONFIG_CRYPTO_SM4_ARM64_CE_BLK is not set
+# CONFIG_CRYPTO_SM4_ARM64_NEON_BLK is not set
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+# CONFIG_CRYPTO_SM4_ARM64_CE_CCM is not set
+# CONFIG_CRYPTO_SM4_ARM64_CE_GCM is not set
+# end of Accelerated Cryptographic Algorithms for CPU (arm64)
+
+# CONFIG_CRYPTO_HW is not set
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set
+CONFIG_PKCS7_MESSAGE_PARSER=y
+# CONFIG_FIPS_SIGNATURE_SELFTEST is not set
+
+#
+# Certificates for signature checking
+#
+CONFIG_SYSTEM_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_TRUSTED_KEYS=""
+# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set
+# CONFIG_SECONDARY_TRUSTED_KEYRING is not set
+# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set
+# end of Certificates for signature checking
+
+#
+# Library routines
+#
+CONFIG_LINEAR_RANGES=y
+# CONFIG_PACKING is not set
+CONFIG_BITREVERSE=y
+CONFIG_HAVE_ARCH_BITREVERSE=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_NET_UTILS=y
+# CONFIG_CORDIC is not set
+# CONFIG_PRIME_NUMBERS is not set
+CONFIG_RATIONAL=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
+CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
+CONFIG_ARCH_USE_SYM_ANNOTATIONS=y
+# CONFIG_INDIRECT_PIO is not set
+
+#
+# Crypto library routines
+#
+CONFIG_CRYPTO_LIB_UTILS=y
+CONFIG_CRYPTO_LIB_AES=y
+CONFIG_CRYPTO_LIB_ARC4=y
+CONFIG_CRYPTO_LIB_GF128MUL=y
+CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y
+CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=y
+CONFIG_CRYPTO_LIB_CHACHA_GENERIC=y
+CONFIG_CRYPTO_LIB_CHACHA=y
+CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=y
+CONFIG_CRYPTO_LIB_CURVE25519=y
+CONFIG_CRYPTO_LIB_DES=y
+CONFIG_CRYPTO_LIB_POLY1305_RSIZE=9
+CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=y
+CONFIG_CRYPTO_LIB_POLY1305_GENERIC=y
+CONFIG_CRYPTO_LIB_POLY1305=y
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=y
+CONFIG_CRYPTO_LIB_SHA1=y
+CONFIG_CRYPTO_LIB_SHA256=y
+# end of Crypto library routines
+
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC64_ROCKSOFT is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+CONFIG_CRC32_SLICEBY8=y
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+# CONFIG_CRC32_BIT is not set
+# CONFIG_CRC64 is not set
+# CONFIG_CRC4 is not set
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+CONFIG_CRC8=y
+CONFIG_AUDIT_GENERIC=y
+CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y
+# CONFIG_RANDOM32_SELFTEST is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+# CONFIG_XZ_DEC_SPARC is not set
+# CONFIG_XZ_DEC_MICROLZMA is not set
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_REED_SOLOMON=y
+CONFIG_REED_SOLOMON_ENC8=y
+CONFIG_REED_SOLOMON_DEC8=y
+CONFIG_INTERVAL_TREE=y
+CONFIG_ASSOCIATIVE_ARRAY=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_HAS_DMA=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_DMA_DECLARE_COHERENT=y
+CONFIG_ARCH_HAS_SETUP_DMA_OPS=y
+CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y
+CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y
+CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y
+CONFIG_SWIOTLB=y
+# CONFIG_DMA_RESTRICTED_POOL is not set
+CONFIG_DMA_NONCOHERENT_MMAP=y
+CONFIG_DMA_COHERENT_POOL=y
+CONFIG_DMA_DIRECT_REMAP=y
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_DMA_MAP_BENCHMARK is not set
+CONFIG_SGL_ALLOC=y
+# CONFIG_FORCE_NR_CPUS is not set
+CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
+CONFIG_NLATTR=y
+CONFIG_CLZ_TAB=y
+# CONFIG_IRQ_POLL is not set
+CONFIG_MPILIB=y
+CONFIG_LIBFDT=y
+CONFIG_OID_REGISTRY=y
+CONFIG_HAVE_GENERIC_VDSO=y
+CONFIG_GENERIC_GETTIMEOFDAY=y
+CONFIG_GENERIC_VDSO_TIME_NS=y
+CONFIG_SG_POOL=y
+CONFIG_ARCH_STACKWALK=y
+CONFIG_STACKDEPOT=y
+CONFIG_STACKDEPOT_ALWAYS_INIT=y
+CONFIG_SBITMAP=y
+CONFIG_ARCH_HAS_FBXSERIAL=y
+CONFIG_FBXSERIAL=y
+# end of Library routines
+
+CONFIG_GENERIC_IOREMAP=y
+CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y
+
+#
+# Kernel hacking
+#
+
+#
+# printk and dmesg options
+#
+CONFIG_PRINTK_TIME=y
+# CONFIG_PRINTK_CALLER is not set
+# CONFIG_STACKTRACE_BUILD_ID is not set
+CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7
+CONFIG_CONSOLE_LOGLEVEL_QUIET=4
+CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_DYNAMIC_DEBUG_CORE is not set
+# CONFIG_SYMBOLIC_ERRNAME is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# end of printk and dmesg options
+
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_MISC is not set
+
+#
+# Compile-time checks and compiler options
+#
+CONFIG_AS_HAS_NON_CONST_LEB128=y
+CONFIG_DEBUG_INFO_NONE=y
+# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set
+# CONFIG_DEBUG_INFO_DWARF4 is not set
+# CONFIG_DEBUG_INFO_DWARF5 is not set
+CONFIG_FRAME_WARN=2048
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_READABLE_ASM is not set
+# CONFIG_HEADERS_INSTALL is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+CONFIG_SECTION_MISMATCH_WARN_ONLY=y
+# CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B is not set
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+CONFIG_FRAME_POINTER=y
+# CONFIG_VMLINUX_MAP is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# end of Compile-time checks and compiler options
+
+#
+# Generic Kernel Debugging Instruments
+#
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
+CONFIG_MAGIC_SYSRQ_SERIAL=y
+CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE=""
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_FS_ALLOW_ALL=y
+# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set
+# CONFIG_DEBUG_FS_ALLOW_NONE is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
+# CONFIG_UBSAN is not set
+CONFIG_HAVE_ARCH_KCSAN=y
+# end of Generic Kernel Debugging Instruments
+
+#
+# Networking Debugging
+#
+# CONFIG_NET_DEV_REFCNT_TRACKER is not set
+# CONFIG_NET_NS_REFCNT_TRACKER is not set
+# CONFIG_DEBUG_NET is not set
+# end of Networking Debugging
+
+#
+# Memory Debugging
+#
+CONFIG_PAGE_EXTENSION=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_PAGE_OWNER is not set
+# CONFIG_PAGE_TABLE_CHECK is not set
+CONFIG_PAGE_POISONING=y
+# CONFIG_DEBUG_RODATA_TEST is not set
+CONFIG_ARCH_HAS_DEBUG_WX=y
+# CONFIG_DEBUG_WX is not set
+CONFIG_GENERIC_PTDUMP=y
+# CONFIG_PTDUMP_DEBUGFS is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=16000
+# CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF is not set
+# CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN is not set
+# CONFIG_PER_VMA_LOCK_STATS is not set
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_SELFTEST=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
+# CONFIG_SHRINKER_DEBUG is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_SCHED_STACK_END_CHECK is not set
+CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y
+CONFIG_DEBUG_VM_IRQSOFF=y
+CONFIG_DEBUG_VM=y
+# CONFIG_DEBUG_VM_MAPLE_TREE is not set
+CONFIG_DEBUG_VM_RB=y
+CONFIG_DEBUG_VM_PGFLAGS=y
+# CONFIG_DEBUG_VM_PGTABLE is not set
+CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
+CONFIG_DEBUG_VIRTUAL=y
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+CONFIG_HAVE_ARCH_KASAN=y
+CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y
+CONFIG_HAVE_ARCH_KASAN_VMALLOC=y
+CONFIG_CC_HAS_KASAN_GENERIC=y
+CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y
+# CONFIG_KASAN is not set
+CONFIG_HAVE_ARCH_KFENCE=y
+# CONFIG_KFENCE is not set
+# end of Memory Debugging
+
+CONFIG_DEBUG_SHIRQ=y
+
+#
+# Debug Oops, Lockups and Hangs
+#
+# CONFIG_PANIC_ON_OOPS is not set
+CONFIG_PANIC_ON_OOPS_VALUE=0
+CONFIG_PANIC_TIMEOUT=10
+# CONFIG_SOFTLOCKUP_DETECTOR is not set
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
+# CONFIG_WQ_WATCHDOG is not set
+# CONFIG_TEST_LOCKUP is not set
+# end of Debug Oops, Lockups and Hangs
+
+#
+# Scheduler Debugging
+#
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHED_INFO=y
+# CONFIG_SCHEDSTATS is not set
+# end of Scheduler Debugging
+
+CONFIG_DEBUG_TIMEKEEPING=y
+
+#
+# Lock Debugging (spinlocks, mutexes, etc...)
+#
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
+# CONFIG_DEBUG_RWSEMS is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_LOCK_TORTURE_TEST is not set
+# CONFIG_WW_MUTEX_SELFTEST is not set
+# CONFIG_SCF_TORTURE_TEST is not set
+# CONFIG_CSD_LOCK_WAIT_DEBUG is not set
+# end of Lock Debugging (spinlocks, mutexes, etc...)
+
+# CONFIG_DEBUG_IRQFLAGS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_KOBJECT_RELEASE is not set
+
+#
+# Debug kernel data structures
+#
+CONFIG_DEBUG_LIST=y
+# CONFIG_DEBUG_PLIST is not set
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+# CONFIG_BUG_ON_DATA_CORRUPTION is not set
+# CONFIG_DEBUG_MAPLE_TREE is not set
+# end of Debug kernel data structures
+
+# CONFIG_DEBUG_CREDENTIALS is not set
+
+#
+# RCU Debugging
+#
+# CONFIG_RCU_SCALE_TEST is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_REF_SCALE_TEST is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=21
+CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0
+# CONFIG_RCU_CPU_STALL_CPUTIME is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_RCU_EQS_DEBUG is not set
+# end of RCU Debugging
+
+# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_SAMPLES is not set
+CONFIG_STRICT_DEVMEM=y
+# CONFIG_IO_STRICT_DEVMEM is not set
+
+#
+# arm64 Debugging
+#
+# CONFIG_PID_IN_CONTEXTIDR is not set
+# CONFIG_ARM64_RELOC_TEST is not set
+# CONFIG_CORESIGHT is not set
+# end of arm64 Debugging
+
+#
+# Kernel Testing and Coverage
+#
+# CONFIG_KUNIT is not set
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
+# CONFIG_FAULT_INJECTION is not set
+CONFIG_ARCH_HAS_KCOV=y
+CONFIG_CC_HAS_SANCOV_TRACE_PC=y
+# CONFIG_RUNTIME_TESTING_MENU is not set
+CONFIG_ARCH_USE_MEMTEST=y
+CONFIG_MEMTEST=y
+# end of Kernel Testing and Coverage
+
+#
+# Rust hacking
+#
+# end of Rust hacking
+# end of Kernel hacking
diff -Nruw linux-6.4-fbx/drivers/char/diag./Kconfig linux-6.4-fbx/drivers/char/diag/Kconfig
--- linux-6.4-fbx/drivers/char/diag./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/Kconfig	2023-03-15 19:52:23.513979080 +0100
@@ -0,0 +1,38 @@
+menu "Diag Support"
+
+config DIAG_CHAR
+	tristate "char driver interface and diag forwarding to/from modem"
+	select CRC_CCITT
+	help
+	 Char driver interface for diag user space and diag-forwarding to modem ARM and back.
+	 This enables diagchar for maemo usb gadget or android usb gadget based on config selected.
+endmenu
+
+menu "DIAG traffic over USB"
+
+config DIAG_OVER_USB
+	bool "Enable DIAG traffic to go over USB"
+	depends on DIAG_CHAR
+	help
+	 This feature helps segregate code required for DIAG traffic to go over USB.
+endmenu
+
+menu "DIAG traffic over QRTR"
+
+config DIAG_OVER_QRTR
+	bool "Enable DIAG traffic to go over QRTR"
+	depends on QRTR && DIAG_CHAR
+	default n
+	help
+	 This feature helps segregate code required for DIAG traffic to go over QRTR.
+endmenu
+
+menu "HSIC/SMUX support for DIAG"
+
+config DIAGFWD_BRIDGE_CODE
+	bool "Enable QSC/9K DIAG traffic over SMUX/HSIC"
+	depends on DIAG_CHAR
+	depends on USB_QCOM_DIAG_BRIDGE || MHI_BUS
+	help
+	 SMUX/HSIC Transport Layer for DIAG Router
+endmenu
diff -Nruw linux-6.4-fbx/drivers/char/diag./Makefile linux-6.4-fbx/drivers/char/diag/Makefile
--- linux-6.4-fbx/drivers/char/diag./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/Makefile	2023-03-15 19:52:23.513979080 +0100
@@ -0,0 +1,19 @@
+obj-$(CONFIG_DIAG_CHAR) := diagchar.o
+obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_hsic.o
+obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_smux.o
+diagchar-objs := diagchar_core.o diagchar_hdlc.o diagfwd.o diagfwd_peripheral.o diag_mux.o diag_memorydevice.o diag_usb.o diagmem.o diagfwd_cntl.o diag_dci.o diag_masks.o diag_debugfs.o
+
+ifdef CONFIG_DIAG_OVER_QRTR
+diagchar-objs += qcom_diagfwd_socket.o
+else
+diagchar-objs += diagfwd_socket.o
+endif
+
+ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+diagchar-objs += diagfwd_bridge.o
+
+ifdef CONFIG_MHI_BUS
+diagchar-objs += diagfwd_mhi.o
+endif
+
+endif
diff -Nruw linux-6.4-fbx/drivers/char/diag./diag_dci.c linux-6.4-fbx/drivers/char/diag/diag_dci.c
--- linux-6.4-fbx/drivers/char/diag./diag_dci.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diag_dci.c	2023-10-05 12:33:41.363634732 +0200
@@ -0,0 +1,3215 @@
+/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeup.h>
+#include <linux/spinlock.h>
+#include <linux/ratelimit.h>
+#include <linux/reboot.h>
+#include <asm/current.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include "diagchar_hdlc.h"
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+#include "diag_dci.h"
+#include "diag_masks.h"
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#include "diagfwd_bridge.h"
+#endif
+#include "diagfwd_peripheral.h"
+#include "diag_ipc_logging.h"
+
+static struct timer_list dci_drain_timer;
+static int dci_timer_in_progress;
+static struct work_struct dci_data_drain_work;
+
+struct diag_dci_partial_pkt_t partial_pkt;
+
+unsigned int dci_max_reg = 100;
+unsigned int dci_max_clients = 10;
+struct mutex dci_log_mask_mutex;
+struct mutex dci_event_mask_mutex;
+
+/*
+ * DCI_HANDSHAKE_RETRY_TIME: Time to wait (in microseconds) before checking the
+ * connection status again.
+ *
+ * DCI_HANDSHAKE_WAIT_TIME: Timeout (in milliseconds) to check for dci
+ * connection status
+ */
+#define DCI_HANDSHAKE_RETRY_TIME	500000
+#define DCI_HANDSHAKE_WAIT_TIME		200
+
+spinlock_t ws_lock;
+unsigned long ws_lock_flags;
+
+struct dci_ops_tbl_t dci_ops_tbl[NUM_DCI_PROC] = {
+	{
+		.ctx = 0,
+		.send_log_mask = diag_send_dci_log_mask,
+		.send_event_mask = diag_send_dci_event_mask,
+		.peripheral_status = 0,
+		.mempool = 0,
+	},
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	{
+		.ctx = DIAGFWD_MDM_DCI,
+		.send_log_mask = diag_send_dci_log_mask_remote,
+		.send_event_mask = diag_send_dci_event_mask_remote,
+		.peripheral_status = 0,
+		.mempool = POOL_TYPE_MDM_DCI_WRITE,
+	}
+#endif
+};
+
+struct dci_channel_status_t dci_channel_status[NUM_DCI_PROC] = {
+	{
+		.id = 0,
+		.open = 0,
+		.retry_count = 0
+	},
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	{
+		.id = DIAGFWD_MDM_DCI,
+		.open = 0,
+		.retry_count = 0
+	}
+#endif
+};
+
+/* Number of milliseconds anticipated to process the DCI data */
+#define DCI_WAKEUP_TIMEOUT 1
+
+#define DCI_CAN_ADD_BUF_TO_LIST(buf)					\
+	(buf && buf->data && !buf->in_busy && buf->data_len > 0)	\
+
+#ifdef CONFIG_DEBUG_FS
+struct diag_dci_data_info *dci_traffic;
+struct mutex dci_stat_mutex;
+void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
+			     uint8_t peripheral, uint8_t proc)
+{
+	static int curr_dci_data;
+	static unsigned long iteration;
+	struct diag_dci_data_info *temp_data = dci_traffic;
+	if (!temp_data)
+		return;
+	mutex_lock(&dci_stat_mutex);
+	if (curr_dci_data == DIAG_DCI_DEBUG_CNT)
+		curr_dci_data = 0;
+	temp_data += curr_dci_data;
+	temp_data->iteration = iteration + 1;
+	temp_data->data_size = read_bytes;
+	temp_data->peripheral = peripheral;
+	temp_data->ch_type = ch_type;
+	temp_data->proc = proc;
+	diag_get_timestamp(temp_data->time_stamp);
+	curr_dci_data++;
+	iteration++;
+	mutex_unlock(&dci_stat_mutex);
+}
+#else
+void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
+			     uint8_t peripheral, uint8_t proc) { }
+#endif
+static void create_dci_log_mask_tbl(unsigned char *mask, uint8_t dirty)
+{
+	unsigned char *temp = mask;
+	uint8_t i;
+
+	if (!mask)
+		return;
+
+	/* create hard coded table for log mask with 16 categories */
+	for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
+		*temp = i;
+		temp++;
+		*temp = dirty ? 1 : 0;
+		temp++;
+		memset(temp, 0, DCI_MAX_ITEMS_PER_LOG_CODE);
+		temp += DCI_MAX_ITEMS_PER_LOG_CODE;
+	}
+}
+
+static void create_dci_event_mask_tbl(unsigned char *tbl_buf)
+{
+	if (tbl_buf)
+		memset(tbl_buf, 0, DCI_EVENT_MASK_SIZE);
+}
+
+void dci_drain_data(struct timer_list *t)
+{
+	queue_work(driver->diag_dci_wq, &dci_data_drain_work);
+}
+
+static void dci_check_drain_timer(void)
+{
+	if (!dci_timer_in_progress) {
+		dci_timer_in_progress = 1;
+		mod_timer(&dci_drain_timer, jiffies + msecs_to_jiffies(200));
+	}
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static void dci_handshake_work_fn(struct work_struct *work)
+{
+	int err = 0;
+	int max_retries = 5;
+
+	struct dci_channel_status_t *status = container_of(work,
+						struct dci_channel_status_t,
+						handshake_work);
+
+	if (status->open) {
+		pr_debug("diag: In %s, remote dci channel is open, index: %d\n",
+			 __func__, status->id);
+		return;
+	}
+
+	if (status->retry_count == max_retries) {
+		status->retry_count = 0;
+		pr_info("diag: dci channel connection handshake timed out, id: %d\n",
+			status->id);
+		err = diagfwd_bridge_close(TOKEN_TO_BRIDGE(status->id));
+		if (err) {
+			pr_err("diag: In %s, unable to close dci channel id: %d, err: %d\n",
+			       __func__, status->id, err);
+		}
+		return;
+	}
+	status->retry_count++;
+	/*
+	 * Sleep for sometime to check for the connection status again. The
+	 * value should be optimum to include a roundabout time for a small
+	 * packet to the remote processor.
+	 */
+	usleep_range(DCI_HANDSHAKE_RETRY_TIME, DCI_HANDSHAKE_RETRY_TIME + 100);
+	mod_timer(&status->wait_time,
+		  jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));
+}
+
+static void dci_chk_handshake(struct timer_list *t)
+{
+	struct dci_channel_status_t *dci = container_of(t,
+						struct dci_channel_status_t,
+							wait_time);
+	if (dci->id < 0 || dci->id >= NUM_DCI_PROC)
+		return;
+
+}
+#endif
+
+static int diag_dci_init_buffer(struct diag_dci_buffer_t *buffer, int type)
+{
+	if (!buffer || buffer->data)
+		return -EINVAL;
+
+	switch (type) {
+	case DCI_BUF_PRIMARY:
+		buffer->capacity = IN_BUF_SIZE;
+		buffer->data = kzalloc(buffer->capacity, GFP_KERNEL);
+		if (!buffer->data)
+			return -ENOMEM;
+		break;
+	case DCI_BUF_SECONDARY:
+		buffer->data = NULL;
+		buffer->capacity = IN_BUF_SIZE;
+		break;
+	case DCI_BUF_CMD:
+		buffer->capacity = DIAG_MAX_REQ_SIZE + DCI_BUF_SIZE;
+		buffer->data = kzalloc(buffer->capacity, GFP_KERNEL);
+		if (!buffer->data)
+			return -ENOMEM;
+		break;
+	default:
+		pr_err("diag: In %s, unknown type %d", __func__, type);
+		return -EINVAL;
+	}
+
+	buffer->data_len = 0;
+	buffer->in_busy = 0;
+	buffer->buf_type = type;
+	mutex_init(&buffer->data_mutex);
+
+	return 0;
+}
+
+static inline int diag_dci_check_buffer(struct diag_dci_buffer_t *buf, int len)
+{
+	if (!buf)
+		return -EINVAL;
+
+	/* Return 1 if the buffer is not busy and can hold new data */
+	if ((buf->data_len + len < buf->capacity) && !buf->in_busy)
+		return 1;
+
+	return 0;
+}
+
+static void dci_add_buffer_to_list(struct diag_dci_client_tbl *client,
+				   struct diag_dci_buffer_t *buf)
+{
+	if (!buf || !client || !buf->data)
+		return;
+
+	if (buf->in_list || buf->data_len == 0)
+		return;
+
+	mutex_lock(&client->write_buf_mutex);
+	list_add_tail(&buf->buf_track, &client->list_write_buf);
+	/*
+	 * In the case of DCI, there can be multiple packets in one read. To
+	 * calculate the wakeup source reference count, we must account for each
+	 * packet in a single read.
+	 */
+	diag_ws_on_read(DIAG_WS_DCI, buf->data_len);
+	mutex_lock(&buf->data_mutex);
+	buf->in_busy = 1;
+	buf->in_list = 1;
+	mutex_unlock(&buf->data_mutex);
+	mutex_unlock(&client->write_buf_mutex);
+}
+
+static int diag_dci_get_buffer(struct diag_dci_client_tbl *client,
+			       int data_source, int len)
+{
+	struct diag_dci_buffer_t *buf_primary = NULL;
+	struct diag_dci_buffer_t *buf_temp = NULL;
+	struct diag_dci_buffer_t *curr = NULL;
+
+	if (!client)
+		return -EINVAL;
+	if (len < 0 || len > IN_BUF_SIZE)
+		return -EINVAL;
+
+	curr = client->buffers[data_source].buf_curr;
+	buf_primary = client->buffers[data_source].buf_primary;
+
+	if (curr && diag_dci_check_buffer(curr, len) == 1)
+		return 0;
+
+	dci_add_buffer_to_list(client, curr);
+	client->buffers[data_source].buf_curr = NULL;
+
+	if (diag_dci_check_buffer(buf_primary, len) == 1) {
+		client->buffers[data_source].buf_curr = buf_primary;
+		return 0;
+	}
+
+	buf_temp = kzalloc(sizeof(struct diag_dci_buffer_t), GFP_KERNEL);
+	if (!buf_temp)
+		return -EIO;
+
+	if (!diag_dci_init_buffer(buf_temp, DCI_BUF_SECONDARY)) {
+		buf_temp->data = diagmem_alloc(driver, IN_BUF_SIZE,
+					       POOL_TYPE_DCI);
+		if (!buf_temp->data) {
+			kfree(buf_temp);
+			buf_temp = NULL;
+			return -ENOMEM;
+		}
+		client->buffers[data_source].buf_curr = buf_temp;
+		return 0;
+	}
+
+	kfree(buf_temp);
+	buf_temp = NULL;
+	return -EIO;
+}
+
+void diag_dci_wakeup_clients()
+{
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	mutex_lock(&driver->dci_mutex);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+
+		/*
+		 * Don't wake up the client when there is no pending buffer to
+		 * write or when it is writing to user space
+		 */
+		if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
+			mutex_lock(&entry->write_buf_mutex);
+			entry->in_service = 1;
+			mutex_unlock(&entry->write_buf_mutex);
+			diag_update_sleeping_process(entry->client->tgid,
+						     DCI_DATA_TYPE);
+		}
+	}
+	mutex_unlock(&driver->dci_mutex);
+}
+
+void dci_data_drain_work_fn(struct work_struct *work)
+{
+	int i;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+	struct diag_dci_buffer_t *buf_temp = NULL;
+
+	mutex_lock(&driver->dci_mutex);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		for (i = 0; i < entry->num_buffers; i++) {
+			proc_buf = &entry->buffers[i];
+
+			mutex_lock(&proc_buf->buf_mutex);
+			buf_temp = proc_buf->buf_primary;
+			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
+				dci_add_buffer_to_list(entry, buf_temp);
+
+			buf_temp = proc_buf->buf_cmd;
+			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
+				dci_add_buffer_to_list(entry, buf_temp);
+
+			buf_temp = proc_buf->buf_curr;
+			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp)) {
+				dci_add_buffer_to_list(entry, buf_temp);
+				proc_buf->buf_curr = NULL;
+			}
+			mutex_unlock(&proc_buf->buf_mutex);
+		}
+		if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
+			mutex_lock(&entry->write_buf_mutex);
+			entry->in_service = 1;
+			mutex_unlock(&entry->write_buf_mutex);
+			diag_update_sleeping_process(entry->client->tgid,
+						     DCI_DATA_TYPE);
+		}
+	}
+	mutex_unlock(&driver->dci_mutex);
+	dci_timer_in_progress = 0;
+}
+
+static int diag_process_single_dci_pkt(unsigned char *buf, int len,
+				       int data_source, int token)
+{
+	uint8_t cmd_code = 0;
+
+	if (!buf || len < 0) {
+		pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
+			__func__, buf, len);
+		return -EIO;
+	}
+
+	cmd_code = *(uint8_t *)buf;
+
+	switch (cmd_code) {
+	case LOG_CMD_CODE:
+		extract_dci_log(buf, len, data_source, token);
+		break;
+	case EVENT_CMD_CODE:
+		extract_dci_events(buf, len, data_source, token);
+		break;
+	case DCI_PKT_RSP_CODE:
+	case DCI_DELAYED_RSP_CODE:
+		extract_dci_pkt_rsp(buf, len, data_source, token);
+		break;
+	case DCI_CONTROL_PKT_CODE:
+		extract_dci_ctrl_pkt(buf, len, token);
+		break;
+	default:
+		pr_err("diag: Unable to process single DCI packet, cmd_code: %d, data_source: %d",
+			cmd_code, data_source);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Process the data read from apps userspace client */
+void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes)
+{
+	int err = 0;
+
+	if (!buf) {
+		pr_err_ratelimited("diag: In %s, Null buf pointer\n", __func__);
+		return;
+	}
+
+	if (data_type != DATA_TYPE_DCI_LOG && data_type != DATA_TYPE_DCI_EVENT
+						&& data_type != DCI_PKT_TYPE) {
+		pr_err("diag: In %s, unsupported data_type: 0x%x\n",
+				__func__, (unsigned int)data_type);
+		return;
+	}
+
+	err = diag_process_single_dci_pkt(buf, recd_bytes, APPS_DATA,
+					  DCI_LOCAL_PROC);
+	if (err)
+		return;
+
+	/* wake up all sleeping DCI clients which have some data */
+	diag_dci_wakeup_clients();
+	dci_check_drain_timer();
+}
+
+void diag_process_remote_dci_read_data(int index, void *buf, int recd_bytes)
+{
+	int read_bytes = 0, err = 0;
+	uint16_t dci_pkt_len;
+	struct diag_dci_header_t *header = NULL;
+	int header_len = sizeof(struct diag_dci_header_t);
+	int token = BRIDGE_TO_TOKEN(index);
+
+	if (!buf)
+		return;
+
+	diag_dci_record_traffic(recd_bytes, 0, 0, token);
+
+	if (!partial_pkt.processing)
+		goto start;
+
+	if (partial_pkt.remaining > recd_bytes) {
+		if ((partial_pkt.read_len + recd_bytes) >
+							(MAX_DCI_PACKET_SZ)) {
+			pr_err("diag: Invalid length %d, %d received in %s\n",
+			       partial_pkt.read_len, recd_bytes, __func__);
+			goto end;
+		}
+		memcpy(partial_pkt.data + partial_pkt.read_len, buf,
+								recd_bytes);
+		read_bytes += recd_bytes;
+		buf += read_bytes;
+		partial_pkt.read_len += recd_bytes;
+		partial_pkt.remaining -= recd_bytes;
+	} else {
+		if ((partial_pkt.read_len + partial_pkt.remaining) >
+							(MAX_DCI_PACKET_SZ)) {
+			pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
+			       partial_pkt.read_len,
+			       partial_pkt.remaining, __func__);
+			goto end;
+		}
+		memcpy(partial_pkt.data + partial_pkt.read_len, buf,
+						partial_pkt.remaining);
+		read_bytes += partial_pkt.remaining;
+		buf += read_bytes;
+		partial_pkt.read_len += partial_pkt.remaining;
+		partial_pkt.remaining = 0;
+	}
+
+	if (partial_pkt.remaining == 0) {
+		/*
+		 * Retrieve from the DCI control packet after the header = start
+		 * (1 byte) + version (1 byte) + length (2 bytes)
+		 */
+		diag_process_single_dci_pkt(partial_pkt.data + 4,
+				partial_pkt.read_len - header_len,
+				DCI_REMOTE_DATA, token);
+		partial_pkt.read_len = 0;
+		partial_pkt.total_len = 0;
+		partial_pkt.processing = 0;
+		goto start;
+	}
+	goto end;
+
+start:
+	while (read_bytes < recd_bytes) {
+		header = (struct diag_dci_header_t *)buf;
+		dci_pkt_len = header->length;
+
+		if (header->cmd_code != DCI_CONTROL_PKT_CODE &&
+			driver->num_dci_client == 0) {
+			read_bytes += header_len + dci_pkt_len;
+			buf += header_len + dci_pkt_len;
+			continue;
+		}
+
+		if (dci_pkt_len + header_len > MAX_DCI_PACKET_SZ) {
+			pr_err("diag: Invalid length in the dci packet field %d\n",
+								dci_pkt_len);
+			break;
+		}
+
+		if ((dci_pkt_len + header_len) > (recd_bytes - read_bytes)) {
+			partial_pkt.read_len = recd_bytes - read_bytes;
+			partial_pkt.total_len = dci_pkt_len + header_len;
+			partial_pkt.remaining = partial_pkt.total_len -
+						partial_pkt.read_len;
+			partial_pkt.processing = 1;
+			memcpy(partial_pkt.data, buf, partial_pkt.read_len);
+			break;
+		}
+		/*
+		 * Retrieve from the DCI control packet after the header = start
+		 * (1 byte) + version (1 byte) + length (2 bytes)
+		 */
+		err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
+						 DCI_REMOTE_DATA, DCI_MDM_PROC);
+		if (err)
+			break;
+		read_bytes += header_len + dci_pkt_len;
+		buf += header_len + dci_pkt_len; /* advance to next DCI pkt */
+	}
+end:
+	if (err)
+		return;
+	/* wake up all sleeping DCI clients which have some data */
+	diag_dci_wakeup_clients();
+	dci_check_drain_timer();
+	return;
+}
+
+/* Process the data read from the peripheral dci channels */
+void diag_dci_process_peripheral_data(struct diagfwd_info *p_info, void *buf,
+				      int recd_bytes)
+{
+	int read_bytes = 0, err = 0;
+	uint16_t dci_pkt_len;
+	struct diag_dci_pkt_header_t *header = NULL;
+	uint8_t recv_pkt_cmd_code;
+
+	if (!buf || !p_info)
+		return;
+
+	/*
+	 * Release wakeup source when there are no more clients to
+	 * process DCI data
+	 */
+	if (driver->num_dci_client == 0) {
+		diag_ws_reset(DIAG_WS_DCI);
+		return;
+	}
+
+	diag_dci_record_traffic(recd_bytes, p_info->type, p_info->peripheral,
+				DCI_LOCAL_PROC);
+	while (read_bytes < recd_bytes) {
+		header = (struct diag_dci_pkt_header_t *)buf;
+		recv_pkt_cmd_code = header->pkt_code;
+		dci_pkt_len = header->len;
+
+		/*
+		 * Check if the length of the current packet is lesser than the
+		 * remaining bytes in the received buffer. This includes space
+		 * for the Start byte (1), Version byte (1), length bytes (2)
+		 * and End byte (1)
+		 */
+		if ((dci_pkt_len + 5) > (recd_bytes - read_bytes)) {
+			pr_err("diag: Invalid length in %s, len: %d, dci_pkt_len: %d",
+				__func__, recd_bytes, dci_pkt_len);
+			diag_ws_release();
+			return;
+		}
+		/*
+		 * Retrieve from the DCI control packet after the header = start
+		 * (1 byte) + version (1 byte) + length (2 bytes)
+		 */
+		err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
+						  (int)p_info->peripheral,
+						  DCI_LOCAL_PROC);
+		if (err) {
+			diag_ws_release();
+			break;
+		}
+		read_bytes += 5 + dci_pkt_len;
+		buf += 5 + dci_pkt_len; /* advance to next DCI pkt */
+	}
+
+	if (err)
+		return;
+	/* wake up all sleeping DCI clients which have some data */
+	diag_dci_wakeup_clients();
+	dci_check_drain_timer();
+	return;
+}
+
+int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
+			    uint16_t log_code)
+{
+	uint16_t item_num;
+	uint8_t equip_id, *log_mask_ptr, byte_mask;
+	int byte_index, offset;
+
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
+		return 0;
+	}
+
+	equip_id = LOG_GET_EQUIP_ID(log_code);
+	item_num = LOG_GET_ITEM_NUM(log_code);
+	byte_index = item_num/8 + 2;
+	byte_mask = 0x01 << (item_num % 8);
+	offset = equip_id * 514;
+
+	if (offset + byte_index >= DCI_LOG_MASK_SIZE) {
+		pr_err("diag: In %s, invalid offset: %d, log_code: %d, byte_index: %d\n",
+				__func__, offset, log_code, byte_index);
+		return 0;
+	}
+
+	log_mask_ptr = entry->dci_log_mask;
+	log_mask_ptr = log_mask_ptr + offset + byte_index;
+	return ((*log_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
+
+}
+
+int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
+			      uint16_t event_id)
+{
+	uint8_t *event_mask_ptr, byte_mask;
+	int byte_index, bit_index;
+
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
+		return 0;
+	}
+
+	byte_index = event_id/8;
+	bit_index = event_id % 8;
+	byte_mask = 0x1 << bit_index;
+
+	if (byte_index >= DCI_EVENT_MASK_SIZE) {
+		pr_err("diag: In %s, invalid, event_id: %d, byte_index: %d\n",
+				__func__, event_id, byte_index);
+		return 0;
+	}
+
+	event_mask_ptr = entry->dci_event_mask;
+	event_mask_ptr = event_mask_ptr + byte_index;
+	return ((*event_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
+}
+
+static int diag_dci_filter_commands(struct diag_pkt_header_t *header)
+{
+	if (!header)
+		return -ENOMEM;
+
+	switch (header->cmd_code) {
+	case 0x7d: /* Msg Mask Configuration */
+	case 0x73: /* Log Mask Configuration */
+	case 0x81: /* Event Mask Configuration */
+	case 0x82: /* Event Mask Change */
+	case 0x60: /* Event Mask Toggle */
+		return 1;
+	}
+
+	if (header->cmd_code == 0x4b && header->subsys_id == 0x12) {
+		switch (header->subsys_cmd_code) {
+		case 0x60: /* Extended Event Mask Config */
+		case 0x61: /* Extended Msg Mask Config */
+		case 0x62: /* Extended Log Mask Config */
+		case 0x20C: /* Set current Preset ID */
+		case 0x20D: /* Get current Preset ID */
+		case 0x218: /* HDLC Disabled Command */
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static struct dci_pkt_req_entry_t *diag_register_dci_transaction(int uid,
+								 int client_id)
+{
+	struct dci_pkt_req_entry_t *entry = NULL;
+	entry = kzalloc(sizeof(struct dci_pkt_req_entry_t), GFP_KERNEL);
+	if (!entry)
+		return NULL;
+
+	driver->dci_tag++;
+	entry->client_id = client_id;
+	entry->uid = uid;
+	entry->tag = driver->dci_tag;
+	pr_debug("diag: Registering DCI cmd req, client_id: %d, uid: %d, tag:%d\n",
+				entry->client_id, entry->uid, entry->tag);
+	list_add_tail(&entry->track, &driver->dci_req_list);
+
+	return entry;
+}
+
+static struct dci_pkt_req_entry_t *diag_dci_get_request_entry(int tag)
+{
+	struct list_head *start, *temp;
+	struct dci_pkt_req_entry_t *entry = NULL;
+	list_for_each_safe(start, temp, &driver->dci_req_list) {
+		entry = list_entry(start, struct dci_pkt_req_entry_t, track);
+		if (entry->tag == tag)
+			return entry;
+	}
+	return NULL;
+}
+
+static int diag_dci_remove_req_entry(unsigned char *buf, int len,
+				     struct dci_pkt_req_entry_t *entry)
+{
+	uint16_t rsp_count = 0, delayed_rsp_id = 0;
+	if (!buf || len <= 0 || !entry) {
+		pr_err("diag: In %s, invalid input buf: %pK, len: %d, entry: %pK\n",
+			__func__, buf, len, entry);
+		return -EIO;
+	}
+
+	/* It is an immediate response, delete it from the table */
+	if (*buf != 0x80) {
+		list_del(&entry->track);
+		kfree(entry);
+		entry = NULL;
+		return 1;
+	}
+
+	/* It is a delayed response. Check if the length is valid */
+	if (len < MIN_DELAYED_RSP_LEN) {
+		pr_err("diag: Invalid delayed rsp packet length %d\n", len);
+		return -EINVAL;
+	}
+
+	/*
+	 * If the delayed response id field (uint16_t at byte 8) is 0 then
+	 * there is only one response and we can remove the request entry.
+	 */
+	delayed_rsp_id = *(uint16_t *)(buf + 8);
+	if (delayed_rsp_id == 0) {
+		list_del(&entry->track);
+		kfree(entry);
+		entry = NULL;
+		return 1;
+	}
+
+	/*
+	 * Check the response count field (uint16 at byte 10). The request
+	 * entry can be deleted if it is the last response in the sequence.
+	 * It is the last response in the sequence if the response count
+	 * is 1 or if the signed bit gets dropped.
+	 */
+	rsp_count = *(uint16_t *)(buf + 10);
+	if (rsp_count > 0 && rsp_count < 0x1000) {
+		list_del(&entry->track);
+		kfree(entry);
+		entry = NULL;
+		return 1;
+	}
+
+	return 0;
+}
+
+static void dci_process_ctrl_status(unsigned char *buf, int len, int token)
+{
+	struct diag_ctrl_dci_status *header = NULL;
+	unsigned char *temp = buf;
+	uint32_t read_len = 0;
+	uint8_t i;
+	int peripheral_mask, status;
+
+	if (!buf || (len < sizeof(struct diag_ctrl_dci_status))) {
+		pr_err("diag: In %s, invalid buf %pK or length: %d\n",
+		       __func__, buf, len);
+		return;
+	}
+
+	if (!VALID_DCI_TOKEN(token)) {
+		pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
+		return;
+	}
+
+	header = (struct diag_ctrl_dci_status *)temp;
+	temp += sizeof(struct diag_ctrl_dci_status);
+	read_len += sizeof(struct diag_ctrl_dci_status);
+
+	for (i = 0; i < header->count; i++) {
+		if (read_len > (len - 2)) {
+			pr_err("diag: In %s, Invalid length len: %d\n",
+			       __func__, len);
+			return;
+		}
+
+		switch (*(uint8_t *)temp) {
+		case PERIPHERAL_MODEM:
+			peripheral_mask = DIAG_CON_MPSS;
+			break;
+		case PERIPHERAL_LPASS:
+			peripheral_mask = DIAG_CON_LPASS;
+			break;
+		case PERIPHERAL_WCNSS:
+			peripheral_mask = DIAG_CON_WCNSS;
+			break;
+		case PERIPHERAL_SENSORS:
+			peripheral_mask = DIAG_CON_SENSORS;
+			break;
+		default:
+			pr_err("diag: In %s, unknown peripheral, peripheral: %d\n",
+				__func__, *(uint8_t *)temp);
+			return;
+		}
+		temp += sizeof(uint8_t);
+		read_len += sizeof(uint8_t);
+
+		status = (*(uint8_t *)temp) ? DIAG_STATUS_OPEN :
+							DIAG_STATUS_CLOSED;
+		temp += sizeof(uint8_t);
+		read_len += sizeof(uint8_t);
+		diag_dci_notify_client(peripheral_mask, status, token);
+	}
+}
+
+/*
+ * dci_process_ctrl_handshake_pkt - handle a DCI handshake control packet.
+ *
+ * If the packet carries the expected DCI_MAGIC value, mark the channel for
+ * this token as open and (re)send the cumulative log and event masks to
+ * the remote end.  Mask send failures are logged but not propagated.
+ */
+static void dci_process_ctrl_handshake_pkt(unsigned char *buf, int len,
+					   int token)
+{
+	struct diag_ctrl_dci_handshake_pkt *header = NULL;
+	unsigned char *temp = buf;
+	int err = 0;
+
+	if (!buf || (len < sizeof(struct diag_ctrl_dci_handshake_pkt)))
+		return;
+
+	if (!VALID_DCI_TOKEN(token))
+		return;
+
+	header = (struct diag_ctrl_dci_handshake_pkt *)temp;
+	if (header->magic == DCI_MAGIC) {
+		dci_channel_status[token].open = 1;
+		err = dci_ops_tbl[token].send_log_mask(token);
+		if (err) {
+			pr_err("diag: In %s, unable to send log mask to token: %d, err: %d\n",
+			       __func__, token, err);
+		}
+		err = dci_ops_tbl[token].send_event_mask(token);
+		if (err) {
+			pr_err("diag: In %s, unable to send event mask to token: %d, err: %d\n",
+			       __func__, token, err);
+		}
+	}
+}
+
+/*
+ * extract_dci_ctrl_pkt - entry point for incoming DCI control packets.
+ *
+ * Validates the buffer, skips the 1-byte control command code, then
+ * dispatches on the 4-byte control packet id to the status or handshake
+ * handler.  Note that the success path also falls through to the err
+ * label: the wakeup source taken by diag_ws_on_read() is always released
+ * via diag_ws_on_copy_fail(), since no client consumes these packets.
+ */
+void extract_dci_ctrl_pkt(unsigned char *buf, int len, int token)
+{
+	unsigned char *temp = buf;
+	uint32_t ctrl_pkt_id;
+
+	diag_ws_on_read(DIAG_WS_DCI, len);
+	if (!buf) {
+		pr_err("diag: Invalid buffer in %s\n", __func__);
+		goto err;
+	}
+
+	if (len < (sizeof(uint8_t) + sizeof(uint32_t))) {
+		pr_err("diag: In %s, invalid length %d\n", __func__, len);
+		goto err;
+	}
+
+	/* Skip the Control packet command code */
+	temp += sizeof(uint8_t);
+	len -= sizeof(uint8_t);
+	ctrl_pkt_id = *(uint32_t *)temp;
+	switch (ctrl_pkt_id) {
+	case DIAG_CTRL_MSG_DCI_CONNECTION_STATUS:
+		dci_process_ctrl_status(temp, len, token);
+		break;
+	case DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT:
+		dci_process_ctrl_handshake_pkt(temp, len, token);
+		break;
+	default:
+		pr_debug("diag: In %s, unknown control pkt %d\n",
+			 __func__, ctrl_pkt_id);
+		break;
+	}
+
+err:
+	/*
+	 * DCI control packets are not consumed by the clients. Mimic client
+	 * consumption by setting and clearing the wakeup source copy_count
+	 * explicitly.
+	 */
+	diag_ws_on_copy_fail(DIAG_WS_DCI);
+}
+
+/*
+ * extract_dci_pkt_rsp - route a DCI command response to the owning client.
+ *
+ * Parses the response command code (1 byte for an immediate response,
+ * 4 bytes for a delayed response) and the tag that links the response back
+ * to a registered request, looks up the client that issued the request,
+ * and appends a diag_dci_pkt_rsp_header_t followed by the raw response to
+ * the client's command-response buffer, growing it with krealloc() if the
+ * current capacity is insufficient.  The buffer is queued for delivery to
+ * userspace immediately rather than waiting for it to fill.
+ *
+ * Fix vs. original: delete_flag was declared uint8_t, so the
+ * "delete_flag < 0" error check could never be true and a negative return
+ * from diag_dci_remove_req_entry() was silently ignored.  It is now a
+ * plain int; the narrowing store into pkt_rsp_header.delete_flag only
+ * happens after the error check has passed.
+ */
+void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
+			 int token)
+{
+	int tag;
+	struct diag_dci_client_tbl *entry = NULL;
+	void *temp_buf = NULL;
+	uint8_t dci_cmd_code, cmd_code_len;
+	int delete_flag = 0;
+	uint32_t rsp_len = 0;
+	struct diag_dci_buffer_t *rsp_buf = NULL;
+	struct dci_pkt_req_entry_t *req_entry = NULL;
+	unsigned char *temp = buf;
+	int save_req_uid = 0;
+	struct diag_dci_pkt_rsp_header_t pkt_rsp_header;
+
+	if (!buf || len <= 0) {
+		pr_err("diag: Invalid pointer in %s\n", __func__);
+		return;
+	}
+	dci_cmd_code = *(uint8_t *)(temp);
+	if (dci_cmd_code == DCI_PKT_RSP_CODE) {
+		cmd_code_len = sizeof(uint8_t);
+	} else if (dci_cmd_code == DCI_DELAYED_RSP_CODE) {
+		cmd_code_len = sizeof(uint32_t);
+	} else {
+		pr_err("diag: In %s, invalid command code %d\n", __func__,
+								dci_cmd_code);
+		return;
+	}
+	if (len < (cmd_code_len + sizeof(int)))
+		return;
+	temp += cmd_code_len;
+	tag = *(int *)temp;
+	temp += sizeof(int);
+
+	/*
+	 * The size of the response is (total length) - (length of the command
+	 * code, the tag (int)
+	 */
+	if (len >= cmd_code_len + sizeof(int)) {
+		rsp_len = len - (cmd_code_len + sizeof(int));
+		if ((rsp_len == 0) || (rsp_len > (len - 5))) {
+			pr_err("diag: Invalid length in %s, len: %d, rsp_len: %d\n",
+					__func__, len, rsp_len);
+			return;
+		}
+	} else {
+		pr_err("diag:%s: Invalid length(%d) for calculating rsp_len\n",
+			__func__, len);
+		return;
+	}
+
+	mutex_lock(&driver->dci_mutex);
+	req_entry = diag_dci_get_request_entry(tag);
+	if (!req_entry) {
+		pr_err_ratelimited("diag: No matching client for DCI data\n");
+		mutex_unlock(&driver->dci_mutex);
+		return;
+	}
+
+	entry = diag_dci_get_client_entry(req_entry->client_id);
+	if (!entry) {
+		pr_err("diag: In %s, couldn't find client entry, id:%d\n",
+						__func__, req_entry->client_id);
+		mutex_unlock(&driver->dci_mutex);
+		return;
+	}
+
+	save_req_uid = req_entry->uid;
+	/* Remove the headers and send only the response to this function */
+	delete_flag = diag_dci_remove_req_entry(temp, rsp_len, req_entry);
+	if (delete_flag < 0) {
+		mutex_unlock(&driver->dci_mutex);
+		return;
+	}
+
+	mutex_lock(&entry->buffers[data_source].buf_mutex);
+	rsp_buf = entry->buffers[data_source].buf_cmd;
+
+	mutex_lock(&rsp_buf->data_mutex);
+	/*
+	 * Check if we can fit the data in the rsp buffer. The total length of
+	 * the rsp is the rsp length (write_len) + DCI_PKT_RSP_TYPE header (int)
+	 * + field for length (int) + delete_flag (uint8_t)
+	 */
+	if ((rsp_buf->data_len + 9 + rsp_len) > rsp_buf->capacity) {
+		pr_alert("diag: create capacity for pkt rsp\n");
+		rsp_buf->capacity += 9 + rsp_len;
+		temp_buf = krealloc(rsp_buf->data, rsp_buf->capacity,
+				    GFP_KERNEL);
+		if (!temp_buf) {
+			pr_err("diag: DCI realloc failed\n");
+			mutex_unlock(&rsp_buf->data_mutex);
+			mutex_unlock(&entry->buffers[data_source].buf_mutex);
+			mutex_unlock(&driver->dci_mutex);
+			return;
+		} else {
+			rsp_buf->data = temp_buf;
+		}
+	}
+
+	/* Fill in packet response header information */
+	pkt_rsp_header.type = DCI_PKT_RSP_TYPE;
+	/* Packet Length = Response Length + Length of uid field (int) */
+	pkt_rsp_header.length = rsp_len + sizeof(int);
+	pkt_rsp_header.delete_flag = delete_flag;
+	pkt_rsp_header.uid = save_req_uid;
+	memcpy(rsp_buf->data + rsp_buf->data_len, &pkt_rsp_header,
+		sizeof(struct diag_dci_pkt_rsp_header_t));
+	rsp_buf->data_len += sizeof(struct diag_dci_pkt_rsp_header_t);
+	memcpy(rsp_buf->data + rsp_buf->data_len, temp, rsp_len);
+	rsp_buf->data_len += rsp_len;
+	rsp_buf->data_source = data_source;
+
+	mutex_unlock(&rsp_buf->data_mutex);
+
+	/*
+	 * Add directly to the list for writing responses to the
+	 * userspace as these shouldn't be buffered and shouldn't wait
+	 * for log and event buffers to be full
+	 */
+	dci_add_buffer_to_list(entry, rsp_buf);
+	mutex_unlock(&entry->buffers[data_source].buf_mutex);
+	mutex_unlock(&driver->dci_mutex);
+}
+
+/*
+ * copy_dci_event - append one parsed event record to a client's buffer.
+ *
+ * Writes an int DCI_EVENT_TYPE marker followed by the len bytes in buf to
+ * the client's current buffer for data_source.  diag_dci_get_buffer()
+ * ensures there is room for sizeof(int) + len bytes; -ENOMEM is counted
+ * against the client's dropped_events health statistic, any other failure
+ * is logged as an invalid packet.
+ */
+static void copy_dci_event(unsigned char *buf, int len,
+			   struct diag_dci_client_tbl *client, int data_source)
+{
+	struct diag_dci_buffer_t *data_buffer = NULL;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+	int err = 0, total_len = 0;
+
+	if (!buf || !client) {
+		pr_err("diag: Invalid pointers in %s", __func__);
+		return;
+	}
+
+	/* Room needed: event type marker (int) + event payload */
+	total_len = sizeof(int) + len;
+
+	proc_buf = &client->buffers[data_source];
+	mutex_lock(&proc_buf->buf_mutex);
+	mutex_lock(&proc_buf->health_mutex);
+	err = diag_dci_get_buffer(client, data_source, total_len);
+	if (err) {
+		if (err == -ENOMEM)
+			proc_buf->health.dropped_events++;
+		else
+			pr_err("diag: In %s, invalid packet\n", __func__);
+		mutex_unlock(&proc_buf->health_mutex);
+		mutex_unlock(&proc_buf->buf_mutex);
+		return;
+	}
+
+	data_buffer = proc_buf->buf_curr;
+
+	proc_buf->health.received_events++;
+	mutex_unlock(&proc_buf->health_mutex);
+	mutex_unlock(&proc_buf->buf_mutex);
+
+	mutex_lock(&data_buffer->data_mutex);
+	*(int *)(data_buffer->data + data_buffer->data_len) = DCI_EVENT_TYPE;
+	data_buffer->data_len += sizeof(int);
+	memcpy(data_buffer->data + data_buffer->data_len, buf, len);
+	data_buffer->data_len += len;
+	data_buffer->data_source = data_source;
+	mutex_unlock(&data_buffer->data_mutex);
+
+}
+
+/*
+ * extract_dci_events - walk an incoming DCI event report and fan events
+ * out to interested clients.
+ *
+ * The packet is: 1 byte event command code, 2 bytes total length, then a
+ * series of event records.  Each record starts with a 16-bit
+ * event_id_packet whose low 12 bits are the event id, bit 15 selects a
+ * 2-byte (truncated) vs. 8-byte (full) timestamp, and bits 13-14 encode
+ * the payload length (value 3 means an explicit 1-byte length field
+ * follows the timestamp).  Each record is re-serialised into event_data
+ * with a full 8-byte timestamp and copied, under dci_mutex, to every
+ * client whose event mask for this token includes the event id.
+ */
+void extract_dci_events(unsigned char *buf, int len, int data_source, int token)
+{
+	uint16_t event_id, event_id_packet, length, temp_len;
+	uint8_t payload_len, payload_len_field;
+	uint8_t timestamp[8] = {0}, timestamp_len;
+	unsigned char event_data[MAX_EVENT_SIZE];
+	unsigned int total_event_len;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	if (!buf) {
+		pr_err("diag: In %s buffer is NULL\n", __func__);
+		return;
+	}
+	/*
+	 * 1 byte for event code and 2 bytes for the length field.
+	 * The length field indicates the total length removing the cmd_code
+	 * and the length field. The event parsing in that case should happen
+	 * till the end.
+	 */
+	if (len < 3) {
+		pr_err("diag: In %s invalid len: %d\n", __func__, len);
+		return;
+	}
+	length = *(uint16_t *)(buf + 1); /* total length of event series */
+	if ((length == 0) || (len != (length + 3))) {
+		pr_err("diag: Incoming dci event length: %d is invalid\n",
+			length);
+		return;
+	}
+	/*
+	 * Move directly to the start of the event series.
+	 * The event parsing should happen from start of event
+	 * series till the end.
+	 */
+	temp_len = 3;
+	while (temp_len < length) {
+		event_id_packet = *(uint16_t *)(buf + temp_len);
+		event_id = event_id_packet & 0x0FFF; /* extract 12 bits */
+		if (event_id_packet & 0x8000) {
+			/* The packet has the two smallest byte of the
+			 * timestamp
+			 */
+			timestamp_len = 2;
+		} else {
+			/* The packet has the full timestamp. The first event
+			 * will always have full timestamp. Save it in the
+			 * timestamp buffer and use it for subsequent events if
+			 * necessary.
+			 */
+			timestamp_len = 8;
+			if ((temp_len + timestamp_len + 2) <= len)
+				memcpy(timestamp, buf + temp_len + 2,
+					timestamp_len);
+			else {
+				pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
+						__func__, len, temp_len);
+				return;
+			}
+		}
+		/* 13th and 14th bit represent the payload length */
+		if (((event_id_packet & 0x6000) >> 13) == 3) {
+			/* Value 3: an explicit 1-byte payload length follows */
+			payload_len_field = 1;
+			if ((temp_len + timestamp_len + 3) <= len) {
+				payload_len = *(uint8_t *)
+					(buf + temp_len + 2 + timestamp_len);
+			} else {
+				pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
+						__func__, len, temp_len);
+				return;
+			}
+			if ((payload_len < (MAX_EVENT_SIZE - 13)) &&
+			((temp_len + timestamp_len + payload_len + 3) <= len)) {
+				/*
+				 * Copy the payload length and the payload
+				 * after skipping temp_len bytes for already
+				 * parsed packet, timestamp_len for timestamp
+				 * buffer, 2 bytes for event_id_packet.
+				 */
+				memcpy(event_data + 12, buf + temp_len + 2 +
+							timestamp_len, 1);
+				memcpy(event_data + 13, buf + temp_len + 2 +
+					timestamp_len + 1, payload_len);
+			} else {
+				pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n",
+				(MAX_EVENT_SIZE - 13), payload_len, temp_len);
+				return;
+			}
+		} else {
+			/* Values 0-2 encode the payload length directly */
+			payload_len_field = 0;
+			payload_len = (event_id_packet & 0x6000) >> 13;
+			/*
+			 * Copy the payload after skipping temp_len bytes
+			 * for already parsed packet, timestamp_len for
+			 * timestamp buffer, 2 bytes for event_id_packet.
+			 */
+			if ((payload_len < (MAX_EVENT_SIZE - 12)) &&
+			((temp_len + timestamp_len + payload_len + 2) <= len))
+				memcpy(event_data + 12, buf + temp_len + 2 +
+						timestamp_len, payload_len);
+			else {
+				pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n",
+				(MAX_EVENT_SIZE - 12), payload_len, temp_len);
+				return;
+			}
+		}
+
+		/* Before copying the data to userspace, check if we are still
+		 * within the buffer limit. This is an error case, don't count
+		 * it towards the health statistics.
+		 *
+		 * Here, the offset of 2 bytes(uint16_t) is for the
+		 * event_id_packet length
+		 */
+		temp_len += sizeof(uint16_t) + timestamp_len +
+						payload_len_field + payload_len;
+		if (temp_len > len) {
+			pr_err("diag: Invalid length in %s, len: %d, read: %d",
+						__func__, len, temp_len);
+			return;
+		}
+
+		/* 2 bytes for the event id & timestamp len is hard coded to 8,
+		   as individual events have full timestamp */
+		*(uint16_t *)(event_data) = 10 +
+					payload_len_field + payload_len;
+		*(uint16_t *)(event_data + 2) = event_id_packet & 0x7FFF;
+		memcpy(event_data + 4, timestamp, 8);
+		/* 2 bytes for the event length field which is added to
+		   the event data */
+		total_event_len = 2 + 10 + payload_len_field + payload_len;
+		/* parse through event mask tbl of each client and check mask */
+		mutex_lock(&driver->dci_mutex);
+		list_for_each_safe(start, temp, &driver->dci_client_list) {
+			entry = list_entry(start, struct diag_dci_client_tbl,
+									track);
+			if (entry->client_info.token != token)
+				continue;
+			if (diag_dci_query_event_mask(entry, event_id)) {
+				/* copy to client buffer */
+				copy_dci_event(event_data, total_event_len,
+					       entry, data_source);
+			}
+		}
+		mutex_unlock(&driver->dci_mutex);
+	}
+}
+
+/*
+ * copy_dci_log - append one log packet to a client's buffer.
+ *
+ * buf points at the full incoming log packet: command code (2 bytes),
+ * packet length (2 bytes), then the log itself whose length is read from
+ * offset 2.  An int DCI_LOG_TYPE marker plus the log (the packet minus
+ * its first 4 header bytes) is written to the client's current buffer for
+ * data_source.  -ENOMEM from diag_dci_get_buffer() is counted as a
+ * dropped log in the client's health statistics.
+ */
+static void copy_dci_log(unsigned char *buf, int len,
+			 struct diag_dci_client_tbl *client, int data_source)
+{
+	uint16_t log_length = 0;
+	struct diag_dci_buffer_t *data_buffer = NULL;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+	int err = 0, total_len = 0;
+
+	if (!buf || !client) {
+		pr_err("diag: Invalid pointers in %s", __func__);
+		return;
+	}
+
+	log_length = *(uint16_t *)(buf + 2);
+	/* Guard the "log_length + 4" arithmetic below against wraparound */
+	if (log_length > USHRT_MAX - 4) {
+		pr_err("diag: Integer overflow in %s, log_len: %d",
+				__func__, log_length);
+		return;
+	}
+	total_len = sizeof(int) + log_length;
+
+	/* Check if we are within the len. The check should include the
+	 * first 4 bytes for the Log code(2) and the length bytes (2)
+	 */
+	if ((log_length + sizeof(uint16_t) + 2) > len) {
+		pr_err("diag: Invalid length in %s, log_len: %d, len: %d",
+						__func__, log_length, len);
+		return;
+	}
+
+	proc_buf = &client->buffers[data_source];
+	mutex_lock(&proc_buf->buf_mutex);
+	mutex_lock(&proc_buf->health_mutex);
+	err = diag_dci_get_buffer(client, data_source, total_len);
+	if (err) {
+		if (err == -ENOMEM)
+			proc_buf->health.dropped_logs++;
+		else
+			pr_err("diag: In %s, invalid packet\n", __func__);
+		mutex_unlock(&proc_buf->health_mutex);
+		mutex_unlock(&proc_buf->buf_mutex);
+		return;
+	}
+
+	data_buffer = proc_buf->buf_curr;
+	proc_buf->health.received_logs++;
+	mutex_unlock(&proc_buf->health_mutex);
+	mutex_unlock(&proc_buf->buf_mutex);
+
+	mutex_lock(&data_buffer->data_mutex);
+	if (!data_buffer->data) {
+		mutex_unlock(&data_buffer->data_mutex);
+		return;
+	}
+
+	*(int *)(data_buffer->data + data_buffer->data_len) = DCI_LOG_TYPE;
+	data_buffer->data_len += sizeof(int);
+	/* Skip the 4 header bytes (cmd code + pkt length), copy the log */
+	memcpy(data_buffer->data + data_buffer->data_len, buf + sizeof(int),
+	       log_length);
+	data_buffer->data_len += log_length;
+	data_buffer->data_source = data_source;
+	mutex_unlock(&data_buffer->data_mutex);
+}
+
+/*
+ * extract_dci_log - fan an incoming log packet out to interested clients.
+ *
+ * Reads the 16-bit log code at offset 6 and, under dci_mutex, hands the
+ * whole packet to copy_dci_log() for every client on this token whose
+ * log mask includes that code.
+ */
+void extract_dci_log(unsigned char *buf, int len, int data_source, int token)
+{
+	uint16_t log_code, read_bytes = 0;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	if (!buf) {
+		pr_err("diag: In %s buffer is NULL\n", __func__);
+		return;
+	}
+	/*
+	 * The first eight bytes for the incoming log packet contains
+	 * Command code (2), the length of the packet (2), the length
+	 * of the log (2) and log code (2)
+	 */
+	if (len < 8) {
+		pr_err("diag: In %s invalid len: %d\n", __func__, len);
+		return;
+	}
+
+	log_code = *(uint16_t *)(buf + 6);
+	/* Header consumed so far; not referenced again in this function */
+	read_bytes += sizeof(uint16_t) + 6;
+
+	/* parse through log mask table of each client and check mask */
+	mutex_lock(&driver->dci_mutex);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != token)
+			continue;
+		if (diag_dci_query_log_mask(entry, log_code)) {
+			pr_debug("\t log code %x needed by client %d",
+				 log_code, entry->client->tgid);
+			/* copy to client buffer */
+			copy_dci_log(buf, len, entry, data_source);
+		}
+	}
+	mutex_unlock(&driver->dci_mutex);
+}
+
+/*
+ * diag_dci_channel_open_work - push the cumulative DCI masks when a
+ * local peripheral channel opens.
+ *
+ * Walks every local-proc client's log mask (16 entries, each 514 bytes:
+ * the byte at offset 1 of each entry appears to be a per-equipment-id
+ * dirty flag — confirm against the mask layout), merges the dirty flags
+ * into the composite mask, then pushes the updated log and event masks to
+ * userspace clients and to the peripherals.  Return values of the mask
+ * send ops are ignored here (ret is overwritten and dropped).
+ */
+void diag_dci_channel_open_work(struct work_struct *work)
+{
+	int i, j;
+	char dirty_bits[16];
+	uint8_t *client_log_mask_ptr;
+	uint8_t *log_mask_ptr;
+	int ret;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	/* Update apps and peripheral(s) with the dci log and event masks */
+	memset(dirty_bits, 0, 16 * sizeof(uint8_t));
+
+	/*
+	 * From each log entry used by each client, determine
+	 * which log entries in the cumulative logs that need
+	 * to be updated on the peripheral.
+	 */
+	mutex_lock(&driver->dci_mutex);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != DCI_LOCAL_PROC)
+			continue;
+		client_log_mask_ptr = entry->dci_log_mask;
+		for (j = 0; j < 16; j++) {
+			if (*(client_log_mask_ptr+1))
+				dirty_bits[j] = 1;
+			client_log_mask_ptr += 514;
+		}
+	}
+	mutex_unlock(&driver->dci_mutex);
+
+	mutex_lock(&dci_log_mask_mutex);
+	/* Update the appropriate dirty bits in the cumulative mask */
+	log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
+	for (i = 0; i < 16; i++) {
+		if (dirty_bits[i])
+			*(log_mask_ptr+1) = dirty_bits[i];
+
+		log_mask_ptr += 514;
+	}
+	mutex_unlock(&dci_log_mask_mutex);
+
+	/* Send updated mask to userspace clients */
+	diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+	/* Send updated log mask to peripherals */
+	ret = dci_ops_tbl[DCI_LOCAL_PROC].send_log_mask(DCI_LOCAL_PROC);
+
+	/* Send updated event mask to userspace clients */
+	diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+	/* Send updated event mask to peripheral */
+	ret = dci_ops_tbl[DCI_LOCAL_PROC].send_event_mask(DCI_LOCAL_PROC);
+}
+
+/*
+ * diag_dci_notify_client - signal DCI clients about a peripheral
+ * connection change.
+ *
+ * Updates the per-proc peripheral_status bitmap, then sends each client
+ * registered for this proc (and whose notification_list covers the
+ * peripheral) its chosen signal via send_sig_info(), with the mask and
+ * status OR'ed into si_int.  The stored tgid is cross-checked against the
+ * client task's tgid before signalling to catch stale/corrupt entries.
+ */
+void diag_dci_notify_client(int peripheral_mask, int data, int proc)
+{
+	int stat;
+	struct kernel_siginfo info;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	memset(&info, 0, sizeof(info));
+	info.si_code = SI_QUEUE;
+	info.si_int = (peripheral_mask | data);
+	if (data == DIAG_STATUS_OPEN)
+		dci_ops_tbl[proc].peripheral_status |= peripheral_mask;
+	else
+		dci_ops_tbl[proc].peripheral_status &= ~peripheral_mask;
+
+	/* Notify the DCI process that the peripheral DCI Channel is up */
+	mutex_lock(&driver->dci_mutex);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != proc)
+			continue;
+		if (entry->client_info.notification_list & peripheral_mask) {
+			info.si_signo = entry->client_info.signal_type;
+			if (entry->client &&
+				entry->tgid == entry->client->tgid) {
+				DIAG_LOG(DIAG_DEBUG_DCI,
+					"entry tgid = %d, dci client tgid = %d\n",
+					entry->tgid, entry->client->tgid);
+				stat = send_sig_info(
+					entry->client_info.signal_type,
+					&info, entry->client);
+				if (stat)
+					pr_err("diag: Err sending dci signal to client, signal data: 0x%x, stat: %d\n",
+							info.si_int, stat);
+			} else
+				pr_err("diag: client data is corrupted, signal data: 0x%x\n",
+						info.si_int);
+		}
+	}
+	mutex_unlock(&driver->dci_mutex);
+}
+
+/*
+ * diag_send_dci_pkt - frame a DCI request and deliver it to its handler.
+ *
+ * Builds CONTROL_CHAR + diag_dci_pkt_header_t + payload + CONTROL_CHAR in
+ * driver->apps_dci_buf.  If the matching registration is for APPS_DATA,
+ * the packet is looped back to the local Apps handler; otherwise it is
+ * written to the registered peripheral via diag_dci_write_proc().
+ *
+ * NOTE(review): the two length-error messages print max lengths derived
+ * from the *other* check's limit (DCI_REQ_BUF_SIZE vs. DIAG_MAX_REQ_SIZE)
+ * — the checks themselves look right, but the messages may mislead.
+ */
+static int diag_send_dci_pkt(struct diag_cmd_reg_t *entry,
+			     unsigned char *buf, int len, int tag)
+{
+	int i, status = DIAG_DCI_NO_ERROR;
+	uint32_t write_len = 0;
+	struct diag_dci_pkt_header_t header;
+
+	if (!entry)
+		return -EIO;
+
+	if (len < 1 || len > DIAG_MAX_REQ_SIZE) {
+		pr_err("diag: dci: In %s, invalid length %d, max_length: %d\n",
+		       __func__, len, (int)(DCI_REQ_BUF_SIZE - sizeof(header)));
+		return -EIO;
+	}
+
+	if ((len + sizeof(header) + sizeof(uint8_t)) > DCI_BUF_SIZE) {
+		pr_err("diag: dci: In %s, invalid length %d for apps_dci_buf, max_length: %d\n",
+		       __func__, len, DIAG_MAX_REQ_SIZE);
+		return -EIO;
+	}
+
+	mutex_lock(&driver->dci_mutex);
+	/* prepare DCI packet */
+	header.start = CONTROL_CHAR;
+	header.version = 1;
+	/* Declared length covers payload + tag (int) + trailing CONTROL_CHAR */
+	header.len = len + sizeof(int) + sizeof(uint8_t);
+	header.pkt_code = DCI_PKT_RSP_CODE;
+	header.tag = tag;
+	memcpy(driver->apps_dci_buf, &header, sizeof(header));
+	write_len += sizeof(header);
+	memcpy(driver->apps_dci_buf + write_len , buf, len);
+	write_len += len;
+	*(uint8_t *)(driver->apps_dci_buf + write_len) = CONTROL_CHAR;
+	write_len += sizeof(uint8_t);
+
+	/* This command is registered locally on the Apps */
+	if (entry->proc == APPS_DATA) {
+		diag_update_pkt_buffer(driver->apps_dci_buf, write_len,
+				       DCI_PKT_TYPE);
+		diag_update_sleeping_process(entry->pid, DCI_PKT_TYPE);
+		mutex_unlock(&driver->dci_mutex);
+		return DIAG_DCI_NO_ERROR;
+	}
+
+	/* Only forward if the registration targets a known peripheral id */
+	for (i = 0; i < NUM_PERIPHERALS; i++)
+		if (entry->proc == i) {
+			status = 1;
+			break;
+		}
+
+	if (status) {
+		status = diag_dci_write_proc(entry->proc,
+					     DIAG_DATA_TYPE,
+					     driver->apps_dci_buf,
+					     write_len);
+	} else {
+		pr_err("diag: Cannot send packet to peripheral %d",
+		       entry->proc);
+		status = DIAG_DCI_SEND_DATA_FAIL;
+	}
+	mutex_unlock(&driver->dci_mutex);
+	return status;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/*
+ * dci_get_buffer_from_bridge - allocate a DIAG_MDM_BUF_SIZE buffer from
+ * the token's DCI mempool, retrying up to 3 times with a ~5ms sleep
+ * between attempts.  Returns NULL if the pool stays exhausted.
+ */
+unsigned char *dci_get_buffer_from_bridge(int token)
+{
+	uint8_t retries = 0, max_retries = 3;
+	unsigned char *buf = NULL;
+	unsigned long flags;
+
+	do {
+		spin_lock_irqsave(&driver->dci_mempool_lock, flags);
+		buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE,
+				    dci_ops_tbl[token].mempool);
+		spin_unlock_irqrestore(&driver->dci_mempool_lock, flags);
+		if (!buf) {
+			usleep_range(5000, 5100);
+			retries++;
+		} else
+			break;
+	} while (retries < max_retries);
+
+	return buf;
+}
+
+/* Write buf to the bridge device that serves this remote token. */
+int diag_dci_write_bridge(int token, unsigned char *buf, int len)
+{
+	return diagfwd_bridge_write(TOKEN_TO_BRIDGE(token), buf, len);
+}
+
+/*
+ * diag_dci_write_done_bridge - bridge write-completion callback: return
+ * the buffer to the mempool belonging to the token mapped from the
+ * bridge index.
+ */
+int diag_dci_write_done_bridge(int index, unsigned char *buf, int len)
+{
+	unsigned long flags;
+	int token = BRIDGE_TO_TOKEN(index);
+	if (!VALID_DCI_TOKEN(token)) {
+		pr_err("diag: Invalid DCI token %d in %s\n", token, __func__);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&driver->dci_mempool_lock, flags);
+	diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+	spin_unlock_irqrestore(&driver->dci_mempool_lock, flags);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/*
+ * diag_send_dci_pkt_remote - frame a DCI request and push it over the
+ * bridge to a remote processor.
+ *
+ * Layout written: diag_dci_header_t, tag (int), the request payload,
+ * trailing CONTROL_CHAR.  The buffer comes from the token's mempool and
+ * is freed here only on write failure; on success the bridge completion
+ * path (diag_dci_write_done_bridge) returns it to the pool.
+ */
+static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
+				    int token)
+{
+	unsigned char *buf = NULL;
+	struct diag_dci_header_t dci_header;
+	int dci_header_size = sizeof(struct diag_dci_header_t);
+	int ret = DIAG_DCI_NO_ERROR;
+	uint32_t write_len = 0;
+	unsigned long flags;
+
+	if (!data)
+		return -EIO;
+
+	buf = dci_get_buffer_from_bridge(token);
+	if (!buf) {
+		pr_err("diag: In %s, unable to get dci buffers to write data\n",
+			__func__);
+		return -EAGAIN;
+	}
+
+	dci_header.start = CONTROL_CHAR;
+	dci_header.version = 1;
+	/*
+	 * The Length of the DCI packet = length of the command + tag (int) +
+	 * the command code size (uint8_t)
+	 */
+	dci_header.length = len + sizeof(int) + sizeof(uint8_t);
+	dci_header.cmd_code = DCI_PKT_RSP_CODE;
+
+	memcpy(buf + write_len, &dci_header, dci_header_size);
+	write_len += dci_header_size;
+	*(int *)(buf + write_len) = tag;
+	write_len += sizeof(int);
+	memcpy(buf + write_len, data, len);
+	write_len += len;
+	*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+	write_len += sizeof(uint8_t);
+
+	ret = diag_dci_write_bridge(token, buf, write_len);
+	if (ret) {
+		pr_err("diag: error writing dci pkt to remote proc, token: %d, err: %d\n",
+			token, ret);
+		spin_lock_irqsave(&driver->dci_mempool_lock, flags);
+		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+		spin_unlock_irqrestore(&driver->dci_mempool_lock, flags);
+	} else {
+		ret = DIAG_DCI_NO_ERROR;
+	}
+
+	return ret;
+}
+#else
+/* No bridge support compiled in: remote sends are silently successful. */
+static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
+				    int token)
+{
+	return DIAG_DCI_NO_ERROR;
+}
+#endif
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/*
+ * diag_dci_send_handshake_pkt - send a DCI handshake control packet over
+ * the bridge identified by index.
+ *
+ * Builds diag_dci_header_t + diag_ctrl_dci_handshake_pkt (carrying
+ * DCI_MAGIC) + trailing CONTROL_CHAR, writes it via the bridge, and on
+ * success arms the per-token wait timer for DCI_HANDSHAKE_WAIT_TIME ms.
+ * The mempool buffer is freed here only if the write fails; otherwise the
+ * bridge completion path frees it.
+ */
+int diag_dci_send_handshake_pkt(int index)
+{
+	int err = 0;
+	int token = BRIDGE_TO_TOKEN(index);
+	int write_len = 0;
+	struct diag_ctrl_dci_handshake_pkt ctrl_pkt;
+	unsigned char *buf = NULL;
+	struct diag_dci_header_t dci_header;
+	unsigned long flags;
+
+	if (!VALID_DCI_TOKEN(token)) {
+		pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
+		return -EINVAL;
+	}
+
+	buf = dci_get_buffer_from_bridge(token);
+	if (!buf) {
+		pr_err("diag: In %s, unable to get dci buffers to write data\n",
+			__func__);
+		return -EAGAIN;
+	}
+
+	dci_header.start = CONTROL_CHAR;
+	dci_header.version = 1;
+	/* Include the cmd code (uint8_t) in the length */
+	dci_header.length = sizeof(ctrl_pkt) + sizeof(uint8_t);
+	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+	memcpy(buf, &dci_header, sizeof(dci_header));
+	write_len += sizeof(dci_header);
+
+	ctrl_pkt.ctrl_pkt_id = DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT;
+	/*
+	 *  The control packet data length accounts for the version (uint32_t)
+	 *  of the packet and the magic number (uint32_t).
+	 */
+	ctrl_pkt.ctrl_pkt_data_len = 2 * sizeof(uint32_t);
+	ctrl_pkt.version = 1;
+	ctrl_pkt.magic = DCI_MAGIC;
+	memcpy(buf + write_len, &ctrl_pkt, sizeof(ctrl_pkt));
+	write_len += sizeof(ctrl_pkt);
+
+	*(uint8_t *)(buf + write_len) = CONTROL_CHAR;
+	write_len += sizeof(uint8_t);
+
+	err = diag_dci_write_bridge(token, buf, write_len);
+	if (err) {
+		pr_err("diag: error writing ack packet to remote proc, token: %d, err: %d\n",
+		       token, err);
+		spin_lock_irqsave(&driver->dci_mempool_lock, flags);
+		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+		spin_unlock_irqrestore(&driver->dci_mempool_lock, flags);
+		return err;
+	}
+
+	mod_timer(&(dci_channel_status[token].wait_time),
+		  jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));
+
+	return 0;
+}
+#else
+/* No bridge support compiled in: nothing to hand-shake with. */
+int diag_dci_send_handshake_pkt(int index)
+{
+	return 0;
+}
+#endif
+
+/*
+ * diag_dci_process_apps_pkt - answer DCI requests that the Apps processor
+ * handles itself (polling, version, build id, log-on-demand, STM, wrap
+ * controls, mobile id, download).
+ *
+ * The response payload is assembled directly in driver->apps_dci_buf
+ * after the DCI packet header; if any handler produced data (write_len >
+ * 0) the header is filled in and the packet is pushed to the client via
+ * diag_process_apps_dci_read_data().  Returns DIAG_DCI_NO_ERROR on a
+ * locally-generated response, DIAG_DCI_TABLE_ERR when the command is not
+ * an Apps command, or a negative errno.
+ */
+static int diag_dci_process_apps_pkt(struct diag_pkt_header_t *pkt_header,
+				     unsigned char *req_buf, int req_len,
+				     int tag)
+{
+	uint8_t cmd_code, subsys_id, i, goto_download = 0;
+	uint8_t header_len = sizeof(struct diag_dci_pkt_header_t);
+	uint16_t ss_cmd_code;
+	uint32_t write_len = 0;
+	unsigned char *dest_buf = driver->apps_dci_buf;
+	unsigned char *payload_ptr = driver->apps_dci_buf + header_len;
+	struct diag_dci_pkt_header_t dci_header;
+
+	if (!pkt_header || !req_buf || req_len <= 0 || tag < 0)
+		return -EIO;
+
+	cmd_code = pkt_header->cmd_code;
+	subsys_id = pkt_header->subsys_id;
+	ss_cmd_code = pkt_header->subsys_cmd_code;
+
+	if (cmd_code == DIAG_CMD_DOWNLOAD) {
+		/* Echo the command; the actual restart happens after the
+		 * response has been delivered (see goto_download below).
+		 */
+		*payload_ptr = DIAG_CMD_DOWNLOAD;
+		write_len = sizeof(uint8_t);
+		goto_download = 1;
+		goto fill_buffer;
+	} else if (cmd_code == DIAG_CMD_VERSION) {
+		if (chk_polling_response()) {
+			/* 55 zero bytes: empty version response */
+			for (i = 0; i < 55; i++, write_len++, payload_ptr++)
+				*(payload_ptr) = 0;
+			goto fill_buffer;
+		}
+	} else if (cmd_code == DIAG_CMD_EXT_BUILD) {
+		if (chk_polling_response()) {
+			*payload_ptr = DIAG_CMD_EXT_BUILD;
+			write_len = sizeof(uint8_t);
+			payload_ptr += sizeof(uint8_t);
+			for (i = 0; i < 8; i++, write_len++, payload_ptr++)
+				*(payload_ptr) = 0;
+			*(int *)(payload_ptr) = chk_config_get_id();
+			write_len += sizeof(int);
+			goto fill_buffer;
+		}
+	} else if (cmd_code == DIAG_CMD_LOG_ON_DMND) {
+		write_len = diag_cmd_log_on_demand(req_buf, req_len,
+						   payload_ptr,
+						   APPS_BUF_SIZE - header_len);
+		goto fill_buffer;
+	} else if (cmd_code != DIAG_CMD_DIAG_SUBSYS) {
+		return DIAG_DCI_TABLE_ERR;
+	}
+
+	/* From here on: DIAG_CMD_DIAG_SUBSYS commands only */
+	if (subsys_id == DIAG_SS_DIAG) {
+		if (ss_cmd_code == DIAG_DIAG_MAX_PKT_SZ) {
+			memcpy(payload_ptr, pkt_header,
+					sizeof(struct diag_pkt_header_t));
+			write_len = sizeof(struct diag_pkt_header_t);
+			*(uint32_t *)(payload_ptr + write_len) =
+							DIAG_MAX_REQ_SIZE;
+			write_len += sizeof(uint32_t);
+		} else if (ss_cmd_code == DIAG_DIAG_STM) {
+			write_len = diag_process_stm_cmd(req_buf, payload_ptr);
+		}
+	} else if (subsys_id == DIAG_SS_PARAMS) {
+		if (ss_cmd_code == DIAG_DIAG_POLL) {
+			if (chk_polling_response()) {
+				memcpy(payload_ptr, pkt_header,
+					sizeof(struct diag_pkt_header_t));
+				write_len = sizeof(struct diag_pkt_header_t);
+				payload_ptr += write_len;
+				for (i = 0; i < 12; i++, write_len++) {
+					*(payload_ptr) = 0;
+					payload_ptr++;
+				}
+			}
+		} else if (ss_cmd_code == DIAG_DEL_RSP_WRAP) {
+			memcpy(payload_ptr, pkt_header,
+					sizeof(struct diag_pkt_header_t));
+			write_len = sizeof(struct diag_pkt_header_t);
+			*(int *)(payload_ptr + write_len) = wrap_enabled;
+			write_len += sizeof(int);
+		} else if (ss_cmd_code == DIAG_DEL_RSP_WRAP_CNT) {
+			wrap_enabled = true;
+			memcpy(payload_ptr, pkt_header,
+					sizeof(struct diag_pkt_header_t));
+			write_len = sizeof(struct diag_pkt_header_t);
+			*(uint16_t *)(payload_ptr + write_len) = wrap_count;
+			write_len += sizeof(uint16_t);
+		} else if (ss_cmd_code == DIAG_EXT_MOBILE_ID) {
+			write_len = diag_cmd_get_mobile_id(req_buf, req_len,
+						   payload_ptr,
+						   APPS_BUF_SIZE - header_len);
+		}
+	}
+
+fill_buffer:
+	if (write_len > 0) {
+		/* Check if we are within the range of the buffer*/
+		if (write_len + header_len > DIAG_MAX_REQ_SIZE) {
+			pr_err("diag: In %s, invalid length %d\n", __func__,
+						write_len + header_len);
+			return -ENOMEM;
+		}
+		dci_header.start = CONTROL_CHAR;
+		dci_header.version = 1;
+		/*
+		 * Length of the rsp pkt = actual data len + pkt rsp code
+		 * (uint8_t) + tag (int)
+		 */
+		dci_header.len = write_len + sizeof(uint8_t) + sizeof(int);
+		dci_header.pkt_code = DCI_PKT_RSP_CODE;
+		dci_header.tag = tag;
+		driver->in_busy_dcipktdata = 1;
+		memcpy(dest_buf, &dci_header, header_len);
+		/*
+		 * dest_buf + 4 skips the leading header bytes before the
+		 * length-counted region — presumably start, version and the
+		 * 2-byte len field; confirm against diag_dci_pkt_header_t.
+		 */
+		diag_process_apps_dci_read_data(DCI_PKT_TYPE, dest_buf + 4,
+						dci_header.len);
+		driver->in_busy_dcipktdata = 0;
+
+		if (goto_download) {
+			/*
+			 * Sleep for sometime so that the response reaches the
+			 * client. The value 5000 empirically as an optimum
+			 * time for the response to reach the client.
+			 */
+			usleep_range(5000, 5100);
+			/* call download API */
+			kernel_restart(NULL);
+		}
+		return DIAG_DCI_NO_ERROR;
+	}
+
+	return DIAG_DCI_TABLE_ERR;
+}
+
+/*
+ * diag_process_dci_pkt_rsp - process a packet request/response
+ * transaction arriving from a DCI client.
+ *
+ * Parses the dci_pkt_req_t prefix (uid + client id) and the embedded diag
+ * command header, validates the client and that the command is permitted
+ * on DCI, registers a transaction (tag) for matching the response, then
+ * routes the request: to a remote processor if the client's token is
+ * remote, to a local Apps handler, or to the peripheral found in the
+ * command registration table.  If the Apps DCI response buffer is busy,
+ * waits up to 3 x 10ms before dropping the request with -EAGAIN.
+ */
+static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
+{
+	int ret = DIAG_DCI_TABLE_ERR;
+	int common_cmd = 0;
+	struct diag_pkt_header_t *header = NULL;
+	unsigned char *temp = buf;
+	unsigned char *req_buf = NULL;
+	uint8_t retry_count = 0, max_retries = 3;
+	uint32_t read_len = 0, req_len = len;
+	struct dci_pkt_req_entry_t *req_entry = NULL;
+	struct diag_dci_client_tbl *dci_entry = NULL;
+	struct dci_pkt_req_t req_hdr;
+	struct diag_cmd_reg_t *reg_item;
+	struct diag_cmd_reg_entry_t reg_entry;
+	struct diag_cmd_reg_entry_t *temp_entry;
+
+	if (!buf)
+		return -EIO;
+
+	if (len <= sizeof(struct dci_pkt_req_t) || len > DCI_REQ_BUF_SIZE) {
+		pr_err("diag: dci: Invalid length %d len in %s", len, __func__);
+		return -EIO;
+	}
+
+	req_hdr = *(struct dci_pkt_req_t *)temp;
+	temp += sizeof(struct dci_pkt_req_t);
+	read_len += sizeof(struct dci_pkt_req_t);
+	req_len -= sizeof(struct dci_pkt_req_t);
+	req_buf = temp; /* Start of the Request */
+	header = (struct diag_pkt_header_t *)temp;
+	temp += sizeof(struct diag_pkt_header_t);
+	read_len += sizeof(struct diag_pkt_header_t);
+	if (read_len >= DCI_REQ_BUF_SIZE) {
+		pr_err("diag: dci: In %s, invalid read_len: %d\n", __func__,
+		       read_len);
+		return -EIO;
+	}
+
+	mutex_lock(&driver->dci_mutex);
+	dci_entry = diag_dci_get_client_entry(req_hdr.client_id);
+	if (!dci_entry) {
+		pr_err("diag: Invalid client %d in %s\n",
+		       req_hdr.client_id, __func__);
+		mutex_unlock(&driver->dci_mutex);
+		return DIAG_DCI_NO_REG;
+	}
+
+	/* Check if the command is allowed on DCI */
+	if (diag_dci_filter_commands(header)) {
+		pr_debug("diag: command not supported %d %d %d",
+			 header->cmd_code, header->subsys_id,
+			 header->subsys_cmd_code);
+		mutex_unlock(&driver->dci_mutex);
+		return DIAG_DCI_SEND_DATA_FAIL;
+	}
+
+	common_cmd = diag_check_common_cmd(header);
+	if (common_cmd < 0) {
+		pr_debug("diag: error in checking common command, %d\n",
+			 common_cmd);
+		mutex_unlock(&driver->dci_mutex);
+		return DIAG_DCI_SEND_DATA_FAIL;
+	}
+
+	/*
+	 * Previous packet is yet to be consumed by the client. Wait
+	 * till the buffer is free.
+	 */
+	while (retry_count < max_retries) {
+		retry_count++;
+		if (driver->in_busy_dcipktdata)
+			usleep_range(10000, 10100);
+		else
+			break;
+	}
+	/* The buffer is still busy */
+	if (driver->in_busy_dcipktdata) {
+		pr_err("diag: In %s, apps dci buffer is still busy. Dropping packet\n",
+								__func__);
+		mutex_unlock(&driver->dci_mutex);
+		return -EAGAIN;
+	}
+
+	/* Register this new DCI packet */
+	req_entry = diag_register_dci_transaction(req_hdr.uid,
+						  req_hdr.client_id);
+	if (!req_entry) {
+		pr_alert("diag: registering new DCI transaction failed\n");
+		mutex_unlock(&driver->dci_mutex);
+		return DIAG_DCI_NO_REG;
+	}
+	mutex_unlock(&driver->dci_mutex);
+
+	/*
+	 * If the client has registered for remote data, route the packet to the
+	 * remote processor
+	 */
+	if (dci_entry->client_info.token > 0) {
+		ret = diag_send_dci_pkt_remote(req_buf, req_len, req_entry->tag,
+					       dci_entry->client_info.token);
+		return ret;
+	}
+
+	/* Check if it is a dedicated Apps command */
+	ret = diag_dci_process_apps_pkt(header, req_buf, req_len,
+					req_entry->tag);
+	/* Common commands are also forwarded to the peripheral below */
+	if ((ret == DIAG_DCI_NO_ERROR && !common_cmd) || ret < 0)
+		return ret;
+
+	/* Look the command up in the registration table; hi == lo here,
+	 * so only exact subsys_cmd_code matches are searched.
+	 */
+	reg_entry.cmd_code = header->cmd_code;
+	reg_entry.subsys_id = header->subsys_id;
+	reg_entry.cmd_code_hi = header->subsys_cmd_code;
+	reg_entry.cmd_code_lo = header->subsys_cmd_code;
+
+	mutex_lock(&driver->cmd_reg_mutex);
+	temp_entry = diag_cmd_search(&reg_entry, ALL_PROC);
+	if (temp_entry) {
+		reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
+								entry);
+		ret = diag_send_dci_pkt(reg_item, req_buf, req_len,
+					req_entry->tag);
+	} else {
+		DIAG_LOG(DIAG_DEBUG_DCI, "Command not found: %02x %02x %02x\n",
+				reg_entry.cmd_code, reg_entry.subsys_id,
+				reg_entry.cmd_code_hi);
+	}
+	mutex_unlock(&driver->cmd_reg_mutex);
+
+	return ret;
+}
+
+/*
+ * diag_process_dci_transaction - entry point for a DCI transaction sent
+ * by userspace.  The first int of @buf selects the transaction type: a
+ * positive value is a packet request/response (delegated to
+ * diag_process_dci_pkt_rsp()), DCI_LOG_TYPE updates the client's log
+ * mask and DCI_EVENT_TYPE updates the client's event mask.
+ *
+ * @buf: raw transaction buffer from userspace
+ * @len: number of valid bytes in @buf
+ *
+ * Returns DIAG_DCI_NO_ERROR on success, a negative errno or a DCI
+ * error code otherwise.
+ */
+int diag_process_dci_transaction(unsigned char *buf, int len)
+{
+	unsigned char *temp = buf;
+	uint16_t log_code, item_num;
+	int ret = -1, found = 0, client_id = 0, client_token = 0;
+	int count, set_mask, num_codes, bit_index, event_id, offset = 0;
+	unsigned int byte_index, read_len = 0;
+	uint8_t equip_id, *log_mask_ptr, *head_log_mask_ptr, byte_mask;
+	uint8_t *event_mask_ptr;
+	struct diag_dci_client_tbl *dci_entry = NULL;
+
+	/*
+	 * NOTE(review): len is a signed int and sizeof() is size_t, so this
+	 * comparison promotes len to unsigned and a negative len would pass.
+	 * The per-type length checks below reject negative values; confirm
+	 * every caller passes a non-negative len.
+	 */
+	if (!temp || len < sizeof(int)) {
+		pr_err("diag: Invalid input in %s\n", __func__);
+		return -EINVAL;
+	}
+
+	/* This is Pkt request/response transaction */
+	if (*(int *)temp > 0) {
+		return diag_process_dci_pkt_rsp(buf, len);
+	} else if (*(int *)temp == DCI_LOG_TYPE) {
+		/* Minimum length of a log mask config is 12 + 2 bytes for
+		   atleast one log code to be set or reset */
+		if (len < DCI_LOG_CON_MIN_LEN || len > USER_SPACE_DATA) {
+			pr_err("diag: dci: Invalid length in %s\n", __func__);
+			return -EIO;
+		}
+
+		/* Extract each log code and put in client table */
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		client_id = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		set_mask = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		num_codes = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+
+		/* find client table entry */
+		mutex_lock(&driver->dci_mutex);
+		dci_entry = diag_dci_get_client_entry(client_id);
+		if (!dci_entry) {
+			pr_err("diag: In %s, invalid client\n", __func__);
+			mutex_unlock(&driver->dci_mutex);
+			return ret;
+		}
+		client_token = dci_entry->client_info.token;
+
+		/* Each log code occupies 2 bytes after the 8-byte header */
+		if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
+			pr_err("diag: dci: Invalid number of log codes %d\n",
+								num_codes);
+			mutex_unlock(&driver->dci_mutex);
+			return -EIO;
+		}
+
+		head_log_mask_ptr = dci_entry->dci_log_mask;
+		if (!head_log_mask_ptr) {
+			pr_err("diag: dci: Invalid Log mask pointer in %s\n",
+								__func__);
+			mutex_unlock(&driver->dci_mutex);
+			return -ENOMEM;
+		}
+		pr_debug("diag: head of dci log mask %pK\n", head_log_mask_ptr);
+		count = 0; /* iterator for extracting log codes */
+
+		while (count < num_codes) {
+			if (read_len + sizeof(uint16_t) > len) {
+				pr_err("diag: dci: Invalid length for log type in %s",
+								__func__);
+				mutex_unlock(&driver->dci_mutex);
+				return -EIO;
+			}
+			log_code = *(uint16_t *)temp;
+			equip_id = LOG_GET_EQUIP_ID(log_code);
+			item_num = LOG_GET_ITEM_NUM(log_code);
+			/* +2 skips the equip id and dirty bytes heading each block */
+			byte_index = item_num/8 + 2;
+			if (byte_index >= (DCI_MAX_ITEMS_PER_LOG_CODE+2)) {
+				pr_err("diag: dci: Log type, invalid byte index\n");
+				mutex_unlock(&driver->dci_mutex);
+				return ret;
+			}
+			byte_mask = 0x01 << (item_num % 8);
+			/*
+			 * Parse through log mask table and find
+			 * relevant range
+			 */
+			log_mask_ptr = head_log_mask_ptr;
+			found = 0;
+			offset = 0;
+			/*
+			 * Each equip id block in the mask table spans 514
+			 * bytes: 1 equip id byte + 1 dirty byte + the item
+			 * mask (presumably DCI_MAX_ITEMS_PER_LOG_CODE = 512
+			 * bytes -- confirm against create_dci_log_mask_tbl()).
+			 */
+			while (log_mask_ptr && (offset < DCI_LOG_MASK_SIZE)) {
+				if (*log_mask_ptr == equip_id) {
+					found = 1;
+					pr_debug("diag: find equip id = %x at %pK\n",
+						 equip_id, log_mask_ptr);
+					break;
+				} else {
+					pr_debug("diag: did not find equip id = %x at %d\n",
+						 equip_id, *log_mask_ptr);
+					log_mask_ptr += 514;
+					offset += 514;
+				}
+			}
+			if (!found) {
+				pr_err("diag: dci equip id not found\n");
+				mutex_unlock(&driver->dci_mutex);
+				return ret;
+			}
+			*(log_mask_ptr+1) = 1; /* set the dirty byte */
+			log_mask_ptr = log_mask_ptr + byte_index;
+			if (set_mask)
+				*log_mask_ptr |= byte_mask;
+			else
+				*log_mask_ptr &= ~byte_mask;
+			/* add to cumulative mask */
+			update_dci_cumulative_log_mask(
+				offset, byte_index,
+				byte_mask, client_token);
+			temp += 2;
+			read_len += 2;
+			count++;
+			ret = DIAG_DCI_NO_ERROR;
+		}
+		/* send updated mask to userspace clients */
+		if (client_token == DCI_LOCAL_PROC)
+			diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+		/* send updated mask to peripherals */
+		ret = dci_ops_tbl[client_token].send_log_mask(client_token);
+		mutex_unlock(&driver->dci_mutex);
+	} else if (*(int *)temp == DCI_EVENT_TYPE) {
+		/* Minimum length of a event mask config is 12 + 4 bytes for
+		  atleast one event id to be set or reset. */
+		if (len < DCI_EVENT_CON_MIN_LEN || len > USER_SPACE_DATA) {
+			pr_err("diag: dci: Invalid length in %s\n", __func__);
+			return -EIO;
+		}
+
+		/* Extract each event id and put in client table */
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		client_id = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		set_mask = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		num_codes = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+
+		/* find client table entry */
+		mutex_lock(&driver->dci_mutex);
+		dci_entry = diag_dci_get_client_entry(client_id);
+		if (!dci_entry) {
+			pr_err("diag: In %s, invalid client\n", __func__);
+			mutex_unlock(&driver->dci_mutex);
+			return ret;
+		}
+		client_token = dci_entry->client_info.token;
+
+		/* Check for positive number of event ids. Also, the number of
+		   event ids should fit in the buffer along with set_mask and
+		   num_codes which are 4 bytes each */
+		if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
+			pr_err("diag: dci: Invalid number of event ids %d\n",
+								num_codes);
+			mutex_unlock(&driver->dci_mutex);
+			return -EIO;
+		}
+
+		event_mask_ptr = dci_entry->dci_event_mask;
+		if (!event_mask_ptr) {
+			pr_err("diag: dci: Invalid event mask pointer in %s\n",
+								__func__);
+			mutex_unlock(&driver->dci_mutex);
+			return -ENOMEM;
+		}
+		pr_debug("diag: head of dci event mask %pK\n", event_mask_ptr);
+		count = 0; /* iterator for extracting event ids */
+		while (count < num_codes) {
+			if (read_len + sizeof(int) > len) {
+				pr_err("diag: dci: Invalid length for event type in %s",
+								__func__);
+				mutex_unlock(&driver->dci_mutex);
+				return -EIO;
+			}
+			event_id = *(int *)temp;
+			byte_index = event_id/8;
+			if (byte_index >= DCI_EVENT_MASK_SIZE) {
+				pr_err("diag: dci: Event type, invalid byte index\n");
+				mutex_unlock(&driver->dci_mutex);
+				return ret;
+			}
+			bit_index = event_id % 8;
+			byte_mask = 0x1 << bit_index;
+			/*
+			 * Parse through event mask table and set
+			 * relevant byte & bit combination
+			 */
+			if (set_mask)
+				*(event_mask_ptr + byte_index) |= byte_mask;
+			else
+				*(event_mask_ptr + byte_index) &= ~byte_mask;
+			/* add to cumulative mask */
+			update_dci_cumulative_event_mask(byte_index, byte_mask,
+							 client_token);
+			temp += sizeof(int);
+			read_len += sizeof(int);
+			count++;
+			ret = DIAG_DCI_NO_ERROR;
+		}
+		/* send updated mask to userspace clients */
+		if (dci_entry->client_info.token == DCI_LOCAL_PROC)
+			diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+		/* send updated mask to peripherals */
+		ret = dci_ops_tbl[client_token].send_event_mask(client_token);
+		mutex_unlock(&driver->dci_mutex);
+	} else {
+		pr_alert("diag: Incorrect DCI transaction\n");
+	}
+	return ret;
+}
+
+
+/*
+ * Look up the DCI client table entry with the given client id.
+ * Returns the matching entry, or NULL when no such client is
+ * registered.  NOTE(review): callers appear to hold
+ * driver->dci_mutex while iterating this list -- confirm.
+ */
+struct diag_dci_client_tbl *diag_dci_get_client_entry(int client_id)
+{
+	struct list_head *pos, *next;
+	struct diag_dci_client_tbl *client;
+
+	list_for_each_safe(pos, next, &driver->dci_client_list) {
+		client = list_entry(pos, struct diag_dci_client_tbl, track);
+		if (client->client_info.client_id != client_id)
+			continue;
+		return client;
+	}
+	return NULL;
+}
+
+/*
+ * Look up the DCI client entry whose owning process (thread group id)
+ * matches @tgid and is still alive.  Returns the entry, or NULL when no
+ * live client matches.
+ *
+ * Fix: find_get_pid() and get_pid_task() both take references that the
+ * original code never dropped, leaking a struct pid and a task_struct
+ * reference per iteration.  Release them with put_pid()/put_task_struct()
+ * on every path.
+ */
+struct diag_dci_client_tbl *dci_lookup_client_entry_pid(int tgid)
+{
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	struct pid *pid_struct = NULL;
+	struct task_struct *task_s = NULL;
+
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		pid_struct = find_get_pid(entry->tgid);
+		if (!pid_struct) {
+			DIAG_LOG(DIAG_DEBUG_DCI,
+			"diag: Exited pid (%d) doesn't match dci client of pid (%d)\n",
+			tgid, entry->tgid);
+			continue;
+		}
+		task_s = get_pid_task(pid_struct, PIDTYPE_PID);
+		/* Drop the pid reference taken by find_get_pid() */
+		put_pid(pid_struct);
+		if (!task_s) {
+			DIAG_LOG(DIAG_DEBUG_DCI,
+				"diag: valid task doesn't exist for pid = %d\n",
+				entry->tgid);
+			continue;
+		}
+		if (task_s == entry->client &&
+		    entry->client->tgid == tgid) {
+			/* Drop the task reference taken by get_pid_task() */
+			put_task_struct(task_s);
+			return entry;
+		}
+		put_task_struct(task_s);
+	}
+	return NULL;
+}
+
+/*
+ * update_dci_cumulative_event_mask - recompute one byte of the composite
+ * event mask for @token after a client changed its own mask.
+ *
+ * @offset:    byte offset of the changed mask byte
+ * @byte_mask: bit(s) within that byte that changed
+ * @token:     processor token whose composite mask is updated
+ *
+ * The bits stay set in the composite mask if at least one client
+ * registered for @token still has them set, and are cleared otherwise.
+ */
+void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask, int token)
+{
+	uint8_t *event_mask_ptr, *update_ptr = NULL;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	bool is_set = false;
+
+	mutex_lock(&dci_event_mask_mutex);
+	update_ptr = dci_ops_tbl[token].event_mask_composite;
+	if (!update_ptr) {
+		mutex_unlock(&dci_event_mask_mutex);
+		return;
+	}
+	update_ptr += offset;
+	/* Scan every client registered for this token */
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != token)
+			continue;
+		event_mask_ptr = entry->dci_event_mask;
+		event_mask_ptr += offset;
+		if ((*event_mask_ptr & byte_mask) == byte_mask) {
+			is_set = true;
+			/* break even if one client has the event mask set */
+			break;
+		}
+	}
+	if (is_set == false)
+		*update_ptr &= ~byte_mask;
+	else
+		*update_ptr |= byte_mask;
+	mutex_unlock(&dci_event_mask_mutex);
+}
+
+/*
+ * diag_dci_invalidate_cumulative_event_mask - rebuild the composite
+ * event mask for @token from scratch by clearing it and OR-ing in every
+ * registered client's event mask.  Used when a client's mask is cleared
+ * or the client goes away.
+ */
+void diag_dci_invalidate_cumulative_event_mask(int token)
+{
+	int i = 0;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	uint8_t *event_mask_ptr, *update_ptr = NULL;
+
+	mutex_lock(&dci_event_mask_mutex);
+	update_ptr = dci_ops_tbl[token].event_mask_composite;
+	if (!update_ptr) {
+		mutex_unlock(&dci_event_mask_mutex);
+		return;
+	}
+
+	/* Reset the composite mask, then fold every client's mask back in */
+	create_dci_event_mask_tbl(update_ptr);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != token)
+			continue;
+		event_mask_ptr = entry->dci_event_mask;
+		for (i = 0; i < DCI_EVENT_MASK_SIZE; i++)
+			*(update_ptr+i) |= *(event_mask_ptr+i);
+	}
+	mutex_unlock(&dci_event_mask_mutex);
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/*
+ * diag_send_dci_event_mask_remote - push the composite event mask for a
+ * remote processor (@token) over the bridge as a DCI control packet:
+ * DCI header + event mask control header + mask bytes + terminator.
+ *
+ * Returns DIAG_DCI_NO_ERROR on success, a negative errno on failure.
+ * On a bridge write failure the borrowed buffer is returned to the
+ * mempool here; on success the write path owns it.
+ */
+int diag_send_dci_event_mask_remote(int token)
+{
+	unsigned char *buf = NULL;
+	struct diag_dci_header_t dci_header;
+	struct diag_ctrl_event_mask event_mask;
+	int dci_header_size = sizeof(struct diag_dci_header_t);
+	int event_header_size = sizeof(struct diag_ctrl_event_mask);
+	int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
+	unsigned char *event_mask_ptr = NULL;
+	uint32_t write_len = 0;
+	unsigned long flags;
+
+	mutex_lock(&dci_event_mask_mutex);
+	event_mask_ptr = dci_ops_tbl[token].event_mask_composite;
+	if (!event_mask_ptr) {
+		mutex_unlock(&dci_event_mask_mutex);
+		return -EINVAL;
+	}
+	buf = dci_get_buffer_from_bridge(token);
+	if (!buf) {
+		pr_err("diag: In %s, unable to get dci buffers to write data\n",
+			__func__);
+		mutex_unlock(&dci_event_mask_mutex);
+		return -EAGAIN;
+	}
+
+	/* Frame the DCI header */
+	dci_header.start = CONTROL_CHAR;
+	dci_header.version = 1;
+	dci_header.length = event_header_size + DCI_EVENT_MASK_SIZE + 1;
+	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+
+	event_mask.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
+	event_mask.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
+	event_mask.stream_id = DCI_MASK_STREAM;
+	event_mask.status = DIAG_CTRL_MASK_VALID;
+	event_mask.event_config = 0; /* event config */
+	event_mask.event_mask_size = DCI_EVENT_MASK_SIZE;
+	/* event_config is 1 iff at least one event bit is set anywhere */
+	for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
+		if (event_mask_ptr[i] != 0) {
+			event_mask.event_config = 1;
+			break;
+		}
+	}
+	memcpy(buf + write_len, &dci_header, dci_header_size);
+	write_len += dci_header_size;
+	memcpy(buf + write_len, &event_mask, event_header_size);
+	write_len += event_header_size;
+	memcpy(buf + write_len, event_mask_ptr, DCI_EVENT_MASK_SIZE);
+	write_len += DCI_EVENT_MASK_SIZE;
+	*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+	write_len += sizeof(uint8_t);
+	err = diag_dci_write_bridge(token, buf, write_len);
+	if (err) {
+		pr_err("diag: error writing event mask to remote proc, token: %d, err: %d\n",
+		       token, err);
+		/* Write failed: give the buffer back to the DCI mempool */
+		spin_lock_irqsave(&driver->dci_mempool_lock, flags);
+		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+		spin_unlock_irqrestore(&driver->dci_mempool_lock, flags);
+		ret = err;
+	} else {
+		ret = DIAG_DCI_NO_ERROR;
+	}
+	mutex_unlock(&dci_event_mask_mutex);
+	return ret;
+}
+#endif
+
+/*
+ * diag_send_dci_event_mask - send the local composite event mask to all
+ * peripherals over their control channels.
+ *
+ * Returns DIAG_DCI_NO_ERROR when every peripheral write succeeded,
+ * DIAG_DCI_SEND_DATA_FAIL if any write failed.
+ */
+int diag_send_dci_event_mask(int token)
+{
+	/* Shared scratch buffer; serialised by event_mask.lock below */
+	void *buf = event_mask.update_buf;
+	struct diag_ctrl_event_mask header;
+	int header_size = sizeof(struct diag_ctrl_event_mask);
+	int ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR, i;
+	unsigned char *event_mask_ptr = NULL;
+
+	mutex_lock(&dci_event_mask_mutex);
+	event_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].event_mask_composite;
+	if (!event_mask_ptr) {
+		mutex_unlock(&dci_event_mask_mutex);
+		return -EINVAL;
+	}
+
+	mutex_lock(&event_mask.lock);
+	/* send event mask update */
+	header.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
+	header.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
+	header.stream_id = DCI_MASK_STREAM;
+	header.status = DIAG_CTRL_MASK_VALID;
+	header.event_config = 0; /* event config */
+	header.event_mask_size = DCI_EVENT_MASK_SIZE;
+	/* event_config is 1 iff at least one event bit is set anywhere */
+	for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
+		if (event_mask_ptr[i] != 0) {
+			header.event_config = 1;
+			break;
+		}
+	}
+	memcpy(buf, &header, header_size);
+	memcpy(buf+header_size, event_mask_ptr, DCI_EVENT_MASK_SIZE);
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		/*
+		 * Don't send to peripheral if its regular channel
+		 * is down. It may also mean that the peripheral doesn't
+		 * support DCI.
+		 */
+		err = diag_dci_write_proc(i, DIAG_CNTL_TYPE, buf,
+					  header_size + DCI_EVENT_MASK_SIZE);
+		if (err != DIAG_DCI_NO_ERROR)
+			ret = DIAG_DCI_SEND_DATA_FAIL;
+	}
+
+	mutex_unlock(&event_mask.lock);
+	mutex_unlock(&dci_event_mask_mutex);
+
+	return ret;
+}
+
+/*
+ * update_dci_cumulative_log_mask - recompute one byte of the composite
+ * log mask for @token after a client changed its own mask.
+ *
+ * @offset:     start of the equip id block within the mask table
+ * @byte_index: byte within that block (includes the 2-byte block header)
+ * @byte_mask:  bit(s) within that byte that changed
+ * @token:      processor token whose composite mask is updated
+ *
+ * Also marks the block dirty so the next send pushes it out.  The bits
+ * stay set if any client for @token still has them set.
+ */
+void update_dci_cumulative_log_mask(int offset, unsigned int byte_index,
+						uint8_t byte_mask, int token)
+{
+	uint8_t *log_mask_ptr, *update_ptr = NULL;
+	bool is_set = false;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	mutex_lock(&dci_log_mask_mutex);
+	update_ptr = dci_ops_tbl[token].log_mask_composite;
+	if (!update_ptr) {
+		mutex_unlock(&dci_log_mask_mutex);
+		return;
+	}
+
+	update_ptr += offset;
+	/* update the dirty bit */
+	*(update_ptr+1) = 1;
+	update_ptr = update_ptr + byte_index;
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != token)
+			continue;
+		log_mask_ptr = entry->dci_log_mask;
+		log_mask_ptr = log_mask_ptr + offset + byte_index;
+		if ((*log_mask_ptr & byte_mask) == byte_mask) {
+			is_set = true;
+			/* break even if one client has the log mask set */
+			break;
+		}
+	}
+
+	if (is_set == false)
+		*update_ptr &= ~byte_mask;
+	else
+		*update_ptr |= byte_mask;
+	mutex_unlock(&dci_log_mask_mutex);
+}
+
+/*
+ * diag_dci_invalidate_cumulative_log_mask - rebuild the composite log
+ * mask for @token from scratch: reset it (with every block marked
+ * dirty) and OR in each registered client's log mask.
+ */
+void diag_dci_invalidate_cumulative_log_mask(int token)
+{
+	int i = 0;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	uint8_t *log_mask_ptr, *update_ptr = NULL;
+
+	/* Clear the composite mask and redo all the masks */
+	mutex_lock(&dci_log_mask_mutex);
+	update_ptr = dci_ops_tbl[token].log_mask_composite;
+	if (!update_ptr) {
+		mutex_unlock(&dci_log_mask_mutex);
+		return;
+	}
+
+	create_dci_log_mask_tbl(update_ptr, DCI_LOG_MASK_DIRTY);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != token)
+			continue;
+		log_mask_ptr = entry->dci_log_mask;
+		for (i = 0; i < DCI_LOG_MASK_SIZE; i++)
+			*(update_ptr+i) |= *(log_mask_ptr+i);
+	}
+	mutex_unlock(&dci_log_mask_mutex);
+}
+
+/*
+ * dci_fill_log_mask - serialise one equip id block of a DCI log mask
+ * into a log-mask control packet at @dest_ptr.
+ *
+ * @src_ptr points at the start of an equip id block: byte 0 is the
+ * equip id, byte 1 the dirty flag, bytes 2.. the item mask.
+ * Returns the number of bytes written to @dest_ptr.
+ */
+static int dci_fill_log_mask(unsigned char *dest_ptr, unsigned char *src_ptr)
+{
+	struct diag_ctrl_log_mask header;
+	int header_len = sizeof(struct diag_ctrl_log_mask);
+
+	header.cmd_type = DIAG_CTRL_MSG_LOG_MASK;
+	header.num_items = DCI_MAX_ITEMS_PER_LOG_CODE;
+	/* 11 = control header payload bytes preceding the mask -- TODO confirm */
+	header.data_len = 11 + DCI_MAX_ITEMS_PER_LOG_CODE;
+	header.stream_id = DCI_MASK_STREAM;
+	header.status = 3;
+	header.equip_id = *src_ptr;
+	header.log_mask_size = DCI_MAX_ITEMS_PER_LOG_CODE;
+	memcpy(dest_ptr, &header, header_len);
+	/* +2 skips the equip id and dirty bytes of the source block */
+	memcpy(dest_ptr + header_len, src_ptr + 2, DCI_MAX_ITEMS_PER_LOG_CODE);
+
+	return header_len + DCI_MAX_ITEMS_PER_LOG_CODE;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/*
+ * diag_send_dci_log_mask_remote - push the dirty equip id blocks of the
+ * composite log mask for remote processor @token over the bridge, one
+ * control packet per equip id.  Clean blocks (dirty byte unset) are
+ * skipped.  A block's dirty byte is cleared only after a successful
+ * bridge write, so failed blocks are retried on the next send.
+ */
+int diag_send_dci_log_mask_remote(int token)
+{
+
+	unsigned char *buf = NULL;
+	struct diag_dci_header_t dci_header;
+	int dci_header_size = sizeof(struct diag_dci_header_t);
+	int log_header_size = sizeof(struct diag_ctrl_log_mask);
+	uint8_t *log_mask_ptr = NULL;
+	int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
+	int updated;
+	uint32_t write_len = 0;
+	unsigned long flags;
+
+	mutex_lock(&dci_log_mask_mutex);
+	log_mask_ptr = dci_ops_tbl[token].log_mask_composite;
+	if (!log_mask_ptr) {
+		mutex_unlock(&dci_log_mask_mutex);
+		return -EINVAL;
+	}
+
+	/* DCI header is common to all equipment IDs */
+	dci_header.start = CONTROL_CHAR;
+	dci_header.version = 1;
+	dci_header.length = log_header_size + DCI_MAX_ITEMS_PER_LOG_CODE + 1;
+	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+
+	for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
+		updated = 1;
+		write_len = 0;
+		/*
+		 * Byte 1 of each 514-byte block (equip id + dirty byte +
+		 * item mask, presumably DCI_MAX_ITEMS_PER_LOG_CODE + 2 --
+		 * confirm) is the dirty flag; skip clean blocks.
+		 */
+		if (!*(log_mask_ptr + 1)) {
+			log_mask_ptr += 514;
+			continue;
+		}
+
+		buf = dci_get_buffer_from_bridge(token);
+		if (!buf) {
+			pr_err("diag: In %s, unable to get dci buffers to write data\n",
+				__func__);
+			mutex_unlock(&dci_log_mask_mutex);
+			return -EAGAIN;
+		}
+
+		memcpy(buf + write_len, &dci_header, dci_header_size);
+		write_len += dci_header_size;
+		write_len += dci_fill_log_mask(buf + write_len, log_mask_ptr);
+		*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+		write_len += sizeof(uint8_t);
+		err = diag_dci_write_bridge(token, buf, write_len);
+		if (err) {
+			pr_err("diag: error writing log mask to remote processor, equip_id: %d, token: %d, err: %d\n",
+			       i, token, err);
+			/* Write failed: return the buffer to the mempool */
+			spin_lock_irqsave(&driver->dci_mempool_lock, flags);
+			diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+			spin_unlock_irqrestore(&driver->dci_mempool_lock,
+				flags);
+			updated = 0;
+		}
+		if (updated)
+			*(log_mask_ptr + 1) = 0; /* clear dirty byte */
+		log_mask_ptr += 514;
+	}
+	mutex_unlock(&dci_log_mask_mutex);
+	return ret;
+}
+#endif
+
+/*
+ * diag_send_dci_log_mask - send the dirty equip id blocks of the local
+ * composite log mask to every peripheral over the control channel, one
+ * packet per equip id.  A block's dirty byte is cleared only when the
+ * write succeeded for all peripherals, so failures are retried later.
+ */
+int diag_send_dci_log_mask(int token)
+{
+	/* Shared scratch buffer; serialised by log_mask.lock below */
+	void *buf = log_mask.update_buf;
+	int write_len = 0;
+	uint8_t *log_mask_ptr = NULL;
+	int i, j, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
+	int updated;
+
+
+	mutex_lock(&dci_log_mask_mutex);
+	log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
+	if (!log_mask_ptr) {
+		mutex_unlock(&dci_log_mask_mutex);
+		return -EINVAL;
+	}
+
+	mutex_lock(&log_mask.lock);
+	/* 16 equip id blocks of 514 bytes each (2 header + mask bytes) */
+	for (i = 0; i < 16; i++) {
+		updated = 1;
+		/* Dirty byte not set - nothing pending for this equip id, skip */
+		if (!(*(log_mask_ptr + 1))) {
+			log_mask_ptr += 514;
+			continue;
+		}
+		write_len = dci_fill_log_mask(buf, log_mask_ptr);
+		for (j = 0; j < NUM_PERIPHERALS && write_len; j++) {
+			err = diag_dci_write_proc(j, DIAG_CNTL_TYPE, buf,
+						  write_len);
+			if (err != DIAG_DCI_NO_ERROR) {
+				updated = 0;
+				ret = DIAG_DCI_SEND_DATA_FAIL;
+			}
+		}
+		if (updated)
+			*(log_mask_ptr+1) = 0; /* clear dirty byte */
+		log_mask_ptr += 514;
+	}
+	mutex_unlock(&log_mask.lock);
+	mutex_unlock(&dci_log_mask_mutex);
+	return ret;
+}
+
+/*
+ * Initialise the local-processor DCI state: start with clean composite
+ * log and event mask tables and flag the apps processor as connected.
+ * Always succeeds (returns 0).
+ */
+static int diag_dci_init_local(void)
+{
+	struct dci_ops_tbl_t *ops = &dci_ops_tbl[DCI_LOCAL_PROC];
+
+	create_dci_log_mask_tbl(ops->log_mask_composite, DCI_LOG_MASK_CLEAN);
+	create_dci_event_mask_tbl(ops->event_mask_composite);
+	ops->peripheral_status |= DIAG_CON_APSS;
+
+	return 0;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/*
+ * Prepare the handshake state for every remote DCI processor: a work
+ * item for the handshake itself and a timer to check on it.
+ */
+static void diag_dci_init_handshake_remote(void)
+{
+	int i;
+	struct dci_channel_status_t *temp = NULL;
+
+	for (i = DCI_REMOTE_BASE; i < NUM_DCI_PROC; i++) {
+		temp = &dci_channel_status[i];
+		temp->id = i;
+		INIT_WORK(&temp->handshake_work, dci_handshake_work_fn);
+		timer_setup(&temp->wait_time, dci_chk_handshake, 0);
+	}
+}
+
+/*
+ * Initialise remote-processor DCI support: the bridge write mempool,
+ * clean composite masks per remote processor, the partial packet
+ * reassembly buffer and the handshake machinery.
+ *
+ * Returns 0 on success, -ENOMEM if the partial packet buffer cannot
+ * be allocated.
+ */
+int diag_dci_init_remote(void)
+{
+	int i;
+	struct dci_ops_tbl_t *temp = NULL;
+
+	diagmem_init(driver, POOL_TYPE_MDM_DCI_WRITE);
+
+	for (i = DCI_REMOTE_BASE; i < DCI_REMOTE_LAST; i++) {
+		temp = &dci_ops_tbl[i];
+		create_dci_log_mask_tbl(temp->log_mask_composite,
+					DCI_LOG_MASK_CLEAN);
+		create_dci_event_mask_tbl(temp->event_mask_composite);
+	}
+
+	partial_pkt.data = kzalloc(MAX_DCI_PACKET_SZ, GFP_KERNEL);
+	if (!partial_pkt.data) {
+		pr_err("diag: Unable to create partial pkt data\n");
+		return -ENOMEM;
+	}
+
+	partial_pkt.total_len = 0;
+	partial_pkt.read_len = 0;
+	partial_pkt.remaining = 0;
+	partial_pkt.processing = 0;
+
+	diag_dci_init_handshake_remote();
+
+	return 0;
+}
+#else
+/* No bridge support compiled in: remote DCI init is a no-op */
+int diag_dci_init_remote(void)
+{
+	return 0;
+}
+#endif
+
+/*
+ * Set up the per-processor DCI ops table.  Only the local processor
+ * entry needs explicit initialisation here.
+ *
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+static int diag_dci_init_ops_tbl(void)
+{
+	if (diag_dci_init_local())
+		return -ENOMEM;
+	return 0;
+}
+
+/*
+ * diag_dci_init - one-time initialisation of the DCI subsystem: locks,
+ * the ops table, the apps-side DCI buffer, the client/request lists,
+ * the DCI workqueue and the data drain work/timer.
+ *
+ * Returns DIAG_DCI_NO_ERROR on success, DIAG_DCI_NO_REG on failure
+ * (with everything allocated so far torn down again).
+ */
+int diag_dci_init(void)
+{
+	int ret = 0;
+
+	driver->dci_tag = 0;
+	driver->dci_client_id = 0;
+	driver->num_dci_client = 0;
+	mutex_init(&driver->dci_mutex);
+	mutex_init(&dci_log_mask_mutex);
+	mutex_init(&dci_event_mask_mutex);
+	spin_lock_init(&ws_lock);
+	spin_lock_init(&driver->dci_mempool_lock);
+
+	ret = diag_dci_init_ops_tbl();
+	if (ret)
+		goto err;
+
+	if (driver->apps_dci_buf == NULL) {
+		driver->apps_dci_buf = kzalloc(DCI_BUF_SIZE, GFP_KERNEL);
+		if (driver->apps_dci_buf == NULL)
+			goto err;
+	}
+	INIT_LIST_HEAD(&driver->dci_client_list);
+	INIT_LIST_HEAD(&driver->dci_req_list);
+
+	driver->diag_dci_wq = create_singlethread_workqueue("diag_dci_wq");
+	if (!driver->diag_dci_wq)
+		goto err;
+
+	INIT_WORK(&dci_data_drain_work, dci_data_drain_work_fn);
+
+	timer_setup(&dci_drain_timer, dci_drain_data, 0);
+	return DIAG_DCI_NO_ERROR;
+err:
+	pr_err("diag: Could not initialize diag DCI buffers");
+	kfree(driver->apps_dci_buf);
+	driver->apps_dci_buf = NULL;
+
+	if (driver->diag_dci_wq)
+		destroy_workqueue(driver->diag_dci_wq);
+	/*
+	 * partial_pkt.data is allocated by diag_dci_init_remote(); when it
+	 * was never allocated this kfree(NULL) is a safe no-op.
+	 */
+	kfree(partial_pkt.data);
+	partial_pkt.data = NULL;
+	mutex_destroy(&driver->dci_mutex);
+	mutex_destroy(&dci_log_mask_mutex);
+	mutex_destroy(&dci_event_mask_mutex);
+	return DIAG_DCI_NO_REG;
+}
+
+/*
+ * Open the DCI data and DCI command forwarding channels for every
+ * peripheral.
+ */
+void diag_dci_channel_init(void)
+{
+	uint8_t p;
+
+	for (p = 0; p < NUM_PERIPHERALS; p++) {
+		diagfwd_open(p, TYPE_DCI);
+		diagfwd_open(p, TYPE_DCI_CMD);
+	}
+}
+
+/*
+ * diag_dci_exit - tear down the DCI subsystem: free the partial packet
+ * and apps DCI buffers, destroy the DCI locks and the workqueue.
+ * Counterpart of diag_dci_init()/diag_dci_init_remote().
+ */
+void diag_dci_exit(void)
+{
+	kfree(partial_pkt.data);
+	partial_pkt.data = NULL;
+	kfree(driver->apps_dci_buf);
+	driver->apps_dci_buf = NULL;
+	mutex_destroy(&driver->dci_mutex);
+	mutex_destroy(&dci_log_mask_mutex);
+	mutex_destroy(&dci_event_mask_mutex);
+	destroy_workqueue(driver->diag_dci_wq);
+}
+
+/*
+ * diag_dci_clear_log_mask - reset the log mask of the given client,
+ * rebuild the composite mask for the client's processor token and push
+ * the updated mask to userspace clients (local token only) and to the
+ * peripherals.
+ *
+ * Returns the send_log_mask() result, or DIAG_DCI_TABLE_ERR when the
+ * client id is unknown.
+ *
+ * Fix: removed the local update_ptr, which was assigned from
+ * dci_ops_tbl but never read (dead store flagged by
+ * -Wunused-but-set-variable).
+ */
+int diag_dci_clear_log_mask(int client_id)
+{
+	int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	entry = diag_dci_get_client_entry(client_id);
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
+		return DIAG_DCI_TABLE_ERR;
+	}
+	token = entry->client_info.token;
+
+	create_dci_log_mask_tbl(entry->dci_log_mask, DCI_LOG_MASK_CLEAN);
+	diag_dci_invalidate_cumulative_log_mask(token);
+
+	/*
+	 * Send updated mask to userspace clients only if the client
+	 * is registered on the local processor
+	 */
+	if (token == DCI_LOCAL_PROC)
+		diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+	/* Send updated mask to peripherals */
+	err = dci_ops_tbl[token].send_log_mask(token);
+	return err;
+}
+
+/*
+ * diag_dci_clear_event_mask - reset the event mask of the given client,
+ * rebuild the composite mask for the client's processor token and push
+ * the updated mask to userspace clients (local token only) and to the
+ * peripherals.
+ *
+ * Returns the send_event_mask() result, or DIAG_DCI_TABLE_ERR when the
+ * client id is unknown.
+ *
+ * Fix: removed the local update_ptr, which was assigned from
+ * dci_ops_tbl but never read (dead store flagged by
+ * -Wunused-but-set-variable).
+ */
+int diag_dci_clear_event_mask(int client_id)
+{
+	int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	entry = diag_dci_get_client_entry(client_id);
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
+		return DIAG_DCI_TABLE_ERR;
+	}
+	token = entry->client_info.token;
+
+	create_dci_event_mask_tbl(entry->dci_event_mask);
+	diag_dci_invalidate_cumulative_event_mask(token);
+
+	/*
+	 * Send updated mask to userspace clients only if the client is
+	 * registered on the local processor
+	 */
+	if (token == DCI_LOCAL_PROC)
+		diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+	/* Send updated mask to peripherals */
+	err = dci_ops_tbl[token].send_event_mask(token);
+	return err;
+}
+
+/*
+ * diag_dci_get_cumulative_real_time - report the aggregate real time
+ * vote for a processor token: MODE_REALTIME if any client registered
+ * for @token is in real time mode, MODE_NONREALTIME otherwise.
+ *
+ * Fix: use the MODE_REALTIME symbol instead of the literal 1, matching
+ * the MODE_NONREALTIME initialiser and the comparison above.
+ */
+uint8_t diag_dci_get_cumulative_real_time(int token)
+{
+	uint8_t real_time = MODE_NONREALTIME;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->real_time == MODE_REALTIME &&
+					entry->client_info.token == token) {
+			real_time = MODE_REALTIME;
+			break;
+		}
+	}
+	return real_time;
+}
+
+/*
+ * Record the real time mode for a DCI client.
+ * Returns 1 on success, 0 when the entry pointer is invalid.
+ */
+int diag_dci_set_real_time(struct diag_dci_client_tbl *entry, uint8_t real_time)
+{
+	if (entry) {
+		entry->real_time = real_time;
+		return 1;
+	}
+	pr_err("diag: In %s, invalid client entry\n", __func__);
+	return 0;
+}
+
+/*
+ * diag_dci_register_client - register the calling process as a new DCI
+ * client: allocate its log/event masks and per-peripheral buffer pairs
+ * (primary + command), add it to the client list and assign it a new
+ * client id.
+ *
+ * @reg_entry: registration request; client_id is written back on success.
+ *
+ * Returns the new client id on success, DIAG_DCI_NO_REG on any failure
+ * (invalid token, table full, or allocation failure -- in which case
+ * everything allocated so far is torn down again).
+ */
+int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
+{
+	int i, err = 0;
+	struct diag_dci_client_tbl *new_entry = NULL;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+
+	if (!reg_entry)
+		return DIAG_DCI_NO_REG;
+	if (!VALID_DCI_TOKEN(reg_entry->token)) {
+		pr_alert("diag: Invalid DCI client token, %d\n",
+						reg_entry->token);
+		return DIAG_DCI_NO_REG;
+	}
+
+	if (driver->dci_state == DIAG_DCI_NO_REG)
+		return DIAG_DCI_NO_REG;
+
+	if (driver->num_dci_client >= MAX_DCI_CLIENTS)
+		return DIAG_DCI_NO_REG;
+
+	new_entry = kzalloc(sizeof(struct diag_dci_client_tbl), GFP_KERNEL);
+	if (new_entry == NULL) {
+		pr_err("diag: unable to alloc memory\n");
+		return DIAG_DCI_NO_REG;
+	}
+
+	mutex_lock(&driver->dci_mutex);
+
+	new_entry->client = current;
+	new_entry->tgid = current->tgid;
+	new_entry->client_info.notification_list =
+				reg_entry->notification_list;
+	new_entry->client_info.signal_type =
+				reg_entry->signal_type;
+	new_entry->client_info.token = reg_entry->token;
+	/*
+	 * num_buffers stays 0 (kzalloc) for tokens not handled below;
+	 * VALID_DCI_TOKEN above presumably restricts the token to these
+	 * two cases -- confirm.
+	 */
+	switch (reg_entry->token) {
+	case DCI_LOCAL_PROC:
+		new_entry->num_buffers = NUM_DCI_PERIPHERALS;
+		break;
+	case DCI_MDM_PROC:
+		new_entry->num_buffers = 1;
+		break;
+	}
+
+	new_entry->buffers = NULL;
+	new_entry->real_time = MODE_REALTIME;
+	new_entry->in_service = 0;
+	INIT_LIST_HEAD(&new_entry->list_write_buf);
+	mutex_init(&new_entry->write_buf_mutex);
+	new_entry->dci_log_mask =  kzalloc(DCI_LOG_MASK_SIZE, GFP_KERNEL);
+	if (!new_entry->dci_log_mask) {
+		pr_err("diag: Unable to create log mask for client, %d",
+							driver->dci_client_id);
+		goto fail_alloc;
+	}
+	create_dci_log_mask_tbl(new_entry->dci_log_mask, DCI_LOG_MASK_CLEAN);
+
+	new_entry->dci_event_mask =  kzalloc(DCI_EVENT_MASK_SIZE, GFP_KERNEL);
+	if (!new_entry->dci_event_mask) {
+		pr_err("diag: Unable to create event mask for client, %d",
+							driver->dci_client_id);
+		goto fail_alloc;
+	}
+	create_dci_event_mask_tbl(new_entry->dci_event_mask);
+
+	new_entry->buffers = kzalloc(new_entry->num_buffers *
+				     sizeof(struct diag_dci_buf_peripheral_t),
+				     GFP_KERNEL);
+	if (!new_entry->buffers) {
+		pr_err("diag: Unable to allocate buffers for peripherals in %s\n",
+								__func__);
+		goto fail_alloc;
+	}
+
+	for (i = 0; i < new_entry->num_buffers; i++) {
+		proc_buf = &new_entry->buffers[i];
+		/* NOTE(review): address of an array element, cannot be NULL */
+		if (!proc_buf)
+			goto fail_alloc;
+
+		mutex_init(&proc_buf->health_mutex);
+		mutex_init(&proc_buf->buf_mutex);
+		proc_buf->health.dropped_events = 0;
+		proc_buf->health.dropped_logs = 0;
+		proc_buf->health.received_events = 0;
+		proc_buf->health.received_logs = 0;
+		proc_buf->buf_primary = kzalloc(
+					sizeof(struct diag_dci_buffer_t),
+					GFP_KERNEL);
+		if (!proc_buf->buf_primary)
+			goto fail_alloc;
+		proc_buf->buf_cmd = kzalloc(sizeof(struct diag_dci_buffer_t),
+					    GFP_KERNEL);
+		if (!proc_buf->buf_cmd)
+			goto fail_alloc;
+		err = diag_dci_init_buffer(proc_buf->buf_primary,
+					   DCI_BUF_PRIMARY);
+		if (err)
+			goto fail_alloc;
+		err = diag_dci_init_buffer(proc_buf->buf_cmd, DCI_BUF_CMD);
+		if (err)
+			goto fail_alloc;
+		proc_buf->buf_curr = proc_buf->buf_primary;
+	}
+
+	/* Fully constructed: publish the entry and hand out a new id */
+	list_add_tail(&new_entry->track, &driver->dci_client_list);
+	driver->dci_client_id++;
+	new_entry->client_info.client_id = driver->dci_client_id;
+	reg_entry->client_id = driver->dci_client_id;
+	driver->num_dci_client++;
+	/* First client: vote the DCI processor up */
+	if (driver->num_dci_client == 1)
+		diag_update_proc_vote(DIAG_PROC_DCI, VOTE_UP, reg_entry->token);
+	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+	mutex_unlock(&driver->dci_mutex);
+
+	return driver->dci_client_id;
+
+fail_alloc:
+	/* Tear down whatever part of the entry was already built */
+	if (new_entry) {
+		for (i = 0; ((i < new_entry->num_buffers) &&
+			new_entry->buffers); i++) {
+			proc_buf = &new_entry->buffers[i];
+			if (proc_buf) {
+				mutex_destroy(&proc_buf->health_mutex);
+				if (proc_buf->buf_primary) {
+					kfree(proc_buf->buf_primary->data);
+					proc_buf->buf_primary->data = NULL;
+					mutex_destroy(
+					   &proc_buf->buf_primary->data_mutex);
+				}
+				kfree(proc_buf->buf_primary);
+				proc_buf->buf_primary = NULL;
+				if (proc_buf->buf_cmd) {
+					kfree(proc_buf->buf_cmd->data);
+					proc_buf->buf_cmd->data = NULL;
+					mutex_destroy(
+					   &proc_buf->buf_cmd->data_mutex);
+				}
+				kfree(proc_buf->buf_cmd);
+				proc_buf->buf_cmd = NULL;
+			}
+		}
+		kfree(new_entry->dci_event_mask);
+		new_entry->dci_event_mask = NULL;
+		kfree(new_entry->dci_log_mask);
+		new_entry->dci_log_mask = NULL;
+		kfree(new_entry->buffers);
+		new_entry->buffers = NULL;
+		kfree(new_entry);
+		new_entry = NULL;
+	}
+	mutex_unlock(&driver->dci_mutex);
+	return DIAG_DCI_NO_REG;
+}
+
+/*
+ * diag_dci_deinit_client - unregister a DCI client and release everything
+ * it owns: its log/event masks, outstanding command requests, buffers
+ * queued for write, and the per-peripheral buffer set.
+ *
+ * Returns DIAG_DCI_NO_ERROR on success, DIAG_DCI_NOT_SUPPORTED for a NULL
+ * entry, or the error from sending the updated cumulative masks to the
+ * peripherals.
+ *
+ * NOTE(review): the shared client list, driver->num_dci_client and the
+ * cumulative masks are modified here without taking driver->dci_mutex,
+ * so the caller presumably holds it -- confirm at the call sites.
+ */
+int diag_dci_deinit_client(struct diag_dci_client_tbl *entry)
+{
+	int ret = DIAG_DCI_NO_ERROR, real_time = MODE_REALTIME, i, peripheral;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+	struct diag_dci_buffer_t *buf_entry, *temp;
+	struct list_head *start, *req_temp;
+	struct dci_pkt_req_entry_t *req_entry = NULL;
+	int token = DCI_LOCAL_PROC;
+
+	if (!entry)
+		return DIAG_DCI_NOT_SUPPORTED;
+
+	token = entry->client_info.token;
+	/*
+	 * Remove the entry from the list before freeing the buffers
+	 * to ensure that we don't have any invalid access.
+	 */
+	if (!list_empty(&entry->track))
+		list_del(&entry->track);
+	driver->num_dci_client--;
+	/*
+	 * Clear the client's log and event masks, update the cumulative
+	 * masks and send the masks to peripherals
+	 */
+	kfree(entry->dci_log_mask);
+	entry->dci_log_mask = NULL;
+	diag_dci_invalidate_cumulative_log_mask(token);
+	if (token == DCI_LOCAL_PROC)
+		diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+	ret = dci_ops_tbl[token].send_log_mask(token);
+	if (ret != DIAG_DCI_NO_ERROR) {
+		/*
+		 * NOTE(review): returning here leaves the entry unlinked but
+		 * its event mask and buffers still allocated -- this error
+		 * path leaks the entry; confirm whether that is intentional.
+		 */
+		return ret;
+	}
+	kfree(entry->dci_event_mask);
+	entry->dci_event_mask = NULL;
+	diag_dci_invalidate_cumulative_event_mask(token);
+	if (token == DCI_LOCAL_PROC)
+		diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+	ret = dci_ops_tbl[token].send_event_mask(token);
+	if (ret != DIAG_DCI_NO_ERROR) {
+		return ret;
+	}
+
+	/* Drop any packet requests still owned by this client */
+	list_for_each_safe(start, req_temp, &driver->dci_req_list) {
+		req_entry = list_entry(start, struct dci_pkt_req_entry_t,
+				       track);
+		if (req_entry->client_id == entry->client_info.client_id) {
+			if (!list_empty(&req_entry->track))
+				list_del(&req_entry->track);
+			kfree(req_entry);
+			req_entry = NULL;
+		}
+	}
+
+	/* Clean up any buffer that is pending write */
+	mutex_lock(&entry->write_buf_mutex);
+	list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
+							buf_track) {
+		if (!list_empty(&buf_entry->buf_track))
+			list_del(&buf_entry->buf_track);
+		/* Secondary buffers come from the DCI mempool, so they are
+		 * returned via diagmem_free rather than kfree'd.
+		 */
+		if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
+			mutex_lock(&buf_entry->data_mutex);
+			diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
+			buf_entry->data = NULL;
+			mutex_unlock(&buf_entry->data_mutex);
+			kfree(buf_entry);
+			buf_entry = NULL;
+		} else if (buf_entry->buf_type == DCI_BUF_CMD) {
+			peripheral = buf_entry->data_source;
+			if (peripheral == APPS_DATA)
+				continue;
+		}
+		/*
+		 * These are buffers that can't be written to the client which
+		 * means that the copy cannot be completed. Make sure that we
+		 * remove those references in DCI wakeup source.
+		 */
+		diag_ws_on_copy_fail(DIAG_WS_DCI);
+	}
+	mutex_unlock(&entry->write_buf_mutex);
+
+	/* Release the per-peripheral buffer set owned by this client */
+	for (i = 0; i < entry->num_buffers; i++) {
+		proc_buf = &entry->buffers[i];
+		buf_entry = proc_buf->buf_curr;
+		mutex_lock(&proc_buf->buf_mutex);
+		/* Clean up secondary buffer from mempool that is active */
+		if (buf_entry && buf_entry->buf_type == DCI_BUF_SECONDARY) {
+			mutex_lock(&buf_entry->data_mutex);
+			diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
+			buf_entry->data = NULL;
+			mutex_unlock(&buf_entry->data_mutex);
+			mutex_destroy(&buf_entry->data_mutex);
+			kfree(buf_entry);
+			buf_entry = NULL;
+		}
+
+		mutex_lock(&proc_buf->buf_primary->data_mutex);
+		kfree(proc_buf->buf_primary->data);
+		proc_buf->buf_primary->data = NULL;
+		mutex_unlock(&proc_buf->buf_primary->data_mutex);
+
+		mutex_lock(&proc_buf->buf_cmd->data_mutex);
+		kfree(proc_buf->buf_cmd->data);
+		proc_buf->buf_cmd->data = NULL;
+		mutex_unlock(&proc_buf->buf_cmd->data_mutex);
+
+		mutex_destroy(&proc_buf->health_mutex);
+		mutex_destroy(&proc_buf->buf_primary->data_mutex);
+		mutex_destroy(&proc_buf->buf_cmd->data_mutex);
+
+		kfree(proc_buf->buf_primary);
+		proc_buf->buf_primary = NULL;
+		kfree(proc_buf->buf_cmd);
+		proc_buf->buf_cmd = NULL;
+		mutex_unlock(&proc_buf->buf_mutex);
+	}
+	mutex_destroy(&entry->write_buf_mutex);
+
+	kfree(entry->buffers);
+	entry->buffers = NULL;
+	kfree(entry);
+	entry = NULL;
+
+	/*
+	 * Last client gone: withdraw the DCI vote entirely; otherwise
+	 * recompute the cumulative real-time mode for the remaining clients.
+	 */
+	if (driver->num_dci_client == 0) {
+		diag_update_proc_vote(DIAG_PROC_DCI, VOTE_DOWN, token);
+	} else {
+		real_time = diag_dci_get_cumulative_real_time(token);
+		diag_update_real_time_vote(DIAG_PROC_DCI, real_time, token);
+	}
+	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+
+	return DIAG_DCI_NO_ERROR;
+}
+
+/*
+ * diag_dci_write_proc - forward a DCI payload to a peripheral.
+ *
+ * @peripheral: destination peripheral index (< NUM_PERIPHERALS)
+ * @pkt_type:   DIAG_DATA_TYPE -> DCI command channel (TYPE_DCI_CMD),
+ *              DIAG_CNTL_TYPE -> control channel (TYPE_CNTL)
+ * @buf/@len:   payload to write
+ *
+ * Returns DIAG_DCI_NO_ERROR on success (a -ENODEV from diagfwd_write is
+ * deliberately treated as success -- the channel simply is not up), or a
+ * negative errno on invalid arguments or write failure.
+ *
+ * NOTE(review): the validity check tests PERIPHERAL_MODEM's feature mask
+ * regardless of which peripheral is being written to -- confirm this is
+ * intentional rather than a copy/paste of the modem case.
+ */
+int diag_dci_write_proc(uint8_t peripheral, int pkt_type, char *buf, int len)
+{
+	uint8_t dest_channel = TYPE_DATA;
+	int err = 0;
+
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < 0 ||
+	    !(driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)) {
+		DIAG_LOG(DIAG_DEBUG_DCI,
+			"buf: 0x%pK, p: %d, len: %d, f_mask: %d\n",
+			buf, peripheral, len,
+			driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask);
+		return -EINVAL;
+	}
+
+	/* Map the logical packet type onto the transport channel */
+	if (pkt_type == DIAG_DATA_TYPE) {
+		dest_channel = TYPE_DCI_CMD;
+	} else if (pkt_type == DIAG_CNTL_TYPE) {
+		dest_channel = TYPE_CNTL;
+	} else {
+		pr_err("diag: Invalid DCI pkt type in %s", __func__);
+		return -EINVAL;
+	}
+
+	err = diagfwd_write(peripheral, dest_channel, buf, len);
+	if (err && err != -ENODEV) {
+		pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
+		       __func__, peripheral, dest_channel, len, err);
+	} else {
+		err = DIAG_DCI_NO_ERROR;
+	}
+
+	return err;
+}
+
+/*
+ * diag_dci_copy_health_stats - copy a client's dropped/received log and
+ * event counters into @stats_proc->health.
+ *
+ * If stats_proc->proc names a single peripheral, that buffer's counters
+ * are copied; if it is ALL_PROC the counters of all of the client's
+ * buffers are summed.  Clients registered for a remote processor (nonzero
+ * token) own a single buffer, so proc is forced to 0 for them.  When
+ * stats->reset_status is set, the source counters are zeroed (under
+ * health_mutex) after being copied.
+ *
+ * NOTE(review): the counters are *read* without taking health_mutex --
+ * only the reset path locks it.  Torn reads are presumably tolerated
+ * here; confirm.
+ */
+int diag_dci_copy_health_stats(struct diag_dci_health_stats_proc *stats_proc)
+{
+	struct diag_dci_client_tbl *entry = NULL;
+	struct diag_dci_health_t *health = NULL;
+	struct diag_dci_health_stats *stats = NULL;
+	int i, proc;
+
+	if (!stats_proc)
+		return -EINVAL;
+
+	stats = &stats_proc->health;
+	proc = stats_proc->proc;
+	if (proc < ALL_PROC || proc > APPS_DATA)
+		return -EINVAL;
+
+	entry = diag_dci_get_client_entry(stats_proc->client_id);
+	if (!entry)
+		return DIAG_DCI_NOT_SUPPORTED;
+
+	/*
+	 * If the client has registered for remote processor, the
+	 * proc field doesn't have any effect as they have only one buffer.
+	 */
+	if (entry->client_info.token)
+		proc = 0;
+
+	stats->stats.dropped_logs = 0;
+	stats->stats.dropped_events = 0;
+	stats->stats.received_logs = 0;
+	stats->stats.received_events = 0;
+
+	/* Single-peripheral query: copy (and optionally reset) one buffer */
+	if (proc != ALL_PROC) {
+		health = &entry->buffers[proc].health;
+		stats->stats.dropped_logs = health->dropped_logs;
+		stats->stats.dropped_events = health->dropped_events;
+		stats->stats.received_logs = health->received_logs;
+		stats->stats.received_events = health->received_events;
+		if (stats->reset_status) {
+			mutex_lock(&entry->buffers[proc].health_mutex);
+			health->dropped_logs = 0;
+			health->dropped_events = 0;
+			health->received_logs = 0;
+			health->received_events = 0;
+			mutex_unlock(&entry->buffers[proc].health_mutex);
+		}
+		return DIAG_DCI_NO_ERROR;
+	}
+
+	/* ALL_PROC: accumulate over every buffer the client owns */
+	for (i = 0; i < entry->num_buffers; i++) {
+		health = &entry->buffers[i].health;
+		stats->stats.dropped_logs += health->dropped_logs;
+		stats->stats.dropped_events += health->dropped_events;
+		stats->stats.received_logs += health->received_logs;
+		stats->stats.received_events += health->received_events;
+		if (stats->reset_status) {
+			mutex_lock(&entry->buffers[i].health_mutex);
+			health->dropped_logs = 0;
+			health->dropped_events = 0;
+			health->received_logs = 0;
+			health->received_events = 0;
+			mutex_unlock(&entry->buffers[i].health_mutex);
+		}
+	}
+	return DIAG_DCI_NO_ERROR;
+}
+
+/*
+ * diag_dci_get_support_list - report which peripherals currently support
+ * DCI for the processor named by support_list->proc, as a bitmask copied
+ * from dci_ops_tbl[].peripheral_status into support_list->list.
+ *
+ * NOTE(review): -ENOMEM for a NULL argument and -EIO for a bad token are
+ * unusual errno choices (vs -EINVAL) but are kept for ABI compatibility.
+ */
+int diag_dci_get_support_list(struct diag_dci_peripherals_t *support_list)
+{
+	if (!support_list)
+		return -ENOMEM;
+
+	if (!VALID_DCI_TOKEN(support_list->proc))
+		return -EIO;
+
+	support_list->list = dci_ops_tbl[support_list->proc].peripheral_status;
+	return DIAG_DCI_NO_ERROR;
+}
diff -Nruw linux-6.4-fbx/drivers/char/diag/diag_dci.h linux-6.4-fbx/drivers/char/diag/diag_dci.h
--- linux-6.4-fbx/drivers/char/diag/diag_dci.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diag_dci.h	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,325 @@
+/* Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef DIAG_DCI_H
+#define DIAG_DCI_H
+
+#define MAX_DCI_CLIENTS		10
+/* Diag command codes used on the wire for DCI traffic */
+#define DCI_PKT_RSP_CODE	0x93
+#define DCI_DELAYED_RSP_CODE	0x94
+#define DCI_CONTROL_PKT_CODE	0x9A
+#define LOG_CMD_CODE		0x10
+#define EVENT_CMD_CODE		0x60
+/*
+ * Internal stream type discriminators; presumably the negative values
+ * distinguish log/event streams from packet responses -- confirm against
+ * the users of these constants.
+ */
+#define DCI_PKT_RSP_TYPE	0
+#define DCI_LOG_TYPE		-1
+#define DCI_EVENT_TYPE		-2
+#define SET_LOG_MASK		1
+#define DISABLE_LOG_MASK	0
+#define MAX_EVENT_SIZE		512
+#define DCI_CLIENT_INDEX_INVALID -1
+#define DCI_LOG_CON_MIN_LEN		16
+#define DCI_EVENT_CON_MIN_LEN		16
+
+/* Values for diag_dci_buffer_t.buf_type */
+#define DCI_BUF_PRIMARY		1
+#define DCI_BUF_SECONDARY	2
+#define DCI_BUF_CMD		3
+
+#ifdef CONFIG_DEBUG_FS
+#define DIAG_DCI_DEBUG_CNT	100
+#define DIAG_DCI_DEBUG_LEN	100
+#endif
+
+/* 16 log code categories, each has:
+ * 1 bytes equip id + 1 dirty byte + 512 byte max log mask
+ */
+#define DCI_LOG_MASK_SIZE		(16*514)
+#define DCI_EVENT_MASK_SIZE		512
+#define DCI_MASK_STREAM			2
+#define DCI_MAX_LOG_CODES		16
+#define DCI_MAX_ITEMS_PER_LOG_CODE	512
+
+#define DCI_LOG_MASK_CLEAN		0
+#define DCI_LOG_MASK_DIRTY		1
+
+#define MIN_DELAYED_RSP_LEN		12
+/*
+ * Maximum data size that peripherals send = 8.5K log +
+ * DCI header + footer (6 bytes)
+ */
+#define MAX_DCI_PACKET_SZ		8710
+
+extern unsigned int dci_max_reg;
+extern unsigned int dci_max_clients;
+
+/* DCI processor tokens: 0 is local, higher tokens address remote procs */
+#define DCI_LOCAL_PROC		0
+#define DCI_REMOTE_BASE		1
+#define DCI_MDM_PROC		DCI_REMOTE_BASE
+#define DCI_REMOTE_LAST		(DCI_REMOTE_BASE + 1)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_DCI_PROC		1
+#else
+#define NUM_DCI_PROC		DCI_REMOTE_LAST
+#endif
+
+#define DCI_REMOTE_DATA	0
+
+#define NUM_REMOTE_DATA_DEV     3
+#define DIAGFWD_MDM_DCI         NUM_REMOTE_DATA_DEV
+#define NUM_REMOTE_DCI_DEV      (DIAGFWD_MDM_DCI - NUM_REMOTE_DATA_DEV + 1)
+
+/* A token is valid iff it names one of the NUM_DCI_PROC processors */
+#define VALID_DCI_TOKEN(x)	((x >= 0 && x < NUM_DCI_PROC) ? 1 : 0)
+/* Translate between bridge device indices and DCI tokens */
+#define BRIDGE_TO_TOKEN(x)	(x - DIAGFWD_MDM_DCI + DCI_REMOTE_BASE)
+#define TOKEN_TO_BRIDGE(x)	(dci_ops_tbl[x].ctx)
+
+#define DCI_MAGIC		(0xAABB1122)
+
+/* Userspace request header for a DCI packet request */
+struct dci_pkt_req_t {
+	int uid;
+	int client_id;
+} __packed;
+
+/* Userspace request for enabling/disabling log or event codes */
+struct dci_stream_req_t {
+	int type;
+	int client_id;
+	int set_flag;
+	int count;
+} __packed;
+
+/* In-kernel record of an outstanding packet request, kept on
+ * driver->dci_req_list and matched back by tag on response.
+ */
+struct dci_pkt_req_entry_t {
+	int client_id;
+	int uid;
+	int tag;
+	struct list_head track;
+} __packed;
+
+/* Registration parameters supplied by a client */
+struct diag_dci_reg_tbl_t {
+	int client_id;
+	uint16_t notification_list;
+	int signal_type;
+	int token;
+} __packed;
+
+/* Drop/receive counters for one peripheral buffer */
+struct diag_dci_health_t {
+	int dropped_logs;
+	int dropped_events;
+	int received_logs;
+	int received_events;
+};
+
+/* Reassembly state for a DCI packet that spans multiple reads */
+struct diag_dci_partial_pkt_t {
+	unsigned char *data;
+	uint32_t total_len;
+	uint32_t read_len;
+	uint32_t remaining;
+	uint8_t processing;
+} __packed;
+
+/* One data buffer; buf_type is one of DCI_BUF_PRIMARY/SECONDARY/CMD.
+ * data is protected by data_mutex; buf_track links the buffer onto a
+ * client's pending-write list.
+ */
+struct diag_dci_buffer_t {
+	unsigned char *data;
+	unsigned int data_len;
+	struct mutex data_mutex;
+	uint8_t in_busy;
+	uint8_t buf_type;
+	int data_source;
+	int capacity;
+	uint8_t in_list;
+	struct list_head buf_track;
+};
+
+/* Per-peripheral buffer set owned by a client: a primary buffer, a
+ * command buffer, the currently active buffer pointer and health stats.
+ */
+struct diag_dci_buf_peripheral_t {
+	struct diag_dci_buffer_t *buf_curr;
+	struct diag_dci_buffer_t *buf_primary;
+	struct diag_dci_buffer_t *buf_cmd;
+	struct diag_dci_health_t health;
+	struct mutex health_mutex;
+	struct mutex buf_mutex;
+};
+
+/* One registered DCI client.  track links it into the global client
+ * list; buffers is an array of num_buffers per-peripheral buffer sets;
+ * list_write_buf holds buffers pending delivery (write_buf_mutex).
+ */
+struct diag_dci_client_tbl {
+	int tgid;
+	struct diag_dci_reg_tbl_t client_info;
+	struct task_struct *client;
+	unsigned char *dci_log_mask;
+	unsigned char *dci_event_mask;
+	uint8_t real_time;
+	struct list_head track;
+	struct diag_dci_buf_peripheral_t *buffers;
+	uint8_t num_buffers;
+	uint8_t in_service;
+	struct list_head list_write_buf;
+	struct mutex write_buf_mutex;
+};
+
+/* Health counters plus a flag asking the kernel to reset them */
+struct diag_dci_health_stats {
+	struct diag_dci_health_t stats;
+	int reset_status;
+};
+
+/* Userspace health query: proc selects one peripheral or ALL_PROC */
+struct diag_dci_health_stats_proc {
+	int client_id;
+	struct diag_dci_health_stats health;
+	int proc;
+} __packed;
+
+/* Bitmask of DCI-capable peripherals for processor token 'proc' */
+struct diag_dci_peripherals_t {
+	int proc;
+	uint16_t list;
+} __packed;
+
+/* This is used for querying DCI Log
+   or Event Mask */
+struct diag_log_event_stats {
+	int client_id;
+	uint16_t code;
+	int is_set;
+} __packed;
+
+/* Header placed in front of a packet response handed to a client */
+struct diag_dci_pkt_rsp_header_t {
+	int type;
+	int length;
+	uint8_t delete_flag;
+	int uid;
+} __packed;
+
+/* On-the-wire DCI packet header carrying the request tag */
+struct diag_dci_pkt_header_t {
+	uint8_t start;
+	uint8_t version;
+	uint16_t len;
+	uint8_t pkt_code;
+	int tag;
+} __packed;
+
+/* Generic on-the-wire DCI header */
+struct diag_dci_header_t {
+	uint8_t start;
+	uint8_t version;
+	uint16_t length;
+	uint8_t cmd_code;
+} __packed;
+
+/* Per-processor operations and cumulative masks; one entry per token */
+struct dci_ops_tbl_t {
+	int ctx;
+	int mempool;
+	unsigned char log_mask_composite[DCI_LOG_MASK_SIZE];
+	unsigned char event_mask_composite[DCI_EVENT_MASK_SIZE];
+	int (*send_log_mask)(int token);
+	int (*send_event_mask)(int token);
+	uint16_t peripheral_status;
+} __packed;
+
+/* Channel open/handshake bookkeeping for a remote DCI channel */
+struct dci_channel_status_t {
+	int id;
+	int open;
+	int retry_count;
+	struct timer_list wait_time;
+	struct work_struct handshake_work;
+} __packed;
+
+extern struct dci_ops_tbl_t dci_ops_tbl[NUM_DCI_PROC];
+
+/* DCI status codes returned to userspace; deliberately offset from
+ * errno values so the two ranges cannot collide.
+ */
+enum {
+	DIAG_DCI_NO_ERROR = 1001,	/* No error */
+	DIAG_DCI_NO_REG,		/* Could not register */
+	DIAG_DCI_NO_MEM,		/* Failed memory allocation */
+	DIAG_DCI_NOT_SUPPORTED,	/* This particular client is not supported */
+	DIAG_DCI_HUGE_PACKET,	/* Request/Response Packet too huge */
+	DIAG_DCI_SEND_DATA_FAIL,/* writing to kernel or peripheral fails */
+	DIAG_DCI_TABLE_ERR	/* Error dealing with registration tables */
+};
+
+/* Largest of the two DCI header layouts, plus one byte */
+#define DCI_HDR_SIZE					\
+	((sizeof(struct diag_dci_pkt_header_t) >	\
+	  sizeof(struct diag_dci_header_t)) ?		\
+	(sizeof(struct diag_dci_pkt_header_t) + 1) :	\
+	(sizeof(struct diag_dci_header_t) + 1))		\
+
+#define DCI_BUF_SIZE (uint32_t)(DIAG_MAX_REQ_SIZE + DCI_HDR_SIZE)
+
+/* Largest of the two userspace request headers */
+#define DCI_REQ_HDR_SIZE				\
+	((sizeof(struct dci_pkt_req_t) >		\
+	  sizeof(struct dci_stream_req_t)) ?		\
+	(sizeof(struct dci_pkt_req_t)) :		\
+	(sizeof(struct dci_stream_req_t)))		\
+
+#define DCI_REQ_BUF_SIZE (uint32_t)(DIAG_MAX_REQ_SIZE + DCI_REQ_HDR_SIZE)
+
+#ifdef CONFIG_DEBUG_FS
+/* To collect debug information during each smd read */
+struct diag_dci_data_info {
+	unsigned long iteration;
+	int data_size;
+	char time_stamp[DIAG_TS_SIZE];
+	uint8_t peripheral;
+	uint8_t ch_type;
+	uint8_t proc;
+};
+
+extern struct diag_dci_data_info *dci_traffic;
+extern struct mutex dci_stat_mutex;
+#endif
+
+/* Core DCI lifecycle and data-path entry points */
+int diag_dci_init(void);
+void diag_dci_channel_init(void);
+void diag_dci_exit(void);
+int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry);
+int diag_dci_deinit_client(struct diag_dci_client_tbl *entry);
+void diag_dci_channel_open_work(struct work_struct *);
+void diag_dci_notify_client(int peripheral_mask, int data, int proc);
+void diag_dci_wakeup_clients(void);
+void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes);
+void diag_dci_process_peripheral_data(struct diagfwd_info *p_info, void *buf,
+				      int recd_bytes);
+int diag_process_dci_transaction(unsigned char *buf, int len);
+void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
+			 int token);
+void extract_dci_ctrl_pkt(unsigned char *buf, int len, int token);
+struct diag_dci_client_tbl *diag_dci_get_client_entry(int client_id);
+struct diag_dci_client_tbl *dci_lookup_client_entry_pid(int tgid);
+void diag_process_remote_dci_read_data(int index, void *buf, int recd_bytes);
+int diag_dci_get_support_list(struct diag_dci_peripherals_t *support_list);
+/* DCI Log streaming functions */
+void update_dci_cumulative_log_mask(int offset, unsigned int byte_index,
+						uint8_t byte_mask, int token);
+void diag_dci_invalidate_cumulative_log_mask(int token);
+int diag_send_dci_log_mask(int token);
+void extract_dci_log(unsigned char *buf, int len, int data_source, int token);
+int diag_dci_clear_log_mask(int client_id);
+int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
+			    uint16_t log_code);
+/* DCI event streaming functions */
+void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask, int token);
+void diag_dci_invalidate_cumulative_event_mask(int token);
+int diag_send_dci_event_mask(int token);
+void extract_dci_events(unsigned char *buf, int len, int data_source,
+			int token);
+int diag_dci_clear_event_mask(int client_id);
+int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
+			      uint16_t event_id);
+/* Statistics, real-time voting and write helpers */
+void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
+			     uint8_t peripheral, uint8_t proc);
+uint8_t diag_dci_get_cumulative_real_time(int token);
+int diag_dci_set_real_time(struct diag_dci_client_tbl *entry,
+			   uint8_t real_time);
+int diag_dci_copy_health_stats(struct diag_dci_health_stats_proc *stats_proc);
+int diag_dci_write_proc(uint8_t peripheral, int pkt_type, char *buf, int len);
+void dci_drain_data(struct timer_list *t);
+
+/* Remote-processor (bridge) variants, only with the bridge code built in */
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+int diag_send_dci_log_mask_remote(int token);
+int diag_send_dci_event_mask_remote(int token);
+unsigned char *dci_get_buffer_from_bridge(int token);
+int diag_dci_write_bridge(int token, unsigned char *buf, int len);
+int diag_dci_write_done_bridge(int index, unsigned char *buf, int len);
+int diag_dci_send_handshake_pkt(int index);
+int diag_dci_init_remote(void);
+#endif
+
+#endif
diff -Nruw linux-6.4-fbx/drivers/char/diag/diag_debugfs.c linux-6.4-fbx/drivers/char/diag/diag_debugfs.c
--- linux-6.4-fbx/drivers/char/diag/diag_debugfs.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diag_debugfs.c	2023-10-05 12:33:41.363634732 +0200
@@ -0,0 +1,1103 @@
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#include "diagfwd_bridge.h"
+#endif
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+#include "diagfwd_hsic.h"
+#include "diagfwd_smux.h"
+#endif
+#ifdef CONFIG_MHI_BUS
+#include "diagfwd_mhi.h"
+#endif
+#include "diagmem.h"
+#include "diag_dci.h"
+#include "diag_usb.h"
+#include "diagfwd_peripheral.h"
+#ifdef CONFIG_QCOM_SMD
+#include "diagfwd_smd.h"
+#endif
+#include "diagfwd_socket.h"
+#include "diag_debugfs.h"
+#include "diag_ipc_logging.h"
+
+#define DEBUG_BUF_SIZE	4096
+/* debugfs directory handle for all diag entries */
+static struct dentry *diag_dbgfs_dent;
+/*
+ * Resume cursors: each read() handler below emits at most one buffer of
+ * text per call and records how far it got in one of these indices so
+ * the next read can continue where the previous one stopped.
+ */
+static int diag_dbgfs_table_index;
+static int diag_dbgfs_mempool_index;
+static int diag_dbgfs_usbinfo_index;
+#ifdef CONFIG_QCOM_SMD
+static int diag_dbgfs_smdinfo_index;
+#endif
+static int diag_dbgfs_socketinfo_index;
+static int diag_dbgfs_hsicinfo_index;
+static int diag_dbgfs_mhiinfo_index;
+static int diag_dbgfs_bridgeinfo_index;
+static int diag_dbgfs_finished;
+static int diag_dbgfs_dci_data_index;
+static int diag_dbgfs_dci_finished;
+/* Serializes the dcistats read handler's cursor updates */
+static struct mutex diag_dci_dbgfs_mutex;
+/*
+ * debugfs read: dump overall driver state, one feature line per
+ * peripheral (uppercase letter = feature present, lowercase = absent),
+ * and the per-processor real-time mode.
+ */
+static ssize_t diag_dbgfs_read_status(struct file *file, char __user *ubuf,
+				      size_t count, loff_t *ppos)
+{
+	char *buf;
+	int ret, i;
+	unsigned int buf_size;
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!buf) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+	/* Use the full slab allocation, which may exceed DEBUG_BUF_SIZE */
+	buf_size = ksize(buf);
+	ret = scnprintf(buf, buf_size,
+		"CPU Tools ID: %d\n"
+		"Check Polling Response: %d\n"
+		"Polling Registered: %d\n"
+		"Uses Device Tree: %d\n"
+		"Apps Supports Separate CMDRSP: %d\n"
+		"Apps Supports HDLC Encoding: %d\n"
+		"Apps Supports Sockets: %d\n"
+		"Logging Mode: %d\n"
+		"RSP Buffer is Busy: %d\n"
+		"HDLC Disabled: %d\n"
+		"Time Sync Enabled: %d\n"
+		"MD session mode: %d\n"
+		"MD session mask: %d\n"
+		"Uses Time API: %d\n",
+		chk_config_get_id(),
+		chk_polling_response(),
+		driver->polling_reg_flag,
+		driver->use_device_tree,
+		driver->supports_separate_cmdrsp,
+		driver->supports_apps_hdlc_encoding,
+		driver->supports_sockets,
+		driver->logging_mode,
+		driver->rsp_buf_busy,
+		driver->hdlc_disabled,
+		driver->time_sync_enabled,
+		driver->md_session_mode,
+		driver->md_session_mask,
+		driver->uses_time_api);
+
+	/* Feature letters: F=feature mask received, C=separate cmd/rsp,
+	 * H=HDLC encoding, B=peripheral buffering, M=mask centralization,
+	 * Q=STM, S=sockets, T=feature mask sent.
+	 */
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		ret += scnprintf(buf+ret, buf_size-ret,
+			"p: %s Feature: %02x %02x |%c%c%c%c%c%c%c%c|\n",
+			PERIPHERAL_STRING(i),
+			driver->feature[i].feature_mask[0],
+			driver->feature[i].feature_mask[1],
+			driver->feature[i].rcvd_feature_mask ? 'F':'f',
+			driver->feature[i].separate_cmd_rsp ? 'C':'c',
+			driver->feature[i].encode_hdlc ? 'H':'h',
+			driver->feature[i].peripheral_buffering ? 'B':'b',
+			driver->feature[i].mask_centralization ? 'M':'m',
+			driver->feature[i].stm_support ? 'Q':'q',
+			driver->feature[i].sockets_enabled ? 'S':'s',
+			driver->feature[i].sent_feature_mask ? 'T':'t');
+	}
+
+#ifdef CONFIG_DIAG_OVER_USB
+	ret += scnprintf(buf+ret, buf_size-ret,
+		"USB Connected: %d\n",
+		driver->usb_connected);
+#endif
+
+	for (i = 0; i < DIAG_NUM_PROC; i++) {
+		ret += scnprintf(buf+ret, buf_size-ret,
+				 "Real Time Mode: %d: %d\n", i,
+				 driver->real_time_mode[i]);
+	}
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * debugfs read: dump DCI client counts, voting state and the recorded
+ * traffic ring (dci_traffic).  Each call emits one buffer and sets
+ * diag_dbgfs_dci_finished, so the following call returns 0 (EOF) and
+ * clears the flag for the next reader.
+ *
+ * NOTE(review): temp_data aliases dci_traffic and is dereferenced without
+ * a NULL check -- confirm dci_traffic is always allocated whenever this
+ * debugfs file exists.
+ */
+static ssize_t diag_dbgfs_read_dcistats(struct file *file,
+				char __user *ubuf, size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	unsigned int bytes_remaining, bytes_written = 0;
+	unsigned int bytes_in_buf = 0, i = 0;
+	struct diag_dci_data_info *temp_data = dci_traffic;
+	unsigned int buf_size;
+	buf_size = (DEBUG_BUF_SIZE < count) ? DEBUG_BUF_SIZE : count;
+
+	if (diag_dbgfs_dci_finished) {
+		diag_dbgfs_dci_finished = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * buf_size, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* Use the full slab allocation, which may exceed buf_size */
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+
+	mutex_lock(&diag_dci_dbgfs_mutex);
+	/* Header block is only printed at the start of a dump */
+	if (diag_dbgfs_dci_data_index == 0) {
+		bytes_written =
+			scnprintf(buf, buf_size,
+			"number of clients: %d\n"
+			"dci proc active: %d\n"
+			"dci real time vote: %d\n",
+			driver->num_dci_client,
+			(driver->proc_active_mask & DIAG_PROC_DCI) ? 1 : 0,
+			(driver->proc_rt_vote_mask[DIAG_LOCAL_PROC] &
+							DIAG_PROC_DCI) ? 1 : 0);
+		bytes_in_buf += bytes_written;
+		bytes_remaining -= bytes_written;
+#ifdef CONFIG_DIAG_OVER_USB
+		bytes_written = scnprintf(buf+bytes_in_buf, bytes_remaining,
+			"usb_connected: %d\n",
+			driver->usb_connected);
+		bytes_in_buf += bytes_written;
+		bytes_remaining -= bytes_written;
+#endif
+#ifdef CONFIG_PM_SLEEP
+		bytes_written = scnprintf(buf+bytes_in_buf,
+					  bytes_remaining,
+					  "dci power: active, relax: %lu, %lu\n",
+					  driver->diag_dev->power.wakeup->
+						active_count,
+					  driver->diag_dev->
+						power.wakeup->relax_count);
+		bytes_in_buf += bytes_written;
+		bytes_remaining -= bytes_written;
+#endif
+	}
+	/* Resume the traffic dump where the previous read stopped */
+	temp_data += diag_dbgfs_dci_data_index;
+	for (i = diag_dbgfs_dci_data_index; i < DIAG_DCI_DEBUG_CNT; i++) {
+		if (temp_data->iteration != 0) {
+			bytes_written = scnprintf(
+				buf + bytes_in_buf, bytes_remaining,
+				"i %-5ld\t"
+				"s %-5d\t"
+				"p %-5d\t"
+				"r %-5d\t"
+				"c %-5d\t"
+				"t %-15s\n",
+				temp_data->iteration,
+				temp_data->data_size,
+				temp_data->peripheral,
+				temp_data->proc,
+				temp_data->ch_type,
+				temp_data->time_stamp);
+			bytes_in_buf += bytes_written;
+			bytes_remaining -= bytes_written;
+			/* Check if there is room for another entry */
+			if (bytes_remaining < bytes_written)
+				break;
+		}
+		temp_data++;
+	}
+	diag_dbgfs_dci_data_index = (i >= DIAG_DCI_DEBUG_CNT) ? 0 : i + 1;
+	mutex_unlock(&diag_dci_dbgfs_mutex);
+	bytes_written = simple_read_from_buffer(ubuf, count, ppos, buf,
+								bytes_in_buf);
+	kfree(buf);
+	diag_dbgfs_dci_finished = 1;
+	return bytes_written;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * debugfs read: dump wakeup-source bookkeeping for the DCI and memory
+ * device paths plus the device's global wakeup counters.  Single-shot:
+ * the whole report fits in one buffer.
+ */
+static ssize_t diag_dbgfs_read_power(struct file *file, char __user *ubuf,
+				     size_t count, loff_t *ppos)
+{
+	char *buf;
+	int ret;
+	unsigned int buf_size;
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!buf) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* Use the full slab allocation, which may exceed DEBUG_BUF_SIZE */
+	buf_size = ksize(buf);
+	ret = scnprintf(buf, buf_size,
+		"DCI reference count: %d\n"
+		"DCI copy count: %d\n"
+		"DCI Client Count: %d\n\n"
+		"Memory Device reference count: %d\n"
+		"Memory Device copy count: %d\n"
+		"Logging mode: %d\n\n"
+		"Wakeup source active count: %lu\n"
+		"Wakeup source relax count: %lu\n\n",
+		driver->dci_ws.ref_count,
+		driver->dci_ws.copy_count,
+		driver->num_dci_client,
+		driver->md_ws.ref_count,
+		driver->md_ws.copy_count,
+		driver->logging_mode,
+		driver->diag_dev->power.wakeup->active_count,
+		driver->diag_dev->power.wakeup->relax_count);
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+
+	kfree(buf);
+	return ret;
+}
+#endif
+/*
+ * debugfs read: dump the command registration table one chunk per call,
+ * resuming at diag_dbgfs_table_index.  *ppos is reset to 0 before the
+ * copy-out so each call streams its chunk from offset 0; EOF is signalled
+ * by returning 0 once the index reaches cmd_reg_count.
+ */
+static ssize_t diag_dbgfs_read_table(struct file *file, char __user *ubuf,
+				     size_t count, loff_t *ppos)
+{
+	char *buf;
+	int ret = 0;
+	int i = 0;
+	int is_polling = 0;
+	unsigned int bytes_remaining;
+	unsigned int bytes_in_buffer = 0;
+	unsigned int bytes_written;
+	unsigned int buf_size;
+	struct list_head *start;
+	struct list_head *temp;
+	struct diag_cmd_reg_t *item = NULL;
+
+	mutex_lock(&driver->cmd_reg_mutex);
+	if (diag_dbgfs_table_index == driver->cmd_reg_count) {
+		/* Whole table dumped: reset the cursor and report EOF */
+		diag_dbgfs_table_index = 0;
+		mutex_unlock(&driver->cmd_reg_mutex);
+		return 0;
+	}
+
+	buf_size = (DEBUG_BUF_SIZE < count) ? DEBUG_BUF_SIZE : count;
+
+	buf = kzalloc(sizeof(char) * buf_size, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		mutex_unlock(&driver->cmd_reg_mutex);
+		return -ENOMEM;
+	}
+	/* Use the full slab allocation, which may exceed buf_size */
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+
+	if (diag_dbgfs_table_index == 0) {
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+					  "Client ids: Modem: %d, LPASS: %d, WCNSS: %d, SLPI: %d, APPS: %d\n",
+					  PERIPHERAL_MODEM, PERIPHERAL_LPASS,
+					  PERIPHERAL_WCNSS, PERIPHERAL_SENSORS,
+					  APPS_DATA);
+		bytes_in_buffer += bytes_written;
+		bytes_remaining -= bytes_written;
+	}
+
+	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+		item = list_entry(start, struct diag_cmd_reg_t, link);
+		/* Skip entries already emitted by previous reads */
+		if (i < diag_dbgfs_table_index) {
+			i++;
+			continue;
+		}
+
+		is_polling = diag_cmd_chk_polling(&item->entry);
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+					  "i: %3d, cmd_code: %4x, subsys_id: %4x, cmd_code_lo: %4x, cmd_code_hi: %4x, proc: %d, process_id: %5d %s\n",
+					  i++,
+					  item->entry.cmd_code,
+					  item->entry.subsys_id,
+					  item->entry.cmd_code_lo,
+					  item->entry.cmd_code_hi,
+					  item->proc,
+					  item->pid,
+					  (is_polling == DIAG_CMD_POLLING) ?
+					  "<-- Polling Cmd" : "");
+
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	diag_dbgfs_table_index = i;
+	mutex_unlock(&driver->cmd_reg_mutex);
+
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * debugfs read: dump one row per diag mempool, resuming at
+ * diag_dbgfs_mempool_index across calls; EOF once all pools are printed.
+ */
+static ssize_t diag_dbgfs_read_mempool(struct file *file, char __user *ubuf,
+						size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_mempool_t *mempool = NULL;
+
+	if (diag_dbgfs_mempool_index >= NUM_MEMORY_POOLS) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_mempool_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* Use the full slab allocation, which may exceed DEBUG_BUF_SIZE */
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	/* Column header is re-emitted at the start of every chunk */
+	bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"%-24s\t"
+			"%-10s\t"
+			"%-5s\t"
+			"%-5s\t"
+			"%-5s\n",
+			"POOL", "HANDLE", "COUNT", "SIZE", "ITEMSIZE");
+	bytes_in_buffer += bytes_written;
+	bytes_remaining = buf_size - bytes_in_buffer;
+
+	for (i = diag_dbgfs_mempool_index; i < NUM_MEMORY_POOLS; i++) {
+		mempool = &diag_mempools[i];
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"%-24s\t"
+			"%-10p\t"
+			"%-5d\t"
+			"%-5d\t"
+			"%-5d\n",
+			mempool->name,
+			mempool->pool,
+			mempool->count,
+			mempool->poolsize,
+			mempool->itemsize);
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	diag_dbgfs_mempool_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * debugfs read: dump state for each enabled diag USB device, resuming at
+ * diag_dbgfs_usbinfo_index across calls; disabled devices are skipped.
+ */
+static ssize_t diag_dbgfs_read_usbinfo(struct file *file, char __user *ubuf,
+				       size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_usb_info *usb_info = NULL;
+
+	if (diag_dbgfs_usbinfo_index >= NUM_DIAG_USB_DEV) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_usbinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* Use the full slab allocation, which may exceed DEBUG_BUF_SIZE */
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = diag_dbgfs_usbinfo_index; i < NUM_DIAG_USB_DEV; i++) {
+		usb_info = &diag_usb[i];
+		if (!usb_info->enabled)
+			continue;
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"id: %d\n"
+			"name: %s\n"
+			"hdl: %pK\n"
+			"connected: %d\n"
+			"diag state: %d\n"
+			"enabled: %d\n"
+			"mempool: %s\n"
+			"read pending: %d\n"
+			"read count: %lu\n"
+			"write count: %lu\n"
+			"read work pending: %d\n"
+			"read done work pending: %d\n"
+			"connect work pending: %d\n"
+			"disconnect work pending: %d\n"
+			"max size supported: %d\n\n",
+			usb_info->id,
+			usb_info->name,
+			usb_info->hdl,
+			atomic_read(&usb_info->connected),
+			atomic_read(&usb_info->diag_state),
+			usb_info->enabled,
+			DIAG_MEMPOOL_GET_NAME(usb_info->mempool),
+			atomic_read(&usb_info->read_pending),
+			usb_info->read_cnt,
+			usb_info->write_cnt,
+			work_pending(&usb_info->read_work),
+			work_pending(&usb_info->read_done_work),
+			work_pending(&usb_info->connect_work),
+			work_pending(&usb_info->disconnect_work),
+			usb_info->max_size);
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	diag_dbgfs_usbinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+#ifdef CONFIG_QCOM_SMD
+/*
+ * debugfs read: dump per-channel SMD state (all channel types x all
+ * peripherals) plus the forwarding context each channel is bound to.
+ *
+ * Fix: the original `default: return -EINVAL;` leaked the kzalloc'd
+ * scratch buffer; free it before bailing out.
+ *
+ * NOTE(review): the resume logic looks suspect -- diag_dbgfs_smdinfo_index
+ * is compared against NUM_PERIPHERALS but set from the *type* loop
+ * counter, and the inner `break` only exits the peripheral loop.  Left
+ * as-is here; confirm the intended chunking behaviour before changing it.
+ */
+static ssize_t diag_dbgfs_read_smdinfo(struct file *file, char __user *ubuf,
+				       size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	int j = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_smd_info *smd_info = NULL;
+	struct diagfwd_info *fwd_ctxt = NULL;
+
+	if (diag_dbgfs_smdinfo_index >= NUM_PERIPHERALS) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_smdinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* Use the full slab allocation, which may exceed DEBUG_BUF_SIZE */
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = 0; i < NUM_TYPES; i++) {
+		for (j = 0; j < NUM_PERIPHERALS; j++) {
+			/* Select the SMD channel array for this type */
+			switch (i) {
+			case TYPE_DATA:
+				smd_info = &smd_data[j];
+				break;
+			case TYPE_CNTL:
+				smd_info = &smd_cntl[j];
+				break;
+			case TYPE_DCI:
+				smd_info = &smd_dci[j];
+				break;
+			case TYPE_CMD:
+				smd_info = &smd_cmd[j];
+				break;
+			case TYPE_DCI_CMD:
+				smd_info = &smd_dci_cmd[j];
+				break;
+			default:
+				/* Unknown channel type: release the scratch
+				 * buffer before returning (was leaked).
+				 */
+				kfree(buf);
+				return -EINVAL;
+			}
+
+			fwd_ctxt = (struct diagfwd_info *)(smd_info->fwd_ctxt);
+
+			bytes_written = scnprintf(buf+bytes_in_buffer,
+				bytes_remaining,
+				"name\t\t:\t%s\n"
+				"hdl\t\t:\t%pK\n"
+				"inited\t\t:\t%d\n"
+				"opened\t\t:\t%d\n"
+				"diag_state\t:\t%d\n"
+				"fifo size\t:\t%d\n"
+				"open pending\t:\t%d\n"
+				"close pending\t:\t%d\n"
+				"read pending\t:\t%d\n"
+				"buf_1 busy\t:\t%d\n"
+				"buf_2 busy\t:\t%d\n"
+				"bytes read\t:\t%lu\n"
+				"bytes written\t:\t%lu\n"
+				"fwd inited\t:\t%d\n"
+				"fwd opened\t:\t%d\n"
+				"fwd ch_open\t:\t%d\n\n",
+				smd_info->name,
+				smd_info->hdl,
+				smd_info->inited,
+				atomic_read(&smd_info->opened),
+				atomic_read(&smd_info->diag_state),
+				smd_info->fifo_size,
+				work_pending(&smd_info->open_work),
+				work_pending(&smd_info->close_work),
+				work_pending(&smd_info->read_work),
+				(fwd_ctxt && fwd_ctxt->buf_1) ?
+				atomic_read(&fwd_ctxt->buf_1->in_busy) : -1,
+				(fwd_ctxt && fwd_ctxt->buf_2) ?
+				atomic_read(&fwd_ctxt->buf_2->in_busy) : -1,
+				(fwd_ctxt) ? fwd_ctxt->read_bytes : 0,
+				(fwd_ctxt) ? fwd_ctxt->write_bytes : 0,
+				(fwd_ctxt) ? fwd_ctxt->inited : -1,
+				(fwd_ctxt) ?
+				atomic_read(&fwd_ctxt->opened) : -1,
+				(fwd_ctxt) ? fwd_ctxt->ch_open : -1);
+			bytes_in_buffer += bytes_written;
+
+			/* Check if there is room to add another table entry */
+			bytes_remaining = buf_size - bytes_in_buffer;
+
+			if (bytes_remaining < bytes_written)
+				break;
+		}
+	}
+	diag_dbgfs_smdinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+#endif
+
+/*
+ * debugfs read handler for "socketinfo": dumps the state of every diag
+ * socket channel, one record per (channel type, peripheral) pair, until
+ * either all tables are printed or the scratch buffer fills up.
+ */
+static ssize_t diag_dbgfs_read_socketinfo(struct file *file, char __user *ubuf,
+					  size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	int j = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_socket_info *info = NULL;
+	struct diagfwd_info *fwd_ctxt = NULL;
+
+	if (diag_dbgfs_socketinfo_index >= NUM_PERIPHERALS) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_socketinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = 0; i < NUM_TYPES; i++) {
+		for (j = 0; j < NUM_PERIPHERALS; j++) {
+			switch (i) {
+			case TYPE_DATA:
+				info = &socket_data[j];
+				break;
+			case TYPE_CNTL:
+				info = &socket_cntl[j];
+				break;
+			case TYPE_DCI:
+				info = &socket_dci[j];
+				break;
+			case TYPE_CMD:
+				info = &socket_cmd[j];
+				break;
+			case TYPE_DCI_CMD:
+				info = &socket_dci_cmd[j];
+				break;
+			default:
+				/* Fix: free the scratch buffer before
+				 * bailing out; the original leaked it here.
+				 */
+				kfree(buf);
+				return -EINVAL;
+			}
+
+			fwd_ctxt = (struct diagfwd_info *)(info->fwd_ctxt);
+
+			bytes_written = scnprintf(buf+bytes_in_buffer,
+				bytes_remaining,
+				"name\t\t:\t%s\n"
+				"hdl\t\t:\t%pK\n"
+				"inited\t\t:\t%d\n"
+				"opened\t\t:\t%d\n"
+				"diag_state\t:\t%d\n"
+				"buf_1 busy\t:\t%d\n"
+				"buf_2 busy\t:\t%d\n"
+				"flow ctrl count\t:\t%d\n"
+				"data_ready\t:\t%d\n"
+				"init pending\t:\t%d\n"
+				"read pending\t:\t%d\n"
+				"bytes read\t:\t%lu\n"
+				"bytes written\t:\t%lu\n"
+				"fwd inited\t:\t%d\n"
+				"fwd opened\t:\t%d\n"
+				"fwd ch_open\t:\t%d\n\n",
+				info->name,
+				info->hdl,
+				info->inited,
+				atomic_read(&info->opened),
+				atomic_read(&info->diag_state),
+				(fwd_ctxt && fwd_ctxt->buf_1) ?
+				atomic_read(&fwd_ctxt->buf_1->in_busy) : -1,
+				(fwd_ctxt && fwd_ctxt->buf_2) ?
+				atomic_read(&fwd_ctxt->buf_2->in_busy) : -1,
+				atomic_read(&info->flow_cnt),
+				info->data_ready,
+				work_pending(&info->init_work),
+				work_pending(&info->read_work),
+				(fwd_ctxt) ? fwd_ctxt->read_bytes : 0,
+				(fwd_ctxt) ? fwd_ctxt->write_bytes : 0,
+				(fwd_ctxt) ? fwd_ctxt->inited : -1,
+				(fwd_ctxt) ?
+				atomic_read(&fwd_ctxt->opened) : -1,
+				(fwd_ctxt) ? fwd_ctxt->ch_open : -1);
+			bytes_in_buffer += bytes_written;
+
+			/* Check if there is room to add another table entry */
+			bytes_remaining = buf_size - bytes_in_buffer;
+
+			/* Fix: leave BOTH loops when the buffer is full; a
+			 * plain break only terminated the inner peripheral
+			 * loop and kept scanning with no room left.
+			 */
+			if (bytes_remaining < bytes_written)
+				goto done;
+		}
+	}
+done:
+	diag_dbgfs_socketinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * debugfs write handler for "debug": parses a base-10 number from
+ * userspace and stores it in diag_debug_mask (a uint16_t bitmask of
+ * DIAG_DEBUG_* categories).  Returns @count on success, -EINVAL on a
+ * malformed or out-of-range value, -EFAULT on a bad user pointer.
+ */
+static ssize_t diag_dbgfs_write_debug(struct file *fp, const char __user *buf,
+				      size_t count, loff_t *ppos)
+{
+	const int size = 10;
+	char cmd[size];		/* was unsigned char: kstrtol() takes char * */
+	long value = 0;
+	int len = 0;
+
+	if (count < 1)
+		return -EINVAL;
+
+	/* Copy at most size-1 bytes, leaving room for the NUL terminator. */
+	len = (count < (size - 1)) ? count : size - 1;
+	if (copy_from_user(cmd, buf, len))
+		return -EFAULT;
+
+	cmd[len] = 0;
+	if (cmd[len-1] == '\n') {
+		cmd[len-1] = 0;
+		len--;
+	}
+
+	if (kstrtol(cmd, 10, &value))
+		return -EINVAL;
+
+	/* Reject values that do not fit the 16-bit mask instead of
+	 * silently truncating them on the cast below.
+	 */
+	if (value < 0 || value > (long)U16_MAX)
+		return -EINVAL;
+
+	diag_debug_mask = (uint16_t)value;
+	return count;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+/*
+ * debugfs read handler for "hsicinfo": dumps the state of each enabled
+ * HSIC bridge device, resuming from diag_dbgfs_hsicinfo_index across
+ * successive reads.
+ */
+static ssize_t diag_dbgfs_read_hsicinfo(struct file *file, char __user *ubuf,
+					size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_hsic_info *hsic_info = NULL;
+
+	/* Fix: the reset check must use the same bound as the scan loop
+	 * below (NUM_HSIC_DEV); the original compared against
+	 * NUM_DIAG_USB_DEV, so pagination misbehaved whenever the two
+	 * constants differ.
+	 */
+	if (diag_dbgfs_hsicinfo_index >= NUM_HSIC_DEV) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_hsicinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = diag_dbgfs_hsicinfo_index; i < NUM_HSIC_DEV; i++) {
+		hsic_info = &diag_hsic[i];
+		if (!hsic_info->enabled)
+			continue;
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"id: %d\n"
+			"name: %s\n"
+			"bridge index: %s\n"
+			"opened: %d\n"
+			"enabled: %d\n"
+			"suspended: %d\n"
+			"mempool: %s\n"
+			"read work pending: %d\n"
+			"open work pending: %d\n"
+			"close work pending: %d\n\n",
+			hsic_info->id,
+			hsic_info->name,
+			DIAG_BRIDGE_GET_NAME(hsic_info->dev_id),
+			hsic_info->opened,
+			hsic_info->enabled,
+			hsic_info->suspended,
+			DIAG_MEMPOOL_GET_NAME(hsic_info->mempool),
+			work_pending(&hsic_info->read_work),
+			work_pending(&hsic_info->open_work),
+			work_pending(&hsic_info->close_work));
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	diag_dbgfs_hsicinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+/* Read-only "hsicinfo" debugfs file. */
+const struct file_operations diag_dbgfs_hsicinfo_ops = {
+	.read = diag_dbgfs_read_hsicinfo,
+};
+#endif
+#ifdef CONFIG_MHI_BUS
+/*
+ * debugfs read handler for "mhiinfo": dumps the state of every MHI diag
+ * bridge device, resuming from diag_dbgfs_mhiinfo_index across
+ * successive reads.  Both the reset check and the scan loop use
+ * NUM_MHI_DEV, so pagination is self-consistent here.
+ */
+static ssize_t diag_dbgfs_read_mhiinfo(struct file *file, char __user *ubuf,
+				       size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_mhi_info *mhi_info = NULL;
+
+	if (diag_dbgfs_mhiinfo_index >= NUM_MHI_DEV) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_mhiinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* ksize() may return more than DEBUG_BUF_SIZE; use the real size. */
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = diag_dbgfs_mhiinfo_index; i < NUM_MHI_DEV; i++) {
+		mhi_info = &diag_mhi[i];
+		/* NOTE(review): "enabled %d" lacks the ": " separator the
+		 * other fields use — cosmetic output inconsistency only.
+		 */
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"id: %d\n"
+			"name: %s\n"
+			"enabled %d\n"
+			"bridge index: %s\n"
+			"mempool: %s\n"
+			"read ch opened: %d\n"
+			"write ch opened: %d\n"
+			"read work pending: %d\n"
+			"read done work pending: %d\n"
+			"open work pending: %d\n"
+			"close work pending: %d\n\n",
+			mhi_info->id,
+			mhi_info->name,
+			mhi_info->enabled,
+			DIAG_BRIDGE_GET_NAME(mhi_info->dev_id),
+			DIAG_MEMPOOL_GET_NAME(mhi_info->mempool),
+			atomic_read(&mhi_info->read_ch.opened),
+			atomic_read(&mhi_info->write_ch.opened),
+			work_pending(&mhi_info->read_work),
+			work_pending(&mhi_info->read_done_work),
+			work_pending(&mhi_info->open_work),
+			work_pending(&mhi_info->close_work));
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	diag_dbgfs_mhiinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+
+/* Read-only "mhiinfo" debugfs file. */
+const struct file_operations diag_dbgfs_mhiinfo_ops = {
+	.read = diag_dbgfs_read_mhiinfo,
+};
+
+#endif
+/*
+ * debugfs read handler for "bridge": dumps each initialized remote
+ * bridge entry, resuming from diag_dbgfs_bridgeinfo_index across
+ * successive reads.
+ */
+static ssize_t diag_dbgfs_read_bridge(struct file *file, char __user *ubuf,
+				      size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diagfwd_bridge_info *info = NULL;
+
+	/* Fix: the reset check must use the same bound as the scan loop
+	 * below (NUM_REMOTE_DEV); the original compared against
+	 * NUM_DIAG_USB_DEV, so pagination misbehaved whenever the two
+	 * constants differ.
+	 */
+	if (diag_dbgfs_bridgeinfo_index >= NUM_REMOTE_DEV) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_bridgeinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = diag_dbgfs_bridgeinfo_index; i < NUM_REMOTE_DEV; i++) {
+		info = &bridge_info[i];
+		if (!info->inited)
+			continue;
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"id: %d\n"
+			"name: %s\n"
+			"type: %d\n"
+			"inited: %d\n"
+			"ctxt: %d\n"
+			"dev_ops: %pK\n"
+			"dci_read_buf: %pK\n"
+			"dci_read_ptr: %pK\n"
+			"dci_read_len: %d\n\n",
+			info->id,
+			info->name,
+			info->type,
+			info->inited,
+			info->ctxt,
+			info->dev_ops,
+			info->dci_read_buf,
+			info->dci_read_ptr,
+			info->dci_read_len);
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	diag_dbgfs_bridgeinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+/* Read-only "bridge" debugfs file. */
+const struct file_operations diag_dbgfs_bridge_ops = {
+	.read = diag_dbgfs_read_bridge,
+};
+
+#endif
+
+/*
+ * file_operations tables for the remaining debugfs entries.  Every file
+ * is read-only except "debug", which is write-only (sets
+ * diag_debug_mask).
+ */
+const struct file_operations diag_dbgfs_status_ops = {
+	.read = diag_dbgfs_read_status,
+};
+
+#ifdef CONFIG_QCOM_SMD
+const struct file_operations diag_dbgfs_smdinfo_ops = {
+	.read = diag_dbgfs_read_smdinfo,
+};
+#endif
+
+const struct file_operations diag_dbgfs_socketinfo_ops = {
+	.read = diag_dbgfs_read_socketinfo,
+};
+
+const struct file_operations diag_dbgfs_table_ops = {
+	.read = diag_dbgfs_read_table,
+};
+
+const struct file_operations diag_dbgfs_mempool_ops = {
+	.read = diag_dbgfs_read_mempool,
+};
+
+const struct file_operations diag_dbgfs_usbinfo_ops = {
+	.read = diag_dbgfs_read_usbinfo,
+};
+
+const struct file_operations diag_dbgfs_dcistats_ops = {
+	.read = diag_dbgfs_read_dcistats,
+};
+
+#ifdef CONFIG_PM_SLEEP
+const struct file_operations diag_dbgfs_power_ops = {
+	.read = diag_dbgfs_read_power,
+};
+#endif
+
+const struct file_operations diag_dbgfs_debug_ops = {
+	.write = diag_dbgfs_write_debug
+};
+
+/*
+ * Create the "diag" debugfs directory and all of its files, reset the
+ * per-file pagination indices and allocate the DCI traffic log.
+ * Returns 0 on success, -ENOMEM on failure.
+ *
+ * Fix: debugfs_create_file() in this kernel returns an ERR_PTR on
+ * failure and never NULL, so the original "if (!entry)" checks were
+ * dead code and creation errors went unnoticed; use IS_ERR() instead.
+ */
+int diag_debugfs_init(void)
+{
+	struct dentry *entry = NULL;
+
+	diag_dbgfs_dent = debugfs_create_dir("diag", 0);
+	if (IS_ERR(diag_dbgfs_dent))
+		return -ENOMEM;
+
+	entry = debugfs_create_file("status", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_status_ops);
+	if (IS_ERR(entry))
+		goto err;
+#ifdef CONFIG_QCOM_SMD
+	entry = debugfs_create_file("smdinfo", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_smdinfo_ops);
+	if (IS_ERR(entry))
+		goto err;
+#endif
+	entry = debugfs_create_file("socketinfo", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_socketinfo_ops);
+	if (IS_ERR(entry))
+		goto err;
+
+	entry = debugfs_create_file("table", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_table_ops);
+	if (IS_ERR(entry))
+		goto err;
+
+	entry = debugfs_create_file("mempool", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_mempool_ops);
+	if (IS_ERR(entry))
+		goto err;
+
+	entry = debugfs_create_file("usbinfo", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_usbinfo_ops);
+	if (IS_ERR(entry))
+		goto err;
+
+	entry = debugfs_create_file("dci_stats", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_dcistats_ops);
+	if (IS_ERR(entry))
+		goto err;
+#ifdef CONFIG_PM_SLEEP
+	entry = debugfs_create_file("power", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_power_ops);
+	if (IS_ERR(entry))
+		goto err;
+#endif
+	entry = debugfs_create_file("debug", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_debug_ops);
+	if (IS_ERR(entry))
+		goto err;
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	entry = debugfs_create_file("bridge", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_bridge_ops);
+	if (IS_ERR(entry))
+		goto err;
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+	entry = debugfs_create_file("hsicinfo", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_hsicinfo_ops);
+	if (IS_ERR(entry))
+		goto err;
+#endif
+#ifdef CONFIG_MHI_BUS
+	entry = debugfs_create_file("mhiinfo", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_mhiinfo_ops);
+	if (IS_ERR(entry))
+		goto err;
+#endif
+#endif
+	/* Start every paginated reader from the first entry. */
+	diag_dbgfs_table_index = 0;
+	diag_dbgfs_mempool_index = 0;
+	diag_dbgfs_usbinfo_index = 0;
+#ifdef CONFIG_QCOM_SMD
+	diag_dbgfs_smdinfo_index = 0;
+#endif
+	diag_dbgfs_socketinfo_index = 0;
+	diag_dbgfs_hsicinfo_index = 0;
+	diag_dbgfs_bridgeinfo_index = 0;
+	diag_dbgfs_mhiinfo_index = 0;
+	diag_dbgfs_finished = 0;
+	diag_dbgfs_dci_data_index = 0;
+	diag_dbgfs_dci_finished = 0;
+
+	/* DCI related structures */
+	dci_traffic = kzalloc(sizeof(struct diag_dci_data_info) *
+				DIAG_DCI_DEBUG_CNT, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(dci_traffic))
+		pr_warn("diag: could not allocate memory for dci debug info\n");
+
+	mutex_init(&dci_stat_mutex);
+	mutex_init(&diag_dci_dbgfs_mutex);
+	return 0;
+err:
+	/* kfree(NULL) is a no-op; dci_traffic is not yet allocated here. */
+	kfree(dci_traffic);
+	debugfs_remove_recursive(diag_dbgfs_dent);
+	return -ENOMEM;
+}
+
+/* Tear down the "diag" debugfs tree and free DCI debug bookkeeping. */
+void diag_debugfs_cleanup(void)
+{
+	if (diag_dbgfs_dent) {
+		debugfs_remove_recursive(diag_dbgfs_dent);
+		diag_dbgfs_dent = NULL;
+	}
+
+	/* kfree(NULL) is a no-op if the init-time allocation failed. */
+	kfree(dci_traffic);
+	mutex_destroy(&dci_stat_mutex);
+	mutex_destroy(&diag_dci_dbgfs_mutex);
+}
+#else
+/* Debugfs support compiled out: no-op stubs keep callers #ifdef-free. */
+int diag_debugfs_init(void) { return 0; }
+void diag_debugfs_cleanup(void) { }
+#endif
diff -Nruw linux-6.4-fbx/drivers/char/diag./diag_debugfs.h linux-6.4-fbx/drivers/char/diag/diag_debugfs.h
--- linux-6.4-fbx/drivers/char/diag./diag_debugfs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diag_debugfs.h	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,19 @@
+/* Copyright (c) 2012, 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAG_DEBUGFS_H
+#define DIAG_DEBUGFS_H
+
+/* Create/remove the "diag" debugfs directory and its files.
+ * diag_debugfs_init() returns 0 on success or a negative errno.
+ */
+int diag_debugfs_init(void);
+void diag_debugfs_cleanup(void);
+
+#endif
diff -Nruw linux-6.4-fbx/drivers/char/diag./diag_ipc_logging.h linux-6.4-fbx/drivers/char/diag/diag_ipc_logging.h
--- linux-6.4-fbx/drivers/char/diag./diag_ipc_logging.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diag_ipc_logging.h	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,47 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGIPCLOG_H
+#define DIAGIPCLOG_H
+
+#include <linux/ipc_logging.h>
+
+#define DIAG_IPC_LOG_PAGES	50
+
+/* Bit values for diag_debug_mask; DIAG_LOG() callers pass one of these
+ * as log_lvl to select which categories are emitted.
+ */
+#define DIAG_DEBUG_USERSPACE	0x0001
+#define DIAG_DEBUG_MUX		0x0002
+#define DIAG_DEBUG_DCI		0x0004
+#define DIAG_DEBUG_PERIPHERALS	0x0008
+#define DIAG_DEBUG_MASKS	0x0010
+#define DIAG_DEBUG_POWER	0x0020
+#define DIAG_DEBUG_BRIDGE	0x0040
+
+/* Compile-time switch: when defined, DIAG_LOG() routes through
+ * ipc_logging and honours diag_debug_mask.
+ */
+// #define DIAG_DEBUG
+
+extern uint16_t diag_debug_mask;
+
+
+#ifdef DIAG_DEBUG
+extern void *diag_ipc_log;
+
+#define DIAG_LOG(log_lvl, msg, ...)					\
+	do {								\
+		if (diag_ipc_log && (log_lvl & diag_debug_mask)) {	\
+			ipc_log_string(diag_ipc_log,			\
+				"[%s] " msg, __func__, ##__VA_ARGS__);	\
+		}							\
+	} while (0)
+#else
+/* NOTE(review): this fallback logs unconditionally — it ignores both
+ * log_lvl and diag_debug_mask — and the printk has no KERN_<level>
+ * prefix and no space after "[%s]".  Confirm this is intended
+ * bring-up behaviour rather than an oversight.
+ */
+#define DIAG_LOG(log_lvl, msg, ...) printk("diaglog: [%s]" msg, __func__, ##__VA_ARGS__)
+#endif
+
+#endif
diff -Nruw linux-6.4-fbx/drivers/char/diag./diag_masks.c linux-6.4-fbx/drivers/char/diag/diag_masks.c
--- linux-6.4-fbx/drivers/char/diag./diag_masks.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diag_masks.c	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,2292 @@
+/* Copyright (c) 2008-2020 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/diagchar.h>
+#include <linux/kmemleak.h>
+#include <linux/workqueue.h>
+#include <linux/uaccess.h>
+#include "diagchar.h"
+#include "diagfwd_cntl.h"
+#include "diag_masks.h"
+#include "diagfwd_peripheral.h"
+#include "diag_ipc_logging.h"
+
+/* Wildcards accepted by the mask-update helpers below: ALL_EQUIP_ID
+ * selects every log equipment id, ALL_SSID every message SSID range.
+ */
+#define ALL_EQUIP_ID		100
+#define ALL_SSID		-1
+
+/* Sets bit x in the caller-local feature_bytes[] array. */
+#define DIAG_SET_FEATURE_MASK(x) (feature_bytes[(x)/8] |= (1 << (x & 0x7)))
+
+/* Global (non memory-device-session) mask state. */
+struct diag_mask_info msg_mask;
+struct diag_mask_info msg_bt_mask;
+struct diag_mask_info log_mask;
+struct diag_mask_info event_mask;
+
+/* Build-time table of valid SSID ranges, one entry per MSG_SSID_n group. */
+static const struct diag_ssid_range_t msg_mask_tbl[] = {
+	{ .ssid_first = MSG_SSID_0, .ssid_last = MSG_SSID_0_LAST },
+	{ .ssid_first = MSG_SSID_1, .ssid_last = MSG_SSID_1_LAST },
+	{ .ssid_first = MSG_SSID_2, .ssid_last = MSG_SSID_2_LAST },
+	{ .ssid_first = MSG_SSID_3, .ssid_last = MSG_SSID_3_LAST },
+	{ .ssid_first = MSG_SSID_4, .ssid_last = MSG_SSID_4_LAST },
+	{ .ssid_first = MSG_SSID_5, .ssid_last = MSG_SSID_5_LAST },
+	{ .ssid_first = MSG_SSID_6, .ssid_last = MSG_SSID_6_LAST },
+	{ .ssid_first = MSG_SSID_7, .ssid_last = MSG_SSID_7_LAST },
+	{ .ssid_first = MSG_SSID_8, .ssid_last = MSG_SSID_8_LAST },
+	{ .ssid_first = MSG_SSID_9, .ssid_last = MSG_SSID_9_LAST },
+	{ .ssid_first = MSG_SSID_10, .ssid_last = MSG_SSID_10_LAST },
+	{ .ssid_first = MSG_SSID_11, .ssid_last = MSG_SSID_11_LAST },
+	{ .ssid_first = MSG_SSID_12, .ssid_last = MSG_SSID_12_LAST },
+	{ .ssid_first = MSG_SSID_13, .ssid_last = MSG_SSID_13_LAST },
+	{ .ssid_first = MSG_SSID_14, .ssid_last = MSG_SSID_14_LAST },
+	{ .ssid_first = MSG_SSID_15, .ssid_last = MSG_SSID_15_LAST },
+	{ .ssid_first = MSG_SSID_16, .ssid_last = MSG_SSID_16_LAST },
+	{ .ssid_first = MSG_SSID_17, .ssid_last = MSG_SSID_17_LAST },
+	{ .ssid_first = MSG_SSID_18, .ssid_last = MSG_SSID_18_LAST },
+	{ .ssid_first = MSG_SSID_19, .ssid_last = MSG_SSID_19_LAST },
+	{ .ssid_first = MSG_SSID_20, .ssid_last = MSG_SSID_20_LAST },
+	{ .ssid_first = MSG_SSID_21, .ssid_last = MSG_SSID_21_LAST },
+	{ .ssid_first = MSG_SSID_22, .ssid_last = MSG_SSID_22_LAST },
+	{ .ssid_first = MSG_SSID_23, .ssid_last = MSG_SSID_23_LAST },
+	{ .ssid_first = MSG_SSID_24, .ssid_last = MSG_SSID_24_LAST }
+};
+
+/*
+ * Returns non-zero when masks for @md_peripheral may be updated on
+ * behalf of process @pid: either the process has no memory-device
+ * session, or its session owns that peripheral.
+ */
+static int diag_check_update(int md_peripheral, int pid)
+{
+	int ret;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+	/* The original "(info && ...)" was redundant: || already
+	 * short-circuits when info is NULL.
+	 */
+	ret = (!info ||
+	       (info->peripheral_mask & MD_PERIPHERAL_MASK(md_peripheral)));
+	mutex_unlock(&driver->md_session_lock);
+
+	return ret;
+}
+
+/*
+ * Decide whether the apps processor should answer mask commands itself.
+ * It responds only on apps-only targets; once the Modem control channel
+ * is up and its feature mask has arrived, it defers unless the Modem
+ * advertises mask centralization.
+ */
+static int diag_apps_responds(void)
+{
+	struct diagfwd_info *modem_cntl;
+
+	if (!chk_apps_only())
+		return 0;
+
+	modem_cntl = driver->diagfwd_cntl[PERIPHERAL_MODEM];
+	if (modem_cntl && modem_cntl->ch_open &&
+	    driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)
+		return driver->feature[PERIPHERAL_MODEM].mask_centralization ?
+			1 : 0;
+
+	return 1;
+}
+
+/*
+ * Push the current log mask for @equip_id (or for every equipment id
+ * when @equip_id == ALL_EQUIP_ID) to @peripheral over its control
+ * channel.  When the peripheral belongs to a memory-device session the
+ * session's private mask is sent instead of the global log_mask.
+ */
+static void diag_send_log_mask_update(uint8_t peripheral, int equip_id)
+{
+	int i;
+	int err = 0;
+	int send_once = 0;
+	int header_len = sizeof(struct diag_ctrl_log_mask);
+	uint8_t *buf = NULL;
+	uint8_t *temp = NULL;
+	uint32_t mask_size = 0;
+	struct diag_ctrl_log_mask ctrl_pkt;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_log_mask_t *mask = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return;
+	}
+
+	/* Pick the session-private mask when this peripheral is owned by
+	 * a memory-device session, otherwise the global mask.
+	 */
+	if (driver->md_session_mask != 0 &&
+	    driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral))
+		mask_info = driver->md_session_map[peripheral]->log_mask;
+	else
+		mask_info = &log_mask;
+
+	if (!mask_info)
+		return;
+
+	mask = (struct diag_log_mask_t *)mask_info->ptr;
+	buf = mask_info->update_buf;
+
+	/* ALL_DISABLED / ALL_ENABLED are communicated via ctrl_pkt.status
+	 * alone, so a single empty packet suffices (send_once).
+	 */
+	switch (mask_info->status) {
+	case DIAG_CTRL_MASK_ALL_DISABLED:
+		ctrl_pkt.equip_id = 0;
+		ctrl_pkt.num_items = 0;
+		ctrl_pkt.log_mask_size = 0;
+		send_once = 1;
+		break;
+	case DIAG_CTRL_MASK_ALL_ENABLED:
+		ctrl_pkt.equip_id = 0;
+		ctrl_pkt.num_items = 0;
+		ctrl_pkt.log_mask_size = 0;
+		send_once = 1;
+		break;
+	case DIAG_CTRL_MASK_VALID:
+		send_once = 0;
+		break;
+	default:
+		pr_debug("diag: In %s, invalid log_mask status\n", __func__);
+		return;
+	}
+
+	mutex_lock(&mask_info->lock);
+	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+		if (equip_id != i && equip_id != ALL_EQUIP_ID)
+			continue;
+
+		mutex_lock(&mask->lock);
+		ctrl_pkt.cmd_type = DIAG_CTRL_MSG_LOG_MASK;
+		ctrl_pkt.stream_id = 1;
+		ctrl_pkt.status = mask_info->status;
+		if (mask_info->status == DIAG_CTRL_MASK_VALID) {
+			mask_size = LOG_ITEMS_TO_SIZE(mask->num_items_tools);
+			ctrl_pkt.equip_id = i;
+			ctrl_pkt.num_items = mask->num_items_tools;
+			ctrl_pkt.log_mask_size = mask_size;
+		}
+		ctrl_pkt.data_len = LOG_MASK_CTRL_HEADER_LEN + mask_size;
+
+		/* Grow the shared update buffer on demand; it is never
+		 * shrunk again.
+		 */
+		if (header_len + mask_size > mask_info->update_buf_len) {
+			temp = krealloc(buf, header_len + mask_size,
+					GFP_KERNEL);
+			if (!temp) {
+				pr_err_ratelimited("diag: Unable to realloc log update buffer, new size: %d, equip_id: %d\n",
+				       header_len + mask_size, equip_id);
+				mutex_unlock(&mask->lock);
+				break;
+			}
+			mask_info->update_buf = temp;
+			mask_info->update_buf_len = header_len + mask_size;
+			buf = temp;
+		}
+
+		memcpy(buf, &ctrl_pkt, header_len);
+		if (mask_size > 0 && mask_size <= LOG_MASK_SIZE)
+			memcpy(buf + header_len, mask->ptr, mask_size);
+		mutex_unlock(&mask->lock);
+
+		DIAG_LOG(DIAG_DEBUG_MASKS,
+			 "sending ctrl pkt to %d, e %d num_items %d size %d\n",
+			 peripheral, i, ctrl_pkt.num_items,
+			 ctrl_pkt.log_mask_size);
+
+		err = diagfwd_write(peripheral, TYPE_CNTL,
+				    buf, header_len + mask_size);
+		if (err && err != -ENODEV)
+			pr_err_ratelimited("diag: Unable to send log masks to peripheral %d, equip_id: %d, err: %d\n",
+			       peripheral, i, err);
+		/* One packet is enough for ALL_DISABLED/ALL_ENABLED or a
+		 * specific equip_id request.
+		 */
+		if (send_once || equip_id != ALL_EQUIP_ID)
+			break;
+
+	}
+	mutex_unlock(&mask_info->lock);
+}
+
+/*
+ * Push the current event mask to @peripheral over its control channel.
+ * The mask bytes themselves are only attached when the mask status is
+ * DIAG_CTRL_MASK_VALID; ALL_DISABLED/ALL_ENABLED are conveyed through
+ * header.event_config alone.
+ */
+static void diag_send_event_mask_update(uint8_t peripheral)
+{
+	uint8_t *buf = NULL;
+	uint8_t *temp = NULL;
+	struct diag_ctrl_event_mask header;
+	struct diag_mask_info *mask_info = NULL;
+	int num_bytes = EVENT_COUNT_TO_BYTES(driver->last_event_id);
+	int write_len = 0;
+	int err = 0;
+	int temp_len = 0;
+
+	if (num_bytes <= 0 || num_bytes > driver->event_mask_size) {
+		pr_debug("diag: In %s, invalid event mask length %d\n",
+			 __func__, num_bytes);
+		return;
+	}
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return;
+	}
+
+	/* Session-private mask when owned by a memory-device session,
+	 * global event_mask otherwise.
+	 */
+	if (driver->md_session_mask != 0 &&
+	    (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)))
+		mask_info = driver->md_session_map[peripheral]->event_mask;
+	else
+		mask_info = &event_mask;
+
+	if (!mask_info)
+		return;
+
+	buf = mask_info->update_buf;
+	mutex_lock(&mask_info->lock);
+	header.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
+	header.stream_id = 1;
+	header.status = mask_info->status;
+
+	switch (mask_info->status) {
+	case DIAG_CTRL_MASK_ALL_DISABLED:
+		header.event_config = 0;
+		header.event_mask_size = 0;
+		break;
+	case DIAG_CTRL_MASK_ALL_ENABLED:
+		header.event_config = 1;
+		header.event_mask_size = 0;
+		break;
+	case DIAG_CTRL_MASK_VALID:
+		header.event_config = 1;
+		header.event_mask_size = num_bytes;
+		/* Grow the shared update buffer on demand. */
+		if (num_bytes + sizeof(header) > mask_info->update_buf_len) {
+			temp_len = num_bytes + sizeof(header);
+			temp = krealloc(buf, temp_len, GFP_KERNEL);
+			if (!temp) {
+				pr_err("diag: Unable to realloc event mask update buffer\n");
+				goto err;
+			} else {
+				mask_info->update_buf = temp;
+				mask_info->update_buf_len = temp_len;
+				buf = temp;
+			}
+		}
+		/* NOTE(review): the bound is strict (< mask_len, not <=) —
+		 * confirm that is intentional.
+		 */
+		if (num_bytes > 0 && num_bytes < mask_info->mask_len)
+			memcpy(buf + sizeof(header), mask_info->ptr, num_bytes);
+		else {
+			pr_err("diag: num_bytes(%d) is not satisfying length condition\n",
+				num_bytes);
+			goto err;
+		}
+		write_len += num_bytes;
+		break;
+	default:
+		pr_debug("diag: In %s, invalid status %d\n", __func__,
+			 mask_info->status);
+		goto err;
+	}
+	header.data_len = EVENT_MASK_CTRL_HEADER_LEN + header.event_mask_size;
+	memcpy(buf, &header, sizeof(header));
+	write_len += sizeof(header);
+
+	err = diagfwd_write(peripheral, TYPE_CNTL, buf, write_len);
+	if (err && err != -ENODEV)
+		pr_err_ratelimited("diag: Unable to send event masks to peripheral %d\n",
+		       peripheral);
+err:
+	mutex_unlock(&mask_info->lock);
+}
+
+/*
+ * Push the message (F3) masks covering the SSID range [@first, @last]
+ * (or every range when @first == ALL_SSID) to @peripheral over its
+ * control channel.  Session-private masks are used when the peripheral
+ * is owned by a memory-device session.  Lock order: msg_mask_lock is
+ * taken for table/count reads, mask_info->lock around the send loop,
+ * and each per-range mask->lock while copying its bits.
+ */
+static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last)
+{
+	int i;
+	int err = 0;
+	int header_len = sizeof(struct diag_ctrl_msg_mask);
+	int temp_len = 0;
+	uint8_t *buf = NULL;
+	uint8_t *temp = NULL;
+	uint8_t msg_mask_tbl_count_local = 0;
+	uint32_t mask_size = 0;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_msg_mask_t *mask = NULL;
+	struct diag_ctrl_msg_mask header;
+	struct diag_md_session_t *md_session_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return;
+	}
+
+	if ((driver->md_session_mask != 0) &&
+		(driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral))) {
+		md_session_info = driver->md_session_map[peripheral];
+		mask_info = driver->md_session_map[peripheral]->msg_mask;
+	} else
+		mask_info = &msg_mask;
+
+	if (!mask_info)
+		return;
+
+	mutex_lock(&driver->msg_mask_lock);
+	mask = (struct diag_msg_mask_t *)mask_info->ptr;
+	if (!mask->ptr) {
+		mutex_unlock(&driver->msg_mask_lock);
+		return;
+	}
+	buf = mask_info->update_buf;
+	/* Snapshot the table count under msg_mask_lock so the loop bound
+	 * stays stable after the lock is dropped.
+	 */
+	if (md_session_info)
+		msg_mask_tbl_count_local = md_session_info->msg_mask_tbl_count;
+	else
+		msg_mask_tbl_count_local = driver->msg_mask_tbl_count;
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_lock(&mask_info->lock);
+	switch (mask_info->status) {
+	case DIAG_CTRL_MASK_ALL_DISABLED:
+		mask_size = 0;
+		break;
+	case DIAG_CTRL_MASK_ALL_ENABLED:
+		mask_size = 1;
+		break;
+	case DIAG_CTRL_MASK_VALID:
+		break;
+	default:
+		pr_debug("diag: In %s, invalid status: %d\n", __func__,
+			 mask_info->status);
+		goto err;
+	}
+
+	for (i = 0; i < msg_mask_tbl_count_local; i++, mask++) {
+		mutex_lock(&driver->msg_mask_lock);
+		/* Skip ranges not covering [first, last] unless sending
+		 * everything (first == ALL_SSID).
+		 */
+		if (((mask->ssid_first > first) ||
+			(mask->ssid_last_tools < last)) && first != ALL_SSID) {
+			mutex_unlock(&driver->msg_mask_lock);
+			continue;
+		}
+
+		mutex_lock(&mask->lock);
+		if (mask_info->status == DIAG_CTRL_MASK_VALID) {
+			mask_size =
+				mask->ssid_last_tools - mask->ssid_first + 1;
+			temp_len = mask_size * sizeof(uint32_t);
+			if (temp_len + header_len <= mask_info->update_buf_len)
+				goto proceed;
+			temp = krealloc(mask_info->update_buf, temp_len,
+					GFP_KERNEL);
+			if (!temp) {
+				pr_err("diag: In %s, unable to realloc msg_mask update buffer\n",
+				       __func__);
+				/* On realloc failure, send as much as the
+				 * existing buffer can hold.
+				 */
+				mask_size = (mask_info->update_buf_len -
+					    header_len) / sizeof(uint32_t);
+			} else {
+				mask_info->update_buf = temp;
+				mask_info->update_buf_len = temp_len;
+				buf = temp;
+				pr_debug("diag: In %s, successfully reallocated msg_mask update buffer to len: %d\n",
+					 __func__, mask_info->update_buf_len);
+			}
+		} else if (mask_info->status == DIAG_CTRL_MASK_ALL_ENABLED) {
+			mask_size = 1;
+		}
+proceed:
+		header.cmd_type = DIAG_CTRL_MSG_F3_MASK;
+		header.status = mask_info->status;
+		header.stream_id = 1;
+		header.msg_mode = 0;
+		header.ssid_first = mask->ssid_first;
+		header.ssid_last = mask->ssid_last_tools;
+		header.msg_mask_size = mask_size;
+		/* header carries the size in words; the payload copy below
+		 * needs bytes.
+		 */
+		mask_size *= sizeof(uint32_t);
+		header.data_len = MSG_MASK_CTRL_HEADER_LEN + mask_size;
+		memcpy(buf, &header, header_len);
+		if (mask_size > 0)
+			memcpy(buf + header_len, mask->ptr, mask_size);
+		mutex_unlock(&mask->lock);
+		mutex_unlock(&driver->msg_mask_lock);
+
+		err = diagfwd_write(peripheral, TYPE_CNTL, buf,
+				    header_len + mask_size);
+		if (err && err != -ENODEV)
+			pr_err_ratelimited("diag: Unable to send msg masks to peripheral %d, error = %d\n",
+			       peripheral, err);
+
+		if (first != ALL_SSID)
+			break;
+	}
+err:
+	mutex_unlock(&mask_info->lock);
+}
+
+/*
+ * Send a DIAG_CTRL_MSG_TIME_SYNC_PKT to @peripheral announcing which
+ * time API the apps processor uses (driver->uses_time_api).
+ */
+static void diag_send_time_sync_update(uint8_t peripheral)
+{
+	struct diag_ctrl_msg_time_sync time_sync_msg;
+	int msg_size = sizeof(struct diag_ctrl_msg_time_sync);
+	int err = 0;
+
+	if (peripheral >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, Invalid peripheral, %d\n",
+				__func__, peripheral);
+		return;
+	}
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+		!driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_err("diag: In %s, control channel is not open, p: %d, %pK\n",
+			__func__, peripheral, driver->diagfwd_cntl[peripheral]);
+		return;
+	}
+
+	/* Serialize control-packet construction/sends with other senders. */
+	mutex_lock(&driver->diag_cntl_mutex);
+	time_sync_msg.ctrl_pkt_id = DIAG_CTRL_MSG_TIME_SYNC_PKT;
+	time_sync_msg.ctrl_pkt_data_len = 5;
+	time_sync_msg.version = 1;
+	time_sync_msg.time_api = driver->uses_time_api;
+
+	err = diagfwd_write(peripheral, TYPE_CNTL, &time_sync_msg, msg_size);
+	if (err)
+		pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
+				__func__, peripheral, TYPE_CNTL,
+				msg_size, err);
+	mutex_unlock(&driver->diag_cntl_mutex);
+}
+
+/*
+ * diag_send_feature_mask_update - send the apps feature mask to a peripheral.
+ *
+ * Fills the shared driver->buf_feature_mask_update buffer with a
+ * DIAG_CTRL_MSG_FEATURE header followed by FEATURE_MASK_LEN bytes of
+ * feature bits, then writes it on @peripheral's control channel.  The
+ * shared buffer is protected by driver->diag_cntl_mutex for the whole
+ * build+write sequence.  On success, sent_feature_mask is recorded for
+ * the peripheral; on failure it is left unset so the mask can be resent.
+ */
+static void diag_send_feature_mask_update(uint8_t peripheral)
+{
+	void *buf = driver->buf_feature_mask_update;
+	int header_size = sizeof(struct diag_ctrl_feature_mask);
+	uint8_t feature_bytes[FEATURE_MASK_LEN] = {0, 0};
+	struct diag_ctrl_feature_mask feature_mask;
+	int total_len = 0;
+	int err = 0;
+
+	if (peripheral >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, Invalid peripheral, %d\n",
+			__func__, peripheral);
+		return;
+	}
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_err("diag: In %s, control channel is not open, p: %d, %pK\n",
+		       __func__, peripheral, driver->diagfwd_cntl[peripheral]);
+		return;
+	}
+
+	mutex_lock(&driver->diag_cntl_mutex);
+	/* send feature mask update */
+	feature_mask.ctrl_pkt_id = DIAG_CTRL_MSG_FEATURE;
+	feature_mask.ctrl_pkt_data_len = sizeof(uint32_t) + FEATURE_MASK_LEN;
+	feature_mask.feature_mask_len = FEATURE_MASK_LEN;
+	memcpy(buf, &feature_mask, header_size);
+	/*
+	 * NOTE(review): DIAG_SET_FEATURE_MASK presumably sets a bit in
+	 * feature_bytes[] — macro definition not visible here, confirm.
+	 * Unconditional features first, then capability-gated ones.
+	 */
+	DIAG_SET_FEATURE_MASK(F_DIAG_FEATURE_MASK_SUPPORT);
+	DIAG_SET_FEATURE_MASK(F_DIAG_LOG_ON_DEMAND_APPS);
+	DIAG_SET_FEATURE_MASK(F_DIAG_STM);
+	if (driver->supports_separate_cmdrsp)
+		DIAG_SET_FEATURE_MASK(F_DIAG_REQ_RSP_SUPPORT);
+	if (driver->supports_apps_hdlc_encoding)
+		DIAG_SET_FEATURE_MASK(F_DIAG_APPS_HDLC_ENCODE);
+	DIAG_SET_FEATURE_MASK(F_DIAG_MASK_CENTRALIZATION);
+	if (driver->supports_sockets)
+		DIAG_SET_FEATURE_MASK(F_DIAG_SOCKETS_ENABLED);
+
+	memcpy(buf + header_size, &feature_bytes, FEATURE_MASK_LEN);
+	total_len = header_size + FEATURE_MASK_LEN;
+
+	err = diagfwd_write(peripheral, TYPE_CNTL, buf, total_len);
+	if (err) {
+		pr_err_ratelimited("diag: In %s, unable to write feature mask to peripheral: %d, type: %d, len: %d, err: %d\n",
+		       __func__, peripheral, TYPE_CNTL,
+		       total_len, err);
+		mutex_unlock(&driver->diag_cntl_mutex);
+		return;
+	}
+	driver->feature[peripheral].sent_feature_mask = 1;
+	mutex_unlock(&driver->diag_cntl_mutex);
+}
+
+/*
+ * diag_cmd_get_ssid_range - handle DIAG_CMD_OP_GET_SSID_RANGE.
+ *
+ * Frames a response in @dest_buf listing the (ssid_first, ssid_last_tools)
+ * pair of every entry in the caller's msg-mask table (the session table if
+ * @pid owns a memory-device session, the global table otherwise).  The
+ * list is truncated, with an error log, once @dest_len is exhausted.
+ *
+ * Returns bytes written to @dest_buf, 0 when the apps processor is not the
+ * responder, or -EINVAL on bad arguments.
+ */
+static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i;
+	int write_len = 0;
+	uint8_t msg_mask_tbl_count = 0;
+	struct diag_msg_mask_t *mask_ptr = NULL;
+	struct diag_msg_ssid_query_t rsp;
+	struct diag_ssid_range_t ssid_range;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+	/* No session for this pid => operate on the global msg mask. */
+	mask_info = (!info) ? &msg_mask : info->msg_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+
+	if (!diag_apps_responds()) {
+		mutex_unlock(&driver->md_session_lock);
+		return 0;
+	}
+	mutex_lock(&driver->msg_mask_lock);
+	msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count :
+		driver->msg_mask_tbl_count;
+	rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+	rsp.sub_cmd = DIAG_CMD_OP_GET_SSID_RANGE;
+	rsp.status = MSG_STATUS_SUCCESS;
+	rsp.padding = 0;
+	rsp.count = msg_mask_tbl_count;
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+	mask_ptr = (struct diag_msg_mask_t *)mask_info->ptr;
+	for (i = 0; i < msg_mask_tbl_count; i++, mask_ptr++) {
+		if (write_len + sizeof(ssid_range) > dest_len) {
+			pr_err("diag: In %s, Truncating response due to size limitations of rsp buffer\n",
+			       __func__);
+			break;
+		}
+		ssid_range.ssid_first = mask_ptr->ssid_first;
+		ssid_range.ssid_last = mask_ptr->ssid_last_tools;
+		memcpy(dest_buf + write_len, &ssid_range, sizeof(ssid_range));
+		write_len += sizeof(ssid_range);
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&driver->md_session_lock);
+	return write_len;
+}
+
+/*
+ * diag_cmd_get_build_mask - handle DIAG_CMD_OP_GET_BUILD_MASK.
+ *
+ * Looks up the build-time msg mask (msg_bt_mask) entry whose ssid_first
+ * matches the request and frames a response in @dest_buf: the
+ * diag_msg_build_mask_t header followed by the mask words.  The copied
+ * mask is clamped to what fits in @dest_len.
+ *
+ * Fixes vs. previous revision:
+ *  - require dest_len to hold at least the response header: the header
+ *    memcpy at the end was unconditional and could overrun a tiny buffer;
+ *  - the truncated ssid_last was off by one: range counts both endpoints,
+ *    so the last valid SSID is ssid_first + range - 1, not + range;
+ *  - guard the clamped copy_len so a mixed signed/unsigned comparison can
+ *    never drive memcpy with a negative length.
+ *
+ * Returns bytes written to @dest_buf, 0 when the apps processor is not
+ * the responder, or -EINVAL on bad arguments.
+ */
+static int diag_cmd_get_build_mask(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i = 0;
+	int write_len = 0;
+	int num_entries = 0;
+	int copy_len = 0;
+	struct diag_msg_mask_t *build_mask = NULL;
+	struct diag_build_mask_req_t *req = NULL;
+	struct diag_msg_build_mask_t rsp;
+
+	if (!src_buf || !dest_buf || dest_len < (int)sizeof(rsp) ||
+		src_len < sizeof(struct diag_build_mask_req_t)) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	if (!diag_apps_responds())
+		return 0;
+
+	mutex_lock(&driver->msg_mask_lock);
+	req = (struct diag_build_mask_req_t *)src_buf;
+	rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+	rsp.sub_cmd = DIAG_CMD_OP_GET_BUILD_MASK;
+	rsp.ssid_first = req->ssid_first;
+	rsp.ssid_last = req->ssid_last;
+	rsp.status = MSG_STATUS_FAIL;
+	rsp.padding = 0;
+	build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
+	for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
+		if (build_mask->ssid_first != req->ssid_first)
+			continue;
+		num_entries = req->ssid_last - req->ssid_first + 1;
+		if (num_entries > build_mask->range) {
+			pr_warn("diag: In %s, truncating ssid range for ssid_first: %d ssid_last %d\n",
+				__func__, req->ssid_first, req->ssid_last);
+			num_entries = build_mask->range;
+			/* range includes both endpoints => last = first + range - 1 */
+			req->ssid_last = req->ssid_first + build_mask->range - 1;
+		}
+		copy_len = num_entries * sizeof(uint32_t);
+		/* Copy only as much mask as fits after the response header. */
+		if (copy_len + (int)sizeof(rsp) > dest_len)
+			copy_len = dest_len - (int)sizeof(rsp);
+		if (copy_len > 0)
+			memcpy(dest_buf + sizeof(rsp), build_mask->ptr,
+			       copy_len);
+		write_len += copy_len;
+		rsp.ssid_last = build_mask->ssid_last;
+		rsp.status = MSG_STATUS_SUCCESS;
+		break;
+	}
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+	mutex_unlock(&driver->msg_mask_lock);
+
+	return write_len;
+}
+
+/*
+ * diag_cmd_get_msg_mask - handle DIAG_CMD_OP_GET_MSG_MASK.
+ *
+ * Finds the msg-mask table entry whose SSID range contains the requested
+ * ssid_first (session table for @pid if one exists, global otherwise) and
+ * frames a response in @dest_buf: diag_msg_build_mask_t header followed
+ * by the mask words, clamped to @dest_len.
+ *
+ * Returns bytes written, 0 when the apps processor is not the responder,
+ * or -EINVAL on bad arguments.
+ */
+static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i;
+	int write_len = 0;
+	uint32_t mask_size = 0;
+	uint8_t msg_mask_tbl_count = 0;
+	struct diag_msg_mask_t *mask = NULL;
+	struct diag_build_mask_req_t *req = NULL;
+	struct diag_msg_build_mask_t rsp;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+
+	mask_info = (!info) ? &msg_mask : info->msg_mask;
+	if (!src_buf || !dest_buf || dest_len <= 0 ||
+	    !mask_info || (src_len < sizeof(struct diag_build_mask_req_t))) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!diag_apps_responds()) {
+		mutex_unlock(&driver->md_session_lock);
+		return 0;
+	}
+
+	mutex_lock(&driver->msg_mask_lock);
+	msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count :
+			driver->msg_mask_tbl_count;
+	req = (struct diag_build_mask_req_t *)src_buf;
+	rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+	rsp.sub_cmd = DIAG_CMD_OP_GET_MSG_MASK;
+	rsp.ssid_first = req->ssid_first;
+	rsp.ssid_last = req->ssid_last;
+	rsp.status = MSG_STATUS_FAIL;
+	rsp.padding = 0;
+	mask = (struct diag_msg_mask_t *)mask_info->ptr;
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&driver->msg_mask_lock);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < msg_mask_tbl_count; i++, mask++) {
+		if (!mask->ptr)
+			continue;
+		/* Match: ssid_first must fall inside this entry's range. */
+		if ((req->ssid_first < mask->ssid_first) ||
+		    (req->ssid_first > mask->ssid_last_tools)) {
+			continue;
+		}
+		mask_size = mask->range * sizeof(uint32_t);
+		/* Copy msg mask only till the end of the rsp buffer */
+		/*
+		 * NOTE(review): if dest_len < sizeof(rsp) this clamp wraps
+		 * (mask_size is uint32_t) — confirm callers always pass a
+		 * response buffer larger than the header.
+		 */
+		if (mask_size + sizeof(rsp) > dest_len)
+			mask_size = dest_len - sizeof(rsp);
+		memcpy(dest_buf + sizeof(rsp), mask->ptr, mask_size);
+		write_len += mask_size;
+		rsp.status = MSG_STATUS_SUCCESS;
+		break;
+	}
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&driver->md_session_lock);
+	return write_len;
+}
+
+/*
+ * diag_cmd_set_msg_mask - handle DIAG_CMD_OP_SET_MSG_MASK.
+ *
+ * Writes the mask words supplied after the diag_msg_build_mask_t header
+ * in @src_buf into the matching msg-mask table entry (session table for
+ * @pid if one exists, global otherwise), growing the entry's buffer with
+ * krealloc when the requested SSID range extends past the current one.
+ * Then notifies userspace clients, frames the response in @dest_buf and
+ * pushes the updated mask to every peripheral owned by @pid.
+ *
+ * Lock order (outer to inner): md_session_lock -> mask_info->lock ->
+ * msg_mask_lock -> mask->lock; unlock paths must mirror it.
+ *
+ * Returns bytes written to @dest_buf, or a negative errno.
+ */
+static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i;
+	int write_len = 0;
+	int header_len = sizeof(struct diag_msg_build_mask_t);
+	int found = 0;
+	uint32_t mask_size = 0;
+	uint32_t offset = 0;
+	struct diag_msg_mask_t *mask = NULL;
+	struct diag_msg_build_mask_t *req = NULL;
+	struct diag_msg_build_mask_t rsp;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_msg_mask_t *mask_next = NULL;
+	uint32_t *temp = NULL;
+	uint8_t msg_mask_tbl_count = 0;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+
+	mask_info = (!info) ? &msg_mask : info->msg_mask;
+	if (!src_buf || !dest_buf || dest_len <= 0 || !mask_info ||
+		(src_len < sizeof(struct diag_msg_build_mask_t))) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+
+	req = (struct diag_msg_build_mask_t *)src_buf;
+	mutex_lock(&mask_info->lock);
+	mutex_lock(&driver->msg_mask_lock);
+	mask = (struct diag_msg_mask_t *)mask_info->ptr;
+	msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count :
+			driver->msg_mask_tbl_count;
+
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&driver->msg_mask_lock);
+		mutex_unlock(&mask_info->lock);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < msg_mask_tbl_count; i++, mask++) {
+		if (!mask->ptr)
+			continue;
+		/* Peek at the next entry so we do not claim its SSIDs. */
+		if (i < (msg_mask_tbl_count - 1)) {
+			mask_next = mask;
+			mask_next++;
+		} else
+			mask_next = NULL;
+
+		if ((req->ssid_first < mask->ssid_first) ||
+		    (req->ssid_first > mask->ssid_first + MAX_SSID_PER_RANGE) ||
+		    (mask_next && (req->ssid_first >= mask_next->ssid_first))) {
+			continue;
+		}
+		mask_next = NULL;
+		found = 1;
+		mutex_lock(&mask->lock);
+		mask_size = req->ssid_last - req->ssid_first + 1;
+		if (mask_size > MAX_SSID_PER_RANGE) {
+			pr_warn("diag: In %s, truncating ssid range, %d-%d to max allowed: %d\n",
+				__func__, mask->ssid_first, mask->ssid_last,
+				MAX_SSID_PER_RANGE);
+			mask_size = MAX_SSID_PER_RANGE;
+			mask->range_tools = MAX_SSID_PER_RANGE;
+			/*
+			 * NOTE(review): elsewhere the convention is
+			 * range = last - first + 1, which would make this
+			 * first + range_tools - 1; looks off by one — confirm.
+			 */
+			mask->ssid_last_tools =
+				mask->ssid_first + mask->range_tools;
+		}
+		if (req->ssid_last > mask->ssid_last_tools) {
+			pr_debug("diag: Msg SSID range mismatch\n");
+			if (mask_size != MAX_SSID_PER_RANGE)
+				mask->ssid_last_tools = req->ssid_last;
+			mask->range_tools =
+				mask->ssid_last_tools - mask->ssid_first + 1;
+			/* Grow the mask buffer to cover the new range. */
+			temp = krealloc(mask->ptr,
+					mask->range_tools * sizeof(uint32_t),
+					GFP_KERNEL);
+			if (!temp) {
+				pr_err_ratelimited("diag: In %s, unable to allocate memory for msg mask ptr, mask_size: %d\n",
+						   __func__, mask_size);
+				mutex_unlock(&mask->lock);
+				mutex_unlock(&driver->msg_mask_lock);
+				mutex_unlock(&mask_info->lock);
+				mutex_unlock(&driver->md_session_lock);
+				return -ENOMEM;
+			}
+			mask->ptr = temp;
+		}
+
+		offset = req->ssid_first - mask->ssid_first;
+		if (offset + mask_size > mask->range_tools) {
+			pr_err("diag: In %s, Not in msg mask range, mask_size: %d, offset: %d\n",
+			       __func__, mask_size, offset);
+			mutex_unlock(&mask->lock);
+			break;
+		}
+		mask_size = mask_size * sizeof(uint32_t);
+		/* Copy only if the request actually carries the payload. */
+		if (mask_size && src_len >= header_len + mask_size)
+			memcpy(mask->ptr + offset, src_buf + header_len,
+				mask_size);
+		mutex_unlock(&mask->lock);
+		mask_info->status = DIAG_CTRL_MASK_VALID;
+		break;
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&mask_info->lock);
+	mutex_unlock(&driver->md_session_lock);
+	if (diag_check_update(APPS_DATA, pid))
+		diag_update_userspace_clients(MSG_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+	rsp.sub_cmd = DIAG_CMD_OP_SET_MSG_MASK;
+	rsp.ssid_first = req->ssid_first;
+	rsp.ssid_last = req->ssid_last;
+	rsp.status = found;
+	rsp.padding = 0;
+	memcpy(dest_buf, &rsp, header_len);
+	write_len += header_len;
+	if (!found)
+		goto end;
+	/* Echo the applied mask back, clamped to the response buffer. */
+	if (mask_size + write_len > dest_len)
+		mask_size = dest_len - write_len;
+	if (mask_size && src_len >= header_len + mask_size)
+		memcpy(dest_buf + write_len, src_buf + header_len, mask_size);
+	write_len += mask_size;
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i, pid))
+			continue;
+		diag_send_msg_mask_update(i, req->ssid_first, req->ssid_last);
+	}
+end:
+	return write_len;
+}
+
+/*
+ * diag_cmd_set_all_msg_mask - handle DIAG_CMD_OP_SET_ALL_MSG_MASK.
+ *
+ * Sets every msg-mask table entry (session table for @pid if one exists,
+ * global otherwise) to the requested real-time mask value, updates the
+ * table status to ALL_ENABLED/ALL_DISABLED, notifies userspace clients,
+ * frames the response in @dest_buf and pushes the update to every
+ * peripheral owned by @pid.
+ *
+ * Returns bytes written to @dest_buf, or -EINVAL on bad arguments.
+ */
+static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i;
+	int write_len = 0;
+	int header_len = sizeof(struct diag_msg_config_rsp_t);
+	struct diag_msg_config_rsp_t rsp;
+	struct diag_msg_config_rsp_t *req = NULL;
+	struct diag_msg_mask_t *mask = NULL;
+	struct diag_mask_info *mask_info = NULL;
+	uint8_t msg_mask_tbl_count = 0;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+
+	mask_info = (!info) ? &msg_mask : info->msg_mask;
+	if (!src_buf || !dest_buf || dest_len <= 0 || !mask_info ||
+		(src_len < sizeof(struct diag_msg_config_rsp_t))) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+
+	req = (struct diag_msg_config_rsp_t *)src_buf;
+
+	mutex_lock(&mask_info->lock);
+	mutex_lock(&driver->msg_mask_lock);
+
+	mask = (struct diag_msg_mask_t *)mask_info->ptr;
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&driver->msg_mask_lock);
+		mutex_unlock(&mask_info->lock);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+
+	msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count :
+			driver->msg_mask_tbl_count;
+	mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED :
+					   DIAG_CTRL_MASK_ALL_DISABLED;
+	for (i = 0; i < msg_mask_tbl_count; i++, mask++) {
+		if (mask && mask->ptr) {
+			mutex_lock(&mask->lock);
+			/*
+			 * memset fills each BYTE with rt_mask's low byte:
+			 * 0 clears everything; any non-zero value sets that
+			 * byte pattern in every mask word.
+			 */
+			memset(mask->ptr, req->rt_mask,
+			       mask->range * sizeof(uint32_t));
+			mutex_unlock(&mask->lock);
+		}
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&mask_info->lock);
+	mutex_unlock(&driver->md_session_lock);
+	if (diag_check_update(APPS_DATA, pid))
+		diag_update_userspace_clients(MSG_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+	rsp.sub_cmd = DIAG_CMD_OP_SET_ALL_MSG_MASK;
+	rsp.status = MSG_STATUS_SUCCESS;
+	rsp.padding = 0;
+	rsp.rt_mask = req->rt_mask;
+	memcpy(dest_buf, &rsp, header_len);
+	write_len += header_len;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i, pid))
+			continue;
+		diag_send_msg_mask_update(i, ALL_SSID, ALL_SSID);
+	}
+
+	return write_len;
+}
+
+/*
+ * diag_cmd_get_event_mask - handle DIAG_CMD_GET_EVENT_MASK.
+ *
+ * Frames a response in @dest_buf: diag_event_mask_config_t header
+ * followed by the global event mask bytes covering event IDs up to
+ * driver->last_event_id.  Fails with -ENOMEM if the mask does not fit
+ * in @dest_len.
+ *
+ * Returns bytes written, 0 when the apps processor is not the responder,
+ * or a negative errno.
+ */
+static int diag_cmd_get_event_mask(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int write_len = 0;
+	uint32_t mask_size;
+	struct diag_event_mask_config_t rsp;
+
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	if (!diag_apps_responds())
+		return 0;
+
+	/* Size check up front: header + full mask must fit in dest_buf. */
+	mask_size = EVENT_COUNT_TO_BYTES(driver->last_event_id);
+	if (mask_size + sizeof(rsp) > dest_len) {
+		pr_err("diag: In %s, invalid mask size: %d\n", __func__,
+		       mask_size);
+		return -ENOMEM;
+	}
+
+	rsp.cmd_code = DIAG_CMD_GET_EVENT_MASK;
+	rsp.status = EVENT_STATUS_SUCCESS;
+	rsp.padding = 0;
+	rsp.num_bits = driver->last_event_id + 1;
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+	memcpy(dest_buf + write_len, event_mask.ptr, mask_size);
+	write_len += mask_size;
+
+	return write_len;
+}
+
+/*
+ * diag_cmd_update_event_mask - handle DIAG_CMD_SET_EVENT_MASK.
+ *
+ * Copies the event mask supplied after the diag_event_mask_config_t
+ * header in @src_buf into the caller's event mask (session mask for @pid
+ * if one exists, global otherwise), notifies userspace clients, frames
+ * the response in @dest_buf and pushes the update to every peripheral
+ * owned by @pid.
+ *
+ * Fix vs. previous revision: the echo of the mask into @dest_buf was not
+ * bounded by @dest_len (mask_len can be as large as event_mask.mask_len,
+ * which is unrelated to the response buffer size), so a small response
+ * buffer could be overrun.  The copy is now clamped, and @dest_len must
+ * hold at least the response header.
+ *
+ * Returns bytes written to @dest_buf, or a negative errno.
+ */
+static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i;
+	int write_len = 0;
+	int mask_len = 0;
+	int header_len = sizeof(struct diag_event_mask_config_t);
+	struct diag_event_mask_config_t rsp;
+	struct diag_event_mask_config_t *req;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+	mask_info = (!info) ? &event_mask : info->event_mask;
+	/* dest_len must at least hold the response header we frame below. */
+	if (!src_buf || !dest_buf || dest_len < header_len || !mask_info ||
+		src_len < sizeof(struct diag_event_mask_config_t)) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	req = (struct diag_event_mask_config_t *)src_buf;
+	mask_len = EVENT_COUNT_TO_BYTES(req->num_bits);
+	if (mask_len <= 0 || mask_len > event_mask.mask_len) {
+		pr_err("diag: In %s, invalid event mask len: %d\n", __func__,
+		       mask_len);
+		mutex_unlock(&driver->md_session_lock);
+		return -EIO;
+	}
+
+	mutex_lock(&mask_info->lock);
+	if (src_len >= header_len + mask_len)
+		memcpy(mask_info->ptr, src_buf + header_len, mask_len);
+	mask_info->status = DIAG_CTRL_MASK_VALID;
+	mutex_unlock(&mask_info->lock);
+	mutex_unlock(&driver->md_session_lock);
+	if (diag_check_update(APPS_DATA, pid))
+		diag_update_userspace_clients(EVENT_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	rsp.cmd_code = DIAG_CMD_SET_EVENT_MASK;
+	rsp.status = EVENT_STATUS_SUCCESS;
+	rsp.padding = 0;
+	rsp.num_bits = driver->last_event_id + 1;
+	memcpy(dest_buf, &rsp, header_len);
+	write_len += header_len;
+	/* Clamp the echoed mask so header + mask never overruns dest_buf. */
+	if (mask_len > dest_len - write_len)
+		mask_len = dest_len - write_len;
+	if (mask_len > 0) {
+		memcpy(dest_buf + write_len, mask_info->ptr, mask_len);
+		write_len += mask_len;
+	}
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i, pid))
+			continue;
+		diag_send_event_mask_update(i);
+	}
+
+	return write_len;
+}
+
+/*
+ * diag_cmd_toggle_events - handle DIAG_CMD_EVENT_TOGGLE.
+ *
+ * Reads the toggle byte at src_buf[1] (the input check guarantees
+ * src_len >= 2): non-zero sets every byte of the caller's event mask to
+ * 0xFF (ALL_ENABLED), zero clears it (ALL_DISABLED).  Then notifies
+ * userspace clients, pushes the mask to every peripheral owned by @pid
+ * and frames the 2-field response header in @dest_buf.
+ *
+ * Returns bytes written to @dest_buf, or -EINVAL on bad arguments.
+ */
+static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i;
+	int write_len = 0;
+	uint8_t toggle = 0;
+	struct diag_event_report_t header;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+	mask_info = (!info) ? &event_mask : info->event_mask;
+	if (!src_buf || !dest_buf || src_len <= sizeof(uint8_t) ||
+		dest_len <= 0 || !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+
+	toggle = *(src_buf + 1);
+	mutex_lock(&mask_info->lock);
+	if (toggle) {
+		mask_info->status = DIAG_CTRL_MASK_ALL_ENABLED;
+		memset(mask_info->ptr, 0xFF, mask_info->mask_len);
+	} else {
+		mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED;
+		memset(mask_info->ptr, 0, mask_info->mask_len);
+	}
+	mutex_unlock(&mask_info->lock);
+	mutex_unlock(&driver->md_session_lock);
+	if (diag_check_update(APPS_DATA, pid))
+		diag_update_userspace_clients(EVENT_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	header.cmd_code = DIAG_CMD_EVENT_TOGGLE;
+	header.padding = 0;
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i, pid))
+			continue;
+		diag_send_event_mask_update(i);
+	}
+	memcpy(dest_buf, &header, sizeof(header));
+	write_len += sizeof(header);
+
+	return write_len;
+}
+
+/*
+ * diag_cmd_get_log_mask - handle DIAG_CMD_OP_GET_LOG_MASK.
+ *
+ * Looks up the log-mask entry whose equip_id matches the request
+ * (session table for @pid if one exists, global otherwise) and frames a
+ * response in @dest_buf: diag_log_config_rsp_t header, then equip_id,
+ * num_items and the mask bytes.  The header is written LAST because its
+ * status field is only known after the lookup; write_len pre-reserves
+ * space for it.
+ *
+ * Returns bytes written, 0 when the apps processor is not the responder,
+ * or -EINVAL on bad arguments.
+ */
+static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i;
+	int status = LOG_STATUS_INVALID;
+	int write_len = 0;
+	int read_len = 0;
+	int req_header_len = sizeof(struct diag_log_config_req_t);
+	int rsp_header_len = sizeof(struct diag_log_config_rsp_t);
+	uint32_t mask_size = 0;
+	struct diag_log_mask_t *log_item = NULL;
+	struct diag_log_config_req_t *req;
+	struct diag_log_config_rsp_t rsp;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+
+	mask_info = (!info) ? &log_mask : info->log_mask;
+	if (!src_buf || !dest_buf || dest_len <= 0 || !mask_info ||
+		src_len < sizeof(struct diag_log_config_req_t)) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+
+	if (!diag_apps_responds()) {
+		mutex_unlock(&driver->md_session_lock);
+		return 0;
+	}
+
+	req = (struct diag_log_config_req_t *)src_buf;
+	read_len += req_header_len;
+
+	rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
+	rsp.padding[0] = 0;
+	rsp.padding[1] = 0;
+	rsp.padding[2] = 0;
+	rsp.sub_cmd = DIAG_CMD_OP_GET_LOG_MASK;
+	/*
+	 * Don't copy the response header now. Copy at the end after
+	 * calculating the status field value
+	 */
+	write_len += rsp_header_len;
+
+	log_item = (struct diag_log_mask_t *)mask_info->ptr;
+	if (!log_item->ptr) {
+		pr_err("diag: Invalid input in %s, mask: %pK\n",
+			__func__, log_item);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	for (i = 0; i < MAX_EQUIP_ID; i++, log_item++) {
+		if (log_item->equip_id != req->equip_id)
+			continue;
+		mutex_lock(&log_item->lock);
+		mask_size = LOG_ITEMS_TO_SIZE(log_item->num_items_tools);
+		/*
+		 * Make sure we have space to fill the response in the buffer.
+		 * Destination buffer should atleast be able to hold equip_id
+		 * (uint32_t), num_items(uint32_t), mask (mask_size) and the
+		 * response header.
+		 */
+		if ((mask_size + (2 * sizeof(uint32_t)) + rsp_header_len) >
+								dest_len) {
+			pr_err("diag: In %s, invalid length: %d, max rsp_len: %d\n",
+				__func__, mask_size, dest_len);
+			status = LOG_STATUS_FAIL;
+			mutex_unlock(&log_item->lock);
+			break;
+		}
+		*(uint32_t *)(dest_buf + write_len) = log_item->equip_id;
+		write_len += sizeof(uint32_t);
+		*(uint32_t *)(dest_buf + write_len) = log_item->num_items_tools;
+		write_len += sizeof(uint32_t);
+		if (mask_size > 0) {
+			memcpy(dest_buf + write_len, log_item->ptr, mask_size);
+			write_len += mask_size;
+		}
+		DIAG_LOG(DIAG_DEBUG_MASKS,
+			 "sending log e %d num_items %d size %d\n",
+			 log_item->equip_id, log_item->num_items_tools,
+			 log_item->range_tools);
+		mutex_unlock(&log_item->lock);
+		status = LOG_STATUS_SUCCESS;
+		break;
+	}
+
+	/* Now that status is final, back-fill the reserved header slot. */
+	rsp.status = status;
+	memcpy(dest_buf, &rsp, rsp_header_len);
+
+	mutex_unlock(&driver->md_session_lock);
+	return write_len;
+}
+
+/*
+ * diag_cmd_get_log_range - handle DIAG_CMD_OP_GET_LOG_RANGE.
+ *
+ * Frames a response in @dest_buf: diag_log_config_rsp_t header followed
+ * by num_items_tools (one uint32_t) for each of the MAX_EQUIP_ID entries
+ * of the GLOBAL log mask table, as far as @dest_len allows.
+ *
+ * Returns bytes written, 0 when the apps processor is not the responder,
+ * or -EINVAL on bad arguments.
+ */
+static int diag_cmd_get_log_range(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	int i;
+	int write_len = 0;
+	struct diag_log_config_rsp_t rsp;
+	struct diag_log_mask_t *mask = (struct diag_log_mask_t *)log_mask.ptr;
+
+	if (!mask)
+		return -EINVAL;
+
+	if (!diag_apps_responds())
+		return 0;
+
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
+	rsp.padding[0] = 0;
+	rsp.padding[1] = 0;
+	rsp.padding[2] = 0;
+	rsp.sub_cmd = DIAG_CMD_OP_GET_LOG_RANGE;
+	rsp.status = LOG_STATUS_SUCCESS;
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+
+	/*
+	 * NOTE(review): the bound is checked BEFORE each 4-byte write, so
+	 * the last write can spill up to 3 bytes past dest_len when the
+	 * remaining space is 1-3 bytes — confirm callers size dest_buf
+	 * generously (header + MAX_EQUIP_ID * 4).
+	 */
+	for (i = 0; i < MAX_EQUIP_ID && write_len < dest_len; i++, mask++) {
+		*(uint32_t *)(dest_buf + write_len) = mask->num_items_tools;
+		write_len += sizeof(uint32_t);
+	}
+
+	return write_len;
+}
+
+/*
+ * diag_cmd_set_log_mask - handle DIAG_CMD_OP_SET_LOG_MASK.
+ *
+ * Writes the log mask supplied after the diag_log_config_req_t header in
+ * @src_buf into the entry matching req->equip_id (session table for @pid
+ * if one exists, global otherwise), growing the entry's buffer with
+ * krealloc when the tool reports more items than currently allocated
+ * (capped at MAX_ITEMS_ALLOWED).  Then notifies userspace clients,
+ * frames the response (header + echoed payload) in @dest_buf and pushes
+ * the update to every peripheral owned by @pid.
+ *
+ * Returns bytes written to @dest_buf, or -EINVAL on bad arguments.
+ */
+static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
+				 unsigned char *dest_buf, int dest_len,
+				 int pid)
+{
+	int i;
+	int write_len = 0;
+	int status = LOG_STATUS_SUCCESS;
+	int read_len = 0;
+	int payload_len = 0;
+	int req_header_len = sizeof(struct diag_log_config_req_t);
+	int rsp_header_len = sizeof(struct diag_log_config_set_rsp_t);
+	uint32_t mask_size = 0;
+	struct diag_log_config_req_t *req;
+	struct diag_log_config_set_rsp_t rsp;
+	struct diag_log_mask_t *mask = NULL;
+	unsigned char *temp_buf = NULL;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+
+	mask_info = (!info) ? &log_mask : info->log_mask;
+	if (!src_buf || !dest_buf || dest_len <= 0 || !mask_info ||
+		src_len < sizeof(struct diag_log_config_req_t)) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+
+	req = (struct diag_log_config_req_t *)src_buf;
+	read_len += req_header_len;
+	mask = (struct diag_log_mask_t *)mask_info->ptr;
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	/* Request validation: bad equip_id / item count skips the copy loop. */
+	if (req->equip_id >= MAX_EQUIP_ID) {
+		pr_err("diag: In %s, Invalid logging mask request, equip_id: %d\n",
+		       __func__, req->equip_id);
+		status = LOG_STATUS_INVALID;
+	}
+
+	if (req->num_items == 0) {
+		pr_err("diag: In %s, Invalid number of items in log mask request, equip_id: %d\n",
+		       __func__, req->equip_id);
+		status = LOG_STATUS_INVALID;
+	}
+
+	mutex_lock(&mask_info->lock);
+	for (i = 0; i < MAX_EQUIP_ID && !status; i++, mask++) {
+		if (!mask || !mask->ptr)
+			continue;
+		if (mask->equip_id != req->equip_id)
+			continue;
+		mutex_lock(&mask->lock);
+
+		DIAG_LOG(DIAG_DEBUG_MASKS, "e: %d current: %d %d new: %d %d",
+			 mask->equip_id, mask->num_items_tools,
+			 mask->range_tools, req->num_items,
+			 LOG_ITEMS_TO_SIZE(req->num_items));
+		/*
+		 * If the size of the log mask cannot fit into our
+		 * buffer, trim till we have space left in the buffer.
+		 * num_items should then reflect the items that we have
+		 * in our buffer.
+		 */
+		mask->num_items_tools = (req->num_items > MAX_ITEMS_ALLOWED) ?
+					MAX_ITEMS_ALLOWED : req->num_items;
+		mask_size = LOG_ITEMS_TO_SIZE(mask->num_items_tools);
+		/* Clear the old mask before (possibly) resizing it. */
+		memset(mask->ptr, 0, mask->range_tools);
+		if (mask_size > mask->range_tools) {
+			DIAG_LOG(DIAG_DEBUG_MASKS,
+				 "log range mismatch, e: %d old: %d new: %d\n",
+				 req->equip_id, mask->range_tools,
+				 LOG_ITEMS_TO_SIZE(mask->num_items_tools));
+			/* Change in the mask reported by tools */
+			temp_buf = krealloc(mask->ptr, mask_size, GFP_KERNEL);
+			if (!temp_buf) {
+				mask_info->status = DIAG_CTRL_MASK_INVALID;
+				mutex_unlock(&mask->lock);
+				break;
+			}
+			mask->ptr = temp_buf;
+			memset(mask->ptr, 0, mask_size);
+			mask->range_tools = mask_size;
+		}
+		req->num_items = mask->num_items_tools;
+		if (mask_size > 0 && src_len >= read_len + mask_size)
+			memcpy(mask->ptr, src_buf + read_len, mask_size);
+		DIAG_LOG(DIAG_DEBUG_MASKS,
+			 "copying log mask, e %d num %d range %d size %d\n",
+			 req->equip_id, mask->num_items_tools,
+			 mask->range_tools, mask_size);
+		mutex_unlock(&mask->lock);
+		mask_info->status = DIAG_CTRL_MASK_VALID;
+		break;
+	}
+	mutex_unlock(&mask_info->lock);
+	mutex_unlock(&driver->md_session_lock);
+	if (diag_check_update(APPS_DATA, pid))
+		diag_update_userspace_clients(LOG_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	payload_len = LOG_ITEMS_TO_SIZE(req->num_items);
+	if ((payload_len + rsp_header_len > dest_len) || (payload_len == 0)) {
+		pr_err("diag: In %s, invalid length, payload_len: %d, header_len: %d, dest_len: %d\n",
+		       __func__, payload_len, rsp_header_len , dest_len);
+		status = LOG_STATUS_FAIL;
+	}
+	rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
+	rsp.padding[0] = 0;
+	rsp.padding[1] = 0;
+	rsp.padding[2] = 0;
+	rsp.sub_cmd = DIAG_CMD_OP_SET_LOG_MASK;
+	rsp.status = status;
+	rsp.equip_id = req->equip_id;
+	rsp.num_items = req->num_items;
+	memcpy(dest_buf, &rsp, rsp_header_len);
+	write_len += rsp_header_len;
+	if (status != LOG_STATUS_SUCCESS)
+		goto end;
+	/*
+	 * NOTE(review): unlike the mask copy above, this echo of the
+	 * request payload is not bounded by src_len — a request shorter
+	 * than read_len + payload_len would over-read src_buf; confirm.
+	 */
+	memcpy(dest_buf + write_len, src_buf + read_len, payload_len);
+	write_len += payload_len;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i, pid))
+			continue;
+		diag_send_log_mask_update(i, req->equip_id);
+	}
+end:
+	return write_len;
+}
+
+/*
+ * diag_cmd_disable_log_mask - handle DIAG_CMD_OP_LOG_DISABLE.
+ *
+ * Clears every log-mask entry (session table for @pid if one exists,
+ * global otherwise) — note it zeroes mask->range bytes, i.e. the full
+ * allocated range, not just range_tools — marks the table ALL_DISABLED,
+ * notifies userspace clients, frames the response header in @dest_buf
+ * and pushes the cleared masks to every peripheral owned by @pid.
+ *
+ * Returns bytes written to @dest_buf, or -EINVAL on bad arguments.
+ */
+static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
+			unsigned char *dest_buf, int dest_len, int pid)
+{
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_log_mask_t *mask = NULL;
+	struct diag_log_config_rsp_t header;
+	int write_len = 0;
+	int i;
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+
+	mask_info = (!info) ? &log_mask : info->log_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	if (!mask_info->ptr) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+			__func__, mask_info->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	mask = (struct diag_log_mask_t *)mask_info->ptr;
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&driver->md_session_lock);
+		return -EINVAL;
+	}
+	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+		if (mask && mask->ptr) {
+			mutex_lock(&mask->lock);
+			memset(mask->ptr, 0, mask->range);
+			mutex_unlock(&mask->lock);
+		}
+	}
+	mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED;
+	mutex_unlock(&driver->md_session_lock);
+	if (diag_check_update(APPS_DATA, pid))
+		diag_update_userspace_clients(LOG_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	header.cmd_code = DIAG_CMD_LOG_CONFIG;
+	header.padding[0] = 0;
+	header.padding[1] = 0;
+	header.padding[2] = 0;
+	header.sub_cmd = DIAG_CMD_OP_LOG_DISABLE;
+	header.status = LOG_STATUS_SUCCESS;
+	memcpy(dest_buf, &header, sizeof(struct diag_log_config_rsp_t));
+	write_len += sizeof(struct diag_log_config_rsp_t);
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i, pid))
+			continue;
+		diag_send_log_mask_update(i, ALL_EQUIP_ID);
+	}
+
+	return write_len;
+}
+
+int diag_create_msg_mask_table_entry(struct diag_msg_mask_t *msg_mask,
+				     struct diag_ssid_range_t *range)
+{
+	if (!msg_mask || !range)
+		return -EIO;
+	if (range->ssid_last < range->ssid_first)
+		return -EINVAL;
+	msg_mask->ssid_first = range->ssid_first;
+	msg_mask->ssid_last = range->ssid_last;
+	msg_mask->ssid_last_tools = range->ssid_last;
+	msg_mask->range = msg_mask->ssid_last - msg_mask->ssid_first + 1;
+	if (msg_mask->range < MAX_SSID_PER_RANGE)
+		msg_mask->range = MAX_SSID_PER_RANGE;
+	msg_mask->range_tools = msg_mask->range;
+	mutex_init(&msg_mask->lock);
+	if (msg_mask->range > 0) {
+		msg_mask->ptr = kzalloc(msg_mask->range * sizeof(uint32_t),
+					GFP_KERNEL);
+		if (!msg_mask->ptr)
+			return -ENOMEM;
+		kmemleak_not_leak(msg_mask->ptr);
+	}
+	return 0;
+}
+
+static int diag_create_msg_mask_table(void)
+{
+	int i;
+	int err = 0;
+	struct diag_msg_mask_t *mask = (struct diag_msg_mask_t *)msg_mask.ptr;
+	struct diag_ssid_range_t range;
+
+	mutex_lock(&msg_mask.lock);
+	mutex_lock(&driver->msg_mask_lock);
+	driver->msg_mask_tbl_count = MSG_MASK_TBL_CNT;
+	for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+		range.ssid_first = msg_mask_tbl[i].ssid_first;
+		range.ssid_last = msg_mask_tbl[i].ssid_last;
+		err = diag_create_msg_mask_table_entry(mask, &range);
+		if (err)
+			break;
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&msg_mask.lock);
+	return err;
+}
+
+static int diag_create_build_time_mask(void)
+{
+	int i;
+	int err = 0;
+	const uint32_t *tbl = NULL;
+	uint32_t tbl_size = 0;
+	struct diag_msg_mask_t *build_mask = NULL;
+	struct diag_ssid_range_t range;
+
+	mutex_lock(&msg_bt_mask.lock);
+	mutex_lock(&driver->msg_mask_lock);
+	driver->bt_msg_mask_tbl_count = MSG_MASK_TBL_CNT;
+	build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
+	for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
+		range.ssid_first = msg_mask_tbl[i].ssid_first;
+		range.ssid_last = msg_mask_tbl[i].ssid_last;
+		err = diag_create_msg_mask_table_entry(build_mask, &range);
+		if (err)
+			break;
+		switch (build_mask->ssid_first) {
+		case MSG_SSID_0:
+			tbl = msg_bld_masks_0;
+			tbl_size = sizeof(msg_bld_masks_0);
+			break;
+		case MSG_SSID_1:
+			tbl = msg_bld_masks_1;
+			tbl_size = sizeof(msg_bld_masks_1);
+			break;
+		case MSG_SSID_2:
+			tbl = msg_bld_masks_2;
+			tbl_size = sizeof(msg_bld_masks_2);
+			break;
+		case MSG_SSID_3:
+			tbl = msg_bld_masks_3;
+			tbl_size = sizeof(msg_bld_masks_3);
+			break;
+		case MSG_SSID_4:
+			tbl = msg_bld_masks_4;
+			tbl_size = sizeof(msg_bld_masks_4);
+			break;
+		case MSG_SSID_5:
+			tbl = msg_bld_masks_5;
+			tbl_size = sizeof(msg_bld_masks_5);
+			break;
+		case MSG_SSID_6:
+			tbl = msg_bld_masks_6;
+			tbl_size = sizeof(msg_bld_masks_6);
+			break;
+		case MSG_SSID_7:
+			tbl = msg_bld_masks_7;
+			tbl_size = sizeof(msg_bld_masks_7);
+			break;
+		case MSG_SSID_8:
+			tbl = msg_bld_masks_8;
+			tbl_size = sizeof(msg_bld_masks_8);
+			break;
+		case MSG_SSID_9:
+			tbl = msg_bld_masks_9;
+			tbl_size = sizeof(msg_bld_masks_9);
+			break;
+		case MSG_SSID_10:
+			tbl = msg_bld_masks_10;
+			tbl_size = sizeof(msg_bld_masks_10);
+			break;
+		case MSG_SSID_11:
+			tbl = msg_bld_masks_11;
+			tbl_size = sizeof(msg_bld_masks_11);
+			break;
+		case MSG_SSID_12:
+			tbl = msg_bld_masks_12;
+			tbl_size = sizeof(msg_bld_masks_12);
+			break;
+		case MSG_SSID_13:
+			tbl = msg_bld_masks_13;
+			tbl_size = sizeof(msg_bld_masks_13);
+			break;
+		case MSG_SSID_14:
+			tbl = msg_bld_masks_14;
+			tbl_size = sizeof(msg_bld_masks_14);
+			break;
+		case MSG_SSID_15:
+			tbl = msg_bld_masks_15;
+			tbl_size = sizeof(msg_bld_masks_15);
+			break;
+		case MSG_SSID_16:
+			tbl = msg_bld_masks_16;
+			tbl_size = sizeof(msg_bld_masks_16);
+			break;
+		case MSG_SSID_17:
+			tbl = msg_bld_masks_17;
+			tbl_size = sizeof(msg_bld_masks_17);
+			break;
+		case MSG_SSID_18:
+			tbl = msg_bld_masks_18;
+			tbl_size = sizeof(msg_bld_masks_18);
+			break;
+		case MSG_SSID_19:
+			tbl = msg_bld_masks_19;
+			tbl_size = sizeof(msg_bld_masks_19);
+			break;
+		case MSG_SSID_20:
+			tbl = msg_bld_masks_20;
+			tbl_size = sizeof(msg_bld_masks_20);
+			break;
+		case MSG_SSID_21:
+			tbl = msg_bld_masks_21;
+			tbl_size = sizeof(msg_bld_masks_21);
+			break;
+		case MSG_SSID_22:
+			tbl = msg_bld_masks_22;
+			tbl_size = sizeof(msg_bld_masks_22);
+			break;
+		}
+		if (!tbl)
+			continue;
+		if (tbl_size > build_mask->range * sizeof(uint32_t)) {
+			pr_warn("diag: In %s, table %d has more ssid than max, ssid_first: %d, ssid_last: %d\n",
+				__func__, i, build_mask->ssid_first,
+				build_mask->ssid_last);
+			tbl_size = build_mask->range * sizeof(uint32_t);
+		}
+		memcpy(build_mask->ptr, tbl, tbl_size);
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&msg_bt_mask.lock);
+	return err;
+}
+
+static int diag_create_log_mask_table(void)
+{
+	struct diag_log_mask_t *mask = NULL;
+	uint8_t i;
+	int err = 0;
+
+	mutex_lock(&log_mask.lock);
+	mask = (struct diag_log_mask_t *)(log_mask.ptr);
+	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+		mask->equip_id = i;
+		mask->num_items = LOG_GET_ITEM_NUM(log_code_last_tbl[i]);
+		mask->num_items_tools = mask->num_items;
+		mutex_init(&mask->lock);
+		if (LOG_ITEMS_TO_SIZE(mask->num_items) > MAX_ITEMS_PER_EQUIP_ID)
+			mask->range = LOG_ITEMS_TO_SIZE(mask->num_items);
+		else
+			mask->range = MAX_ITEMS_PER_EQUIP_ID;
+		mask->range_tools = mask->range;
+		mask->ptr = kzalloc(mask->range, GFP_KERNEL);
+		if (!mask->ptr) {
+			err = -ENOMEM;
+			break;
+		}
+		kmemleak_not_leak(mask->ptr);
+	}
+	mutex_unlock(&log_mask.lock);
+	return err;
+}
+
+static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len,
+			    int update_buf_len)
+{
+	if (!mask_info || mask_len < 0 || update_buf_len < 0)
+		return -EINVAL;
+
+	mask_info->status = DIAG_CTRL_MASK_INVALID;
+	mask_info->mask_len = mask_len;
+	mask_info->update_buf_len = update_buf_len;
+	if (mask_len > 0) {
+		mask_info->ptr = kzalloc(mask_len, GFP_KERNEL);
+		if (!mask_info->ptr)
+			return -ENOMEM;
+		kmemleak_not_leak(mask_info->ptr);
+	}
+	if (update_buf_len > 0) {
+		mask_info->update_buf = kzalloc(update_buf_len, GFP_KERNEL);
+		if (!mask_info->update_buf) {
+			kfree(mask_info->ptr);
+			mask_info->ptr = NULL;
+			return -ENOMEM;
+		}
+		kmemleak_not_leak(mask_info->update_buf);
+	}
+	return 0;
+}
+
+static void __diag_mask_exit(struct diag_mask_info *mask_info)
+{
+	if (!mask_info || !mask_info->ptr)
+		return;
+
+	mutex_lock(&mask_info->lock);
+	kfree(mask_info->ptr);
+	mask_info->ptr = NULL;
+	kfree(mask_info->update_buf);
+	mask_info->update_buf = NULL;
+	mutex_unlock(&mask_info->lock);
+}
+
+int diag_log_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
+{
+	int i;
+	int err = 0;
+	struct diag_log_mask_t *src_mask = NULL;
+	struct diag_log_mask_t *dest_mask = NULL;
+
+	if (!src || !dest)
+		return -EINVAL;
+
+	mutex_init(&dest->lock);
+	err = __diag_mask_init(dest, LOG_MASK_SIZE, APPS_BUF_SIZE);
+	if (err)
+		return err;
+
+	mutex_lock(&dest->lock);
+	src_mask = (struct diag_log_mask_t *)(src->ptr);
+	dest_mask = (struct diag_log_mask_t *)(dest->ptr);
+
+	dest->mask_len = src->mask_len;
+	dest->status = src->status;
+
+	for (i = 0; i < MAX_EQUIP_ID; i++, src_mask++, dest_mask++) {
+		dest_mask->equip_id = src_mask->equip_id;
+		dest_mask->num_items = src_mask->num_items;
+		dest_mask->num_items_tools = src_mask->num_items_tools;
+		mutex_init(&dest_mask->lock);
+		dest_mask->range = src_mask->range;
+		dest_mask->range_tools = src_mask->range_tools;
+		dest_mask->ptr = kzalloc(dest_mask->range_tools, GFP_KERNEL);
+		if (!dest_mask->ptr) {
+			err = -ENOMEM;
+			break;
+		}
+		kmemleak_not_leak(dest_mask->ptr);
+		memcpy(dest_mask->ptr, src_mask->ptr, dest_mask->range_tools);
+	}
+	mutex_unlock(&dest->lock);
+
+	return err;
+}
+
+void diag_log_mask_free(struct diag_mask_info *mask_info)
+{
+	int i;
+	struct diag_log_mask_t *mask = NULL;
+
+	if (!mask_info || !mask_info->ptr)
+		return;
+
+	mutex_lock(&mask_info->lock);
+	mask = (struct diag_log_mask_t *)mask_info->ptr;
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&mask_info->lock);
+		return;
+	}
+	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+		kfree(mask->ptr);
+		mask->ptr = NULL;
+	}
+	mutex_unlock(&mask_info->lock);
+
+	__diag_mask_exit(mask_info);
+
+}
+
+static int diag_msg_mask_init(void)
+{
+	int err = 0;
+	int i;
+
+	mutex_init(&msg_mask.lock);
+	err = __diag_mask_init(&msg_mask, MSG_MASK_SIZE, APPS_BUF_SIZE);
+	if (err)
+		return err;
+
+	err = diag_create_msg_mask_table();
+	if (err) {
+		pr_err("diag: Unable to create msg masks, err: %d\n", err);
+		return err;
+	}
+	mutex_lock(&driver->msg_mask_lock);
+	driver->msg_mask = &msg_mask;
+	for (i = 0; i < NUM_PERIPHERALS; i++)
+		driver->max_ssid_count[i] = 0;
+	mutex_unlock(&driver->msg_mask_lock);
+
+	return 0;
+}
+
+int diag_msg_mask_copy(struct diag_md_session_t *new_session,
+	struct diag_mask_info *dest, struct diag_mask_info *src)
+{
+	int i;
+	int err = 0;
+	struct diag_msg_mask_t *src_mask = NULL;
+	struct diag_msg_mask_t *dest_mask = NULL;
+	struct diag_ssid_range_t range;
+
+	if (!src || !dest)
+		return -EINVAL;
+
+	mutex_init(&dest->lock);
+	mutex_lock(&dest->lock);
+	mutex_lock(&driver->msg_mask_lock);
+	new_session->msg_mask_tbl_count =
+		driver->msg_mask_tbl_count;
+	err = __diag_mask_init(dest,
+		(new_session->msg_mask_tbl_count *
+		sizeof(struct diag_msg_mask_t)), APPS_BUF_SIZE);
+	if (err) {
+		mutex_unlock(&driver->msg_mask_lock);
+		mutex_unlock(&dest->lock);
+		return err;
+	}
+	src_mask = (struct diag_msg_mask_t *)src->ptr;
+	dest_mask = (struct diag_msg_mask_t *)dest->ptr;
+
+	dest->mask_len = src->mask_len;
+	dest->status = src->status;
+	for (i = 0; i < new_session->msg_mask_tbl_count; i++) {
+		range.ssid_first = src_mask->ssid_first;
+		range.ssid_last = src_mask->ssid_last;
+		err = diag_create_msg_mask_table_entry(dest_mask, &range);
+		if (err)
+			break;
+		memcpy(dest_mask->ptr, src_mask->ptr,
+		       dest_mask->range * sizeof(uint32_t));
+		src_mask++;
+		dest_mask++;
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&dest->lock);
+	return err;
+}
+
+void diag_msg_mask_free(struct diag_mask_info *mask_info,
+	struct diag_md_session_t *session_info)
+{
+	int i;
+	struct diag_msg_mask_t *mask = NULL;
+	uint8_t msg_mask_tbl_count = 0;
+
+	if (!mask_info || !mask_info->ptr)
+		return;
+	mutex_lock(&mask_info->lock);
+	mutex_lock(&driver->msg_mask_lock);
+	mask = (struct diag_msg_mask_t *)mask_info->ptr;
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&driver->msg_mask_lock);
+		mutex_unlock(&mask_info->lock);
+		return;
+	}
+
+	msg_mask_tbl_count = (session_info) ?
+		session_info->msg_mask_tbl_count :
+		driver->msg_mask_tbl_count;
+	for (i = 0; i < msg_mask_tbl_count; i++, mask++) {
+		kfree(mask->ptr);
+		mask->ptr = NULL;
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&mask_info->lock);
+	__diag_mask_exit(mask_info);
+}
+
+static void diag_msg_mask_exit(void)
+{
+	int i;
+	struct diag_msg_mask_t *mask = NULL;
+	mutex_lock(&driver->msg_mask_lock);
+	mask = (struct diag_msg_mask_t *)(msg_mask.ptr);
+	if (mask) {
+		for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++)
+			kfree(mask->ptr);
+		kfree(msg_mask.ptr);
+		msg_mask.ptr = NULL;
+	}
+	kfree(msg_mask.update_buf);
+	msg_mask.update_buf = NULL;
+	mutex_unlock(&driver->msg_mask_lock);
+}
+
+static int diag_build_time_mask_init(void)
+{
+	int err = 0;
+
+	/* There is no need for update buffer for Build Time masks */
+	mutex_init(&msg_bt_mask.lock);
+	err = __diag_mask_init(&msg_bt_mask, MSG_MASK_SIZE, 0);
+	if (err)
+		return err;
+	err = diag_create_build_time_mask();
+	if (err) {
+		pr_err("diag: Unable to create msg build time masks, err: %d\n",
+		       err);
+		return err;
+	}
+	driver->build_time_mask = &msg_bt_mask;
+	return 0;
+}
+
+static void diag_build_time_mask_exit(void)
+{
+	int i;
+	struct diag_msg_mask_t *mask = NULL;
+	mutex_lock(&driver->msg_mask_lock);
+	mask = (struct diag_msg_mask_t *)(msg_bt_mask.ptr);
+	if (mask) {
+		for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, mask++)
+			kfree(mask->ptr);
+		kfree(msg_bt_mask.ptr);
+		msg_bt_mask.ptr = NULL;
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+}
+
+static int diag_log_mask_init(void)
+{
+	int err = 0;
+	int i;
+
+	mutex_init(&log_mask.lock);
+	err = __diag_mask_init(&log_mask, LOG_MASK_SIZE, APPS_BUF_SIZE);
+	if (err)
+		return err;
+	err = diag_create_log_mask_table();
+	if (err)
+		return err;
+	driver->log_mask = &log_mask;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++)
+		driver->num_equip_id[i] = 0;
+
+	return 0;
+}
+
+static void diag_log_mask_exit(void)
+{
+	int i;
+	struct diag_log_mask_t *mask = NULL;
+
+	mask = (struct diag_log_mask_t *)(log_mask.ptr);
+	if (mask) {
+		for (i = 0; i < MAX_EQUIP_ID; i++, mask++)
+			kfree(mask->ptr);
+		kfree(log_mask.ptr);
+	}
+
+	kfree(log_mask.update_buf);
+}
+
+static int diag_event_mask_init(void)
+{
+	int err = 0;
+	int i;
+
+	mutex_init(&event_mask.lock);
+	err = __diag_mask_init(&event_mask, EVENT_MASK_SIZE, APPS_BUF_SIZE);
+	if (err)
+		return err;
+	driver->event_mask_size = EVENT_MASK_SIZE;
+	driver->last_event_id = APPS_EVENT_LAST_ID;
+	driver->event_mask = &event_mask;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++)
+		driver->num_event_id[i] = 0;
+
+	return 0;
+}
+
+int diag_event_mask_copy(struct diag_mask_info *dest,
+			 struct diag_mask_info *src)
+{
+	int err = 0;
+
+	if (!src || !dest)
+		return -EINVAL;
+
+	mutex_init(&dest->lock);
+	err = __diag_mask_init(dest, EVENT_MASK_SIZE, APPS_BUF_SIZE);
+	if (err)
+		return err;
+
+	mutex_lock(&dest->lock);
+	dest->mask_len = src->mask_len;
+	dest->status = src->status;
+	memcpy(dest->ptr, src->ptr, dest->mask_len);
+	mutex_unlock(&dest->lock);
+
+	return err;
+}
+
+void diag_event_mask_free(struct diag_mask_info *mask_info)
+{
+	if (!mask_info)
+		return;
+
+	__diag_mask_exit(mask_info);
+}
+
+static void diag_event_mask_exit(void)
+{
+	kfree(event_mask.ptr);
+	kfree(event_mask.update_buf);
+}
+
+int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
+			       struct diag_md_session_t *info)
+{
+	int i;
+	int err = 0;
+	int len = 0;
+	int copy_len = 0;
+	int total_len = 0;
+	struct diag_msg_mask_userspace_t header;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_msg_mask_t *mask = NULL;
+	unsigned char *ptr = NULL;
+	uint8_t msg_mask_tbl_count = 0;
+
+	if (!buf || count == 0)
+		return -EINVAL;
+
+	mask_info = (!info) ? &msg_mask : info->msg_mask;
+	if (!mask_info)
+		return -EIO;
+
+	if (!mask_info->ptr || !mask_info->update_buf) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf: %pK\n",
+			__func__, mask_info->ptr, mask_info->update_buf);
+		return -EINVAL;
+	}
+	mutex_lock(&driver->diag_maskclear_mutex);
+	if (driver->mask_clear) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag:%s: count = %zu\n", __func__, count);
+		mutex_unlock(&driver->diag_maskclear_mutex);
+		return -EIO;
+	}
+	mutex_unlock(&driver->diag_maskclear_mutex);
+	mutex_lock(&mask_info->lock);
+	mutex_lock(&driver->msg_mask_lock);
+
+	mask = (struct diag_msg_mask_t *)(mask_info->ptr);
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&driver->msg_mask_lock);
+		mutex_unlock(&mask_info->lock);
+		return -EINVAL;
+	}
+
+	msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count :
+			driver->msg_mask_tbl_count;
+	for (i = 0; i < msg_mask_tbl_count; i++, mask++) {
+		if (!mask->ptr)
+			continue;
+		ptr = mask_info->update_buf;
+		len = 0;
+		mutex_lock(&mask->lock);
+		header.ssid_first = mask->ssid_first;
+		header.ssid_last = mask->ssid_last_tools;
+		header.range = mask->range_tools;
+		memcpy(ptr, &header, sizeof(header));
+		len += sizeof(header);
+		copy_len = (sizeof(uint32_t) * mask->range_tools);
+		if ((len + copy_len) > mask_info->update_buf_len) {
+			pr_err("diag: In %s, no space to update msg mask, first: %d, last: %d\n",
+			       __func__, mask->ssid_first,
+			       mask->ssid_last_tools);
+			mutex_unlock(&mask->lock);
+			continue;
+		}
+		memcpy(ptr + len, mask->ptr, copy_len);
+		len += copy_len;
+		mutex_unlock(&mask->lock);
+		/* + sizeof(int) to account for data_type already in buf */
+		if (total_len + sizeof(int) + len > count) {
+			pr_err("diag: In %s, unable to send msg masks to user space, total_len: %d, count: %zu\n",
+			       __func__, total_len, count);
+			err = -ENOMEM;
+			break;
+		}
+		err = copy_to_user(buf + total_len, (void *)ptr, len);
+		if (err) {
+			pr_err("diag: In %s Unable to send msg masks to user space clients, err: %d\n",
+			       __func__, err);
+			break;
+		}
+		total_len += len;
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+	mutex_unlock(&mask_info->lock);
+	return err ? err : total_len;
+}
+
+int diag_copy_to_user_log_mask(char __user *buf, size_t count,
+			       struct diag_md_session_t *info)
+{
+	int i;
+	int err = 0;
+	int len = 0;
+	int copy_len = 0;
+	int total_len = 0;
+	struct diag_log_mask_userspace_t header;
+	struct diag_log_mask_t *mask = NULL;
+	struct diag_mask_info *mask_info = NULL;
+	unsigned char *ptr = NULL;
+
+	if (!buf || count == 0)
+		return -EINVAL;
+
+	mask_info = (!info) ? &log_mask : info->log_mask;
+	if (!mask_info)
+		return -EIO;
+
+	if (!mask_info->ptr || !mask_info->update_buf) {
+		pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf: %pK\n",
+			__func__, mask_info->ptr, mask_info->update_buf);
+		return -EINVAL;
+	}
+
+	mutex_lock(&mask_info->lock);
+	mask = (struct diag_log_mask_t *)(mask_info->ptr);
+	if (!mask->ptr) {
+		pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+			__func__, mask->ptr);
+		mutex_unlock(&mask_info->lock);
+		return -EINVAL;
+	}
+	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+		ptr = mask_info->update_buf;
+		len = 0;
+		mutex_lock(&mask->lock);
+		header.equip_id = mask->equip_id;
+		header.num_items = mask->num_items_tools;
+		memcpy(ptr, &header, sizeof(header));
+		len += sizeof(header);
+		copy_len = LOG_ITEMS_TO_SIZE(header.num_items);
+		if ((len + copy_len) > mask_info->update_buf_len) {
+			pr_err("diag: In %s, no space to update log mask, equip_id: %d\n",
+			       __func__, mask->equip_id);
+			mutex_unlock(&mask->lock);
+			continue;
+		}
+		memcpy(ptr + len, mask->ptr, copy_len);
+		len += copy_len;
+		mutex_unlock(&mask->lock);
+		/* + sizeof(int) to account for data_type already in buf */
+		if (total_len + sizeof(int) + len > count) {
+			pr_err("diag: In %s, unable to send log masks to user space, total_len: %d, count: %zu\n",
+			       __func__, total_len, count);
+			err = -ENOMEM;
+			break;
+		}
+		err = copy_to_user(buf + total_len, (void *)ptr, len);
+		if (err) {
+			pr_err("diag: In %s Unable to send log masks to user space clients, err: %d\n",
+			       __func__, err);
+			break;
+		}
+		total_len += len;
+	}
+	mutex_unlock(&mask_info->lock);
+
+	return err ? err : total_len;
+}
+
+void diag_send_updates_peripheral(uint8_t peripheral)
+{
+	diag_send_feature_mask_update(peripheral);
+	if (driver->time_sync_enabled)
+		diag_send_time_sync_update(peripheral);
+	diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID);
+	diag_send_log_mask_update(peripheral, ALL_EQUIP_ID);
+	diag_send_event_mask_update(peripheral);
+	diag_send_real_time_update(peripheral,
+				driver->real_time_mode[DIAG_LOCAL_PROC]);
+	diag_send_peripheral_buffering_mode(
+				&driver->buffering_mode[peripheral]);
+}
+
+int diag_process_apps_masks(unsigned char *buf, int len, int pid)
+{
+	int size = 0;
+	int sub_cmd = 0;
+	int (*hdlr)(unsigned char *src_buf, int src_len,
+		    unsigned char *dest_buf, int dest_len, int pid) = NULL;
+
+	if (!buf || len <= 0)
+		return -EINVAL;
+
+	if (*buf == DIAG_CMD_LOG_CONFIG) {
+		sub_cmd = *(int *)(buf + sizeof(int));
+		switch (sub_cmd) {
+		case DIAG_CMD_OP_LOG_DISABLE:
+			hdlr = diag_cmd_disable_log_mask;
+			break;
+		case DIAG_CMD_OP_GET_LOG_RANGE:
+			hdlr = diag_cmd_get_log_range;
+			break;
+		case DIAG_CMD_OP_SET_LOG_MASK:
+			hdlr = diag_cmd_set_log_mask;
+			break;
+		case DIAG_CMD_OP_GET_LOG_MASK:
+			hdlr = diag_cmd_get_log_mask;
+			break;
+		}
+	} else if (*buf == DIAG_CMD_MSG_CONFIG) {
+		sub_cmd = *(uint8_t *)(buf + sizeof(uint8_t));
+		switch (sub_cmd) {
+		case DIAG_CMD_OP_GET_SSID_RANGE:
+			hdlr = diag_cmd_get_ssid_range;
+			break;
+		case DIAG_CMD_OP_GET_BUILD_MASK:
+			hdlr = diag_cmd_get_build_mask;
+			break;
+		case DIAG_CMD_OP_GET_MSG_MASK:
+			hdlr = diag_cmd_get_msg_mask;
+			break;
+		case DIAG_CMD_OP_SET_MSG_MASK:
+			hdlr = diag_cmd_set_msg_mask;
+			break;
+		case DIAG_CMD_OP_SET_ALL_MSG_MASK:
+			hdlr = diag_cmd_set_all_msg_mask;
+			break;
+		}
+	} else if (*buf == DIAG_CMD_GET_EVENT_MASK) {
+		hdlr = diag_cmd_get_event_mask;
+	} else if (*buf == DIAG_CMD_SET_EVENT_MASK) {
+		hdlr = diag_cmd_update_event_mask;
+	} else if (*buf == DIAG_CMD_EVENT_TOGGLE) {
+		hdlr = diag_cmd_toggle_events;
+	}
+
+	if (hdlr)
+		size = hdlr(buf, len, driver->apps_rsp_buf,
+			    DIAG_MAX_RSP_SIZE, pid);
+
+	return (size > 0) ? size : 0;
+}
+
+int diag_masks_init(void)
+{
+	int err = 0;
+	err = diag_msg_mask_init();
+	if (err)
+		goto fail;
+
+	err = diag_build_time_mask_init();
+	if (err)
+		goto fail;
+
+	err = diag_log_mask_init();
+	if (err)
+		goto fail;
+
+	err = diag_event_mask_init();
+	if (err)
+		goto fail;
+
+	if (driver->buf_feature_mask_update == NULL) {
+		driver->buf_feature_mask_update = kzalloc(sizeof(
+					struct diag_ctrl_feature_mask) +
+					FEATURE_MASK_LEN, GFP_KERNEL);
+		if (driver->buf_feature_mask_update == NULL)
+			goto fail;
+		kmemleak_not_leak(driver->buf_feature_mask_update);
+	}
+
+	return 0;
+fail:
+	pr_err("diag: Could not initialize diag mask buffers\n");
+	diag_masks_exit();
+	return -ENOMEM;
+}
+
+void diag_masks_exit(void)
+{
+	diag_msg_mask_exit();
+	diag_build_time_mask_exit();
+	diag_log_mask_exit();
+	diag_event_mask_exit();
+	kfree(driver->buf_feature_mask_update);
+}
diff -Nruw linux-6.4-fbx/drivers/char/diag/diag_masks.h linux-6.4-fbx/drivers/char/diag/diag_masks.h
--- linux-6.4-fbx/drivers/char/diag/diag_masks.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diag_masks.h	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,180 @@
+/* Copyright (c) 2013-2015, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAG_MASKS_H
+#define DIAG_MASKS_H
+
+#include "diagfwd.h"
+
+struct diag_log_mask_t {
+	uint8_t equip_id;
+	uint32_t num_items;
+	uint32_t num_items_tools;
+	uint32_t range;
+	uint32_t range_tools;
+	struct mutex lock;
+	uint8_t *ptr;
+};
+
+struct diag_ssid_range_t {
+	uint16_t ssid_first;
+	uint16_t ssid_last;
+} __packed;
+
+struct diag_msg_mask_t {
+	uint32_t ssid_first;
+	uint32_t ssid_last;
+	uint32_t ssid_last_tools;
+	uint32_t range;
+	uint32_t range_tools;
+	struct mutex lock;
+	uint32_t *ptr;
+};
+
+struct diag_log_config_req_t {
+	uint8_t cmd_code;
+	uint8_t padding[3];
+	uint32_t sub_cmd;
+	uint32_t equip_id;
+	uint32_t num_items;
+} __packed;
+
+struct diag_log_config_rsp_t {
+	uint8_t cmd_code;
+	uint8_t padding[3];
+	uint32_t sub_cmd;
+	uint32_t status;
+} __packed;
+
+struct diag_log_config_set_rsp_t {
+	uint8_t cmd_code;
+	uint8_t padding[3];
+	uint32_t sub_cmd;
+	uint32_t status;
+	uint32_t equip_id;
+	uint32_t num_items;
+} __packed;
+
+struct diag_log_on_demand_rsp_t {
+	uint8_t cmd_code;
+	uint16_t log_code;
+	uint8_t status;
+} __packed;
+
+struct diag_event_report_t {
+	uint8_t cmd_code;
+	uint16_t padding;
+} __packed;
+
+struct diag_event_mask_config_t {
+	uint8_t cmd_code;
+	uint8_t status;
+	uint16_t padding;
+	uint16_t num_bits;
+} __packed;
+
+struct diag_msg_config_rsp_t {
+	uint8_t cmd_code;
+	uint8_t sub_cmd;
+	uint8_t status;
+	uint8_t padding;
+	uint32_t rt_mask;
+} __packed;
+
+struct diag_msg_ssid_query_t {
+	uint8_t cmd_code;
+	uint8_t sub_cmd;
+	uint8_t status;
+	uint8_t padding;
+	uint32_t count;
+} __packed;
+
+struct diag_build_mask_req_t {
+	uint8_t cmd_code;
+	uint8_t sub_cmd;
+	uint16_t ssid_first;
+	uint16_t ssid_last;
+} __packed;
+
+struct diag_msg_build_mask_t {
+	uint8_t cmd_code;
+	uint8_t sub_cmd;
+	uint16_t ssid_first;
+	uint16_t ssid_last;
+	uint8_t status;
+	uint8_t padding;
+} __packed;
+
+struct diag_msg_mask_userspace_t {
+	uint32_t ssid_first;
+	uint32_t ssid_last;
+	uint32_t range;
+} __packed;
+
+struct diag_log_mask_userspace_t {
+	uint8_t equip_id;
+	uint32_t num_items;
+} __packed;
+
+#define MAX_EQUIP_ID	16
+#define MSG_MASK_SIZE	(MSG_MASK_TBL_CNT * sizeof(struct diag_msg_mask_t))
+#define LOG_MASK_SIZE	(MAX_EQUIP_ID * sizeof(struct diag_log_mask_t))
+#define EVENT_MASK_SIZE 513
+#define MAX_ITEMS_PER_EQUIP_ID	512
+#define MAX_ITEMS_ALLOWED	0xFFF
+
+#define LOG_MASK_CTRL_HEADER_LEN	11
+#define MSG_MASK_CTRL_HEADER_LEN	11
+#define EVENT_MASK_CTRL_HEADER_LEN	7
+
+#define LOG_STATUS_SUCCESS	0
+#define LOG_STATUS_INVALID	1
+#define LOG_STATUS_FAIL		2
+
+#define MSG_STATUS_FAIL		0
+#define MSG_STATUS_SUCCESS	1
+
+#define EVENT_STATUS_SUCCESS	0
+#define EVENT_STATUS_FAIL	1
+
+#define DIAG_CTRL_MASK_INVALID		0
+#define DIAG_CTRL_MASK_ALL_DISABLED	1
+#define DIAG_CTRL_MASK_ALL_ENABLED	2
+#define DIAG_CTRL_MASK_VALID		3
+
+extern struct diag_mask_info msg_mask;
+extern struct diag_mask_info msg_bt_mask;
+extern struct diag_mask_info log_mask;
+extern struct diag_mask_info event_mask;
+
+int diag_masks_init(void);
+void diag_masks_exit(void);
+int diag_log_mask_copy(struct diag_mask_info *dest,
+		       struct diag_mask_info *src);
+int diag_msg_mask_copy(struct diag_md_session_t *new_session,
+	struct diag_mask_info *dest, struct diag_mask_info *src);
+int diag_event_mask_copy(struct diag_mask_info *dest,
+			 struct diag_mask_info *src);
+void diag_log_mask_free(struct diag_mask_info *mask_info);
+void diag_msg_mask_free(struct diag_mask_info *mask_info,
+	struct diag_md_session_t *session_info);
+void diag_event_mask_free(struct diag_mask_info *mask_info);
+int diag_process_apps_masks(unsigned char *buf, int len, int pid);
+void diag_send_updates_peripheral(uint8_t peripheral);
+
+extern int diag_create_msg_mask_table_entry(struct diag_msg_mask_t *msg_mask,
+					    struct diag_ssid_range_t *range);
+extern int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
+				      struct diag_md_session_t *info);
+extern int diag_copy_to_user_log_mask(char __user *buf, size_t count,
+				      struct diag_md_session_t *info);
+#endif
diff -Nruw linux-6.4-fbx/drivers/char/diag/diag_memorydevice.c linux-6.4-fbx/drivers/char/diag/diag_memorydevice.c
--- linux-6.4-fbx/drivers/char/diag/diag_memorydevice.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diag_memorydevice.c	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,433 @@
+/* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/diagchar.h>
+#include <linux/delay.h>
+#include <linux/kmemleak.h>
+#include <linux/uaccess.h>
+#include "diagchar.h"
+#include "diag_memorydevice.h"
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#include "diagfwd_bridge.h"
+#endif
+#include "diag_mux.h"
+#include "diagmem.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+
+struct diag_md_info diag_md[NUM_DIAG_MD_DEV] = {
+	{
+		.id = DIAG_MD_LOCAL,
+		.ctx = 0,
+		.mempool = POOL_TYPE_MUX_APPS,
+		.num_tbl_entries = 0,
+		.tbl = NULL,
+		.ops = NULL,
+	},
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	{
+		.id = DIAG_MD_MDM,
+		.ctx = 0,
+		.mempool = POOL_TYPE_MDM_MUX,
+		.num_tbl_entries = 0,
+		.tbl = NULL,
+		.ops = NULL,
+	},
+	{
+		.id = DIAG_MD_MDM2,
+		.ctx = 0,
+		.mempool = POOL_TYPE_MDM2_MUX,
+		.num_tbl_entries = 0,
+		.tbl = NULL,
+		.ops = NULL,
+	},
+	{
+		.id = DIAG_MD_SMUX,
+		.ctx = 0,
+		.mempool = POOL_TYPE_QSC_MUX,
+		.num_tbl_entries = 0,
+		.tbl = NULL,
+		.ops = NULL,
+	}
+#endif
+};
+
+int diag_md_register(int id, int ctx, struct diag_mux_ops *ops)
+{
+	if (id < 0 || id >= NUM_DIAG_MD_DEV || !ops)
+		return -EINVAL;
+
+	diag_md[id].ops = ops;
+	diag_md[id].ctx = ctx;
+	return 0;
+}
+
+void diag_md_open_all()
+{
+	int i;
+	struct diag_md_info *ch = NULL;
+
+	for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+		ch = &diag_md[i];
+		if (ch->ops && ch->ops->open)
+			ch->ops->open(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
+	}
+
+	return;
+}
+
+void diag_md_close_all()
+{
+	int i, j;
+	unsigned long flags;
+	struct diag_md_info *ch = NULL;
+	struct diag_buf_tbl_t *entry = NULL;
+
+	for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+		ch = &diag_md[i];
+
+		if (ch->ops && ch->ops->close)
+			ch->ops->close(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
+
+		/*
+		 * When we close the Memory device mode, make sure we flush the
+		 * internal buffers in the table so that there are no stale
+		 * entries.
+		 */
+		spin_lock_irqsave(&ch->lock, flags);
+		for (j = 0; j < ch->num_tbl_entries; j++) {
+			entry = &ch->tbl[j];
+			if (entry->len <= 0)
+				continue;
+			if (ch->ops && ch->ops->write_done)
+				ch->ops->write_done(entry->buf, entry->len,
+						    entry->ctx,
+						    DIAG_MEMORY_DEVICE_MODE);
+			entry->buf = NULL;
+			entry->len = 0;
+			entry->ctx = 0;
+		}
+		spin_unlock_irqrestore(&ch->lock, flags);
+	}
+
+	diag_ws_reset(DIAG_WS_MUX);
+}
+
+int diag_md_write(int id, unsigned char *buf, int len, int ctx)
+{
+	int i, pid = 0;
+	uint8_t found = 0;
+	unsigned long flags;
+	struct diag_md_info *ch = NULL;
+	uint8_t peripheral;
+	struct diag_md_session_t *session_info = NULL;
+
+	if (id < 0 || id >= NUM_DIAG_MD_DEV || id >= DIAG_NUM_PROC)
+		return -EINVAL;
+
+	if (!buf || len < 0)
+		return -EINVAL;
+
+	peripheral = GET_BUF_PERIPHERAL(ctx);
+	if (peripheral > NUM_PERIPHERALS)
+		return -EINVAL;
+
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_peripheral(peripheral);
+	if (!session_info) {
+		mutex_unlock(&driver->md_session_lock);
+		return -EIO;
+	}
+	pid = session_info->pid;
+	mutex_unlock(&driver->md_session_lock);
+
+	ch = &diag_md[id];
+
+	spin_lock_irqsave(&ch->lock, flags);
+	for (i = 0; i < ch->num_tbl_entries && !found; i++) {
+		if (ch->tbl[i].buf != buf)
+			continue;
+		found = 1;
+		pr_err_ratelimited("diag: trying to write the same buffer buf: %pK, ctxt: %d len: %d at i: %d back to the table, proc: %d, mode: %d\n",
+				   buf, ctx, ch->tbl[i].len,
+				   i, id, driver->logging_mode);
+	}
+	spin_unlock_irqrestore(&ch->lock, flags);
+
+	if (found)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&ch->lock, flags);
+	for (i = 0; i < ch->num_tbl_entries && !found; i++) {
+		if (ch->tbl[i].len == 0) {
+			ch->tbl[i].buf = buf;
+			ch->tbl[i].len = len;
+			ch->tbl[i].ctx = ctx;
+			found = 1;
+			diag_ws_on_read(DIAG_WS_MUX, len);
+		}
+	}
+	spin_unlock_irqrestore(&ch->lock, flags);
+
+	if (!found) {
+		pr_err_ratelimited("diag: Unable to find an empty space in table, please reduce logging rate, proc: %d\n",
+				   id);
+		return -ENOMEM;
+	}
+
+	found = 0;
+	mutex_lock(&driver->diagchar_mutex);
+	for (i = 0; i < driver->num_clients && !found; i++) {
+		if ((driver->client_map[i].pid != pid) ||
+		    (driver->client_map[i].pid == 0))
+			continue;
+
+		found = 1;
+		if (!(driver->data_ready[i] & USER_SPACE_DATA_TYPE)) {
+			driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+			atomic_inc(&driver->data_ready_notif[i]);
+		}
+		pr_debug("diag: wake up logging process\n");
+		wake_up_interruptible(&driver->wait_q);
+	}
+	mutex_unlock(&driver->diagchar_mutex);
+
+	if (!found)
+		return -EINVAL;
+
+	return 0;
+}
+
+int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
+			struct diag_md_session_t *info)
+{
+	int i, j;
+	int err = 0;
+	int ret = *pret;
+	int num_data = 0;
+	int remote_token;
+	unsigned long flags;
+	struct diag_md_info *ch = NULL;
+	struct diag_buf_tbl_t *entry = NULL;
+	uint8_t drain_again = 0;
+	uint8_t peripheral = 0;
+	struct diag_md_session_t *session_info = NULL;
+
+	for (i = 0; i < NUM_DIAG_MD_DEV && !err; i++) {
+		ch = &diag_md[i];
+		for (j = 0; j < ch->num_tbl_entries && !err; j++) {
+			entry = &ch->tbl[j];
+			if (entry->len <= 0)
+				continue;
+			peripheral = GET_BUF_PERIPHERAL(entry->ctx);
+			/* Account for Apps data as well */
+			if (peripheral > NUM_PERIPHERALS)
+				goto drop_data;
+			session_info =
+			diag_md_session_get_peripheral(peripheral);
+			if (session_info && info &&
+				(session_info->pid != info->pid))
+				continue;
+			if ((info && (info->peripheral_mask &
+			    MD_PERIPHERAL_MASK(peripheral)) == 0))
+				goto drop_data;
+			/*
+			 * If the data is from remote processor, copy the remote
+			 * token first
+			 */
+			if (i > 0) {
+				if ((ret + (3 * sizeof(int)) + entry->len) >=
+							buf_size) {
+					drain_again = 1;
+					break;
+				}
+			} else {
+				if ((ret + (2 * sizeof(int)) + entry->len) >=
+						buf_size) {
+					drain_again = 1;
+					break;
+				}
+			}
+			if (i > 0) {
+				remote_token = diag_get_remote(i);
+				err = copy_to_user(buf + ret, &remote_token,
+						   sizeof(int));
+				if (err)
+					goto drop_data;
+				ret += sizeof(int);
+			}
+
+			/* Copy the length of data being passed */
+			err = copy_to_user(buf + ret, (void *)&(entry->len),
+					   sizeof(int));
+			if (err)
+				goto drop_data;
+			ret += sizeof(int);
+
+			/* Copy the actual data being passed */
+			err = copy_to_user(buf + ret, (void *)entry->buf,
+					   entry->len);
+			if (err)
+				goto drop_data;
+			ret += entry->len;
+
+			/*
+			 * The data is now copied to the user space client,
+			 * Notify that the write is complete and delete its
+			 * entry from the table
+			 */
+			num_data++;
+drop_data:
+			spin_lock_irqsave(&ch->lock, flags);
+			if (ch->ops && ch->ops->write_done)
+				ch->ops->write_done(entry->buf, entry->len,
+						    entry->ctx,
+						    DIAG_MEMORY_DEVICE_MODE);
+			diag_ws_on_copy(DIAG_WS_MUX);
+			entry->buf = NULL;
+			entry->len = 0;
+			entry->ctx = 0;
+			spin_unlock_irqrestore(&ch->lock, flags);
+		}
+	}
+
+	*pret = ret;
+	err = copy_to_user(buf + sizeof(int), (void *)&num_data, sizeof(int));
+	diag_ws_on_copy_complete(DIAG_WS_MUX);
+	if (drain_again)
+		chk_logging_wakeup();
+
+	return err;
+}
+
+int diag_md_close_peripheral(int id, uint8_t peripheral)
+{
+	int i;
+	uint8_t found = 0;
+	unsigned long flags;
+	struct diag_md_info *ch = NULL;
+	struct diag_buf_tbl_t *entry = NULL;
+
+	if (id < 0 || id >= NUM_DIAG_MD_DEV || id >= DIAG_NUM_PROC)
+		return -EINVAL;
+
+	ch = &diag_md[id];
+
+	spin_lock_irqsave(&ch->lock, flags);
+	for (i = 0; i < ch->num_tbl_entries && !found; i++) {
+		entry = &ch->tbl[i];
+		if (GET_BUF_PERIPHERAL(entry->ctx) != peripheral)
+			continue;
+		found = 1;
+		if (ch->ops && ch->ops->write_done) {
+			ch->ops->write_done(entry->buf, entry->len,
+					    entry->ctx,
+					    DIAG_MEMORY_DEVICE_MODE);
+			entry->buf = NULL;
+			entry->len = 0;
+			entry->ctx = 0;
+		}
+	}
+	spin_unlock_irqrestore(&ch->lock, flags);
+	return 0;
+}
+
+int diag_md_init()
+{
+	int i, j;
+	struct diag_md_info *ch = NULL;
+
+	for (i = 0; i < DIAG_MD_LOCAL_LAST; i++) {
+		ch = &diag_md[i];
+		ch->num_tbl_entries = diag_mempools[ch->mempool].poolsize;
+		ch->tbl = kzalloc(ch->num_tbl_entries *
+				  sizeof(struct diag_buf_tbl_t),
+				  GFP_KERNEL);
+		if (!ch->tbl)
+			goto fail;
+
+		for (j = 0; j < ch->num_tbl_entries; j++) {
+			ch->tbl[j].buf = NULL;
+			ch->tbl[j].len = 0;
+			ch->tbl[j].ctx = 0;
+		}
+		spin_lock_init(&(ch->lock));
+	}
+
+	return 0;
+
+fail:
+	diag_md_exit();
+	return -ENOMEM;
+}
+
+int diag_md_mdm_init(void)
+{
+	int i, j;
+	struct diag_md_info *ch = NULL;
+
+	for (i = DIAG_MD_BRIDGE_BASE; i < NUM_DIAG_MD_DEV; i++) {
+		ch = &diag_md[i];
+		ch->num_tbl_entries = diag_mempools[ch->mempool].poolsize;
+		ch->tbl = kcalloc(ch->num_tbl_entries, sizeof(*ch->tbl),
+				GFP_KERNEL);
+		if (!ch->tbl)
+			goto fail;
+
+		for (j = 0; j < ch->num_tbl_entries; j++) {
+			ch->tbl[j].buf = NULL;
+			ch->tbl[j].len = 0;
+			ch->tbl[j].ctx = 0;
+		}
+		spin_lock_init(&(ch->lock));
+	}
+
+	return 0;
+
+fail:
+	diag_md_mdm_exit();
+	return -ENOMEM;
+}
+
+void diag_md_exit(void)
+{
+	int i;
+	struct diag_md_info *ch = NULL;
+
+	for (i = 0; i < DIAG_MD_LOCAL_LAST; i++) {
+		ch = &diag_md[i];
+		kfree(ch->tbl);
+		ch->num_tbl_entries = 0;
+		ch->ops = NULL;
+	}
+}
+
+void diag_md_mdm_exit(void)
+{
+	int i;
+	struct diag_md_info *ch = NULL;
+
+	for (i = DIAG_MD_BRIDGE_BASE; i < NUM_DIAG_MD_DEV; i++) {
+		ch = &diag_md[i];
+		kfree(ch->tbl);
+		ch->num_tbl_entries = 0;
+		ch->ops = NULL;
+	}
+}
diff -Nruw linux-6.4-fbx/drivers/char/diag./diag_memorydevice.h linux-6.4-fbx/drivers/char/diag/diag_memorydevice.h
--- linux-6.4-fbx/drivers/char/diag./diag_memorydevice.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diag_memorydevice.h	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,59 @@
+/* Copyright (c) 2014-2015, 2017-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAG_MEMORYDEVICE_H
+#define DIAG_MEMORYDEVICE_H
+
+#define DIAG_MD_LOCAL		0
+#define DIAG_MD_LOCAL_LAST	1
+#define DIAG_MD_BRIDGE_BASE	DIAG_MD_LOCAL_LAST
+#define DIAG_MD_MDM		(DIAG_MD_BRIDGE_BASE)
+#define DIAG_MD_MDM2		(DIAG_MD_BRIDGE_BASE + 1)
+#define DIAG_MD_SMUX		(DIAG_MD_BRIDGE_BASE + 2)
+#define DIAG_MD_BRIDGE_LAST	(DIAG_MD_BRIDGE_BASE + 3)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_DIAG_MD_DEV		DIAG_MD_LOCAL_LAST
+#else
+#define NUM_DIAG_MD_DEV		DIAG_MD_BRIDGE_LAST
+#endif
+
+struct diag_buf_tbl_t {
+	unsigned char *buf;
+	int len;
+	int ctx;
+};
+
+struct diag_md_info {
+	int id;
+	int ctx;
+	int mempool;
+	int num_tbl_entries;
+	spinlock_t lock;
+	struct diag_buf_tbl_t *tbl;
+	struct diag_mux_ops *ops;
+};
+
+extern struct diag_md_info diag_md[NUM_DIAG_MD_DEV];
+
+int diag_md_init(void);
+int diag_md_mdm_init(void);
+void diag_md_exit(void);
+void diag_md_mdm_exit(void);
+void diag_md_open_all(void);
+void diag_md_close_all(void);
+int diag_md_register(int id, int ctx, struct diag_mux_ops *ops);
+int diag_md_close_peripheral(int id, uint8_t peripheral);
+int diag_md_write(int id, unsigned char *buf, int len, int ctx);
+int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
+			 struct diag_md_session_t *info);
+#endif
diff -Nruw linux-6.4-fbx/drivers/char/diag./diag_mux.c linux-6.4-fbx/drivers/char/diag/diag_mux.c
--- linux-6.4-fbx/drivers/char/diag./diag_mux.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diag_mux.c	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,243 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/ratelimit.h>
+#include <linux/kmemleak.h>
+
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diag_mux.h"
+#include "diag_usb.h"
+#include "diag_memorydevice.h"
+
+
+struct diag_mux_state_t *diag_mux;
+static struct diag_logger_t usb_logger;
+static struct diag_logger_t md_logger;
+
+static struct diag_logger_ops usb_log_ops = {
+	.open = diag_usb_connect_all,
+	.close = diag_usb_disconnect_all,
+	.queue_read = diag_usb_queue_read,
+	.write = diag_usb_write,
+	.close_peripheral = NULL
+};
+
+static struct diag_logger_ops md_log_ops = {
+	.open = diag_md_open_all,
+	.close = diag_md_close_all,
+	.queue_read = NULL,
+	.write = diag_md_write,
+	.close_peripheral = diag_md_close_peripheral,
+};
+
+int diag_mux_init()
+{
+	diag_mux = kzalloc(sizeof(struct diag_mux_state_t),
+			 GFP_KERNEL);
+	if (!diag_mux)
+		return -ENOMEM;
+	kmemleak_not_leak(diag_mux);
+
+	usb_logger.mode = DIAG_USB_MODE;
+	usb_logger.log_ops = &usb_log_ops;
+
+	md_logger.mode = DIAG_MEMORY_DEVICE_MODE;
+	md_logger.log_ops = &md_log_ops;
+	diag_md_init();
+
+	/*
+	 * Set USB logging as the default logger. This is the mode
+	 * Diag should be in when it initializes.
+	 */
+	diag_mux->usb_ptr = &usb_logger;
+	diag_mux->md_ptr = &md_logger;
+	diag_mux->logger = &usb_logger;
+	diag_mux->mux_mask = 0;
+	diag_mux->mode = DIAG_USB_MODE;
+	return 0;
+}
+
+void diag_mux_exit()
+{
+	kfree(diag_mux);
+}
+
+int diag_mux_register(int proc, int ctx, struct diag_mux_ops *ops)
+{
+	int err = 0;
+	if (!ops)
+		return -EINVAL;
+
+	if (proc < 0 || proc >= NUM_MUX_PROC)
+		return 0;
+
+	/* Register with USB logger */
+	usb_logger.ops[proc] = ops;
+	err = diag_usb_register(proc, ctx, ops);
+	if (err) {
+		pr_err("diag: MUX: unable to register usb operations for proc: %d, err: %d\n",
+		       proc, err);
+		return err;
+	}
+
+	md_logger.ops[proc] = ops;
+	err = diag_md_register(proc, ctx, ops);
+	if (err) {
+		pr_err("diag: MUX: unable to register md operations for proc: %d, err: %d\n",
+		       proc, err);
+		return err;
+	}
+
+	return 0;
+}
+
+int diag_mux_queue_read(int proc)
+{
+	struct diag_logger_t *logger = NULL;
+
+	if (proc < 0 || proc >= NUM_MUX_PROC)
+		return -EINVAL;
+	if (!diag_mux)
+		return -EIO;
+
+	if (diag_mux->mode == DIAG_MULTI_MODE)
+		logger = diag_mux->usb_ptr;
+	else
+		logger = diag_mux->logger;
+
+	if (logger && logger->log_ops && logger->log_ops->queue_read)
+		return logger->log_ops->queue_read(proc);
+
+	return 0;
+}
+
+int diag_mux_write(int proc, unsigned char *buf, int len, int ctx)
+{
+	struct diag_logger_t *logger = NULL;
+	int peripheral;
+
+	if (proc < 0 || proc >= NUM_MUX_PROC)
+		return -EINVAL;
+	if (!diag_mux)
+		return -EIO;
+
+	peripheral = GET_BUF_PERIPHERAL(ctx);
+	if (peripheral > NUM_PERIPHERALS)
+		return -EINVAL;
+
+	if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask)
+		logger = diag_mux->md_ptr;
+	else
+		logger = diag_mux->usb_ptr;
+
+	if (logger && logger->log_ops && logger->log_ops->write)
+		return logger->log_ops->write(proc, buf, len, ctx);
+	return 0;
+}
+
+int diag_mux_close_peripheral(int proc, uint8_t peripheral)
+{
+	struct diag_logger_t *logger = NULL;
+	if (proc < 0 || proc >= NUM_MUX_PROC)
+		return -EINVAL;
+	/* Peripheral should account for Apps data as well */
+	if (peripheral > NUM_PERIPHERALS)
+		return -EINVAL;
+	if (!diag_mux)
+		return -EIO;
+
+	if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask)
+		logger = diag_mux->md_ptr;
+	else
+		logger = diag_mux->logger;
+
+	if (logger && logger->log_ops && logger->log_ops->close_peripheral)
+		return logger->log_ops->close_peripheral(proc, peripheral);
+	return 0;
+}
+
+int diag_mux_switch_logging(int *req_mode, int *peripheral_mask)
+{
+	unsigned int new_mask = 0;
+
+	if (!req_mode)
+		return -EINVAL;
+
+	if (*peripheral_mask <= 0 || *peripheral_mask > DIAG_CON_ALL) {
+		pr_err("diag: mask %d in %s\n", *peripheral_mask, __func__);
+		return -EINVAL;
+	}
+
+	switch (*req_mode) {
+	case DIAG_USB_MODE:
+		new_mask = ~(*peripheral_mask) & diag_mux->mux_mask;
+		if (new_mask != DIAG_CON_NONE)
+			*req_mode = DIAG_MULTI_MODE;
+		break;
+	case DIAG_MEMORY_DEVICE_MODE:
+		new_mask = (*peripheral_mask) | diag_mux->mux_mask;
+		if (new_mask != DIAG_CON_ALL)
+			*req_mode = DIAG_MULTI_MODE;
+		break;
+	default:
+		pr_err("diag: Invalid mode %d in %s\n", *req_mode, __func__);
+		return -EINVAL;
+	}
+
+	switch (diag_mux->mode) {
+	case DIAG_USB_MODE:
+		if (*req_mode == DIAG_MEMORY_DEVICE_MODE) {
+			diag_mux->usb_ptr->log_ops->close();
+			diag_mux->logger = diag_mux->md_ptr;
+			diag_mux->md_ptr->log_ops->open();
+		} else if (*req_mode == DIAG_MULTI_MODE) {
+			diag_mux->md_ptr->log_ops->open();
+			diag_mux->logger = NULL;
+		}
+		break;
+	case DIAG_MEMORY_DEVICE_MODE:
+		if (*req_mode == DIAG_USB_MODE) {
+			diag_mux->md_ptr->log_ops->close();
+			diag_mux->logger = diag_mux->usb_ptr;
+			diag_mux->usb_ptr->log_ops->open();
+		} else if (*req_mode == DIAG_MULTI_MODE) {
+			diag_mux->usb_ptr->log_ops->open();
+			diag_mux->logger = NULL;
+		}
+		break;
+	case DIAG_MULTI_MODE:
+		if (*req_mode == DIAG_USB_MODE) {
+			diag_mux->md_ptr->log_ops->close();
+			diag_mux->logger = diag_mux->usb_ptr;
+		} else if (*req_mode == DIAG_MEMORY_DEVICE_MODE) {
+			diag_mux->usb_ptr->log_ops->close();
+			diag_mux->logger = diag_mux->md_ptr;
+		}
+		break;
+	}
+	diag_mux->mode = *req_mode;
+	diag_mux->mux_mask = new_mask;
+	*peripheral_mask = new_mask;
+	return 0;
+}
diff -Nruw linux-6.4-fbx/drivers/char/diag./diag_mux.h linux-6.4-fbx/drivers/char/diag/diag_mux.h
--- linux-6.4-fbx/drivers/char/diag./diag_mux.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diag_mux.h	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,76 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef DIAG_MUX_H
+#define DIAG_MUX_H
+#include "diagchar.h"
+
+struct diag_mux_state_t {
+	struct diag_logger_t *logger;
+	struct diag_logger_t *usb_ptr;
+	struct diag_logger_t *md_ptr;
+	unsigned int mux_mask;
+	unsigned int mode;
+};
+
+struct diag_mux_ops {
+	int (*open)(int id, int mode);
+	int (*close)(int id, int mode);
+	int (*read_done)(unsigned char *buf, int len, int id);
+	int (*write_done)(unsigned char *buf, int len, int buf_ctx,
+			      int id);
+};
+
+#define DIAG_USB_MODE			0
+#define DIAG_MEMORY_DEVICE_MODE		1
+#define DIAG_NO_LOGGING_MODE		2
+#define DIAG_MULTI_MODE			3
+
+#define DIAG_MUX_LOCAL		0
+#define DIAG_MUX_LOCAL_LAST	1
+#define DIAG_MUX_BRIDGE_BASE	DIAG_MUX_LOCAL_LAST
+#define DIAG_MUX_MDM		(DIAG_MUX_BRIDGE_BASE)
+#define DIAG_MUX_MDM2		(DIAG_MUX_BRIDGE_BASE + 1)
+#define DIAG_MUX_SMUX		(DIAG_MUX_BRIDGE_BASE + 2)
+#define DIAG_MUX_BRIDGE_LAST	(DIAG_MUX_BRIDGE_BASE + 3)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_MUX_PROC		DIAG_MUX_LOCAL_LAST
+#else
+#define NUM_MUX_PROC		DIAG_MUX_BRIDGE_LAST
+#endif
+
+struct diag_logger_ops {
+	void (*open)(void);
+	void (*close)(void);
+	int (*queue_read)(int id);
+	int (*write)(int id, unsigned char *buf, int len, int ctx);
+	int (*close_peripheral)(int id, uint8_t peripheral);
+};
+
+struct diag_logger_t {
+	int mode;
+	struct diag_mux_ops *ops[NUM_MUX_PROC];
+	struct diag_logger_ops *log_ops;
+};
+
+extern struct diag_mux_state_t *diag_mux;
+
+int diag_mux_init(void);
+void diag_mux_exit(void);
+int diag_mux_register(int proc, int ctx, struct diag_mux_ops *ops);
+int diag_mux_queue_read(int proc);
+int diag_mux_write(int proc, unsigned char *buf, int len, int ctx);
+int diag_mux_close_peripheral(int proc, uint8_t peripheral);
+int diag_mux_open_all(struct diag_logger_t *logger);
+int diag_mux_close_all(void);
+int diag_mux_switch_logging(int *new_mode, int *peripheral_mask);
+#endif
diff -Nruw linux-6.4-fbx/drivers/char/diag./diag_usb.c linux-6.4-fbx/drivers/char/diag/diag_usb.c
--- linux-6.4-fbx/drivers/char/diag./diag_usb.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diag_usb.c	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,693 @@
+/* Copyright (c) 2014-2016, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/diagchar.h>
+#include <linux/delay.h>
+#include <linux/kmemleak.h>
+#include <linux/list.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include "diag_usb.h"
+#include "diag_mux.h"
+#include "diagmem.h"
+#include "diag_ipc_logging.h"
+
+#define DIAG_USB_STRING_SZ	10
+#define DIAG_USB_MAX_SIZE	16384
+#ifndef CONFIG_DIAG_OVER_USB
+#define DIAG_LEGACY             "diag"
+#define DIAG_MDM		"diag_mdm"
+#define DIAG_QSC		"diag_qsc"
+#define DIAG_MDM2		"diag_mdm2"
+#endif
+
+struct diag_usb_info diag_usb[NUM_DIAG_USB_DEV] = {
+	{
+		.id = DIAG_USB_LOCAL,
+		.name = DIAG_LEGACY,
+		.enabled = 0,
+		.mempool = POOL_TYPE_MUX_APPS,
+		.hdl = NULL,
+		.ops = NULL,
+		.read_buf = NULL,
+		.read_ptr = NULL,
+		.usb_wq = NULL,
+		.read_cnt = 0,
+		.write_cnt = 0,
+		.max_size = DIAG_USB_MAX_SIZE,
+	},
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	{
+		.id = DIAG_USB_MDM,
+		.name = DIAG_MDM,
+		.enabled = 0,
+		.mempool = POOL_TYPE_MDM_MUX,
+		.hdl = NULL,
+		.ops = NULL,
+		.read_buf = NULL,
+		.read_ptr = NULL,
+		.usb_wq = NULL,
+		.read_cnt = 0,
+		.write_cnt = 0,
+		.max_size = DIAG_USB_MAX_SIZE,
+	},
+	{
+		.id = DIAG_USB_MDM2,
+		.name = DIAG_MDM2,
+		.enabled = 0,
+		.mempool = POOL_TYPE_MDM2_MUX,
+		.hdl = NULL,
+		.ops = NULL,
+		.read_buf = NULL,
+		.read_ptr = NULL,
+		.usb_wq = NULL,
+		.read_cnt = 0,
+		.write_cnt = 0,
+		.max_size = DIAG_USB_MAX_SIZE,
+	},
+	{
+		.id = DIAG_USB_QSC,
+		.name = DIAG_QSC,
+		.enabled = 0,
+		.mempool = POOL_TYPE_QSC_MUX,
+		.hdl = NULL,
+		.ops = NULL,
+		.read_buf = NULL,
+		.read_ptr = NULL,
+		.usb_wq = NULL,
+		.read_cnt = 0,
+		.write_cnt = 0,
+		.max_size = DIAG_USB_MAX_SIZE,
+	}
+#endif
+};
+
+#ifdef CONFIG_DIAG_OVER_USB
+static int diag_usb_buf_tbl_add(struct diag_usb_info *usb_info,
+				unsigned char *buf, uint32_t len, int ctxt)
+{
+	struct list_head *start, *temp;
+	struct diag_usb_buf_tbl_t *entry = NULL;
+
+	list_for_each_safe(start, temp, &usb_info->buf_tbl) {
+		entry = list_entry(start, struct diag_usb_buf_tbl_t, track);
+		if (entry->buf == buf) {
+			atomic_inc(&entry->ref_count);
+			return 0;
+		}
+	}
+
+	/* New buffer, not found in the list */
+	entry = kzalloc(sizeof(struct diag_usb_buf_tbl_t), GFP_ATOMIC);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->buf = buf;
+	entry->ctxt = ctxt;
+	entry->len = len;
+	atomic_set(&entry->ref_count, 1);
+	INIT_LIST_HEAD(&entry->track);
+	list_add_tail(&entry->track, &usb_info->buf_tbl);
+
+	return 0;
+}
+
+static void diag_usb_buf_tbl_remove(struct diag_usb_info *usb_info,
+				    unsigned char *buf)
+{
+	struct list_head *start, *temp;
+	struct diag_usb_buf_tbl_t *entry = NULL;
+
+	list_for_each_safe(start, temp, &usb_info->buf_tbl) {
+		entry = list_entry(start, struct diag_usb_buf_tbl_t, track);
+		if (entry->buf == buf) {
+			DIAG_LOG(DIAG_DEBUG_MUX, "ref_count-- for %pK\n", buf);
+			atomic_dec(&entry->ref_count);
+			/*
+			 * Remove reference from the table if it is the
+			 * only instance of the buffer
+			 */
+			if (atomic_read(&entry->ref_count) == 0)
+				list_del(&entry->track);
+			break;
+		}
+	}
+}
+
+static struct diag_usb_buf_tbl_t *diag_usb_buf_tbl_get(
+				struct diag_usb_info *usb_info,
+				unsigned char *buf)
+{
+	struct list_head *start, *temp;
+	struct diag_usb_buf_tbl_t *entry = NULL;
+
+	list_for_each_safe(start, temp, &usb_info->buf_tbl) {
+		entry = list_entry(start, struct diag_usb_buf_tbl_t, track);
+		if (entry->buf == buf) {
+			DIAG_LOG(DIAG_DEBUG_MUX, "ref_count-- for %pK\n", buf);
+			atomic_dec(&entry->ref_count);
+			return entry;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * This function is called asynchronously when USB is connected and
+ * synchronously when Diag wants to connect to USB explicitly.
+ */
+static void usb_connect(struct diag_usb_info *ch)
+{
+	int err = 0;
+	int num_write = 0;
+	int num_read = 1; /* Only one read buffer for any USB channel */
+
+	if (!ch || !atomic_read(&ch->connected))
+		return;
+
+	num_write = diag_mempools[ch->mempool].poolsize;
+	err = usb_diag_alloc_req(ch->hdl, num_write, num_read);
+	if (err) {
+		pr_err("diag: Unable to allocate usb requests for %s, write: %d read: %d, err: %d\n",
+		       ch->name, num_write, num_read, err);
+		return;
+	}
+
+	if (ch->ops && ch->ops->open) {
+		if (atomic_read(&ch->diag_state)) {
+			ch->ops->open(ch->ctxt, DIAG_USB_MODE);
+		} else {
+			/*
+			 * This case indicates that the USB is connected
+			 * but the logging is still happening in MEMORY
+			 * DEVICE MODE. Continue the logging without
+			 * resetting the buffers.
+			 */
+		}
+	}
+	/* As soon as we open the channel, queue a read */
+	queue_work(ch->usb_wq, &(ch->read_work));
+}
+
+static void usb_connect_work_fn(struct work_struct *work)
+{
+	struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
+						connect_work);
+	usb_connect(ch);
+}
+
+/*
+ * This function is called asynchronously when USB is disconnected
+ * and synchronously when Diag wants to disconnect from USB
+ * explicitly.
+ */
+static void usb_disconnect(struct diag_usb_info *ch)
+{
+	if (!ch)
+		return;
+
+	if (!atomic_read(&ch->connected) &&
+		driver->usb_connected && diag_mask_param())
+		diag_clear_masks(0);
+
+	if (ch && ch->ops && ch->ops->close)
+		ch->ops->close(ch->ctxt, DIAG_USB_MODE);
+}
+
+static void usb_disconnect_work_fn(struct work_struct *work)
+{
+	struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
+						disconnect_work);
+	usb_disconnect(ch);
+}
+
+static void usb_read_work_fn(struct work_struct *work)
+{
+	int err = 0;
+	unsigned long flags;
+	struct diag_request *req = NULL;
+	struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
+						read_work);
+	if (!ch)
+		return;
+
+	if (!atomic_read(&ch->connected) || !ch->enabled ||
+	    atomic_read(&ch->read_pending) || !atomic_read(&ch->diag_state)) {
+		pr_debug_ratelimited("diag: Discarding USB read, ch: %s e: %d, c: %d, p: %d, d: %d\n",
+				     ch->name, ch->enabled,
+				     atomic_read(&ch->connected),
+				     atomic_read(&ch->read_pending),
+				     atomic_read(&ch->diag_state));
+		return;
+	}
+
+	spin_lock_irqsave(&ch->lock, flags);
+	req = ch->read_ptr;
+	if (req) {
+		atomic_set(&ch->read_pending, 1);
+		req->buf = ch->read_buf;
+		req->length = USB_MAX_OUT_BUF;
+		err = usb_diag_read(ch->hdl, req);
+		if (err) {
+			pr_debug("diag: In %s, error in reading from USB %s, err: %d\n",
+				 __func__, ch->name, err);
+			atomic_set(&ch->read_pending, 0);
+			queue_work(ch->usb_wq, &(ch->read_work));
+		}
+	} else {
+		pr_err_ratelimited("diag: In %s invalid read req\n", __func__);
+	}
+	spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+static void usb_read_done_work_fn(struct work_struct *work)
+{
+	struct diag_request *req = NULL;
+	struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
+						read_done_work);
+	if (!ch)
+		return;
+
+	/*
+	 * USB is disconnected/Disabled before the previous read completed.
+	 * Discard the packet and don't do any further processing.
+	 */
+	if (!atomic_read(&ch->connected) || !ch->enabled ||
+	    !atomic_read(&ch->diag_state))
+		return;
+
+	req = ch->read_ptr;
+	ch->read_cnt++;
+
+	if (ch->ops && ch->ops->read_done && req->status >= 0)
+		ch->ops->read_done(req->buf, req->actual, ch->ctxt);
+}
+
+static void diag_usb_write_done(struct diag_usb_info *ch,
+				struct diag_request *req)
+{
+	int ctxt = 0;
+	int len = 0;
+	struct diag_usb_buf_tbl_t *entry = NULL;
+	unsigned char *buf = NULL;
+	unsigned long flags;
+
+	if (!ch || !req)
+		return;
+
+	ch->write_cnt++;
+	entry = diag_usb_buf_tbl_get(ch, req->context);
+	if (!entry) {
+		pr_err_ratelimited("diag: In %s, unable to find entry %pK in the table\n",
+				   __func__, req->context);
+		return;
+	}
+	if (atomic_read(&entry->ref_count) != 0) {
+		DIAG_LOG(DIAG_DEBUG_MUX, "partial write_done ref %d\n",
+			 atomic_read(&entry->ref_count));
+		diag_ws_on_copy_complete(DIAG_WS_MUX);
+		diagmem_free(driver, req, ch->mempool);
+		return;
+	}
+	DIAG_LOG(DIAG_DEBUG_MUX, "full write_done, ctxt: %d\n",
+		 ctxt);
+	spin_lock_irqsave(&ch->write_lock, flags);
+	list_del(&entry->track);
+	ctxt = entry->ctxt;
+	buf = entry->buf;
+	len = entry->len;
+	kfree(entry);
+	diag_ws_on_copy_complete(DIAG_WS_MUX);
+
+	if (ch->ops && ch->ops->write_done)
+		ch->ops->write_done(buf, len, ctxt, DIAG_USB_MODE);
+	buf = NULL;
+	len = 0;
+	ctxt = 0;
+	spin_unlock_irqrestore(&ch->write_lock, flags);
+	diagmem_free(driver, req, ch->mempool);
+}
+
+/*
+ * Callback registered with the USB diag core via usb_diag_open() in
+ * diag_usb_register().  @priv carries the channel index.  Connect,
+ * disconnect and read completion are deferred to the per-channel
+ * workqueue; only write completion is handled inline.
+ */
+static void diag_usb_notifier(void *priv, unsigned event,
+			      struct diag_request *d_req)
+{
+	int id = 0;
+	unsigned long flags;
+	struct diag_usb_info *usb_info = NULL;
+
+	id = (int)(uintptr_t)priv;
+	if (id < 0 || id >= NUM_DIAG_USB_DEV)
+		return;
+	usb_info = &diag_usb[id];
+
+	switch (event) {
+	case USB_DIAG_CONNECT:
+		/* Refresh the max transfer size before announcing the link. */
+		usb_info->max_size = usb_diag_request_size(usb_info->hdl);
+		atomic_set(&usb_info->connected, 1);
+		pr_info("diag: USB channel %s connected\n", usb_info->name);
+		queue_work(usb_info->usb_wq,
+			   &usb_info->connect_work);
+		break;
+	case USB_DIAG_DISCONNECT:
+		atomic_set(&usb_info->connected, 0);
+		pr_info("diag: USB channel %s disconnected\n", usb_info->name);
+		queue_work(usb_info->usb_wq,
+			   &usb_info->disconnect_work);
+		break;
+	case USB_DIAG_READ_DONE:
+		/* read_ptr is published under ->lock for the worker to pick up. */
+		spin_lock_irqsave(&usb_info->lock, flags);
+		usb_info->read_ptr = d_req;
+		spin_unlock_irqrestore(&usb_info->lock, flags);
+		atomic_set(&usb_info->read_pending, 0);
+		queue_work(usb_info->usb_wq,
+			   &usb_info->read_done_work);
+		break;
+	case USB_DIAG_WRITE_DONE:
+		diag_usb_write_done(usb_info, d_req);
+		break;
+	default:
+		pr_err_ratelimited("diag: Unknown event from USB diag\n");
+		break;
+	}
+}
+
+/*
+ * Schedule an asynchronous USB read on channel @id.
+ * Returns 0 on success, -EINVAL for an out-of-range id.
+ */
+int diag_usb_queue_read(int id)
+{
+	struct diag_usb_info *usb_info;
+
+	if (id >= 0 && id < NUM_DIAG_USB_DEV) {
+		usb_info = &diag_usb[id];
+		queue_work(usb_info->usb_wq, &usb_info->read_work);
+		return 0;
+	}
+	pr_err_ratelimited("diag: In %s, Incorrect id %d\n", __func__, id);
+	return -EINVAL;
+}
+
+/*
+ * Write @buf of @len bytes to USB in chunks of at most ->max_size bytes.
+ * Each chunk takes its own diag_request from the channel mempool and
+ * registers @buf in the buffer table (ref-counted; see
+ * diag_usb_write_done()), so the mux owner is notified only after the
+ * final chunk completes.  Returns 0 on success or a negative errno.
+ *
+ * NOTE(review): ->write_lock is held with interrupts disabled across the
+ * whole chunking loop, including usb_diag_write() — confirm that call
+ * never sleeps in this configuration.
+ */
+static int diag_usb_write_ext(struct diag_usb_info *usb_info,
+			      unsigned char *buf, int len, int ctxt)
+{
+	int err = 0;
+	int write_len = 0;
+	int bytes_remaining = len;
+	int offset = 0;
+	unsigned long flags;
+	struct diag_request *req = NULL;
+
+	if (!usb_info || !buf || len <= 0) {
+		pr_err_ratelimited("diag: In %s, usb_info: %pK buf: %pK, len: %d\n",
+				   __func__, usb_info, buf, len);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&usb_info->write_lock, flags);
+	while (bytes_remaining > 0) {
+		req = diagmem_alloc(driver, sizeof(struct diag_request),
+				    usb_info->mempool);
+		if (!req) {
+			/*
+			 * This should never happen. It either means that we are
+			 * trying to write more buffers than the max supported
+			 * by this particular diag USB channel at any given
+			 * instance, or the previous write ptrs are stuck in
+			 * the USB layer.
+			 */
+			pr_err_ratelimited("diag: In %s, cannot retrieve USB write ptrs for USB channel %s\n",
+					   __func__, usb_info->name);
+			spin_unlock_irqrestore(&usb_info->write_lock, flags);
+			return -ENOMEM;
+		}
+
+		/* Clamp this chunk to the channel's negotiated maximum. */
+		write_len = (bytes_remaining > usb_info->max_size) ?
+				usb_info->max_size : (bytes_remaining);
+
+		req->buf = buf + offset;
+		req->length = write_len;
+		/* context identifies the parent buffer in write_done. */
+		req->context = (void *)buf;
+
+		if (!usb_info->hdl || !atomic_read(&usb_info->connected) ||
+		    !atomic_read(&usb_info->diag_state)) {
+			pr_debug_ratelimited("diag: USB ch %s is not connected\n",
+					     usb_info->name);
+			diagmem_free(driver, req, usb_info->mempool);
+			spin_unlock_irqrestore(&usb_info->write_lock, flags);
+			return -ENODEV;
+		}
+
+		/* One table registration per in-flight chunk of @buf. */
+		if (diag_usb_buf_tbl_add(usb_info, buf, len, ctxt)) {
+			diagmem_free(driver, req, usb_info->mempool);
+			spin_unlock_irqrestore(&usb_info->write_lock, flags);
+			return -ENOMEM;
+		}
+
+		diag_ws_on_read(DIAG_WS_MUX, len);
+		err = usb_diag_write(usb_info->hdl, req);
+		diag_ws_on_copy(DIAG_WS_MUX);
+		if (err) {
+			pr_err_ratelimited("diag: In %s, error writing to usb channel %s, err: %d\n",
+					   __func__, usb_info->name, err);
+			DIAG_LOG(DIAG_DEBUG_MUX,
+				 "ERR! unable to write t usb, err: %d\n", err);
+			diag_ws_on_copy_fail(DIAG_WS_MUX);
+			diag_usb_buf_tbl_remove(usb_info, buf);
+			diagmem_free(driver, req, usb_info->mempool);
+			spin_unlock_irqrestore(&usb_info->write_lock, flags);
+			return err;
+		}
+		offset += write_len;
+		bytes_remaining -= write_len;
+		DIAG_LOG(DIAG_DEBUG_MUX,
+			 "bytes_remaining: %d write_len: %d, len: %d\n",
+			 bytes_remaining, write_len, len);
+	}
+	DIAG_LOG(DIAG_DEBUG_MUX, "done writing!");
+	spin_unlock_irqrestore(&usb_info->write_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Mux-layer entry point: push @buf (@len bytes) out on USB channel @id.
+ * Buffers larger than the channel's max_size are handed to
+ * diag_usb_write_ext() for chunked transfer.  The buffer stays tracked
+ * in the channel's buffer table until diag_usb_write_done() reports
+ * completion.  Returns 0 on success or a negative errno.
+ */
+int diag_usb_write(int id, unsigned char *buf, int len, int ctxt)
+{
+	int err = 0;
+	struct diag_request *req = NULL;
+	struct diag_usb_info *usb_info = NULL;
+	unsigned long flags;
+
+	if (id < 0 || id >= NUM_DIAG_USB_DEV) {
+		pr_err_ratelimited("diag: In %s, Incorrect id %d\n",
+				   __func__, id);
+		return -EINVAL;
+	}
+
+	usb_info = &diag_usb[id];
+
+	if (len > usb_info->max_size) {
+		DIAG_LOG(DIAG_DEBUG_MUX, "len: %d, max_size: %d\n",
+			 len, usb_info->max_size);
+		return diag_usb_write_ext(usb_info, buf, len, ctxt);
+	}
+
+	req = diagmem_alloc(driver, sizeof(struct diag_request),
+			    usb_info->mempool);
+	if (!req) {
+		/*
+		 * This should never happen. It either means that we are
+		 * trying to write more buffers than the max supported by
+		 * this particular diag USB channel at any given instance,
+		 * or the previous write ptrs are stuck in the USB layer.
+		 */
+		pr_err_ratelimited("diag: In %s, cannot retrieve USB write ptrs for USB channel %s\n",
+				   __func__, usb_info->name);
+		return -ENOMEM;
+	}
+
+	req->buf = buf;
+	req->length = len;
+	/* context identifies the buffer again in write_done. */
+	req->context = (void *)buf;
+
+	if (!usb_info->hdl || !atomic_read(&usb_info->connected) ||
+	    !atomic_read(&usb_info->diag_state)) {
+		pr_debug_ratelimited("diag: USB ch %s is not connected\n",
+				     usb_info->name);
+		diagmem_free(driver, req, usb_info->mempool);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&usb_info->write_lock, flags);
+	if (diag_usb_buf_tbl_add(usb_info, buf, len, ctxt)) {
+		DIAG_LOG(DIAG_DEBUG_MUX,
+					"ERR! unable to add buf %pK to table\n",
+			 buf);
+		diagmem_free(driver, req, usb_info->mempool);
+		spin_unlock_irqrestore(&usb_info->write_lock, flags);
+		return -ENOMEM;
+	}
+
+	diag_ws_on_read(DIAG_WS_MUX, len);
+	err = usb_diag_write(usb_info->hdl, req);
+	diag_ws_on_copy(DIAG_WS_MUX);
+	if (err) {
+		pr_err_ratelimited("diag: In %s, error writing to usb channel %s, err: %d\n",
+				   __func__, usb_info->name, err);
+		diag_ws_on_copy_fail(DIAG_WS_MUX);
+		DIAG_LOG(DIAG_DEBUG_MUX,
+			 "ERR! unable to write t usb, err: %d\n", err);
+		/* Roll back the table entry; no write_done will arrive. */
+		diag_usb_buf_tbl_remove(usb_info, buf);
+		diagmem_free(driver, req, usb_info->mempool);
+	}
+	spin_unlock_irqrestore(&usb_info->write_lock, flags);
+
+	return err;
+}
+
+/*
+ * This function performs USB connect operations wrt Diag synchronously.
+ * It doesn't translate to an actual USB connect. This is used when Diag
+ * switches logging to USB mode and wants to mimic a USB connection.
+ */
+/* Mark every enabled channel logically connected and kick its connect path. */
+void diag_usb_connect_all(void)
+{
+	int i;
+
+	for (i = 0; i < NUM_DIAG_USB_DEV; i++) {
+		struct diag_usb_info *usb_info = &diag_usb[i];
+
+		if (usb_info->enabled) {
+			atomic_set(&usb_info->diag_state, 1);
+			usb_connect(usb_info);
+		}
+	}
+}
+
+/*
+ * This function performs USB disconnect operations wrt Diag synchronously.
+ * It doesn't translate to an actual USB disconnect. This is used when Diag
+ * switches logging away from USB mode and wants to mimic a USB disconnect.
+ */
+/* Mark every enabled channel logically disconnected and run its teardown. */
+void diag_usb_disconnect_all(void)
+{
+	int i;
+
+	for (i = 0; i < NUM_DIAG_USB_DEV; i++) {
+		struct diag_usb_info *usb_info = &diag_usb[i];
+
+		if (usb_info->enabled) {
+			atomic_set(&usb_info->diag_state, 0);
+			usb_disconnect(usb_info);
+		}
+	}
+}
+
+/*
+ * Register a mux client with USB channel @id: allocate the read buffers,
+ * create the per-channel workqueue and open the underlying USB diag
+ * channel.  Returns 0 on success, -EIO on bad arguments, -ENOMEM on any
+ * allocation/open failure.
+ *
+ * Fixes: strlcpy/strlcat were bounded by DIAG_USB_STRING_SZ and
+ * sizeof(ch->name) respectively, which could truncate the "DIAG_USB_"
+ * prefix or the concatenated name despite wq_name being large enough —
+ * bound both by sizeof(wq_name).  The error path now clears the freed
+ * pointers so a later diag_usb_exit() cannot double-free them.
+ */
+int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops)
+{
+	struct diag_usb_info *ch = NULL;
+	unsigned char wq_name[DIAG_USB_NAME_SZ + DIAG_USB_STRING_SZ];
+
+	if (id < 0 || id >= NUM_DIAG_USB_DEV) {
+		pr_err("diag: Unable to register with USB, id: %d\n", id);
+		return -EIO;
+	}
+
+	if (!ops) {
+		pr_err("diag: Invalid operations for USB\n");
+		return -EIO;
+	}
+
+	ch = &diag_usb[id];
+	ch->ops = ops;
+	ch->ctxt = ctxt;
+	spin_lock_init(&ch->lock);
+	spin_lock_init(&ch->write_lock);
+	ch->read_buf = kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL);
+	if (!ch->read_buf)
+		goto err;
+	ch->read_ptr = kzalloc(sizeof(struct diag_request), GFP_KERNEL);
+	if (!ch->read_ptr)
+		goto err;
+	atomic_set(&ch->connected, 0);
+	atomic_set(&ch->read_pending, 0);
+	/*
+	 * This function is called when the mux registers with Diag-USB.
+	 * The registration happens during boot up and Diag always starts
+	 * in USB mode. Set the state to 1.
+	 */
+	atomic_set(&ch->diag_state, 1);
+	INIT_LIST_HEAD(&ch->buf_tbl);
+	diagmem_init(driver, ch->mempool);
+	INIT_WORK(&(ch->read_work), usb_read_work_fn);
+	INIT_WORK(&(ch->read_done_work), usb_read_done_work_fn);
+	INIT_WORK(&(ch->connect_work), usb_connect_work_fn);
+	INIT_WORK(&(ch->disconnect_work), usb_disconnect_work_fn);
+	/* Bound both string ops by the full destination buffer. */
+	strlcpy(wq_name, "DIAG_USB_", sizeof(wq_name));
+	strlcat(wq_name, ch->name, sizeof(wq_name));
+	ch->usb_wq = create_singlethread_workqueue(wq_name);
+	if (!ch->usb_wq)
+		goto err;
+	ch->hdl = usb_diag_open(ch->name, (void *)(uintptr_t)id,
+				diag_usb_notifier);
+	if (IS_ERR(ch->hdl)) {
+		pr_err("diag: Unable to open USB channel %s\n", ch->name);
+		goto err;
+	}
+	ch->enabled = 1;
+	pr_debug("diag: Successfully registered USB %s\n", ch->name);
+	return 0;
+
+err:
+	if (ch->usb_wq)
+		destroy_workqueue(ch->usb_wq);
+	ch->usb_wq = NULL;
+	ch->hdl = NULL;
+	kfree(ch->read_ptr);
+	ch->read_ptr = NULL;
+	kfree(ch->read_buf);
+	ch->read_buf = NULL;
+	return -ENOMEM;
+}
+
+/*
+ * Tear down USB channel @id: detach the mux client, reset state and
+ * counters, release the mempool, close the USB handle, destroy the
+ * workqueue and free the read buffers.  Freed pointers are cleared so
+ * the slot can be safely re-registered.
+ */
+void diag_usb_exit(int id)
+{
+	struct diag_usb_info *ch = NULL;
+
+	if (id < 0 || id >= NUM_DIAG_USB_DEV) {
+		pr_err("diag: In %s, incorrect id %d\n", __func__, id);
+		return;
+	}
+
+	ch = &diag_usb[id];
+	ch->ops = NULL;
+	atomic_set(&ch->connected, 0);
+	atomic_set(&ch->read_pending, 0);
+	atomic_set(&ch->diag_state, 0);
+	ch->enabled = 0;
+	ch->ctxt = 0;
+	ch->read_cnt = 0;
+	ch->write_cnt = 0;
+	diagmem_exit(driver, ch->mempool);
+	ch->mempool = 0;
+	if (ch->hdl) {
+		usb_diag_close(ch->hdl);
+		ch->hdl = NULL;
+	}
+	if (ch->usb_wq)
+		destroy_workqueue(ch->usb_wq);
+	kfree(ch->read_ptr);
+	ch->read_ptr = NULL;
+	kfree(ch->read_buf);
+	ch->read_buf = NULL;
+}
+#endif
diff -Nruw linux-6.4-fbx/drivers/char/diag./diag_usb.h linux-6.4-fbx/drivers/char/diag/diag_usb.h
--- linux-6.4-fbx/drivers/char/diag./diag_usb.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diag_usb.h	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,110 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGUSB_H
+#define DIAGUSB_H
+
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include "diagchar.h"
+#include "diag_mux.h"
+
+#define DIAG_USB_LOCAL		0
+#define DIAG_USB_LOCAL_LAST	1
+#define DIAG_USB_BRIDGE_BASE	DIAG_USB_LOCAL_LAST
+#define DIAG_USB_MDM		(DIAG_USB_BRIDGE_BASE)
+#define DIAG_USB_MDM2		(DIAG_USB_BRIDGE_BASE + 1)
+#define DIAG_USB_QSC		(DIAG_USB_BRIDGE_BASE + 2)
+#define DIAG_USB_BRIDGE_LAST	(DIAG_USB_BRIDGE_BASE + 3)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_DIAG_USB_DEV	DIAG_USB_LOCAL_LAST
+#else
+#define NUM_DIAG_USB_DEV	DIAG_USB_BRIDGE_LAST
+#endif
+
+#define DIAG_USB_NAME_SZ	24
+#define DIAG_USB_GET_NAME(x)	(diag_usb[x].name)
+
+#define DIAG_USB_MODE		0
+
+struct diag_usb_buf_tbl_t {
+	struct list_head track;	/* node in diag_usb_info->buf_tbl */
+	unsigned char *buf;	/* client buffer being written out */
+	uint32_t len;		/* total length of @buf */
+	atomic_t ref_count;	/* outstanding writes for @buf (see write_done) */
+	int ctxt;		/* owner context passed back via ops->write_done */
+};
+
+struct diag_usb_info {
+	int id;					/* channel index — presumably diag_usb[] slot */
+	int ctxt;				/* mux context from diag_usb_register() */
+	char name[DIAG_USB_NAME_SZ];
+	atomic_t connected;			/* physical link state (USB notifier) */
+	atomic_t diag_state;			/* logical Diag-over-USB on/off */
+	atomic_t read_pending;			/* cleared on USB_DIAG_READ_DONE */
+	int enabled;				/* set once registration succeeds */
+	int mempool;				/* diagmem pool id for diag_request allocs */
+	int max_size;				/* from usb_diag_request_size() on connect */
+	struct list_head buf_tbl;		/* outstanding write buffers */
+	unsigned long read_cnt;
+	unsigned long write_cnt;
+	spinlock_t lock;			/* guards read_ptr */
+	spinlock_t write_lock;			/* guards buf_tbl and the write path */
+	struct usb_diag_ch *hdl;		/* handle from usb_diag_open() */
+	struct diag_mux_ops *ops;		/* client callbacks */
+	unsigned char *read_buf;		/* USB_MAX_OUT_BUF staging buffer */
+	struct diag_request *read_ptr;		/* last completed read request */
+	struct work_struct read_work;
+	struct work_struct read_done_work;
+	struct work_struct connect_work;
+	struct work_struct disconnect_work;
+	struct workqueue_struct *usb_wq;	/* per-channel singlethread workqueue */
+};
+
+extern struct diag_usb_info diag_usb[NUM_DIAG_USB_DEV];
+#ifdef CONFIG_DIAG_OVER_USB
+int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops);
+int diag_usb_queue_read(int id);
+int diag_usb_write(int id, unsigned char *buf, int len, int ctxt);
+void diag_usb_connect_all(void);
+void diag_usb_disconnect_all(void);
+void diag_usb_exit(int id);
+#else /* !CONFIG_DIAG_OVER_USB: no-op stubs so callers need no ifdefs */
+static inline int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops)
+{
+	return 0;
+}
+static inline int diag_usb_queue_read(int id)
+{
+	return 0;
+}
+static inline int diag_usb_write(int id, unsigned char *buf, int len, int ctxt)
+{
+	return 0;
+}
+static inline void diag_usb_connect_all(void)
+{
+	return;
+}
+static inline void diag_usb_disconnect_all(void)
+{
+	return;
+}
+static inline void diag_usb_exit(int id)
+{
+	return;
+}
+#endif /* CONFIG_DIAG_OVER_USB */
+
+#endif
diff -Nruw linux-6.4-fbx/drivers/char/diag./diagchar.h linux-6.4-fbx/drivers/char/diag/diagchar.h
--- linux-6.4-fbx/drivers/char/diag./diagchar.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diagchar.h	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,647 @@
+/* Copyright (c) 2008-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGCHAR_H
+#define DIAGCHAR_H
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/ktime.h>
+#include <linux/device.h>
+#include <asm/atomic.h>
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#include "diagfwd_bridge.h"
+#endif
+
+#define THRESHOLD_CLIENT_LIMIT	50
+
+/* Size of the USB buffers used for read and write*/
+#define USB_MAX_OUT_BUF 4096
+#define APPS_BUF_SIZE	4096
+#define IN_BUF_SIZE		16384
+#define MAX_SYNC_OBJ_NAME_SIZE	32
+
+#define DIAG_MAX_REQ_SIZE	(16 * 1024)
+#define DIAG_MAX_RSP_SIZE	(16 * 1024)
+#define APF_DIAG_PADDING	256
+/*
+ * In the worst case, the HDLC buffer can be at most twice the size of the
+ * original packet. Add 3 bytes for a 16 bit CRC (2 bytes) and a delimiter
+ * (1 byte).
+ */
+#define DIAG_MAX_HDLC_BUF_SIZE	((DIAG_MAX_REQ_SIZE * 2) + 3)
+
+/* The header of callback data type has remote processor token (of type int) */
+#define CALLBACK_HDR_SIZE	(sizeof(int))
+#define CALLBACK_BUF_SIZE	(DIAG_MAX_REQ_SIZE + CALLBACK_HDR_SIZE)
+
+#define MAX_SSID_PER_RANGE	200
+
+#define ALL_PROC		-1
+
+#define REMOTE_DATA		4
+
+#define USER_SPACE_DATA		16384
+
+#define DIAG_CTRL_MSG_LOG_MASK	9
+#define DIAG_CTRL_MSG_EVENT_MASK	10
+#define DIAG_CTRL_MSG_F3_MASK	11
+#define CONTROL_CHAR	0x7E
+
+#define DIAG_CON_APSS		(0x0001)	/* Bit mask for APSS */
+#define DIAG_CON_MPSS		(0x0002)	/* Bit mask for MPSS */
+#define DIAG_CON_LPASS		(0x0004)	/* Bit mask for LPASS */
+#define DIAG_CON_WCNSS		(0x0008)	/* Bit mask for WCNSS */
+#define DIAG_CON_SENSORS	(0x0010)	/* Bit mask for Sensors */
+#define DIAG_CON_NONE		(0x0000)	/* Bit mask for No SS*/
+#define DIAG_CON_ALL		(DIAG_CON_APSS | DIAG_CON_MPSS \
+				| DIAG_CON_LPASS | DIAG_CON_WCNSS \
+				| DIAG_CON_SENSORS)
+
+#define DIAG_STM_MODEM	0x01
+#define DIAG_STM_LPASS	0x02
+#define DIAG_STM_WCNSS	0x04
+#define DIAG_STM_APPS	0x08
+#define DIAG_STM_SENSORS 0x10
+
+#define INVALID_PID		-1
+#define DIAG_CMD_FOUND		1
+#define DIAG_CMD_NOT_FOUND	0
+#define DIAG_CMD_POLLING	1
+#define DIAG_CMD_NOT_POLLING	0
+#define DIAG_CMD_ADD		1
+#define DIAG_CMD_REMOVE		0
+
+#define DIAG_CMD_VERSION	0
+#define DIAG_CMD_ERROR		0x13
+#define DIAG_CMD_DOWNLOAD	0x3A
+#define DIAG_CMD_DIAG_SUBSYS	0x4B
+#define DIAG_CMD_LOG_CONFIG	0x73
+#define DIAG_CMD_LOG_ON_DMND	0x78
+#define DIAG_CMD_EXT_BUILD	0x7c
+#define DIAG_CMD_MSG_CONFIG	0x7D
+#define DIAG_CMD_GET_EVENT_MASK	0x81
+#define DIAG_CMD_SET_EVENT_MASK	0x82
+#define DIAG_CMD_EVENT_TOGGLE	0x60
+#define DIAG_CMD_NO_SUBSYS	0xFF
+#define DIAG_CMD_STATUS	0x0C
+#define DIAG_SS_WCDMA	0x04
+#define DIAG_CMD_QUERY_CALL	0x0E
+#define DIAG_SS_GSM	0x08
+#define DIAG_CMD_QUERY_TMC	0x02
+#define DIAG_SS_TDSCDMA	0x57
+#define DIAG_CMD_TDSCDMA_STATUS	0x0E
+#define DIAG_CMD_DIAG_SUBSYS_DELAY 0x80
+
+#define DIAG_SS_DIAG		0x12
+#define DIAG_SS_PARAMS		0x32
+#define DIAG_SS_FILE_READ_MODEM 0x0816
+#define DIAG_SS_FILE_READ_ADSP  0x0E10
+#define DIAG_SS_FILE_READ_WCNSS 0x141F
+#define DIAG_SS_FILE_READ_SLPI 0x01A18
+#define DIAG_SS_FILE_READ_APPS 0x020F
+
+#define DIAG_DIAG_MAX_PKT_SZ	0x55
+#define DIAG_DIAG_STM		0x214
+#define DIAG_DIAG_POLL		0x03
+#define DIAG_DEL_RSP_WRAP	0x04
+#define DIAG_DEL_RSP_WRAP_CNT	0x05
+#define DIAG_EXT_MOBILE_ID	0x06
+#define DIAG_GET_TIME_API	0x21B
+#define DIAG_SET_TIME_API	0x21C
+#define DIAG_SWITCH_COMMAND	0x081B
+#define DIAG_BUFFERING_MODE	0x080C
+
+#define DIAG_CMD_OP_LOG_DISABLE		0
+#define DIAG_CMD_OP_GET_LOG_RANGE	1
+#define DIAG_CMD_OP_SET_LOG_MASK	3
+#define DIAG_CMD_OP_GET_LOG_MASK	4
+
+#define DIAG_CMD_OP_GET_SSID_RANGE	1
+#define DIAG_CMD_OP_GET_BUILD_MASK	2
+#define DIAG_CMD_OP_GET_MSG_MASK	3
+#define DIAG_CMD_OP_SET_MSG_MASK	4
+#define DIAG_CMD_OP_SET_ALL_MSG_MASK	5
+
+#define DIAG_CMD_OP_GET_MSG_ALLOC       0x33
+#define DIAG_CMD_OP_GET_MSG_DROP	0x30
+#define DIAG_CMD_OP_RESET_MSG_STATS	0x2F
+#define DIAG_CMD_OP_GET_LOG_ALLOC	0x31
+#define DIAG_CMD_OP_GET_LOG_DROP	0x2C
+#define DIAG_CMD_OP_RESET_LOG_STATS	0x2B
+#define DIAG_CMD_OP_GET_EVENT_ALLOC	0x32
+#define DIAG_CMD_OP_GET_EVENT_DROP	0x2E
+#define DIAG_CMD_OP_RESET_EVENT_STATS	0x2D
+
+#define DIAG_CMD_OP_HDLC_DISABLE	0x218
+
+#define BAD_PARAM_RESPONSE_MESSAGE 20
+
+#define PERSIST_TIME_SUCCESS 0
+#define PERSIST_TIME_FAILURE 1
+#define PERSIST_TIME_NOT_SUPPORTED 2
+
+#define MODE_CMD	41
+#define RESET_ID	2
+
+#define PKT_DROP	0
+#define PKT_ALLOC	1
+#define PKT_RESET	2
+
+#define FEATURE_MASK_LEN	2
+
+#define DIAG_MD_NONE			0
+#define DIAG_MD_PERIPHERAL		1
+
+/*
+ * The status bit masks when received in a signal handler are to be
+ * used in conjunction with the peripheral list bit mask to determine the
+ * status for a peripheral. For instance, 0x00010002 would denote an open
+ * status on the MPSS
+ */
+#define DIAG_STATUS_OPEN (0x00010000)	/* DCI channel open status mask   */
+#define DIAG_STATUS_CLOSED (0x00020000)	/* DCI channel closed status mask */
+
+#define MODE_NONREALTIME	0
+#define MODE_REALTIME		1
+#define MODE_UNKNOWN		2
+
+#define DIAG_BUFFERING_MODE_STREAMING	0
+#define DIAG_BUFFERING_MODE_THRESHOLD	1
+#define DIAG_BUFFERING_MODE_CIRCULAR	2
+
+#define DIAG_MIN_WM_VAL		0
+#define DIAG_MAX_WM_VAL		100
+
+#define DEFAULT_LOW_WM_VAL	15
+#define DEFAULT_HIGH_WM_VAL	85
+
+#define TYPE_DATA		0
+#define TYPE_CNTL		1
+#define TYPE_DCI		2
+#define TYPE_CMD		3
+#define TYPE_DCI_CMD		4
+#define NUM_TYPES		5
+
+#define PERIPHERAL_MODEM	0
+#define PERIPHERAL_LPASS	1
+#define PERIPHERAL_WCNSS	2
+#define PERIPHERAL_SENSORS	3
+#define NUM_PERIPHERALS		4
+#define APPS_DATA		(NUM_PERIPHERALS)
+
+/* Number of sessions possible in Memory Device Mode. +1 for Apps data */
+#define NUM_MD_SESSIONS		(NUM_PERIPHERALS + 1)
+
+#define MD_PERIPHERAL_MASK(x)	(1 << x)
+
+/*
+ * The number of STM processors includes all the peripherals plus the
+ * apps processor; 1 is added below to account for apps.
+ */
+#define NUM_STM_PROCESSORS	(NUM_PERIPHERALS + 1)
+/*
+ * Indicates number of peripherals that can support DCI and Apps
+ * processor. This doesn't mean that a peripheral has the
+ * feature.
+ */
+#define NUM_DCI_PERIPHERALS	(NUM_PERIPHERALS + 1)
+
+#define DIAG_PROC_DCI			1
+#define DIAG_PROC_MEMORY_DEVICE		2
+
+/* Flags to vote the DCI or Memory device process up or down
+   when it becomes active or inactive */
+#define VOTE_DOWN			0
+#define VOTE_UP				1
+
+#define DIAG_TS_SIZE	50
+
+#define DIAG_MDM_BUF_SIZE	2048
+/* The Maximum request size is 2k + DCI header + footer (6 bytes) */
+#define DIAG_MDM_DCI_BUF_SIZE	(2048 + 6)
+
+#define DIAG_LOCAL_PROC	0
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+/* Local Processor only */
+#define DIAG_NUM_PROC	1
+#else
+/* Local Processor + Remote Devices */
+#define DIAG_NUM_PROC	(1 + NUM_REMOTE_DEV)
+#endif
+
+#define DIAG_WS_DCI		0
+#define DIAG_WS_MUX		1
+
+#define DIAG_DATA_TYPE		1
+#define DIAG_CNTL_TYPE		2
+#define DIAG_DCI_TYPE		3
+
+/* List of remote processor supported */
+enum remote_procs {
+	MDM = 1,
+	MDM2 = 2,
+	QSC = 5,
+};
+
+struct diag_pkt_header_t {
+	uint8_t cmd_code;
+	uint8_t subsys_id;
+	uint16_t subsys_cmd_code;
+} __packed;
+
+struct diag_cmd_ext_mobile_rsp_t {
+	struct diag_pkt_header_t header;
+	uint8_t version;
+	uint8_t padding[3];
+	uint32_t family;
+	uint32_t chip_id;
+} __packed;
+
+struct diag_cmd_time_sync_query_req_t {
+	struct diag_pkt_header_t header;
+	uint8_t version;
+};
+
+struct diag_cmd_time_sync_query_rsp_t {
+	struct diag_pkt_header_t header;
+	uint8_t version;
+	uint8_t time_api;
+};
+
+struct diag_cmd_time_sync_switch_req_t {
+	struct diag_pkt_header_t header;
+	uint8_t version;
+	uint8_t time_api;
+	uint8_t persist_time;
+};
+
+struct diag_cmd_time_sync_switch_rsp_t {
+	struct diag_pkt_header_t header;
+	uint8_t version;
+	uint8_t time_api;
+	uint8_t time_api_status;
+	uint8_t persist_time_status;
+};
+
+struct diag_cmd_reg_entry_t {
+	uint16_t cmd_code;
+	uint16_t subsys_id;
+	uint16_t cmd_code_lo;
+	uint16_t cmd_code_hi;
+} __packed;
+
+struct diag_cmd_reg_t {
+	struct list_head link;
+	struct diag_cmd_reg_entry_t entry;
+	uint8_t proc;
+	int pid;
+};
+
+/*
+ * @sync_obj_name: name of the synchronization object associated with this proc
+ * @count: number of entries in the bind
+ * @entries: the actual packet registrations
+ */
+struct diag_cmd_reg_tbl_t {
+	char sync_obj_name[MAX_SYNC_OBJ_NAME_SIZE];
+	uint32_t count;
+	struct diag_cmd_reg_entry_t *entries;
+};
+
+struct diag_client_map {
+	char name[20];
+	int pid;
+};
+
+struct real_time_vote_t {
+	int client_id;
+	uint16_t proc;
+	uint8_t real_time_vote;
+} __packed;
+
+struct real_time_query_t {
+	int real_time;
+	int proc;
+} __packed;
+
+struct diag_buffering_mode_t {
+	uint8_t peripheral;
+	uint8_t mode;
+	uint8_t high_wm_val;
+	uint8_t low_wm_val;
+} __packed;
+
+struct diag_callback_reg_t {
+	int proc;
+} __packed;
+
+struct diag_ws_ref_t {
+	int ref_count;	/* NOTE(review): presumably reads awaiting copy — confirm */
+	int copy_count;	/* NOTE(review): presumably copies in progress — confirm */
+	spinlock_t lock;	/* lock for the counters above */
+};
+
+/* This structure is defined in USB header file */
+#ifndef CONFIG_DIAG_OVER_USB
+struct diag_request {
+	char *buf;
+	int length;
+	int actual;
+	int status;
+	void *context;
+};
+#endif
+
+struct diag_pkt_stats_t {
+	uint32_t alloc_count;
+	uint32_t drop_count;
+};
+
+struct diag_cmd_stats_rsp_t {
+	struct diag_pkt_header_t header;
+	uint32_t payload;
+};
+
+struct diag_cmd_hdlc_disable_rsp_t {
+	struct diag_pkt_header_t header;
+	uint8_t framing_version;
+	uint8_t result;
+};
+
+struct diag_pkt_frame_t {
+	uint8_t start;
+	uint8_t version;
+	uint16_t length;
+};
+
+struct diag_partial_pkt_t {
+	uint32_t total_len;
+	uint32_t read_len;
+	uint32_t remaining;
+	uint32_t capacity;
+	uint8_t processing;
+	unsigned char *data;
+} __packed;
+
+struct diag_logging_mode_param_t {
+	uint32_t req_mode;
+	uint32_t peripheral_mask;
+	uint8_t mode_param;
+} __packed;
+
+struct diag_md_session_t {
+	int pid;			/* owning client pid */
+	int peripheral_mask;		/* peripherals bound to this session */
+	uint8_t hdlc_disabled;		/* per-session HDLC framing state */
+	uint8_t msg_mask_tbl_count;
+	struct timer_list hdlc_reset_timer;
+	struct diag_mask_info *msg_mask;	/* per-session mask views — confirm ownership */
+	struct diag_mask_info *log_mask;
+	struct diag_mask_info *event_mask;
+	struct task_struct *task;
+};
+
+/*
+ * High level structure for storing Diag masks.
+ *
+ * @ptr: Pointer to the buffer that stores the masks
+ * @mask_len: Length of the buffer pointed by ptr
+ * @update_buf: Buffer for performing mask updates to peripherals
+ * @update_buf_len: Length of the buffer pointed by buf
+ * @status: status of the mask - all enable, disabled, valid
+ * @lock: To protect access to the mask variables
+ */
+struct diag_mask_info {
+	uint8_t *ptr;
+	int mask_len;
+	uint8_t *update_buf;
+	int update_buf_len;
+	uint8_t status;
+	struct mutex lock;
+};
+
+struct diag_md_proc_info {
+	int pid;
+	struct task_struct *socket_process;
+	struct task_struct *callback_process;
+	struct task_struct *mdlog_process;
+};
+
+struct diag_feature_t {
+	uint8_t feature_mask[FEATURE_MASK_LEN];
+	uint8_t rcvd_feature_mask;
+	uint8_t log_on_demand;
+	uint8_t separate_cmd_rsp;
+	uint8_t encode_hdlc;
+	uint8_t peripheral_buffering;
+	uint8_t mask_centralization;
+	uint8_t stm_support;
+	uint8_t sockets_enabled;
+	uint8_t sent_feature_mask;
+};
+
+struct diagchar_dev {
+
+	/* State for the char driver */
+	unsigned int major;
+	unsigned int minor_start;
+	int num;
+	struct cdev *cdev;
+	char *name;
+	struct class *diagchar_class;
+	struct device *diag_dev;
+	int ref_count;
+	int mask_clear;
+	struct mutex diag_maskclear_mutex;
+	struct mutex diag_notifier_mutex;
+	struct mutex diagchar_mutex;
+	struct mutex diag_file_mutex;
+	wait_queue_head_t wait_q;
+	struct diag_client_map *client_map;
+	int *data_ready;
+	atomic_t data_ready_notif[THRESHOLD_CLIENT_LIMIT];
+	int num_clients;
+	int polling_reg_flag;
+	int use_device_tree;
+	int supports_separate_cmdrsp;
+	int supports_apps_hdlc_encoding;
+	int supports_sockets;
+	/* The state requested in the STM command */
+	int stm_state_requested[NUM_STM_PROCESSORS];
+	/* The current STM state */
+	int stm_state[NUM_STM_PROCESSORS];
+	uint16_t stm_peripheral;
+	struct work_struct stm_update_work;
+	uint16_t mask_update;
+	struct work_struct mask_update_work;
+	uint16_t close_transport;
+	struct work_struct close_transport_work;
+	struct workqueue_struct *cntl_wq;
+	struct mutex cntl_lock;
+	/* Whether or not the peripheral supports STM */
+	/* Delayed response Variables */
+	uint16_t delayed_rsp_id;
+	struct mutex delayed_rsp_mutex;
+	/* DCI related variables */
+	struct list_head dci_req_list;
+	struct list_head dci_client_list;
+	int dci_tag;
+	int dci_client_id;
+	struct mutex dci_mutex;
+	int num_dci_client;
+	unsigned char *apps_dci_buf;
+	int dci_state;
+	struct workqueue_struct *diag_dci_wq;
+	struct list_head cmd_reg_list;
+	struct mutex cmd_reg_mutex;
+	spinlock_t dci_mempool_lock;
+	uint32_t cmd_reg_count;
+	struct mutex diagfwd_channel_mutex[NUM_PERIPHERALS];
+	/* Sizes that reflect memory pool sizes */
+	unsigned int poolsize;
+	unsigned int poolsize_hdlc;
+	unsigned int poolsize_dci;
+	unsigned int poolsize_user;
+	/* Buffers for masks */
+	struct mutex diag_cntl_mutex;
+	/* Members for Sending response */
+	unsigned char *encoded_rsp_buf;
+	int encoded_rsp_len;
+	uint8_t rsp_buf_busy;
+	spinlock_t rsp_buf_busy_lock;
+	int rsp_buf_ctxt;
+	struct diagfwd_info *diagfwd_data[NUM_PERIPHERALS];
+	struct diagfwd_info *diagfwd_cntl[NUM_PERIPHERALS];
+	struct diagfwd_info *diagfwd_dci[NUM_PERIPHERALS];
+	struct diagfwd_info *diagfwd_cmd[NUM_PERIPHERALS];
+	struct diagfwd_info *diagfwd_dci_cmd[NUM_PERIPHERALS];
+	struct diag_feature_t feature[NUM_PERIPHERALS];
+	struct diag_buffering_mode_t buffering_mode[NUM_PERIPHERALS];
+	uint8_t buffering_flag[NUM_PERIPHERALS];
+	struct mutex mode_lock;
+	unsigned char *user_space_data_buf;
+	uint8_t user_space_data_busy;
+	struct diag_pkt_stats_t msg_stats;
+	struct diag_pkt_stats_t log_stats;
+	struct diag_pkt_stats_t event_stats;
+	/* buffer for updating mask to peripherals */
+	unsigned char *buf_feature_mask_update;
+	uint8_t hdlc_disabled;
+	struct mutex hdlc_disable_mutex;
+	struct mutex hdlc_recovery_mutex;
+	struct timer_list hdlc_reset_timer;
+	struct mutex diag_hdlc_mutex;
+	unsigned char *hdlc_buf;
+	uint32_t hdlc_buf_len;
+	unsigned char *apps_rsp_buf;
+	struct diag_partial_pkt_t incoming_pkt;
+	int in_busy_pktdata;
+	/* Variables for non real time mode */
+	int real_time_mode[DIAG_NUM_PROC];
+	int real_time_update_busy;
+	uint16_t proc_active_mask;
+	uint16_t proc_rt_vote_mask[DIAG_NUM_PROC];
+	struct mutex real_time_mutex;
+	struct work_struct diag_real_time_work;
+	struct workqueue_struct *diag_real_time_wq;
+#ifdef CONFIG_DIAG_OVER_USB
+	int usb_connected;
+#endif
+	struct workqueue_struct *diag_wq;
+	struct work_struct diag_drain_work;
+	struct work_struct update_user_clients;
+	struct work_struct update_md_clients;
+	struct workqueue_struct *diag_cntl_wq;
+	uint8_t log_on_demand_support;
+	uint8_t *apps_req_buf;
+	uint32_t apps_req_buf_len;
+	uint8_t *dci_pkt_buf; /* For Apps DCI packets */
+	uint32_t dci_pkt_length;
+	int in_busy_dcipktdata;
+	int logging_mode;
+	int logging_mask;
+	int mask_check;
+	uint32_t md_session_mask;
+	uint8_t md_session_mode;
+	struct diag_md_session_t *md_session_map[NUM_MD_SESSIONS];
+	struct mutex md_session_lock;
+	/* Power related variables */
+	struct diag_ws_ref_t dci_ws;
+	struct diag_ws_ref_t md_ws;
+	/* Pointers to Diag Masks */
+	struct diag_mask_info *msg_mask;
+	struct diag_mask_info *log_mask;
+	struct diag_mask_info *event_mask;
+	struct diag_mask_info *build_time_mask;
+	uint8_t msg_mask_tbl_count;
+	uint8_t bt_msg_mask_tbl_count;
+	uint16_t event_mask_size;
+	uint16_t last_event_id;
+	struct mutex msg_mask_lock;
+	/* Variables for Mask Centralization */
+	uint16_t num_event_id[NUM_PERIPHERALS];
+	uint32_t num_equip_id[NUM_PERIPHERALS];
+	uint32_t max_ssid_count[NUM_PERIPHERALS];
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	/* For sending command requests in callback mode */
+	unsigned char *hdlc_encode_buf;
+	int hdlc_encode_buf_len;
+#endif
+	int time_sync_enabled;
+	uint8_t uses_time_api;
+	struct platform_device *pdev;
+};
+
+extern struct diagchar_dev *driver;
+
+extern int wrap_enabled;
+extern uint16_t wrap_count;
+
+void diag_get_timestamp(char *time_str);
+void check_drain_timer(void);
+int diag_get_remote(int remote_info);
+
+void diag_ws_init(void);
+void diag_ws_on_notify(void);
+void diag_ws_on_read(int type, int pkt_len);
+void diag_ws_on_copy(int type);
+void diag_ws_on_copy_fail(int type);
+void diag_ws_on_copy_complete(int type);
+void diag_ws_reset(int type);
+void diag_ws_release(void);
+void chk_logging_wakeup(void);
+int diag_cmd_add_reg(struct diag_cmd_reg_entry_t *new_entry, uint8_t proc,
+		     int pid);
+struct diag_cmd_reg_entry_t *diag_cmd_search(
+			struct diag_cmd_reg_entry_t *entry,
+			int proc);
+void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc);
+void diag_cmd_remove_reg_by_pid(int pid);
+void diag_cmd_remove_reg_by_proc(int proc);
+int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry);
+int diag_mask_param(void);
+void diag_clear_masks(int pid);
+
+void diag_record_stats(int type, int flag);
+
+struct diag_md_session_t *diag_md_session_get_pid(int pid);
+struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral);
+
+#endif
diff -Nruw linux-6.4-fbx/drivers/char/diag./diagchar_core.c linux-6.4-fbx/drivers/char/diag/diagchar_core.c
--- linux-6.4-fbx/drivers/char/diag./diagchar_core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diagchar_core.c	2023-10-05 12:33:41.363634732 +0200
@@ -0,0 +1,3753 @@
+/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include <asm/current.h>
+#include <linux/kmemleak.h>
+
+#include "diagchar_hdlc.h"
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+#include "diag_dci.h"
+#include "diag_debugfs.h"
+#include "diag_masks.h"
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#include "diagfwd_bridge.h"
+#endif
+#include "diag_usb.h"
+#include "diag_memorydevice.h"
+#include "diag_mux.h"
+#include "diag_ipc_logging.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_mhi.h"
+
+#include <linux/coresight-stm.h>
+#include <linux/kernel.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+MODULE_DESCRIPTION("Diag Char Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
+
+#define MIN_SIZ_ALLOW 4
+#define INIT	1
+#define EXIT	-1
+/* Global driver state; allocated once and referenced by every sub-module. */
+struct diagchar_dev *driver;
+/* Per-open-file private data: pid of the client that opened the node. */
+struct diagchar_priv {
+	int pid;
+};
+
+#define USER_SPACE_RAW_DATA	0
+#define USER_SPACE_HDLC_DATA	1
+
+/* Memory pool variables */
+/* Used for copying any incoming packet from user space clients. */
+static unsigned int poolsize = 12;
+module_param(poolsize, uint, 0);
+
+/*
+ * Used for HDLC encoding packets coming from the user
+ * space.
+ */
+static unsigned int poolsize_hdlc = 10;
+module_param(poolsize_hdlc, uint, 0);
+
+/*
+ * This is used for incoming DCI requests from the user space clients.
+ * Don't expose itemsize as it is internal.
+ */
+static unsigned int poolsize_user = 8;
+module_param(poolsize_user, uint, 0);
+
+/*
+ * USB structures allocated for writing Diag data generated on the Apps to USB.
+ * Don't expose itemsize as it is constant.
+ */
+static unsigned int itemsize_usb_apps = sizeof(struct diag_request);
+static unsigned int poolsize_usb_apps = 10;
+module_param(poolsize_usb_apps, uint, 0);
+
+/* Used for DCI client buffers. Don't expose itemsize as it is constant. */
+static unsigned int poolsize_dci = 10;
+module_param(poolsize_dci, uint, 0);
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/* Used for reading data from the remote device. */
+static unsigned int itemsize_mdm = DIAG_MDM_BUF_SIZE;
+static unsigned int poolsize_mdm = 18;
+module_param(itemsize_mdm, uint, 0);
+module_param(poolsize_mdm, uint, 0);
+
+/*
+ * Used for reading DCI data from the remote device.
+ * Don't expose poolsize for DCI data. There is only one read buffer
+ */
+static unsigned int itemsize_mdm_dci = DIAG_MDM_BUF_SIZE;
+static unsigned int poolsize_mdm_dci = 1;
+module_param(itemsize_mdm_dci, uint, 0);
+
+/*
+ * Used for USB structues associated with a remote device.
+ * Don't expose the itemsize since it is constant.
+ */
+static unsigned int itemsize_mdm_usb = sizeof(struct diag_request);
+static unsigned int poolsize_mdm_usb = 18;
+module_param(poolsize_mdm_usb, uint, 0);
+
+/*
+ * Used for writing read DCI data to remote peripherals. Don't
+ * expose poolsize for DCI data. There is only one read
+ * buffer. Add 6 bytes for DCI header information: Start (1),
+ * Version (1), Length (2), Tag (2)
+ */
+static unsigned int itemsize_mdm_dci_write = DIAG_MDM_DCI_BUF_SIZE;
+static unsigned int poolsize_mdm_dci_write = 1;
+module_param(itemsize_mdm_dci_write, uint, 0);
+
+/*
+ * Used for USB structures associated with a remote SMUX
+ * device Don't expose the itemsize since it is constant
+ */
+static unsigned int itemsize_qsc_usb = sizeof(struct diag_request);
+static unsigned int poolsize_qsc_usb = 8;
+module_param(poolsize_qsc_usb, uint, 0);
+#endif
+
+/* This is the max number of user-space clients supported at initialization*/
+static unsigned int max_clients = 15;
+module_param(max_clients, uint, 0);
+
+/* Timer variables */
+/* Periodic drain of buffered apps data; armed by check_drain_timer(). */
+static struct timer_list drain_timer;
+static int timer_in_progress;
+
+/*
+ * Diag Mask clear variable
+ * Used for clearing masks upon
+ * USB disconnection and stopping ODL
+ */
+static int diag_mask_clear_param = 1;
+module_param(diag_mask_clear_param, int, 0644);
+
+/* One staging buffer of apps-generated data awaiting a mux write. */
+struct diag_apps_data_t {
+	void *buf;
+	uint32_t len;
+	int ctxt;
+};
+
+/* Staging buffers flushed by diag_drain_work_fn(), guarded by apps_data_mutex */
+static struct diag_apps_data_t hdlc_data;
+static struct diag_apps_data_t non_hdlc_data;
+static struct mutex apps_data_mutex;
+
+#define DIAGPKT_MAX_DELAYED_RSP 0xFFFF
+
+#ifdef DIAG_DEBUG
+uint16_t diag_debug_mask;
+void *diag_ipc_log;
+#endif
+
+static void diag_md_session_close(int pid);
+
+/*
+ * Returns the next delayed rsp id. If wrapping is enabled,
+ * wraps the delayed rsp id to DIAGPKT_MAX_DELAYED_RSP.
+ * Serialized by driver->delayed_rsp_mutex; when wrapping is disabled the
+ * id saturates at DIAGPKT_MAX_DELAYED_RSP instead of wrapping to 1.
+ */
+static uint16_t diag_get_next_delayed_rsp_id(void)
+{
+	uint16_t rsp_id = 0;
+
+	mutex_lock(&driver->delayed_rsp_mutex);
+	rsp_id = driver->delayed_rsp_id;
+	if (rsp_id < DIAGPKT_MAX_DELAYED_RSP)
+		rsp_id++;
+	else {
+		if (wrap_enabled) {
+			/* wrap to 1 (0 is reserved) and count the wrap */
+			rsp_id = 1;
+			wrap_count++;
+		} else
+			rsp_id = DIAGPKT_MAX_DELAYED_RSP;
+	}
+	driver->delayed_rsp_id = rsp_id;
+	mutex_unlock(&driver->delayed_rsp_mutex);
+
+	return rsp_id;
+}
+
+static int diag_switch_logging(struct diag_logging_mode_param_t *param);
+
+/*
+ * Copy `length` bytes of `data` to user `buf` at offset `ret`, advancing
+ * `ret`. On short buffer or copy_to_user() failure sets ret = -EFAULT and
+ * jumps to the caller's local `exit` label. Requires `count` and `ret` in
+ * the caller's scope.
+ */
+#define COPY_USER_SPACE_OR_EXIT(buf, data, length)		\
+do {								\
+	if ((count < ret+length) || (copy_to_user(buf,		\
+			(void *)&data, length))) {		\
+		ret = -EFAULT;					\
+		goto exit;					\
+	}							\
+	ret += length;						\
+} while (0)
+
+/*
+ * Same as COPY_USER_SPACE_OR_EXIT but `break`s out of the do/while on
+ * failure instead of jumping, leaving ret = -EFAULT for the caller to check.
+ */
+#define COPY_USER_SPACE_OR_ERR(buf, data, length)		\
+do {								\
+	if ((count < ret+length) || (copy_to_user(buf,		\
+			(void *)&data, length))) {		\
+		ret = -EFAULT;					\
+		break;						\
+	}							\
+	ret += length;						\
+} while (0)
+
+/* Timer callback: defer the actual drain to diag_drain_work_fn on diag_wq. */
+static void drain_timer_func(struct timer_list *t)
+{
+	queue_work(driver->diag_wq , &(driver->diag_drain_work));
+}
+
+/*
+ * Flush one apps staging buffer to the local mux. On write failure the
+ * buffer is returned to the HDLC pool; in both cases the staging slot is
+ * reset (on success the mux owns the buffer until write completion).
+ */
+static void diag_drain_apps_data(struct diag_apps_data_t *data)
+{
+	int err = 0;
+
+	if (!data || !data->buf)
+		return;
+
+	err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+			     data->ctxt);
+	if (err)
+		diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
+
+	data->buf = NULL;
+	data->len = 0;
+}
+
+/* Workqueue shim: notify user-space clients of an HDLC support change. */
+void diag_update_user_client_work_fn(struct work_struct *work)
+{
+	diag_update_userspace_clients(HDLC_SUPPORT_TYPE);
+}
+
+/* Workqueue shim: notify memory-device clients of an HDLC support change. */
+static void diag_update_md_client_work_fn(struct work_struct *work)
+{
+	diag_update_md_clients(HDLC_SUPPORT_TYPE);
+}
+
+/*
+ * Drain worker (queued by drain_timer_func). Picks the staging buffer that
+ * matches the current HDLC mode of the APPS_DATA session (or the global
+ * mode if no session owns APPS_DATA) and flushes it to the mux.
+ */
+void diag_drain_work_fn(struct work_struct *work)
+{
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled = 0;
+
+	/* allow check_drain_timer() to re-arm the timer */
+	timer_in_progress = 0;
+	mutex_lock(&apps_data_mutex);
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_peripheral(APPS_DATA);
+	if (session_info)
+		hdlc_disabled = session_info->hdlc_disabled;
+	else
+		hdlc_disabled = driver->hdlc_disabled;
+	mutex_unlock(&driver->md_session_lock);
+	if (!hdlc_disabled)
+		diag_drain_apps_data(&hdlc_data);
+	else
+		diag_drain_apps_data(&non_hdlc_data);
+	mutex_unlock(&apps_data_mutex);
+}
+
+/*
+ * Arm the 200 ms drain timer if it is not already pending.
+ * NOTE(review): timer_in_progress is tested and set without a lock; a
+ * racing caller could double-arm, but mod_timer() makes that harmless.
+ */
+void check_drain_timer(void)
+{
+	int ret = 0;
+
+	if (!timer_in_progress) {
+		timer_in_progress = 1;
+		ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(200));
+	}
+}
+
+/*
+ * Record the current task in client_map slot @i and stash its pid in
+ * file->private_data. Caller holds diagchar_mutex. If the kmalloc fails,
+ * private_data is left NULL; diag_remove_client_entry() rejects that case.
+ */
+void diag_add_client(int i, struct file *file)
+{
+	struct diagchar_priv *diagpriv_data;
+
+	driver->client_map[i].pid = current->tgid;
+	diagpriv_data = kmalloc(sizeof(struct diagchar_priv),
+							GFP_KERNEL);
+	if (diagpriv_data)
+		diagpriv_data->pid = current->tgid;
+	file->private_data = diagpriv_data;
+	/* name is a fixed 20-byte field; force NUL termination */
+	strlcpy(driver->client_map[i].name, current->comm, 20);
+	driver->client_map[i].name[19] = '\0';
+}
+
+/*
+ * Size and create the local memory pools (copy, HDLC, user, DCI) when the
+ * first client opens the device. The copy itemsize is padded with the
+ * larger of the DCI and callback header sizes.
+ */
+static void diag_mempool_init(void)
+{
+	uint32_t itemsize = DIAG_MAX_REQ_SIZE;
+	uint32_t itemsize_hdlc = DIAG_MAX_HDLC_BUF_SIZE + APF_DIAG_PADDING;
+	uint32_t itemsize_dci = IN_BUF_SIZE;
+	uint32_t itemsize_user = DCI_REQ_BUF_SIZE;
+
+	itemsize += ((DCI_HDR_SIZE > CALLBACK_HDR_SIZE) ? DCI_HDR_SIZE :
+		     CALLBACK_HDR_SIZE);
+	diagmem_setsize(POOL_TYPE_COPY, itemsize, poolsize);
+	diagmem_setsize(POOL_TYPE_HDLC, itemsize_hdlc, poolsize_hdlc);
+	diagmem_setsize(POOL_TYPE_DCI, itemsize_dci, poolsize_dci);
+	diagmem_setsize(POOL_TYPE_USER, itemsize_user, poolsize_user);
+
+	diagmem_init(driver, POOL_TYPE_COPY);
+	diagmem_init(driver, POOL_TYPE_HDLC);
+	diagmem_init(driver, POOL_TYPE_USER);
+	diagmem_init(driver, POOL_TYPE_DCI);
+}
+
+/* Tear down the pools created by diag_mempool_init() (last client closed). */
+static void diag_mempool_exit(void)
+{
+	diagmem_exit(driver, POOL_TYPE_COPY);
+	diagmem_exit(driver, POOL_TYPE_HDLC);
+	diagmem_exit(driver, POOL_TYPE_USER);
+	diagmem_exit(driver, POOL_TYPE_DCI);
+}
+
+/*
+ * open() handler: claim a free client_map slot (growing the map up to
+ * THRESHOLD_CLIENT_LIMIT if needed), seed the client's data_ready flags so
+ * it immediately picks up the current masks, and create the memory pools
+ * on the first open. Returns 0 or -ENOMEM.
+ */
+static int diagchar_open(struct inode *inode, struct file *file)
+{
+	int i = 0;
+	void *temp;
+
+	if (driver) {
+		mutex_lock(&driver->diagchar_mutex);
+
+		/* find the first unused slot (pid == 0) */
+		for (i = 0; i < driver->num_clients; i++)
+			if (driver->client_map[i].pid == 0)
+				break;
+
+		if (i < driver->num_clients) {
+			diag_add_client(i, file);
+		} else {
+			if (i < THRESHOLD_CLIENT_LIMIT) {
+				/* grow client_map and data_ready by one */
+				driver->num_clients++;
+				temp = krealloc(driver->client_map
+					, (driver->num_clients) * sizeof(struct
+						 diag_client_map), GFP_KERNEL);
+				if (!temp)
+					goto fail;
+				else
+					driver->client_map = temp;
+				temp = krealloc(driver->data_ready
+					, (driver->num_clients) * sizeof(int),
+							GFP_KERNEL);
+				if (!temp)
+					goto fail;
+				else
+					driver->data_ready = temp;
+				diag_add_client(i, file);
+			} else {
+				mutex_unlock(&driver->diagchar_mutex);
+				pr_err_ratelimited("diag: Max client limit for DIAG reached\n");
+				pr_err_ratelimited("diag: Cannot open handle %s"
+					   " %d", current->comm, current->tgid);
+				for (i = 0; i < driver->num_clients; i++)
+					pr_debug("%d) %s PID=%d", i, driver->
+						client_map[i].name,
+						driver->client_map[i].pid);
+				return -ENOMEM;
+			}
+		}
+		/* pre-mark all mask types as pending for the new client */
+		driver->data_ready[i] = 0x0;
+		atomic_set(&driver->data_ready_notif[i], 0);
+		driver->data_ready[i] |= MSG_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
+		driver->data_ready[i] |= EVENT_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
+		driver->data_ready[i] |= LOG_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
+		driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
+		driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
+
+		if (driver->ref_count == 0)
+			diag_mempool_init();
+		driver->ref_count++;
+		mutex_unlock(&driver->diagchar_mutex);
+		return 0;
+	}
+	return -ENOMEM;
+
+fail:
+	/* krealloc failed: roll back the optimistic count bump */
+	driver->num_clients--;
+	mutex_unlock(&driver->diagchar_mutex);
+	pr_err_ratelimited("diag: Insufficient memory for new client");
+	return -ENOMEM;
+}
+
+/*
+ * Convert an internal MD_PERIPHERAL_MASK() bitmap into the DIAG_CON_*
+ * bitmap exchanged with user space.
+ */
+static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
+{
+	uint32_t ret = 0;
+
+	if (peripheral_mask & MD_PERIPHERAL_MASK(APPS_DATA))
+		ret |= DIAG_CON_APSS;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_MODEM))
+		ret |= DIAG_CON_MPSS;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_LPASS))
+		ret |= DIAG_CON_LPASS;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WCNSS))
+		ret |= DIAG_CON_WCNSS;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_SENSORS))
+		ret |= DIAG_CON_SENSORS;
+
+	return ret;
+}
+/* Accessor for the diag_mask_clear_param module parameter. */
+int diag_mask_param(void)
+{
+	return diag_mask_clear_param;
+}
+
+/*
+ * Disable all apps log, msg and event masks on behalf of @pid by feeding
+ * the three "disable" command packets (0x73, 0x7D/0x05, 0x60) through the
+ * normal mask-processing path. pid == 0 denotes a USB disconnection.
+ */
+void diag_clear_masks(int pid)
+{
+	int ret;
+	char cmd_disable_log_mask[] = { 0x73, 0, 0, 0, 0, 0, 0, 0};
+	char cmd_disable_msg_mask[] = { 0x7D, 0x05, 0, 0, 0, 0, 0, 0};
+	char cmd_disable_event_mask[] = { 0x60, 0};
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+	"diag: %s: masks clear request upon %s\n", __func__,
+	((pid) ? "ODL exit" : "USB Disconnection"));
+
+	/* return values intentionally ignored; each clear is best-effort */
+	ret = diag_process_apps_masks(cmd_disable_log_mask,
+			sizeof(cmd_disable_log_mask), pid);
+	ret = diag_process_apps_masks(cmd_disable_msg_mask,
+			sizeof(cmd_disable_msg_mask), pid);
+	ret = diag_process_apps_masks(cmd_disable_event_mask,
+			sizeof(cmd_disable_event_mask), pid);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+	"diag:%s: masks cleared successfully\n", __func__);
+}
+
+/*
+ * Tear down the memory-device logging session owned by @pid: optionally
+ * clear the apps masks, close the session's peripherals on the local mux,
+ * and switch logging for those peripherals back to USB mode.
+ */
+static void diag_close_logging_process(const int pid)
+{
+	int i;
+	int session_peripheral_mask;
+	struct diag_md_session_t *session_info = NULL;
+	struct diag_logging_mode_param_t params;
+
+	/* Snapshot the peripheral mask while md_session_lock is held. */
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_pid(pid);
+	if (!session_info) {
+		mutex_unlock(&driver->md_session_lock);
+		return;
+	}
+	session_peripheral_mask = session_info->peripheral_mask;
+	mutex_unlock(&driver->md_session_lock);
+
+	if (diag_mask_clear_param)
+		diag_clear_masks(pid);
+
+	mutex_lock(&driver->diag_maskclear_mutex);
+	driver->mask_clear = 1;
+	mutex_unlock(&driver->diag_maskclear_mutex);
+
+	/*
+	 * Fix: do not re-read session_info->peripheral_mask here. The
+	 * pointer is no longer protected by md_session_lock and the session
+	 * may be freed concurrently; use the snapshot taken above (the
+	 * value is identical).
+	 */
+	for (i = 0; i < NUM_MD_SESSIONS; i++)
+		if (MD_PERIPHERAL_MASK(i) & session_peripheral_mask)
+			diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);
+
+	params.req_mode = USB_MODE;
+	params.mode_param = 0;
+	params.peripheral_mask =
+		diag_translate_kernel_to_user_mask(session_peripheral_mask);
+
+	mutex_lock(&driver->md_session_lock);
+	diag_md_session_close(pid);
+	mutex_unlock(&driver->md_session_lock);
+	diag_switch_logging(&params);
+}
+
+/*
+ * Release everything owned by the exiting client: its DCI registrations,
+ * its memory-device logging session, its command registrations, and its
+ * client_map slot. Drops the mempool on the last close. Returns 0 or a
+ * negative errno for a bad file/private_data.
+ */
+static int diag_remove_client_entry(struct file *file)
+{
+	int i = -1;
+	struct diagchar_priv *diagpriv_data = NULL;
+	struct diag_dci_client_tbl *dci_entry = NULL;
+
+	if (!driver)
+		return -ENOMEM;
+
+	mutex_lock(&driver->diag_file_mutex);
+	if (!file) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid file pointer\n");
+		mutex_unlock(&driver->diag_file_mutex);
+		return -ENOENT;
+	}
+	if (!(file->private_data)) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid private data\n");
+		mutex_unlock(&driver->diag_file_mutex);
+		return -EINVAL;
+	}
+
+	diagpriv_data = file->private_data;
+
+	/*
+	 * clean up any DCI registrations, if this is a DCI client
+	 * This will specially help in case of ungraceful exit of any DCI client
+	 * This call will remove any pending registrations of such client
+	 */
+	mutex_lock(&driver->dci_mutex);
+	dci_entry = dci_lookup_client_entry_pid(current->tgid);
+	if (dci_entry)
+		diag_dci_deinit_client(dci_entry);
+	mutex_unlock(&driver->dci_mutex);
+
+	diag_close_logging_process(current->tgid);
+
+	/* Delete the pkt response table entry for the exiting process */
+	diag_cmd_remove_reg_by_pid(current->tgid);
+
+	mutex_lock(&driver->diagchar_mutex);
+	driver->ref_count--;
+	if (driver->ref_count == 0)
+		diag_mempool_exit();
+
+	/* free the client_map slot matching this file's recorded pid */
+	for (i = 0; i < driver->num_clients; i++) {
+		if (NULL != diagpriv_data && diagpriv_data->pid ==
+						driver->client_map[i].pid) {
+			driver->client_map[i].pid = 0;
+			kfree(diagpriv_data);
+			diagpriv_data = NULL;
+			file->private_data = 0;
+			break;
+		}
+	}
+	mutex_unlock(&driver->diagchar_mutex);
+	mutex_unlock(&driver->diag_file_mutex);
+	return 0;
+}
+/* release() handler: remove the client entry, then clear the mask flag. */
+static int diagchar_close(struct inode *inode, struct file *file)
+{
+	int ret;
+	DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: %s process exit with pid = %d\n",
+		current->comm, current->tgid);
+	ret = diag_remove_client_entry(file);
+	mutex_lock(&driver->diag_maskclear_mutex);
+	driver->mask_clear = 0;
+	mutex_unlock(&driver->diag_maskclear_mutex);
+	return ret;
+}
+
+/*
+ * Update the per-type packet counters. @type selects the counter set
+ * (event/F3/log); @flag is PKT_ALLOC, PKT_DROP or PKT_RESET. Responses and
+ * delayed responses keep no counters and only log anomalies.
+ */
+void diag_record_stats(int type, int flag)
+{
+	struct diag_pkt_stats_t *pkt_stats = NULL;
+
+	switch (type) {
+	case DATA_TYPE_EVENT:
+		pkt_stats = &driver->event_stats;
+		break;
+	case DATA_TYPE_F3:
+		pkt_stats = &driver->msg_stats;
+		break;
+	case DATA_TYPE_LOG:
+		pkt_stats = &driver->log_stats;
+		break;
+	case DATA_TYPE_RESPONSE:
+		if (flag != PKT_DROP)
+			return;
+		pr_err_ratelimited("diag: In %s, dropping response. This shouldn't happen\n",
+				   __func__);
+		return;
+	case DATA_TYPE_DELAYED_RESPONSE:
+		/* No counters to increase for Delayed responses */
+		return;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	switch (flag) {
+	case PKT_ALLOC:
+		atomic_add(1, (atomic_t *)&pkt_stats->alloc_count);
+		break;
+	case PKT_DROP:
+		atomic_add(1, (atomic_t *)&pkt_stats->drop_count);
+		break;
+	case PKT_RESET:
+		atomic_set((atomic_t *)&pkt_stats->alloc_count, 0);
+		atomic_set((atomic_t *)&pkt_stats->drop_count, 0);
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid flag: %d\n",
+				   __func__, flag);
+		return;
+	}
+}
+
+/*
+ * Format the current wall-clock time as "H:M:S:ms" into @time_str, which
+ * must hold at least DIAG_TS_SIZE bytes. The millisecond field is always
+ * rendered as 0.
+ */
+void diag_get_timestamp(char *time_str)
+{
+	struct tm broken_tm;
+	if (!time_str)
+		return;
+	time64_to_tm(ktime_get_real_seconds(), 0, &broken_tm);
+	scnprintf(time_str, DIAG_TS_SIZE, "%d:%d:%d:%ld", broken_tm.tm_hour,
+				broken_tm.tm_min, broken_tm.tm_sec, 0L);
+}
+
+/*
+ * Map a (possibly negated) remote-processor token to its canonical
+ * negative id: returns -remote_info for MDM/MDM2/QSC tokens, 0 otherwise
+ * (i.e. for the local processor).
+ */
+int diag_get_remote(int remote_info)
+{
+	int val = (remote_info < 0) ? -remote_info : remote_info;
+	int remote_val;
+
+	switch (val) {
+	case MDM:
+	case MDM2:
+	case QSC:
+		remote_val = -remote_info;
+		break;
+	default:
+		remote_val = 0;
+		break;
+	}
+
+	return remote_val;
+}
+
+/*
+ * Classify a registration entry: returns DIAG_CMD_POLLING if its code
+ * range covers one of the known polling commands (STATUS, QUERY_CALL,
+ * QUERY_TMC, DIAG_POLL, TDSCDMA_STATUS), DIAG_CMD_NOT_POLLING otherwise,
+ * or -EIO for a NULL entry.
+ */
+int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry)
+{
+	int polling = DIAG_CMD_NOT_POLLING;
+
+	if (!entry)
+		return -EIO;
+
+	/* each test checks that [cmd_code_lo, cmd_code_hi] contains the cmd */
+	if (entry->cmd_code == DIAG_CMD_NO_SUBSYS) {
+		if (entry->subsys_id == DIAG_CMD_NO_SUBSYS &&
+		    entry->cmd_code_hi >= DIAG_CMD_STATUS &&
+		    entry->cmd_code_lo <= DIAG_CMD_STATUS)
+			polling = DIAG_CMD_POLLING;
+		else if (entry->subsys_id == DIAG_SS_WCDMA &&
+			 entry->cmd_code_hi >= DIAG_CMD_QUERY_CALL &&
+			 entry->cmd_code_lo <= DIAG_CMD_QUERY_CALL)
+			polling = DIAG_CMD_POLLING;
+		else if (entry->subsys_id == DIAG_SS_GSM &&
+			 entry->cmd_code_hi >= DIAG_CMD_QUERY_TMC &&
+			 entry->cmd_code_lo <= DIAG_CMD_QUERY_TMC)
+			polling = DIAG_CMD_POLLING;
+		else if (entry->subsys_id == DIAG_SS_PARAMS &&
+			 entry->cmd_code_hi >= DIAG_DIAG_POLL  &&
+			 entry->cmd_code_lo <= DIAG_DIAG_POLL)
+			polling = DIAG_CMD_POLLING;
+		else if (entry->subsys_id == DIAG_SS_TDSCDMA &&
+			 entry->cmd_code_hi >= DIAG_CMD_TDSCDMA_STATUS &&
+			 entry->cmd_code_lo <= DIAG_CMD_TDSCDMA_STATUS)
+			polling = DIAG_CMD_POLLING;
+	}
+
+	return polling;
+}
+
+/*
+ * Recompute driver->polling_reg_flag after the registration list changes.
+ * On DIAG_CMD_ADD the scan is skipped if the flag is already set (adding
+ * can only turn it on). Caller holds cmd_reg_mutex.
+ */
+static void diag_cmd_invalidate_polling(int change_flag)
+{
+	int polling = DIAG_CMD_NOT_POLLING;
+	struct list_head *start;
+	struct list_head *temp;
+	struct diag_cmd_reg_t *item = NULL;
+
+	if (change_flag == DIAG_CMD_ADD) {
+		if (driver->polling_reg_flag)
+			return;
+	}
+
+	driver->polling_reg_flag = 0;
+	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+		item = list_entry(start, struct diag_cmd_reg_t, link);
+		/* NOTE(review): &item->entry can never be NULL; dead check */
+		if (&item->entry == NULL) {
+			pr_err("diag: In %s, unable to search command\n",
+			       __func__);
+			return;
+		}
+		polling = diag_cmd_chk_polling(&item->entry);
+		if (polling == DIAG_CMD_POLLING) {
+			driver->polling_reg_flag = 1;
+			break;
+		}
+	}
+}
+
+/*
+ * Append a command registration for @proc to driver->cmd_reg_list. @pid is
+ * only meaningful for APPS_DATA registrations and is forced to INVALID_PID
+ * otherwise. Returns 0, -EINVAL for bad arguments, or -ENOMEM.
+ */
+int diag_cmd_add_reg(struct diag_cmd_reg_entry_t *new_entry, uint8_t proc,
+		     int pid)
+{
+	struct diag_cmd_reg_t *new_item = NULL;
+
+	if (!new_entry) {
+		pr_err("diag: In %s, invalid new entry\n", __func__);
+		return -EINVAL;
+	}
+
+	if (proc > APPS_DATA) {
+		pr_err("diag: In %s, invalid peripheral %d\n", __func__, proc);
+		return -EINVAL;
+	}
+
+	if (proc != APPS_DATA)
+		pid = INVALID_PID;
+
+	new_item = kzalloc(sizeof(struct diag_cmd_reg_t), GFP_KERNEL);
+	if (!new_item) {
+		pr_err("diag: In %s, unable to create memory for new command registration\n",
+		       __func__);
+		return -ENOMEM;
+	}
+	/* the list node lives until explicit removal; not a leak */
+	kmemleak_not_leak(new_item);
+
+	new_item->pid = pid;
+	new_item->proc = proc;
+	memcpy(&new_item->entry, new_entry,
+	       sizeof(struct diag_cmd_reg_entry_t));
+	INIT_LIST_HEAD(&new_item->link);
+
+	mutex_lock(&driver->cmd_reg_mutex);
+	list_add_tail(&new_item->link, &driver->cmd_reg_list);
+	driver->cmd_reg_count++;
+	diag_cmd_invalidate_polling(DIAG_CMD_ADD);
+	mutex_unlock(&driver->cmd_reg_mutex);
+
+	return 0;
+}
+
+/*
+ * Find the first registration whose (cmd_code, subsys_id, code range)
+ * matches @entry for processor @proc (ALL_PROC matches any). Wildcard
+ * registrations (cmd_code == DIAG_CMD_NO_SUBSYS) match by subsystem or by
+ * bare code range; MODE_CMD/RESET_ID is special-cased so reset goes only
+ * to non-apps handlers. Returns the matching entry or NULL. Caller must
+ * hold cmd_reg_mutex (or otherwise pin the list).
+ */
+struct diag_cmd_reg_entry_t *diag_cmd_search(
+			struct diag_cmd_reg_entry_t *entry, int proc)
+{
+	struct list_head *start;
+	struct list_head *temp;
+	struct diag_cmd_reg_t *item = NULL;
+	struct diag_cmd_reg_entry_t *temp_entry = NULL;
+
+	if (!entry) {
+		pr_err("diag: In %s, invalid entry\n", __func__);
+		return NULL;
+	}
+
+	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+		item = list_entry(start, struct diag_cmd_reg_t, link);
+		/* NOTE(review): &item->entry can never be NULL; dead check */
+		if (&item->entry == NULL) {
+			pr_err("diag: In %s, unable to search command\n",
+			       __func__);
+			return NULL;
+		}
+		temp_entry = &item->entry;
+		if (temp_entry->cmd_code == entry->cmd_code &&
+		    temp_entry->subsys_id == entry->subsys_id &&
+		    temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
+		    temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
+		    (proc == item->proc || proc == ALL_PROC)) {
+			return &item->entry;
+		} else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
+			   entry->cmd_code == DIAG_CMD_DIAG_SUBSYS) {
+			if (temp_entry->subsys_id == entry->subsys_id &&
+			    temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
+			    temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
+			    (proc == item->proc || proc == ALL_PROC)) {
+				return &item->entry;
+			}
+		} else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
+			   temp_entry->subsys_id == DIAG_CMD_NO_SUBSYS) {
+			if ((temp_entry->cmd_code_hi >= entry->cmd_code) &&
+			    (temp_entry->cmd_code_lo <= entry->cmd_code) &&
+			    (proc == item->proc || proc == ALL_PROC)) {
+				if (entry->cmd_code == MODE_CMD) {
+					/* RESET goes to peripherals only */
+					if (entry->subsys_id == RESET_ID &&
+						item->proc != APPS_DATA) {
+						continue;
+					}
+					/* other MODE cmds skip apps handler */
+					if (entry->subsys_id != RESET_ID &&
+						item->proc == APPS_DATA) {
+						continue;
+					}
+				}
+				return &item->entry;
+			}
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Remove the registration matching @entry for @proc, if one exists, and
+ * recompute the polling flag. Takes cmd_reg_mutex internally.
+ */
+void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc)
+{
+	struct diag_cmd_reg_t *item = NULL;
+	struct diag_cmd_reg_entry_t *temp_entry;
+	if (!entry) {
+		pr_err("diag: In %s, invalid entry\n", __func__);
+		return;
+	}
+
+	mutex_lock(&driver->cmd_reg_mutex);
+	temp_entry = diag_cmd_search(entry, proc);
+	if (temp_entry) {
+		/* recover the list node that embeds the matched entry */
+		item = container_of(temp_entry, struct diag_cmd_reg_t, entry);
+		if (!item) {
+			mutex_unlock(&driver->cmd_reg_mutex);
+			return;
+		}
+		list_del(&item->link);
+		kfree(item);
+		driver->cmd_reg_count--;
+	}
+	diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
+	mutex_unlock(&driver->cmd_reg_mutex);
+}
+
+/*
+ * Drop every registration owned by @pid (an exiting apps client).
+ * NOTE(review): unlike the proc variant below, this does not recompute
+ * the polling flag — apps registrations may still leave it stale.
+ */
+void diag_cmd_remove_reg_by_pid(int pid)
+{
+	struct list_head *start;
+	struct list_head *temp;
+	struct diag_cmd_reg_t *item = NULL;
+
+	mutex_lock(&driver->cmd_reg_mutex);
+	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+		item = list_entry(start, struct diag_cmd_reg_t, link);
+		/* NOTE(review): &item->entry can never be NULL; dead check */
+		if (&item->entry == NULL) {
+			pr_err("diag: In %s, unable to search command\n",
+			       __func__);
+			mutex_unlock(&driver->cmd_reg_mutex);
+			return;
+		}
+		if (item->pid == pid) {
+			list_del(&item->link);
+			kfree(item);
+			driver->cmd_reg_count--;
+		}
+	}
+	mutex_unlock(&driver->cmd_reg_mutex);
+}
+
+/*
+ * Drop every registration belonging to processor @proc (e.g. on peripheral
+ * restart), then recompute the polling flag.
+ */
+void diag_cmd_remove_reg_by_proc(int proc)
+{
+	struct list_head *start;
+	struct list_head *temp;
+	struct diag_cmd_reg_t *item = NULL;
+
+	mutex_lock(&driver->cmd_reg_mutex);
+	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+		item = list_entry(start, struct diag_cmd_reg_t, link);
+		/* NOTE(review): &item->entry can never be NULL; dead check */
+		if (&item->entry == NULL) {
+			pr_err("diag: In %s, unable to search command\n",
+			       __func__);
+			mutex_unlock(&driver->cmd_reg_mutex);
+			return;
+		}
+		if (item->proc == proc) {
+			list_del(&item->link);
+			kfree(item);
+			driver->cmd_reg_count--;
+		}
+	}
+	diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
+	mutex_unlock(&driver->cmd_reg_mutex);
+}
+
+/*
+ * Drain the DCI write-buffer list of @entry into the user buffer @buf
+ * (capacity @count), starting at offset *pret and advancing it. Buffers
+ * that do not fit are left queued and a re-drain is scheduled. Returns 0
+ * on success, 1 / -EINVAL on bad arguments, -EFAULT via the copy macro.
+ */
+static int diag_copy_dci(char __user *buf, size_t count,
+			struct diag_dci_client_tbl *entry, int *pret)
+{
+	int total_data_len = 0;
+	int ret = 0;
+	int exit_stat = 1;
+	uint8_t drain_again = 0;
+	struct diag_dci_buffer_t *buf_entry, *temp;
+
+	if (!buf || !entry || !pret)
+		return exit_stat;
+
+	ret = *pret;
+
+	/* reserve room for the total-length word copied at the end */
+	ret += sizeof(int);
+	if (ret >= count) {
+		pr_err("diag: In %s, invalid value for ret: %d, count: %zu\n",
+		       __func__, ret, count);
+		return -EINVAL;
+	}
+
+	mutex_lock(&entry->write_buf_mutex);
+	list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
+								buf_track) {
+
+		if ((ret + buf_entry->data_len) > count) {
+			/* no room left; drain the rest on a later pass */
+			drain_again = 1;
+			break;
+		}
+
+		list_del(&buf_entry->buf_track);
+		mutex_lock(&buf_entry->data_mutex);
+		if ((buf_entry->data_len > 0) &&
+		    (buf_entry->in_busy) &&
+		    (buf_entry->data)) {
+			if (copy_to_user(buf+ret, (void *)buf_entry->data,
+					 buf_entry->data_len))
+				goto drop;
+			ret += buf_entry->data_len;
+			total_data_len += buf_entry->data_len;
+			diag_ws_on_copy(DIAG_WS_DCI);
+drop:
+			/* reset the buffer whether or not the copy worked */
+			buf_entry->in_busy = 0;
+			buf_entry->data_len = 0;
+			buf_entry->in_list = 0;
+			if (buf_entry->buf_type == DCI_BUF_CMD) {
+				mutex_unlock(&buf_entry->data_mutex);
+				continue;
+			} else if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
+				diagmem_free(driver, buf_entry->data,
+					     POOL_TYPE_DCI);
+				buf_entry->data = NULL;
+				mutex_unlock(&buf_entry->data_mutex);
+				kfree(buf_entry);
+				continue;
+			}
+
+		}
+		mutex_unlock(&buf_entry->data_mutex);
+	}
+
+	if (total_data_len > 0) {
+		/* Copy the total data length */
+		/* offset 8 matches the caller's response layout — confirm */
+		COPY_USER_SPACE_OR_EXIT(buf+8, total_data_len, 4);
+		ret -= 4;
+	} else {
+		pr_debug("diag: In %s, Trying to copy ZERO bytes, total_data_len: %d\n",
+			__func__, total_data_len);
+	}
+
+	exit_stat = 0;
+exit:
+	entry->in_service = 0;
+	mutex_unlock(&entry->write_buf_mutex);
+	*pret = ret;
+	if (drain_again)
+		dci_drain_data(0);
+
+	return exit_stat;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+int diag_remote_init(void)
+{
+	diagmem_setsize(POOL_TYPE_MDM, itemsize_mdm, poolsize_mdm);
+	diagmem_setsize(POOL_TYPE_MDM2, itemsize_mdm, poolsize_mdm);
+	diagmem_setsize(POOL_TYPE_MDM_DCI, itemsize_mdm_dci, poolsize_mdm_dci);
+	diagmem_setsize(POOL_TYPE_MDM2_DCI, itemsize_mdm_dci,
+			poolsize_mdm_dci);
+	diagmem_setsize(POOL_TYPE_MDM_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
+	diagmem_setsize(POOL_TYPE_MDM2_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
+	diagmem_setsize(POOL_TYPE_MDM_DCI_WRITE, itemsize_mdm_dci_write,
+			poolsize_mdm_dci_write);
+	diagmem_setsize(POOL_TYPE_MDM2_DCI_WRITE, itemsize_mdm_dci_write,
+			poolsize_mdm_dci_write);
+	diagmem_setsize(POOL_TYPE_QSC_MUX, itemsize_qsc_usb,
+			poolsize_qsc_usb);
+	diag_md_mdm_init();
+	if (diag_dci_init_remote())
+		return -ENOMEM;
+	driver->hdlc_encode_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
+	if (!driver->hdlc_encode_buf)
+		return -ENOMEM;
+	driver->hdlc_encode_buf_len = 0;
+	return 0;
+}
+
+/* Free the shared HDLC encode buffer allocated by diag_remote_init(). */
+void diag_remote_exit(void)
+{
+	kfree(driver->hdlc_encode_buf);
+	driver->hdlc_encode_buf = NULL;
+}
+
+/*
+ * Send a raw packet to the remote (bridge) processor @proc. Depending on
+ * the APPS_DATA session mode and @hdlc_flag the payload is forwarded
+ * unwrapped, forwarded as-is (already HDLC encoded), or HDLC-encoded here
+ * into the shared driver->hdlc_encode_buf. Returns 0 or a negative errno.
+ */
+static int diag_send_raw_data_remote(int proc, void *buf, int len,
+				    uint8_t hdlc_flag)
+{
+	int err = 0;
+	int max_len = 0;
+	uint8_t retry_count = 0;
+	uint8_t max_retries = 3;
+	uint16_t payload = 0;
+	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+	int bridge_index = proc - 1;
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled = 0;
+
+	if (!buf)
+		return -EINVAL;
+
+	if (len <= 0) {
+		pr_err("diag: In %s, invalid len: %d", __func__, len);
+		return -EBADMSG;
+	}
+
+	/* Fix off-by-one: valid bridge indices are 0..NUM_REMOTE_DEV-1. */
+	if (bridge_index < 0 || bridge_index >= NUM_REMOTE_DEV) {
+		pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
+			bridge_index);
+		return -EINVAL;
+	}
+
+	/* wait (up to 3 x 10ms) for the previous encode buffer to drain */
+	do {
+		if (driver->hdlc_encode_buf_len == 0)
+			break;
+		usleep_range(10000, 10100);
+		retry_count++;
+	} while (retry_count < max_retries);
+
+	if (driver->hdlc_encode_buf_len != 0)
+		return -EAGAIN;
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_peripheral(APPS_DATA);
+	if (session_info)
+		hdlc_disabled = session_info->hdlc_disabled;
+	else
+		hdlc_disabled = driver->hdlc_disabled;
+	mutex_unlock(&driver->md_session_lock);
+	if (hdlc_disabled) {
+		if (len < 4) {
+			pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
+			__func__, len);
+			return -EBADMSG;
+		}
+		payload = *(uint16_t *)(buf + 2);
+		if (payload > DIAG_MAX_HDLC_BUF_SIZE) {
+			pr_err("diag: Dropping packet, payload size is %d\n",
+				payload);
+			return -EBADMSG;
+		}
+		/*
+		 * Adding 5 bytes for start (1 byte), version (1 byte),
+		 * payload (2 bytes) and end (1 byte)
+		 */
+		if (len == (payload + 5)) {
+			/*
+			 * Adding 4 bytes for start (1 byte), version (1 byte)
+			 * and payload (2 bytes)
+			 */
+			memcpy(driver->hdlc_encode_buf, buf + 4, payload);
+			/*
+			 * Fix: claim the encode buffer only after the packet
+			 * validates; setting the length before the check left
+			 * a stale nonzero length on the error path, making
+			 * every subsequent call fail with -EAGAIN.
+			 */
+			driver->hdlc_encode_buf_len = payload;
+			goto send_data;
+		} else {
+			pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
+			__func__, len);
+			return -EBADMSG;
+		}
+	}
+
+	if (hdlc_flag) {
+		/* caller already HDLC-encoded the data; forward verbatim */
+		if (DIAG_MAX_HDLC_BUF_SIZE < len) {
+			pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
+			       len);
+			return -EBADMSG;
+		}
+		driver->hdlc_encode_buf_len = len;
+		memcpy(driver->hdlc_encode_buf, buf, len);
+		goto send_data;
+	}
+
+	/*
+	 * The worst case length will be twice as the incoming packet length.
+	 * Add 3 bytes for CRC bytes (2 bytes) and delimiter (1 byte)
+	 */
+	max_len = (2 * len) + 3;
+	if (DIAG_MAX_HDLC_BUF_SIZE < max_len) {
+		pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
+		       max_len);
+		return -EBADMSG;
+	}
+
+	/* Perform HDLC encoding on incoming data */
+	send.state = DIAG_STATE_START;
+	send.pkt = (void *)(buf);
+	send.last = (void *)(buf + len - 1);
+	send.terminate = 1;
+
+	enc.dest = driver->hdlc_encode_buf;
+	enc.dest_last = (void *)(driver->hdlc_encode_buf + max_len - 1);
+	diag_hdlc_encode(&send, &enc);
+	driver->hdlc_encode_buf_len = (int)(enc.dest -
+					(void *)driver->hdlc_encode_buf);
+
+send_data:
+	err = diagfwd_bridge_write(bridge_index, driver->hdlc_encode_buf,
+				   driver->hdlc_encode_buf_len);
+	if (err) {
+		pr_err_ratelimited("diag: Error writing Callback packet to proc: %d, err: %d\n",
+				   proc, err);
+		/* release the encode buffer so later sends are not blocked */
+		driver->hdlc_encode_buf_len = 0;
+	}
+
+	return err;
+}
+
+/*
+ * Forward a raw user-space buffer to the remote (bridge) processor @proc.
+ * Marks the shared user_space_data buffer busy until the bridge write
+ * completes. Returns 0 or a negative errno.
+ */
+static int diag_process_userspace_remote(int proc, void *buf, int len)
+{
+	int bridge_index = proc - 1;
+
+	if (!buf || len < 0) {
+		pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
+		       __func__, buf, len);
+		return -EINVAL;
+	}
+
+	/* Fix off-by-one: valid bridge indices are 0..NUM_REMOTE_DEV-1. */
+	if (bridge_index < 0 || bridge_index >= NUM_REMOTE_DEV) {
+		pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
+		       bridge_index);
+		return -EINVAL;
+	}
+
+	driver->user_space_data_busy = 1;
+	return diagfwd_bridge_write(bridge_index, buf, len);
+}
+#else
+/* Stubs used when CONFIG_DIAGFWD_BRIDGE_CODE is compiled out. */
+int diag_remote_init(void)
+{
+	return 0;
+}
+
+void diag_remote_exit(void)
+{
+	return;
+}
+
+int diagfwd_bridge_init(void)
+{
+	return 0;
+}
+
+void diagfwd_bridge_exit(void)
+{
+	return;
+}
+
+/* No remote devices without bridge support. */
+uint16_t diag_get_remote_device_mask(void)
+{
+	return 0;
+}
+
+/* Remote sends are invalid without bridge support. */
+static int diag_send_raw_data_remote(int proc, void *buf, int len,
+				    uint8_t hdlc_flag)
+{
+	return -EINVAL;
+}
+
+/* Silently succeed; there is no remote processor to forward to. */
+static int diag_process_userspace_remote(int proc, void *buf, int len)
+{
+	return 0;
+}
+#endif
+
+/*
+ * Whitelist check for mask/command packets injected from userspace.
+ * Only a fixed set of command codes (and, for subsystem dispatch
+ * packets, a fixed set of subsystem/command pairs) may be forwarded.
+ * Returns 1 if the packet is allowed, 0 otherwise.
+ */
+static int mask_request_validate(unsigned char mask_buf[], int len)
+{
+	uint8_t packet_id;
+	uint8_t subsys_id;
+	uint16_t ss_cmd;
+
+	if (len <= 0)
+		return 0;
+	packet_id = mask_buf[0];
+
+	if (packet_id == DIAG_CMD_DIAG_SUBSYS_DELAY) {
+		/* Need cmd byte + subsys byte + 16-bit subsys command */
+		if (len < 2*sizeof(uint8_t) + sizeof(uint16_t))
+			return 0;
+		subsys_id = mask_buf[1];
+		/* NOTE(review): unaligned 16-bit load from a byte buffer --
+		 * confirm the target arch tolerates it */
+		ss_cmd = *(uint16_t *)(mask_buf + 2);
+		switch (subsys_id) {
+		case DIAG_SS_DIAG:
+			if ((ss_cmd == DIAG_SS_FILE_READ_MODEM) ||
+				(ss_cmd == DIAG_SS_FILE_READ_ADSP) ||
+				(ss_cmd == DIAG_SS_FILE_READ_WCNSS) ||
+				(ss_cmd == DIAG_SS_FILE_READ_SLPI) ||
+				(ss_cmd == DIAG_SS_FILE_READ_APPS))
+				return 1;
+			break;
+		default:
+			return 0;
+		}
+	} else if (packet_id == 0x4B) {
+		/* 0x4B: subsystem dispatch command */
+		if (len < 2*sizeof(uint8_t) + sizeof(uint16_t))
+			return 0;
+		subsys_id = mask_buf[1];
+		ss_cmd = *(uint16_t *)(mask_buf + 2);
+		/* Packets with SSID which are allowed */
+		switch (subsys_id) {
+		case 0x04: /* DIAG_SUBSYS_WCDMA */
+			if ((ss_cmd == 0) || (ss_cmd == 0xF))
+				return 1;
+			break;
+		case 0x08: /* DIAG_SUBSYS_GSM */
+			if ((ss_cmd == 0) || (ss_cmd == 0x1))
+				return 1;
+			break;
+		case 0x09: /* DIAG_SUBSYS_UMTS */
+		case 0x0F: /* DIAG_SUBSYS_CM */
+			if (ss_cmd == 0)
+				return 1;
+			break;
+		case 0x0C: /* DIAG_SUBSYS_OS */
+			if ((ss_cmd == 2) || (ss_cmd == 0x100))
+				return 1; /* MPU and APU */
+			break;
+		case 0x12: /* DIAG_SUBSYS_DIAG_SERV */
+			if ((ss_cmd == 0) || (ss_cmd == 0x6) || (ss_cmd == 0x7))
+				return 1;
+			else if (ss_cmd == 0x218) /* HDLC Disabled Command*/
+				return 0;
+			else if (ss_cmd == DIAG_GET_TIME_API)
+				return 1;
+			else if (ss_cmd == DIAG_SET_TIME_API)
+				return 1;
+			else if (ss_cmd == DIAG_SWITCH_COMMAND)
+				return 1;
+			else if (ss_cmd == DIAG_BUFFERING_MODE)
+				return 1;
+			break;
+		case 0x13: /* DIAG_SUBSYS_FS */
+			if ((ss_cmd == 0) || (ss_cmd == 0x1))
+				return 1;
+			break;
+		default:
+			return 0;
+			break;
+		}
+	} else {
+		/* Single-byte legacy commands with no subsystem field */
+		switch (packet_id) {
+		case 0x00:    /* Version Number */
+		case 0x0C:    /* CDMA status packet */
+		case 0x1C:    /* Diag Version */
+		case 0x1D:    /* Time Stamp */
+		case 0x60:    /* Event Report Control */
+		case 0x63:    /* Status snapshot */
+		case 0x73:    /* Logging Configuration */
+		case 0x7C:    /* Extended build ID */
+		case 0x7D:    /* Extended Message configuration */
+		case 0x81:    /* Event get mask */
+		case 0x82:    /* Set the event mask */
+			return 1;
+			break;
+		default:
+			return 0;
+			break;
+		}
+	}
+	return 0;
+}
+
+/* Reset memory-device session bookkeeping to the "no sessions" state. */
+static void diag_md_session_init(void)
+{
+	int idx;
+
+	mutex_init(&driver->md_session_lock);
+	driver->md_session_mode = DIAG_MD_NONE;
+	driver->md_session_mask = 0;
+	for (idx = 0; idx < NUM_MD_SESSIONS; idx++)
+		driver->md_session_map[idx] = NULL;
+}
+
+/*
+ * Tear down every memory-device session and release its private mask
+ * copies.  A single session can occupy several md_session_map slots
+ * (one per peripheral it owns; see diag_md_session_create), so every
+ * slot referencing a session is cleared before the session is freed --
+ * the original freed a shared session once per slot (double free).
+ */
+static void diag_md_session_exit(void)
+{
+	int i, j;
+	struct diag_md_session_t *session_info = NULL;
+
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		session_info = driver->md_session_map[i];
+		if (!session_info)
+			continue;
+		/* Drop all references to this session before freeing it */
+		for (j = i; j < NUM_MD_SESSIONS; j++) {
+			if (driver->md_session_map[j] == session_info)
+				driver->md_session_map[j] = NULL;
+		}
+		diag_log_mask_free(session_info->log_mask);
+		kfree(session_info->log_mask);
+		session_info->log_mask = NULL;
+		diag_msg_mask_free(session_info->msg_mask,
+			session_info);
+		kfree(session_info->msg_mask);
+		session_info->msg_mask = NULL;
+		diag_event_mask_free(session_info->event_mask);
+		kfree(session_info->event_mask);
+		session_info->event_mask = NULL;
+		kfree(session_info);
+		session_info = NULL;
+	}
+	mutex_destroy(&driver->md_session_lock);
+	driver->md_session_mask = 0;
+	driver->md_session_mode = DIAG_MD_NONE;
+}
+
+/*
+ * Create a memory-device logging session owned by the calling process
+ * (current->tgid) covering every peripheral in @peripheral_mask.  The
+ * session receives private copies of the global log/event/msg masks.
+ * @mode and @proc are accepted by the interface but the body always
+ * installs DIAG_MD_PERIPHERAL mode.  Returns 0 or a negative errno.
+ */
+int diag_md_session_create(int mode, int peripheral_mask, int proc)
+{
+	int i;
+	int err = 0;
+	struct diag_md_session_t *new_session = NULL;
+
+	/*
+	 * If a session is running with a peripheral mask and a new session
+	 * request comes in with same peripheral mask value then return
+	 * invalid param.
+	 * NOTE(review): this test runs before md_session_lock is taken, so
+	 * it can race with a concurrent create/close -- confirm acceptable.
+	 */
+	if (driver->md_session_mode == DIAG_MD_PERIPHERAL &&
+	    (driver->md_session_mask & peripheral_mask) != 0)
+		return -EINVAL;
+
+	mutex_lock(&driver->md_session_lock);
+	new_session = kzalloc(sizeof(struct diag_md_session_t), GFP_KERNEL);
+	if (!new_session) {
+		mutex_unlock(&driver->md_session_lock);
+		return -ENOMEM;
+	}
+
+	new_session->peripheral_mask = 0;
+	new_session->pid = current->tgid;
+	new_session->task = current;
+
+	new_session->log_mask = kzalloc(sizeof(struct diag_mask_info),
+					GFP_KERNEL);
+	if (!new_session->log_mask) {
+		err = -ENOMEM;
+		goto fail_peripheral;
+	}
+	new_session->event_mask = kzalloc(sizeof(struct diag_mask_info),
+					  GFP_KERNEL);
+	if (!new_session->event_mask) {
+		err = -ENOMEM;
+		goto fail_peripheral;
+	}
+	new_session->msg_mask = kzalloc(sizeof(struct diag_mask_info),
+					GFP_KERNEL);
+	if (!new_session->msg_mask) {
+		err = -ENOMEM;
+		goto fail_peripheral;
+	}
+
+	err = diag_log_mask_copy(new_session->log_mask, &log_mask);
+	if (err) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			 "return value of log copy. err %d\n", err);
+		goto fail_peripheral;
+	}
+	err = diag_event_mask_copy(new_session->event_mask, &event_mask);
+	if (err) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			 "return value of event copy. err %d\n", err);
+		goto fail_peripheral;
+	}
+	new_session->msg_mask_tbl_count = 0;
+	err = diag_msg_mask_copy(new_session, new_session->msg_mask,
+		&msg_mask);
+	if (err) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			 "return value of msg copy. err %d\n", err);
+		goto fail_peripheral;
+	}
+	/* Claim every requested peripheral slot for the new session */
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if ((MD_PERIPHERAL_MASK(i) & peripheral_mask) == 0)
+			continue;
+		if (driver->md_session_map[i] != NULL) {
+			DIAG_LOG(DIAG_DEBUG_USERSPACE,
+				 "another instance present for %d\n", i);
+			err = -EEXIST;
+			goto fail_peripheral;
+		}
+		new_session->peripheral_mask |= MD_PERIPHERAL_MASK(i);
+		driver->md_session_map[i] = new_session;
+		driver->md_session_mask |= MD_PERIPHERAL_MASK(i);
+	}
+	timer_setup(&new_session->hdlc_reset_timer,
+		diag_md_hdlc_reset_timer_func,
+		0);
+
+	driver->md_session_mode = DIAG_MD_PERIPHERAL;
+	mutex_unlock(&driver->md_session_lock);
+	DIAG_LOG(DIAG_DEBUG_USERSPACE,
+		 "created session in peripheral mode\n");
+	return 0;
+
+fail_peripheral:
+	/*
+	 * The claim loop above may already have published new_session in
+	 * md_session_map before hitting -EEXIST; strip those references
+	 * before freeing so no dangling pointer survives the kfree below
+	 * (use-after-free fix).
+	 */
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if (driver->md_session_map[i] == new_session) {
+			driver->md_session_map[i] = NULL;
+			driver->md_session_mask &= ~MD_PERIPHERAL_MASK(i);
+		}
+	}
+	diag_log_mask_free(new_session->log_mask);
+	kfree(new_session->log_mask);
+	new_session->log_mask = NULL;
+	diag_event_mask_free(new_session->event_mask);
+	kfree(new_session->event_mask);
+	new_session->event_mask = NULL;
+	diag_msg_mask_free(new_session->msg_mask,
+		new_session);
+	kfree(new_session->msg_mask);
+	new_session->msg_mask = NULL;
+	kfree(new_session);
+	new_session = NULL;
+	mutex_unlock(&driver->md_session_lock);
+	return err;
+}
+
+/*
+ * Destroy the md session owned by @pid: detach it from every map slot,
+ * free its private mask copies, stop its HDLC reset timer, and drop
+ * back to DIAG_MD_NONE when no other session remains.
+ * NOTE(review): callers appear to invoke this with md_session_lock held
+ * (see diag_md_session_check) -- confirm for all call sites.
+ */
+static void diag_md_session_close(int pid)
+{
+	int i;
+	uint8_t found = 0;
+	struct diag_md_session_t *session_info = NULL;
+
+	session_info = diag_md_session_get_pid(pid);
+	if (!session_info)
+		return;
+
+	/* Unhook the session from every peripheral slot before freeing */
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if (driver->md_session_map[i] != session_info)
+			continue;
+		driver->md_session_map[i] = NULL;
+		driver->md_session_mask &= ~session_info->peripheral_mask;
+	}
+	diag_log_mask_free(session_info->log_mask);
+	kfree(session_info->log_mask);
+	session_info->log_mask = NULL;
+	diag_msg_mask_free(session_info->msg_mask,
+			  session_info);
+	kfree(session_info->msg_mask);
+	session_info->msg_mask = NULL;
+	diag_event_mask_free(session_info->event_mask);
+	kfree(session_info->event_mask);
+	session_info->event_mask = NULL;
+	del_timer(&session_info->hdlc_reset_timer);
+
+	/* Any session left? If so the driver stays in peripheral mode */
+	for (i = 0; i < NUM_MD_SESSIONS && !found; i++) {
+		if (driver->md_session_map[i] != NULL)
+			found = 1;
+	}
+
+	driver->md_session_mode = (found) ? DIAG_MD_PERIPHERAL : DIAG_MD_NONE;
+	kfree(session_info);
+	session_info = NULL;
+	DIAG_LOG(DIAG_DEBUG_USERSPACE, "cleared up session\n");
+}
+
+/* Return the md session owned by process @pid, or NULL if none exists. */
+struct diag_md_session_t *diag_md_session_get_pid(int pid)
+{
+	int slot;
+	struct diag_md_session_t *entry;
+
+	if (pid <= 0)
+		return NULL;
+
+	for (slot = 0; slot < NUM_MD_SESSIONS; slot++) {
+		entry = driver->md_session_map[slot];
+		if (entry && entry->pid == pid)
+			return entry;
+	}
+	return NULL;
+}
+
+/* Look up the md session owning @peripheral; NULL when out of range. */
+struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral)
+{
+	return (peripheral < NUM_MD_SESSIONS) ?
+		driver->md_session_map[peripheral] : NULL;
+}
+
+/*
+ * Move the peripherals in @peripheral_mask belonging to @pid's session
+ * either out of memory-device mode (req_mode == DIAG_USB_MODE) or into
+ * it (req_mode == DIAG_MEMORY_DEVICE_MODE).
+ * Returns 0 on success, -EINVAL on bad pid/mode or on a map conflict.
+ */
+static int diag_md_peripheral_switch(int pid,
+				int peripheral_mask, int req_mode) {
+	int i, bit = 0;
+	struct diag_md_session_t *session_info = NULL;
+
+	session_info = diag_md_session_get_pid(pid);
+	if (!session_info)
+		return -EINVAL;
+	/*
+	 * Bug fix: the original used '||' here, which is true for every
+	 * req_mode and made this function unconditionally return -EINVAL.
+	 */
+	if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
+		return -EINVAL;
+
+	/*
+	 * check that md_session_map for i == session_info,
+	 * if not then race condition occurred and bail
+	 */
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		bit = MD_PERIPHERAL_MASK(i) & peripheral_mask;
+		if (!bit)
+			continue;
+		if (req_mode == DIAG_USB_MODE) {
+			if (driver->md_session_map[i] != session_info)
+				return -EINVAL;
+			driver->md_session_map[i] = NULL;
+			driver->md_session_mask &= ~bit;
+			session_info->peripheral_mask &= ~bit;
+
+		} else {
+			if (driver->md_session_map[i] != NULL)
+				return -EINVAL;
+			driver->md_session_map[i] = session_info;
+			driver->md_session_mask |= bit;
+			session_info->peripheral_mask |= bit;
+
+		}
+	}
+
+	driver->md_session_mode = DIAG_MD_PERIPHERAL;
+	DIAG_LOG(DIAG_DEBUG_USERSPACE, "Changed Peripherals:0x%x to mode:%d\n",
+		peripheral_mask, req_mode);
+	/* Bug fix: the original fell off the end of a non-void function. */
+	return 0;
+}
+
+/*
+ * Decide whether a logging-mode switch from @curr_mode to @req_mode is
+ * permitted for the calling process and perform the md-session
+ * bookkeeping (create / close / per-peripheral switch).  On success
+ * *change_mode tells the caller whether the mux must actually be
+ * reconfigured.  Returns 0 or a negative errno.
+ */
+static int diag_md_session_check(int curr_mode, int req_mode,
+				 const struct diag_logging_mode_param_t *param,
+				 uint8_t *change_mode)
+{
+	int i, bit = 0, err = 0, peripheral_mask = 0;
+	int change_mask = 0;
+	struct diag_md_session_t *session_info = NULL;
+
+	if (!param || !change_mode)
+		return -EIO;
+
+	*change_mode = 0;
+
+	switch (curr_mode) {
+	case DIAG_USB_MODE:
+	case DIAG_MEMORY_DEVICE_MODE:
+	case DIAG_MULTI_MODE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
+		return -EINVAL;
+
+	if (req_mode == DIAG_USB_MODE) {
+		if (curr_mode == DIAG_USB_MODE)
+			return 0;
+		mutex_lock(&driver->md_session_lock);
+		if (driver->md_session_mode == DIAG_MD_NONE
+		    && driver->md_session_mask == 0 && driver->logging_mask) {
+			*change_mode = 1;
+			mutex_unlock(&driver->md_session_lock);
+			return 0;
+		}
+		/*
+		 * curr_mode is either DIAG_MULTI_MODE or DIAG_MD_MODE
+		 * Check if requested peripherals are already in usb mode
+		 */
+		for (i = 0; i < NUM_MD_SESSIONS; i++) {
+			bit = MD_PERIPHERAL_MASK(i) & param->peripheral_mask;
+			if (!bit)
+				continue;
+			if (bit & driver->logging_mask)
+				change_mask |= bit;
+		}
+		if (!change_mask) {
+			mutex_unlock(&driver->md_session_lock);
+			return 0;
+		}
+
+		/*
+		 * Change is needed. Check if this md_session has set all the
+		 * requested peripherals. If another md session set a requested
+		 * peripheral then we cannot switch that peripheral to USB.
+		 * If this session owns all the requested peripherals, then
+		 * call function to switch the modes/masks for the md_session
+		 */
+		session_info = diag_md_session_get_pid(current->tgid);
+		if (!session_info) {
+			*change_mode = 1;
+			mutex_unlock(&driver->md_session_lock);
+			return 0;
+		}
+		peripheral_mask = session_info->peripheral_mask;
+		if ((change_mask & peripheral_mask)
+							!= change_mask) {
+			DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			    "Another MD Session owns a requested peripheral\n");
+			mutex_unlock(&driver->md_session_lock);
+			return -EINVAL;
+		}
+		*change_mode = 1;
+
+		/* If all peripherals are being set to USB Mode, call close */
+		if (~change_mask & peripheral_mask) {
+			err = diag_md_peripheral_switch(current->tgid,
+					change_mask, DIAG_USB_MODE);
+		} else
+			diag_md_session_close(current->tgid);
+		mutex_unlock(&driver->md_session_lock);
+		return err;
+
+	} else if (req_mode == DIAG_MEMORY_DEVICE_MODE) {
+		/*
+		 * Get bit mask that represents what peripherals already have
+		 * been set. Check that requested peripherals already set are
+		 * owned by this md session
+		 */
+		mutex_lock(&driver->md_session_lock);
+		change_mask = driver->md_session_mask & param->peripheral_mask;
+		session_info = diag_md_session_get_pid(current->tgid);
+
+		if (session_info) {
+			if ((session_info->peripheral_mask & change_mask)
+							!= change_mask) {
+				DIAG_LOG(DIAG_DEBUG_USERSPACE,
+				    "Another MD Session owns a requested peripheral\n");
+				mutex_unlock(&driver->md_session_lock);
+				return -EINVAL;
+			}
+			/*
+			 * NOTE(review): this passes DIAG_USB_MODE while
+			 * switching peripherals INTO memory-device mode,
+			 * which detaches them from the session -- looks
+			 * suspect, confirm DIAG_MEMORY_DEVICE_MODE was not
+			 * intended here.
+			 */
+			err = diag_md_peripheral_switch(current->tgid,
+					change_mask, DIAG_USB_MODE);
+			mutex_unlock(&driver->md_session_lock);
+		} else {
+			mutex_unlock(&driver->md_session_lock);
+			if (change_mask) {
+				DIAG_LOG(DIAG_DEBUG_USERSPACE,
+				    "Another MD Session owns a requested peripheral\n");
+				return -EINVAL;
+			}
+			err = diag_md_session_create(DIAG_MD_PERIPHERAL,
+				param->peripheral_mask, DIAG_LOCAL_PROC);
+		}
+		*change_mode = 1;
+		return err;
+	}
+	return -EINVAL;
+}
+
+/*
+ * Convert the userspace DIAG_CON_* connection mask into the kernel's
+ * internal peripheral bit mask (bit positions APPS_DATA/PERIPHERAL_*).
+ */
+static uint32_t diag_translate_mask(uint32_t peripheral_mask)
+{
+	uint32_t fwd_mask = 0;
+
+	fwd_mask |= (peripheral_mask & DIAG_CON_APSS) ?
+			(1 << APPS_DATA) : 0;
+	fwd_mask |= (peripheral_mask & DIAG_CON_MPSS) ?
+			(1 << PERIPHERAL_MODEM) : 0;
+	fwd_mask |= (peripheral_mask & DIAG_CON_LPASS) ?
+			(1 << PERIPHERAL_LPASS) : 0;
+	fwd_mask |= (peripheral_mask & DIAG_CON_WCNSS) ?
+			(1 << PERIPHERAL_WCNSS) : 0;
+	fwd_mask |= (peripheral_mask & DIAG_CON_SENSORS) ?
+			(1 << PERIPHERAL_SENSORS) : 0;
+
+	return fwd_mask;
+}
+
+/*
+ * Handle DIAG_IOCTL_SWITCH_LOGGING: translate the userspace peripheral
+ * mask, map the requested transport to a logging mode, consult the
+ * md-session state, and if a real switch is needed repoint the diag mux
+ * and update real-time votes.  Returns 0 or a negative errno.
+ */
+static int diag_switch_logging(struct diag_logging_mode_param_t *param)
+{
+	int new_mode;
+	int curr_mode;
+	int err = 0;
+	uint8_t do_switch = 1;
+	uint32_t peripheral_mask = 0;
+
+	if (!param)
+		return -EINVAL;
+
+	if (!param->peripheral_mask) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			"asking for mode switch with no peripheral mask set\n");
+		return -EINVAL;
+	}
+
+	/* Rewrite the DIAG_CON_* mask into internal peripheral bits */
+	peripheral_mask = diag_translate_mask(param->peripheral_mask);
+	param->peripheral_mask = peripheral_mask;
+
+	/* All non-USB transports are memory-device mode internally */
+	switch (param->req_mode) {
+	case CALLBACK_MODE:
+	case UART_MODE:
+	case SOCKET_MODE:
+	case MEMORY_DEVICE_MODE:
+		new_mode = DIAG_MEMORY_DEVICE_MODE;
+		break;
+	case USB_MODE:
+		new_mode = DIAG_USB_MODE;
+		break;
+	default:
+		pr_err("diag: In %s, request to switch to invalid mode: %d\n",
+		       __func__, param->req_mode);
+		return -EINVAL;
+	}
+
+	curr_mode = driver->logging_mode;
+	DIAG_LOG(DIAG_DEBUG_USERSPACE,
+		"request to switch logging from %d mask:%0x to %d mask:%0x\n",
+		curr_mode, driver->md_session_mask, new_mode, peripheral_mask);
+
+	err = diag_md_session_check(curr_mode, new_mode, param, &do_switch);
+	if (err) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			 "err from diag_md_session_check, err: %d\n", err);
+		return err;
+	}
+
+	if (do_switch == 0) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			 "not switching modes c: %d n: %d\n",
+			 curr_mode, new_mode);
+		return 0;
+	}
+
+	diag_ws_reset(DIAG_WS_MUX);
+	err = diag_mux_switch_logging(&new_mode, &peripheral_mask);
+	if (err) {
+		pr_err("diag: In %s, unable to switch mode from %d to %d, err: %d\n",
+		       __func__, curr_mode, new_mode, err);
+		driver->logging_mode = curr_mode;
+		goto fail;
+	}
+	driver->logging_mode = new_mode;
+	driver->logging_mask = peripheral_mask;
+	DIAG_LOG(DIAG_DEBUG_USERSPACE,
+		"Switch logging to %d mask:%0x\n", new_mode, peripheral_mask);
+
+	/* Update to take peripheral_mask */
+	if (new_mode != DIAG_MEMORY_DEVICE_MODE) {
+		diag_update_real_time_vote(DIAG_PROC_MEMORY_DEVICE,
+					   MODE_REALTIME, ALL_PROC);
+	} else {
+		diag_update_proc_vote(DIAG_PROC_MEMORY_DEVICE, VOTE_UP,
+				      ALL_PROC);
+	}
+
+	if (!(new_mode == DIAG_MEMORY_DEVICE_MODE &&
+	      curr_mode == DIAG_USB_MODE)) {
+		queue_work(driver->diag_real_time_wq,
+			   &driver->diag_real_time_work);
+	}
+
+	return 0;
+fail:
+	return err;
+}
+
+/* DIAG_IOCTL_DCI_REG: copy registration params in and register client. */
+static int diag_ioctl_dci_reg(unsigned long ioarg)
+{
+	struct diag_dci_reg_tbl_t dci_reg_params;
+
+	if (copy_from_user(&dci_reg_params, (void __user *)ioarg,
+				sizeof(dci_reg_params)))
+		return -EFAULT;
+
+	return diag_dci_register_client(&dci_reg_params);
+}
+
+/* DIAG_IOCTL_DCI_HEALTH_STATS: copy the client's health stats back out. */
+static int diag_ioctl_dci_health_stats(unsigned long ioarg)
+{
+	struct diag_dci_health_stats_proc stats;
+	int ret;
+
+	if (copy_from_user(&stats, (void __user *)ioarg, sizeof(stats)))
+		return -EFAULT;
+
+	ret = diag_dci_copy_health_stats(&stats);
+	if (ret != DIAG_DCI_NO_ERROR)
+		return ret;
+
+	if (copy_to_user((void __user *)ioarg, &stats, sizeof(stats)))
+		return -EFAULT;
+
+	return ret;
+}
+
+/* DIAG_IOCTL_DCI_LOG_STATUS: report whether a log code is enabled. */
+static int diag_ioctl_dci_log_status(unsigned long ioarg)
+{
+	struct diag_log_event_stats stats;
+	struct diag_dci_client_tbl *entry;
+
+	if (copy_from_user(&stats, (void __user *)ioarg, sizeof(stats)))
+		return -EFAULT;
+
+	entry = diag_dci_get_client_entry(stats.client_id);
+	if (!entry)
+		return DIAG_DCI_NOT_SUPPORTED;
+
+	stats.is_set = diag_dci_query_log_mask(entry, stats.code);
+	if (copy_to_user((void __user *)ioarg, &stats, sizeof(stats)))
+		return -EFAULT;
+
+	return DIAG_DCI_NO_ERROR;
+}
+
+/* DIAG_IOCTL_DCI_EVENT_STATUS: report whether an event code is enabled. */
+static int diag_ioctl_dci_event_status(unsigned long ioarg)
+{
+	struct diag_log_event_stats stats;
+	struct diag_dci_client_tbl *entry;
+
+	if (copy_from_user(&stats, (void __user *)ioarg, sizeof(stats)))
+		return -EFAULT;
+
+	entry = diag_dci_get_client_entry(stats.client_id);
+	if (!entry)
+		return DIAG_DCI_NOT_SUPPORTED;
+
+	stats.is_set = diag_dci_query_event_mask(entry, stats.code);
+	if (copy_to_user((void __user *)ioarg, &stats, sizeof(stats)))
+		return -EFAULT;
+
+	return DIAG_DCI_NO_ERROR;
+}
+
+/*
+ * DIAG_IOCTL_LSM_DEINIT: flag the caller's client slot with DEINIT_TYPE
+ * and wake readers so the client library can tear itself down.
+ * Returns 1 on success, -EINVAL if the caller is not a registered client.
+ */
+static int diag_ioctl_lsm_deinit(void)
+{
+	int idx;
+
+	mutex_lock(&driver->diagchar_mutex);
+	for (idx = 0; idx < driver->num_clients; idx++) {
+		if (driver->client_map[idx].pid == current->tgid)
+			break;
+	}
+	if (idx == driver->num_clients) {
+		mutex_unlock(&driver->diagchar_mutex);
+		return -EINVAL;
+	}
+	/* Only bump the notification count on the first deinit request */
+	if (!(driver->data_ready[idx] & DEINIT_TYPE)) {
+		driver->data_ready[idx] |= DEINIT_TYPE;
+		atomic_inc(&driver->data_ready_notif[idx]);
+	}
+	mutex_unlock(&driver->diagchar_mutex);
+	wake_up_interruptible(&driver->wait_q);
+
+	return 1;
+}
+
+/*
+ * DIAG_IOCTL_VOTE_REAL_TIME: record a real-time/buffering vote.  DCI
+ * votes are attributed to the DCI client's token; for all other procs
+ * vote.client_id is reused as the target proc id.
+ */
+static int diag_ioctl_vote_real_time(unsigned long ioarg)
+{
+	int real_time = 0;
+	int temp_proc = ALL_PROC;
+	struct real_time_vote_t vote;
+	struct diag_dci_client_tbl *dci_client = NULL;
+
+	if (copy_from_user(&vote, (void __user *)ioarg,
+			sizeof(struct real_time_vote_t)))
+		return -EFAULT;
+
+	if (vote.proc > DIAG_PROC_MEMORY_DEVICE ||
+		vote.real_time_vote > MODE_UNKNOWN ||
+		vote.client_id < 0) {
+		pr_err("diag: %s, invalid params, proc: %d, vote: %d, client_id: %d\n",
+			__func__, vote.proc, vote.real_time_vote,
+			vote.client_id);
+		return -EINVAL;
+	}
+
+	/*
+	 * Mark an update in flight so diag_ioctl_get_real_time() backs off.
+	 * NOTE(review): only the DCI error path decrements here; presumably
+	 * the real-time work decrements otherwise -- confirm.
+	 */
+	driver->real_time_update_busy++;
+	if (vote.proc == DIAG_PROC_DCI) {
+		dci_client = diag_dci_get_client_entry(vote.client_id);
+		if (!dci_client) {
+			driver->real_time_update_busy--;
+			return DIAG_DCI_NOT_SUPPORTED;
+		}
+		diag_dci_set_real_time(dci_client, vote.real_time_vote);
+		real_time = diag_dci_get_cumulative_real_time(
+					dci_client->client_info.token);
+		diag_update_real_time_vote(vote.proc, real_time,
+					dci_client->client_info.token);
+	} else {
+		real_time = vote.real_time_vote;
+		/* client_id doubles as the proc id for non-DCI votes */
+		temp_proc = vote.client_id;
+		diag_update_real_time_vote(vote.proc, real_time,
+					   temp_proc);
+	}
+	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+	return 0;
+}
+
+/*
+ * DIAG_IOCTL_GET_REAL_TIME: report the current real-time mode for the
+ * requested proc.  Waits (up to 3 retries of 5x10ms) for any pending
+ * vote updates to settle before reading; returns -EAGAIN if they don't.
+ */
+static int diag_ioctl_get_real_time(unsigned long ioarg)
+{
+	int i;
+	int retry_count = 0;
+	int timer = 0;
+	struct real_time_query_t rt_query;
+
+	if (copy_from_user(&rt_query, (void __user *)ioarg,
+					sizeof(struct real_time_query_t)))
+		return -EFAULT;
+	while (retry_count < 3) {
+		if (driver->real_time_update_busy > 0) {
+			retry_count++;
+			/*
+			 * The value 10000 was chosen empirically as an
+			 * optimum value in order to give the work in
+			 * diag_real_time_wq to complete processing.
+			 */
+			for (timer = 0; timer < 5; timer++)
+				usleep_range(10000, 10100);
+		} else {
+			break;
+		}
+	}
+
+	if (driver->real_time_update_busy > 0)
+		return -EAGAIN;
+
+	if (rt_query.proc < 0 || rt_query.proc >= DIAG_NUM_PROC) {
+		pr_err("diag: Invalid proc %d in %s\n", rt_query.proc,
+		       __func__);
+		return -EINVAL;
+	}
+	rt_query.real_time = driver->real_time_mode[rt_query.proc];
+	/*
+	 * For the local processor, if any of the peripherals is in buffering
+	 * mode, overwrite the value of real time with UNKNOWN_MODE
+	 */
+	if (rt_query.proc == DIAG_LOCAL_PROC) {
+		for (i = 0; i < NUM_PERIPHERALS; i++) {
+			if (!driver->feature[i].peripheral_buffering)
+				continue;
+			switch (driver->buffering_mode[i].mode) {
+			case DIAG_BUFFERING_MODE_CIRCULAR:
+			case DIAG_BUFFERING_MODE_THRESHOLD:
+				rt_query.real_time = MODE_UNKNOWN;
+				break;
+			}
+		}
+	}
+
+	if (copy_to_user((void __user *)ioarg, &rt_query,
+			 sizeof(struct real_time_query_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/* DIAG_IOCTL_PERIPHERAL_BUF_CONFIG: configure a peripheral's buffering. */
+static int diag_ioctl_set_buffering_mode(unsigned long ioarg)
+{
+	struct diag_buffering_mode_t mode_params;
+
+	if (copy_from_user(&mode_params, (void __user *)ioarg,
+			   sizeof(mode_params)))
+		return -EFAULT;
+
+	if (mode_params.peripheral >= NUM_PERIPHERALS)
+		return -EINVAL;
+
+	/* Remember that buffering was explicitly configured */
+	mutex_lock(&driver->mode_lock);
+	driver->buffering_flag[mode_params.peripheral] = 1;
+	mutex_unlock(&driver->mode_lock);
+
+	return diag_send_peripheral_buffering_mode(&mode_params);
+}
+
+/* DIAG_IOCTL_PERIPHERAL_BUF_DRAIN: ask a peripheral to flush its buffer. */
+static int diag_ioctl_peripheral_drain_immediate(unsigned long ioarg)
+{
+	uint8_t periph;
+
+	if (copy_from_user(&periph, (void __user *)ioarg, sizeof(periph)))
+		return -EFAULT;
+
+	if (periph >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+		       periph);
+		return -EINVAL;
+	}
+
+	if (!driver->feature[periph].peripheral_buffering) {
+		pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
+		       __func__, periph);
+		return -EIO;
+	}
+
+	return diag_send_peripheral_drain_immediate(periph);
+}
+
+/* DIAG_IOCTL_DCI_SUPPORT: return the list of DCI-capable peripherals. */
+static int diag_ioctl_dci_support(unsigned long ioarg)
+{
+	struct diag_dci_peripherals_t dci_support;
+	int ret;
+
+	if (copy_from_user(&dci_support, (void __user *)ioarg,
+				sizeof(dci_support)))
+		return -EFAULT;
+
+	ret = diag_dci_get_support_list(&dci_support);
+	if (ret != DIAG_DCI_NO_ERROR)
+		return ret;
+
+	if (copy_to_user((void __user *)ioarg, &dci_support,
+			sizeof(dci_support)))
+		return -EFAULT;
+
+	return ret;
+}
+
+/*
+ * DIAG_IOCTL_HDLC_TOGGLE: enable/disable HDLC encoding.  Applies to the
+ * caller's md session when one exists, otherwise to the global driver
+ * state.  Lock order (hdlc_disable_mutex, then md_session_lock) must be
+ * preserved.
+ */
+static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
+{
+	uint8_t hdlc_support;
+	struct diag_md_session_t *session_info = NULL;
+
+	if (copy_from_user(&hdlc_support, (void __user *)ioarg,
+				sizeof(uint8_t)))
+		return -EFAULT;
+	mutex_lock(&driver->hdlc_disable_mutex);
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_pid(current->tgid);
+	if (session_info)
+		session_info->hdlc_disabled = hdlc_support;
+	else
+		driver->hdlc_disabled = hdlc_support;
+	mutex_unlock(&driver->md_session_lock);
+	mutex_unlock(&driver->hdlc_disable_mutex);
+	/* Tell memory-device clients the framing changed */
+	diag_update_md_clients(HDLC_SUPPORT_TYPE);
+
+	return 0;
+}
+
+/*
+ * DIAG_IOCTL_REGISTER_CALLBACK: validate a callback registration
+ * request.  Succeeds (0) after validation; refused with -EIO while a
+ * memory-device session is active.
+ */
+static int diag_ioctl_register_callback(unsigned long ioarg)
+{
+	struct diag_callback_reg_t reg;
+
+	if (copy_from_user(&reg, (void __user *)ioarg, sizeof(reg)))
+		return -EFAULT;
+
+	if (reg.proc < 0 || reg.proc >= DIAG_NUM_PROC) {
+		pr_err("diag: In %s, invalid proc %d for callback registration\n",
+		       __func__, reg.proc);
+		return -EINVAL;
+	}
+
+	if (driver->md_session_mode == DIAG_MD_PERIPHERAL)
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * Copy a userspace command-registration table and register each entry
+ * for the calling process (APPS_DATA).  Returns 0 or a negative errno;
+ * stops at the first entry that fails to register.
+ */
+static int diag_cmd_register_tbl(struct diag_cmd_reg_tbl_t *reg_tbl)
+{
+	int i;
+	int err = 0;
+	uint32_t count = 0;
+	struct diag_cmd_reg_entry_t *entries = NULL;
+	const uint16_t entry_len = sizeof(struct diag_cmd_reg_entry_t);
+
+
+	if (!reg_tbl) {
+		pr_err("diag: In %s, invalid registration table\n", __func__);
+		return -EINVAL;
+	}
+
+	count = reg_tbl->count;
+	if ((UINT_MAX / entry_len) < count) {
+		pr_warn("diag: In %s, possible integer overflow.\n", __func__);
+		return -EFAULT;
+	}
+
+	/* kcalloc re-checks count * entry_len for overflow internally */
+	entries = kcalloc(count, entry_len, GFP_KERNEL);
+	if (!entries) {
+		pr_err("diag: In %s, unable to create memory for registration table entries\n",
+		       __func__);
+		return -ENOMEM;
+	}
+
+	/* copy_from_user returns the number of bytes NOT copied */
+	err = copy_from_user(entries, reg_tbl->entries, count * entry_len);
+	if (err) {
+		pr_err("diag: In %s, error copying data from userspace, err: %d\n",
+		       __func__, err);
+		kfree(entries);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < count; i++) {
+		err = diag_cmd_add_reg(&entries[i], APPS_DATA, current->tgid);
+		if (err) {
+			pr_err("diag: In %s, unable to register command, err: %d\n",
+			       __func__, err);
+			break;
+		}
+	}
+
+	kfree(entries);
+	return err;
+}
+
+/* DIAG_IOCTL_COMMAND_REG: copy the table header in and register it. */
+static int diag_ioctl_cmd_reg(unsigned long ioarg)
+{
+	struct diag_cmd_reg_tbl_t tbl;
+
+	if (copy_from_user(&tbl, (void __user *)ioarg, sizeof(tbl)))
+		return -EFAULT;
+
+	return diag_cmd_register_tbl(&tbl);
+}
+
+/* DIAG_IOCTL_COMMAND_DEREG: drop all command registrations of caller. */
+static int diag_ioctl_cmd_dereg(void)
+{
+	diag_cmd_remove_reg_by_pid(current->tgid);
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * 32-bit userspace layout of struct diag_cmd_reg_tbl_t.
+ * @sync_obj_name: name of the synchronization object associated with this proc
+ * @count: number of entries in the bind
+ * @entries: the actual packet registrations (32-bit user pointer)
+ */
+struct diag_cmd_reg_tbl_compat_t {
+	char sync_obj_name[MAX_SYNC_OBJ_NAME_SIZE];
+	uint32_t count;
+	compat_uptr_t entries;
+};
+
+/*
+ * Compat path of DIAG_IOCTL_COMMAND_REG: widen the 32-bit table layout
+ * into the native struct, then register it.
+ */
+static int diag_ioctl_cmd_reg_compat(unsigned long ioarg)
+{
+	struct diag_cmd_reg_tbl_compat_t compat_tbl;
+	struct diag_cmd_reg_tbl_t tbl;
+
+	if (copy_from_user(&compat_tbl, (void __user *)ioarg,
+			   sizeof(compat_tbl)))
+		return -EFAULT;
+
+	strlcpy(tbl.sync_obj_name, compat_tbl.sync_obj_name,
+		MAX_SYNC_OBJ_NAME_SIZE);
+	tbl.count = compat_tbl.count;
+	tbl.entries = (struct diag_cmd_reg_entry_t *)
+		      (uintptr_t)compat_tbl.entries;
+
+	return diag_cmd_register_tbl(&tbl);
+}
+
+/*
+ * 32-bit compat ioctl entry point.  Mirrors diagchar_ioctl() except that
+ * DIAG_IOCTL_COMMAND_REG goes through the compat table layout
+ * (compat_uptr_t entries pointer).
+ */
+long diagchar_compat_ioctl(struct file *filp,
+			   unsigned int iocmd, unsigned long ioarg)
+{
+	int result = -EINVAL;
+	int client_id = 0;
+	uint16_t delayed_rsp_id = 0;
+	uint16_t remote_dev;
+	struct diag_dci_client_tbl *dci_client = NULL;
+	struct diag_logging_mode_param_t mode_param;
+
+	switch (iocmd) {
+	case DIAG_IOCTL_COMMAND_REG:
+		result = diag_ioctl_cmd_reg_compat(ioarg);
+		break;
+	case DIAG_IOCTL_COMMAND_DEREG:
+		result = diag_ioctl_cmd_dereg();
+		break;
+	case DIAG_IOCTL_GET_DELAYED_RSP_ID:
+		delayed_rsp_id = diag_get_next_delayed_rsp_id();
+		if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
+				 sizeof(uint16_t)))
+			result = -EFAULT;
+		else
+			result = 0;
+		break;
+	case DIAG_IOCTL_DCI_REG:
+		result = diag_ioctl_dci_reg(ioarg);
+		break;
+	case DIAG_IOCTL_DCI_DEINIT:
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		dci_client = diag_dci_get_client_entry(client_id);
+		if (!dci_client) {
+			mutex_unlock(&driver->dci_mutex);
+			return DIAG_DCI_NOT_SUPPORTED;
+		}
+		result = diag_dci_deinit_client(dci_client);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_SUPPORT:
+		result = diag_ioctl_dci_support(ioarg);
+		break;
+	case DIAG_IOCTL_DCI_HEALTH_STATS:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_health_stats(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_LOG_STATUS:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_log_status(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_EVENT_STATUS:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_event_status(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_CLEAR_LOGS:
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		result = diag_dci_clear_log_mask(client_id);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_CLEAR_EVENTS:
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user(&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		result = diag_dci_clear_event_mask(client_id);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_LSM_DEINIT:
+		result = diag_ioctl_lsm_deinit();
+		break;
+	case DIAG_IOCTL_SWITCH_LOGGING:
+		if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+				   sizeof(mode_param)))
+			return -EFAULT;
+		mutex_lock(&driver->diagchar_mutex);
+		result = diag_switch_logging(&mode_param);
+		mutex_unlock(&driver->diagchar_mutex);
+		break;
+	case DIAG_IOCTL_REMOTE_DEV:
+		remote_dev = diag_get_remote_device_mask();
+		if (copy_to_user((void __user *)ioarg, &remote_dev,
+			sizeof(uint16_t)))
+			result = -EFAULT;
+		else
+			result = 1;
+		break;
+	case DIAG_IOCTL_VOTE_REAL_TIME:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_vote_real_time(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_GET_REAL_TIME:
+		result = diag_ioctl_get_real_time(ioarg);
+		break;
+	case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
+		result = diag_ioctl_set_buffering_mode(ioarg);
+		break;
+	case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
+		result = diag_ioctl_peripheral_drain_immediate(ioarg);
+		break;
+	case DIAG_IOCTL_REGISTER_CALLBACK:
+		result = diag_ioctl_register_callback(ioarg);
+		break;
+	case DIAG_IOCTL_HDLC_TOGGLE:
+		result = diag_ioctl_hdlc_toggle(ioarg);
+		break;
+	}
+	/* Unknown iocmd: result keeps its -EINVAL initializer */
+	return result;
+}
+#endif
+
+/*
+ * Native ioctl entry point: dispatch each DIAG_IOCTL_* command to its
+ * helper, taking the appropriate mutex around DCI and logging-switch
+ * operations.
+ */
+long diagchar_ioctl(struct file *filp,
+			   unsigned int iocmd, unsigned long ioarg)
+{
+	int result = -EINVAL;
+	int client_id = 0;
+	uint16_t delayed_rsp_id;
+	uint16_t remote_dev;
+	struct diag_dci_client_tbl *dci_client = NULL;
+	struct diag_logging_mode_param_t mode_param;
+
+	switch (iocmd) {
+	case DIAG_IOCTL_COMMAND_REG:
+		result = diag_ioctl_cmd_reg(ioarg);
+		break;
+	case DIAG_IOCTL_COMMAND_DEREG:
+		result = diag_ioctl_cmd_dereg();
+		break;
+	case DIAG_IOCTL_GET_DELAYED_RSP_ID:
+		delayed_rsp_id = diag_get_next_delayed_rsp_id();
+		if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
+				 sizeof(uint16_t)))
+			result = -EFAULT;
+		else
+			result = 0;
+		break;
+	case DIAG_IOCTL_DCI_REG:
+		result = diag_ioctl_dci_reg(ioarg);
+		break;
+	case DIAG_IOCTL_DCI_DEINIT:
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		dci_client = diag_dci_get_client_entry(client_id);
+		if (!dci_client) {
+			mutex_unlock(&driver->dci_mutex);
+			return DIAG_DCI_NOT_SUPPORTED;
+		}
+		result = diag_dci_deinit_client(dci_client);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_SUPPORT:
+		result = diag_ioctl_dci_support(ioarg);
+		break;
+	case DIAG_IOCTL_DCI_HEALTH_STATS:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_health_stats(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_LOG_STATUS:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_log_status(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_EVENT_STATUS:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_event_status(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_CLEAR_LOGS:
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		result = diag_dci_clear_log_mask(client_id);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_CLEAR_EVENTS:
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user(&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		result = diag_dci_clear_event_mask(client_id);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_LSM_DEINIT:
+		result = diag_ioctl_lsm_deinit();
+		break;
+	case DIAG_IOCTL_SWITCH_LOGGING:
+		if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+				   sizeof(mode_param)))
+			return -EFAULT;
+		mutex_lock(&driver->diagchar_mutex);
+		result = diag_switch_logging(&mode_param);
+		mutex_unlock(&driver->diagchar_mutex);
+		break;
+	case DIAG_IOCTL_REMOTE_DEV:
+		remote_dev = diag_get_remote_device_mask();
+		if (copy_to_user((void __user *)ioarg, &remote_dev,
+			sizeof(uint16_t)))
+			result = -EFAULT;
+		else
+			result = 1;
+		break;
+	case DIAG_IOCTL_VOTE_REAL_TIME:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_vote_real_time(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_GET_REAL_TIME:
+		result = diag_ioctl_get_real_time(ioarg);
+		break;
+	case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
+		result = diag_ioctl_set_buffering_mode(ioarg);
+		break;
+	case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
+		result = diag_ioctl_peripheral_drain_immediate(ioarg);
+		break;
+	case DIAG_IOCTL_REGISTER_CALLBACK:
+		result = diag_ioctl_register_callback(ioarg);
+		break;
+	case DIAG_IOCTL_HDLC_TOGGLE:
+		result = diag_ioctl_hdlc_toggle(ioarg);
+		break;
+	}
+	/* Unknown iocmd: result keeps its -EINVAL initializer */
+	return result;
+}
+
+/*
+ * diag_process_apps_data_hdlc - HDLC-encode an apps-side packet and append
+ * it to the shared aggregation buffer (hdlc_data). The buffer is flushed
+ * to the mux when it cannot hold the next encoded packet, when encoding
+ * reached the buffer limit, or when the packet is a response.
+ *
+ * @buf:      raw (unencoded) packet bytes; must be non-NULL
+ * @len:      length of @buf; must be > 0
+ * @pkt_type: DATA_TYPE_RESPONSE forces an immediate flush after encoding
+ *
+ * Returns PKT_ALLOC on success, PKT_DROP when no aggregation buffer could
+ * be allocated, -EIO on bad input or mux write failure, -EBADMSG when the
+ * worst-case encoded size exceeds DIAG_MAX_HDLC_BUF_SIZE.
+ *
+ * NOTE(review): nothing here serialises access to hdlc_data; the caller
+ * (diag_user_process_apps_data) holds apps_data_mutex/hdlc_disable_mutex
+ * around this call -- confirm all other callers do the same.
+ */
+static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
+				       int pkt_type)
+{
+	int err = 0;
+	int ret = PKT_DROP;
+	struct diag_apps_data_t *data = &hdlc_data;
+	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+	/*
+	 * The maximum encoded size of the buffer can be at most twice the
+	 * length of the packet. Add three bytes for footer - 16 bit CRC
+	 * (2 bytes) + delimiter (1 byte).
+	 */
+	const uint32_t max_encoded_size = ((2 * len) + 3);
+
+	if (!buf || len <= 0) {
+		pr_err("diag: In %s, invalid buf: %pK len: %d\n",
+		       __func__, buf, len);
+		return -EIO;
+	}
+
+	/* A packet whose worst-case encoding cannot fit is rejected outright. */
+	if (DIAG_MAX_HDLC_BUF_SIZE < max_encoded_size) {
+		pr_err_ratelimited("diag: In %s, encoded data is larger %d than the buffer size %d\n",
+		       __func__, max_encoded_size, DIAG_MAX_HDLC_BUF_SIZE);
+		return -EBADMSG;
+	}
+
+	send.state = DIAG_STATE_START;
+	send.pkt = buf;
+	send.last = (void *)(buf + len - 1);
+	send.terminate = 1;
+
+	/* Lazily allocate the aggregation buffer on first use. */
+	if (!data->buf)
+		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+					APF_DIAG_PADDING,
+					  POOL_TYPE_HDLC);
+	if (!data->buf) {
+		ret = PKT_DROP;
+		goto fail_ret;
+	}
+
+	/*
+	 * Not enough head-room for the worst-case encoding: flush what has
+	 * accumulated so far and start a fresh buffer.
+	 */
+	if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_encoded_size) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+				     data->ctxt);
+		if (err) {
+			ret = -EIO;
+			goto fail_free_buf;
+		}
+		data->buf = NULL;
+		data->len = 0;
+		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+					APF_DIAG_PADDING,
+					  POOL_TYPE_HDLC);
+		if (!data->buf) {
+			ret = PKT_DROP;
+			goto fail_ret;
+		}
+	}
+
+	enc.dest = data->buf + data->len;
+	enc.dest_last = (void *)(data->buf + data->len + max_encoded_size);
+	diag_hdlc_encode(&send, &enc);
+
+	/*
+	 * This is to check if after HDLC encoding, we are still within
+	 * the limits of aggregation buffer. If not, we write out the
+	 * current buffer and start aggregation in a newly allocated
+	 * buffer.
+	 */
+	if ((uintptr_t)enc.dest >= (uintptr_t)(data->buf +
+					       DIAG_MAX_HDLC_BUF_SIZE)) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+				     data->ctxt);
+		if (err) {
+			ret = -EIO;
+			goto fail_free_buf;
+		}
+		data->buf = NULL;
+		data->len = 0;
+		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+					APF_DIAG_PADDING,
+					 POOL_TYPE_HDLC);
+		if (!data->buf) {
+			ret = PKT_DROP;
+			goto fail_ret;
+		}
+
+		/* Re-encode the same packet into the fresh, empty buffer. */
+		enc.dest = data->buf + data->len;
+		enc.dest_last = (void *)(data->buf + data->len +
+					 max_encoded_size);
+		diag_hdlc_encode(&send, &enc);
+	}
+
+	/* Clamp the new fill level to the buffer size. */
+	data->len = (((uintptr_t)enc.dest - (uintptr_t)data->buf) <
+			DIAG_MAX_HDLC_BUF_SIZE) ?
+			((uintptr_t)enc.dest - (uintptr_t)data->buf) :
+			DIAG_MAX_HDLC_BUF_SIZE;
+
+	/* Responses must not linger in the aggregation buffer. */
+	if (pkt_type == DATA_TYPE_RESPONSE) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+				     data->ctxt);
+		if (err) {
+			ret = -EIO;
+			goto fail_free_buf;
+		}
+		data->buf = NULL;
+		data->len = 0;
+	}
+
+	return PKT_ALLOC;
+
+fail_free_buf:
+	diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
+	data->buf = NULL;
+	data->len = 0;
+
+fail_ret:
+	return ret;
+}
+
+/*
+ * diag_process_apps_data_non_hdlc - frame an apps-side packet with the
+ * non-HDLC header (start byte, version, length) plus a trailing
+ * CONTROL_CHAR and append it to the shared non_hdlc_data aggregation
+ * buffer, flushing to the mux when the buffer is too full or when the
+ * packet is a response.
+ *
+ * @buf:      raw packet bytes; must be non-NULL
+ * @len:      length of @buf; must be > 0
+ * @pkt_type: DATA_TYPE_RESPONSE forces an immediate flush
+ *
+ * Returns PKT_ALLOC on success, PKT_DROP when no buffer could be
+ * allocated, -EIO on bad input or mux write failure.
+ *
+ * NOTE(review): like the HDLC variant, this relies on the caller holding
+ * the apps-data locks around the shared non_hdlc_data state.
+ */
+static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len,
+					   int pkt_type)
+{
+	int err = 0;
+	int ret = PKT_DROP;
+	struct diag_pkt_frame_t header;
+	struct diag_apps_data_t *data = &non_hdlc_data;
+	/*
+	 * The maximum packet size, when the data is non hdlc encoded is equal
+	 * to the size of the packet frame header and the length. Add 1 for the
+	 * delimiter 0x7E at the end.
+	 */
+	const uint32_t max_pkt_size = sizeof(header) + len + 1;
+
+	if (!buf || len <= 0) {
+		pr_err("diag: In %s, invalid buf: %pK len: %d\n",
+		       __func__, buf, len);
+		return -EIO;
+	}
+
+	/* Lazily allocate the aggregation buffer on first use. */
+	if (!data->buf) {
+		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+					APF_DIAG_PADDING,
+					  POOL_TYPE_HDLC);
+		if (!data->buf) {
+			ret = PKT_DROP;
+			goto fail_ret;
+		}
+	}
+
+	/* Flush and re-allocate when the framed packet cannot fit. */
+	if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_pkt_size) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+				     data->ctxt);
+		if (err) {
+			ret = -EIO;
+			goto fail_free_buf;
+		}
+		data->buf = NULL;
+		data->len = 0;
+		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+					APF_DIAG_PADDING,
+					  POOL_TYPE_HDLC);
+		if (!data->buf) {
+			ret = PKT_DROP;
+			goto fail_ret;
+		}
+	}
+
+	/* header | payload | CONTROL_CHAR terminator */
+	header.start = CONTROL_CHAR;
+	header.version = 1;
+	header.length = len;
+	memcpy(data->buf + data->len, &header, sizeof(header));
+	data->len += sizeof(header);
+	memcpy(data->buf + data->len, buf, len);
+	data->len += len;
+	*(uint8_t *)(data->buf + data->len) = CONTROL_CHAR;
+	data->len += sizeof(uint8_t);
+	/* Responses must not linger in the aggregation buffer. */
+	if (pkt_type == DATA_TYPE_RESPONSE) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+				     data->ctxt);
+		if (err) {
+			ret = -EIO;
+			goto fail_free_buf;
+		}
+		data->buf = NULL;
+		data->len = 0;
+	}
+
+	return PKT_ALLOC;
+
+fail_free_buf:
+	diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
+	data->buf = NULL;
+	data->len = 0;
+
+fail_ret:
+	return ret;
+}
+
+/*
+ * diag_user_process_dci_data - copy a DCI transaction from user space into
+ * a POOL_TYPE_USER mempool buffer and hand it to
+ * diag_process_dci_transaction().
+ *
+ * @buf: user pointer to the payload; @len must satisfy
+ *       0 < len <= itemsize of the USER mempool.
+ *
+ * Returns the transaction result, -EBADMSG for invalid arguments, -ENOMEM
+ * when no pool buffer is available, or DIAG_DCI_SEND_DATA_FAIL when the
+ * copy from user space fails. The pool buffer is freed on every path.
+ */
+static int diag_user_process_dci_data(const char __user *buf, int len)
+{
+	int err = 0;
+	const int mempool = POOL_TYPE_USER;
+	unsigned char *user_space_data = NULL;
+
+	if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
+		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+				   __func__, buf, len);
+		return -EBADMSG;
+	}
+
+	user_space_data = diagmem_alloc(driver, len, mempool);
+	if (!user_space_data)
+		return -ENOMEM;
+
+	err = copy_from_user(user_space_data, buf, len);
+	if (err) {
+		pr_err_ratelimited("diag: In %s, unable to copy data from userspace, err: %d\n",
+				   __func__, err);
+		err = DIAG_DCI_SEND_DATA_FAIL;
+		goto fail;
+	}
+
+	err = diag_process_dci_transaction(user_space_data, len);
+fail:
+	diagmem_free(driver, user_space_data, mempool);
+	user_space_data = NULL;
+	return err;
+}
+
+/*
+ * diag_user_process_dci_apps_data - copy apps-generated DCI data (packet,
+ * log or event) from user space and feed it to
+ * diag_process_apps_dci_read_data().
+ *
+ * @pkt_type is masked down to DCI_PKT_TYPE | DATA_TYPE_DCI_LOG |
+ * DATA_TYPE_DCI_EVENT; a type with none of those bits set is rejected.
+ *
+ * Returns 0 on success, -EBADMSG for invalid arguments/type, -ENOMEM when
+ * no POOL_TYPE_COPY buffer is available.
+ *
+ * NOTE(review): on copy_from_user() failure this returns err, which is the
+ * positive count of bytes NOT copied rather than a negative errno --
+ * confirm callers treat any non-zero value as failure.
+ */
+static int diag_user_process_dci_apps_data(const char __user *buf, int len,
+					   int pkt_type)
+{
+	int err = 0;
+	const int mempool = POOL_TYPE_COPY;
+	unsigned char *user_space_data = NULL;
+
+	if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
+		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+				   __func__, buf, len);
+		return -EBADMSG;
+	}
+
+	pkt_type &= (DCI_PKT_TYPE | DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT);
+	if (!pkt_type) {
+		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+				   __func__, pkt_type);
+		return -EBADMSG;
+	}
+
+	user_space_data = diagmem_alloc(driver, len, mempool);
+	if (!user_space_data)
+		return -ENOMEM;
+
+	err = copy_from_user(user_space_data, buf, len);
+	if (err) {
+		pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
+			 __func__, err);
+		goto fail;
+	}
+
+	diag_process_apps_dci_read_data(pkt_type, user_space_data, len);
+fail:
+	diagmem_free(driver, user_space_data, mempool);
+	user_space_data = NULL;
+	return err;
+}
+
+/*
+ * diag_user_process_raw_data - copy a raw command/request packet from user
+ * space and route it either to a remote processor (when the payload begins
+ * with a remote-proc token) or to the local apps packet handler.
+ *
+ * @buf: user pointer to the payload (after the pkt_type header word)
+ * @len: payload length; must satisfy 0 < len <= CALLBACK_BUF_SIZE
+ *
+ * Returns 0 on success and a negative errno on failure. The
+ * POOL_TYPE_COPY buffer is freed on every path.
+ */
+static int diag_user_process_raw_data(const char __user *buf, int len)
+{
+	int err = 0;
+	int ret = 0;
+	int token_offset = 0;
+	int remote_proc = 0;
+	const int mempool = POOL_TYPE_COPY;
+	unsigned char *user_space_data = NULL;
+
+	if (!buf || len <= 0 || len > CALLBACK_BUF_SIZE) {
+		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+				   __func__, buf, len);
+		return -EBADMSG;
+	}
+
+	user_space_data = diagmem_alloc(driver, len, mempool);
+	if (!user_space_data)
+		return -ENOMEM;
+
+	err = copy_from_user(user_space_data, buf, len);
+	if (err) {
+		pr_err("diag: copy failed for user space data\n");
+		/*
+		 * Fix: this path previously fell through to "return ret"
+		 * with ret still 0, so a failed copy was reported as
+		 * success. Report -EFAULT instead.
+		 */
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	/* Check for proc_type */
+	if (len >= sizeof(int))
+		remote_proc = diag_get_remote(*(int *)user_space_data);
+	if (remote_proc) {
+		/* Strip the leading remote-proc token from the payload. */
+		token_offset = sizeof(int);
+		if (len <= MIN_SIZ_ALLOW) {
+			pr_err("diag: In %s, possible integer underflow, payload size: %d\n",
+			       __func__, len);
+			diagmem_free(driver, user_space_data, mempool);
+			user_space_data = NULL;
+			return -EBADMSG;
+		}
+		len -= sizeof(int);
+	}
+	if (driver->mask_check) {
+		if (!mask_request_validate(user_space_data +
+						token_offset, len)) {
+			pr_alert("diag: mask request Invalid\n");
+			diagmem_free(driver, user_space_data, mempool);
+			user_space_data = NULL;
+			return -EFAULT;
+		}
+	}
+	if (remote_proc) {
+		ret = diag_send_raw_data_remote(remote_proc,
+				(void *)(user_space_data + token_offset),
+				len, USER_SPACE_RAW_DATA);
+		if (ret) {
+			pr_err("diag: Error sending data to remote proc %d, err: %d\n",
+				remote_proc, ret);
+		}
+	} else {
+		/* Wait until the previous apps packet has been consumed. */
+		wait_event_interruptible(driver->wait_q,
+					 (driver->in_busy_pktdata == 0));
+		ret = diag_process_apps_pkt(user_space_data, len,
+			current->tgid);
+		if (ret == 1)
+			diag_send_error_rsp((void *)(user_space_data), len);
+	}
+fail:
+	diagmem_free(driver, user_space_data, mempool);
+	user_space_data = NULL;
+	return ret;
+}
+
+/*
+ * diag_user_process_userspace_data - handle a USER_SPACE_DATA_TYPE write:
+ * copy the payload into driver->user_space_data_buf, validate it, and
+ * forward it either to a remote processor or through the local HDLC /
+ * non-HDLC packet parsers.
+ *
+ * @buf: user pointer to the payload; @len must satisfy
+ *       0 < len <= USER_SPACE_DATA.
+ *
+ * Returns 0 on local handling, the remote-forward result otherwise, or a
+ * negative errno (-EBADMSG/-EAGAIN/-EIO/-EFAULT/-EINVAL) on failure.
+ */
+static int diag_user_process_userspace_data(const char __user *buf, int len)
+{
+	int err = 0;
+	int max_retries = 3;
+	int retry_count = 0;
+	int remote_proc = 0;
+	int token_offset = 0;
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled;
+
+	if (!buf || len <= 0 || len > USER_SPACE_DATA) {
+		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+				   __func__, buf, len);
+		return -EBADMSG;
+	}
+
+	/*
+	 * The shared user_space_data_buf may still be owned by a previous
+	 * remote transfer; poll a few times (~10 ms apart) before bailing.
+	 */
+	do {
+		if (!driver->user_space_data_busy)
+			break;
+		retry_count++;
+		usleep_range(10000, 10100);
+	} while (retry_count < max_retries);
+
+	if (driver->user_space_data_busy)
+		return -EAGAIN;
+
+	err = copy_from_user(driver->user_space_data_buf, buf, len);
+	if (err) {
+		pr_err("diag: In %s, failed to copy data from userspace, err: %d\n",
+		       __func__, err);
+		return -EIO;
+	}
+
+	/* Check for proc_type */
+	remote_proc = diag_get_remote(*(int *)driver->user_space_data_buf);
+	if (remote_proc) {
+		if (len <= MIN_SIZ_ALLOW) {
+			pr_err("diag: Integer underflow in %s, payload size: %d",
+			       __func__, len);
+			return -EBADMSG;
+		}
+		/* Strip the leading remote-proc token. */
+		token_offset = sizeof(int);
+		len -= sizeof(int);
+	}
+
+	/* Check masks for On-Device logging */
+	if (driver->mask_check) {
+		if (!mask_request_validate(driver->user_space_data_buf +
+					   token_offset, len)) {
+			pr_alert("diag: mask request Invalid\n");
+			return -EFAULT;
+		}
+	}
+
+	/*
+	 * Special case: a remote-bound 0xFF/0xFE command with subsystem
+	 * byte 0x0b is parsed locally for an active md session instead of
+	 * being forwarded.
+	 */
+	if (remote_proc && (*(driver->user_space_data_buf) == 0xFF ||
+	    *(driver->user_space_data_buf) == 0xFE) &&
+	    *(driver->user_space_data_buf + sizeof(int) + 1) == 0x0b) {
+		session_info = diag_md_session_get_pid(current->tgid);
+		if (session_info)
+			diag_process_hdlc_pkt((void *)(driver->user_space_data_buf + token_offset),
+					      len, current->tgid);
+		return 0;
+	}
+
+	/* send masks to local processor now */
+	if (!remote_proc) {
+		mutex_lock(&driver->md_session_lock);
+		session_info = diag_md_session_get_pid(current->tgid);
+		if (!session_info) {
+			pr_err("diag:In %s request came from invalid md session pid:%d",
+				__func__, current->tgid);
+			mutex_unlock(&driver->md_session_lock);
+			return -EINVAL;
+		}
+		/*
+		 * NOTE(review): session_info is guaranteed non-NULL here,
+		 * so the else branch below is dead code.
+		 */
+		if (session_info)
+			hdlc_disabled = session_info->hdlc_disabled;
+		else
+			hdlc_disabled = driver->hdlc_disabled;
+		mutex_unlock(&driver->md_session_lock);
+		if (!hdlc_disabled)
+			diag_process_hdlc_pkt((void *)
+				(driver->user_space_data_buf),
+				len, current->tgid);
+		else
+			diag_process_non_hdlc_pkt((char *)
+						(driver->user_space_data_buf),
+						len, current->tgid);
+		return 0;
+	}
+
+	err = diag_process_userspace_remote(remote_proc,
+					    driver->user_space_data_buf +
+					    token_offset, len);
+	if (err) {
+		/* Release ownership of the shared buffer on failure. */
+		driver->user_space_data_busy = 0;
+		pr_err("diag: Error sending mask to remote proc %d, err: %d\n",
+		       remote_proc, err);
+	}
+
+	return err;
+}
+
+/*
+ * diag_user_process_apps_data - handle apps-generated log/event/F3/response
+ * data written from user space: copy it into a POOL_TYPE_COPY buffer and
+ * aggregate it through the HDLC or non-HDLC path, then record allocation/
+ * drop statistics.
+ *
+ * @buf:      user pointer to the payload
+ * @len:      payload length; must satisfy 0 < len <= DIAG_MAX_RSP_SIZE
+ * @pkt_type: one of the DATA_TYPE_* values accepted by the switch below
+ *
+ * Returns 0 on success (including drops, which are only counted in stats),
+ * or a negative errno for bad input / allocation / copy failures.
+ */
+static int diag_user_process_apps_data(const char __user *buf, int len,
+				       int pkt_type)
+{
+	int ret = 0;
+	const int mempool = POOL_TYPE_COPY;
+	unsigned char *user_space_data = NULL;
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled;
+
+	if (!buf || len <= 0 || len > DIAG_MAX_RSP_SIZE) {
+		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+				   __func__, buf, len);
+		return -EBADMSG;
+	}
+
+	/* Only the apps data types are accepted here. */
+	switch (pkt_type) {
+	case DATA_TYPE_EVENT:
+	case DATA_TYPE_F3:
+	case DATA_TYPE_LOG:
+	case DATA_TYPE_RESPONSE:
+	case DATA_TYPE_DELAYED_RESPONSE:
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+				   __func__, pkt_type);
+		return -EBADMSG;
+	}
+
+	user_space_data = diagmem_alloc(driver, len, mempool);
+	if (!user_space_data) {
+		diag_record_stats(pkt_type, PKT_DROP);
+		return -ENOMEM;
+	}
+
+	ret = copy_from_user(user_space_data, buf, len);
+	if (ret) {
+		pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
+			 __func__, ret);
+		diagmem_free(driver, user_space_data, mempool);
+		user_space_data = NULL;
+		diag_record_stats(pkt_type, PKT_DROP);
+		return -EBADMSG;
+	}
+
+	/*
+	 * With STM enabled the data would be diverted to coresight; that
+	 * path is compiled out below, so the packet is simply discarded.
+	 */
+	if (driver->stm_state[APPS_DATA] &&
+	    (pkt_type >= DATA_TYPE_EVENT) && (pkt_type <= DATA_TYPE_LOG)) {
+#if 0
+		/*
+		 * disable this, we don't activate coresight-stm
+		 * anyway ...
+		 */
+		stm_size = stm_log_inv_ts(OST_ENTITY_DIAG, 0, user_space_data,
+					  len);
+		if (stm_size == 0) {
+			pr_debug("diag: In %s, stm_log_inv_ts returned size of 0\n",
+				 __func__);
+		}
+#endif
+		diagmem_free(driver, user_space_data, mempool);
+		user_space_data = NULL;
+
+		return 0;
+	}
+
+	/* Serialise against other writers to the shared aggregation buffers. */
+	mutex_lock(&apps_data_mutex);
+	mutex_lock(&driver->hdlc_disable_mutex);
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_peripheral(APPS_DATA);
+	if (session_info)
+		hdlc_disabled = session_info->hdlc_disabled;
+	else
+		hdlc_disabled = driver->hdlc_disabled;
+	mutex_unlock(&driver->md_session_lock);
+	if (hdlc_disabled)
+		ret = diag_process_apps_data_non_hdlc(user_space_data, len,
+						      pkt_type);
+	else
+		ret = diag_process_apps_data_hdlc(user_space_data, len,
+						  pkt_type);
+	mutex_unlock(&driver->hdlc_disable_mutex);
+	mutex_unlock(&apps_data_mutex);
+
+	diagmem_free(driver, user_space_data, mempool);
+	user_space_data = NULL;
+
+	check_drain_timer();
+
+	if (ret == PKT_DROP)
+		diag_record_stats(pkt_type, PKT_DROP);
+	else if (ret == PKT_ALLOC)
+		diag_record_stats(pkt_type, PKT_ALLOC);
+	else
+		return ret;
+
+	return 0;
+}
+
+/*
+ * diagchar_read - read() handler for the diag char device.
+ *
+ * Looks up the calling process in client_map, sleeps until data is ready
+ * for that slot, then copies out exactly one category of pending data per
+ * call, in the priority order of the if-chain below (memory-device data,
+ * HDLC support, deinit, masks, packets, DCI masks, then DCI data at exit).
+ * Each served category clears its data_ready bit and decrements the
+ * ready-notification counter.
+ *
+ * Returns the number of bytes written to @buf, or a negative errno.
+ *
+ * NOTE(review): the COPY_USER_SPACE_OR_EXIT/OR_ERR macros (defined
+ * elsewhere) update `ret` and jump to exit on failure; diagchar_mutex is
+ * held across most of the body and released either at exit or explicitly
+ * on the DEINIT/DCI paths -- lock balance depends on those macros.
+ */
+static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
+			  loff_t *ppos)
+{
+	struct diag_dci_client_tbl *entry;
+	struct list_head *start, *temp;
+	int index = -1, i = 0, ret = 0;
+	int data_type;
+	int copy_dci_data = 0;
+	int exit_stat = 0;
+	int write_len = 0;
+	struct diag_md_session_t *session_info = NULL;
+	struct pid *pid_struct = NULL;
+	struct task_struct *task_s = NULL;
+
+	/* Find this client's slot by tgid. */
+	mutex_lock(&driver->diagchar_mutex);
+	for (i = 0; i < driver->num_clients; i++)
+		if (driver->client_map[i].pid == current->tgid)
+			index = i;
+	mutex_unlock(&driver->diagchar_mutex);
+
+	if (index == -1) {
+		pr_err("diag: Client PID not found in table");
+		return -EINVAL;
+	}
+	if (!buf) {
+		pr_err("diag: bad address from user side\n");
+		return -EFAULT;
+	}
+	/* Block until something is queued for this client. */
+	wait_event_interruptible(driver->wait_q,
+			atomic_read(&driver->data_ready_notif[index]) > 0);
+
+	mutex_lock(&driver->diagchar_mutex);
+
+	if ((driver->data_ready[index] & USER_SPACE_DATA_TYPE) &&
+	    (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
+	     driver->logging_mode == DIAG_MULTI_MODE)) {
+		pr_debug("diag: process woken up\n");
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
+		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int));
+		/* place holder for number of data field */
+		ret += sizeof(int);
+		mutex_lock(&driver->md_session_lock);
+		session_info = diag_md_session_get_pid(current->tgid);
+		exit_stat = diag_md_copy_to_user(buf, &ret, count,
+						 session_info);
+		mutex_unlock(&driver->md_session_lock);
+		goto exit;
+	} else if (driver->data_ready[index] & USER_SPACE_DATA_TYPE) {
+		/* In case, the thread wakes up and the logging mode is
+		not memory device any more, the condition needs to be cleared */
+		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+	}
+
+	if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
+		data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
+		driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int));
+		mutex_lock(&driver->md_session_lock);
+		session_info = diag_md_session_get_pid(current->tgid);
+		if (session_info) {
+			COPY_USER_SPACE_OR_ERR(buf+4,
+					session_info->hdlc_disabled,
+					sizeof(uint8_t));
+			if (ret == -EFAULT) {
+				mutex_unlock(&driver->md_session_lock);
+				goto exit;
+			}
+		}
+		mutex_unlock(&driver->md_session_lock);
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & DEINIT_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & DEINIT_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+		driver->data_ready[index] ^= DEINIT_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		mutex_unlock(&driver->diagchar_mutex);
+		/* Tears down this client entirely; do not touch state after. */
+		diag_remove_client_entry(file);
+		return ret;
+	}
+
+	if (driver->data_ready[index] & MSG_MASKS_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & MSG_MASKS_TYPE;
+		mutex_lock(&driver->md_session_lock);
+		session_info = diag_md_session_get_peripheral(APPS_DATA);
+		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
+		if (ret == -EFAULT) {
+			mutex_unlock(&driver->md_session_lock);
+			goto exit;
+		}
+		write_len = diag_copy_to_user_msg_mask(buf + ret, count,
+						       session_info);
+		mutex_unlock(&driver->md_session_lock);
+		if (write_len > 0)
+			ret += write_len;
+		driver->data_ready[index] ^= MSG_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & EVENT_MASKS_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & EVENT_MASKS_TYPE;
+		mutex_lock(&driver->md_session_lock);
+		session_info = diag_md_session_get_peripheral(APPS_DATA);
+		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
+		if (ret == -EFAULT) {
+			mutex_unlock(&driver->md_session_lock);
+			goto exit;
+		}
+		/* Prefer the session's event mask; fall back to the global one. */
+		if (session_info && session_info->event_mask &&
+		    session_info->event_mask->ptr) {
+			COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
+					*(session_info->event_mask->ptr),
+					session_info->event_mask->mask_len);
+			if (ret == -EFAULT) {
+				mutex_unlock(&driver->md_session_lock);
+				goto exit;
+			}
+		} else {
+			COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
+						*(event_mask.ptr),
+						event_mask.mask_len);
+			if (ret == -EFAULT) {
+				mutex_unlock(&driver->md_session_lock);
+				goto exit;
+			}
+		}
+		mutex_unlock(&driver->md_session_lock);
+		driver->data_ready[index] ^= EVENT_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & LOG_MASKS_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & LOG_MASKS_TYPE;
+		mutex_lock(&driver->md_session_lock);
+		session_info = diag_md_session_get_peripheral(APPS_DATA);
+		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
+		if (ret == -EFAULT) {
+			mutex_unlock(&driver->md_session_lock);
+			goto exit;
+		}
+		write_len = diag_copy_to_user_log_mask(buf + ret, count,
+						       session_info);
+		mutex_unlock(&driver->md_session_lock);
+		if (write_len > 0)
+			ret += write_len;
+		driver->data_ready[index] ^= LOG_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & PKT_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & PKT_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(data_type));
+		COPY_USER_SPACE_OR_EXIT(buf + sizeof(data_type),
+					*(driver->apps_req_buf),
+					driver->apps_req_buf_len);
+		driver->data_ready[index] ^= PKT_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		driver->in_busy_pktdata = 0;
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & DCI_PKT_TYPE) {
+		/* Copy the type of data being passed */
+		data_type = driver->data_ready[index] & DCI_PKT_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+		COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->dci_pkt_buf),
+					driver->dci_pkt_length);
+		driver->data_ready[index] ^= DCI_PKT_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		driver->in_busy_dcipktdata = 0;
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & DCI_EVENT_MASKS_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & DCI_EVENT_MASKS_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+		COPY_USER_SPACE_OR_EXIT(buf+4, driver->num_dci_client, 4);
+		COPY_USER_SPACE_OR_EXIT(buf + 8, (dci_ops_tbl[DCI_LOCAL_PROC].
+				event_mask_composite), DCI_EVENT_MASK_SIZE);
+		driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & DCI_LOG_MASKS_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & DCI_LOG_MASKS_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+		COPY_USER_SPACE_OR_EXIT(buf+4, driver->num_dci_client, 4);
+		COPY_USER_SPACE_OR_EXIT(buf+8, (dci_ops_tbl[DCI_LOCAL_PROC].
+				log_mask_composite), DCI_LOG_MASK_SIZE);
+		driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
+		goto exit;
+	}
+
+exit:
+	/* DCI payload data is drained last, per registered DCI client. */
+	if (driver->data_ready[index] & DCI_DATA_TYPE) {
+		data_type = driver->data_ready[index] & DCI_DATA_TYPE;
+		mutex_unlock(&driver->diagchar_mutex);
+		/* Copy the type of data being passed */
+		mutex_lock(&driver->dci_mutex);
+		list_for_each_safe(start, temp, &driver->dci_client_list) {
+			entry = list_entry(start, struct diag_dci_client_tbl,
+									track);
+			pid_struct = find_get_pid(entry->tgid);
+			if (!pid_struct)
+				continue;
+			task_s = get_pid_task(pid_struct, PIDTYPE_PID);
+			if (!task_s) {
+				DIAG_LOG(DIAG_DEBUG_DCI,
+				"diag: valid task doesn't exist for pid = %d\n",
+				entry->tgid);
+				continue;
+			}
+			if (task_s == entry->client)
+				if (entry->client->tgid != current->tgid)
+					continue;
+			if (!entry->in_service)
+				continue;
+			if (copy_to_user(buf + ret, &data_type, sizeof(int))) {
+				mutex_unlock(&driver->dci_mutex);
+				goto end;
+			}
+			ret += sizeof(int);
+			if (copy_to_user(buf + ret, &entry->client_info.token,
+				sizeof(int))) {
+				mutex_unlock(&driver->dci_mutex);
+				goto end;
+			}
+			ret += sizeof(int);
+			copy_dci_data = 1;
+			exit_stat = diag_copy_dci(buf, count, entry, &ret);
+			mutex_lock(&driver->diagchar_mutex);
+			driver->data_ready[index] ^= DCI_DATA_TYPE;
+			atomic_dec(&driver->data_ready_notif[index]);
+			mutex_unlock(&driver->diagchar_mutex);
+			if (exit_stat == 1) {
+				mutex_unlock(&driver->dci_mutex);
+				goto end;
+			}
+		}
+		mutex_unlock(&driver->dci_mutex);
+		goto end;
+	}
+	mutex_unlock(&driver->diagchar_mutex);
+end:
+	/*
+	 * Flush any read that is currently pending on DCI data and
+	 * command channnels. This will ensure that the next read is not
+	 * missed.
+	 */
+	if (copy_dci_data) {
+		diag_ws_on_copy_complete(DIAG_WS_DCI);
+		flush_workqueue(driver->diag_dci_wq);
+	}
+	return ret;
+}
+
+/*
+ * diagchar_write - write() handler for the diag char device.
+ *
+ * The first int of the user buffer is the packet type; the remainder is
+ * the payload, which is dispatched to the matching diag_user_process_*()
+ * helper. Non-DCI traffic is dropped while in USB mode with USB
+ * disconnected.
+ *
+ * Returns the helper's result (0 or an errno-style value), or a negative
+ * errno for short/unreadable input or an unknown packet type.
+ */
+static ssize_t diagchar_write(struct file *file, const char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	int err = 0;
+	int pkt_type = 0;
+	int payload_len = 0;
+	const char __user *payload_buf = NULL;
+
+	/*
+	 * The data coming from the user space should at least have the
+	 * packet type header.
+	 */
+	if (count < sizeof(int)) {
+		pr_err("diag: In %s, client is sending short data, len: %d\n",
+		       __func__, (int)count);
+		return -EBADMSG;
+	}
+
+	err = copy_from_user((&pkt_type), buf, sizeof(int));
+	if (err) {
+		pr_err_ratelimited("diag: In %s, unable to copy pkt_type from userspace, err: %d\n",
+				   __func__, err);
+		return -EIO;
+	}
+
+#ifdef CONFIG_DIAG_OVER_USB
+	if (driver->logging_mode == DIAG_USB_MODE && !driver->usb_connected) {
+		if (!((pkt_type == DCI_DATA_TYPE) ||
+		    (pkt_type == DCI_PKT_TYPE) ||
+		    (pkt_type & DATA_TYPE_DCI_LOG) ||
+		    (pkt_type & DATA_TYPE_DCI_EVENT))) {
+			pr_debug("diag: In %s, Dropping non DCI packet type\n",
+				 __func__);
+			return -EIO;
+		}
+	}
+#endif
+
+	payload_buf = buf + sizeof(int);
+	payload_len = count - sizeof(int);
+
+	if (pkt_type == DCI_PKT_TYPE)
+		return diag_user_process_dci_apps_data(payload_buf,
+						       payload_len,
+						       pkt_type);
+	else if (pkt_type == DCI_DATA_TYPE)
+		return diag_user_process_dci_data(payload_buf, payload_len);
+	else if (pkt_type == USER_SPACE_RAW_DATA_TYPE)
+		return diag_user_process_raw_data(payload_buf,
+							    payload_len);
+	else if (pkt_type == USER_SPACE_DATA_TYPE)
+		return diag_user_process_userspace_data(payload_buf,
+							payload_len);
+	/*
+	 * DCI log/event bits may be combined with a regular data type; the
+	 * DCI part is handled first, then the bits are cleared so the
+	 * remaining type can fall through to the switch below.
+	 */
+	if (pkt_type & (DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT)) {
+		err = diag_user_process_dci_apps_data(payload_buf, payload_len,
+						      pkt_type);
+		if (pkt_type & DATA_TYPE_DCI_LOG)
+			pkt_type ^= DATA_TYPE_DCI_LOG;
+		if (pkt_type & DATA_TYPE_DCI_EVENT)
+			pkt_type ^= DATA_TYPE_DCI_EVENT;
+		/*
+		 * Check if the log or event is selected even on the regular
+		 * stream. If USB is not connected and we are not in memory
+		 * device mode, we should not process these logs/events.
+		 */
+#ifdef CONFIG_DIAG_OVER_USB
+		if (pkt_type && driver->logging_mode == DIAG_USB_MODE &&
+		    !driver->usb_connected)
+			return err;
+#endif
+	}
+
+	switch (pkt_type) {
+	case DATA_TYPE_EVENT:
+	case DATA_TYPE_F3:
+	case DATA_TYPE_LOG:
+	case DATA_TYPE_DELAYED_RESPONSE:
+	case DATA_TYPE_RESPONSE:
+		return diag_user_process_apps_data(payload_buf, payload_len,
+						   pkt_type);
+	default:
+		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+				   __func__, pkt_type);
+		return -EINVAL;
+	}
+
+	return err;
+}
+
+/*
+ * diag_ws_init - reset the DCI and memory-device wakeup-source reference
+ * counters and initialise their spinlocks. Called once at driver setup.
+ */
+void diag_ws_init()
+{
+	driver->dci_ws.ref_count = 0;
+	driver->dci_ws.copy_count = 0;
+	spin_lock_init(&driver->dci_ws.lock);
+
+	driver->md_ws.ref_count = 0;
+	driver->md_ws.copy_count = 0;
+	spin_lock_init(&driver->md_ws.lock);
+}
+
+/*
+ * diag_stats_init - zero the per-type (msg/log/event) alloc and drop
+ * counters. No-op if the driver has not been allocated yet.
+ */
+static void diag_stats_init(void)
+{
+	if (!driver)
+		return;
+
+	driver->msg_stats.alloc_count = 0;
+	driver->msg_stats.drop_count = 0;
+
+	driver->log_stats.alloc_count = 0;
+	driver->log_stats.drop_count = 0;
+
+	driver->event_stats.alloc_count = 0;
+	driver->event_stats.drop_count = 0;
+}
+
+/*
+ * diag_ws_on_notify - keep the system awake while diag data is pending.
+ * Called from notification context; pairs with pm_relax() in
+ * diag_ws_release().
+ */
+void diag_ws_on_notify()
+{
+	/*
+	 * Do not deal with reference count here as there can be spurious
+	 * interrupts.
+	 */
+	pm_stay_awake(driver->diag_dev);
+}
+
+/*
+ * diag_ws_on_read - account for data read from a channel. A positive
+ * @pkt_len takes a wakeup-source reference; zero/negative resets the
+ * counters and may release the wakeup source.
+ *
+ * @type: DIAG_WS_DCI or DIAG_WS_MUX, selecting which ref struct to use.
+ */
+void diag_ws_on_read(int type, int pkt_len)
+{
+	unsigned long flags;
+	struct diag_ws_ref_t *ws_ref = NULL;
+
+	switch (type) {
+	case DIAG_WS_DCI:
+		ws_ref = &driver->dci_ws;
+		break;
+	case DIAG_WS_MUX:
+		ws_ref = &driver->md_ws;
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	spin_lock_irqsave(&ws_ref->lock, flags);
+	if (pkt_len > 0) {
+		ws_ref->ref_count++;
+	} else {
+		if (ws_ref->ref_count < 1) {
+			ws_ref->ref_count = 0;
+			ws_ref->copy_count = 0;
+		}
+		/* NOTE(review): diag_ws_release() is called with the
+		 * spinlock held here, unlike the other diag_ws_* helpers. */
+		diag_ws_release();
+	}
+	spin_unlock_irqrestore(&ws_ref->lock, flags);
+}
+
+
+/*
+ * diag_ws_on_copy - note that one pending packet was copied to user space;
+ * the reference it held is dropped later in diag_ws_on_copy_complete().
+ *
+ * @type: DIAG_WS_DCI or DIAG_WS_MUX.
+ */
+void diag_ws_on_copy(int type)
+{
+	unsigned long flags;
+	struct diag_ws_ref_t *ws_ref = NULL;
+
+	switch (type) {
+	case DIAG_WS_DCI:
+		ws_ref = &driver->dci_ws;
+		break;
+	case DIAG_WS_MUX:
+		ws_ref = &driver->md_ws;
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	spin_lock_irqsave(&ws_ref->lock, flags);
+	ws_ref->copy_count++;
+	spin_unlock_irqrestore(&ws_ref->lock, flags);
+}
+
+/*
+ * diag_ws_on_copy_fail - drop one wakeup-source reference after a failed
+ * copy to user space, then release the wakeup source if nothing is
+ * pending.
+ *
+ * @type: DIAG_WS_DCI or DIAG_WS_MUX.
+ */
+void diag_ws_on_copy_fail(int type)
+{
+	unsigned long flags;
+	struct diag_ws_ref_t *ws_ref = NULL;
+
+	switch (type) {
+	case DIAG_WS_DCI:
+		ws_ref = &driver->dci_ws;
+		break;
+	case DIAG_WS_MUX:
+		ws_ref = &driver->md_ws;
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	spin_lock_irqsave(&ws_ref->lock, flags);
+	ws_ref->ref_count--;
+	spin_unlock_irqrestore(&ws_ref->lock, flags);
+
+	diag_ws_release();
+}
+
+/*
+ * diag_ws_on_copy_complete - retire all references held by completed
+ * copies (ref_count -= copy_count, clamped at 0), reset copy_count, and
+ * release the wakeup source if nothing remains pending.
+ *
+ * @type: DIAG_WS_DCI or DIAG_WS_MUX.
+ */
+void diag_ws_on_copy_complete(int type)
+{
+	unsigned long flags;
+	struct diag_ws_ref_t *ws_ref = NULL;
+
+	switch (type) {
+	case DIAG_WS_DCI:
+		ws_ref = &driver->dci_ws;
+		break;
+	case DIAG_WS_MUX:
+		ws_ref = &driver->md_ws;
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	spin_lock_irqsave(&ws_ref->lock, flags);
+	/*
+	 * NOTE(review): the indentation below is misleading -- only the
+	 * ref_count clamp is conditional; the copy_count reset always runs.
+	 */
+	ws_ref->ref_count -= ws_ref->copy_count;
+		if (ws_ref->ref_count < 1)
+			ws_ref->ref_count = 0;
+		ws_ref->copy_count = 0;
+	spin_unlock_irqrestore(&ws_ref->lock, flags);
+
+	diag_ws_release();
+}
+
+/*
+ * diag_ws_reset - forcibly zero both counters for the given wakeup-source
+ * type and release the wakeup source if nothing else is pending.
+ *
+ * @type: DIAG_WS_DCI or DIAG_WS_MUX.
+ */
+void diag_ws_reset(int type)
+{
+	unsigned long flags;
+	struct diag_ws_ref_t *ws_ref = NULL;
+
+	switch (type) {
+	case DIAG_WS_DCI:
+		ws_ref = &driver->dci_ws;
+		break;
+	case DIAG_WS_MUX:
+		ws_ref = &driver->md_ws;
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	spin_lock_irqsave(&ws_ref->lock, flags);
+	ws_ref->ref_count = 0;
+	ws_ref->copy_count = 0;
+	spin_unlock_irqrestore(&ws_ref->lock, flags);
+
+	diag_ws_release();
+}
+
+/*
+ * diag_ws_release - allow the system to suspend once neither the DCI nor
+ * the memory-device path holds a wakeup reference.
+ * NOTE(review): the counters are read without taking either ws lock.
+ */
+void diag_ws_release()
+{
+	if (driver->dci_ws.ref_count == 0 && driver->md_ws.ref_count == 0)
+		pm_relax(driver->diag_dev);
+}
+
+#ifdef DIAG_DEBUG
+/*
+ * diag_debug_init - create the diag IPC-logging context and enable the
+ * peripheral/DCI/bridge debug categories. Compiled only with DIAG_DEBUG.
+ */
+static void diag_debug_init(void)
+{
+	diag_ipc_log = ipc_log_context_create(DIAG_IPC_LOG_PAGES, "diag", 0);
+	if (!diag_ipc_log) {
+#ifdef CONFIG_IPC_LOGGING
+		pr_err("diag: Failed to create IPC logging context\n");
+#else
+		pr_err("diag: IPC Logging disabled\n");
+#endif
+	}
+	/*
+	 * Set the bit mask here as per diag_ipc_logging.h to enable debug logs
+	 * to be logged to IPC
+	 */
+	diag_debug_mask = DIAG_DEBUG_PERIPHERALS | DIAG_DEBUG_DCI |
+				DIAG_DEBUG_BRIDGE;
+}
+#else
+/* Stub used when DIAG_DEBUG is not defined. */
+static void diag_debug_init(void)
+{
+
+}
+#endif
+
+/*
+ * diag_real_time_info_init - initialise per-processor real-time mode state
+ * (mode on, DCI + memory-device votes set) and create the single-threaded
+ * workqueue that applies real-time mode changes.
+ *
+ * Returns 0 on success, -EIO if the driver is not allocated, -ENOMEM if
+ * the workqueue cannot be created.
+ */
+static int diag_real_time_info_init(void)
+{
+	int i;
+	if (!driver)
+		return -EIO;
+	for (i = 0; i < DIAG_NUM_PROC; i++) {
+		driver->real_time_mode[i] = 1;
+		driver->proc_rt_vote_mask[i] |= DIAG_PROC_DCI;
+		driver->proc_rt_vote_mask[i] |= DIAG_PROC_MEMORY_DEVICE;
+	}
+	driver->real_time_update_busy = 0;
+	driver->proc_active_mask = 0;
+	driver->diag_real_time_wq = create_singlethread_workqueue(
+							"diag_real_time_wq");
+	if (!driver->diag_real_time_wq)
+		return -ENOMEM;
+	INIT_WORK(&(driver->diag_real_time_work), diag_real_time_work_fn);
+	mutex_init(&driver->real_time_mutex);
+	return 0;
+}
+
+/* File operations for the /dev/diag character device node */
+static const struct file_operations diagcharfops = {
+	.owner = THIS_MODULE,
+	.read = diagchar_read,
+	.write = diagchar_write,
+#ifdef CONFIG_COMPAT
+	/* 32-bit userspace on a 64-bit kernel */
+	.compat_ioctl = diagchar_compat_ioctl,
+#endif
+	.unlocked_ioctl = diagchar_ioctl,
+	.open = diagchar_open,
+	.release = diagchar_close
+};
+
+/*
+ * diagchar_setup_cdev - register the "diag" char device with the
+ * allocated dev_t, create its sysfs class and device node, and attach
+ * a wakeup source to the device.
+ *
+ * Returns 0 on success; -1 (legacy) or a negative errno on failure.
+ *
+ * Fixes:
+ *  - The wakeup-source block was guarded by the nonexistent symbol
+ *    CONFIG_PMCONFIG_PM_SLEEP (two config names fused together), so it
+ *    could never compile in; guard on CONFIG_PM_SLEEP.
+ *  - device_create() returns ERR_PTR() on failure, never NULL, so the
+ *    old "!driver->diag_dev" test could not detect errors.
+ *  - wakeup_source_register() takes (dev, name) on modern kernels.
+ */
+static int diagchar_setup_cdev(dev_t devno)
+{
+
+	int err;
+
+	cdev_init(driver->cdev, &diagcharfops);
+
+	driver->cdev->owner = THIS_MODULE;
+	driver->cdev->ops = &diagcharfops;
+
+	err = cdev_add(driver->cdev, devno, 1);
+
+	if (err) {
+		printk(KERN_INFO "diagchar cdev registration failed !\n\n");
+		return -1;
+	}
+
+	driver->diagchar_class = class_create("diag");
+
+	if (IS_ERR(driver->diagchar_class)) {
+		printk(KERN_ERR "Error creating diagchar class.\n");
+		return -1;
+	}
+
+	driver->diag_dev = device_create(driver->diagchar_class, NULL, devno,
+					 (void *)driver, "diag");
+
+	/* device_create() reports failure via ERR_PTR(), never NULL */
+	if (IS_ERR(driver->diag_dev))
+		return -EIO;
+#ifdef CONFIG_PM_SLEEP
+	driver->diag_dev->power.wakeup =
+		wakeup_source_register(driver->diag_dev, "DIAG_WS");
+#endif
+	return 0;
+
+}
+
+/*
+ * diagchar_cleanup - tear down the char device, its class/device node
+ * and free the driver state.  Safe to call from any failure point of
+ * diag_probe(): every step is guarded against not-yet-initialized state.
+ *
+ * Fixes:
+ *  - device_destroy() was called with the class pointer unchecked; if
+ *    class_create() had failed (ERR_PTR) or never ran (NULL from the
+ *    kzalloc'd driver), this dereferenced garbage.
+ *  - "!IS_ERR()" let a NULL class reach class_destroy(); use
+ *    IS_ERR_OR_NULL() to cover both failure shapes.
+ */
+static int diagchar_cleanup(void)
+{
+	if (driver) {
+		if (driver->cdev) {
+			/* Only touch the device node if the class exists */
+			if (!IS_ERR_OR_NULL(driver->diagchar_class))
+				device_destroy(driver->diagchar_class,
+					       MKDEV(driver->major,
+						     driver->minor_start));
+			cdev_del(driver->cdev);
+		}
+		if (!IS_ERR_OR_NULL(driver->diagchar_class))
+			class_destroy(driver->diagchar_class);
+		kfree(driver);
+	}
+	return 0;
+}
+
+/*
+ * diag_probe - platform-driver probe: allocate and initialize the global
+ * diagchar driver state, bring up every diag subsystem in dependency
+ * order (real-time voting, debugfs, masks, mux, fwd, cntl, DCI,
+ * peripherals), then register the /dev/diag character device.
+ *
+ * On any failure it falls through to a common error path that unwinds
+ * all subsystems (each *_exit is expected to tolerate a partial init).
+ */
+static int diag_probe(struct platform_device *pdev)
+{
+	dev_t dev;
+	int error, ret, i;
+
+	pr_debug("diagfwd initializing ..\n");
+
+#ifdef CONFIG_MSM_MHI
+	/* Defer probing until the MHI transport underneath is ready */
+	if (!mhi_is_device_ready(&pdev->dev, "qcom,mhi"))
+		return -EPROBE_DEFER;
+
+	pr_debug("mhi device is ready\n");
+#endif
+
+	ret = 0;
+	/* +5 reserves space after the struct for the "diag" name string */
+	driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL);
+	if (!driver)
+		return -ENOMEM;
+	driver->pdev = pdev;
+	kmemleak_not_leak(driver);
+
+	timer_in_progress = 0;
+	driver->delayed_rsp_id = 0;
+	driver->hdlc_disabled = 0;
+	driver->dci_state = DIAG_DCI_NO_ERROR;
+	timer_setup(&drain_timer, drain_timer_func, 0);
+	driver->supports_sockets = 1;
+	driver->time_sync_enabled = 0;
+	driver->uses_time_api = 0;
+	driver->poolsize = poolsize;
+	driver->poolsize_hdlc = poolsize_hdlc;
+	driver->poolsize_dci = poolsize_dci;
+	driver->poolsize_user = poolsize_user;
+	/*
+	 * POOL_TYPE_MUX_APPS is for the buffers in the Diag MUX layer.
+	 * The number of buffers encompasses Diag data generated on
+	 * the Apss processor + 1 for the responses generated exclusively on
+	 * the Apps processor + data from data channels (4 channels per
+	 * peripheral) + data from command channels (2)
+	 */
+	diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_usb_apps,
+			poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6));
+	driver->num_clients = max_clients;
+	driver->logging_mode = DIAG_USB_MODE;
+	driver->mask_check = 0;
+	driver->in_busy_pktdata = 0;
+	driver->in_busy_dcipktdata = 0;
+	driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
+	hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
+	hdlc_data.len = 0;
+	non_hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
+	non_hdlc_data.len = 0;
+	mutex_init(&driver->hdlc_disable_mutex);
+	mutex_init(&driver->diagchar_mutex);
+	mutex_init(&driver->diag_maskclear_mutex);
+	mutex_init(&driver->diag_notifier_mutex);
+	mutex_init(&driver->diag_file_mutex);
+	mutex_init(&driver->delayed_rsp_mutex);
+	mutex_init(&apps_data_mutex);
+	mutex_init(&driver->msg_mask_lock);
+	mutex_init(&driver->hdlc_recovery_mutex);
+	for (i = 0; i < NUM_PERIPHERALS; i++)
+		mutex_init(&driver->diagfwd_channel_mutex[i]);
+	init_waitqueue_head(&driver->wait_q);
+	INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn);
+	INIT_WORK(&(driver->update_user_clients),
+			diag_update_user_client_work_fn);
+	INIT_WORK(&(driver->update_md_clients),
+			diag_update_md_client_work_fn);
+	diag_ws_init();
+	diag_stats_init();
+	diag_debug_init();
+	diag_md_session_init();
+
+	/* Scratch buffer for assembling incoming command packets */
+	driver->incoming_pkt.capacity = DIAG_MAX_REQ_SIZE;
+	driver->incoming_pkt.data = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
+	if (!driver->incoming_pkt.data)
+		goto fail;
+	kmemleak_not_leak(driver->incoming_pkt.data);
+	driver->incoming_pkt.processing = 0;
+	driver->incoming_pkt.read_len = 0;
+	driver->incoming_pkt.remaining = 0;
+	driver->incoming_pkt.total_len = 0;
+
+	/* Subsystem bring-up: order matters (cntl before peripherals) */
+	ret = diag_real_time_info_init();
+	if (ret)
+		goto fail;
+	ret = diag_debugfs_init();
+	if (ret)
+		goto fail;
+	ret = diag_masks_init();
+	if (ret)
+		goto fail;
+	ret = diag_mux_init();
+	if (ret)
+		goto fail;
+	ret = diagfwd_init();
+	if (ret)
+		goto fail;
+	ret = diagfwd_cntl_init();
+	if (ret)
+		goto fail;
+	/* DCI failure is not fatal: state is checked before channel init */
+	driver->dci_state = diag_dci_init();
+	ret = diagfwd_peripheral_init();
+	if (ret)
+		goto fail;
+	diagfwd_cntl_channel_init();
+	if (driver->dci_state == DIAG_DCI_NO_ERROR)
+		diag_dci_channel_init();
+	pr_debug("diagchar initializing ..\n");
+	driver->num = 1;
+	/* Name string lives in the +5 bytes allocated past the struct */
+	driver->name = ((void *)driver) + sizeof(struct diagchar_dev);
+	strlcpy(driver->name, "diag", 5);
+	/* Get major number from kernel and initialize */
+	error = alloc_chrdev_region(&dev, driver->minor_start,
+				    driver->num, driver->name);
+	if (!error) {
+		driver->major = MAJOR(dev);
+		driver->minor_start = MINOR(dev);
+	} else {
+		pr_err("diag: Major number not allocated\n");
+		goto fail;
+	}
+	driver->cdev = cdev_alloc();
+	error = diagchar_setup_cdev(dev);
+	if (error)
+		goto fail;
+
+	pr_debug("diagchar initialized now");
+	#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	diag_register_with_mhi();
+	#endif
+	return 0;
+
+fail:
+	/* Unwind everything; each exit handles partial initialization */
+	pr_err("diagchar is not initialized, ret: %d\n", ret);
+	diag_debugfs_cleanup();
+	diagchar_cleanup();
+	diag_mux_exit();
+	diagfwd_peripheral_exit();
+	diagfwd_bridge_exit();
+	diagfwd_exit();
+	diagfwd_cntl_exit();
+	diag_dci_exit();
+	diag_masks_exit();
+	diag_remote_exit();
+	return -1;
+
+}
+
+/*
+ * diag_remove - platform-driver remove: tear down all diag subsystems
+ * in roughly reverse order of diag_probe(), then destroy the char
+ * device and free the driver state.
+ */
+static int diag_remove(struct platform_device *pdev)
+{
+	printk(KERN_INFO "diagchar exiting ..\n");
+	diag_mempool_exit();
+	diag_mux_exit();
+	diagfwd_peripheral_exit();
+	diagfwd_exit();
+	diagfwd_cntl_exit();
+	diag_dci_exit();
+	diag_masks_exit();
+	diag_md_session_exit();
+	diag_remote_exit();
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	diag_unregister_mhi();
+#endif
+	diag_debugfs_cleanup();
+	diagchar_cleanup();
+
+	return 0;
+}
+
+/* Device-tree match table: binds against "qcom,diag" nodes.
+ * NOTE(review): no MODULE_DEVICE_TABLE(of, ...) — module autoloading by
+ * DT alias will not work; confirm whether that is intended.
+ */
+static struct of_device_id diag_table[] = {
+	{.compatible = "qcom,diag"},
+	{},
+};
+
+static struct platform_driver diag_driver = {
+	.probe = diag_probe,
+	.remove = diag_remove,
+	.driver = {
+		.name = "DIAG Platform",
+		.owner = THIS_MODULE,
+		.of_match_table = diag_table,
+	},
+};
+
+/* Module entry point: all real setup happens in diag_probe() */
+static int __init diagchar_init(void)
+{
+	return platform_driver_register(&diag_driver);
+}
+
+/* Module exit: unregistering the driver triggers diag_remove() */
+static void diagchar_exit(void)
+{
+	platform_driver_unregister(&diag_driver);
+	printk(KERN_INFO "done diagchar exit\n");
+}
+
+module_init(diagchar_init);
+module_exit(diagchar_exit);
+
+
+/* Debug category bitmask; populated by diag_debug_init() under DIAG_DEBUG */
+u16 diag_debug_mask;
diff -Nruw linux-6.4-fbx/drivers/char/diag./diagchar_hdlc.c linux-6.4-fbx/drivers/char/diag/diagchar_hdlc.c
--- linux-6.4-fbx/drivers/char/diag./diagchar_hdlc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diagchar_hdlc.c	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,270 @@
+/* Copyright (c) 2008-2009, 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/ratelimit.h>
+#include <linux/crc-ccitt.h>
+#include "diagchar_hdlc.h"
+#include "diagchar.h"
+
+
+MODULE_LICENSE("GPL v2");
+
+#define CRC_16_L_SEED           0xFFFF
+
+#define CRC_16_L_STEP(xx_crc, xx_c) \
+	crc_ccitt_byte(xx_crc, xx_c)
+
+/*
+ * diag_hdlc_encode - resumable HDLC encoder.
+ * @src_desc: source fragment (pkt..last), encoder state, and whether
+ *            this fragment terminates the packet
+ * @enc:      destination window (dest..dest_last) and running CRC
+ *
+ * Escapes CONTROL_CHAR/ESC_CHAR bytes, accumulates a CRC-CCITT over the
+ * payload, and on the terminating fragment appends the inverted CRC
+ * (escaped as needed) followed by the CONTROL_CHAR trailer.  The state
+ * machine (DIAG_STATE_START..COMPLETE) lets the caller resume when
+ * either the source or the destination window is exhausted: updated
+ * pointers, CRC and state are written back into both structs on return.
+ */
+void diag_hdlc_encode(struct diag_send_desc_type *src_desc,
+		      struct diag_hdlc_dest_type *enc)
+{
+	uint8_t *dest;
+	uint8_t *dest_last;
+	const uint8_t *src;
+	const uint8_t *src_last;
+	uint16_t crc;
+	unsigned char src_byte = 0;
+	enum diag_send_state_enum_type state;
+	unsigned int used = 0;
+
+	if (src_desc && enc) {
+
+		/* Copy parts to local variables. */
+		src = src_desc->pkt;
+		src_last = src_desc->last;
+		state = src_desc->state;
+		dest = enc->dest;
+		dest_last = enc->dest_last;
+
+		if (state == DIAG_STATE_START) {
+			/* Fresh packet: seed the CRC and advance to BUSY */
+			crc = CRC_16_L_SEED;
+			state++;
+		} else {
+			/* Get a local copy of the CRC */
+			crc = enc->crc;
+		}
+
+		/* dest or dest_last may be NULL to trigger a
+		   state transition only */
+		if (dest && dest_last) {
+			/* This condition needs to include the possibility
+			   of 2 dest bytes for an escaped byte */
+			while (src <= src_last && dest <= dest_last) {
+
+				src_byte = *src++;
+
+				if ((src_byte == CONTROL_CHAR) ||
+				    (src_byte == ESC_CHAR)) {
+
+					/* If the escape character is not the
+					   last byte */
+					if (dest != dest_last) {
+						crc = CRC_16_L_STEP(crc,
+								    src_byte);
+
+						*dest++ = ESC_CHAR;
+						used++;
+
+						*dest++ = src_byte
+							  ^ ESC_MASK;
+						used++;
+					} else {
+						/* No room for the 2-byte
+						   escape: push back, resume
+						   on the next call */
+						src--;
+						break;
+					}
+
+				} else {
+					crc = CRC_16_L_STEP(crc, src_byte);
+					*dest++ = src_byte;
+					used++;
+				}
+			}
+
+			if (src > src_last) {
+
+				if (state == DIAG_STATE_BUSY) {
+					if (src_desc->terminate) {
+						/* Final fragment: invert CRC
+						   and move on to emit it */
+						crc = ~crc;
+						state++;
+					} else {
+						/* Done with fragment */
+						state = DIAG_STATE_COMPLETE;
+					}
+				}
+
+				while (dest <= dest_last &&
+				       state >= DIAG_STATE_CRC1 &&
+				       state < DIAG_STATE_TERM) {
+					/* Encode a byte of the CRC next */
+					src_byte = crc & 0xFF;
+
+					if ((src_byte == CONTROL_CHAR)
+					    || (src_byte == ESC_CHAR)) {
+
+						if (dest != dest_last) {
+
+							*dest++ = ESC_CHAR;
+							used++;
+							*dest++ = src_byte ^
+								  ESC_MASK;
+							used++;
+
+							crc >>= 8;
+						} else {
+							/* Resume CRC emission
+							   next call */
+							break;
+						}
+					} else {
+
+						crc >>= 8;
+						*dest++ = src_byte;
+						used++;
+					}
+
+					state++;
+				}
+
+				if (state == DIAG_STATE_TERM) {
+					if (dest_last >= dest) {
+						*dest++ = CONTROL_CHAR;
+						used++;
+						state++;	/* Complete */
+					}
+				}
+			}
+		}
+		/* Copy local variables back into the encode structure. */
+
+		enc->dest = dest;
+		enc->dest_last = dest_last;
+		enc->crc = crc;
+		src_desc->pkt = src;
+		src_desc->last = src_last;
+		src_desc->state = state;
+	}
+
+	return;
+}
+
+
+/*
+ * diag_hdlc_decode - resumable HDLC decoder.
+ * @hdlc: source/destination buffers with per-call indices and a pending
+ *        escape flag carried across calls
+ *
+ * Unescapes ESC_CHAR sequences and copies bytes from src to dest until
+ * a CONTROL_CHAR terminator is seen (HDLC_COMPLETE), the source is
+ * exhausted, or the destination fills.  src_idx/dest_idx are advanced
+ * so the caller can continue with more data.
+ *
+ * Returns HDLC_COMPLETE when a packet boundary was found, otherwise
+ * HDLC_INCOMPLETE.
+ */
+int diag_hdlc_decode(struct diag_hdlc_decode_type *hdlc)
+{
+	uint8_t *src_ptr = NULL, *dest_ptr = NULL;
+	unsigned int src_length = 0, dest_length = 0;
+
+	unsigned int len = 0;
+	unsigned int i;
+	uint8_t src_byte;
+
+	int pkt_bnd = HDLC_INCOMPLETE;
+	int msg_start;
+
+	if (hdlc && hdlc->src_ptr && hdlc->dest_ptr &&
+	    (hdlc->src_size > hdlc->src_idx) &&
+	    (hdlc->dest_size > hdlc->dest_idx)) {
+
+		/* First call for this packet if nothing consumed yet */
+		msg_start = (hdlc->src_idx == 0) ? 1 : 0;
+
+		src_ptr = hdlc->src_ptr;
+		src_ptr = &src_ptr[hdlc->src_idx];
+		src_length = hdlc->src_size - hdlc->src_idx;
+
+		dest_ptr = hdlc->dest_ptr;
+		dest_ptr = &dest_ptr[hdlc->dest_idx];
+		dest_length = hdlc->dest_size - hdlc->dest_idx;
+
+		for (i = 0; i < src_length; i++) {
+
+			src_byte = src_ptr[i];
+
+			if (hdlc->escaping) {
+				/* Complete an escape split across calls */
+				dest_ptr[len++] = src_byte ^ ESC_MASK;
+				hdlc->escaping = 0;
+			} else if (src_byte == ESC_CHAR) {
+				if (i == (src_length - 1)) {
+					/* Escaped byte arrives next call */
+					hdlc->escaping = 1;
+					i++;
+					break;
+				} else {
+					dest_ptr[len++] = src_ptr[++i]
+							  ^ ESC_MASK;
+				}
+			} else if (src_byte == CONTROL_CHAR) {
+				/* Skip a leading terminator from the
+				   previous packet */
+				if (msg_start && i == 0 && src_length > 1)
+					continue;
+				/* Byte 0x7E will be considered
+					as end of packet */
+				dest_ptr[len++] = src_byte;
+				i++;
+				pkt_bnd = HDLC_COMPLETE;
+				break;
+			} else {
+				dest_ptr[len++] = src_byte;
+			}
+
+			if (len >= dest_length) {
+				i++;
+				break;
+			}
+		}
+
+		hdlc->src_idx += i;
+		hdlc->dest_idx += len;
+	}
+
+	return pkt_bnd;
+}
+
+/*
+ * crc_check - verify the CRC-CCITT footer of a decoded HDLC packet.
+ * @buf: packet including 2 CRC bytes plus the trailing control byte
+ * @len: total packet length; must be at least 4
+ *
+ * Returns 0 when the computed CRC matches the transmitted one, -EIO on
+ * invalid arguments or mismatch.
+ *
+ * Cleanup: the old code byte-swapped into a temp array under
+ * CONFIG_CPU_BIG_ENDIAN and then read it back through a native-endian
+ * uint16_t cast — both branches ultimately computed the little-endian
+ * wire value.  Assemble that value explicitly instead; this removes the
+ * type punning and the endian conditional while preserving behavior.
+ */
+int crc_check(uint8_t *buf, uint16_t len)
+{
+	uint16_t crc = CRC_16_L_SEED;
+	uint16_t sent_crc;
+
+	/*
+	 * The minimum length of a valid incoming packet is 4. 1 byte
+	 * of data and 3 bytes for CRC
+	 */
+	if (!buf || len < 4) {
+		pr_err_ratelimited("diag: In %s, invalid packet or length, buf: 0x%p, len: %d",
+				   __func__, buf, len);
+		return -EIO;
+	}
+
+	/*
+	 * Run CRC check for the original input. Skip the last 3 CRC
+	 * bytes
+	 */
+	crc = crc_ccitt(crc, buf, len-3);
+	crc ^= CRC_16_L_SEED;
+
+	/* CRC travels little-endian on the wire: low byte first */
+	sent_crc = buf[len-3] | (buf[len-2] << 8);
+	if (crc != sent_crc) {
+		pr_debug("diag: In %s, crc mismatch. expected: %x, sent %x.\n",
+				__func__, crc, sent_crc);
+		return -EIO;
+	}
+
+	return 0;
+}
diff -Nruw linux-6.4-fbx/drivers/char/diag./diagchar_hdlc.h linux-6.4-fbx/drivers/char/diag/diagchar_hdlc.h
--- linux-6.4-fbx/drivers/char/diag./diagchar_hdlc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diagchar_hdlc.h	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,65 @@
+/* Copyright (c) 2008-2009, 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGCHAR_HDLC
+#define DIAGCHAR_HDLC
+
+/* Encoder progress: payload -> CRC low/high bytes -> terminator -> done */
+enum diag_send_state_enum_type {
+	DIAG_STATE_START,
+	DIAG_STATE_BUSY,
+	DIAG_STATE_CRC1,
+	DIAG_STATE_CRC2,
+	DIAG_STATE_TERM,
+	DIAG_STATE_COMPLETE
+};
+
+/* One source fragment handed to diag_hdlc_encode() */
+struct diag_send_desc_type {
+	const void *pkt;
+	const void *last;	/* Address of last byte to send. */
+	enum diag_send_state_enum_type state;
+	unsigned char terminate;	/* True if this fragment
+					   terminates the packet */
+};
+
+/* Destination window and running CRC for diag_hdlc_encode() */
+struct diag_hdlc_dest_type {
+	void *dest;
+	void *dest_last;
+	/* Below: internal use only */
+	uint16_t crc;
+};
+
+/* Resumable decode context: indices and escape flag persist across calls */
+struct diag_hdlc_decode_type {
+	uint8_t *src_ptr;
+	unsigned int src_idx;
+	unsigned int src_size;
+	uint8_t *dest_ptr;
+	unsigned int dest_idx;
+	unsigned int dest_size;
+	int escaping;		/* 1 if an ESC_CHAR ended the last chunk */
+
+};
+
+void diag_hdlc_encode(struct diag_send_desc_type *src_desc,
+		      struct diag_hdlc_dest_type *enc);
+
+int diag_hdlc_decode(struct diag_hdlc_decode_type *hdlc);
+
+int crc_check(uint8_t *buf, uint16_t len);
+
+/* HDLC escape: ESC_CHAR followed by (byte ^ ESC_MASK).
+ * CONTROL_CHAR (0x7E) is expected to come from diagchar.h. */
+#define ESC_CHAR     0x7D
+#define ESC_MASK     0x20
+
+#define HDLC_INCOMPLETE		0
+#define HDLC_COMPLETE		1
+
+/* 2 CRC bytes + 1 control char */
+#define HDLC_FOOTER_LEN		3
+#endif
diff -Nruw linux-6.4-fbx/drivers/char/diag./diagfwd.c linux-6.4-fbx/drivers/char/diag/diagfwd.c
--- linux-6.4-fbx/drivers/char/diag./diagfwd.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diagfwd.c	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,1693 @@
+/* Copyright (c) 2008-2020 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/diagchar.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_cntl.h"
+#include "diagchar_hdlc.h"
+#include "diag_dci.h"
+#include "diag_masks.h"
+#include "diag_usb.h"
+#include "diag_mux.h"
+
+#define STM_CMD_VERSION_OFFSET	4
+#define STM_CMD_MASK_OFFSET	5
+#define STM_CMD_DATA_OFFSET	6
+#define STM_CMD_NUM_BYTES	7
+
+#define STM_RSP_SUPPORTED_INDEX		7
+#define STM_RSP_STATUS_INDEX		8
+#define STM_RSP_NUM_BYTES		9
+
+/* Module parameter: selects the timestamp source (0 by default) */
+static int timestamp_switch;
+module_param(timestamp_switch, int, 0644);
+
+int wrap_enabled;
+uint16_t wrap_count;
+static struct diag_hdlc_decode_type *hdlc_decode;
+
+/* Commands forwarded to every peripheral regardless of registration */
+#define DIAG_NUM_COMMON_CMD	1
+static uint8_t common_cmds[DIAG_NUM_COMMON_CMD] = {
+	DIAG_CMD_LOG_ON_DMND
+};
+
+static uint8_t hdlc_timer_in_progress;
+
+/* Determine if this device uses a device tree */
+#ifdef CONFIG_OF
+/* Returns 1 if a device-tree root node exists, 0 otherwise */
+static int has_device_tree(void)
+{
+	struct device_node *node;
+
+	node = of_find_node_by_path("/");
+	if (node) {
+		of_node_put(node);
+		return 1;
+	}
+	return 0;
+}
+#else
+/* No OF support compiled in: never a device tree */
+static int has_device_tree(void)
+{
+	return 0;
+}
+#endif
+
+/*
+ * chk_config_get_id - return the diag target config ID for known SoCs
+ * (8974 family) when running from a device tree, 0 for anything else.
+ */
+int chk_config_get_id(void)
+{
+	if (driver->use_device_tree) {
+		if (of_machine_is_compatible("qcom,msm8974"))
+			return 4083;
+		else if (of_machine_is_compatible("qcom,apq8974"))
+			return 4090;
+		else
+			return 0;
+	} else
+		return 0;
+}
+
+/*
+ * This will return TRUE for targets which support apps only mode and hence SSR.
+ * This applies to 8960 and newer targets.
+ */
+/* Returns 1 on device-tree targets (apps-only mode / SSR supported) */
+int chk_apps_only(void)
+{
+	if (driver->use_device_tree)
+		return 1;
+	return 0;
+}
+
+/*
+ * This will return TRUE for targets which support apps as master.
+ * Thus, SW DLOAD and Mode Reset are supported on apps processor.
+ * This applies to 8960 and newer targets.
+ */
+/* Returns 1 on device-tree targets, where apps is the diag master */
+int chk_apps_master(void)
+{
+	if (driver->use_device_tree)
+		return 1;
+	else
+		return 0;
+}
+
+/*
+ * chk_polling_response - decide whether the apps processor should
+ * answer polling commands itself: yes when apps is master and nobody
+ * else registered for polling, or when the modem control channel is
+ * down / its feature mask has not arrived yet.
+ */
+int chk_polling_response(void)
+{
+	if (!(driver->polling_reg_flag) && chk_apps_master())
+		/*
+		 * If the apps processor is master and no other processor
+		 * has registered to respond for polling
+		 */
+		return 1;
+	else if (!(driver->diagfwd_cntl[PERIPHERAL_MODEM] &&
+		   driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open) &&
+		 (driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask))
+		/*
+		 * If the apps processor is not the master and the modem
+		 * is not up or we did not receive the feature masks from Modem
+		 */
+		return 1;
+	else
+		return 0;
+}
+
+/*
+ * This function should be called if you feel that the logging process may
+ * need to be woken up. For instance, if the logging mode is MEMORY_DEVICE MODE
+ * and while trying to read data from data channel there are no buffers
+ * available to read the data into, then this function should be called to
+ * determine if the logging process needs to be woken up.
+ */
+/*
+ * chk_logging_wakeup - re-arm the data-ready flag and wake any
+ * memory-device logging client whose flag was cleared while its buffers
+ * were still full, so pending data eventually gets drained.
+ * Caller is expected to hold md_session_lock (md_session_map is read).
+ */
+void chk_logging_wakeup(void)
+{
+	int i;
+	int j;
+	int pid = 0;
+
+	for (j = 0; j < NUM_MD_SESSIONS; j++) {
+		if (!driver->md_session_map[j])
+			continue;
+		pid = driver->md_session_map[j]->pid;
+
+		/* Find the index of the logging process */
+		for (i = 0; i < driver->num_clients; i++) {
+			if (driver->client_map[i].pid != pid)
+				continue;
+			if (driver->data_ready[i] & USER_SPACE_DATA_TYPE)
+				continue;
+			/*
+			 * At very high logging rates a race condition can
+			 * occur where the buffers containing the data from
+			 * a channel are all in use, but the data_ready flag
+			 * is cleared. In this case, the buffers never have
+			 * their data read/logged. Detect and remedy this
+			 * situation.
+			 */
+			driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+			atomic_inc(&driver->data_ready_notif[i]);
+			pr_debug("diag: Force wakeup of logging process\n");
+			wake_up_interruptible(&driver->wait_q);
+			break;
+		}
+		/*
+		 * Diag Memory Device is in normal. Check only for the first
+		 * index as all the indices point to the same session
+		 * structure.
+		 */
+		if ((driver->md_session_mask == DIAG_CON_ALL) && (j == 0))
+			break;
+	}
+}
+
+/*
+ * pack_rsp_and_send - frame an apps response in the non-HDLC
+ * (diag_pkt_frame_t) format and push it to the mux layer.  Waits for
+ * the single shared response buffer to be returned by the previous
+ * write before claiming it.
+ *
+ * Fixes:
+ *  - rsp_buf_busy was claimed without rsp_buf_busy_lock while the
+ *    completion path (and the sibling encode_rsp_and_send()) clears it
+ *    under that lock; take the lock for the set as well.
+ *  - retry_count was a signed int compared against UINT_MAX: the bound
+ *    could never be reached and the increment overflowed (UB).  Use an
+ *    unsigned counter.
+ */
+static void pack_rsp_and_send(unsigned char *buf, int len)
+{
+	int err;
+	unsigned int retry_count = 0;
+	uint32_t write_len = 0;
+	unsigned long flags;
+	unsigned char *rsp_ptr = driver->encoded_rsp_buf;
+	struct diag_pkt_frame_t header;
+
+	if (!rsp_ptr || !buf)
+		return;
+
+	if (len > DIAG_MAX_RSP_SIZE || len < 0) {
+		pr_err("diag: In %s, invalid len %d, permissible len %d\n",
+		       __func__, len, DIAG_MAX_RSP_SIZE);
+		return;
+	}
+
+	/*
+	 * Keep trying till we get the buffer back. It should probably
+	 * take one or two iterations. When this loops till UINT_MAX, it
+	 * means we did not get a write complete for the previous
+	 * response.
+	 */
+	while (retry_count < UINT_MAX) {
+		if (!driver->rsp_buf_busy)
+			break;
+		/*
+		 * Wait for sometime and try again. The value 10000 was chosen
+		 * empirically as an optimum value for USB to complete a write
+		 */
+		usleep_range(10000, 10100);
+		retry_count++;
+
+		/*
+		 * There can be a race conditon that clears the data ready flag
+		 * for responses. Make sure we don't miss previous wakeups for
+		 * draining responses when we are in Memory Device Mode.
+		 */
+		if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
+				driver->logging_mode == DIAG_MULTI_MODE) {
+			mutex_lock(&driver->md_session_lock);
+			chk_logging_wakeup();
+			mutex_unlock(&driver->md_session_lock);
+		}
+	}
+	if (driver->rsp_buf_busy) {
+		pr_err("diag: unable to get hold of response buffer\n");
+		return;
+	}
+
+	/* Claim the buffer under the same lock used to release it */
+	spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+	driver->rsp_buf_busy = 1;
+	spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+	header.start = CONTROL_CHAR;
+	header.version = 1;
+	header.length = len;
+	memcpy(rsp_ptr, &header, sizeof(header));
+	write_len += sizeof(header);
+	memcpy(rsp_ptr + write_len, buf, len);
+	write_len += len;
+	*(uint8_t *)(rsp_ptr + write_len) = CONTROL_CHAR;
+	write_len += sizeof(uint8_t);
+
+	err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, write_len,
+			     driver->rsp_buf_ctxt);
+	if (err) {
+		pr_err("diag: In %s, unable to write to mux, err: %d\n",
+		       __func__, err);
+		spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+		driver->rsp_buf_busy = 0;
+		spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+	}
+}
+
+/*
+ * encode_rsp_and_send - HDLC-encode an apps response into the shared
+ * response buffer and push it to the mux layer.  Waits for the buffer
+ * to be released by the previous write before claiming it (under
+ * rsp_buf_busy_lock).
+ *
+ * NOTE(review): retry_count is a signed int compared to UINT_MAX — the
+ * bound is unreachable and the increment eventually overflows; consider
+ * making it unsigned (left as-is here).
+ */
+static void encode_rsp_and_send(unsigned char *buf, int len)
+{
+	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+	unsigned char *rsp_ptr = driver->encoded_rsp_buf;
+	int err, retry_count = 0;
+	unsigned long flags;
+
+	if (!rsp_ptr || !buf)
+		return;
+
+	if (len > DIAG_MAX_RSP_SIZE || len < 0) {
+		pr_err("diag: In %s, invalid len %d, permissible len %d\n",
+		       __func__, len, DIAG_MAX_RSP_SIZE);
+		return;
+	}
+
+	/*
+	 * Keep trying till we get the buffer back. It should probably
+	 * take one or two iterations. When this loops till UINT_MAX, it
+	 * means we did not get a write complete for the previous
+	 * response.
+	 */
+	while (retry_count < UINT_MAX) {
+		if (!driver->rsp_buf_busy)
+			break;
+		/*
+		 * Wait for sometime and try again. The value 10000 was chosen
+		 * empirically as an optimum value for USB to complete a write
+		 */
+		usleep_range(10000, 10100);
+		retry_count++;
+
+		/*
+		 * There can be a race conditon that clears the data ready flag
+		 * for responses. Make sure we don't miss previous wakeups for
+		 * draining responses when we are in Memory Device Mode.
+		 */
+		if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
+				driver->logging_mode == DIAG_MULTI_MODE) {
+			mutex_lock(&driver->md_session_lock);
+			chk_logging_wakeup();
+			mutex_unlock(&driver->md_session_lock);
+		}
+	}
+
+	if (driver->rsp_buf_busy) {
+		pr_err("diag: unable to get hold of response buffer\n");
+		return;
+	}
+
+	spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+	driver->rsp_buf_busy = 1;
+	spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+	send.state = DIAG_STATE_START;
+	send.pkt = buf;
+	send.last = (void *)(buf + len - 1);
+	send.terminate = 1;
+	enc.dest = rsp_ptr;
+	enc.dest_last = (void *)(rsp_ptr + DIAG_MAX_HDLC_BUF_SIZE - 1);
+	diag_hdlc_encode(&send, &enc);
+	driver->encoded_rsp_len = (int)(enc.dest - (void *)rsp_ptr);
+	err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, driver->encoded_rsp_len,
+			     driver->rsp_buf_ctxt);
+	if (err) {
+		pr_err("diag: In %s, Unable to write to device, err: %d\n",
+			__func__, err);
+		spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+		driver->rsp_buf_busy = 0;
+		spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+	}
+	memset(buf, '\0', DIAG_MAX_RSP_SIZE);
+}
+
+/*
+ * diag_send_rsp - dispatch an apps response using the framing the
+ * current session expects: raw packed frames when HDLC encoding is
+ * disabled, HDLC otherwise.
+ */
+void diag_send_rsp(unsigned char *buf, int len)
+{
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled;
+
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_peripheral(APPS_DATA);
+	if (session_info)
+		hdlc_disabled = session_info->hdlc_disabled;
+	else
+		hdlc_disabled = driver->hdlc_disabled;
+	mutex_unlock(&driver->md_session_lock);
+	if (hdlc_disabled)
+		pack_rsp_and_send(buf, len);
+	else
+		encode_rsp_and_send(buf, len);
+}
+
+/*
+ * diag_update_pkt_buffer - copy an incoming packet into the apps
+ * request buffer (PKT_TYPE) or the DCI packet buffer (DCI_PKT_TYPE)
+ * and mark that buffer busy, under diagchar_mutex.
+ */
+void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type)
+{
+	unsigned char *ptr = NULL;
+	unsigned char *temp = buf;
+	int *in_busy = NULL;
+	uint32_t *length = NULL;
+	uint32_t max_len = 0;
+
+	if (!buf || len == 0) {
+		pr_err("diag: In %s, Invalid ptr %pK and length %d\n",
+		       __func__, buf, len);
+		return;
+	}
+
+	switch (type) {
+	case PKT_TYPE:
+		ptr = driver->apps_req_buf;
+		length = &driver->apps_req_buf_len;
+		max_len = DIAG_MAX_REQ_SIZE;
+		in_busy = &driver->in_busy_pktdata;
+		break;
+	case DCI_PKT_TYPE:
+		ptr = driver->dci_pkt_buf;
+		length = &driver->dci_pkt_length;
+		max_len = DCI_BUF_SIZE;
+		in_busy = &driver->in_busy_dcipktdata;
+		break;
+	default:
+		pr_err("diag: Invalid type %d in %s\n", type, __func__);
+		return;
+	}
+
+	mutex_lock(&driver->diagchar_mutex);
+	if (CHK_OVERFLOW(ptr, ptr, ptr + max_len, len)) {
+		memcpy(ptr, temp , len);
+		*length = len;
+		*in_busy = 1;
+	} else {
+		pr_alert("diag: In %s, no space for response packet, len: %d, type: %d\n",
+			 __func__, len, type);
+	}
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+/*
+ * diag_update_userspace_clients - set the data-ready bit @type for every
+ * registered client that doesn't already have it, and wake all waiters.
+ */
+void diag_update_userspace_clients(unsigned int type)
+{
+	int i;
+
+	mutex_lock(&driver->diagchar_mutex);
+	for (i = 0; i < driver->num_clients; i++)
+		if (driver->client_map[i].pid != 0 &&
+			!(driver->data_ready[i] & type)) {
+			driver->data_ready[i] |= type;
+			atomic_inc(&driver->data_ready_notif[i]);
+		}
+	wake_up_interruptible(&driver->wait_q);
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+/*
+ * diag_update_md_clients - set the data-ready bit @type for every
+ * client that owns a memory-device session, and wake all waiters.
+ *
+ * Fix: the data_ready check used the session index "i" while the set
+ * and notification used the client index "j"; when the two differed a
+ * client could be notified repeatedly or not at all.  The check must
+ * use "j", matching diag_update_userspace_clients().
+ */
+void diag_update_md_clients(unsigned int type)
+{
+	int i, j;
+
+	mutex_lock(&driver->diagchar_mutex);
+	mutex_lock(&driver->md_session_lock);
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if (driver->md_session_map[i] != NULL)
+			for (j = 0; j < driver->num_clients; j++) {
+				if (driver->client_map[j].pid != 0 &&
+					driver->client_map[j].pid ==
+					driver->md_session_map[i]->pid) {
+					if (!(driver->data_ready[j] & type)) {
+						driver->data_ready[j] |= type;
+						atomic_inc(
+						&driver->data_ready_notif[j]);
+					}
+					break;
+				}
+			}
+	}
+	mutex_unlock(&driver->md_session_lock);
+	wake_up_interruptible(&driver->wait_q);
+	mutex_unlock(&driver->diagchar_mutex);
+}
+/*
+ * diag_update_sleeping_process - set the data-ready bit @data_type for
+ * the single client identified by @process_id and wake all waiters.
+ */
+void diag_update_sleeping_process(int process_id, int data_type)
+{
+	int i;
+
+	mutex_lock(&driver->diagchar_mutex);
+	for (i = 0; i < driver->num_clients; i++)
+		if (driver->client_map[i].pid == process_id) {
+			if (!(driver->data_ready[i] & data_type)) {
+				driver->data_ready[i] |= data_type;
+				atomic_inc(&driver->data_ready_notif[i]);
+			}
+			break;
+		}
+	wake_up_interruptible(&driver->wait_q);
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+/*
+ * diag_send_data - route a command packet to its registered handler:
+ * buffered locally and the owning process woken when it registered from
+ * apps, otherwise forwarded over the peripheral's CMD channel.
+ */
+static int diag_send_data(struct diag_cmd_reg_t *entry, unsigned char *buf,
+			  int len)
+{
+	if (!entry)
+		return -EIO;
+
+	if (entry->proc == APPS_DATA) {
+		diag_update_pkt_buffer(buf, len, PKT_TYPE);
+		diag_update_sleeping_process(entry->pid, PKT_TYPE);
+		return 0;
+	}
+
+	return diagfwd_write(entry->proc, TYPE_CMD, buf, len);
+}
+
+/*
+ * diag_process_stm_mask - apply an STM on/off command to one data
+ * source.  For peripherals the new state is only latched once the
+ * control message is sent successfully; the requested state is always
+ * recorded.  APPS_DATA is updated directly.
+ */
+void diag_process_stm_mask(uint8_t cmd, uint8_t data_mask, int data_type)
+{
+	int status = 0;
+	if (data_type >= PERIPHERAL_MODEM && data_type <= PERIPHERAL_SENSORS) {
+		if (driver->feature[data_type].stm_support) {
+			status = diag_send_stm_state(data_type, cmd);
+			if (status == 0)
+				driver->stm_state[data_type] = cmd;
+		}
+		driver->stm_state_requested[data_type] = cmd;
+	} else if (data_type == APPS_DATA) {
+		driver->stm_state[data_type] = cmd;
+		driver->stm_state_requested[data_type] = cmd;
+	}
+}
+
+/*
+ * diag_process_stm_cmd - handle an STM command packet: validate it,
+ * fan the requested state out to every peripheral named in the mask,
+ * and build the response (echoed command + supported/status masks).
+ *
+ * Returns the number of response bytes written to @dest_buf, or -EIO
+ * on NULL arguments.
+ */
+int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf)
+{
+	uint8_t version, mask, cmd;
+	uint8_t rsp_supported = 0;
+	uint8_t rsp_status = 0;
+	int i;
+
+	if (!buf || !dest_buf) {
+		pr_err("diag: Invalid pointers buf: %pK, dest_buf %pK in %s\n",
+		       buf, dest_buf, __func__);
+		return -EIO;
+	}
+
+	version = *(buf + STM_CMD_VERSION_OFFSET);
+	mask = *(buf + STM_CMD_MASK_OFFSET);
+	cmd = *(buf + STM_CMD_DATA_OFFSET);
+
+	/*
+	 * Check if command is valid. If the command is asking for
+	 * status, then the processor mask field is to be ignored.
+	 */
+	if ((version != 2) || (cmd > STATUS_STM) ||
+		((cmd != STATUS_STM) && ((mask == 0) || (0 != (mask >> 4))))) {
+		/* Command is invalid. Send bad param message response */
+		dest_buf[0] = BAD_PARAM_RESPONSE_MESSAGE;
+		for (i = 0; i < STM_CMD_NUM_BYTES; i++)
+			dest_buf[i+1] = *(buf + i);
+		return STM_CMD_NUM_BYTES+1;
+	} else if (cmd != STATUS_STM) {
+		/* Apply the requested state to each masked source */
+		if (mask & DIAG_STM_MODEM)
+			diag_process_stm_mask(cmd, DIAG_STM_MODEM,
+					      PERIPHERAL_MODEM);
+
+		if (mask & DIAG_STM_LPASS)
+			diag_process_stm_mask(cmd, DIAG_STM_LPASS,
+					      PERIPHERAL_LPASS);
+
+		if (mask & DIAG_STM_WCNSS)
+			diag_process_stm_mask(cmd, DIAG_STM_WCNSS,
+					      PERIPHERAL_WCNSS);
+
+		if (mask & DIAG_STM_SENSORS)
+			diag_process_stm_mask(cmd, DIAG_STM_SENSORS,
+						PERIPHERAL_SENSORS);
+
+		if (mask & DIAG_STM_APPS)
+			diag_process_stm_mask(cmd, DIAG_STM_APPS, APPS_DATA);
+	}
+
+	/* Response starts with the echoed command bytes */
+	for (i = 0; i < STM_CMD_NUM_BYTES; i++)
+		dest_buf[i] = *(buf + i);
+
+	/* Set mask denoting which peripherals support STM */
+	if (driver->feature[PERIPHERAL_MODEM].stm_support)
+		rsp_supported |= DIAG_STM_MODEM;
+
+	if (driver->feature[PERIPHERAL_LPASS].stm_support)
+		rsp_supported |= DIAG_STM_LPASS;
+
+	if (driver->feature[PERIPHERAL_WCNSS].stm_support)
+		rsp_supported |= DIAG_STM_WCNSS;
+
+	if (driver->feature[PERIPHERAL_SENSORS].stm_support)
+		rsp_supported |= DIAG_STM_SENSORS;
+
+	rsp_supported |= DIAG_STM_APPS;
+
+	/* Set mask denoting STM state/status for each peripheral/APSS */
+	if (driver->stm_state[PERIPHERAL_MODEM])
+		rsp_status |= DIAG_STM_MODEM;
+
+	if (driver->stm_state[PERIPHERAL_LPASS])
+		rsp_status |= DIAG_STM_LPASS;
+
+	if (driver->stm_state[PERIPHERAL_WCNSS])
+		rsp_status |= DIAG_STM_WCNSS;
+
+	if (driver->stm_state[PERIPHERAL_SENSORS])
+		rsp_status |= DIAG_STM_SENSORS;
+
+	if (driver->stm_state[APPS_DATA])
+		rsp_status |= DIAG_STM_APPS;
+
+	dest_buf[STM_RSP_SUPPORTED_INDEX] = rsp_supported;
+	dest_buf[STM_RSP_STATUS_INDEX] = rsp_status;
+
+	return STM_RSP_NUM_BYTES;
+}
+
+/*
+ * diag_process_time_sync_query_cmd - answer a time-API query: echo the
+ * request header/version and report which time API the driver uses.
+ *
+ * Returns the number of response bytes written, or -EINVAL on bad
+ * arguments or a too-short request.
+ */
+int diag_process_time_sync_query_cmd(unsigned char *src_buf, int src_len,
+				      unsigned char *dest_buf, int dest_len)
+{
+	int write_len = 0;
+	struct diag_cmd_time_sync_query_req_t *req = NULL;
+	struct diag_cmd_time_sync_query_rsp_t rsp;
+
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+		src_len < sizeof(struct diag_cmd_time_sync_query_req_t)) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
+			__func__, src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	req = (struct diag_cmd_time_sync_query_req_t *)src_buf;
+	rsp.header.cmd_code = req->header.cmd_code;
+	rsp.header.subsys_id = req->header.subsys_id;
+	rsp.header.subsys_cmd_code = req->header.subsys_cmd_code;
+	rsp.version = req->version;
+	rsp.time_api = driver->uses_time_api;
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len = sizeof(rsp);
+	return write_len;
+}
+
+/*
+ * diag_process_time_sync_switch_cmd - handle a "switch time API" request.
+ *
+ * Validates the request, broadcasts a DIAG_CTRL_MSG_TIME_SYNC_PKT control
+ * packet to all peripherals, then records the new API selection in
+ * driver->uses_time_api and the global timestamp_switch.
+ *
+ * Returns the number of response bytes written to dest_buf, or -EINVAL
+ * on invalid input.
+ */
+int diag_process_time_sync_switch_cmd(unsigned char *src_buf, int src_len,
+				      unsigned char *dest_buf, int dest_len)
+{
+	uint8_t peripheral, status = 0;
+	struct diag_cmd_time_sync_switch_req_t *req = NULL;
+	struct diag_cmd_time_sync_switch_rsp_t rsp;
+	struct diag_ctrl_msg_time_sync time_sync_msg;
+	int msg_size = sizeof(struct diag_ctrl_msg_time_sync);
+	int err = 0, write_len = 0;
+
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+		src_len < sizeof(struct diag_cmd_time_sync_switch_req_t)) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
+			__func__, src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	req = (struct diag_cmd_time_sync_switch_req_t *)src_buf;
+	rsp.header.cmd_code = req->header.cmd_code;
+	rsp.header.subsys_id = req->header.subsys_id;
+	rsp.header.subsys_cmd_code = req->header.subsys_cmd_code;
+	rsp.version = req->version;
+	rsp.time_api = req->time_api;
+	/*
+	 * Only version 1, time_api 0/1 are supported, and persisting the
+	 * selection across reboots is not; reject anything else with a
+	 * bad-parameter response (response prefixed with 0x48).
+	 */
+	if ((req->version > 1) || (req->time_api > 1) ||
+					(req->persist_time > 0)) {
+		dest_buf[0] = BAD_PARAM_RESPONSE_MESSAGE;
+		rsp.time_api_status = 0;
+		rsp.persist_time_status = PERSIST_TIME_NOT_SUPPORTED;
+		memcpy(dest_buf + 1, &rsp, sizeof(rsp));
+		write_len = sizeof(rsp) + 1;
+		timestamp_switch = 0;
+		return write_len;
+	}
+
+	time_sync_msg.ctrl_pkt_id = DIAG_CTRL_MSG_TIME_SYNC_PKT;
+	time_sync_msg.ctrl_pkt_data_len = 5;
+	time_sync_msg.version = 1;
+	time_sync_msg.time_api = req->time_api;
+
+	/* Notify every peripheral; record failures per-peripheral in status. */
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		err = diagfwd_write(peripheral, TYPE_CNTL, &time_sync_msg,
+					msg_size);
+		if (err && err != -ENODEV) {
+			pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
+				__func__, peripheral, TYPE_CNTL,
+				msg_size, err);
+			status |= (1 << peripheral);
+		}
+	}
+
+	driver->time_sync_enabled = 1;
+	driver->uses_time_api = req->time_api;
+
+	switch (req->time_api) {
+	case 0:
+		timestamp_switch = 0;
+		break;
+	case 1:
+		timestamp_switch = 1;
+		break;
+	default:
+		timestamp_switch = 0;
+		break;
+	}
+
+	rsp.time_api_status = status;
+	rsp.persist_time_status = PERSIST_TIME_NOT_SUPPORTED;
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len = sizeof(rsp);
+	return write_len;
+}
+
+/*
+ * diag_cmd_log_on_demand - build the apps response to a Log On Demand
+ * (0x78) request.
+ *
+ * Only responds when the modem control channel is open and log-on-demand
+ * is supported; otherwise returns 0 so the caller does not send anything.
+ *
+ * Returns the response length, 0 when not applicable, or -EINVAL on bad
+ * input.
+ *
+ * NOTE(review): the log code is read as a uint16 at src_buf + 1, but
+ * src_len is only checked to be > 0 — confirm callers guarantee at
+ * least 3 bytes.
+ */
+int diag_cmd_log_on_demand(unsigned char *src_buf, int src_len,
+			   unsigned char *dest_buf, int dest_len)
+{
+	int write_len = 0;
+	struct diag_log_on_demand_rsp_t header;
+
+	if (!driver->diagfwd_cntl[PERIPHERAL_MODEM] ||
+	    !driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open ||
+	    !driver->log_on_demand_support)
+		return 0;
+
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
+		       __func__, src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	header.cmd_code = DIAG_CMD_LOG_ON_DMND;
+	header.log_code = *(uint16_t *)(src_buf + 1);
+	header.status = 1;
+	memcpy(dest_buf, &header, sizeof(struct diag_log_on_demand_rsp_t));
+	write_len += sizeof(struct diag_log_on_demand_rsp_t);
+
+	return write_len;
+}
+
+/*
+ * diag_cmd_get_mobile_id - build the extended mobile ID response.
+ *
+ * Echoes the request header and replies with version 2, zeroed padding
+ * and family 0.  Returns the response length, or -EIO when the request
+ * is not exactly a bare packet header or the destination buffer is too
+ * small.
+ */
+int diag_cmd_get_mobile_id(unsigned char *src_buf, int src_len,
+			   unsigned char *dest_buf, int dest_len)
+{
+	int write_len = 0;
+	struct diag_pkt_header_t *header = NULL;
+	struct diag_cmd_ext_mobile_rsp_t rsp = {0};
+
+	if (!src_buf || src_len != sizeof(*header) || !dest_buf ||
+	    dest_len < sizeof(rsp))
+		return -EIO;
+
+	header = (struct diag_pkt_header_t *)src_buf;
+	rsp.header.cmd_code = header->cmd_code;
+	rsp.header.subsys_id = header->subsys_id;
+	rsp.header.subsys_cmd_code = header->subsys_cmd_code;
+	rsp.version = 2;
+	rsp.padding[0] = 0;
+	rsp.padding[1] = 0;
+	rsp.padding[2] = 0;
+	rsp.family = 0;
+
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+
+	return write_len;
+}
+
+/*
+ * diag_check_common_cmd - test whether a command code is one of the
+ * commands in the common_cmds[] table.
+ *
+ * Returns 1 on a match, 0 otherwise, -EIO on a NULL header.
+ */
+int diag_check_common_cmd(struct diag_pkt_header_t *header)
+{
+	int i;
+
+	if (!header)
+		return -EIO;
+
+	for (i = 0; i < DIAG_NUM_COMMON_CMD; i++) {
+		if (header->cmd_code == common_cmds[i])
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * diag_cmd_chk_stats - handle the DIAG_SS_DIAG statistics sub-commands.
+ *
+ * Returns alloc/drop counters for msg/log/event streams, or resets them
+ * via diag_record_stats().  The response is the echoed header followed
+ * by the requested counter (0 for reset operations).
+ *
+ * Returns the response length, or -EINVAL when the packet is not a
+ * stats command or the buffers are invalid.
+ */
+static int diag_cmd_chk_stats(unsigned char *src_buf, int src_len,
+			      unsigned char *dest_buf, int dest_len)
+{
+	int payload = 0;
+	int write_len = 0;
+	struct diag_pkt_header_t *header = NULL;
+	struct diag_cmd_stats_rsp_t rsp;
+
+	if (!src_buf || src_len < sizeof(struct diag_pkt_header_t) ||
+	    !dest_buf || dest_len < sizeof(rsp))
+		return -EINVAL;
+
+	header = (struct diag_pkt_header_t *)src_buf;
+
+	if (header->cmd_code != DIAG_CMD_DIAG_SUBSYS ||
+	    header->subsys_id != DIAG_SS_DIAG)
+		return -EINVAL;
+
+	switch (header->subsys_cmd_code) {
+	case DIAG_CMD_OP_GET_MSG_ALLOC:
+		payload = driver->msg_stats.alloc_count;
+		break;
+	case DIAG_CMD_OP_GET_MSG_DROP:
+		payload = driver->msg_stats.drop_count;
+		break;
+	case DIAG_CMD_OP_RESET_MSG_STATS:
+		diag_record_stats(DATA_TYPE_F3, PKT_RESET);
+		break;
+	case DIAG_CMD_OP_GET_LOG_ALLOC:
+		payload = driver->log_stats.alloc_count;
+		break;
+	case DIAG_CMD_OP_GET_LOG_DROP:
+		payload = driver->log_stats.drop_count;
+		break;
+	case DIAG_CMD_OP_RESET_LOG_STATS:
+		diag_record_stats(DATA_TYPE_LOG, PKT_RESET);
+		break;
+	case DIAG_CMD_OP_GET_EVENT_ALLOC:
+		payload = driver->event_stats.alloc_count;
+		break;
+	case DIAG_CMD_OP_GET_EVENT_DROP:
+		payload = driver->event_stats.drop_count;
+		break;
+	case DIAG_CMD_OP_RESET_EVENT_STATS:
+		diag_record_stats(DATA_TYPE_EVENT, PKT_RESET);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	memcpy(&rsp.header, header, sizeof(struct diag_pkt_header_t));
+	rsp.payload = payload;
+	write_len = sizeof(rsp);
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+
+	return write_len;
+}
+
+/*
+ * diag_cmd_disable_hdlc - build the response for the "disable HDLC
+ * framing" sub-command (DIAG_CMD_OP_HDLC_DISABLE).
+ *
+ * Only constructs the response; the actual switch of hdlc_disabled is
+ * done by the caller after the (still HDLC-encoded) response has been
+ * sent.
+ *
+ * Returns the response length, -EINVAL when the packet is not this
+ * command, or -EIO on bad buffers.
+ */
+static int diag_cmd_disable_hdlc(unsigned char *src_buf, int src_len,
+				 unsigned char *dest_buf, int dest_len)
+{
+	struct diag_pkt_header_t *header = NULL;
+	struct diag_cmd_hdlc_disable_rsp_t rsp;
+	int write_len = 0;
+
+	if (!src_buf || src_len < sizeof(*header) ||
+	    !dest_buf || dest_len < sizeof(rsp)) {
+		return -EIO;
+	}
+
+	header = (struct diag_pkt_header_t *)src_buf;
+	if (header->cmd_code != DIAG_CMD_DIAG_SUBSYS ||
+	    header->subsys_id != DIAG_SS_DIAG ||
+	    header->subsys_cmd_code != le16_to_cpu(DIAG_CMD_OP_HDLC_DISABLE)) {
+		return -EINVAL;
+	}
+
+	memcpy(&rsp.header, header, sizeof(struct diag_pkt_header_t));
+	rsp.framing_version = 1;
+	rsp.result = 0;
+	write_len = sizeof(rsp);
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+
+	return write_len;
+}
+
+/*
+ * diag_send_error_rsp - send the standard diag error response: the
+ * DIAG_CMD_ERROR byte (0x13) followed by an echo of the offending
+ * request.  Drops the response entirely if it would not fit in
+ * apps_rsp_buf.
+ */
+void diag_send_error_rsp(unsigned char *buf, int len)
+{
+	/* -1 to accommodate the first byte 0x13 */
+	if (len > (DIAG_MAX_RSP_SIZE - 1)) {
+		pr_err("diag: cannot send err rsp, huge length: %d\n", len);
+		return;
+	}
+
+	*(uint8_t *)driver->apps_rsp_buf = DIAG_CMD_ERROR;
+	memcpy((driver->apps_rsp_buf + sizeof(uint8_t)), buf, len);
+	diag_send_rsp(driver->apps_rsp_buf, len + 1);
+}
+
+/*
+ * diag_process_apps_pkt - central dispatcher for a decoded diag request.
+ *
+ * Processing order:
+ *  1. mask commands (diag_process_apps_masks),
+ *  2. commands registered by peripherals/clients (diag_cmd_search),
+ *  3. a chain of commands the apps processor answers itself (max packet
+ *     length, STM, time sync, reboot/download 0x3A, polling, delayed
+ *     response wrap, mobile ID, stats, HDLC disable),
+ *  4. otherwise an error response when this is an apps-only target.
+ *
+ * Returns 0 when a response was sent (or nothing to do), the number of
+ * bytes forwarded for registered commands, or a negative errno.
+ */
+int diag_process_apps_pkt(unsigned char *buf, int len, int pid)
+{
+	int i, p_mask = 0;
+	int mask_ret;
+	int write_len = 0;
+	unsigned char *temp = NULL;
+	struct diag_cmd_reg_entry_t entry;
+	struct diag_cmd_reg_entry_t *temp_entry = NULL;
+	struct diag_cmd_reg_t *reg_item = NULL;
+	struct diag_md_session_t *info = NULL;
+
+	if (!buf)
+		return -EIO;
+
+	/* Check if the command is a supported mask command */
+	mask_ret = diag_process_apps_masks(buf, len, pid);
+	if (mask_ret > 0) {
+		diag_send_rsp(driver->apps_rsp_buf, mask_ret);
+		return 0;
+	}
+
+	/* Build a registration-table search key from the packet header. */
+	temp = buf;
+	entry.cmd_code = (uint16_t)(*(uint8_t *)temp);
+	temp += sizeof(uint8_t);
+	entry.subsys_id = (uint16_t)(*(uint8_t *)temp);
+	temp += sizeof(uint8_t);
+	/*
+	 * hi == lo: the key is a single exact subsys command code —
+	 * presumably diag_cmd_search treats [lo, hi] as a range; confirm.
+	 */
+	entry.cmd_code_hi = (uint16_t)(*(uint16_t *)temp);
+	entry.cmd_code_lo = (uint16_t)(*(uint16_t *)temp);
+	temp += sizeof(uint16_t);
+
+	entry.cmd_code_hi = cpu_to_le16(entry.cmd_code_hi);
+	entry.cmd_code_lo = cpu_to_le16(entry.cmd_code_lo);
+
+	pr_debug("diag: In %s, received cmd %02x %02x %02x\n",
+		 __func__, entry.cmd_code, entry.subsys_id, entry.cmd_code_hi);
+
+	if (*buf == DIAG_CMD_LOG_ON_DMND && driver->log_on_demand_support &&
+	    driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask) {
+		write_len = diag_cmd_log_on_demand(buf, len,
+						   driver->apps_rsp_buf,
+						   DIAG_MAX_RSP_SIZE);
+		if (write_len > 0)
+			diag_send_rsp(driver->apps_rsp_buf, write_len);
+		return 0;
+	}
+
+	/* Registered command: forward to the owning peripheral/client. */
+	mutex_lock(&driver->cmd_reg_mutex);
+	temp_entry = diag_cmd_search(&entry, ALL_PROC);
+	if (temp_entry) {
+		reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
+								entry);
+		mutex_lock(&driver->md_session_lock);
+		info = diag_md_session_get_pid(pid);
+		if (info) {
+			p_mask = info->peripheral_mask;
+			mutex_unlock(&driver->md_session_lock);
+			if (MD_PERIPHERAL_MASK(reg_item->proc) & p_mask)
+				write_len = diag_send_data(reg_item, buf, len);
+		} else {
+			mutex_unlock(&driver->md_session_lock);
+			if (MD_PERIPHERAL_MASK(reg_item->proc) &
+				driver->logging_mask)
+				diag_send_error_rsp(buf, len);
+			else
+				write_len = diag_send_data(reg_item, buf, len);
+		}
+		mutex_unlock(&driver->cmd_reg_mutex);
+		return write_len;
+	}
+	mutex_unlock(&driver->cmd_reg_mutex);
+
+	/* Check for the command/respond msg for the maximum packet length */
+	if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
+		(*(uint16_t *)(buf+2) == 0x0055)) {
+		for (i = 0; i < 4; i++)
+			*(driver->apps_rsp_buf+i) = *(buf+i);
+		*(uint32_t *)(driver->apps_rsp_buf+4) = DIAG_MAX_REQ_SIZE;
+		diag_send_rsp(driver->apps_rsp_buf, 8);
+		return 0;
+	} else if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
+		(*(uint16_t *)(buf+2) == DIAG_DIAG_STM)) {
+		len = diag_process_stm_cmd(buf, driver->apps_rsp_buf);
+		if (len > 0) {
+			diag_send_rsp(driver->apps_rsp_buf, len);
+			return 0;
+		}
+		return len;
+	}
+	/* Check for time sync query command */
+	else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
+		(*(buf+1) == DIAG_SS_DIAG) &&
+		(*(uint16_t *)(buf+2) == DIAG_GET_TIME_API)) {
+		write_len = diag_process_time_sync_query_cmd(buf, len,
+							driver->apps_rsp_buf,
+							DIAG_MAX_RSP_SIZE);
+		if (write_len > 0)
+			diag_send_rsp(driver->apps_rsp_buf, write_len);
+		return 0;
+	}
+	/* Check for time sync switch command */
+	else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
+		(*(buf+1) == DIAG_SS_DIAG) &&
+		(*(uint16_t *)(buf+2) == DIAG_SET_TIME_API)) {
+		write_len = diag_process_time_sync_switch_cmd(buf, len,
+							driver->apps_rsp_buf,
+							DIAG_MAX_RSP_SIZE);
+		if (write_len > 0)
+			diag_send_rsp(driver->apps_rsp_buf, write_len);
+		return 0;
+	}
+	/* Check for download command */
+	else if ((chk_apps_master()) && (*buf == 0x3A)) {
+		/* send response back */
+		driver->apps_rsp_buf[0] = *buf;
+		diag_send_rsp(driver->apps_rsp_buf, 1);
+		/* Give the response time to drain before rebooting. */
+		msleep(5000);
+		kernel_restart(NULL);
+		/* Not required, represents that command isn't sent to modem */
+		return 0;
+	}
+	/* Check for polling for Apps only DIAG */
+	else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
+		(*(buf+2) == 0x03)) {
+		/* If no one has registered for polling */
+		if (chk_polling_response()) {
+			/* Respond to polling for Apps only DIAG */
+			for (i = 0; i < 3; i++)
+				driver->apps_rsp_buf[i] = *(buf+i);
+			for (i = 0; i < 13; i++)
+				driver->apps_rsp_buf[i+3] = 0;
+
+			diag_send_rsp(driver->apps_rsp_buf, 16);
+			return 0;
+		}
+	}
+	/* Return the Delayed Response Wrap Status */
+	else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
+		(*(buf+2) == 0x04) && (*(buf+3) == 0x0)) {
+		memcpy(driver->apps_rsp_buf, buf, 4);
+		driver->apps_rsp_buf[4] = wrap_enabled;
+		diag_send_rsp(driver->apps_rsp_buf, 5);
+		return 0;
+	}
+	/* Wrap the Delayed Rsp ID */
+	else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
+		(*(buf+2) == 0x05) && (*(buf+3) == 0x0)) {
+		wrap_enabled = true;
+		memcpy(driver->apps_rsp_buf, buf, 4);
+		driver->apps_rsp_buf[4] = wrap_count;
+		diag_send_rsp(driver->apps_rsp_buf, 6);
+		return 0;
+	}
+	/* Mobile ID Rsp */
+	else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
+		(*(buf+1) == DIAG_SS_PARAMS) &&
+		(*(buf+2) == DIAG_EXT_MOBILE_ID) && (*(buf+3) == 0x0))  {
+			write_len = diag_cmd_get_mobile_id(buf, len,
+						   driver->apps_rsp_buf,
+						   DIAG_MAX_RSP_SIZE);
+		if (write_len > 0) {
+			diag_send_rsp(driver->apps_rsp_buf, write_len);
+			return 0;
+		}
+	}
+	 /*
+	  * If the apps processor is master and no other
+	  * processor has registered for polling command.
+	  * If modem is not up and we have not received feature
+	  * mask update from modem, in that case APPS should
+	  * respond for 0X7C command
+	  */
+	else if (chk_apps_master() &&
+		 !(driver->polling_reg_flag) &&
+		 !(driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open) &&
+		 !(driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)) {
+		/* respond to 0x0 command */
+		if (*buf == 0x00) {
+			for (i = 0; i < 55; i++)
+				driver->apps_rsp_buf[i] = 0;
+
+			diag_send_rsp(driver->apps_rsp_buf, 55);
+			return 0;
+		}
+		/* respond to 0x7c command */
+		else if (*buf == 0x7c) {
+			driver->apps_rsp_buf[0] = 0x7c;
+			for (i = 1; i < 8; i++)
+				driver->apps_rsp_buf[i] = 0;
+			/* Tools ID for APQ 8060 */
+			*(int *)(driver->apps_rsp_buf + 8) =
+							 chk_config_get_id();
+			*(unsigned char *)(driver->apps_rsp_buf + 12) = '\0';
+			*(unsigned char *)(driver->apps_rsp_buf + 13) = '\0';
+			diag_send_rsp(driver->apps_rsp_buf, 14);
+			return 0;
+		}
+	}
+	write_len = diag_cmd_chk_stats(buf, len, driver->apps_rsp_buf,
+				       DIAG_MAX_RSP_SIZE);
+	if (write_len > 0) {
+		diag_send_rsp(driver->apps_rsp_buf, write_len);
+		return 0;
+	}
+	write_len = diag_cmd_disable_hdlc(buf, len, driver->apps_rsp_buf,
+					  DIAG_MAX_RSP_SIZE);
+	if (write_len > 0) {
+		/*
+		 * This mutex lock is necessary since we need to drain all the
+		 * pending buffers from peripherals which may be HDLC encoded
+		 * before disabling HDLC encoding on Apps processor.
+		 */
+		mutex_lock(&driver->hdlc_disable_mutex);
+		diag_send_rsp(driver->apps_rsp_buf, write_len);
+		/*
+		 * Set the value of hdlc_disabled after sending the response to
+		 * the tools. This is required since the tools is expecting a
+		 * HDLC encoded response for this request.
+		 */
+		pr_debug("diag: In %s, disabling HDLC encoding\n",
+		       __func__);
+		mutex_lock(&driver->md_session_lock);
+		info = diag_md_session_get_pid(pid);
+		if (info)
+			info->hdlc_disabled = 1;
+		else
+			driver->hdlc_disabled = 1;
+		mutex_unlock(&driver->md_session_lock);
+		diag_update_md_clients(HDLC_SUPPORT_TYPE);
+		mutex_unlock(&driver->hdlc_disable_mutex);
+		return 0;
+	}
+
+	/* We have now come to the end of the function. */
+	if (chk_apps_only())
+		diag_send_error_rsp(buf, len);
+
+	return 0;
+}
+
+/*
+ * diag_process_hdlc_pkt - accumulate and decode HDLC-framed request
+ * data into driver->hdlc_buf.
+ *
+ * May be called repeatedly with fragments; hdlc_buf_len carries the
+ * partial decode across calls.  On a complete frame the CRC is checked,
+ * the HDLC footer stripped, and the payload handed to
+ * diag_process_apps_pkt().  On any framing/CRC/length failure an error
+ * response is sent and the partial buffer is discarded.
+ */
+void diag_process_hdlc_pkt(void *data, unsigned len, int pid)
+{
+	int err = 0;
+	int ret = 0;
+
+	if (len > DIAG_MAX_HDLC_BUF_SIZE) {
+		pr_err("diag: In %s, invalid length: %d\n", __func__, len);
+		return;
+	}
+
+	mutex_lock(&driver->diag_hdlc_mutex);
+	pr_debug("diag: In %s, received packet of length: %d, req_buf_len: %d\n",
+		 __func__, len, driver->hdlc_buf_len);
+
+	if (driver->hdlc_buf_len >= DIAG_MAX_REQ_SIZE) {
+		pr_err("diag: In %s, request length is more than supported len. Dropping packet.\n",
+		       __func__);
+		goto fail;
+	}
+
+	hdlc_decode->dest_ptr = driver->hdlc_buf + driver->hdlc_buf_len;
+	hdlc_decode->dest_size = DIAG_MAX_HDLC_BUF_SIZE - driver->hdlc_buf_len;
+	hdlc_decode->src_ptr = data;
+	hdlc_decode->src_size = len;
+	hdlc_decode->src_idx = 0;
+	hdlc_decode->dest_idx = 0;
+
+	ret = diag_hdlc_decode(hdlc_decode);
+	/*
+	 * driver->hdlc_buf is of size DIAG_MAX_HDLC_BUF_SIZE. But the decoded
+	 * packet should be within DIAG_MAX_REQ_SIZE.
+	 */
+	if (driver->hdlc_buf_len + hdlc_decode->dest_idx <= DIAG_MAX_REQ_SIZE) {
+		driver->hdlc_buf_len += hdlc_decode->dest_idx;
+	} else {
+		pr_err_ratelimited("diag: In %s, Dropping packet. pkt_size: %d, max: %d\n",
+				   __func__,
+				   driver->hdlc_buf_len + hdlc_decode->dest_idx,
+				   DIAG_MAX_REQ_SIZE);
+		goto fail;
+	}
+
+	if (ret == HDLC_COMPLETE) {
+		err = crc_check(driver->hdlc_buf, driver->hdlc_buf_len);
+		if (err) {
+			/* CRC check failed. */
+			pr_err_ratelimited("diag: In %s, bad CRC. Dropping packet\n",
+					   __func__);
+			goto fail;
+		}
+		driver->hdlc_buf_len -= HDLC_FOOTER_LEN;
+
+		if (driver->hdlc_buf_len < 1) {
+			pr_err_ratelimited("diag: In %s, message is too short, len: %d, dest len: %d\n",
+					   __func__, driver->hdlc_buf_len,
+					   hdlc_decode->dest_idx);
+			goto fail;
+		}
+
+		err = diag_process_apps_pkt(driver->hdlc_buf,
+					    driver->hdlc_buf_len, pid);
+		if (err < 0)
+			goto fail;
+	} else {
+		/* Frame incomplete: keep the partial decode for next call. */
+		goto end;
+	}
+
+	driver->hdlc_buf_len = 0;
+	mutex_unlock(&driver->diag_hdlc_mutex);
+	return;
+
+fail:
+	/*
+	 * Tools needs to get a response in order to start its
+	 * recovery algorithm. Send an error response if the
+	 * packet is not in expected format.
+	 */
+	diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len);
+	driver->hdlc_buf_len = 0;
+end:
+	mutex_unlock(&driver->diag_hdlc_mutex);
+}
+
+/*
+ * diagfwd_mux_open - mux-layer "transport connected" callback (USB
+ * plug-in or memory-device client attach).
+ *
+ * Clears any stale response-buffer busy state, reopens data/cmd
+ * channels to all peripherals and kicks the real-time mode worker.
+ * Returns 0, or -EINVAL for an unknown mode.
+ */
+static int diagfwd_mux_open(int id, int mode)
+{
+	uint8_t i;
+	unsigned long flags;
+
+	switch (mode) {
+#ifdef CONFIG_DIAG_OVER_USB
+	case DIAG_USB_MODE:
+		driver->usb_connected = 1;
+		break;
+#endif
+	case DIAG_MEMORY_DEVICE_MODE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (driver->rsp_buf_busy) {
+		/*
+		 * When a client switches from callback mode to USB mode
+		 * explicitly, there can be a situation when the last response
+		 * is not drained to the user space application. Reset the
+		 * in_busy flag in this case.
+		 */
+		spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+		driver->rsp_buf_busy = 0;
+		spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+	}
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		diagfwd_open(i, TYPE_DATA);
+		diagfwd_open(i, TYPE_CMD);
+	}
+	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+	return 0;
+}
+
+/*
+ * diagfwd_mux_close - mux-layer "transport disconnected" callback.
+ *
+ * If a memory-device client remains active the channels are left open;
+ * otherwise the peripheral channels are (optionally, see
+ * diag_mask_param()) closed and HDLC encoding is re-enabled.
+ * Returns 0, or -EINVAL for an unknown mode.
+ */
+static int diagfwd_mux_close(int id, int mode)
+{
+	uint8_t i;
+
+	switch (mode) {
+#ifdef CONFIG_DIAG_OVER_USB
+	case DIAG_USB_MODE:
+		driver->usb_connected = 0;
+		break;
+#endif
+	case DIAG_MEMORY_DEVICE_MODE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if ((driver->logging_mode == DIAG_MULTI_MODE &&
+		driver->md_session_mode == DIAG_MD_NONE) ||
+		(driver->md_session_mode == DIAG_MD_PERIPHERAL)) {
+		/*
+		 * This case indicates that the USB is removed
+		 * but there is a client running in background
+		 * with Memory Device mode.
+		 */
+	} else {
+		/*
+		 * With sysfs parameter to clear masks set,
+		 * peripheral masks are cleared on ODL exit and
+		 * USB disconnection and buffers are not marked busy.
+		 * This enables read and drop of stale packets.
+		 *
+		 * With sysfs parameter to clear masks cleared,
+		 * masks are not cleared and buffers are to be marked
+		 * busy to ensure traffic generated by peripheral
+		 * are not read
+		 */
+		if (!(diag_mask_param())) {
+			for (i = 0; i < NUM_PERIPHERALS; i++) {
+				diagfwd_close(i, TYPE_DATA);
+				diagfwd_close(i, TYPE_CMD);
+			}
+		}
+		/* Re enable HDLC encoding */
+		pr_debug("diag: In %s, re-enabling HDLC encoding\n",
+		       __func__);
+		mutex_lock(&driver->hdlc_disable_mutex);
+		if (driver->md_session_mode == DIAG_MD_NONE)
+			driver->hdlc_disabled = 0;
+		mutex_unlock(&driver->hdlc_disable_mutex);
+		queue_work(driver->diag_wq,
+			&(driver->update_user_clients));
+	}
+	queue_work(driver->diag_real_time_wq,
+		   &driver->diag_real_time_work);
+	return 0;
+}
+
+static uint8_t hdlc_reset;
+
+/*
+ * hdlc_reset_timer_start - arm a 200 ms one-shot timer that will
+ * re-enable HDLC encoding if recovery does not succeed first.  Uses the
+ * session timer when pid belongs to a memory-device session, the global
+ * driver timer otherwise.  Guarded by hdlc_timer_in_progress so only
+ * one timer runs at a time.
+ */
+static void hdlc_reset_timer_start(int pid)
+{
+	struct diag_md_session_t *info = NULL;
+
+	mutex_lock(&driver->md_session_lock);
+	info = diag_md_session_get_pid(pid);
+	if (!hdlc_timer_in_progress) {
+		hdlc_timer_in_progress = 1;
+		if (info)
+			mod_timer(&info->hdlc_reset_timer,
+			  jiffies + msecs_to_jiffies(200));
+		else
+			mod_timer(&driver->hdlc_reset_timer,
+			  jiffies + msecs_to_jiffies(200));
+	}
+	mutex_unlock(&driver->md_session_lock);
+}
+
+/*
+ * hdlc_reset_timer_func - global timer callback: if recovery is still
+ * pending (hdlc_reset set), re-enable HDLC encoding and notify
+ * user-space clients.
+ */
+static void hdlc_reset_timer_func(struct timer_list *t)
+{
+	pr_debug("diag: In %s, re-enabling HDLC encoding\n",
+		       __func__);
+	if (hdlc_reset) {
+		driver->hdlc_disabled = 0;
+		queue_work(driver->diag_wq,
+			&(driver->update_user_clients));
+	}
+	hdlc_timer_in_progress = 0;
+}
+
+/*
+ * diag_md_hdlc_reset_timer_func - per-session variant of the HDLC reset
+ * timer: recovers the owning diag_md_session_t from the timer and
+ * re-enables HDLC encoding for that session only.
+ */
+void diag_md_hdlc_reset_timer_func(struct timer_list *t)
+{
+	struct diag_md_session_t *session_info = NULL;
+
+	pr_debug("diag: In %s, re-enabling HDLC encoding\n",
+		       __func__);
+	if (hdlc_reset) {
+		session_info = container_of(t, struct diag_md_session_t,
+					    hdlc_reset_timer);
+		if (session_info)
+			session_info->hdlc_disabled = 0;
+		queue_work(driver->diag_wq,
+			&(driver->update_md_clients));
+	}
+	hdlc_timer_in_progress = 0;
+}
+
+/*
+ * diag_hdlc_start_recovery - resynchronize after a framing error in the
+ * non-HDLC stream.
+ *
+ * Arms the HDLC reset timer, then scans for the next plausible frame
+ * start and reprocesses from there.  If too many bad bytes accumulate
+ * across calls, gives up and re-enables HDLC encoding immediately.
+ *
+ * NOTE(review): the loop indexes i over buf but actual_pkt always
+ * points at buf[0], so the same frame candidate is tested every
+ * iteration while bad_byte_counter advances — confirm against upstream
+ * whether actual_pkt was meant to track &buf[i].
+ */
+static void diag_hdlc_start_recovery(unsigned char *buf, int len,
+				     int pid)
+{
+	int i;
+	static uint32_t bad_byte_counter;
+	unsigned char *start_ptr = NULL;
+	struct diag_pkt_frame_t *actual_pkt = NULL;
+	struct diag_md_session_t *info = NULL;
+
+	hdlc_reset = 1;
+	hdlc_reset_timer_start(pid);
+
+	actual_pkt = (struct diag_pkt_frame_t *)buf;
+	for (i = 0; i < len; i++) {
+		if (actual_pkt->start == CONTROL_CHAR &&
+			actual_pkt->version == 1 &&
+			actual_pkt->length < len &&
+			(*(uint8_t *)(buf + sizeof(struct diag_pkt_frame_t) +
+			actual_pkt->length) == CONTROL_CHAR)) {
+				start_ptr = &buf[i];
+				break;
+		}
+		bad_byte_counter++;
+		if (bad_byte_counter > (DIAG_MAX_REQ_SIZE +
+				sizeof(struct diag_pkt_frame_t) + 1)) {
+			bad_byte_counter = 0;
+			pr_err("diag: In %s, re-enabling HDLC encoding\n",
+					__func__);
+			mutex_lock(&driver->hdlc_disable_mutex);
+			mutex_lock(&driver->md_session_lock);
+			info = diag_md_session_get_pid(pid);
+			if (info)
+				info->hdlc_disabled = 0;
+			else
+				driver->hdlc_disabled = 0;
+			mutex_unlock(&driver->md_session_lock);
+			mutex_unlock(&driver->hdlc_disable_mutex);
+			diag_update_md_clients(HDLC_SUPPORT_TYPE);
+
+			return;
+		}
+	}
+
+	if (start_ptr) {
+		/* Discard any partial packet reads */
+		mutex_lock(&driver->hdlc_recovery_mutex);
+		driver->incoming_pkt.processing = 0;
+		mutex_unlock(&driver->hdlc_recovery_mutex);
+		diag_process_non_hdlc_pkt(start_ptr, len - i, pid);
+	}
+}
+
+/*
+ * diag_process_non_hdlc_pkt - parse raw (HDLC-disabled) framed requests.
+ *
+ * Frames are: diag_pkt_frame_t header (start byte CONTROL_CHAR,
+ * version, length) + payload + trailing CONTROL_CHAR.  A frame split
+ * across calls is accumulated in driver->incoming_pkt and completed on
+ * the next invocation; complete frames go to diag_process_apps_pkt().
+ * Framing errors enter diag_hdlc_start_recovery().
+ */
+void diag_process_non_hdlc_pkt(unsigned char *buf, int len, int pid)
+{
+	int err = 0;
+	uint16_t pkt_len = 0;
+	uint32_t read_bytes = 0;
+	const uint32_t header_len = sizeof(struct diag_pkt_frame_t);
+	struct diag_pkt_frame_t *actual_pkt = NULL;
+	unsigned char *data_ptr = NULL;
+	struct diag_partial_pkt_t *partial_pkt = NULL;
+
+	mutex_lock(&driver->hdlc_recovery_mutex);
+	if (!buf || len <= 0) {
+		mutex_unlock(&driver->hdlc_recovery_mutex);
+		return;
+	}
+	partial_pkt = &driver->incoming_pkt;
+	if (!partial_pkt->processing) {
+		mutex_unlock(&driver->hdlc_recovery_mutex);
+		goto start;
+	}
+
+	/* Continue filling a frame left incomplete by a previous call. */
+	if (partial_pkt->remaining > len) {
+		if ((partial_pkt->read_len + len) > partial_pkt->capacity) {
+			pr_err("diag: Invalid length %d, %d received in %s\n",
+			       partial_pkt->read_len, len, __func__);
+			mutex_unlock(&driver->hdlc_recovery_mutex);
+			goto end;
+		}
+		memcpy(partial_pkt->data + partial_pkt->read_len, buf, len);
+		read_bytes += len;
+		buf += read_bytes;
+		partial_pkt->read_len += len;
+		partial_pkt->remaining -= len;
+	} else {
+		if ((partial_pkt->read_len + partial_pkt->remaining) >
+						partial_pkt->capacity) {
+			pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
+			       partial_pkt->read_len,
+			       partial_pkt->remaining, __func__);
+			mutex_unlock(&driver->hdlc_recovery_mutex);
+			goto end;
+		}
+		memcpy(partial_pkt->data + partial_pkt->read_len, buf,
+						partial_pkt->remaining);
+		read_bytes += partial_pkt->remaining;
+		buf += read_bytes;
+		partial_pkt->read_len += partial_pkt->remaining;
+		partial_pkt->remaining = 0;
+	}
+
+	if (partial_pkt->remaining == 0) {
+		actual_pkt = (struct diag_pkt_frame_t *)(partial_pkt->data);
+		data_ptr = partial_pkt->data + header_len;
+		/* Missing trailing CONTROL_CHAR => framing error. */
+		if (*(uint8_t *)(data_ptr + actual_pkt->length) !=
+						CONTROL_CHAR) {
+			mutex_unlock(&driver->hdlc_recovery_mutex);
+			diag_hdlc_start_recovery(buf, len, pid);
+			mutex_lock(&driver->hdlc_recovery_mutex);
+		}
+		err = diag_process_apps_pkt(data_ptr,
+					    actual_pkt->length, pid);
+		if (err) {
+			pr_err("diag: In %s, unable to process incoming data packet, err: %d\n",
+			       __func__, err);
+			mutex_unlock(&driver->hdlc_recovery_mutex);
+			goto end;
+		}
+		partial_pkt->read_len = 0;
+		partial_pkt->total_len = 0;
+		partial_pkt->processing = 0;
+		mutex_unlock(&driver->hdlc_recovery_mutex);
+		goto start;
+	}
+	mutex_unlock(&driver->hdlc_recovery_mutex);
+	goto end;
+
+start:
+	/* Walk the remaining bytes frame by frame. */
+	while (read_bytes < len) {
+		actual_pkt = (struct diag_pkt_frame_t *)buf;
+		pkt_len = actual_pkt->length;
+
+		if (actual_pkt->start != CONTROL_CHAR) {
+			diag_hdlc_start_recovery(buf, len, pid);
+			diag_send_error_rsp(buf, len);
+			goto end;
+		}
+		mutex_lock(&driver->hdlc_recovery_mutex);
+		if (pkt_len + header_len > partial_pkt->capacity) {
+			pr_err("diag: In %s, incoming data is too large for the request buffer %d\n",
+			       __func__, pkt_len);
+			mutex_unlock(&driver->hdlc_recovery_mutex);
+			diag_hdlc_start_recovery(buf, len, pid);
+			break;
+		}
+		/* Frame extends past this buffer: stash it as partial. */
+		if ((pkt_len + header_len) > (len - read_bytes)) {
+			partial_pkt->read_len = len - read_bytes;
+			partial_pkt->total_len = pkt_len + header_len;
+			partial_pkt->remaining = partial_pkt->total_len -
+						 partial_pkt->read_len;
+			partial_pkt->processing = 1;
+			memcpy(partial_pkt->data, buf, partial_pkt->read_len);
+			mutex_unlock(&driver->hdlc_recovery_mutex);
+			break;
+		}
+		data_ptr = buf + header_len;
+		if (*(uint8_t *)(data_ptr + actual_pkt->length) !=
+						CONTROL_CHAR) {
+			mutex_unlock(&driver->hdlc_recovery_mutex);
+			diag_hdlc_start_recovery(buf, len, pid);
+			mutex_lock(&driver->hdlc_recovery_mutex);
+		}
+		else
+			hdlc_reset = 0;
+		err = diag_process_apps_pkt(data_ptr,
+					    actual_pkt->length, pid);
+		if (err) {
+			mutex_unlock(&driver->hdlc_recovery_mutex);
+			break;
+		}
+		/* +1 for the trailing CONTROL_CHAR of this frame. */
+		read_bytes += header_len + pkt_len + 1;
+		buf += header_len + pkt_len + 1; /* advance to next pkt */
+		mutex_unlock(&driver->hdlc_recovery_mutex);
+	}
+end:
+	return;
+}
+
+/*
+ * diagfwd_mux_read_done - mux-layer read callback: route the received
+ * request through the HDLC or raw parser depending on the current
+ * framing mode, then queue the next read.  pid 0 = no specific
+ * memory-device session.
+ */
+static int diagfwd_mux_read_done(unsigned char *buf, int len, int ctxt)
+{
+	if (!buf || len <= 0)
+		return -EINVAL;
+
+	if (!driver->hdlc_disabled)
+		diag_process_hdlc_pkt(buf, len, 0);
+	else
+		diag_process_non_hdlc_pkt(buf, len, 0);
+
+	diag_mux_queue_read(ctxt);
+	return 0;
+}
+
+/*
+ * diagfwd_mux_write_done - mux-layer write-completion callback.
+ *
+ * Decodes the buffer context (peripheral/type/num packed by
+ * SET_BUF_CTXT) and releases the corresponding resource: peripheral
+ * buffers go back via diagfwd_write_done(), apps DATA buffers return to
+ * the HDLC mempool, and the apps CMD response buffer is marked free.
+ */
+static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
+				  int ctxt)
+{
+	unsigned long flags;
+	int peripheral = -1;
+	int type = -1;
+	int num = -1;
+
+	if (!buf || len < 0)
+		return -EINVAL;
+
+	peripheral = GET_BUF_PERIPHERAL(buf_ctxt);
+	type = GET_BUF_TYPE(buf_ctxt);
+	num = GET_BUF_NUM(buf_ctxt);
+
+	switch (type) {
+	case TYPE_DATA:
+		if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
+			diagfwd_write_done(peripheral, type, num);
+			diag_ws_on_copy(DIAG_WS_MUX);
+		} else if (peripheral == APPS_DATA) {
+			diagmem_free(driver, (unsigned char *)buf,
+				     POOL_TYPE_HDLC);
+			buf = NULL;
+		} else {
+			pr_err_ratelimited("diag: Invalid peripheral %d in %s, type: %d\n",
+					   peripheral, __func__, type);
+		}
+		break;
+	case TYPE_CMD:
+		if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
+			diagfwd_write_done(peripheral, type, num);
+		} else if (peripheral == APPS_DATA) {
+			spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+			driver->rsp_buf_busy = 0;
+			driver->encoded_rsp_len = 0;
+			spin_unlock_irqrestore(&driver->rsp_buf_busy_lock,
+					       flags);
+		} else {
+			pr_err_ratelimited("diag: Invalid peripheral %d in %s, type: %d\n",
+					   peripheral, __func__, type);
+		}
+		break;
+	default:
+		pr_err_ratelimited("diag: Incorrect data type %d, buf_ctxt: %d in %s\n",
+				   type, buf_ctxt, __func__);
+		break;
+	}
+
+	return 0;
+}
+
+/* Callbacks registered with the diag mux layer (see diagfwd_init). */
+static struct diag_mux_ops diagfwd_mux_ops = {
+	.open = diagfwd_mux_open,
+	.close = diagfwd_mux_close,
+	.read_done = diagfwd_mux_read_done,
+	.write_done = diagfwd_mux_write_done
+};
+
+/*
+ * diagfwd_init - allocate the forwarding-layer buffers, initialize
+ * per-peripheral feature/buffering state and register with the diag
+ * mux layer.
+ *
+ * Returns 0 on success, -ENOMEM on any allocation or registration
+ * failure (all allocations made so far are freed on the error path).
+ *
+ * NOTE(review): the err path frees buffers without NULLing the
+ * pointers; confirm diagfwd_exit() cannot run after a failed init,
+ * otherwise those kfree()s repeat.
+ */
+int diagfwd_init(void)
+{
+	int ret;
+	int i;
+
+	wrap_enabled = 0;
+	wrap_count = 0;
+	driver->use_device_tree = has_device_tree();
+	for (i = 0; i < DIAG_NUM_PROC; i++)
+		driver->real_time_mode[i] = 1;
+	driver->supports_separate_cmdrsp = 1;
+	driver->supports_apps_hdlc_encoding = 1;
+	mutex_init(&driver->diag_hdlc_mutex);
+	mutex_init(&driver->diag_cntl_mutex);
+	mutex_init(&driver->mode_lock);
+	driver->encoded_rsp_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE +
+				APF_DIAG_PADDING, GFP_KERNEL);
+	if (!driver->encoded_rsp_buf)
+		goto err;
+	kmemleak_not_leak(driver->encoded_rsp_buf);
+	hdlc_decode = kzalloc(sizeof(struct diag_hdlc_decode_type),
+			      GFP_KERNEL);
+	if (!hdlc_decode)
+		goto err;
+	timer_setup(&driver->hdlc_reset_timer, hdlc_reset_timer_func, 0);
+	kmemleak_not_leak(hdlc_decode);
+	driver->encoded_rsp_len = 0;
+	driver->rsp_buf_busy = 0;
+	spin_lock_init(&driver->rsp_buf_busy_lock);
+	driver->user_space_data_busy = 0;
+	driver->hdlc_buf_len = 0;
+	INIT_LIST_HEAD(&driver->cmd_reg_list);
+	driver->cmd_reg_count = 0;
+	mutex_init(&driver->cmd_reg_mutex);
+
+	/* Peripheral features default to "not yet reported". */
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		driver->feature[i].separate_cmd_rsp = 0;
+		driver->feature[i].stm_support = DISABLE_STM;
+		driver->feature[i].rcvd_feature_mask = 0;
+		driver->feature[i].peripheral_buffering = 0;
+		driver->feature[i].encode_hdlc = 0;
+		driver->feature[i].mask_centralization = 0;
+		driver->feature[i].log_on_demand = 0;
+		driver->feature[i].sent_feature_mask = 0;
+		driver->buffering_mode[i].peripheral = i;
+		driver->buffering_mode[i].mode = DIAG_BUFFERING_MODE_STREAMING;
+		driver->buffering_mode[i].high_wm_val = DEFAULT_HIGH_WM_VAL;
+		driver->buffering_mode[i].low_wm_val = DEFAULT_LOW_WM_VAL;
+	}
+
+	for (i = 0; i < NUM_STM_PROCESSORS; i++) {
+		driver->stm_state_requested[i] = DISABLE_STM;
+		driver->stm_state[i] = DISABLE_STM;
+	}
+
+	if (driver->hdlc_buf == NULL) {
+		driver->hdlc_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
+		if (!driver->hdlc_buf)
+			goto err;
+		kmemleak_not_leak(driver->hdlc_buf);
+	}
+	if (driver->user_space_data_buf == NULL)
+		driver->user_space_data_buf = kzalloc(USER_SPACE_DATA,
+							GFP_KERNEL);
+	if (driver->user_space_data_buf == NULL)
+		goto err;
+	kmemleak_not_leak(driver->user_space_data_buf);
+	if (driver->client_map == NULL &&
+	    (driver->client_map = kzalloc
+	     ((driver->num_clients) * sizeof(struct diag_client_map),
+		   GFP_KERNEL)) == NULL)
+		goto err;
+	kmemleak_not_leak(driver->client_map);
+	if (driver->data_ready == NULL &&
+	     (driver->data_ready = kzalloc(driver->num_clients * sizeof(int)
+							, GFP_KERNEL)) == NULL)
+		goto err;
+	kmemleak_not_leak(driver->data_ready);
+	for (i = 0; i < THRESHOLD_CLIENT_LIMIT; i++)
+		atomic_set(&driver->data_ready_notif[i], 0);
+	if (driver->apps_req_buf == NULL) {
+		driver->apps_req_buf = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
+		if (!driver->apps_req_buf)
+			goto err;
+		kmemleak_not_leak(driver->apps_req_buf);
+	}
+	if (driver->dci_pkt_buf == NULL) {
+		driver->dci_pkt_buf = kzalloc(DCI_BUF_SIZE, GFP_KERNEL);
+		if (!driver->dci_pkt_buf)
+			goto err;
+		kmemleak_not_leak(driver->dci_pkt_buf);
+	}
+	if (driver->apps_rsp_buf == NULL) {
+		driver->apps_rsp_buf = kzalloc(DIAG_MAX_RSP_SIZE, GFP_KERNEL);
+		if (driver->apps_rsp_buf == NULL)
+			goto err;
+		kmemleak_not_leak(driver->apps_rsp_buf);
+	}
+	driver->diag_wq = create_singlethread_workqueue("diag_wq");
+	if (!driver->diag_wq)
+		goto err;
+	ret = diag_mux_register(DIAG_LOCAL_PROC, DIAG_LOCAL_PROC,
+				&diagfwd_mux_ops);
+	if (ret) {
+		pr_err("diag: Unable to register with USB, err: %d\n", ret);
+		goto err;
+	}
+
+	return 0;
+err:
+	pr_err("diag: In %s, couldn't initialize diag\n", __func__);
+
+	diag_usb_exit(DIAG_USB_LOCAL);
+	kfree(driver->encoded_rsp_buf);
+	kfree(driver->hdlc_buf);
+	kfree(driver->client_map);
+	kfree(driver->data_ready);
+	kfree(driver->apps_req_buf);
+	kfree(driver->dci_pkt_buf);
+	kfree(driver->apps_rsp_buf);
+	kfree(hdlc_decode);
+	kfree(driver->user_space_data_buf);
+	if (driver->diag_wq)
+		destroy_workqueue(driver->diag_wq);
+	return -ENOMEM;
+}
+
+/*
+ * diagfwd_exit - release everything diagfwd_init() allocated and tear
+ * down the workqueue.  Must only be called after a successful init.
+ */
+void diagfwd_exit(void)
+{
+	kfree(driver->encoded_rsp_buf);
+	kfree(driver->hdlc_buf);
+	kfree(hdlc_decode);
+	kfree(driver->client_map);
+	kfree(driver->data_ready);
+	kfree(driver->apps_req_buf);
+	kfree(driver->dci_pkt_buf);
+	kfree(driver->apps_rsp_buf);
+	kfree(driver->user_space_data_buf);
+	destroy_workqueue(driver->diag_wq);
+}
diff -Nruw linux-6.4-fbx/drivers/char/diag/diagfwd.h linux-6.4-fbx/drivers/char/diag/diagfwd.h
--- linux-6.4-fbx/drivers/char/diag/diagfwd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diagfwd.h	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,50 @@
+/* Copyright (c) 2008-2015, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_H
+#define DIAGFWD_H
+
+/*
+ * The context applies to Diag SMD data buffers. It is used to identify the
+ * buffer once these buffers are written to USB.  Context word layout:
+ * bits 16-23 peripheral, bits 8-15 buffer type, bits 0-7 buffer number.
+ */
+#define SET_BUF_CTXT(p, d, n) \
+	(((p & 0xFF) << 16) | ((d & 0xFF) << 8) | (n & 0xFF))
+#define GET_BUF_PERIPHERAL(p)	((p & 0xFF0000) >> 16)
+#define GET_BUF_TYPE(d)		((d & 0x00FF00) >> 8)
+#define GET_BUF_NUM(n)		((n & 0x0000FF))
+
+/* 1 when [start, start + length) fits inside a buffer ending at 'end'. */
+#define CHK_OVERFLOW(bufStart, start, end, length) \
+	((((bufStart) <= (start)) && ((end) - (start) >= (length))) ? 1 : 0)
+
+/* Forwarding-layer setup/teardown. */
+int diagfwd_init(void);
+void diagfwd_exit(void);
+/* Inbound packet processing, HDLC-framed and raw. */
+void diag_process_hdlc_pkt(void *data, unsigned int len, int pid);
+void diag_process_non_hdlc_pkt(unsigned char *data, int len, int pid);
+int chk_config_get_id(void);
+int chk_apps_only(void);
+int chk_apps_master(void);
+int chk_polling_response(void);
+int diag_cmd_log_on_demand(unsigned char *src_buf, int src_len,
+			   unsigned char *dest_buf, int dest_len);
+int diag_cmd_get_mobile_id(unsigned char *src_buf, int src_len,
+			   unsigned char *dest_buf, int dest_len);
+int diag_check_common_cmd(struct diag_pkt_header_t *header);
+void diag_update_userspace_clients(unsigned int type);
+void diag_update_sleeping_process(int process_id, int data_type);
+int diag_process_apps_pkt(unsigned char *buf, int len, int pid);
+void diag_send_error_rsp(unsigned char *buf, int len);
+void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type);
+int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf);
+void diag_md_hdlc_reset_timer_func(struct timer_list *);
+void diag_update_md_clients(unsigned int type);
+#endif
diff -Nruw linux-6.4-fbx/drivers/char/diag/diagfwd_cntl.c linux-6.4-fbx/drivers/char/diag/diagfwd_cntl.c
--- linux-6.4-fbx/drivers/char/diag/diagfwd_cntl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diagfwd_cntl.c	2023-10-05 12:33:41.363634732 +0200
@@ -0,0 +1,1361 @@
+/* Copyright (c) 2011-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/diagchar.h>
+#include <linux/kmemleak.h>
+#include <linux/delay.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+#include "diagfwd_peripheral.h"
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#include "diagfwd_bridge.h"
+#endif
+#include "diag_dci.h"
+#include "diagmem.h"
+#include "diag_masks.h"
+#include "diag_ipc_logging.h"
+#include "diag_mux.h"
+
+/*
+ * Test whether feature bit 'x' is present in the peripheral's feature
+ * mask.  NOTE: this macro reads the local variables 'feature_mask' (the
+ * current mask byte) and 'i' (the byte index) at its expansion site --
+ * see process_incoming_feature_mask() below.
+ */
+#define FEATURE_SUPPORTED(x)	((feature_mask << (i * 8)) & (1 << x))
+
+/* tracks which peripheral is undergoing SSR */
+static uint16_t reg_dirty;
+/* Defined below; needed first by the channel open/close helpers. */
+static void diag_notify_md_client(uint8_t peripheral, int data);
+
+/*
+ * Deferred worker: push mask updates to every peripheral whose bit is
+ * set in driver->mask_update.
+ */
+static void diag_mask_update_work_fn(struct work_struct *work)
+{
+	uint8_t peripheral;
+
+	/*
+	 * Valid peripheral indices are 0..NUM_PERIPHERALS-1 (the guards in
+	 * this file treat '>= NUM_PERIPHERALS' as invalid), so iterate with
+	 * '<': the original '<=' walked one index past the per-peripheral
+	 * state arrays.
+	 */
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		if (!(driver->mask_update & PERIPHERAL_MASK(peripheral)))
+			continue;
+		/* Clear the pending bit under the lock before sending. */
+		mutex_lock(&driver->cntl_lock);
+		driver->mask_update ^= PERIPHERAL_MASK(peripheral);
+		mutex_unlock(&driver->cntl_lock);
+		diag_send_updates_peripheral(peripheral);
+	}
+}
+
+/*
+ * A peripheral's control channel came up: schedule a mask refresh for it
+ * and tell any memory-device client the peripheral is open.
+ */
+void diag_cntl_channel_open(struct diagfwd_info *p_info)
+{
+	uint8_t peripheral;
+
+	if (!p_info)
+		return;
+
+	peripheral = p_info->peripheral;
+	driver->mask_update |= PERIPHERAL_MASK(peripheral);
+	queue_work(driver->cntl_wq, &driver->mask_update_work);
+	diag_notify_md_client(peripheral, DIAG_STATUS_OPEN);
+}
+
+/*
+ * A peripheral's control channel went down: forget everything it told
+ * us (feature mask, command registrations, STM state) and notify any
+ * memory-device client.
+ */
+void diag_cntl_channel_close(struct diagfwd_info *p_info)
+{
+	uint8_t peripheral;
+
+	if (!p_info)
+		return;
+
+	peripheral = p_info->peripheral;
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	driver->feature[peripheral].sent_feature_mask = 0;
+	driver->feature[peripheral].rcvd_feature_mask = 0;
+	/* Let queued mask/STM work drain before tearing state down. */
+	flush_workqueue(driver->cntl_wq);
+	/*
+	 * Mark the peripheral dirty while its registrations are removed so
+	 * diag_cntl_process_read_data() drops concurrent control traffic.
+	 */
+	reg_dirty |= PERIPHERAL_MASK(peripheral);
+	diag_cmd_remove_reg_by_proc(peripheral);
+	driver->feature[peripheral].stm_support = DISABLE_STM;
+	driver->feature[peripheral].log_on_demand = 0;
+	driver->stm_state[peripheral] = DISABLE_STM;
+	driver->stm_state_requested[peripheral] = DISABLE_STM;
+	reg_dirty ^= PERIPHERAL_MASK(peripheral);
+	diag_notify_md_client(peripheral, DIAG_STATUS_CLOSED);
+}
+
+/*
+ * Deferred worker: push the requested STM state to every peripheral that
+ * was flagged in driver->stm_peripheral and advertises STM support.
+ */
+static void diag_stm_update_work_fn(struct work_struct *work)
+{
+	uint16_t pending;
+	uint8_t i;
+
+	/* Take ownership of the pending-peripheral bitmask under the lock. */
+	mutex_lock(&driver->cntl_lock);
+	pending = driver->stm_peripheral;
+	driver->stm_peripheral = 0;
+	mutex_unlock(&driver->cntl_lock);
+
+	if (!pending)
+		return;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!driver->feature[i].stm_support)
+			continue;
+		if (!(pending & PERIPHERAL_MASK(i)))
+			continue;
+		/* Only record the new state once the peripheral accepted it. */
+		if (!diag_send_stm_state(i,
+				(uint8_t)driver->stm_state_requested[i]))
+			driver->stm_state[i] = driver->stm_state_requested[i];
+	}
+}
+
+/*
+ * Deliver SIGCONT to the memory-device client session registered for
+ * 'peripheral', with the peripheral mask and 'data' status folded into
+ * si_int.  Only meaningful in memory-device logging mode.  The forward
+ * declaration at the top of this file is 'static', so this definition
+ * has internal linkage.
+ */
+void diag_notify_md_client(uint8_t peripheral, int data)
+{
+	int stat = 0;
+	struct kernel_siginfo info;
+
+	/*
+	 * NOTE(review): this guard uses '>' while the rest of the file uses
+	 * '>= NUM_PERIPHERALS'; confirm md_session_map[] really has more
+	 * than NUM_PERIPHERALS slots before relying on index
+	 * NUM_PERIPHERALS being valid here.
+	 */
+	if (peripheral > NUM_PERIPHERALS)
+		return;
+
+	if (driver->logging_mode != DIAG_MEMORY_DEVICE_MODE)
+		return;
+
+	mutex_lock(&driver->md_session_lock);
+	memset(&info, 0, sizeof(info));
+	info.si_code = SI_QUEUE;
+	info.si_int = (PERIPHERAL_MASK(peripheral) | data);
+	info.si_signo = SIGCONT;
+	/* Only signal a session whose stored pid matches its task's tgid. */
+	if (driver->md_session_map[peripheral] &&
+		driver->md_session_map[peripheral]->task) {
+		if (driver->md_session_map[peripheral]->pid ==
+			driver->md_session_map[peripheral]->task->tgid) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"md_session %d pid = %d, md_session %d task tgid = %d\n",
+				peripheral,
+				driver->md_session_map[peripheral]->pid,
+				peripheral,
+				driver->md_session_map[peripheral]->task->tgid);
+			stat = send_sig_info(info.si_signo, &info,
+				driver->md_session_map[peripheral]->task);
+			if (stat)
+				pr_err("diag: Err sending signal to memory device client, signal data: 0x%x, stat: %d\n",
+					info.si_int, stat);
+		} else
+			pr_err("diag: md_session_map[%d] data is corrupted, signal data: 0x%x, stat: %d\n",
+				peripheral, info.si_int, stat);
+	}
+	mutex_unlock(&driver->md_session_lock);
+}
+
+/*
+ * Handle a DIAG_CTRL_MSG_PD_STATUS control packet: translate the
+ * peripheral-daemon status into open/closed and notify any
+ * memory-device client for this peripheral.  (The original also read
+ * pd_msg->pd_id into a local that was never used; the dead store is
+ * removed.)
+ */
+static void process_pd_status(uint8_t *buf, uint32_t len,
+			      uint8_t peripheral)
+{
+	struct diag_ctrl_msg_pd_status *pd_msg = NULL;
+	int status;
+
+	/* Need a full pd_status payload from a valid peripheral. */
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < sizeof(*pd_msg))
+		return;
+
+	pd_msg = (struct diag_ctrl_msg_pd_status *)buf;
+	/* Status 0 on the wire means the PD is up. */
+	status = (pd_msg->status == 0) ? DIAG_STATUS_OPEN : DIAG_STATUS_CLOSED;
+	diag_notify_md_client(peripheral, status);
+}
+
+/* Record STM capability for 'peripheral' and kick the STM worker. */
+static void enable_stm_feature(uint8_t peripheral)
+{
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	mutex_lock(&driver->cntl_lock);
+	driver->stm_peripheral |= PERIPHERAL_MASK(peripheral);
+	driver->feature[peripheral].stm_support = ENABLE_STM;
+	mutex_unlock(&driver->cntl_lock);
+
+	queue_work(driver->cntl_wq, &driver->stm_update_work);
+}
+
+/* Mirror the apps-side socket capability into the peripheral's feature set. */
+static void enable_socket_feature(uint8_t peripheral)
+{
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	driver->feature[peripheral].sockets_enabled =
+		driver->supports_sockets ? 1 : 0;
+}
+
+/*
+ * Record whether the apps processor will HDLC-encode data on behalf of
+ * this peripheral.
+ */
+static void process_hdlc_encoding_feature(uint8_t peripheral)
+{
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	driver->feature[peripheral].encode_hdlc =
+		driver->supports_apps_hdlc_encoding ?
+		ENABLE_APPS_HDLC_ENCODING : DISABLE_APPS_HDLC_ENCODING;
+}
+
+/*
+ * Handle a DIAG_CTRL_MSG_DEREG control packet: remove every command-code
+ * range listed by the peripheral from the command registration table.
+ */
+static void process_command_deregistration(uint8_t *buf, uint32_t len,
+					   uint8_t peripheral)
+{
+	uint8_t *ptr = buf;
+	int i;
+	int header_len = sizeof(struct diag_ctrl_cmd_dereg);
+	int read_len = 0;
+	struct diag_ctrl_cmd_dereg *dereg = NULL;
+	struct cmd_code_range *range = NULL;
+	struct diag_cmd_reg_entry_t del_entry;
+
+	/*
+	 * Perform Basic sanity. The len field is the size of the data payload.
+	 * This doesn't include the header size.
+	 */
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+		return;
+
+	dereg = (struct diag_ctrl_cmd_dereg *)ptr;
+	ptr += header_len;
+	/* Don't account for pkt_id and length */
+	read_len += header_len - (2 * sizeof(uint32_t));
+
+	if (dereg->count_entries == 0) {
+		pr_debug("diag: In %s, received reg tbl with no entries\n",
+			 __func__);
+		return;
+	}
+
+	for (i = 0; i < dereg->count_entries && read_len < len; i++) {
+		range = (struct cmd_code_range *)ptr;
+		/*
+		 * Dereg ranges on the wire are one uint32_t shorter than
+		 * struct cmd_code_range -- NOTE(review): presumably the
+		 * trailing word is omitted; compare with
+		 * process_command_registration(), which consumes the full
+		 * struct per entry.
+		 */
+		ptr += sizeof(struct cmd_code_range) - sizeof(uint32_t);
+		read_len += sizeof(struct cmd_code_range) - sizeof(uint32_t);
+		del_entry.cmd_code = dereg->cmd_code;
+		del_entry.subsys_id = dereg->subsysid;
+		del_entry.cmd_code_hi = range->cmd_code_hi;
+		del_entry.cmd_code_lo = range->cmd_code_lo;
+		diag_cmd_remove_reg(&del_entry, peripheral);
+	}
+
+	if (i != dereg->count_entries) {
+		pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
+		       __func__, read_len, len, dereg->count_entries);
+	}
+}
+/*
+ * Handle a DIAG_CTRL_MSG_REG control packet: add every command-code
+ * range advertised by the peripheral to the command registration table
+ * so matching requests can be routed to it.
+ */
+static void process_command_registration(uint8_t *buf, uint32_t len,
+					 uint8_t peripheral)
+{
+	uint8_t *ptr = buf;
+	int i;
+	int header_len = sizeof(struct diag_ctrl_cmd_reg);
+	int read_len = 0;
+	struct diag_ctrl_cmd_reg *reg = NULL;
+	struct cmd_code_range *range = NULL;
+	struct diag_cmd_reg_entry_t new_entry;
+
+	/*
+	 * Perform Basic sanity. The len field is the size of the data payload.
+	 * This doesn't include the header size.
+	 */
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+		return;
+
+	reg = (struct diag_ctrl_cmd_reg *)ptr;
+	ptr += header_len;
+	/* Don't account for pkt_id and length */
+	read_len += header_len - (2 * sizeof(uint32_t));
+
+	if (reg->count_entries == 0) {
+		pr_debug("diag: In %s, received reg tbl with no entries\n",
+			 __func__);
+		return;
+	}
+
+	for (i = 0; i < reg->count_entries && read_len < len; i++) {
+		range = (struct cmd_code_range *)ptr;
+		ptr += sizeof(struct cmd_code_range);
+		read_len += sizeof(struct cmd_code_range);
+		new_entry.cmd_code = reg->cmd_code;
+		new_entry.subsys_id = reg->subsysid;
+		new_entry.cmd_code_hi = range->cmd_code_hi;
+		new_entry.cmd_code_lo = range->cmd_code_lo;
+		/* INVALID_PID: the registrant is a peripheral, not an app. */
+		diag_cmd_add_reg(&new_entry, peripheral, INVALID_PID);
+	}
+
+	if (i != reg->count_entries) {
+		pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
+		       __func__, read_len, len, reg->count_entries);
+	}
+}
+
+/*
+ * Deferred worker: for every peripheral flagged in close_transport,
+ * close whichever transport (SMD vs. socket) it did not choose.
+ */
+static void diag_close_transport_work_fn(struct work_struct *work)
+{
+	uint8_t transport;
+	uint8_t peripheral;
+
+	mutex_lock(&driver->cntl_lock);
+	/*
+	 * Valid peripheral indices are 0..NUM_PERIPHERALS-1 (matching the
+	 * '>= NUM_PERIPHERALS' guards throughout this file); iterating with
+	 * '<=' would read driver->feature[] one element past its end.
+	 */
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		if (!(driver->close_transport & PERIPHERAL_MASK(peripheral)))
+			continue;
+		driver->close_transport ^= PERIPHERAL_MASK(peripheral);
+		/* Keep the transport the peripheral chose; drop the other. */
+		transport = driver->feature[peripheral].sockets_enabled ?
+					TRANSPORT_SMD : TRANSPORT_SOCKET;
+		diagfwd_close_transport(transport, peripheral);
+	}
+	mutex_unlock(&driver->cntl_lock);
+}
+
+/*
+ * The peripheral's socket capability is known: flag it so the deferred
+ * worker (diag_close_transport_work_fn) can close the unused transport.
+ */
+static void process_socket_feature(uint8_t peripheral)
+{
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	mutex_lock(&driver->cntl_lock);
+	driver->close_transport |= PERIPHERAL_MASK(peripheral);
+	queue_work(driver->cntl_wq, &driver->close_transport_work);
+	mutex_unlock(&driver->cntl_lock);
+}
+
+/* Log-on-demand is only registered by the modem; latch its capability. */
+static void process_log_on_demand_feature(uint8_t peripheral)
+{
+	if (peripheral != PERIPHERAL_MODEM)
+		return;
+
+	driver->log_on_demand_support =
+		driver->feature[PERIPHERAL_MODEM].log_on_demand ? 1 : 0;
+}
+
+/*
+ * Handle a DIAG_CTRL_MSG_FEATURE control packet: copy the peripheral's
+ * feature mask byte by byte and react to the individual capabilities
+ * (log-on-demand, separate cmd/rsp, apps HDLC encoding, STM, mask
+ * centralization, peripheral buffering, sockets).
+ */
+static void process_incoming_feature_mask(uint8_t *buf, uint32_t len,
+					  uint8_t peripheral)
+{
+	int i;
+	int header_len = sizeof(struct diag_ctrl_feature_mask);
+	int read_len = 0;
+	struct diag_ctrl_feature_mask *header = NULL;
+	uint32_t feature_mask_len = 0;
+	uint32_t feature_mask = 0;	/* current byte; read by FEATURE_SUPPORTED() */
+	uint8_t *ptr = buf;
+
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+		return;
+
+	header = (struct diag_ctrl_feature_mask *)ptr;
+	ptr += header_len;
+	feature_mask_len = header->feature_mask_len;
+
+	if (feature_mask_len == 0) {
+		pr_debug("diag: In %s, received invalid feature mask from peripheral %d\n",
+			 __func__, peripheral);
+		return;
+	}
+
+	/* Never copy more bytes than the apps-side mask can hold. */
+	if (feature_mask_len > FEATURE_MASK_LEN) {
+		pr_alert("diag: Receiving feature mask length more than Apps support\n");
+		feature_mask_len = FEATURE_MASK_LEN;
+	}
+
+	/* A fresh feature mask invalidates earlier command registrations. */
+	diag_cmd_remove_reg_by_proc(peripheral);
+
+	driver->feature[peripheral].rcvd_feature_mask = 1;
+
+	for (i = 0; i < feature_mask_len && read_len < len; i++) {
+		feature_mask = *(uint8_t *)ptr;
+		driver->feature[peripheral].feature_mask[i] = feature_mask;
+		ptr += sizeof(uint8_t);
+		read_len += sizeof(uint8_t);
+
+		if (FEATURE_SUPPORTED(F_DIAG_LOG_ON_DEMAND_APPS))
+			driver->feature[peripheral].log_on_demand = 1;
+		if (FEATURE_SUPPORTED(F_DIAG_REQ_RSP_SUPPORT))
+			driver->feature[peripheral].separate_cmd_rsp = 1;
+		if (FEATURE_SUPPORTED(F_DIAG_APPS_HDLC_ENCODE))
+			process_hdlc_encoding_feature(peripheral);
+		if (FEATURE_SUPPORTED(F_DIAG_STM))
+			enable_stm_feature(peripheral);
+		if (FEATURE_SUPPORTED(F_DIAG_MASK_CENTRALIZATION))
+			driver->feature[peripheral].mask_centralization = 1;
+		if (FEATURE_SUPPORTED(F_DIAG_PERIPHERAL_BUFFERING))
+			driver->feature[peripheral].peripheral_buffering = 1;
+		if (FEATURE_SUPPORTED(F_DIAG_SOCKETS_ENABLED))
+			enable_socket_feature(peripheral);
+	}
+
+	process_socket_feature(peripheral);
+	process_log_on_demand_feature(peripheral);
+}
+
+/*
+ * Handle a DIAG_CTRL_MSG_LAST_EVENT_REPORT packet: record the highest
+ * event id the peripheral supports and grow the apps event mask when the
+ * peripheral knows about more events than we currently track.
+ */
+static void process_last_event_report(uint8_t *buf, uint32_t len,
+				      uint8_t peripheral)
+{
+	struct diag_ctrl_last_event_report *header = NULL;
+	uint8_t *ptr = buf;
+	uint8_t *temp = NULL;
+	uint32_t pkt_len = sizeof(uint32_t) + sizeof(uint16_t);
+	uint16_t event_size = 0;
+
+	/* This packet carries a fixed-size payload; reject anything else. */
+	if (!buf || peripheral >= NUM_PERIPHERALS || len != pkt_len)
+		return;
+
+	mutex_lock(&event_mask.lock);
+	header = (struct diag_ctrl_last_event_report *)ptr;
+	/* One bit per event id, rounded up to whole bytes. */
+	event_size = ((header->event_last_id / 8) + 1);
+	if (event_size >= driver->event_mask_size) {
+		DIAG_LOG(DIAG_DEBUG_MASKS,
+		"diag: receiving event mask size more that Apps can handle\n");
+		temp = krealloc(driver->event_mask->ptr, event_size,
+				GFP_KERNEL);
+		if (!temp) {
+			pr_err("diag: In %s, unable to reallocate event mask to support events from %d\n",
+			       __func__, peripheral);
+			goto err;
+		}
+		driver->event_mask->ptr = temp;
+		driver->event_mask_size = event_size;
+	}
+
+	driver->num_event_id[peripheral] = header->event_last_id;
+	if (header->event_last_id > driver->last_event_id)
+		driver->last_event_id = header->event_last_id;
+err:
+	mutex_unlock(&event_mask.lock);
+}
+
+/*
+ * Handle a DIAG_CTRL_MSG_LOG_RANGE_REPORT packet: record the number of
+ * log equipment IDs the peripheral supports and the item count for each.
+ */
+static void process_log_range_report(uint8_t *buf, uint32_t len,
+				     uint8_t peripheral)
+{
+	int i;
+	int read_len = 0;
+	int header_len = sizeof(struct diag_ctrl_log_range_report);
+	uint8_t *ptr = buf;
+	struct diag_ctrl_log_range_report *header = NULL;
+	struct diag_ctrl_log_range *log_range = NULL;
+	struct diag_log_mask_t *mask_ptr = NULL;
+
+	/*
+	 * 'len' is unsigned, so the original 'len < 0' check could never
+	 * fire; reject empty payloads instead, matching the other control
+	 * packet handlers in this file.
+	 */
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+		return;
+
+	header = (struct diag_ctrl_log_range_report *)ptr;
+	ptr += header_len;
+	/* Don't account for pkt_id and length */
+	read_len += header_len - (2 * sizeof(uint32_t));
+
+	driver->num_equip_id[peripheral] = header->num_ranges;
+	for (i = 0; i < header->num_ranges && read_len < len; i++) {
+		log_range = (struct diag_ctrl_log_range *)ptr;
+		ptr += sizeof(struct diag_ctrl_log_range);
+		read_len += sizeof(struct diag_ctrl_log_range);
+
+		if (log_range->equip_id >= MAX_EQUIP_ID) {
+			pr_err("diag: receiving log equip id %d more than supported equip id: %d from peripheral: %d\n",
+			       log_range->equip_id, MAX_EQUIP_ID, peripheral);
+			continue;
+		}
+		/* Update the per-equipment-id entry of the log mask table. */
+		mask_ptr = (struct diag_log_mask_t *)log_mask.ptr;
+		mask_ptr = &mask_ptr[log_range->equip_id];
+
+		mutex_lock(&(mask_ptr->lock));
+		mask_ptr->num_items = log_range->num_items;
+		mask_ptr->range = LOG_ITEMS_TO_SIZE(log_range->num_items);
+		mutex_unlock(&(mask_ptr->lock));
+	}
+}
+
+/*
+ * Widen an existing msg mask table entry so that it covers 'range'.
+ * Returns 0 on success, -EIO on NULL input, -EINVAL on an inverted
+ * range.  The entry only grows (ssid_last never shrinks) and its span
+ * is clamped to MAX_SSID_PER_RANGE items.
+ */
+static int update_msg_mask_tbl_entry(struct diag_msg_mask_t *mask,
+				     struct diag_ssid_range_t *range)
+{
+	uint32_t temp_range;
+
+	if (!mask || !range)
+		return -EIO;
+	if (range->ssid_last < range->ssid_first) {
+		pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
+		       __func__, range->ssid_first, range->ssid_last);
+		return -EINVAL;
+	}
+	if (range->ssid_last >= mask->ssid_last) {
+		temp_range = range->ssid_last - mask->ssid_first + 1;
+		/* Clamp to the per-entry limit instead of failing. */
+		if (temp_range > MAX_SSID_PER_RANGE) {
+			temp_range = MAX_SSID_PER_RANGE;
+			mask->ssid_last = mask->ssid_first + temp_range - 1;
+		} else
+			mask->ssid_last = range->ssid_last;
+		mask->ssid_last_tools = mask->ssid_last;
+		mask->range = temp_range;
+	}
+
+	return 0;
+}
+
+/*
+ * Handle a DIAG_CTRL_MSG_SSID_RANGE_REPORT packet: for every SSID range
+ * the peripheral reports, widen the matching msg mask table entry, or
+ * grow the table with a brand-new entry when no match exists.
+ */
+static void process_ssid_range_report(uint8_t *buf, uint32_t len,
+				      uint8_t peripheral)
+{
+	int i;
+	int j;
+	int read_len = 0;
+	int found = 0;
+	int new_size = 0;
+	int err = 0;
+	struct diag_ctrl_ssid_range_report *header = NULL;
+	struct diag_ssid_range_t *ssid_range = NULL;
+	int header_len = sizeof(struct diag_ctrl_ssid_range_report);
+	struct diag_msg_mask_t *mask_ptr = NULL;
+	uint8_t *ptr = buf;
+	uint8_t *temp = NULL;
+	uint32_t min_len = header_len - sizeof(struct diag_ctrl_pkt_header_t);
+
+	/* 'len' is the payload size, excluding the control packet header. */
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < min_len)
+		return;
+
+	header = (struct diag_ctrl_ssid_range_report *)ptr;
+	ptr += header_len;
+	/* Don't account for pkt_id and length */
+	read_len += header_len - (2 * sizeof(uint32_t));
+
+	mutex_lock(&driver->msg_mask_lock);
+	driver->max_ssid_count[peripheral] = header->count;
+	for (i = 0; i < header->count && read_len < len; i++) {
+		ssid_range = (struct diag_ssid_range_t *)ptr;
+		ptr += sizeof(struct diag_ssid_range_t);
+		read_len += sizeof(struct diag_ssid_range_t);
+		mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr;
+		found = 0;
+		/* First try to widen an entry with the same first SSID. */
+		for (j = 0; j < driver->msg_mask_tbl_count; j++, mask_ptr++) {
+			if (!mask_ptr || !ssid_range) {
+				found = 1;
+				break;
+			}
+			if (mask_ptr->ssid_first != ssid_range->ssid_first)
+				continue;
+			mutex_lock(&mask_ptr->lock);
+			err = update_msg_mask_tbl_entry(mask_ptr, ssid_range);
+			mutex_unlock(&mask_ptr->lock);
+			if (err == -ENOMEM) {
+				pr_err("diag: In %s, unable to increase the msg mask table range\n",
+				       __func__);
+			}
+			found = 1;
+			break;
+		}
+
+		if (found)
+			continue;
+
+		/* No match: grow the table by one entry for this range. */
+		new_size = (driver->msg_mask_tbl_count + 1) *
+			   sizeof(struct diag_msg_mask_t);
+		DIAG_LOG(DIAG_DEBUG_MASKS,
+			"diag: receiving msg mask size more that Apps can handle\n");
+		temp = krealloc(msg_mask.ptr, new_size, GFP_KERNEL);
+		if (!temp) {
+			pr_err("diag: In %s, Unable to add new ssid table to msg mask, ssid first: %d, last: %d\n",
+			       __func__, ssid_range->ssid_first,
+			       ssid_range->ssid_last);
+			continue;
+		}
+		msg_mask.ptr = temp;
+		/* Advance to the (new) last slot of the reallocated table. */
+		mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr;
+		for (j = 0; j < driver->msg_mask_tbl_count; j++)
+			++mask_ptr;
+
+		err = diag_create_msg_mask_table_entry(mask_ptr, ssid_range);
+		if (err) {
+			pr_err("diag: In %s, Unable to create a new msg mask table entry, first: %d last: %d err: %d\n",
+			       __func__, ssid_range->ssid_first,
+			       ssid_range->ssid_last, err);
+			continue;
+		}
+		driver->msg_mask_tbl_count += 1;
+	}
+	mutex_unlock(&driver->msg_mask_lock);
+}
+
+/*
+ * Merge a build-time msg mask reported by a peripheral into the local
+ * build-time mask table: OR the reported mask words into a matching
+ * entry, or append a new entry when none covers range->ssid_first.
+ * 'buf' points at the mask words that follow the range header.
+ */
+static void diag_build_time_mask_update(uint8_t *buf,
+					struct diag_ssid_range_t *range)
+{
+	int i;
+	int j;
+	int num_items = 0;
+	int err = 0;
+	int found = 0;
+	int new_size = 0;
+	uint8_t *temp = NULL;
+	uint32_t *mask_ptr = (uint32_t *)buf;
+	uint32_t *dest_ptr = NULL;
+	struct diag_msg_mask_t *build_mask = NULL;
+
+	if (!range || !buf)
+		return;
+
+	if (range->ssid_last < range->ssid_first) {
+		pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
+		       __func__, range->ssid_first, range->ssid_last);
+		return;
+	}
+	mutex_lock(&driver->msg_mask_lock);
+	build_mask = (struct diag_msg_mask_t *)(driver->build_time_mask->ptr);
+	num_items = range->ssid_last - range->ssid_first + 1;
+
+	for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
+		if (!build_mask) {
+			found = 1;
+			break;
+		}
+		if (build_mask->ssid_first != range->ssid_first)
+			continue;
+		found = 1;
+		mutex_lock(&build_mask->lock);
+		err = update_msg_mask_tbl_entry(build_mask, range);
+		if (err == -ENOMEM) {
+			pr_err("diag: In %s, unable to increase the msg build mask table range\n",
+			       __func__);
+		}
+		/* OR the reported words into the stored mask. */
+		dest_ptr = build_mask->ptr;
+		for (j = 0; (j < build_mask->range) && mask_ptr && dest_ptr;
+			j++, mask_ptr++, dest_ptr++)
+			*(uint32_t *)dest_ptr |= *mask_ptr;
+		mutex_unlock(&build_mask->lock);
+		break;
+	}
+
+	if (found)
+		goto end;
+
+	/* No matching entry: append one to the build-time mask table. */
+	new_size = (driver->bt_msg_mask_tbl_count + 1) *
+		   sizeof(struct diag_msg_mask_t);
+	DIAG_LOG(DIAG_DEBUG_MASKS,
+		"diag: receiving build time mask size more that Apps can handle\n");
+
+	temp = krealloc(driver->build_time_mask->ptr, new_size, GFP_KERNEL);
+	if (!temp) {
+		pr_err("diag: In %s, unable to create a new entry for build time mask\n",
+		       __func__);
+		goto end;
+	}
+	driver->build_time_mask->ptr = temp;
+	/* Advance to the (new) last slot of the reallocated table. */
+	build_mask = (struct diag_msg_mask_t*)(driver->build_time_mask->ptr);
+	for (i = 0; i < driver->bt_msg_mask_tbl_count; i++)
+		++build_mask;
+
+	err = diag_create_msg_mask_table_entry(build_mask, range);
+	if (err) {
+		pr_err("diag: In %s, Unable to create a new msg mask table entry, err: %d\n",
+		       __func__, err);
+		goto end;
+	}
+	driver->bt_msg_mask_tbl_count += 1;
+end:
+	mutex_unlock(&driver->msg_mask_lock);
+
+	return;
+}
+
+/*
+ * Handle a DIAG_CTRL_MSG_BUILD_MASK_REPORT packet: each record is an
+ * SSID range header followed by (last - first + 1) mask words, which
+ * are merged into the build-time mask table.
+ */
+static void process_build_mask_report(uint8_t *buf, uint32_t len,
+				      uint8_t peripheral)
+{
+	int i;
+	int read_len = 0;
+	int num_items = 0;
+	int header_len = sizeof(struct diag_ctrl_build_mask_report);
+	uint8_t *ptr = buf;
+	struct diag_ctrl_build_mask_report *header = NULL;
+	struct diag_ssid_range_t *range = NULL;
+
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < header_len)
+		return;
+
+	header = (struct diag_ctrl_build_mask_report *)ptr;
+	ptr += header_len;
+	/* Don't account for pkt_id and length */
+	read_len += header_len - (2 * sizeof(uint32_t));
+
+	for (i = 0; i < header->count && read_len < len; i++) {
+		range = (struct diag_ssid_range_t *)ptr;
+		ptr += sizeof(struct diag_ssid_range_t);
+		read_len += sizeof(struct diag_ssid_range_t);
+		num_items = range->ssid_last - range->ssid_first + 1;
+		/* 'ptr' now points at the mask words for this range. */
+		diag_build_time_mask_update(ptr, range);
+		ptr += num_items * sizeof(uint32_t);
+		read_len += num_items * sizeof(uint32_t);
+	}
+}
+
+/*
+ * Entry point for data arriving on a peripheral's control channel: walk
+ * the buffer packet by packet and dispatch each control message to its
+ * handler.  Packets with an unknown id are skipped.
+ */
+void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf,
+				 int len)
+{
+	uint32_t read_len = 0;
+	uint32_t header_len = sizeof(struct diag_ctrl_pkt_header_t);
+	uint8_t *ptr = buf;
+	struct diag_ctrl_pkt_header_t *ctrl_pkt = NULL;
+
+	if (!buf || len <= 0 || !p_info)
+		return;
+
+	/*
+	 * The peripheral is mid-teardown (see diag_cntl_channel_close());
+	 * its registrations are being removed, so drop the whole buffer.
+	 */
+	if (reg_dirty & PERIPHERAL_MASK(p_info->peripheral)) {
+		pr_err_ratelimited("diag: dropping command registration from peripheral %d\n",
+		       p_info->peripheral);
+		return;
+	}
+
+	/* Each iteration consumes one control header plus its payload. */
+	while (read_len + header_len < len) {
+		ctrl_pkt = (struct diag_ctrl_pkt_header_t *)ptr;
+		switch (ctrl_pkt->pkt_id) {
+		case DIAG_CTRL_MSG_REG:
+			process_command_registration(ptr, ctrl_pkt->len,
+						     p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_DEREG:
+			process_command_deregistration(ptr, ctrl_pkt->len,
+						       p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_FEATURE:
+			process_incoming_feature_mask(ptr, ctrl_pkt->len,
+						      p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_LAST_EVENT_REPORT:
+			process_last_event_report(ptr, ctrl_pkt->len,
+						  p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_LOG_RANGE_REPORT:
+			process_log_range_report(ptr, ctrl_pkt->len,
+						 p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_SSID_RANGE_REPORT:
+			process_ssid_range_report(ptr, ctrl_pkt->len,
+						  p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_BUILD_MASK_REPORT:
+			process_build_mask_report(ptr, ctrl_pkt->len,
+						  p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_PD_STATUS:
+			process_pd_status(ptr, ctrl_pkt->len,
+						p_info->peripheral);
+			break;
+		default:
+			pr_debug("diag: Control packet %d not supported\n",
+				 ctrl_pkt->pkt_id);
+		}
+		ptr += header_len + ctrl_pkt->len;
+		read_len += header_len + ctrl_pkt->len;
+	}
+
+	return;
+}
+
+#ifdef CONFIG_DIAG_OVER_USB
+/*
+ * Decide the logging mode (real time vs. non real time) for processor
+ * 'idx' from the active-process mask, the per-processor real-time votes
+ * and the USB connection state.
+ */
+static int diag_compute_real_time(int idx)
+{
+	/*
+	 * There are no DCI or Memory Device processes. Diag should be in
+	 * Real Time mode irrespective of USB connection.
+	 */
+	if (driver->proc_active_mask == 0)
+		return MODE_REALTIME;
+
+	/*
+	 * Atleast one process is alive and is voting for Real Time data -
+	 * Diag should be in real time mode irrespective of USB connection.
+	 */
+	if (driver->proc_rt_vote_mask[idx] & driver->proc_active_mask)
+		return MODE_REALTIME;
+
+	/*
+	 * If USB is connected, check individual process. If Memory Device
+	 * Mode is active, set the mode requested by Memory Device process.
+	 * Set to realtime mode otherwise.
+	 */
+	if (driver->usb_connected)
+		return (driver->proc_rt_vote_mask[idx] &
+			DIAG_PROC_MEMORY_DEVICE) ? MODE_REALTIME :
+						   MODE_NONREALTIME;
+
+	/*
+	 * We come here if USB is not connected and the active processes
+	 * are voting for Non realtime mode.
+	 */
+	return MODE_NONREALTIME;
+}
+#endif
+
+/*
+ * Build a DIAG_CTRL_MSG_DIAGMODE control packet in 'dest_buf' selecting
+ * real-time or non-real-time logging.  'dest_buf' must have room for
+ * sizeof(struct diag_ctrl_msg_diagmode) bytes.
+ */
+static void diag_create_diag_mode_ctrl_pkt(unsigned char *dest_buf,
+					   int real_time)
+{
+	struct diag_ctrl_msg_diagmode diagmode = {
+		.ctrl_pkt_id = DIAG_CTRL_MSG_DIAGMODE,
+		.ctrl_pkt_data_len = DIAG_MODE_PKT_LEN,
+		.version = 1,
+		/* Sleep voting follows the real-time setting. */
+		.sleep_vote = real_time ? 1 : 0,
+		/*
+		 * 0 - Disables real-time logging (to prevent
+		 *     frequent APPS wake-ups, etc.).
+		 * 1 - Enable real-time logging
+		 */
+		.real_time = real_time,
+		.use_nrt_values = 0,
+		.commit_threshold = 0,
+		.sleep_threshold = 0,
+		.sleep_time = 0,
+		.drain_timer_val = 0,
+		.event_stale_timer_val = 0,
+	};
+
+	if (!dest_buf)
+		return;
+
+	memcpy(dest_buf, &diagmode, sizeof(diagmode));
+}
+
+/*
+ * Mark a process (DCI / memory device) active or inactive.  When the
+ * process goes inactive, its real-time vote is restored to the default
+ * (real time) for the given processor index, or for all processors when
+ * index == ALL_PROC.
+ */
+void diag_update_proc_vote(uint16_t proc, uint8_t vote, int index)
+{
+	int i;
+
+	mutex_lock(&driver->real_time_mutex);
+	if (vote) {
+		driver->proc_active_mask |= proc;
+	} else {
+		driver->proc_active_mask &= ~proc;
+		if (index == ALL_PROC)
+			for (i = 0; i < DIAG_NUM_PROC; i++)
+				driver->proc_rt_vote_mask[i] |= proc;
+		else
+			driver->proc_rt_vote_mask[index] |= proc;
+	}
+	mutex_unlock(&driver->real_time_mutex);
+}
+
+/* Set or clear 'proc' in the real-time vote mask of processor 'i'. */
+static void diag_set_rt_vote(int i, uint16_t proc, uint8_t real_time)
+{
+	if (real_time)
+		driver->proc_rt_vote_mask[i] |= proc;
+	else
+		driver->proc_rt_vote_mask[i] &= ~proc;
+}
+
+/*
+ * Record a process's real-time vote for one processor ('index'), or for
+ * every processor when index <= ALL_PROC.
+ */
+void diag_update_real_time_vote(uint16_t proc, uint8_t real_time, int index)
+{
+	int i;
+
+	if (index >= DIAG_NUM_PROC) {
+		pr_err("diag: In %s, invalid index %d\n", __func__, index);
+		return;
+	}
+
+	mutex_lock(&driver->real_time_mutex);
+	if (index <= ALL_PROC) {
+		for (i = 0; i < DIAG_NUM_PROC; i++)
+			diag_set_rt_vote(i, proc, real_time);
+	} else {
+		diag_set_rt_vote(index, proc, real_time);
+	}
+	mutex_unlock(&driver->real_time_mutex);
+}
+
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/*
+ * Send a DIAGMODE control packet to a remote (bridged) processor over
+ * the DCI bridge channel.  On success the cached real-time mode for the
+ * remote processor is updated; on a failed/short write the borrowed
+ * bridge buffer is returned to its mempool.
+ */
+static void diag_send_diag_mode_update_remote(int token, int real_time)
+{
+	unsigned char *buf = NULL;
+	int err = 0;
+	struct diag_dci_header_t dci_header;
+	int dci_header_size = sizeof(struct diag_dci_header_t);
+	int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
+	uint32_t write_len = 0;
+
+	if (token < 0 || token >= NUM_DCI_PROC) {
+		pr_err("diag: Invalid remote device channel in %s, token: %d\n",
+							__func__, token);
+		return;
+	}
+
+	if (real_time != MODE_REALTIME && real_time != MODE_NONREALTIME) {
+		pr_err("diag: Invalid real time value in %s, type: %d\n",
+							__func__, real_time);
+		return;
+	}
+
+	buf = dci_get_buffer_from_bridge(token);
+	if (!buf) {
+		pr_err("diag: In %s, unable to get dci buffers to write data\n",
+			__func__);
+		return;
+	}
+	/* Frame the DCI header */
+	dci_header.start = CONTROL_CHAR;
+	dci_header.version = 1;
+	/* Length covers the diagmode payload plus the end terminator. */
+	dci_header.length = msg_size + 1;
+	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+
+	memcpy(buf + write_len, &dci_header, dci_header_size);
+	write_len += dci_header_size;
+	diag_create_diag_mode_ctrl_pkt(buf + write_len, real_time);
+	write_len += msg_size;
+	*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+	write_len += sizeof(uint8_t);
+	err = diagfwd_bridge_write(TOKEN_TO_BRIDGE(token), buf, write_len);
+	if (err != write_len) {
+		pr_err("diag: cannot send nrt mode ctrl pkt, err: %d\n", err);
+		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+	} else {
+		/* Slot 0 is the local processor; remotes start at 1. */
+		driver->real_time_mode[token + 1] = real_time;
+	}
+}
+#else
+/* No bridge support compiled in: remote mode updates are a no-op. */
+static inline void diag_send_diag_mode_update_remote(int token, int real_time)
+{
+}
+#endif
+
+#ifdef CONFIG_DIAG_OVER_USB
+/*
+ * Deferred worker: re-evaluate the desired real-time mode for every
+ * processor and push any change to the local peripherals, or to remote
+ * processors over the bridge.
+ */
+void diag_real_time_work_fn(struct work_struct *work)
+{
+	int temp_real_time = MODE_REALTIME, i, j;
+	uint8_t send_update = 1;
+
+	/*
+	 * If any peripheral in the local processor is in either threshold or
+	 * circular buffering mode, don't send the real time mode control
+	 * packet.
+	 */
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!driver->feature[i].peripheral_buffering)
+			continue;
+		switch (driver->buffering_mode[i].mode) {
+		case DIAG_BUFFERING_MODE_THRESHOLD:
+		case DIAG_BUFFERING_MODE_CIRCULAR:
+			send_update = 0;
+			break;
+		}
+	}
+
+	mutex_lock(&driver->mode_lock);
+	for (i = 0; i < DIAG_NUM_PROC; i++) {
+		temp_real_time = diag_compute_real_time(i);
+		if (temp_real_time == driver->real_time_mode[i]) {
+			pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
+				i, temp_real_time);
+			continue;
+		}
+
+		if (i == DIAG_LOCAL_PROC) {
+			if (!send_update) {
+				pr_debug("diag: In %s, cannot send real time mode pkt since one of the periperhal is in buffering mode\n",
+					 __func__);
+				break;
+			}
+			for (j = 0; j < NUM_PERIPHERALS; j++)
+				diag_send_real_time_update(j,
+						temp_real_time);
+		} else {
+			/* Remote proc i maps to bridge token i - 1. */
+			diag_send_diag_mode_update_remote(i - 1,
+							   temp_real_time);
+		}
+	}
+	mutex_unlock(&driver->mode_lock);
+
+	if (driver->real_time_update_busy > 0)
+		driver->real_time_update_busy--;
+}
+#else
+/*
+ * Non-USB build: compute the mode purely from the process votes (there
+ * is no USB connection state to consult) and push any change.
+ */
+void diag_real_time_work_fn(struct work_struct *work)
+{
+	int temp_real_time = MODE_REALTIME, i, j;
+
+	for (i = 0; i < DIAG_NUM_PROC; i++) {
+		if (driver->proc_active_mask == 0) {
+			/*
+			 * There are no DCI or Memory Device processes.
+			 * Diag should be in Real Time mode.
+			 */
+			temp_real_time = MODE_REALTIME;
+		} else if (!(driver->proc_rt_vote_mask[i] &
+						driver->proc_active_mask)) {
+			/* No active process is voting for real time mode */
+			temp_real_time = MODE_NONREALTIME;
+		}
+		if (temp_real_time == driver->real_time_mode[i]) {
+			pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
+				i, temp_real_time);
+			continue;
+		}
+
+		if (i == DIAG_LOCAL_PROC) {
+			for (j = 0; j < NUM_PERIPHERALS; j++)
+				diag_send_real_time_update(
+					j, temp_real_time);
+		} else {
+			/* Remote proc i maps to bridge token i - 1. */
+			diag_send_diag_mode_update_remote(i - 1,
+							  temp_real_time);
+		}
+	}
+
+	if (driver->real_time_update_busy > 0)
+		driver->real_time_update_busy--;
+}
+#endif
+
+static int __diag_send_real_time_update(uint8_t peripheral, int real_time)
+{
+	char buf[sizeof(struct diag_ctrl_msg_diagmode)];
+	int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
+	int err = 0;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return -EINVAL;
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return err; /* err is 0 here: closed channel treated as a no-op */
+	}
+
+	if (real_time != MODE_NONREALTIME && real_time != MODE_REALTIME) {
+		pr_err("diag: In %s, invalid real time mode %d, peripheral: %d\n",
+		       __func__, real_time, peripheral);
+		return -EINVAL;
+	}
+
+	diag_create_diag_mode_ctrl_pkt(buf, real_time); /* build diag-mode ctrl pkt in buf */
+
+	mutex_lock(&driver->diag_cntl_mutex);
+	err = diagfwd_write(peripheral, TYPE_CNTL, buf, msg_size);
+	if (err && err != -ENODEV) {
+		pr_err("diag: In %s, unable to write to smd, peripheral: %d, type: %d, len: %d, err: %d\n",
+		       __func__, peripheral, TYPE_CNTL,
+		       msg_size, err);
+	} else {
+		driver->real_time_mode[DIAG_LOCAL_PROC] = real_time; /* only local proc mode is tracked here */
+	}
+
+	mutex_unlock(&driver->diag_cntl_mutex);
+
+	return err;
+}
+
+int diag_send_real_time_update(uint8_t peripheral, int real_time)
+{
+	int i;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!driver->buffering_flag[i])
+			continue;
+		/*
+		 * One of the peripherals is in buffering mode. Don't set
+		 * the RT value.
+		 */
+		return -EINVAL;
+	}
+
+	return __diag_send_real_time_update(peripheral, real_time);
+}
+
+int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params)
+{
+	int err = 0;
+	int mode = MODE_REALTIME;
+	uint8_t peripheral = 0;
+
+	if (!params)
+		return -EIO;
+
+	peripheral = params->peripheral;
+	if (peripheral >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+		       peripheral);
+		return -EINVAL;
+	}
+
+	if (!driver->buffering_flag[peripheral])
+		return -EINVAL;
+
+	switch (params->mode) {
+	case DIAG_BUFFERING_MODE_STREAMING:
+		mode = MODE_REALTIME;
+		break;
+	case DIAG_BUFFERING_MODE_THRESHOLD:
+	case DIAG_BUFFERING_MODE_CIRCULAR:
+		mode = MODE_NONREALTIME;
+		break;
+	default:
+		pr_err("diag: In %s, invalid tx mode %d\n", __func__,
+		       params->mode);
+		return -EINVAL;
+	}
+
+	if (!driver->feature[peripheral].peripheral_buffering) {
+		pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
+			 __func__, peripheral);
+		driver->buffering_flag[peripheral] = 0;
+		return -EIO;
+	}
+
+	/*
+	 * Sanity-check the watermark values. These values must be
+	 * validated irrespective of the buffering mode.
+	 */
+	if (((params->high_wm_val > DIAG_MAX_WM_VAL) ||
+	     (params->low_wm_val > DIAG_MAX_WM_VAL)) ||
+	    (params->low_wm_val > params->high_wm_val) ||
+	    ((params->low_wm_val == params->high_wm_val) &&
+	     (params->low_wm_val != DIAG_MIN_WM_VAL))) {
+		pr_err("diag: In %s, invalid watermark values, high: %d, low: %d, peripheral: %d\n",
+		       __func__, params->high_wm_val, params->low_wm_val,
+		       peripheral);
+		return -EINVAL;
+	}
+
+	mutex_lock(&driver->mode_lock);
+	err = diag_send_buffering_tx_mode_pkt(peripheral, params);
+	if (err) {
+		pr_err("diag: In %s, unable to send buffering mode packet to peripheral %d, err: %d\n",
+		       __func__, peripheral, err);
+		goto fail;
+	}
+	err = diag_send_buffering_wm_values(peripheral, params);
+	if (err) {
+		pr_err("diag: In %s, unable to send buffering wm value packet to peripheral %d, err: %d\n",
+		       __func__, peripheral, err);
+		goto fail;
+	}
+	err = __diag_send_real_time_update(peripheral, mode);
+	if (err) {
+		pr_err("diag: In %s, unable to send mode update to peripheral %d, mode: %d, err: %d\n",
+		       __func__, peripheral, mode, err);
+		goto fail;
+	}
+	driver->buffering_mode[peripheral].peripheral = peripheral;
+	driver->buffering_mode[peripheral].mode = params->mode;
+	driver->buffering_mode[peripheral].low_wm_val = params->low_wm_val;
+	driver->buffering_mode[peripheral].high_wm_val = params->high_wm_val;
+	if (params->mode == DIAG_BUFFERING_MODE_STREAMING)
+		driver->buffering_flag[peripheral] = 0; /* streaming == buffering disabled */
+fail: /* NOTE(review): a partial send leaves apps/peripheral state inconsistent — confirm acceptable */
+	mutex_unlock(&driver->mode_lock);
+	return err;
+}
+
+int diag_send_stm_state(uint8_t peripheral, uint8_t stm_control_data)
+{
+	struct diag_ctrl_msg_stm stm_msg;
+	int msg_size = sizeof(struct diag_ctrl_msg_stm);
+	int err = 0;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return -EIO;
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return -ENODEV;
+	}
+
+	if (driver->feature[peripheral].stm_support == DISABLE_STM)
+		return -EINVAL;
+
+	stm_msg.ctrl_pkt_id = 21; /* STM ctrl pkt id; NOTE(review): no named #define for 21 — confirm */
+	stm_msg.ctrl_pkt_data_len = 5; /* version (4) + control_data (1) */
+	stm_msg.version = 1;
+	stm_msg.control_data = stm_control_data;
+	err = diagfwd_write(peripheral, TYPE_CNTL, &stm_msg, msg_size);
+	if (err && err != -ENODEV) {
+		pr_err("diag: In %s, unable to write to smd, peripheral: %d, type: %d, len: %d, err: %d\n",
+		       __func__, peripheral, TYPE_CNTL,
+		       msg_size, err);
+	}
+
+	return err;
+}
+
+int diag_send_peripheral_drain_immediate(uint8_t peripheral)
+{
+	int err = 0;
+	struct diag_ctrl_drain_immediate ctrl_pkt;
+
+	if (!driver->feature[peripheral].peripheral_buffering) {
+		pr_debug("diag: In %s, peripheral  %d doesn't support buffering\n",
+			 __func__, peripheral);
+		return -EINVAL;
+	}
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return -ENODEV;
+	}
+
+	ctrl_pkt.pkt_id = DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM;
+	/* The length of the ctrl pkt is size of version and stream id */
+	ctrl_pkt.len = sizeof(uint32_t) + sizeof(uint8_t);
+	ctrl_pkt.version = 1;
+	ctrl_pkt.stream_id = 1;
+
+	err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt, sizeof(ctrl_pkt));
+	if (err && err != -ENODEV) {
+		pr_err("diag: Unable to send drain immediate ctrl packet to peripheral %d, err: %d\n",
+		       peripheral, err);
+	}
+
+	return err;
+}
+
+int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
+				    struct diag_buffering_mode_t *params)
+{ /* NOTE(review): no ch_open check here, unlike diag_send_buffering_wm_values below — confirm intended */
+	int err = 0;
+	struct diag_ctrl_peripheral_tx_mode ctrl_pkt;
+
+	if (!params)
+		return -EIO;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return -EINVAL;
+
+	if (!driver->feature[peripheral].peripheral_buffering) {
+		pr_debug("diag: In %s, peripheral  %d doesn't support buffering\n",
+			 __func__, peripheral);
+		return -EINVAL;
+	}
+
+	if (params->peripheral != peripheral)
+		return -EINVAL;
+
+	switch (params->mode) {
+	case DIAG_BUFFERING_MODE_STREAMING:
+	case DIAG_BUFFERING_MODE_THRESHOLD:
+	case DIAG_BUFFERING_MODE_CIRCULAR:
+		break;
+	default:
+		pr_err("diag: In %s, invalid tx mode: %d\n", __func__,
+		       params->mode);
+		return -EINVAL;
+	}
+
+	ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE;
+	/* Control packet length is size of version, stream_id and tx_mode */
+	ctrl_pkt.len = sizeof(uint32_t) +  (2 * sizeof(uint8_t));
+	ctrl_pkt.version = 1;
+	ctrl_pkt.stream_id = 1;
+	ctrl_pkt.tx_mode = params->mode;
+
+	err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt, sizeof(ctrl_pkt));
+	if (err && err != -ENODEV) {
+		pr_err("diag: Unable to send tx_mode ctrl packet to peripheral %d, err: %d\n",
+		       peripheral, err);
+		goto fail;
+	}
+	driver->buffering_mode[peripheral].mode = params->mode;
+
+fail: /* nothing to undo; falls through to return err */
+	return err;
+}
+
+int diag_send_buffering_wm_values(uint8_t peripheral,
+				  struct diag_buffering_mode_t *params)
+{
+	int err = 0;
+	struct diag_ctrl_set_wq_val ctrl_pkt;
+
+	if (!params)
+		return -EIO;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return -EINVAL;
+
+	if (!driver->feature[peripheral].peripheral_buffering) {
+		pr_debug("diag: In %s, peripheral  %d doesn't support buffering\n",
+			 __func__, peripheral);
+		return -EINVAL;
+	}
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return -ENODEV;
+	}
+
+	if (params->peripheral != peripheral)
+		return -EINVAL;
+
+	switch (params->mode) {
+	case DIAG_BUFFERING_MODE_STREAMING:
+	case DIAG_BUFFERING_MODE_THRESHOLD:
+	case DIAG_BUFFERING_MODE_CIRCULAR:
+		break;
+	default:
+		pr_err("diag: In %s, invalid tx mode: %d\n", __func__,
+		       params->mode);
+		return -EINVAL;
+	}
+
+	ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL;
+	/* Control packet length is size of version, stream_id and wmq values */
+	ctrl_pkt.len = sizeof(uint32_t) + (3 * sizeof(uint8_t));
+	ctrl_pkt.version = 1;
+	ctrl_pkt.stream_id = 1;
+	ctrl_pkt.high_wm_val = params->high_wm_val;
+	ctrl_pkt.low_wm_val = params->low_wm_val;
+
+	err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
+			    sizeof(ctrl_pkt));
+	if (err && err != -ENODEV) {
+		pr_err("diag: Unable to send watermark values to peripheral %d, err: %d\n",
+		       peripheral, err);
+	}
+
+	return err;
+}
+
+int diagfwd_cntl_init(void)
+{
+	uint8_t peripheral = 0;
+
+	reg_dirty = 0;
+	driver->polling_reg_flag = 0;
+	driver->log_on_demand_support = 1;
+	driver->stm_peripheral = 0;
+	driver->close_transport = 0;
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++)
+		driver->buffering_flag[peripheral] = 0;
+
+	mutex_init(&driver->cntl_lock);
+	INIT_WORK(&(driver->stm_update_work), diag_stm_update_work_fn);
+	INIT_WORK(&(driver->mask_update_work), diag_mask_update_work_fn);
+	INIT_WORK(&(driver->close_transport_work),
+		  diag_close_transport_work_fn);
+
+	driver->cntl_wq = create_singlethread_workqueue("diag_cntl_wq"); /* single-threaded: ctrl work items run serialized */
+	if (!driver->cntl_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void diagfwd_cntl_channel_init(void)
+{
+	uint8_t peripheral;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		diagfwd_early_open(peripheral); /* early open precedes the full TYPE_CNTL open */
+		diagfwd_open(peripheral, TYPE_CNTL);
+	}
+}
+
+void diagfwd_cntl_exit(void)
+{
+	if (driver->cntl_wq)
+		destroy_workqueue(driver->cntl_wq);
+	return; /* redundant, harmless */
+}
diff -Nruw linux-6.4-fbx/drivers/char/diag./diagfwd_cntl.h linux-6.4-fbx/drivers/char/diag/diagfwd_cntl.h
--- linux-6.4-fbx/drivers/char/diag./diagfwd_cntl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diagfwd_cntl.h	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,281 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_CNTL_H
+#define DIAGFWD_CNTL_H
+
+/* Message registration commands */
+#define DIAG_CTRL_MSG_REG		1
+/* Message passing for DTR events */
+#define DIAG_CTRL_MSG_DTR		2
+/* Control Diag sleep vote, buffering etc */
+#define DIAG_CTRL_MSG_DIAGMODE		3
+/* Diag data based on "light" diag mask */
+#define DIAG_CTRL_MSG_DIAGDATA		4
+/* Send diag internal feature mask 'diag_int_feature_mask' */
+#define DIAG_CTRL_MSG_FEATURE		8
+/* Send Diag log mask for a particular equip id */
+#define DIAG_CTRL_MSG_EQUIP_LOG_MASK	9
+/* Send Diag event mask */
+#define DIAG_CTRL_MSG_EVENT_MASK_V2	10
+/* Send Diag F3 mask */
+#define DIAG_CTRL_MSG_F3_MASK_V2	11
+#define DIAG_CTRL_MSG_NUM_PRESETS	12
+#define DIAG_CTRL_MSG_SET_PRESET_ID	13
+#define DIAG_CTRL_MSG_LOG_MASK_WITH_PRESET_ID	14
+#define DIAG_CTRL_MSG_EVENT_MASK_WITH_PRESET_ID	15
+#define DIAG_CTRL_MSG_F3_MASK_WITH_PRESET_ID	16
+#define DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE	17
+#define DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM	18
+#define DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL	19
+#define DIAG_CTRL_MSG_DCI_CONNECTION_STATUS	20
+#define DIAG_CTRL_MSG_LAST_EVENT_REPORT		22
+#define DIAG_CTRL_MSG_LOG_RANGE_REPORT		23
+#define DIAG_CTRL_MSG_SSID_RANGE_REPORT		24
+#define DIAG_CTRL_MSG_BUILD_MASK_REPORT		25
+#define DIAG_CTRL_MSG_DEREG		27
+#define DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT		29
+#define DIAG_CTRL_MSG_PD_STATUS			30
+#define DIAG_CTRL_MSG_TIME_SYNC_PKT		31
+
+/*
+ * Feature Mask Definitions: Feature mask is used to specify Diag features
+ * supported by the Apps processor
+ *
+ * F_DIAG_FEATURE_MASK_SUPPORT - Denotes we support sending and receiving
+ *                               feature masks
+ * F_DIAG_LOG_ON_DEMAND_APPS - Apps responds to Log on Demand request
+ * F_DIAG_REQ_RSP_SUPPORT - Apps supports a dedicated request/response channel
+ * F_DIAG_APPS_HDLC_ENCODE - HDLC encoding is done on the forward channel
+ * F_DIAG_STM - Denotes Apps supports Diag over STM
+ */
+#define F_DIAG_FEATURE_MASK_SUPPORT		0
+#define F_DIAG_LOG_ON_DEMAND_APPS		2
+#define F_DIAG_REQ_RSP_SUPPORT			4
+#define F_DIAG_APPS_HDLC_ENCODE			6
+#define F_DIAG_STM				9
+#define F_DIAG_PERIPHERAL_BUFFERING		10
+#define F_DIAG_MASK_CENTRALIZATION		11
+#define F_DIAG_SOCKETS_ENABLED			13
+
+#define ENABLE_SEPARATE_CMDRSP	1
+#define DISABLE_SEPARATE_CMDRSP	0
+
+#define DISABLE_STM	0
+#define ENABLE_STM	1
+#define STATUS_STM	2
+
+#define UPDATE_PERIPHERAL_STM_STATE	1
+#define CLEAR_PERIPHERAL_STM_STATE	2
+
+#define ENABLE_APPS_HDLC_ENCODING	1
+#define DISABLE_APPS_HDLC_ENCODING	0
+
+#define DIAG_MODE_PKT_LEN	36
+
+struct diag_ctrl_pkt_header_t {
+	uint32_t pkt_id;
+	uint32_t len;
+};
+
+struct cmd_code_range {
+	uint16_t cmd_code_lo;
+	uint16_t cmd_code_hi;
+	uint32_t data;
+};
+
+struct diag_ctrl_cmd_reg {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint16_t cmd_code;
+	uint16_t subsysid;
+	uint16_t count_entries;
+	uint16_t port;
+}; /* NOTE(review): not __packed, unlike the sibling ctrl structs — confirm wire layout is unaffected */
+
+struct diag_ctrl_cmd_dereg {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint16_t cmd_code;
+	uint16_t subsysid;
+	uint16_t count_entries;
+} __packed;
+
+struct diag_ctrl_event_mask {
+	uint32_t cmd_type;
+	uint32_t data_len;
+	uint8_t stream_id;
+	uint8_t status;
+	uint8_t event_config;
+	uint32_t event_mask_size;
+	/* Copy event mask here */
+} __packed;
+
+struct diag_ctrl_log_mask {
+	uint32_t cmd_type;
+	uint32_t data_len;
+	uint8_t stream_id;
+	uint8_t status;
+	uint8_t equip_id;
+	uint32_t num_items; /* Last log code for this equip_id */
+	uint32_t log_mask_size; /* Size of log mask stored in log_mask[] */
+	/* Copy log mask here */
+} __packed;
+
+struct diag_ctrl_msg_mask {
+	uint32_t cmd_type;
+	uint32_t data_len;
+	uint8_t stream_id;
+	uint8_t status;
+	uint8_t msg_mode;
+	uint16_t ssid_first; /* Start of range of supported SSIDs */
+	uint16_t ssid_last; /* Last SSID in range */
+	uint32_t msg_mask_size; /* ssid_last - ssid_first + 1 */
+	/* Copy msg mask here */
+} __packed;
+
+struct diag_ctrl_feature_mask {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t feature_mask_len;
+	/* Copy feature mask here */
+} __packed;
+
+struct diag_ctrl_msg_diagmode {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint32_t sleep_vote;
+	uint32_t real_time;
+	uint32_t use_nrt_values;
+	uint32_t commit_threshold;
+	uint32_t sleep_threshold;
+	uint32_t sleep_time;
+	uint32_t drain_timer_val;
+	uint32_t event_stale_timer_val;
+} __packed; /* 11 x u32 = 44 bytes; payload after the 8-byte header = 36 = DIAG_MODE_PKT_LEN */
+
+struct diag_ctrl_msg_stm {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint8_t  control_data;
+} __packed;
+
+struct diag_ctrl_msg_time_sync {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint8_t  time_api;
+} __packed;
+
+struct diag_ctrl_dci_status {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint8_t count;
+} __packed;
+
+struct diag_ctrl_dci_handshake_pkt {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint32_t magic;
+} __packed;
+
+struct diag_ctrl_msg_pd_status {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint32_t pd_id;
+	uint8_t status;
+} __packed;
+
+struct diag_ctrl_last_event_report {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint16_t event_last_id;
+} __packed;
+
+struct diag_ctrl_log_range_report {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint32_t last_equip_id;
+	uint32_t num_ranges;
+} __packed;
+
+struct diag_ctrl_log_range {
+	uint32_t equip_id;
+	uint32_t num_items;
+} __packed;
+
+struct diag_ctrl_ssid_range_report {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint32_t count;
+} __packed;
+
+struct diag_ctrl_build_mask_report {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint32_t count;
+} __packed;
+
+struct diag_ctrl_peripheral_tx_mode {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint8_t stream_id;
+	uint8_t tx_mode;
+} __packed; /* len field = 4 (version) + 2 (stream_id, tx_mode) */
+
+struct diag_ctrl_drain_immediate {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint8_t stream_id;
+} __packed; /* len field = 4 (version) + 1 (stream_id) */
+
+struct diag_ctrl_set_wq_val {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint8_t stream_id;
+	uint8_t high_wm_val;
+	uint8_t low_wm_val;
+} __packed; /* len field = 4 (version) + 3 (stream_id, high, low) */
+
+int diagfwd_cntl_init(void);
+void diagfwd_cntl_channel_init(void);
+void diagfwd_cntl_exit(void);
+void diag_cntl_channel_open(struct diagfwd_info *p_info);
+void diag_cntl_channel_close(struct diagfwd_info *p_info);
+void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf,
+				 int len);
+int diag_send_real_time_update(uint8_t peripheral, int real_time);
+int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params);
+void diag_update_proc_vote(uint16_t proc, uint8_t vote, int index);
+void diag_update_real_time_vote(uint16_t proc, uint8_t real_time, int index);
+void diag_real_time_work_fn(struct work_struct *work);
+int diag_send_stm_state(uint8_t peripheral, uint8_t stm_control_data);
+int diag_send_peripheral_drain_immediate(uint8_t peripheral);
+int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
+				    struct diag_buffering_mode_t *params);
+int diag_send_buffering_wm_values(uint8_t peripheral,
+				  struct diag_buffering_mode_t *params);
+#endif
diff -Nruw linux-6.4-fbx/drivers/char/diag./diagfwd_mhi.h linux-6.4-fbx/drivers/char/diag/diagfwd_mhi.h
--- linux-6.4-fbx/drivers/char/diag./diagfwd_mhi.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diagfwd_mhi.h	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,91 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_MHI_H
+#define DIAGFWD_MHI_H
+
+#include "diagchar.h"
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/ipc_logging.h>
+#include <linux/mhi.h>
+
+enum {
+	MHI_1,
+	MHI_2,
+#ifdef CONFIG_MHI_DCI
+	MHI_DCI_1,
+#endif
+	NUM_MHI_DEV /* count varies with CONFIG_MHI_DCI */
+};
+
+#define TYPE_MHI_READ_CH	0
+#define TYPE_MHI_WRITE_CH	1
+
+#define DIAG_MHI_NAME_SZ	24
+
+struct diag_mhi_buf_tbl_t {
+	struct list_head link;
+	unsigned char *buf;
+	int len;
+};
+
+struct diag_mhi_ch_t {
+	uint8_t type;
+	spinlock_t lock;
+	atomic_t opened;
+	struct list_head buf_tbl;
+};
+
+struct diag_mhi_info {
+	int id;
+	int dev_id;
+	int mempool;
+	int mempool_init;
+	int num_read;
+	uint8_t enabled;
+	struct mhi_device *mhi_dev;
+	char name[DIAG_MHI_NAME_SZ];
+	struct work_struct read_work;
+	struct list_head read_done_list;
+	struct work_struct read_done_work;
+	struct work_struct open_work;
+	struct work_struct close_work;
+	struct workqueue_struct *mhi_wq;
+	wait_queue_head_t mhi_wait_q;
+	struct diag_mhi_ch_t read_ch;
+	struct diag_mhi_ch_t write_ch;
+	spinlock_t lock;
+};
+
+extern struct diag_mhi_info diag_mhi[NUM_MHI_DEV];
+
+int diag_mhi_init(void);
+void diag_mhi_exit(void);
+void diag_register_with_mhi(void);
+void diag_unregister_mhi(void);
+#endif
diff -Nruw linux-6.4-fbx/drivers/char/diag./diagfwd_peripheral.c linux-6.4-fbx/drivers/char/diag/diagfwd_peripheral.c
--- linux-6.4-fbx/drivers/char/diag./diagfwd_peripheral.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diagfwd_peripheral.c	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,1137 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/diagchar.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include "diagchar.h"
+#include "diagchar_hdlc.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_cntl.h"
+#include "diag_masks.h"
+#include "diag_dci.h"
+#include "diagfwd.h"
+#ifdef CONFIG_QCOM_SMD
+#include "diagfwd_smd.h"
+#endif
+#include "diagfwd_socket.h"
+#include "diag_mux.h"
+#include "diag_ipc_logging.h"
+
+struct data_header {
+	uint8_t control_char; /* must equal CONTROL_CHAR (checked in diag_add_hdlc_encoding) */
+	uint8_t version; /* only version 1 is accepted */
+	uint16_t length; /* payload bytes following this header */
+};
+
+static struct diagfwd_info *early_init_info[NUM_TRANSPORT];
+
+static void diagfwd_queue_read(struct diagfwd_info *fwd_info);
+static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info);
+static void diagfwd_cntl_open(struct diagfwd_info *fwd_info);
+static void diagfwd_cntl_close(struct diagfwd_info *fwd_info);
+static void diagfwd_dci_open(struct diagfwd_info *fwd_info);
+static void diagfwd_dci_close(struct diagfwd_info *fwd_info);
+static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
+				   unsigned char *buf, int len);
+static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info,
+				   unsigned char *buf, int len);
+static void diagfwd_dci_read_done(struct diagfwd_info *fwd_info,
+				  unsigned char *buf, int len);
+
+struct diagfwd_info peripheral_info[NUM_TYPES][NUM_PERIPHERALS];
+
+static struct diag_channel_ops data_ch_ops = { /* data channels: no open/close hooks */
+	.open = NULL,
+	.close = NULL,
+	.read_done = diagfwd_data_read_done
+};
+
+static struct diag_channel_ops cntl_ch_ops = { /* control channel callbacks */
+	.open = diagfwd_cntl_open,
+	.close = diagfwd_cntl_close,
+	.read_done = diagfwd_cntl_read_done
+};
+
+static struct diag_channel_ops dci_ch_ops = { /* DCI channel callbacks */
+	.open = diagfwd_dci_open,
+	.close = diagfwd_dci_close,
+	.read_done = diagfwd_dci_read_done
+};
+
+static void diagfwd_cntl_open(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+	diag_cntl_channel_open(fwd_info); /* delegate to ctrl-layer open handling */
+}
+
+static void diagfwd_cntl_close(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+	diag_cntl_channel_close(fwd_info); /* delegate to ctrl-layer close handling */
+}
+
+static void diagfwd_dci_open(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+
+	diag_dci_notify_client(PERIPHERAL_MASK(fwd_info->peripheral),
+			       DIAG_STATUS_OPEN, DCI_LOCAL_PROC); /* tell DCI clients the peripheral came up */
+}
+
+static void diagfwd_dci_close(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+
+	diag_dci_notify_client(PERIPHERAL_MASK(fwd_info->peripheral),
+			       DIAG_STATUS_CLOSED, DCI_LOCAL_PROC); /* tell DCI clients the peripheral went down */
+}
+
+static int diag_add_hdlc_encoding(unsigned char *dest_buf, int *dest_len,
+				  unsigned char *buf, int len)
+{
+	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+	struct data_header *header;
+	int header_size = sizeof(struct data_header);
+	uint8_t *end_control_char = NULL;
+	uint8_t *payload = NULL;
+	uint8_t *temp_buf = NULL;
+	uint8_t *temp_encode_buf = NULL;
+	int src_pkt_len;
+	int encoded_pkt_length;
+	int max_size;
+	int total_processed = 0;
+	int bytes_remaining;
+	int err = 0;
+	uint8_t loop_count = 0;
+
+	if (!dest_buf || !dest_len || !buf)
+		return -EIO;
+
+	temp_buf = buf;
+	temp_encode_buf = dest_buf;
+	bytes_remaining = *dest_len;
+
+	while (total_processed < len) {
+		loop_count++; /* NOTE(review): counter is never read — confirm it can be dropped */
+		header = (struct data_header *)temp_buf;
+		/* Validate the per-packet header before encoding */
+		if (header->control_char != CONTROL_CHAR ||
+		    header->version != 1) {
+			err = -EINVAL;
+			break;
+		}
+
+		if (header->length >= bytes_remaining)
+			break; /* out of dest space: stop without flagging an error */
+
+		payload = temp_buf + header_size;
+		end_control_char = payload + header->length;
+		if (*end_control_char != CONTROL_CHAR) {
+			err = -EINVAL;
+			break;
+		}
+
+		max_size = 2 * header->length + 3; /* worst-case HDLC expansion */
+		if (bytes_remaining < max_size) {
+			err = -EINVAL;
+			break;
+		}
+
+		/* Prepare for encoding the data */
+		send.state = DIAG_STATE_START;
+		send.pkt = payload;
+		send.last = (void *)(payload + header->length - 1);
+		send.terminate = 1;
+
+		enc.dest = temp_encode_buf;
+		enc.dest_last = (void *)(temp_encode_buf + max_size);
+		enc.crc = 0;
+		diag_hdlc_encode(&send, &enc);
+
+		/* Prepare for next packet */
+		src_pkt_len = (header_size + header->length + 1);
+		total_processed += src_pkt_len;
+		temp_buf += src_pkt_len;
+
+		encoded_pkt_length = (uint8_t *)enc.dest - temp_encode_buf;
+		bytes_remaining -= encoded_pkt_length;
+		temp_encode_buf = enc.dest;
+	}
+
+	*dest_len = (int)(temp_encode_buf - dest_buf); /* report total encoded bytes */
+
+	return err;
+}
+
+static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
+{
+	uint32_t max_size = 0;
+	unsigned char *temp_buf = NULL;
+
+	if (!buf || len == 0)
+		return -EINVAL;
+
+	max_size = (2 * len) + 3; /* worst-case HDLC growth (escaping + CRC/terminator) */
+	if (max_size > PERIPHERAL_BUF_SZ) {
+		if (max_size > MAX_PERIPHERAL_HDLC_BUF_SZ) {
+			pr_err("diag: In %s, max_size is going beyond limit %d\n",
+			       __func__, max_size);
+			max_size = MAX_PERIPHERAL_HDLC_BUF_SZ;
+		}
+
+		if (buf->len < max_size) {
+			temp_buf = krealloc(buf->data, max_size +
+						APF_DIAG_PADDING,
+					    GFP_KERNEL);
+			if (!temp_buf)
+				return -ENOMEM;
+			buf->data = temp_buf;
+			buf->len = max_size;
+		}
+	}
+
+	return buf->len; /* current (possibly grown) capacity */
+}
+
+static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
+				   unsigned char *buf, int len)
+{
+	int err = 0;
+	int write_len = 0;
+	unsigned char *write_buf = NULL;
+	struct diagfwd_buf_t *temp_buf = NULL;
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled = 0;
+	if (!fwd_info || !buf || len <= 0) {
+		diag_ws_release();
+		return;
+	}
+
+	switch (fwd_info->type) {
+	case TYPE_DATA:
+	case TYPE_CMD:
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
+				   __func__, fwd_info->type,
+				   fwd_info->peripheral);
+		diag_ws_release();
+		return;
+	}
+
+	mutex_lock(&driver->hdlc_disable_mutex); /* lock order: hdlc_disable -> data -> md_session */
+	mutex_lock(&fwd_info->data_mutex);
+	mutex_lock(&driver->md_session_lock);
+	session_info = diag_md_session_get_peripheral(fwd_info->peripheral);
+	if (session_info)
+		hdlc_disabled = session_info->hdlc_disabled;
+	else
+		hdlc_disabled = driver->hdlc_disabled;
+	mutex_unlock(&driver->md_session_lock);
+	if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
+		if (fwd_info->buf_1 && fwd_info->buf_1->data == buf) {
+			temp_buf = fwd_info->buf_1;
+			write_buf = fwd_info->buf_1->data;
+		} else if (fwd_info->buf_2 && fwd_info->buf_2->data == buf) {
+			temp_buf = fwd_info->buf_2;
+			write_buf = fwd_info->buf_2->data;
+		} else {
+			pr_err("diag: In %s, no match for buffer %pK, peripheral %d, type: %d\n",
+			       __func__, buf, fwd_info->peripheral,
+			       fwd_info->type);
+			goto end;
+		}
+		write_len = len;
+	} else if (hdlc_disabled) {
+		/* The data is raw and HDLC is disabled on the APPS side */
+		if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) {
+			temp_buf = fwd_info->buf_1;
+		} else if (fwd_info->buf_2 &&
+			   fwd_info->buf_2->data_raw == buf) {
+			temp_buf = fwd_info->buf_2;
+		} else {
+			pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+			       __func__, buf, fwd_info->peripheral,
+			       fwd_info->type);
+			goto end;
+		}
+		if (len > PERIPHERAL_BUF_SZ) {
+			pr_err("diag: In %s, Incoming buffer too large %d, peripheral %d, type: %d\n",
+			       __func__, len, fwd_info->peripheral,
+			       fwd_info->type);
+			goto end;
+		}
+		write_len = len;
+		write_buf = buf;
+	} else {
+		if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) {
+			temp_buf = fwd_info->buf_1;
+		} else if (fwd_info->buf_2 &&
+			   fwd_info->buf_2->data_raw == buf) {
+			temp_buf = fwd_info->buf_2;
+		} else {
+			pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+				__func__, buf, fwd_info->peripheral,
+				fwd_info->type);
+			goto end;
+		}
+		write_len = check_bufsize_for_encoding(temp_buf, len); /* may krealloc the dest buffer */
+		if (write_len <= 0) {
+			pr_err("diag: error in checking buf for encoding\n");
+			goto end;
+		}
+		write_buf = temp_buf->data;
+		err = diag_add_hdlc_encoding(write_buf, &write_len, buf, len);
+		if (err) {
+			pr_err("diag: error in adding hdlc encoding\n");
+			goto end;
+		}
+	}
+
+	mutex_unlock(&fwd_info->data_mutex);
+	mutex_unlock(&driver->hdlc_disable_mutex);
+
+	if (write_len > 0) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, write_buf, write_len,
+				     temp_buf->ctxt);
+		if (err) {
+			pr_err_ratelimited("diag: In %s, unable to write to mux error: %d\n",
+					   __func__, err);
+			goto end_write;
+		}
+	}
+	diagfwd_queue_read(fwd_info);
+	return;
+
+end:
+	mutex_unlock(&fwd_info->data_mutex);
+	mutex_unlock(&driver->hdlc_disable_mutex);
+end_write: /* release wakeup source and recycle the buffer */
+	diag_ws_release();
+	if (temp_buf) {
+		diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
+				   GET_BUF_NUM(temp_buf->ctxt));
+	}
+	diagfwd_queue_read(fwd_info);
+	return;
+}
+
+static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info,
+				   unsigned char *buf, int len)
+{
+	if (!fwd_info) {
+		diag_ws_release();
+		return;
+	}
+
+	if (fwd_info->type != TYPE_CNTL) {
+		pr_err("diag: In %s, invalid type %d for peripheral %d\n",
+		       __func__, fwd_info->type, fwd_info->peripheral);
+		diag_ws_release();
+		return;
+	}
+
+	diag_ws_on_read(DIAG_WS_MUX, len);
+	diag_cntl_process_read_data(fwd_info, buf, len);
+	/*
+	 * Control packets are not consumed by the clients. Mimic
+	 * consumption by setting and clearing the wakeup source copy_count
+	 * explicitly.
+	 */
+	diag_ws_on_copy_fail(DIAG_WS_MUX);
+	/* Reset the buffer in_busy value after processing the data */
+	if (fwd_info->buf_1)
+		atomic_set(&fwd_info->buf_1->in_busy, 0);
+
+	diagfwd_queue_read(fwd_info);
+	diagfwd_queue_read(&peripheral_info[TYPE_DATA][fwd_info->peripheral]); /* kick data/cmd reads after ctrl processing */
+	diagfwd_queue_read(&peripheral_info[TYPE_CMD][fwd_info->peripheral]);
+}
+
+/*
+ * Read-completion handler for DCI channels (TYPE_DCI / TYPE_DCI_CMD).
+ * Forwards the payload to the DCI layer, marks buf_1 free again and queues
+ * the next read.  Unlike the control handler, no wakeup-source bookkeeping
+ * is done here.
+ */
+static void diagfwd_dci_read_done(struct diagfwd_info *fwd_info,
+				  unsigned char *buf, int len)
+{
+	if (!fwd_info)
+		return;
+
+	switch (fwd_info->type) {
+	case TYPE_DCI:
+	case TYPE_DCI_CMD:
+		break;
+	default:
+		pr_err("diag: In %s, invalid type %d for peripheral %d\n",
+		       __func__, fwd_info->type, fwd_info->peripheral);
+		return;
+	}
+
+	diag_dci_process_peripheral_data(fwd_info, (void *)buf, len);
+	/* Reset the buffer in_busy value after processing the data */
+	if (fwd_info->buf_1)
+		atomic_set(&fwd_info->buf_1->in_busy, 0);
+
+	diagfwd_queue_read(fwd_info);
+}
+
+/*
+ * Clear the in_busy flag on whichever of the channel's two buffers owns
+ * @buf.  Called when a transport read returned no data, so the buffer can
+ * be reused.  When the peripheral supports HDLC encoding on the apps side,
+ * reads go into data_raw rather than data, so the match is done against
+ * the corresponding pointer.
+ */
+static void diagfwd_reset_buffers(struct diagfwd_info *fwd_info,
+				  unsigned char *buf)
+{
+	if (!fwd_info || !buf)
+		return;
+
+	if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
+		if (fwd_info->buf_1 && fwd_info->buf_1->data == buf)
+			atomic_set(&fwd_info->buf_1->in_busy, 0);
+		else if (fwd_info->buf_2 && fwd_info->buf_2->data == buf)
+			atomic_set(&fwd_info->buf_2->in_busy, 0);
+	} else {
+		/* Raw (pre-encoding) buffers were handed to the transport. */
+		if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf)
+			atomic_set(&fwd_info->buf_1->in_busy, 0);
+		else if (fwd_info->buf_2 && fwd_info->buf_2->data_raw == buf)
+			atomic_set(&fwd_info->buf_2->in_busy, 0);
+	}
+}
+
+/*
+ * One-time initialization of the peripheral forwarding layer.
+ *
+ * Allocates the per-transport early_init_info arrays (one diagfwd_info per
+ * peripheral, all of TYPE_CNTL, used before the winning transport is known)
+ * and initializes the static peripheral_info table for every type/peripheral
+ * pair, publishing convenience pointers in the global driver struct.
+ * Finally brings up the underlying transports (SMD if configured, sockets
+ * if supported).
+ *
+ * Returns 0 on success or -ENOMEM if an early-init array cannot be
+ * allocated.  NOTE(review): on ENOMEM, arrays allocated in earlier loop
+ * iterations are not freed here — presumably diagfwd_peripheral_exit() is
+ * expected to run on the failure path; confirm against the caller.
+ */
+int diagfwd_peripheral_init(void)
+{
+	uint8_t peripheral;
+	uint8_t transport;
+	uint8_t type;
+	struct diagfwd_info *fwd_info = NULL;
+
+	for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+		early_init_info[transport] = kzalloc(
+				sizeof(struct diagfwd_info) * NUM_PERIPHERALS,
+				GFP_KERNEL);
+		if (!early_init_info[transport])
+			return -ENOMEM;
+		kmemleak_not_leak(early_init_info[transport]);
+	}
+
+	/* Early-init slots: control channels only, marked inited up front. */
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+			fwd_info = &early_init_info[transport][peripheral];
+			fwd_info->peripheral = peripheral;
+			fwd_info->type = TYPE_CNTL;
+			fwd_info->transport = transport;
+			fwd_info->ctxt = NULL;
+			fwd_info->p_ops = NULL;
+			fwd_info->ch_open = 0;
+			fwd_info->inited = 1;
+			fwd_info->read_bytes = 0;
+			fwd_info->write_bytes = 0;
+			mutex_init(&fwd_info->buf_mutex);
+			mutex_init(&fwd_info->data_mutex);
+		}
+	}
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		for (type = 0; type < NUM_TYPES; type++) {
+			fwd_info = &peripheral_info[type][peripheral];
+			fwd_info->peripheral = peripheral;
+			fwd_info->type = type;
+			fwd_info->ctxt = NULL;
+			fwd_info->p_ops = NULL;
+			fwd_info->ch_open = 0;
+			fwd_info->read_bytes = 0;
+			fwd_info->write_bytes = 0;
+			mutex_init(&fwd_info->buf_mutex);
+			mutex_init(&fwd_info->data_mutex);
+			/*
+			 * This state shouldn't be set for Control channels
+			 * during initialization. This is set when the feature
+			 * mask is received for the first time.
+			 */
+			if (type != TYPE_CNTL)
+				fwd_info->inited = 1;
+		}
+		/* Publish per-type shortcuts used throughout the driver. */
+		driver->diagfwd_data[peripheral] =
+			&peripheral_info[TYPE_DATA][peripheral];
+		driver->diagfwd_cntl[peripheral] =
+			&peripheral_info[TYPE_CNTL][peripheral];
+		driver->diagfwd_dci[peripheral] =
+			&peripheral_info[TYPE_DCI][peripheral];
+		driver->diagfwd_cmd[peripheral] =
+			&peripheral_info[TYPE_CMD][peripheral];
+		driver->diagfwd_dci_cmd[peripheral] =
+			&peripheral_info[TYPE_DCI_CMD][peripheral];
+	}
+
+#ifdef CONFIG_QCOM_SMD
+	diag_smd_init();
+#endif
+	if (driver->supports_sockets)
+		diag_socket_init();
+
+	return 0;
+}
+
+/*
+ * Tear down the peripheral forwarding layer: shut down the transports,
+ * release every channel's buffers, clear the driver's shortcut pointers
+ * and free the early-init arrays.  Mirrors diagfwd_peripheral_init().
+ */
+void diagfwd_peripheral_exit(void)
+{
+	uint8_t peripheral;
+	uint8_t type;
+	struct diagfwd_info *fwd_info = NULL;
+	int transport = 0;
+
+#ifdef CONFIG_QCOM_SMD
+	diag_smd_exit();
+#endif
+	diag_socket_exit();
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		for (type = 0; type < NUM_TYPES; type++) {
+			fwd_info = &peripheral_info[type][peripheral];
+			fwd_info->ctxt = NULL;
+			fwd_info->p_ops = NULL;
+			fwd_info->ch_open = 0;
+			diagfwd_buffers_exit(fwd_info);
+		}
+	}
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		driver->diagfwd_data[peripheral] = NULL;
+		driver->diagfwd_cntl[peripheral] = NULL;
+		driver->diagfwd_dci[peripheral] = NULL;
+		driver->diagfwd_cmd[peripheral] = NULL;
+		driver->diagfwd_dci_cmd[peripheral] = NULL;
+	}
+
+	for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+		kfree(early_init_info[transport]);
+		early_init_info[transport] = NULL;
+	}
+}
+
+/*
+ * Register a transport's control channel for @peripheral.  Binds the
+ * transport context and peripheral ops to the matching early_init_info
+ * slot and hands that slot back through @fwd_ctxt.
+ *
+ * Returns 0 on success, -EIO on NULL ctxt/ops, -EINVAL on out-of-range
+ * transport or peripheral.  NOTE(review): @fwd_ctxt is dereferenced
+ * without a NULL check — callers must pass a valid pointer.
+ */
+int diagfwd_cntl_register(uint8_t transport, uint8_t peripheral, void *ctxt,
+			  struct diag_peripheral_ops *ops,
+			  struct diagfwd_info **fwd_ctxt)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (!ctxt || !ops)
+		return -EIO;
+
+	if (transport >= NUM_TRANSPORT || peripheral >= NUM_PERIPHERALS)
+		return -EINVAL;
+
+	fwd_info = &early_init_info[transport][peripheral];
+	*fwd_ctxt = &early_init_info[transport][peripheral];
+	fwd_info->ctxt = ctxt;
+	fwd_info->p_ops = ops;
+	fwd_info->c_ops = &cntl_ch_ops;
+
+	return 0;
+}
+
+/*
+ * Register a transport's non-control channel (DATA/CMD/DCI/DCI_CMD) for
+ * @peripheral.  Binds ctxt/ops into the peripheral_info slot, selects the
+ * channel ops matching @type and, if the channel was already marked opened
+ * (diag state), immediately propagates the open to the peripheral ops.
+ *
+ * Returns 0 on success, -EIO on invalid arguments, -EINVAL on an
+ * unsupported @type.
+ */
+int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type,
+		     void *ctxt, struct diag_peripheral_ops *ops,
+		     struct diagfwd_info **fwd_ctxt)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES ||
+	    !ctxt || !ops || transport >= NUM_TRANSPORT) {
+		pr_err("diag: In %s, returning error\n", __func__);
+		return -EIO;
+	}
+
+	fwd_info = &peripheral_info[type][peripheral];
+	*fwd_ctxt = &peripheral_info[type][peripheral];
+	fwd_info->ctxt = ctxt;
+	fwd_info->p_ops = ops;
+	fwd_info->transport = transport;
+	fwd_info->ch_open = 0;
+
+	switch (type) {
+	case TYPE_DATA:
+	case TYPE_CMD:
+		fwd_info->c_ops = &data_ch_ops;
+		break;
+	case TYPE_DCI:
+	case TYPE_DCI_CMD:
+		fwd_info->c_ops = &dci_ch_ops;
+		break;
+	default:
+		pr_err("diag: In %s, invalid type: %d\n", __func__, type);
+		return -EINVAL;
+	}
+
+	if (atomic_read(&fwd_info->opened) &&
+	    fwd_info->p_ops && fwd_info->p_ops->open) {
+		/*
+		 * The registration can happen late, like in the case of
+		 * sockets. fwd_info->opened reflects diag_state. Propagate
+		 * the state to the peripherals.
+		 */
+		fwd_info->p_ops->open(fwd_info->ctxt);
+	}
+
+	return 0;
+}
+
+/*
+ * Undo diagfwd_register(): detach @ctxt from the peripheral_info slot
+ * (only if it is still the registered context), free the channel buffers
+ * and clear the matching shortcut pointer in the driver struct.
+ */
+void diagfwd_deregister(uint8_t peripheral, uint8_t type, void *ctxt)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES || !ctxt)
+		return;
+
+	fwd_info = &peripheral_info[type][peripheral];
+	/* Refuse to deregister a context that isn't the registered one. */
+	if (fwd_info->ctxt != ctxt) {
+		pr_err("diag: In %s, unable to find a match for p: %d t: %d\n",
+		       __func__, peripheral, type);
+		return;
+	}
+	fwd_info->ctxt = NULL;
+	fwd_info->p_ops = NULL;
+	fwd_info->ch_open = 0;
+	diagfwd_buffers_exit(fwd_info);
+
+	switch (type) {
+	case TYPE_DATA:
+		driver->diagfwd_data[peripheral] = NULL;
+		break;
+	case TYPE_CNTL:
+		driver->diagfwd_cntl[peripheral] = NULL;
+		break;
+	case TYPE_DCI:
+		driver->diagfwd_dci[peripheral] = NULL;
+		break;
+	case TYPE_CMD:
+		driver->diagfwd_cmd[peripheral] = NULL;
+		break;
+	case TYPE_DCI_CMD:
+		driver->diagfwd_dci_cmd[peripheral] = NULL;
+		break;
+	}
+}
+
+/*
+ * Switch @peripheral's control channel away from @transport to the other
+ * transport (SMD <-> socket).  Closes the losing transport's early-init
+ * channel, copies the winning transport's early-init state into the main
+ * peripheral_info[TYPE_CNTL] slot, lets the winner revalidate its context,
+ * opens the control channel and finally re-queues reads on the DATA and
+ * CMD channels.
+ *
+ * NOTE(review): dest_info->inited is set to 1 and then overwritten with
+ * fwd_info->inited a few lines below — the first assignment looks
+ * redundant; confirm before cleaning up.
+ */
+void diagfwd_close_transport(uint8_t transport, uint8_t peripheral)
+{
+	struct diagfwd_info *fwd_info = NULL;
+	struct diagfwd_info *dest_info = NULL;
+	int (*init_fn)(uint8_t) = NULL;
+	void (*invalidate_fn)(void *, struct diagfwd_info *) = NULL;
+	int (*check_channel_state)(void *) = NULL;
+	uint8_t transport_open = 0;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	/* Pick the surviving transport and its helper functions. */
+	switch (transport) {
+	case TRANSPORT_SMD:
+		transport_open = TRANSPORT_SOCKET;
+		init_fn = diag_socket_init_peripheral;
+		invalidate_fn = diag_socket_invalidate;
+		check_channel_state = diag_socket_check_state;
+		break;
+	case TRANSPORT_SOCKET:
+#ifdef CONFIG_QCOM_SMD
+		transport_open = TRANSPORT_SMD;
+		init_fn = diag_smd_init_peripheral;
+		invalidate_fn = diag_smd_invalidate;
+		check_channel_state = diag_smd_check_state;
+		break;
+#else
+		pr_err("Transport SMD is not configured\n");
+		return;
+#endif
+	default:
+		return;
+	}
+
+	mutex_lock(&driver->diagfwd_channel_mutex[peripheral]);
+	/* Close the transport that lost the race. */
+	fwd_info = &early_init_info[transport][peripheral];
+	if (fwd_info->p_ops && fwd_info->p_ops->close)
+		fwd_info->p_ops->close(fwd_info->ctxt);
+	/* Promote the winner's early-init state to the main CNTL slot. */
+	fwd_info = &early_init_info[transport_open][peripheral];
+	dest_info = &peripheral_info[TYPE_CNTL][peripheral];
+	dest_info->inited = 1;
+	dest_info->ctxt = fwd_info->ctxt;
+	dest_info->p_ops = fwd_info->p_ops;
+	dest_info->c_ops = fwd_info->c_ops;
+	dest_info->ch_open = fwd_info->ch_open;
+	dest_info->read_bytes = fwd_info->read_bytes;
+	dest_info->write_bytes = fwd_info->write_bytes;
+	dest_info->inited = fwd_info->inited;
+	dest_info->buf_1 = fwd_info->buf_1;
+	dest_info->buf_2 = fwd_info->buf_2;
+	dest_info->transport = fwd_info->transport;
+	invalidate_fn(dest_info->ctxt, dest_info);
+	if (!check_channel_state(dest_info->ctxt))
+		diagfwd_late_open(dest_info);
+	diagfwd_cntl_open(dest_info);
+	init_fn(peripheral);
+	mutex_unlock(&driver->diagfwd_channel_mutex[peripheral]);
+	diagfwd_queue_read(&peripheral_info[TYPE_DATA][peripheral]);
+	diagfwd_queue_read(&peripheral_info[TYPE_CMD][peripheral]);
+}
+
+/*
+ * Write @len bytes from @buf to a peripheral channel.
+ *
+ * Command writes are dropped (returning 0) until the feature-mask exchange
+ * has completed; if the peripheral does not support a separate command/
+ * response channel, the write is redirected to the corresponding data
+ * channel.  Transient transport errors are retried up to 3 times with a
+ * ~100 ms back-off; -ENODEV is returned immediately.
+ *
+ * Returns 0 on success, a negative errno otherwise.
+ */
+int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len)
+{
+	struct diagfwd_info *fwd_info = NULL;
+	int err = 0;
+	uint8_t retry_count = 0;
+	uint8_t max_retries = 3;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+		return -EINVAL;
+
+	if (type == TYPE_CMD || type == TYPE_DCI_CMD) {
+		if (!driver->feature[peripheral].rcvd_feature_mask ||
+			!driver->feature[peripheral].sent_feature_mask) {
+			pr_debug_ratelimited("diag: In %s, feature mask for peripheral: %d not received or sent yet\n",
+					     __func__, peripheral);
+			return 0;
+		}
+		/* No dedicated cmd/rsp channel: fall back to data channel. */
+		if (!driver->feature[peripheral].separate_cmd_rsp)
+			type = (type == TYPE_CMD) ? TYPE_DATA : TYPE_DCI;
+	}
+
+	fwd_info = &peripheral_info[type][peripheral];
+	if (!fwd_info->inited || !atomic_read(&fwd_info->opened))
+		return -ENODEV;
+
+	if (!(fwd_info->p_ops && fwd_info->p_ops->write && fwd_info->ctxt))
+		return -EIO;
+
+	while (retry_count < max_retries) {
+		err = 0;
+		err = fwd_info->p_ops->write(fwd_info->ctxt, buf, len);
+		/* -ENODEV means the channel is gone; don't retry. */
+		if (err && err != -ENODEV) {
+			usleep_range(100000, 101000);
+			retry_count++;
+			continue;
+		}
+		break;
+	}
+
+	if (!err)
+		fwd_info->write_bytes += len;
+
+	return err;
+}
+
+/*
+ * Common open path: mark the channel opened, free both buffers for use,
+ * notify the peripheral transport and queue the first read.  If the
+ * channel has not been inited yet, only the opened flag is recorded (the
+ * rest happens once initialization completes).
+ */
+static void __diag_fwd_open(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+
+	atomic_set(&fwd_info->opened, 1);
+	if (!fwd_info->inited)
+		return;
+
+	if (fwd_info->buf_1)
+		atomic_set(&fwd_info->buf_1->in_busy, 0);
+	if (fwd_info->buf_2)
+		atomic_set(&fwd_info->buf_2->in_busy, 0);
+
+	if (fwd_info->p_ops && fwd_info->p_ops->open)
+		fwd_info->p_ops->open(fwd_info->ctxt);
+
+	diagfwd_queue_read(fwd_info);
+}
+
+/*
+ * Open the early-init (control) channel for @peripheral on every
+ * transport, before it is known which transport will win.
+ */
+void diagfwd_early_open(uint8_t peripheral)
+{
+	uint8_t transport = 0;
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+		fwd_info = &early_init_info[transport][peripheral];
+		__diag_fwd_open(fwd_info);
+	}
+}
+
+/* Open the main peripheral_info channel for (@peripheral, @type). */
+void diagfwd_open(uint8_t peripheral, uint8_t type)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+		return;
+
+	fwd_info = &peripheral_info[type][peripheral];
+	__diag_fwd_open(fwd_info);
+}
+
+/* Open a channel identified directly by its diagfwd_info (used after a
+ * transport switch, when the table indices are already resolved). */
+void diagfwd_late_open(struct diagfwd_info *fwd_info)
+{
+	__diag_fwd_open(fwd_info);
+}
+
+/*
+ * Close the (@peripheral, @type) channel: clear the opened flag, notify
+ * the transport, and mark both buffers busy so no further reads are
+ * issued into them while the channel is down.
+ */
+void diagfwd_close(uint8_t peripheral, uint8_t type)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+		return;
+
+	fwd_info = &peripheral_info[type][peripheral];
+	atomic_set(&fwd_info->opened, 0);
+	if (!fwd_info->inited)
+		return;
+
+	if (fwd_info->p_ops && fwd_info->p_ops->close)
+		fwd_info->p_ops->close(fwd_info->ctxt);
+
+	if (fwd_info->buf_1)
+		atomic_set(&fwd_info->buf_1->in_busy, 1);
+	/*
+	 * Only Data channels have two buffers. Set both the buffers
+	 * to busy on close.
+	 */
+	if (fwd_info->buf_2)
+		atomic_set(&fwd_info->buf_2->in_busy, 1);
+}
+
+/*
+ * Called by a transport when its channel comes up.  Allocates the channel
+ * buffers, runs the channel-type open hook, queues the first read, and —
+ * if diag state already wants the channel open — forwards the open to the
+ * peripheral ops as well.
+ *
+ * Returns 0 on success (including the already-open case), -EIO on NULL
+ * fwd_info, -EINVAL if the channel was never inited.
+ */
+int diagfwd_channel_open(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return -EIO;
+
+	if (!fwd_info->inited) {
+		pr_debug("diag: In %s, channel is not inited, p: %d, t: %d\n",
+			 __func__, fwd_info->peripheral, fwd_info->type);
+		return -EINVAL;
+	}
+
+	if (fwd_info->ch_open) {
+		pr_debug("diag: In %s, channel is already open, p: %d, t: %d\n",
+			 __func__, fwd_info->peripheral, fwd_info->type);
+		return 0;
+	}
+
+	fwd_info->ch_open = 1;
+	diagfwd_buffers_init(fwd_info);
+	if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->open)
+		fwd_info->c_ops->open(fwd_info);
+	diagfwd_queue_read(fwd_info);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered opened\n",
+		 fwd_info->peripheral, fwd_info->type);
+
+	if (atomic_read(&fwd_info->opened)) {
+		if (fwd_info->p_ops && fwd_info->p_ops->open)
+			fwd_info->p_ops->open(fwd_info->ctxt);
+	}
+
+	return 0;
+}
+
+/*
+ * Called by a transport when its channel goes down.  Runs the
+ * channel-type close hook and releases both buffers for reuse.
+ * Returns 0 on success, -EIO on NULL fwd_info.
+ */
+int diagfwd_channel_close(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return -EIO;
+
+	fwd_info->ch_open = 0;
+	if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close)
+		fwd_info->c_ops->close(fwd_info);
+
+	if (fwd_info->buf_1 && fwd_info->buf_1->data)
+		atomic_set(&fwd_info->buf_1->in_busy, 0);
+	if (fwd_info->buf_2 && fwd_info->buf_2->data)
+		atomic_set(&fwd_info->buf_2->in_busy, 0);
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered closed\n",
+		 fwd_info->peripheral, fwd_info->type);
+
+	return 0;
+}
+
+/*
+ * Entry point for a transport's read completion.  A zero @len signals a
+ * transport-level read error: the matching buffer is just released and
+ * the wakeup source dropped.  Otherwise the payload is dispatched through
+ * the channel-type read_done hook and accounted in read_bytes.
+ * Returns 0 on success, -EIO on NULL fwd_info.
+ */
+int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
+			      unsigned char *buf, uint32_t len)
+{
+	if (!fwd_info) {
+		diag_ws_release();
+		return -EIO;
+	}
+
+	/*
+	 * Diag peripheral layers should send len as 0 if there is any error
+	 * in reading data from the transport. Use this information to reset the
+	 * in_busy flags. No need to queue read in this case.
+	 */
+	if (len == 0) {
+		diagfwd_reset_buffers(fwd_info, buf);
+		diag_ws_release();
+		return 0;
+	}
+
+	if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->read_done)
+		fwd_info->c_ops->read_done(fwd_info, buf, len);
+	fwd_info->read_bytes += len;
+
+	return 0;
+}
+
+/*
+ * Completion callback after a mux write: release the buffer identified by
+ * @ctxt (1 = buf_1, 2 = buf_2, matching the value given to SET_BUF_CTXT)
+ * and queue the next read for the channel.
+ */
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+		return;
+
+	fwd_info = &peripheral_info[type][peripheral];
+	if (ctxt == 1 && fwd_info->buf_1)
+		atomic_set(&fwd_info->buf_1->in_busy, 0);
+	else if (ctxt == 2 && fwd_info->buf_2)
+		atomic_set(&fwd_info->buf_2->in_busy, 0);
+	else
+		pr_err("diag: In %s, invalid ctxt %d\n", __func__, ctxt);
+
+	diagfwd_queue_read(fwd_info);
+}
+
+/*
+ * Issue a read on the channel into the first free buffer (buf_1, then
+ * buf_2).  When the peripheral supports apps-side HDLC encoding on a
+ * DATA/CMD channel the read targets the raw buffer; otherwise the
+ * encoded-data buffer is used.  The chosen buffer is marked in_busy
+ * before the read is handed to the transport; on any failure the busy
+ * flag is cleared and the wakeup source released.
+ */
+void diagfwd_channel_read(struct diagfwd_info *fwd_info)
+{
+	int err = 0;
+	uint32_t read_len = 0;
+	unsigned char *read_buf = NULL;
+	struct diagfwd_buf_t *temp_buf = NULL;
+
+	if (!fwd_info) {
+		diag_ws_release();
+		return;
+	}
+
+	if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) {
+		pr_debug("diag: In %s, p: %d, t: %d, inited: %d, opened: %d  ch_open: %d\n",
+			 __func__, fwd_info->peripheral, fwd_info->type,
+			 fwd_info->inited, atomic_read(&fwd_info->opened),
+			 fwd_info->ch_open);
+		diag_ws_release();
+		return;
+	}
+
+	/* Prefer buf_1; fall back to buf_2 (DATA channels only have two). */
+	if (fwd_info->buf_1 && !atomic_read(&fwd_info->buf_1->in_busy)) {
+		if (driver->feature[fwd_info->peripheral].encode_hdlc &&
+		    (fwd_info->type == TYPE_DATA ||
+		     fwd_info->type == TYPE_CMD)) {
+			read_buf = fwd_info->buf_1->data_raw;
+			read_len = fwd_info->buf_1->len_raw;
+		} else {
+			read_buf = fwd_info->buf_1->data;
+			read_len = fwd_info->buf_1->len;
+		}
+		if (read_buf) {
+			temp_buf = fwd_info->buf_1;
+			atomic_set(&temp_buf->in_busy, 1);
+		}
+	} else if (fwd_info->buf_2 && !atomic_read(&fwd_info->buf_2->in_busy)) {
+		if (driver->feature[fwd_info->peripheral].encode_hdlc &&
+		    (fwd_info->type == TYPE_DATA ||
+		     fwd_info->type == TYPE_CMD)) {
+			read_buf = fwd_info->buf_2->data_raw;
+			read_len = fwd_info->buf_2->len_raw;
+		} else {
+			read_buf = fwd_info->buf_2->data;
+			read_len = fwd_info->buf_2->len;
+		}
+		if (read_buf) {
+			temp_buf = fwd_info->buf_2;
+			atomic_set(&temp_buf->in_busy, 1);
+		}
+	} else {
+		pr_debug("diag: In %s, both buffers are empty for p: %d, t: %d\n",
+			 __func__, fwd_info->peripheral, fwd_info->type);
+	}
+
+	if (!read_buf) {
+		diag_ws_release();
+		return;
+	}
+
+	if (!(fwd_info->p_ops && fwd_info->p_ops->read && fwd_info->ctxt))
+		goto fail_return;
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "issued a read p: %d t: %d buf: %pK\n",
+		 fwd_info->peripheral, fwd_info->type, read_buf);
+	err = fwd_info->p_ops->read(fwd_info->ctxt, read_buf, read_len);
+	if (err)
+		goto fail_return;
+
+	return;
+
+fail_return:
+	/* read_buf != NULL guarantees temp_buf was set above. */
+	diag_ws_release();
+	atomic_set(&temp_buf->in_busy, 0);
+	return;
+}
+
+/*
+ * Ask the transport to schedule the next read for this channel, provided
+ * it is inited and opened.  Non-control channels additionally wait for
+ * the peripheral's feature mask (see comment below).
+ */
+static void diagfwd_queue_read(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+
+	if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) {
+		pr_debug("diag: In %s, p: %d, t: %d, inited: %d, opened: %d  ch_open: %d\n",
+			 __func__, fwd_info->peripheral, fwd_info->type,
+			 fwd_info->inited, atomic_read(&fwd_info->opened),
+			 fwd_info->ch_open);
+		return;
+	}
+
+	/*
+	 * Don't queue a read on the data and command channels before receiving
+	 * the feature mask from the peripheral. We won't know which buffer to
+	 * use - HDLC or non HDLC buffer for reading.
+	 */
+	if ((!driver->feature[fwd_info->peripheral].rcvd_feature_mask) &&
+	    (fwd_info->type != TYPE_CNTL)) {
+		return;
+	}
+
+	if (fwd_info->p_ops && fwd_info->p_ops->queue_read && fwd_info->ctxt)
+		fwd_info->p_ops->queue_read(fwd_info->ctxt);
+}
+
+/*
+ * Allocate the channel's read buffers under buf_mutex (idempotent — each
+ * allocation is skipped if already present):
+ *   - buf_1 for every channel type;
+ *   - buf_2 additionally for TYPE_DATA (double buffering);
+ *   - data_raw backing stores on DATA (both buffers) and CMD (buf_1 only)
+ *     when the apps processor performs HDLC encoding itself.
+ * On any allocation failure, everything allocated for this channel is
+ * torn down again via diagfwd_buffers_exit().
+ */
+void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
+{
+
+	if (!fwd_info)
+		return;
+
+	if (!fwd_info->inited) {
+		pr_err("diag: In %s, channel not inited, p: %d, t: %d\n",
+		       __func__, fwd_info->peripheral, fwd_info->type);
+		return;
+	}
+
+	mutex_lock(&fwd_info->buf_mutex);
+	if (!fwd_info->buf_1) {
+		fwd_info->buf_1 = kzalloc(sizeof(struct diagfwd_buf_t),
+					  GFP_KERNEL);
+		if (!fwd_info->buf_1)
+			goto err;
+		kmemleak_not_leak(fwd_info->buf_1);
+	}
+	if (!fwd_info->buf_1->data) {
+		fwd_info->buf_1->data = kzalloc(PERIPHERAL_BUF_SZ +
+					APF_DIAG_PADDING,
+					GFP_KERNEL);
+		if (!fwd_info->buf_1->data)
+			goto err;
+		fwd_info->buf_1->len = PERIPHERAL_BUF_SZ;
+		kmemleak_not_leak(fwd_info->buf_1->data);
+		fwd_info->buf_1->ctxt = SET_BUF_CTXT(fwd_info->peripheral,
+						     fwd_info->type, 1);
+	}
+
+	if (fwd_info->type == TYPE_DATA) {
+		if (!fwd_info->buf_2) {
+			fwd_info->buf_2 = kzalloc(sizeof(struct diagfwd_buf_t),
+					      GFP_KERNEL);
+			if (!fwd_info->buf_2)
+				goto err;
+			kmemleak_not_leak(fwd_info->buf_2);
+		}
+
+		if (!fwd_info->buf_2->data) {
+			fwd_info->buf_2->data = kzalloc(PERIPHERAL_BUF_SZ +
+							APF_DIAG_PADDING,
+						    GFP_KERNEL);
+			if (!fwd_info->buf_2->data)
+				goto err;
+			fwd_info->buf_2->len = PERIPHERAL_BUF_SZ;
+			kmemleak_not_leak(fwd_info->buf_2->data);
+			fwd_info->buf_2->ctxt = SET_BUF_CTXT(
+							fwd_info->peripheral,
+							fwd_info->type, 2);
+		}
+
+		if (driver->supports_apps_hdlc_encoding) {
+			/* In support of hdlc encoding */
+			if (!fwd_info->buf_1->data_raw) {
+				fwd_info->buf_1->data_raw =
+					kzalloc(PERIPHERAL_BUF_SZ +
+						APF_DIAG_PADDING,
+						GFP_KERNEL);
+				if (!fwd_info->buf_1->data_raw)
+					goto err;
+				fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
+				kmemleak_not_leak(fwd_info->buf_1->data_raw);
+			}
+			if (!fwd_info->buf_2->data_raw) {
+				fwd_info->buf_2->data_raw =
+					kzalloc(PERIPHERAL_BUF_SZ +
+						APF_DIAG_PADDING,
+						GFP_KERNEL);
+				if (!fwd_info->buf_2->data_raw)
+					goto err;
+				fwd_info->buf_2->len_raw = PERIPHERAL_BUF_SZ;
+				kmemleak_not_leak(fwd_info->buf_2->data_raw);
+			}
+		}
+	}
+
+	if (fwd_info->type == TYPE_CMD && driver->supports_apps_hdlc_encoding) {
+		/* In support of hdlc encoding */
+		if (!fwd_info->buf_1->data_raw) {
+			fwd_info->buf_1->data_raw = kzalloc(PERIPHERAL_BUF_SZ +
+						APF_DIAG_PADDING,
+							GFP_KERNEL);
+			if (!fwd_info->buf_1->data_raw)
+				goto err;
+			fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
+			kmemleak_not_leak(fwd_info->buf_1->data_raw);
+		}
+	}
+
+	mutex_unlock(&fwd_info->buf_mutex);
+	return;
+
+err:
+	/* Unlock first: diagfwd_buffers_exit() takes buf_mutex itself. */
+	mutex_unlock(&fwd_info->buf_mutex);
+	diagfwd_buffers_exit(fwd_info);
+
+	return;
+}
+
+/*
+ * Free both channel buffers and their data/data_raw backing stores under
+ * buf_mutex, leaving all pointers NULL so diagfwd_buffers_init() can
+ * safely re-allocate later.
+ */
+static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info)
+{
+
+	if (!fwd_info)
+		return;
+
+	mutex_lock(&fwd_info->buf_mutex);
+	if (fwd_info->buf_1) {
+		kfree(fwd_info->buf_1->data);
+		fwd_info->buf_1->data = NULL;
+		kfree(fwd_info->buf_1->data_raw);
+		fwd_info->buf_1->data_raw = NULL;
+		kfree(fwd_info->buf_1);
+		fwd_info->buf_1 = NULL;
+	}
+	if (fwd_info->buf_2) {
+		kfree(fwd_info->buf_2->data);
+		fwd_info->buf_2->data = NULL;
+		kfree(fwd_info->buf_2->data_raw);
+		fwd_info->buf_2->data_raw = NULL;
+		kfree(fwd_info->buf_2);
+		fwd_info->buf_2 = NULL;
+	}
+	mutex_unlock(&fwd_info->buf_mutex);
+}
+
diff -Nruw linux-6.4-fbx/drivers/char/diag./diagfwd_peripheral.h linux-6.4-fbx/drivers/char/diag/diagfwd_peripheral.h
--- linux-6.4-fbx/drivers/char/diag./diagfwd_peripheral.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diagfwd_peripheral.h	2023-03-15 19:52:23.517979189 +0100
@@ -0,0 +1,112 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_PERIPHERAL_H
+#define DIAGFWD_PERIPHERAL_H
+
+/* Default size of each peripheral read buffer, in bytes. */
+#define PERIPHERAL_BUF_SZ		16384
+#define MAX_PERIPHERAL_BUF_SZ		32768
+#define MAX_PERIPHERAL_HDLC_BUF_SZ	65539
+
+/* Transport identifiers for the SMD/socket channel race. */
+#define TRANSPORT_UNKNOWN		-1
+#define TRANSPORT_SMD			0
+#define TRANSPORT_SOCKET		1
+#define NUM_TRANSPORT			2
+
+/* Map a peripheral id to its DIAG_CON_* connection mask (0 if unknown). */
+#define PERIPHERAL_MASK(x)					\
+	((x == PERIPHERAL_MODEM) ? DIAG_CON_MPSS :		\
+	((x == PERIPHERAL_LPASS) ? DIAG_CON_LPASS :		\
+	((x == PERIPHERAL_WCNSS) ? DIAG_CON_WCNSS :		\
+	((x == PERIPHERAL_SENSORS) ? DIAG_CON_SENSORS : 0))))	\
+
+/* Map a peripheral id to a printable name for logging. */
+#define PERIPHERAL_STRING(x)					\
+	((x == PERIPHERAL_MODEM) ? "MODEM" :			\
+	((x == PERIPHERAL_LPASS) ? "LPASS" :			\
+	((x == PERIPHERAL_WCNSS) ? "WCNSS" :			\
+	((x == PERIPHERAL_SENSORS) ? "SENSORS" : "UNKNOWN"))))	\
+
+/*
+ * One read buffer of a forwarding channel.  data holds (possibly HDLC
+ * encoded) payload; data_raw is the pre-encoding backing store used when
+ * the apps processor performs the HDLC encoding itself.
+ */
+struct diagfwd_buf_t {
+	unsigned char *data;
+	unsigned char *data_raw;
+	uint32_t len;
+	uint32_t len_raw;
+	atomic_t in_busy;	/* nonzero while owned by a read/write */
+	int ctxt;		/* SET_BUF_CTXT(peripheral, type, buf_num) */
+};
+
+/* Per-channel-type callbacks (control vs. data vs. DCI channels). */
+struct diag_channel_ops {
+	void (*open)(struct diagfwd_info *fwd_info);
+	void (*close)(struct diagfwd_info *fwd_info);
+	void (*read_done)(struct diagfwd_info *fwd_info,
+			  unsigned char *buf, int len);
+};
+
+/* Per-transport callbacks (SMD or socket implementation). */
+struct diag_peripheral_ops {
+	void (*open)(void *ctxt);
+	void (*close)(void *ctxt);
+	int (*write)(void *ctxt, unsigned char *buf, int len);
+	int (*read)(void *ctxt, unsigned char *buf, int len);
+	void (*queue_read)(void *ctxt);
+};
+
+/* State of one forwarding channel (one peripheral/type pair). */
+struct diagfwd_info {
+	uint8_t peripheral;
+	uint8_t type;
+	uint8_t transport;
+	uint8_t inited;
+	uint8_t ch_open;
+	atomic_t opened;	/* reflects diag state for this channel */
+	unsigned long read_bytes;
+	unsigned long write_bytes;
+	struct mutex buf_mutex;	/* guards buf_1/buf_2 alloc/free */
+	struct mutex data_mutex;
+	void *ctxt;		/* transport-private context */
+	struct diagfwd_buf_t *buf_1;
+	struct diagfwd_buf_t *buf_2;	/* TYPE_DATA channels only */
+	struct diag_peripheral_ops *p_ops;
+	struct diag_channel_ops *c_ops;
+};
+
+extern struct diagfwd_info peripheral_info[NUM_TYPES][NUM_PERIPHERALS];
+
+int diagfwd_peripheral_init(void);
+void diagfwd_peripheral_exit(void);
+
+void diagfwd_close_transport(uint8_t transport, uint8_t peripheral);
+
+void diagfwd_open(uint8_t peripheral, uint8_t type);
+void diagfwd_early_open(uint8_t peripheral);
+
+void diagfwd_late_open(struct diagfwd_info *fwd_info);
+void diagfwd_close(uint8_t peripheral, uint8_t type);
+int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type,
+		     void *ctxt, struct diag_peripheral_ops *ops,
+		     struct diagfwd_info **fwd_ctxt);
+int diagfwd_cntl_register(uint8_t transport, uint8_t peripheral, void *ctxt,
+			  struct diag_peripheral_ops *ops,
+			  struct diagfwd_info **fwd_ctxt);
+void diagfwd_deregister(uint8_t peripheral, uint8_t type, void *ctxt);
+
+int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len);
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt);
+void diagfwd_buffers_init(struct diagfwd_info *fwd_info);
+
+/*
+ * The following functions are called by the channels
+ */
+int diagfwd_channel_open(struct diagfwd_info *fwd_info);
+int diagfwd_channel_close(struct diagfwd_info *fwd_info);
+void diagfwd_channel_read(struct diagfwd_info *fwd_info);
+int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
+			      unsigned char *buf, uint32_t len);
+
+#endif
diff -Nruw linux-6.4-fbx/drivers/char/diag./diagfwd_socket.h linux-6.4-fbx/drivers/char/diag/diagfwd_socket.h
--- linux-6.4-fbx/drivers/char/diag./diagfwd_socket.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diagfwd_socket.h	2023-03-15 19:52:23.521979297 +0100
@@ -0,0 +1,121 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_SOCKET_H
+#define DIAGFWD_SOCKET_H
+
+#include <linux/socket.h>
+#ifdef CONFIG_DIAG_OVER_QRTR
+#include <linux/soc/qcom/qmi.h>
+#else
+#include <linux/msm_ipc.h>
+#endif
+
+#define DIAG_SOCKET_NAME_SZ		24
+
+/* QMI/IPC-router service identity of the modem diag service. */
+#define DIAG_SOCK_MODEM_SVC_ID		64
+#define DIAG_SOCK_MODEM_INS_ID		3
+
+#define PORT_TYPE_SERVER		0
+#define PORT_TYPE_CLIENT		1
+
+/* Peripheral lifecycle states as seen by the socket layer. */
+#define PERIPHERAL_AFTER_BOOT		0
+#define PERIPHERAL_SSR_DOWN		1
+#define PERIPHERAL_SSR_UP		2
+
+/* Control commands on the router control port. */
+#define CNTL_CMD_NEW_SERVER		4
+#define CNTL_CMD_REMOVE_SERVER		5
+#define CNTL_CMD_REMOVE_CLIENT		6
+
+enum {
+	SOCKET_MODEM,
+	SOCKET_ADSP,
+	SOCKET_WCNSS,
+	SOCKET_SLPI,
+	SOCKET_APPS,
+	NUM_SOCKET_SUBSYSTEMS,
+};
+
+/* Per-channel socket transport state. */
+struct diag_socket_info {
+	uint8_t peripheral;
+	uint8_t type;
+	uint8_t port_type;	/* PORT_TYPE_SERVER or PORT_TYPE_CLIENT */
+	uint8_t inited;
+	atomic_t opened;
+	atomic_t diag_state;
+	uint32_t pkt_len;	/* length of the packet being reassembled */
+	uint32_t pkt_read;	/* bytes of that packet read so far */
+	uint32_t svc_id;
+	uint32_t ins_id;
+	uint32_t data_ready;
+	atomic_t flow_cnt;
+	char name[DIAG_SOCKET_NAME_SZ];
+	spinlock_t lock;
+	wait_queue_head_t wait_q;
+#ifdef CONFIG_DIAG_OVER_QRTR
+	struct sockaddr_qrtr remote_addr;
+#else
+	struct sockaddr_msm_ipc remote_addr;
+#endif
+	struct socket *hdl;
+	struct workqueue_struct *wq;
+	struct work_struct init_work;
+	struct work_struct read_work;
+	struct diagfwd_info *fwd_ctxt;
+	wait_queue_head_t read_wait_q;
+};
+
+/* Messages received on the router control port (server vs. client form). */
+union cntl_port_msg {
+	struct {
+		uint32_t cmd;
+		uint32_t service;
+		uint32_t instance;
+		uint32_t node_id;
+		uint32_t port_id;
+	} srv;
+	struct {
+		uint32_t cmd;
+		uint32_t node_id;
+		uint32_t port_id;
+	} cli;
+};
+
+/* State of the single control socket watching for service changes. */
+struct diag_cntl_socket_info {
+	uint32_t svc_id;
+	uint32_t ins_id;
+	atomic_t data_ready;
+	struct workqueue_struct *wq;
+	struct work_struct read_work;
+	struct work_struct init_work;
+	wait_queue_head_t read_wait_q;
+	struct socket *hdl;
+};
+
+extern struct diag_socket_info socket_data[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_cntl[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_dci[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_cmd[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_dci_cmd[NUM_PERIPHERALS];
+
+#ifdef CONFIG_DIAG_OVER_QRTR
+extern struct qmi_handle *cntl_qmi;
+#else
+extern struct diag_cntl_socket_info *cntl_socket;
+#endif
+
+int diag_socket_init(void);
+int diag_socket_init_peripheral(uint8_t peripheral);
+void diag_socket_exit(void);
+void diag_socket_early_exit(void);
+void diag_socket_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt);
+int diag_socket_check_state(void *ctxt);
+#endif
diff -Nruw linux-6.4-fbx/drivers/char/diag./diagmem.c linux-6.4-fbx/drivers/char/diag/diagmem.c
--- linux-6.4-fbx/drivers/char/diag./diagmem.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diagmem.c	2023-03-15 19:52:23.521979297 +0100
@@ -0,0 +1,294 @@
+/* Copyright (c) 2008-2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <linux/ratelimit.h>
+#include <asm/atomic.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+
+#include "diagchar.h"
+#include "diagmem.h"
+
+/*
+ * Static table of all diag memory pools, indexed to match the POOL_TYPE_*
+ * ids.  Sizes (itemsize/poolsize) start at 0 and are filled in later via
+ * diagmem_setsize() before diagmem_init() creates the backing mempool.
+ * The MDM/QSC entries only exist when the bridge code is configured.
+ */
+struct diag_mempool_t diag_mempools[NUM_MEMORY_POOLS] = {
+	{
+		.id = POOL_TYPE_COPY,
+		.name = "POOL_COPY",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_HDLC,
+		.name = "POOL_HDLC",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_USER,
+		.name = "POOL_USER",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MUX_APPS,
+		.name = "POOL_MUX_APPS",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_DCI,
+		.name = "POOL_DCI",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	{
+		.id = POOL_TYPE_MDM,
+		.name = "POOL_MDM",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM2,
+		.name = "POOL_MDM2",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM_DCI,
+		.name = "POOL_MDM_DCI",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM2_DCI,
+		.name = "POOL_MDM2_DCI",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM_MUX,
+		.name = "POOL_MDM_MUX",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM2_MUX,
+		.name = "POOL_MDM2_MUX",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM_DCI_WRITE,
+		.name = "POOL_MDM_DCI_WRITE",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM2_DCI_WRITE,
+		.name = "POOL_MDM2_DCI_WRITE",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_QSC_MUX,
+		.name = "POOL_QSC_MUX",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	}
+#endif
+};
+
+/*
+ * Record the item size and item count for pool @pool_idx.  Must be called
+ * before diagmem_init() creates the pool; has no effect on an
+ * already-created mempool.
+ */
+void diagmem_setsize(int pool_idx, int itemsize, int poolsize)
+{
+	if (pool_idx < 0 || pool_idx >= NUM_MEMORY_POOLS) {
+		pr_err("diag: Invalid pool index %d in %s\n", pool_idx,
+		       __func__);
+		return;
+	}
+
+	diag_mempools[pool_idx].itemsize = itemsize;
+	diag_mempools[pool_idx].poolsize = poolsize;
+	pr_debug("diag: Mempool %s sizes: itemsize %d poolsize %d\n",
+		 diag_mempools[pool_idx].name, diag_mempools[pool_idx].itemsize,
+		 diag_mempools[pool_idx].poolsize);
+}
+
+/*
+ * Allocate one item of at most @size bytes from the pool whose id is
+ * @pool_type.  Fails (returns NULL) if the pool is uninitialized, @size
+ * exceeds the pool's itemsize, or the pool is already at capacity.
+ * Safe from atomic context (GFP_ATOMIC, spinlock-protected accounting).
+ */
+void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type)
+{
+	void *buf = NULL;
+	int i = 0;
+	unsigned long flags;
+	struct diag_mempool_t *mempool = NULL;
+
+	if (!driver)
+		return NULL;
+
+	/* Linear scan: pool ids are not guaranteed to equal array indices. */
+	for (i = 0; i < NUM_MEMORY_POOLS; i++) {
+		mempool = &diag_mempools[i];
+		if (pool_type != mempool->id)
+			continue;
+		if (!mempool->pool) {
+			pr_err_ratelimited("diag: %s mempool is not initialized yet\n",
+					   mempool->name);
+			break;
+		}
+		if (size == 0 || size > mempool->itemsize) {
+			pr_err_ratelimited("diag: cannot alloc from mempool %s, invalid size: %d\n",
+					   mempool->name, size);
+			break;
+		}
+		spin_lock_irqsave(&mempool->lock, flags);
+		if (mempool->count < mempool->poolsize) {
+			/* count is a plain int updated atomically under the
+			 * lock; the atomic_t cast mirrors diagmem_free(). */
+			atomic_add(1, (atomic_t *)&mempool->count);
+			buf = mempool_alloc(mempool->pool, GFP_ATOMIC);
+			kmemleak_not_leak(buf);
+		}
+		spin_unlock_irqrestore(&mempool->lock, flags);
+		if (!buf) {
+			pr_debug_ratelimited("diag: Unable to allocate buffer from memory pool %s, size: %d/%d count: %d/%d\n",
+					     mempool->name,
+					     size, mempool->itemsize,
+					     mempool->count,
+					     mempool->poolsize);
+		}
+		break;
+	}
+
+	return buf;
+}
+
+/*
+ * Return @buf to the pool whose id is @pool_type and decrement the usage
+ * count.  Logs (rate-limited) if the pool is uninitialized or its count
+ * is already zero.  Counterpart of diagmem_alloc().
+ */
+void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type)
+{
+	int i = 0;
+	unsigned long flags;
+	struct diag_mempool_t *mempool = NULL;
+
+	if (!driver || !buf)
+		return;
+
+	for (i = 0; i < NUM_MEMORY_POOLS; i++) {
+		mempool = &diag_mempools[i];
+		if (pool_type != mempool->id)
+			continue;
+		if (!mempool->pool) {
+			pr_err_ratelimited("diag: %s mempool is not initialized yet\n",
+					   mempool->name);
+			break;
+		}
+		spin_lock_irqsave(&mempool->lock, flags);
+		if (mempool->count > 0) {
+			mempool_free(buf, mempool->pool);
+			atomic_add(-1, (atomic_t *)&mempool->count);
+		} else {
+			pr_err_ratelimited("diag: Attempting to free items from %s mempool which is already empty\n",
+					   mempool->name);
+		}
+		spin_unlock_irqrestore(&mempool->lock, flags);
+		break;
+	}
+}
+
+/*
+ * Create the kmalloc-backed mempool for diag_mempools[@index] using the
+ * sizes previously set via diagmem_setsize().  Idempotent: an
+ * already-created pool is left untouched.
+ */
+void diagmem_init(struct diagchar_dev *driver, int index)
+{
+	struct diag_mempool_t *mempool = NULL;
+	if (!driver)
+		return;
+
+	if (index < 0 || index >= NUM_MEMORY_POOLS) {
+		pr_err("diag: In %s, Invalid index %d\n", __func__, index);
+		return;
+	}
+
+	mempool = &diag_mempools[index];
+	if (mempool->pool) {
+		pr_debug("diag: mempool %s is already initialized\n",
+			 mempool->name);
+		return;
+	}
+	if (mempool->itemsize <= 0 || mempool->poolsize <= 0) {
+		pr_err("diag: Unable to initialize %s mempool, itemsize: %d poolsize: %d\n",
+		       mempool->name, mempool->itemsize,
+		       mempool->poolsize);
+		return;
+	}
+
+	mempool->pool = mempool_create_kmalloc_pool(mempool->poolsize,
+						    mempool->itemsize);
+	if (!mempool->pool)
+		pr_err("diag: cannot allocate %s mempool\n", mempool->name);
+	else
+		kmemleak_not_leak(mempool->pool);
+
+	/* Lock is (re)initialized even if pool creation failed above. */
+	spin_lock_init(&mempool->lock);
+}
+
+/*
+ * Destroy the mempool at diag_mempools[@index].  The pool is only torn
+ * down when no items are outstanding (count == 0); otherwise an error is
+ * logged and the pool is left alive so in-flight buffers stay valid.
+ *
+ * Fix: corrected the misspelled error message ("destory" -> "destroy").
+ */
+void diagmem_exit(struct diagchar_dev *driver, int index)
+{
+	unsigned long flags;
+	struct diag_mempool_t *mempool = NULL;
+
+	if (!driver)
+		return;
+
+	if (index < 0 || index >= NUM_MEMORY_POOLS) {
+		pr_err("diag: In %s, Invalid index %d\n", __func__, index);
+		return;
+	}
+
+	mempool = &diag_mempools[index];
+	spin_lock_irqsave(&mempool->lock, flags);
+	if (mempool->count == 0 && mempool->pool != NULL) {
+		mempool_destroy(mempool->pool);
+		mempool->pool = NULL;
+	} else {
+		pr_err("diag: Unable to destroy %s pool, count: %d\n",
+		       mempool->name, mempool->count);
+	}
+	spin_unlock_irqrestore(&mempool->lock, flags);
+}
+
diff -Nruw linux-6.4-fbx/drivers/char/diag./diagmem.h linux-6.4-fbx/drivers/char/diag/diagmem.h
--- linux-6.4-fbx/drivers/char/diag./diagmem.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/diagmem.h	2023-03-15 19:52:23.521979297 +0100
@@ -0,0 +1,63 @@
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGMEM_H
+#define DIAGMEM_H
+#include "diagchar.h"
+
+#define POOL_TYPE_COPY			0
+#define POOL_TYPE_HDLC			1
+#define POOL_TYPE_USER			2
+#define POOL_TYPE_MUX_APPS		3
+#define POOL_TYPE_DCI			4
+#define POOL_TYPE_LOCAL_LAST		5
+
+#define POOL_TYPE_REMOTE_BASE		POOL_TYPE_LOCAL_LAST
+#define POOL_TYPE_MDM			POOL_TYPE_REMOTE_BASE
+#define POOL_TYPE_MDM2			(POOL_TYPE_REMOTE_BASE + 1)
+#define POOL_TYPE_MDM_DCI		(POOL_TYPE_REMOTE_BASE + 2)
+#define POOL_TYPE_MDM2_DCI		(POOL_TYPE_REMOTE_BASE + 3)
+#define POOL_TYPE_MDM_MUX		(POOL_TYPE_REMOTE_BASE + 4)
+#define POOL_TYPE_MDM2_MUX		(POOL_TYPE_REMOTE_BASE + 5)
+#define POOL_TYPE_MDM_DCI_WRITE		(POOL_TYPE_REMOTE_BASE + 6)
+#define POOL_TYPE_MDM2_DCI_WRITE	(POOL_TYPE_REMOTE_BASE + 7)
+#define POOL_TYPE_QSC_MUX		(POOL_TYPE_REMOTE_BASE + 8)
+#define POOL_TYPE_REMOTE_LAST		(POOL_TYPE_REMOTE_BASE + 9)
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_MEMORY_POOLS		POOL_TYPE_REMOTE_LAST
+#else
+#define NUM_MEMORY_POOLS		POOL_TYPE_LOCAL_LAST
+#endif
+
+#define DIAG_MEMPOOL_NAME_SZ		24
+#define DIAG_MEMPOOL_GET_NAME(x)	(diag_mempools[x].name)
+
+struct diag_mempool_t {
+	int id;
+	char name[DIAG_MEMPOOL_NAME_SZ];
+	mempool_t *pool;
+	unsigned int itemsize;
+	unsigned int poolsize;
+	int count;
+	spinlock_t lock;
+} __packed;
+
+extern struct diag_mempool_t diag_mempools[NUM_MEMORY_POOLS];
+
+void diagmem_setsize(int pool_idx, int itemsize, int poolsize);
+void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type);
+void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type);
+void diagmem_init(struct diagchar_dev *driver, int type);
+void diagmem_exit(struct diagchar_dev *driver, int type);
+
+#endif
diff -Nruw linux-6.4-fbx/drivers/char/diag./qcom_diagfwd_socket.c linux-6.4-fbx/drivers/char/diag/qcom_diagfwd_socket.c
--- linux-6.4-fbx/drivers/char/diag./qcom_diagfwd_socket.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/char/diag/qcom_diagfwd_socket.c	2023-10-05 12:33:41.363634732 +0200
@@ -0,0 +1,1128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/socket.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/diagchar.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <asm/current.h>
+#include <net/sock.h>
+#include <linux/notifier.h>
+#include <linux/qrtr.h>
+#include <linux/termios.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_socket.h"
+#include "diag_ipc_logging.h"
+
+#define DIAG_SVC_ID		0x1001
+
+#define MODEM_INST_BASE		0
+#define LPASS_INST_BASE		64
+#define WCNSS_INST_BASE		128
+#define SENSORS_INST_BASE	192
+#define CDSP_INST_BASE		256
+#define WDSP_INST_BASE		320
+
+#define INST_ID_CNTL		0
+#define INST_ID_CMD		1
+#define INST_ID_DATA		2
+#define INST_ID_DCI_CMD		3
+#define INST_ID_DCI		4
+
+#define MAX_BUF_SIZE 		0x4400
+#define MAX_NO_PACKETS		10
+#define DIAG_SO_RCVBUF_SIZE	(MAX_BUF_SIZE * MAX_NO_PACKETS)
+
+struct qmi_handle *cntl_qmi;
+static uint64_t bootup_req[NUM_SOCKET_SUBSYSTEMS];
+
+static unsigned long peripheral_mask = BIT(PERIPHERAL_WCNSS);
+module_param(peripheral_mask, ulong, S_IRUGO | S_IWUSR | S_IWGRP);
+
+struct diag_socket_info socket_data[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DATA,
+		.name = "MODEM_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DATA,
+		.name = "LPASS_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DATA,
+		.name = "WCNSS_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DATA,
+		.name = "SENSORS_DATA"
+	}
+};
+
+struct diag_socket_info socket_cntl[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_CNTL,
+		.name = "MODEM_CNTL"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_CNTL,
+		.name = "LPASS_CNTL"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_CNTL,
+		.name = "WCNSS_CNTL"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_CNTL,
+		.name = "SENSORS_CNTL"
+	}
+};
+
+struct diag_socket_info socket_dci[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DCI,
+		.name = "MODEM_DCI"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DCI,
+		.name = "LPASS_DCI"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DCI,
+		.name = "WCNSS_DCI"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DCI,
+		.name = "SENSORS_DCI"
+	}
+};
+
+struct diag_socket_info socket_cmd[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_CMD,
+		.name = "MODEM_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_CMD,
+		.name = "LPASS_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_CMD,
+		.name = "WCNSS_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_CMD,
+		.name = "SENSORS_CMD"
+	}
+};
+
+struct diag_socket_info socket_dci_cmd[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DCI_CMD,
+		.name = "MODEM_DCI_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DCI_CMD,
+		.name = "LPASS_DCI_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DCI_CMD,
+		.name = "WCNSS_DCI_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DCI_CMD,
+		.name = "SENSORS_DCI_CMD"
+	}
+};
+
+#if 0 // QCOM_DRIVERS_NOT_AVAILABLE
+struct restart_notifier_block {
+	unsigned int processor;
+	char *name;
+	struct notifier_block nb;
+};
+
+static int restart_notifier_cb(struct notifier_block *this, unsigned long code,
+	void *_cmd)
+{
+	struct restart_notifier_block *notifier;
+
+	notifier = container_of(this,
+			struct restart_notifier_block, nb);
+	if (!notifier) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: %s: invalid notifier block\n", __func__);
+		return NOTIFY_DONE;
+	}
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"%s: ssr for processor %d ('%s')\n",
+		__func__, notifier->processor, notifier->name);
+
+	switch (code) {
+
+	case SUBSYS_BEFORE_SHUTDOWN:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: %s: SUBSYS_BEFORE_SHUTDOWN\n", __func__);
+		mutex_lock(&driver->diag_notifier_mutex);
+		bootup_req[notifier->processor] = PERIPHERAL_SSR_DOWN;
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: bootup_req[%s] = %d\n",
+		notifier->name, (int)bootup_req[notifier->processor]);
+		mutex_unlock(&driver->diag_notifier_mutex);
+		break;
+
+	case SUBSYS_AFTER_SHUTDOWN:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: %s: SUBSYS_AFTER_SHUTDOWN\n", __func__);
+		break;
+
+	case SUBSYS_BEFORE_POWERUP:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: %s: SUBSYS_BEFORE_POWERUP\n", __func__);
+		break;
+
+	case SUBSYS_AFTER_POWERUP:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: %s: SUBSYS_AFTER_POWERUP\n", __func__);
+		mutex_lock(&driver->diag_notifier_mutex);
+		if (!bootup_req[notifier->processor]) {
+			bootup_req[notifier->processor] = PERIPHERAL_SSR_DOWN;
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: bootup_req[%s] = %d\n",
+			notifier->name, (int)bootup_req[notifier->processor]);
+			mutex_unlock(&driver->diag_notifier_mutex);
+			break;
+		}
+		bootup_req[notifier->processor] = PERIPHERAL_SSR_UP;
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: bootup_req[%s] = %d\n",
+		notifier->name, (int)bootup_req[notifier->processor]);
+		mutex_unlock(&driver->diag_notifier_mutex);
+		break;
+
+	default:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag: code: %lu\n", code);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct restart_notifier_block restart_notifiers[] = {
+	{SOCKET_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
+	{SOCKET_ADSP, "adsp", .nb.notifier_call = restart_notifier_cb},
+	{SOCKET_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
+	{SOCKET_SLPI, "slpi", .nb.notifier_call = restart_notifier_cb},
+};
+#endif
+
+void diag_socket_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (!ctxt || !fwd_ctxt)
+		return;
+
+	info = (struct diag_socket_info *)ctxt;
+	info->fwd_ctxt = fwd_ctxt;
+}
+
+int diag_socket_check_state(void *ctxt)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (!ctxt)
+		return 0;
+
+	info = (struct diag_socket_info *)ctxt;
+	return (int)(atomic_read(&info->diag_state));
+}
+
+static void diag_state_open_socket(void *ctxt);
+static void diag_state_close_socket(void *ctxt);
+static int diag_socket_write(void *ctxt, unsigned char *buf, int len);
+static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len);
+static void diag_socket_queue_read(void *ctxt);
+
+static struct diag_peripheral_ops socket_ops = {
+	.open = diag_state_open_socket,
+	.close = diag_state_close_socket,
+	.write = diag_socket_write,
+	.read = diag_socket_read,
+	.queue_read = diag_socket_queue_read
+};
+
+static void diag_state_open_socket(void *ctxt)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (!ctxt)
+		return;
+
+	info = (struct diag_socket_info *)(ctxt);
+	atomic_set(&info->diag_state, 1);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		 "%s setting diag state to 1", info->name);
+}
+
+static void diag_state_close_socket(void *ctxt)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (!ctxt)
+		return;
+
+	info = (struct diag_socket_info *)(ctxt);
+	atomic_set(&info->diag_state, 0);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		 "%s setting diag state to 0", info->name);
+	wake_up_interruptible(&info->read_wait_q);
+	flush_workqueue(info->wq);
+}
+
+static void __socket_open_channel(struct diag_socket_info *info)
+{
+	if (!info)
+		return;
+
+	if (!info->inited) {
+		pr_debug("diag: In %s, socket %s is not initialized\n",
+			 __func__, info->name);
+		return;
+	}
+
+	if (atomic_read(&info->opened)) {
+		pr_debug("diag: In %s, socket %s already opened\n",
+			 __func__, info->name);
+		return;
+	}
+
+	atomic_set(&info->opened, 1);
+	diagfwd_channel_open(info->fwd_ctxt);
+}
+
+static void socket_data_ready(struct sock *sk_ptr)
+{
+	struct diag_socket_info *info;
+	unsigned long flags;
+
+	if (!sk_ptr) {
+		pr_err_ratelimited("diag: In %s, invalid sk_ptr", __func__);
+		return;
+	}
+
+	info = (struct diag_socket_info *)(sk_ptr->sk_user_data);
+	if (!info) {
+		pr_err_ratelimited("diag: In %s, invalid info\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&info->lock, flags);
+	info->data_ready++;
+	spin_unlock_irqrestore(&info->lock, flags);
+	diag_ws_on_notify();
+
+	queue_work(info->wq, &(info->read_work));
+	wake_up_interruptible(&info->read_wait_q);
+}
+
+static void socket_open_client(struct diag_socket_info *info)
+{
+	int ret;
+
+	if (!info || info->port_type != PORT_TYPE_CLIENT)
+		return;
+
+	if (!info->hdl) {
+		ret = sock_create(AF_QIPCRTR, SOCK_DGRAM, PF_QIPCRTR, &info->hdl);
+		if (ret < 0 || !info->hdl) {
+			pr_err("diag: In %s, socket not initialized for %s\n",
+							__func__, info->name);
+			return;
+		}
+	}
+
+	write_lock_bh(&info->hdl->sk->sk_callback_lock);
+	info->hdl->sk->sk_user_data = (void *)(info);
+	info->hdl->sk->sk_data_ready = socket_data_ready;
+	info->hdl->sk->sk_error_report = socket_data_ready;
+	write_unlock_bh(&info->hdl->sk->sk_callback_lock);
+	if (!info->remote_addr.sq_node && !info->remote_addr.sq_port) {
+		pr_err("diag: In %s, failed to get remote_addr\n", __func__);
+		return;
+	}
+	__socket_open_channel(info);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s opened client\n", info->name);
+}
+
+static void socket_open_server(struct diag_socket_info *info)
+{
+	struct qrtr_ctrl_pkt pkt;
+	struct sockaddr_qrtr sq;
+	struct msghdr msg = {0};
+	struct kvec iv = { &pkt, sizeof(pkt) };
+	int ret;
+	unsigned int size = DIAG_SO_RCVBUF_SIZE;
+
+	if (!info || info->port_type != PORT_TYPE_SERVER)
+		return;
+
+	ret = sock_create(AF_QIPCRTR, SOCK_DGRAM, PF_QIPCRTR, &info->hdl);
+	if (ret < 0 || !info->hdl) {
+		pr_err("diag: In %s, socket not initialized for %s\n", __func__,
+		       info->name);
+		return;
+	}
+	ret = kernel_getsockname(info->hdl, (struct sockaddr *)&sq);
+	if (ret < 0) {
+		pr_err("diag: In %s, getsockname failed %d\n", __func__,
+		       ret);
+		sock_release(info->hdl);
+		return;
+	}
+
+	sock_set_rcvbuf(info->hdl->sk, size);
+
+	write_lock_bh(&info->hdl->sk->sk_callback_lock);
+	info->hdl->sk->sk_user_data = (void *)(info);
+	info->hdl->sk->sk_data_ready = socket_data_ready;
+	info->hdl->sk->sk_error_report = socket_data_ready;
+	write_unlock_bh(&info->hdl->sk->sk_callback_lock);
+
+	memset(&pkt, 0, sizeof(pkt));
+	pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_SERVER);
+	pkt.server.service = cpu_to_le32(info->svc_id);
+	pkt.server.instance = cpu_to_le32(info->ins_id);
+	pkt.server.node = sq.sq_node;
+	pkt.server.port = sq.sq_port;
+
+	sq.sq_port = QRTR_PORT_CTRL;
+	msg.msg_name = &sq;
+	msg.msg_namelen = sizeof(sq);
+
+	ret = kernel_sendmsg(info->hdl, &msg, &iv, 1, sizeof(pkt));
+	if (ret < 0) {
+		pr_err("%s: failed to send new_server: %d\n", __func__, ret);
+		return;
+	}
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s opened server svc: %d ins: %d\n",
+		 info->name, info->svc_id, info->ins_id);
+}
+
+static void __socket_close_channel(struct diag_socket_info *info)
+{
+	unsigned long flags;
+
+	if (!info || !info->hdl)
+		return;
+
+	memset(&info->remote_addr, 0, sizeof(info->remote_addr));
+	diagfwd_channel_close(info->fwd_ctxt);
+
+	atomic_set(&info->opened, 0);
+	/* Don't close the server. Server should always remain open */
+	if (info->port_type == PORT_TYPE_SERVER)
+		return;
+
+	write_lock_bh(&info->hdl->sk->sk_callback_lock);
+	info->hdl->sk->sk_user_data = NULL;
+	info->hdl->sk->sk_data_ready = NULL;
+	info->hdl->sk->sk_error_report = NULL;
+	write_unlock_bh(&info->hdl->sk->sk_callback_lock);
+
+	/*
+	 * Do not release socket. Release it whenever go for rmmod
+	 * sock_release(info->hdl);
+	 * info->hdl = NULL;
+	 *
+	 * cancel_work_sync(&info->read_work);
+	 */
+	wake_up_interruptible(&info->read_wait_q);
+
+	spin_lock_irqsave(&info->lock, flags);
+	info->data_ready = 0;
+	spin_unlock_irqrestore(&info->lock, flags);
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n", info->name);
+}
+
+static void socket_close_channel(struct diag_socket_info *info)
+{
+	if (!info)
+		return;
+
+	__socket_close_channel(info);
+}
+
+
+static void socket_init_work_fn(struct work_struct *work)
+{
+	struct diag_socket_info *info = container_of(work,
+						     struct diag_socket_info,
+						     init_work);
+
+	if (!info)
+		return;
+
+	if (!info->inited) {
+		pr_err("diag: In %s, socket %s is not initialized\n",
+			 __func__, info->name);
+		return;
+	}
+
+	switch (info->port_type) {
+	case PORT_TYPE_SERVER:
+		socket_open_server(info);
+		break;
+	case PORT_TYPE_CLIENT:
+		socket_open_client(info);
+		break;
+	default:
+		pr_err("diag: In %s, unknown type %d\n", __func__,
+		       info->port_type);
+		break;
+	}
+}
+
+static void socket_read_work_fn(struct work_struct *work)
+{
+	int err;
+	struct diag_socket_info *info = container_of(work,
+						     struct diag_socket_info,
+						     read_work);
+
+	if (!info || !info->hdl)
+		return;
+
+	err = sock_error(info->hdl->sk);
+	if (unlikely(err == -ENETRESET)) {
+		socket_close_channel(info);
+		if (info->port_type == PORT_TYPE_SERVER)
+			socket_init_work_fn(&info->init_work);
+		return;
+	}
+
+	if (!atomic_read(&info->opened) && info->port_type == PORT_TYPE_SERVER)
+		diagfwd_buffers_init(info->fwd_ctxt);
+
+	diagfwd_channel_read(info->fwd_ctxt);
+}
+
+static void diag_socket_queue_read(void *ctxt)
+{
+	struct diag_socket_info *info;
+
+	if (!ctxt)
+		return;
+
+	info = (struct diag_socket_info *)ctxt;
+	if (info->hdl && info->wq)
+		queue_work(info->wq, &(info->read_work));
+}
+
+static void handle_ctrl_pkt(struct diag_socket_info *info, void *buf, int len)
+{
+	const struct qrtr_ctrl_pkt *pkt = buf;
+	u32 node;
+	u32 port;
+
+	if (len < sizeof(struct qrtr_ctrl_pkt))
+		return;
+
+	switch (le32_to_cpu(pkt->cmd)) {
+	case QRTR_TYPE_BYE:
+		node = le32_to_cpu(pkt->client.node);
+		if (info->remote_addr.sq_node == node) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s rcvd bye\n",
+				 info->name);
+
+			mutex_lock(&driver->diag_notifier_mutex);
+			if (bootup_req[info->peripheral] == PERIPHERAL_SSR_UP) {
+				DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"diag: %s is up, stopping cleanup: bootup_req = %d\n",
+				info->name, (int)bootup_req[info->peripheral]);
+				mutex_unlock(&driver->diag_notifier_mutex);
+				break;
+			}
+			mutex_unlock(&driver->diag_notifier_mutex);
+			socket_close_channel(info);
+		}
+		break;
+	case QRTR_TYPE_DEL_CLIENT:
+		node = le32_to_cpu(pkt->client.node);
+		port = le32_to_cpu(pkt->client.port);
+
+		if (info->remote_addr.sq_node == node &&
+		    info->remote_addr.sq_port == port) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s rcvd del client\n",
+				 info->name);
+
+			mutex_lock(&driver->diag_notifier_mutex);
+			if (bootup_req[info->peripheral] == PERIPHERAL_SSR_UP) {
+				DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"diag: %s is up, stopping cleanup: bootup_req = %d\n",
+				info->name, (int)bootup_req[info->peripheral]);
+				mutex_unlock(&driver->diag_notifier_mutex);
+				break;
+			}
+			mutex_unlock(&driver->diag_notifier_mutex);
+			socket_close_channel(info);
+		}
+		break;
+	}
+}
+
+static int __tiocinq(struct socket *sock)
+{
+	struct sk_buff *skb;
+	int ret = 0;
+
+	lock_sock(sock->sk);
+	skb = skb_peek(&sock->sk->sk_receive_queue);
+	if (skb)
+		ret = skb->len;
+	release_sock(sock->sk);
+
+	return ret;
+}
+
+static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len)
+{
+	int err = 0;
+	int pkt_len = 0;
+	int read_len = 0;
+	int bytes_remaining = 0;
+	int total_recd = 0;
+	int qrtr_ctrl_recd = 0;
+	uint8_t buf_full = 0;
+	unsigned char *temp = NULL;
+	struct kvec iov = {0};
+	struct msghdr read_msg = {0};
+	struct sockaddr_qrtr src_addr = {0};
+	struct diag_socket_info *info;
+	struct mutex *channel_mutex;
+	unsigned long flags;
+
+	info = (struct diag_socket_info *)(ctxt);
+	if (!info)
+		return -ENODEV;
+
+	if (!buf || !ctxt || buf_len <= 0)
+		return -EINVAL;
+
+	temp = buf;
+	bytes_remaining = buf_len;
+	channel_mutex = &driver->diagfwd_channel_mutex[info->peripheral];
+
+	err = wait_event_interruptible(info->read_wait_q,
+				      (info->data_ready > 0) || (!info->hdl) ||
+				      (atomic_read(&info->diag_state) == 0));
+	if (err) {
+		mutex_lock(channel_mutex);
+		diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
+		mutex_unlock(channel_mutex);
+		return -ERESTARTSYS;
+	}
+
+	/*
+	 * There is no need to continue reading over peripheral in this case.
+	 * Release the wake source hold earlier.
+	 */
+	if (atomic_read(&info->diag_state) == 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			 "%s closing read thread. diag state is closed\n",
+			 info->name);
+		mutex_lock(channel_mutex);
+		diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
+		mutex_unlock(channel_mutex);
+		return 0;
+	}
+
+	if (!info->hdl) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s closing read thread\n",
+			 info->name);
+		goto fail;
+	}
+
+	do {
+		iov.iov_base = temp;
+		iov.iov_len = bytes_remaining;
+		read_msg.msg_name = &src_addr;
+		read_msg.msg_namelen = sizeof(src_addr);
+
+		pkt_len = __tiocinq(info->hdl);
+		if (pkt_len <= 0)
+			break;
+
+		if (pkt_len > bytes_remaining) {
+			buf_full = 1;
+			break;
+		}
+
+		spin_lock_irqsave(&info->lock, flags);
+		info->data_ready--;
+		spin_unlock_irqrestore(&info->lock, flags);
+
+		read_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1,
+					  pkt_len, MSG_DONTWAIT);
+		if (unlikely(read_len == -ENETRESET)) {
+			mutex_lock(channel_mutex);
+			diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
+			mutex_unlock(channel_mutex);
+			socket_close_channel(info);
+			if (info->port_type == PORT_TYPE_SERVER)
+				socket_init_work_fn(&info->init_work);
+			return read_len;
+		} else if (read_len <= 0) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"Invalid read_len: %d\n", read_len);
+			continue;
+		}
+
+		if (src_addr.sq_port == QRTR_PORT_CTRL) {
+			handle_ctrl_pkt(info, temp, read_len);
+			qrtr_ctrl_recd += read_len;
+			continue;
+		}
+		if (info->type == TYPE_CNTL) {
+			memcpy(&info->remote_addr, &src_addr, sizeof(src_addr));
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"%s client node:port::[0x%x]:[0x%x]\n",
+				info->name, src_addr.sq_node, src_addr.sq_port);
+
+			if (!atomic_read(&info->opened))
+				__socket_open_channel(info);
+		} else {
+			if (!atomic_read(&info->opened) &&
+			    info->port_type == PORT_TYPE_SERVER) {
+				/*
+				 * This is the first packet from the client.
+				 * Copy its address to the connection object.
+				 * Consider this channel open for communication.
+				 */
+				memcpy(&info->remote_addr, &src_addr,
+					sizeof(src_addr));
+				DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+					 "%s client node:port::[0x%x]:[0x%x]\n",
+					 info->name, src_addr.sq_node,
+					 src_addr.sq_port);
+
+				if (info->ins_id == INST_ID_DCI)
+					atomic_set(&info->opened, 1);
+				else
+					__socket_open_channel(info);
+			}
+		}
+		temp += read_len;
+		total_recd += read_len;
+		bytes_remaining -= read_len;
+	} while (info->data_ready > 0);
+
+	if (buf_full || (info->type == TYPE_DATA && pkt_len))
+		err = queue_work(info->wq, &(info->read_work));
+
+	if (total_recd > 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s read total bytes: %d\n",
+			 info->name, total_recd);
+		mutex_lock(channel_mutex);
+		err = diagfwd_channel_read_done(info->fwd_ctxt, buf,
+						total_recd);
+		mutex_unlock(channel_mutex);
+		if (err)
+			goto fail;
+	} else {
+		if (qrtr_ctrl_recd > 0)
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"%s read qrtr ctrl bytes: %d\n",
+				info->name, qrtr_ctrl_recd);
+		else
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"%s error in read, err: %d\n",
+				info->name, total_recd);
+		goto fail;
+	}
+
+	diag_socket_queue_read(info);
+	return 0;
+
+fail:
+	mutex_lock(channel_mutex);
+	diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
+	mutex_unlock(channel_mutex);
+	return -EIO;
+}
+
+static int diag_socket_write(void *ctxt, unsigned char *buf, int len)
+{
+	int err = 0;
+	int write_len = 0;
+	struct kvec iov = {0};
+	struct msghdr write_msg = {0};
+	struct diag_socket_info *info = NULL;
+
+	if (!ctxt || !buf || len <= 0)
+		return -EIO;
+
+	info = (struct diag_socket_info *)(ctxt);
+	if (!atomic_read(&info->opened) || !info->hdl)
+		return -ENODEV;
+
+	iov.iov_base = buf;
+	iov.iov_len = len;
+	write_msg.msg_name = &info->remote_addr;
+	write_msg.msg_namelen = sizeof(info->remote_addr);
+	write_msg.msg_flags |= MSG_DONTWAIT;
+	write_len = kernel_sendmsg(info->hdl, &write_msg, &iov, 1, len);
+	if (write_len < 0) {
+		err = write_len;
+		/*
+		 * -EAGAIN means that the number of packets in flight is at
+		 * max capacity and the peripheral hasn't read the data.
+		 */
+		if (err != -EAGAIN && err != -ECONNRESET) {
+			pr_err_ratelimited("diag: In %s, error sending data, err: %d, ch: %s\n",
+					   __func__, err, info->name);
+		}
+	} else if (write_len != len) {
+		err = write_len;
+		pr_err_ratelimited("diag: In %s, wrote partial packet to %s, len: %d, wrote: %d\n",
+				   __func__, info->name, len, write_len);
+	}
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s wrote to socket, len: %d\n",
+		 info->name, write_len);
+
+	return err;
+}
+
+static void __diag_socket_init(struct diag_socket_info *info)
+{
+	uint16_t ins_base = 0;
+	uint16_t ins_offset = 0;
+	char wq_name[DIAG_SOCKET_NAME_SZ + 10];
+
+	if (!info)
+		return;
+
+	info->inited = 0;
+	atomic_set(&info->opened, 0);
+	atomic_set(&info->diag_state, 0);
+	info->pkt_len = 0;
+	info->pkt_read = 0;
+	info->hdl = NULL;
+	info->fwd_ctxt = NULL;
+	info->data_ready = 0;
+	atomic_set(&info->flow_cnt, 0);
+	spin_lock_init(&info->lock);
+	strlcpy(wq_name, info->name, sizeof(wq_name));
+	init_waitqueue_head(&info->read_wait_q);
+	info->wq = create_singlethread_workqueue(wq_name);
+	if (!info->wq) {
+		pr_err("diag: In %s, unable to create workqueue for socket channel %s\n",
+		       __func__, info->name);
+		return;
+	}
+	INIT_WORK(&(info->init_work), socket_init_work_fn);
+	INIT_WORK(&(info->read_work), socket_read_work_fn);
+	memset(&info->remote_addr, 0, sizeof(info->remote_addr));
+
+	switch (info->peripheral) {
+	case PERIPHERAL_MODEM:
+		ins_base = MODEM_INST_BASE;
+		break;
+	case PERIPHERAL_LPASS:
+		ins_base = LPASS_INST_BASE;
+		break;
+	case PERIPHERAL_WCNSS:
+		ins_base = WCNSS_INST_BASE;
+		break;
+	case PERIPHERAL_SENSORS:
+		ins_base = SENSORS_INST_BASE;
+		break;
+	}
+
+	switch (info->type) {
+	case TYPE_DATA:
+		ins_offset = INST_ID_DATA;
+		info->port_type = PORT_TYPE_SERVER;
+		break;
+	case TYPE_CNTL:
+		ins_offset = INST_ID_CNTL;
+		info->port_type = PORT_TYPE_SERVER;
+		break;
+	case TYPE_DCI:
+		ins_offset = INST_ID_DCI;
+		info->port_type = PORT_TYPE_SERVER;
+		break;
+	case TYPE_CMD:
+		ins_offset = INST_ID_CMD;
+		info->port_type = PORT_TYPE_CLIENT;
+		break;
+	case TYPE_DCI_CMD:
+		ins_offset = INST_ID_DCI_CMD;
+		info->port_type = PORT_TYPE_CLIENT;
+		break;
+	}
+
+	info->svc_id = DIAG_SVC_ID;
+	info->ins_id = ins_base + ins_offset;
+	info->inited = 1;
+}
+
+static struct diag_socket_info *diag_get_svc_sock_info(struct qmi_service *svc)
+{
+	struct diag_socket_info *info = NULL;
+	u32 inst;
+	int i;
+
+	inst = svc->version | (svc->instance << 8);
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!test_bit(i, &peripheral_mask))
+			continue;
+		if ((svc->service == socket_cmd[i].svc_id) &&
+		    (inst == socket_cmd[i].ins_id)) {
+			info = &socket_cmd[i];
+			break;
+		}
+
+		if ((svc->service == socket_dci_cmd[i].svc_id) &&
+		    (inst == socket_dci_cmd[i].ins_id)) {
+			info = &socket_dci_cmd[i];
+			break;
+		}
+	}
+	return info;
+}
+
+static int diag_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+	struct diag_socket_info *info;
+	int ret;
+
+	info = diag_get_svc_sock_info(svc);
+	if (!info)
+		return -EINVAL;
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s rcvd new server\n", info->name);
+	ret = diagfwd_register(TRANSPORT_SOCKET, info->peripheral, info->type,
+			       (void *)info, &socket_ops, &info->fwd_ctxt);
+	info->remote_addr.sq_family = AF_QIPCRTR;
+	info->remote_addr.sq_node = svc->node;
+	info->remote_addr.sq_port = svc->port;
+	socket_init_work_fn(&info->init_work);
+
+	return 0;
+}
+
+static void diag_del_server(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+	struct diag_socket_info *info;
+
+	info = diag_get_svc_sock_info(svc);
+	if (!info)
+		return;
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s rcvd del server\n", info->name);
+	socket_close_channel(info);
+}
+
+static struct qmi_ops diag_qmi_cntl_ops = {
+	.new_server = diag_new_server,
+	.del_server = diag_del_server,
+};
+
+static void *diag_subsys_handle =  NULL;
+
+int diag_socket_init(void)
+{
+	struct diag_socket_info *info = NULL;
+	int peripheral;
+	int rc;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		pr_debug("diag: init peripheral %d, mask: 0x%lx\n", peripheral, peripheral_mask);
+		if (!test_bit(peripheral, &peripheral_mask))
+			continue;
+
+		info = &socket_cntl[peripheral];
+		__diag_socket_init(&socket_cntl[peripheral]);
+
+		diagfwd_cntl_register(TRANSPORT_SOCKET, peripheral,
+			(void *)info, &socket_ops, &(info->fwd_ctxt));
+
+		__diag_socket_init(&socket_data[peripheral]);
+		__diag_socket_init(&socket_cmd[peripheral]);
+		__diag_socket_init(&socket_dci[peripheral]);
+		__diag_socket_init(&socket_dci_cmd[peripheral]);
+	}
+
+#if 0  // QCOM_DRIVERS_NOT_AVAILABLE
+	for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
+		if (!test_bit(i, &peripheral_mask))
+			continue;
+
+		nb = &restart_notifiers[i];
+		handle = subsys_notif_register_notifier(nb->name, &nb->nb);
+		diag_subsys_handle = handle;
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			 "%s: registering notifier for '%s', handle=%p\n",
+			 __func__, nb->name, handle);
+	}
+#endif
+
+	cntl_qmi = kzalloc(sizeof(*cntl_qmi), GFP_KERNEL);
+	if (!cntl_qmi) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+	rc = qmi_handle_init(cntl_qmi, 0, &diag_qmi_cntl_ops, NULL);
+	if (rc < 0)
+		goto fail;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		if (!test_bit(peripheral, &peripheral_mask))
+			continue;
+		info = &socket_cmd[peripheral];
+		pr_debug("diag: %s lookup svc: %d ins: %d\n", info->name, info->svc_id, info->ins_id);
+		qmi_add_lookup(cntl_qmi, info->svc_id,
+				info->ins_id & 0xFF, info->ins_id >> 8);
+
+		info = &socket_dci_cmd[peripheral];
+		qmi_add_lookup(cntl_qmi, info->svc_id,
+			       info->ins_id & 0xFF, info->ins_id >> 8);
+
+		info = &socket_cntl[peripheral];
+		socket_init_work_fn(&info->init_work);
+
+		info = &socket_data[peripheral];
+		socket_init_work_fn(&info->init_work);
+		/* Read function should always be there after server init,
+		 * otherwise there could be loss of packets and eventually
+		 * memory leak in kernel*/
+		diagfwd_register(TRANSPORT_SOCKET, info->peripheral,
+				info->type, (void *)info, &socket_ops,
+				&info->fwd_ctxt);
+		diagfwd_open(peripheral, TYPE_DATA);
+		queue_work(info->wq, &(info->read_work));
+
+		info = &socket_dci[peripheral];
+		socket_init_work_fn(&info->init_work);
+		diagfwd_register(TRANSPORT_SOCKET, info->peripheral,
+				info->type, (void *)info, &socket_ops,
+				&info->fwd_ctxt);
+		diagfwd_open(peripheral, TYPE_DCI);
+		queue_work(info->wq, &(info->read_work));
+	}
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s: init done\n", __func__);
+
+fail:
+	return rc;
+}
+
+int diag_socket_init_peripheral(uint8_t peripheral)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return -EINVAL;
+
+	info = &socket_data[peripheral];
+	diagfwd_register(TRANSPORT_SOCKET, info->peripheral,
+			 info->type, (void *)info, &socket_ops,
+			 &info->fwd_ctxt);
+
+	info = &socket_dci[peripheral];
+	diagfwd_register(TRANSPORT_SOCKET, info->peripheral,
+			 info->type, (void *)info, &socket_ops,
+			 &info->fwd_ctxt);
+	return 0;
+}
+
+static void __diag_socket_exit(struct diag_socket_info *info)
+{
+	if (!info)
+		return;
+
+	diagfwd_deregister(info->peripheral, info->type, (void *)info);
+	info->fwd_ctxt = NULL;
+	if (info->hdl)
+		sock_release(info->hdl);
+	info->hdl = NULL;
+	wake_up_interruptible(&info->read_wait_q);
+	if (info->wq)
+		destroy_workqueue(info->wq);
+}
+
+void diag_socket_exit(void)
+{
+	int i;
+
+	if (cntl_qmi) {
+		qmi_handle_release(cntl_qmi);
+		kfree(cntl_qmi);
+	}
+
+#if 0  // QCOM_DRIVERS_NOT_AVAILABLE
+	for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
+		if (!test_bit(i, &peripheral_mask))
+			continue;
+
+		nb = &restart_notifiers[i];
+		if (diag_subsys_handle)
+			subsys_notif_unregister_notifier(diag_subsys_handle, &nb->nb);
+	}
+#endif
+
+	diag_subsys_handle = NULL;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!test_bit(i, &peripheral_mask))
+			continue;
+
+		__diag_socket_exit(&socket_cntl[i]);
+		__diag_socket_exit(&socket_data[i]);
+		__diag_socket_exit(&socket_cmd[i]);
+		__diag_socket_exit(&socket_dci[i]);
+		__diag_socket_exit(&socket_dci_cmd[i]);
+	}
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/char/hw_random/qcom-rng.c	2023-05-22 20:30:14.537853935 +0200
@@ -0,0 +1,160 @@
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/reset.h>
+#include <linux/of.h>
+
+/* Device specific register offsets */
+#define PRNG_DATA_OUT		0x0000
+#define PRNG_STATUS		0x0004
+#define PRNG_LFSR_CFG		0x0100
+#define PRNG_CONFIG		0x0104
+
+/* Device specific register masks and config values */
+#define PRNG_LFSR_CFG_MASK	0x0000ffff
+#define PRNG_LFSR_CFG_CLOCKS	0x0000dddd
+#define PRNG_CONFIG_HW_ENABLE	BIT(1)
+#define PRNG_STATUS_DATA_AVAIL	BIT(0)
+
+#define WORD_SZ			4
+
+/* Per-device state; the embedded hwrng is recovered via to_rng_priv(). */
+struct qcom_rng_priv {
+	struct hwrng rng;		/* hwrng core registration handle */
+	struct clk *clk;		/* "core" interface clock */
+	struct reset_control *rst;	/* NOTE(review): never assigned in this file */
+	void __iomem *base;		/* mapped PRNG register window */
+};
+
+#define to_rng_priv(rng)	container_of(rng, struct qcom_rng_priv, rng)
+
+/*
+ * qcom_rng_read - hwrng .read callback: drain up to @max bytes of
+ * random data from the PRNG FIFO into @data.
+ *
+ * When @wait is false, return as soon as the FIFO runs dry; otherwise
+ * poll PRNG_STATUS (200us period, 10ms timeout) for more data.
+ * Returns the number of bytes written, or a negative errno on timeout.
+ */
+static int qcom_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct qcom_rng_priv *priv = to_rng_priv(rng);
+	size_t total;
+
+	total = 0;
+	while (total < max) {
+		size_t to_read;
+		u32 val;
+
+		val = readl_relaxed(priv->base + PRNG_STATUS);
+		if (!(val & PRNG_STATUS_DATA_AVAIL)) {
+			int ret;
+			if (!wait)
+				break;
+
+			ret = readl_poll_timeout(priv->base + PRNG_STATUS, val,
+						 val & PRNG_STATUS_DATA_AVAIL,
+						 200, 10000);
+			if (ret)
+				return ret;
+		}
+
+		val = readl_relaxed(priv->base + PRNG_DATA_OUT);
+		/* clamp the final chunk to what the caller asked for */
+		to_read = max - total;
+		if (to_read >= WORD_SZ)
+			to_read = WORD_SZ;
+
+		/*
+		 * Copy only to_read bytes: copying a full word here would
+		 * overrun the caller's buffer by up to WORD_SZ - 1 bytes
+		 * whenever max is not a multiple of WORD_SZ.
+		 */
+		memcpy(data + total, &val, to_read);
+		total += to_read;
+	}
+
+	return total;
+}
+
+/*
+ * qcom_rng_init - hwrng .init callback: enable the PRNG "core" clock.
+ *
+ * The LFSR/CONFIG programming sequence is deliberately compiled out
+ * (if (0)): per the FIXME below those registers fault even on reads.
+ * The hardware is presumably left configured by earlier firmware —
+ * TODO confirm.
+ * Returns 0 on success or the clk_prepare_enable() error.
+ */
+static int qcom_rng_init(struct hwrng *rng)
+{
+	struct qcom_rng_priv *priv = to_rng_priv(rng);
+	int ret;
+	u32 val;
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
+
+	/* FIXME: those registers causes SERROR access, even in READ mode  */
+	if (0) {
+		val = readl_relaxed(priv->base + PRNG_LFSR_CFG);
+		val &= ~PRNG_LFSR_CFG_MASK;
+		val |= PRNG_LFSR_CFG_CLOCKS;
+		writel(val, priv->base + PRNG_LFSR_CFG);
+
+		val = readl_relaxed(priv->base + PRNG_CONFIG);
+		val |= PRNG_CONFIG_HW_ENABLE;
+		writel(val, priv->base + PRNG_CONFIG);
+	}
+
+	return 0;
+}
+
+/*
+ * qcom_rng_cleanup - hwrng .cleanup callback: disable PRNG output and
+ * gate the interface clock.
+ *
+ * NOTE(review): qcom_rng_init() skips all PRNG_CONFIG accesses because
+ * they reportedly raise an SERROR even on reads, yet this path still
+ * reads and writes PRNG_CONFIG — confirm cleanup is safe on the
+ * affected platform.
+ */
+static void qcom_rng_cleanup(struct hwrng *rng)
+{
+	struct qcom_rng_priv *priv = to_rng_priv(rng);
+	u32 val;
+
+	val = readl_relaxed(priv->base + PRNG_CONFIG);
+	val &= ~PRNG_CONFIG_HW_ENABLE;
+	writel(val, priv->base + PRNG_CONFIG);
+
+	clk_disable_unprepare(priv->clk);
+}
+
+/*
+ * qcom_rng_probe - map the PRNG registers, look up the "core" clock and
+ * register with the hwrng framework.  Everything is devm-managed, so no
+ * explicit remove callback is needed.
+ *
+ * priv->rst is never populated here; the reset line is currently unused.
+ * Returns 0 on success or a negative errno.
+ */
+static int qcom_rng_probe(struct platform_device *pdev)
+{
+	struct qcom_rng_priv *priv;
+	int ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->rng.name = pdev->name;
+	priv->rng.init = qcom_rng_init;
+	priv->rng.cleanup = qcom_rng_cleanup;
+	priv->rng.read = qcom_rng_read;
+	priv->rng.quality = 900;	/* entropy estimate per hwrng convention — TODO confirm value */
+
+	priv->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	priv->clk = devm_clk_get(&pdev->dev, "core");
+	if (IS_ERR(priv->clk))
+		return PTR_ERR(priv->clk);
+
+	ret = devm_hwrng_register(&pdev->dev, &priv->rng);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register rng device: %d\n",
+			ret);
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "registered RNG driver\n");
+	return 0;
+}
+
+/* DT match table; .data is unused (always 0) — both PRNG variants are
+ * handled identically by this driver. */
+static const struct of_device_id __maybe_unused qcom_rng_of_match[] = {
+	{ .compatible = "qcom,prng", .data = (void *)0},
+	{ .compatible = "qcom,prng-ee", .data = (void *)0},
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_rng_of_match);
+
+static struct platform_driver qcom_rng_driver = {
+	.probe = qcom_rng_probe,
+	/* no .remove: all resources in probe are devm-managed */
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = of_match_ptr(qcom_rng_of_match),
+	}
+};
+module_platform_driver(qcom_rng_driver);
+
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_DESCRIPTION("Qualcomm random number generator driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/clk/qcom/nsscc-ipq9574.c	2023-05-31 17:11:03.409680387 +0200
@@ -0,0 +1,3243 @@
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,nsscc-ipq9574.h>
+#include <dt-bindings/reset/qcom,nsscc-ipq9574.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "reset.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+	DT_PARENT_NOC_SNOC,
+	DT_PARENT_NOC_SNOC1,
+	DT_PARENT_NOC_NSSCC,
+	DT_PARENT_NSSCC,
+
+	DT_XO,
+	DT_BIAS_PLL_CC_CLK,
+	DT_BIAS_PLL_NSS_NOC_CLK,
+	DT_BIAS_PLL_UBI_NC_CLK,
+	DT_GCC_GPLL0_OUT_AUX,
+
+	DT_UNIPHY0_GCC_RX_CLK,
+	DT_UNIPHY0_GCC_TX_CLK,
+	DT_UNIPHY1_GCC_RX_CLK,
+	DT_UNIPHY1_GCC_TX_CLK,
+	DT_UNIPHY2_GCC_RX_CLK,
+	DT_UNIPHY2_GCC_TX_CLK,
+};
+
+enum {
+	P_BIAS_PLL_CC_CLK,
+	P_BIAS_PLL_NSS_NOC_CLK,
+	P_BIAS_PLL_UBI_NC_CLK,
+	P_GCC_GPLL0_OUT_AUX,
+	P_UBI32_PLL_OUT_MAIN,
+	P_UNIPHY0_GCC_RX_CLK,
+	P_UNIPHY0_GCC_TX_CLK,
+	P_UNIPHY1_GCC_RX_CLK,
+	P_UNIPHY1_GCC_TX_CLK,
+	P_UNIPHY2_GCC_RX_CLK,
+	P_UNIPHY2_GCC_TX_CLK,
+	P_XO,
+};
+
+static const struct clk_parent_data gcc_xo_data[] = {
+	{ .index = DT_XO },
+};
+
+/*
+ * UBI32 Huayra PLL settings.  With a 24 MHz XO, L = 0x3e plus the
+ * 16-bit alpha 0x6666 yields roughly 1497.6 MHz, matching the rates in
+ * ftbl_nss_cc_ubi0_clk_src below — TODO confirm against silicon docs.
+ */
+static const struct alpha_pll_config ubi32_pll_config = {
+	.l = 0x3E,
+	.alpha = 0x6666,
+	.config_ctl_val = 0x200d4aa8,
+	.config_ctl_hi_val = 0x3c,
+	.main_output_mask = BIT(0),
+	.aux_output_mask = BIT(1),
+	.pre_div_val = 0x0,
+	.pre_div_mask = BIT(12),
+	.post_div_val = 0x0,
+	.post_div_mask = GENMASK(9, 8),
+	.alpha_en_mask = BIT(24),
+	.test_ctl_val = 0x1C0000C0,
+	.test_ctl_hi_val = 0x4000,
+};
+
+/* Main output of the UBI32 (NSS Huayra-type) alpha PLL, fed by the XO. */
+static struct clk_alpha_pll ubi32_pll_main = {
+	.offset = 0x28000,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA],
+	.flags = SUPPORTS_DYNAMIC_UPDATE,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "ubi32_pll_main",
+			.parent_data = gcc_xo_data,
+			.num_parents = ARRAY_SIZE(gcc_xo_data),
+			.ops = &clk_alpha_pll_huayra_ops,
+		},
+	},
+};
+
+/* Read-only post-divider on ubi32_pll_main (same register block). */
+static struct clk_alpha_pll_postdiv ubi32_pll = {
+	.offset = 0x28000,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA],
+	.width = 2,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "ubi32_pll",
+		/* tabs here: original lines were space-indented, against
+		 * kernel coding style and the rest of this file */
+		.parent_hws = (const struct clk_hw *[]) {
+			&ubi32_pll_main.clkr.hw
+		},
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_ro_ops,
+		.flags = CLK_SET_RATE_PARENT,
+	},
+};
+
+// was static const struct clk_parent_data nss_cc_parent_data_1[] = {
+static const struct clk_parent_data
+x0_bias_pll_ubi_gpll0_out_aux_bias_pll_cc[] = {
+	{ .index = DT_XO },
+	{ .index = DT_BIAS_PLL_UBI_NC_CLK, },
+	{ .index = DT_GCC_GPLL0_OUT_AUX, },
+	{ .index = DT_BIAS_PLL_CC_CLK, },
+};
+
+// was static const struct parent_map nss_cc_parent_map_1[] = {
+static const struct parent_map
+x0_bias_pll_ubi_gpll0_out_aux_bias_pll_cc_map[] = {
+	{ P_XO, 0 },
+	{ P_BIAS_PLL_UBI_NC_CLK, 1 },
+	{ P_GCC_GPLL0_OUT_AUX, 2 },
+	{ P_BIAS_PLL_CC_CLK, 6 },
+};
+
+// was static const struct clk_parent_data nss_cc_parent_data_2[] = {
+static const struct clk_parent_data
+x0_ubi32_pll_out_main_gcc_gpll0_out_aux[] = {
+	{ .index = DT_XO },
+	{ .hw = &ubi32_pll.clkr.hw },
+	{ .index = DT_GCC_GPLL0_OUT_AUX, },
+};
+
+// was static const struct parent_map nss_cc_parent_map_2[] = {
+static const struct parent_map
+x0_ubi32_pll_out_main_gcc_gpll0_out_aux_map[] = {
+	{ P_XO, 0 },
+	{ P_UBI32_PLL_OUT_MAIN, 1 },
+	{ P_GCC_GPLL0_OUT_AUX, 2 },
+};
+
+// was static const struct clk_parent_data nss_cc_parent_data_3[] = {
+static const struct clk_parent_data
+x0_bias_pll_cc_gcc_gpll0_out_aux[] = {
+	{ .index = DT_XO },
+	{ .index = DT_BIAS_PLL_CC_CLK },
+	{ .index = DT_GCC_GPLL0_OUT_AUX, },
+};
+
+// was static const struct parent_map nss_cc_parent_map_3[] = {
+static const struct parent_map
+x0_bias_pll_cc_gcc_gpll0_out_aux_map[] = {
+	{ P_XO, 0 },
+	{ P_BIAS_PLL_CC_CLK, 1 },
+	{ P_GCC_GPLL0_OUT_AUX, 2 },
+};
+
+// was static const struct clk_parent_data nss_cc_parent_data_0[] = {
+static const struct clk_parent_data
+x0_bias_pll_cc_uni0rx_uni0tx_uni1rx_uni1tx[] = {
+	{ .index = DT_XO },
+	{ .index = DT_BIAS_PLL_CC_CLK, },
+	{ .index = DT_UNIPHY0_GCC_RX_CLK },
+	{ .index = DT_UNIPHY0_GCC_TX_CLK },
+	{ .index = DT_UNIPHY1_GCC_RX_CLK },
+	{ .index = DT_UNIPHY1_GCC_TX_CLK },
+};
+
+// was static const struct parent_map nss_cc_parent_map_0[] = {
+static const struct parent_map
+x0_bias_pll_cc_uni0rx_uni0tx_uni1rx_uni1tx_map[] = {
+	{ P_XO, 0 },
+	{ P_BIAS_PLL_CC_CLK, 1 },
+	{ P_UNIPHY0_GCC_RX_CLK, 2 },
+	{ P_UNIPHY0_GCC_TX_CLK, 3 },
+	{ P_UNIPHY1_GCC_RX_CLK, 4 },
+	{ P_UNIPHY1_GCC_TX_CLK, 5 },
+};
+
+// was static const struct clk_parent_data nss_cc_parent_data_4[] = {
+static const struct clk_parent_data
+x0_bias_pll_cc_uni0rx_uni0tx[] = {
+	{ .index = DT_XO },
+	{ .index = DT_BIAS_PLL_CC_CLK, },
+	{ .index = DT_UNIPHY0_GCC_RX_CLK },
+	{ .index = DT_UNIPHY0_GCC_TX_CLK },
+};
+
+// was static const struct parent_map nss_cc_parent_map_4[] = {
+static const struct parent_map
+x0_bias_pll_cc_uni0rx_uni0tx_map[] = {
+	{ P_XO, 0 },
+	{ P_BIAS_PLL_CC_CLK, 1 },
+	{ P_UNIPHY0_GCC_RX_CLK, 2 },
+	{ P_UNIPHY0_GCC_TX_CLK, 3 },
+};
+
+// was static const struct clk_parent_data nss_cc_parent_data_5[] = {
+static const struct clk_parent_data
+x0_bias_pll_cc_uni2rx_uni2tx[] = {
+	{ .index = DT_XO },
+	{ .index = DT_BIAS_PLL_CC_CLK, },
+	{ .index = DT_UNIPHY2_GCC_RX_CLK },
+	{ .index = DT_UNIPHY2_GCC_TX_CLK },
+};
+
+// was static const struct parent_map nss_cc_parent_map_5[] = {
+static const struct parent_map
+x0_bias_pll_cc_uni2rx_uni2tx_map[] = {
+	{ P_XO, 0 },
+	{ P_BIAS_PLL_CC_CLK, 1 },
+	{ P_UNIPHY2_GCC_RX_CLK, 2 },
+	{ P_UNIPHY2_GCC_TX_CLK, 3 },
+};
+
+// was static const struct clk_parent_data nss_cc_parent_data_6[] = {
+static const struct clk_parent_data
+x0_bias_pll_nss_noc_gpll0_out_aux_bias_pll_cc[] = {
+	{ .index = DT_XO },
+	{ .index = DT_BIAS_PLL_NSS_NOC_CLK, },
+	{ .index = DT_GCC_GPLL0_OUT_AUX },
+	{ .index = DT_BIAS_PLL_CC_CLK },
+};
+
+// was static const struct parent_map nss_cc_parent_map_6[] = {
+static const struct parent_map
+x0_bias_pll_nss_noc_gpll0_out_aux_bias_pll_cc_map[] = {
+	{ P_XO, 0 },
+	{ P_BIAS_PLL_NSS_NOC_CLK, 1 },
+	{ P_GCC_GPLL0_OUT_AUX, 2 },
+	{ P_BIAS_PLL_CC_CLK, 6 },
+};
+
+// was static const struct clk_parent_data nss_cc_parent_data_7[] = {
+static const struct clk_parent_data
+x0_ubi32_pll_out_main_gcc_gpll0_out_aux_bias_pll_cc[] = {
+	{ .index = DT_XO },
+	{ .hw = &ubi32_pll.clkr.hw },
+	{ .index = DT_GCC_GPLL0_OUT_AUX },
+	{ .index = DT_BIAS_PLL_CC_CLK },
+};
+
+// was static const struct parent_map nss_cc_parent_map_7[] = {
+static const struct parent_map
+x0_ubi32_pll_out_main_gcc_gpll0_out_aux_bias_pll_cc_map[] = {
+	{ P_XO, 0 },
+	{ P_UBI32_PLL_OUT_MAIN, 1 },
+	{ P_GCC_GPLL0_OUT_AUX, 2 },
+	{ P_BIAS_PLL_CC_CLK, 6 },
+};
+
+static const struct freq_tbl ftbl_nss_cc_ce_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(353000000, P_BIAS_PLL_UBI_NC_CLK, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 nss_cc_ce_clk_src = {
+	.cmd_rcgr = 0x28404,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_ubi_gpll0_out_aux_bias_pll_cc_map,
+	.freq_tbl = ftbl_nss_cc_ce_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_ce_clk_src",
+		.parent_data = x0_bias_pll_ubi_gpll0_out_aux_bias_pll_cc,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_ubi_gpll0_out_aux_bias_pll_cc),
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_nss_cc_cfg_clk_src[] = {
+	F(100000000, P_GCC_GPLL0_OUT_AUX, 8, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 nss_cc_cfg_clk_src = {
+	.cmd_rcgr = 0x28104,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_cc_gcc_gpll0_out_aux_map,
+	.freq_tbl = ftbl_nss_cc_cfg_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_cfg_clk_src",
+		.parent_data = x0_bias_pll_cc_gcc_gpll0_out_aux,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_cc_gcc_gpll0_out_aux),
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_nss_cc_clc_clk_src[] = {
+	F(533333333, P_GCC_GPLL0_OUT_AUX, 1.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 nss_cc_clc_clk_src = {
+	.cmd_rcgr = 0x28604,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_nss_noc_gpll0_out_aux_bias_pll_cc_map,
+	.freq_tbl = ftbl_nss_cc_clc_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_clc_clk_src",
+		.parent_data = x0_bias_pll_nss_noc_gpll0_out_aux_bias_pll_cc,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_nss_noc_gpll0_out_aux_bias_pll_cc),
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_nss_cc_crypto_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(300000000, P_BIAS_PLL_CC_CLK, 4, 0, 0),
+	F(600000000, P_BIAS_PLL_CC_CLK, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 nss_cc_crypto_clk_src = {
+	.cmd_rcgr = 0x16008,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_cc_gcc_gpll0_out_aux_map,
+	.freq_tbl = ftbl_nss_cc_crypto_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_crypto_clk_src",
+		.parent_data = x0_bias_pll_cc_gcc_gpll0_out_aux,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_cc_gcc_gpll0_out_aux),
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_haq_clk_src = {
+	.cmd_rcgr = 0x28304,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_ubi_gpll0_out_aux_bias_pll_cc_map,
+	.freq_tbl = ftbl_nss_cc_ce_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_haq_clk_src",
+		.parent_data = x0_bias_pll_ubi_gpll0_out_aux_bias_pll_cc,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_ubi_gpll0_out_aux_bias_pll_cc),
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_imem_clk_src = {
+	.cmd_rcgr = 0xe008,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_ubi_gpll0_out_aux_bias_pll_cc_map,
+	.freq_tbl = ftbl_nss_cc_ce_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_imem_clk_src",
+		.parent_data = x0_bias_pll_ubi_gpll0_out_aux_bias_pll_cc,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_ubi_gpll0_out_aux_bias_pll_cc),
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_nss_cc_int_cfg_clk_src[] = {
+	F(200000000, P_GCC_GPLL0_OUT_AUX, 4, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 nss_cc_int_cfg_clk_src = {
+	.cmd_rcgr = 0x287b4,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_cc_gcc_gpll0_out_aux_map,
+	.freq_tbl = ftbl_nss_cc_int_cfg_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_int_cfg_clk_src",
+		.parent_data = x0_bias_pll_cc_gcc_gpll0_out_aux,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_cc_gcc_gpll0_out_aux),
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+/*
+ * clock value vs mode:
+ *   - 125Mhz (1000base-x, sgmii)
+ *      => we need 2.5Mhz, 25Mhz, 125Mhz from it
+ *   - 250Mhz (qsgmii)
+ *      => we need 2.5Mhz, 25Mhz, 125Mhz
+ *   - 312.5Mhz (10G, 2500base-x, psgmii, usxgmii & multiport variants)
+ *      => we need 1.25Mhz, 12.5Mhz, 78.125Mhz, 125Mhz, 156.25Mhz & 312.5Mhz from it
+ *
+ * We don't have to generate all those frequencies at this level, some
+ * are obtained through the second level divider.
+ */
+
+/*
+ * nss ports [1234], source can be uniphy0 only
+ */
+static const struct freq_conf ftbl_nss_cc_port1234_rx_clk_src_25[] = {
+	// from 125Mhz
+	C(P_UNIPHY0_GCC_RX_CLK, 5, 0, 0),
+	// from 250Mhz
+	C(P_UNIPHY0_GCC_RX_CLK, 10, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port1234_rx_clk_src_125[] = {
+	// from 125Mhz
+	C(P_UNIPHY0_GCC_RX_CLK, 1, 0, 0),
+	// from 250Mhz
+	C(P_UNIPHY0_GCC_RX_CLK, 2, 0, 0),
+	// from 312.5Mhz
+	C(P_UNIPHY0_GCC_RX_CLK, 2.5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port1234_tx_clk_src_25[] = {
+	// from 125Mhz
+	C(P_UNIPHY0_GCC_TX_CLK, 5, 0, 0),
+	// from 250Mhz
+	C(P_UNIPHY0_GCC_TX_CLK, 10, 0, 0),
+	// from 312.5Mhz
+	C(P_UNIPHY0_GCC_TX_CLK, 12.5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port1234_tx_clk_src_125[] = {
+	// from 125Mhz
+	C(P_UNIPHY0_GCC_TX_CLK, 1, 0, 0),
+	// from 250Mhz
+	C(P_UNIPHY0_GCC_TX_CLK, 2, 0, 0),
+	// from 312.5Mhz
+	C(P_UNIPHY0_GCC_TX_CLK, 2.5, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port1234_rx_clk_src[] = {
+	FMS(24000000, P_XO, 1, 0, 0),
+	FM(25000000, ftbl_nss_cc_port1234_rx_clk_src_25),
+	FMS(78125000, P_UNIPHY0_GCC_RX_CLK, 4, 0, 0),
+	FM(125000000, ftbl_nss_cc_port1234_rx_clk_src_125),
+	FMS(156250000, P_UNIPHY0_GCC_RX_CLK, 2, 0, 0),
+	FMS(312500000, P_UNIPHY0_GCC_RX_CLK, 1, 0, 0),
+	{ }
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port1234_tx_clk_src[] = {
+	FMS(24000000, P_XO, 1, 0, 0),
+	FM(25000000, ftbl_nss_cc_port1234_tx_clk_src_25),
+	FMS(78125000, P_UNIPHY0_GCC_TX_CLK, 4, 0, 0),
+	FM(125000000, ftbl_nss_cc_port1234_tx_clk_src_125),
+	FMS(156250000, P_UNIPHY0_GCC_TX_CLK, 2, 0, 0),
+	FMS(312500000, P_UNIPHY0_GCC_TX_CLK, 1, 0, 0),
+	{ }
+};
+
+/*
+ * nss port 5, source can be uniphy0 or uniphy1
+ *
+ * when source is uniphy0, not all frequencies are needed since the
+ * port can only be sgmii or one psgmii channel
+ */
+static const struct freq_conf ftbl_nss_cc_port5_rx_clk_src_25[] = {
+	// from 125Mhz
+	C(P_UNIPHY0_GCC_RX_CLK, 5, 0, 0),
+	C(P_UNIPHY1_GCC_RX_CLK, 5, 0, 0),
+	// from 312.5Mhz
+	C(P_UNIPHY0_GCC_RX_CLK, 12.5, 0, 0),
+	C(P_UNIPHY1_GCC_RX_CLK, 12.5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port5_rx_clk_src_125[] = {
+	// from 125Mhz
+	C(P_UNIPHY0_GCC_RX_CLK, 1, 0, 0),
+	C(P_UNIPHY1_GCC_RX_CLK, 1, 0, 0),
+	// from 312.5Mhz
+	C(P_UNIPHY0_GCC_RX_CLK, 2.5, 0, 0),
+	C(P_UNIPHY1_GCC_RX_CLK, 2.5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port5_tx_clk_src_25[] = {
+	// from 125Mhz
+	C(P_UNIPHY0_GCC_TX_CLK, 5, 0, 0),
+	C(P_UNIPHY1_GCC_TX_CLK, 5, 0, 0),
+	// from 312.5Mhz
+	C(P_UNIPHY0_GCC_TX_CLK, 12.5, 0, 0),
+	C(P_UNIPHY1_GCC_TX_CLK, 12.5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port5_tx_clk_src_125[] = {
+	// from 125Mhz
+	C(P_UNIPHY0_GCC_TX_CLK, 1, 0, 0),
+	C(P_UNIPHY1_GCC_TX_CLK, 1, 0, 0),
+	// from 312.5Mhz
+	C(P_UNIPHY0_GCC_TX_CLK, 2.5, 0, 0),
+	C(P_UNIPHY1_GCC_TX_CLK, 2.5, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port5_rx_clk_src[] = {
+	FMS(24000000, P_XO, 1, 0, 0),
+	FM(25000000, ftbl_nss_cc_port5_rx_clk_src_25),
+	FMS(78125000, P_UNIPHY1_GCC_RX_CLK, 4, 0, 0),
+	FM(125000000, ftbl_nss_cc_port5_rx_clk_src_125),
+	FMS(156250000, P_UNIPHY1_GCC_RX_CLK, 2, 0, 0),
+	FMS(312500000, P_UNIPHY1_GCC_RX_CLK, 1, 0, 0),
+	{ }
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port5_tx_clk_src[] = {
+	FMS(24000000, P_XO, 1, 0, 0),
+	FM(25000000, ftbl_nss_cc_port5_tx_clk_src_25),
+	FMS(78125000, P_UNIPHY1_GCC_TX_CLK, 4, 0, 0),
+	FM(125000000, ftbl_nss_cc_port5_tx_clk_src_125),
+	FMS(156250000, P_UNIPHY1_GCC_TX_CLK, 2, 0, 0),
+	FMS(312500000, P_UNIPHY1_GCC_TX_CLK, 1, 0, 0),
+	{ }
+};
+
+/*
+ * nss port 6, source can be uniphy2 only
+ */
+static const struct freq_conf ftbl_nss_cc_port6_rx_clk_src_25[] = {
+	// from 125Mhz
+	C(P_UNIPHY2_GCC_RX_CLK, 5, 0, 0),
+	// from 312.5Mhz
+	C(P_UNIPHY2_GCC_RX_CLK, 12.5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port6_rx_clk_src_125[] = {
+	// from 125Mhz
+	C(P_UNIPHY2_GCC_RX_CLK, 1, 0, 0),
+	// from 312.5Mhz
+	C(P_UNIPHY2_GCC_RX_CLK, 2.5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port6_tx_clk_src_25[] = {
+	// from 125Mhz
+	C(P_UNIPHY2_GCC_TX_CLK, 5, 0, 0),
+	// from 312.5Mhz
+	C(P_UNIPHY2_GCC_TX_CLK, 12.5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port6_tx_clk_src_125[] = {
+	// from 125Mhz
+	C(P_UNIPHY2_GCC_TX_CLK, 1, 0, 0),
+	// from 312.5Mhz
+	C(P_UNIPHY2_GCC_TX_CLK, 2.5, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port6_rx_clk_src[] = {
+	FMS(24000000, P_XO, 1, 0, 0),
+	FM(25000000, ftbl_nss_cc_port6_rx_clk_src_25),
+	FMS(78125000, P_UNIPHY2_GCC_RX_CLK, 4, 0, 0),
+	FM(125000000, ftbl_nss_cc_port6_rx_clk_src_125),
+	FMS(156250000, P_UNIPHY2_GCC_RX_CLK, 2, 0, 0),
+	FMS(312500000, P_UNIPHY2_GCC_RX_CLK, 1, 0, 0),
+	{ }
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port6_tx_clk_src[] = {
+	FMS(24000000, P_XO, 1, 0, 0),
+	FM(25000000, ftbl_nss_cc_port6_tx_clk_src_25),
+	FMS(78125000, P_UNIPHY2_GCC_TX_CLK, 4, 0, 0),
+	FM(125000000, ftbl_nss_cc_port6_tx_clk_src_125),
+	FMS(156250000, P_UNIPHY2_GCC_TX_CLK, 2, 0, 0),
+	FMS(312500000, P_UNIPHY2_GCC_TX_CLK, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 nss_cc_port1_rx_clk_src = {
+	.cmd_rcgr = 0x28110,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_cc_uni0rx_uni0tx_map,
+	.freq_multi_tbl = ftbl_nss_cc_port1234_rx_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_port1_rx_clk_src",
+		.parent_data = x0_bias_pll_cc_uni0rx_uni0tx,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_cc_uni0rx_uni0tx),
+		.flags = CLK_SET_RATE_NO_REPARENT,
+		.ops = &clk_rcg2_fm_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_port1_tx_clk_src = {
+	.cmd_rcgr = 0x2811c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_cc_uni0rx_uni0tx_map,
+	.freq_multi_tbl = ftbl_nss_cc_port1234_tx_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_port1_tx_clk_src",
+		.parent_data = x0_bias_pll_cc_uni0rx_uni0tx,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_cc_uni0rx_uni0tx),
+		.flags = CLK_SET_RATE_NO_REPARENT,
+		.ops = &clk_rcg2_fm_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_port2_rx_clk_src = {
+	.cmd_rcgr = 0x28128,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_cc_uni0rx_uni0tx_map,
+	.freq_multi_tbl = ftbl_nss_cc_port1234_rx_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_port2_rx_clk_src",
+		.parent_data = x0_bias_pll_cc_uni0rx_uni0tx,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_cc_uni0rx_uni0tx),
+		.flags = CLK_SET_RATE_NO_REPARENT,
+		.ops = &clk_rcg2_fm_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_port2_tx_clk_src = {
+	.cmd_rcgr = 0x28134,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_cc_uni0rx_uni0tx_map,
+	.freq_multi_tbl = ftbl_nss_cc_port1234_tx_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_port2_tx_clk_src",
+		.parent_data = x0_bias_pll_cc_uni0rx_uni0tx,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_cc_uni0rx_uni0tx),
+		.flags = CLK_SET_RATE_NO_REPARENT,
+		.ops = &clk_rcg2_fm_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_port3_rx_clk_src = {
+	.cmd_rcgr = 0x28140,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_cc_uni0rx_uni0tx_map,
+	.freq_multi_tbl = ftbl_nss_cc_port1234_rx_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_port3_rx_clk_src",
+		.parent_data = x0_bias_pll_cc_uni0rx_uni0tx,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_cc_uni0rx_uni0tx),
+		.flags = CLK_SET_RATE_NO_REPARENT,
+		.ops = &clk_rcg2_fm_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_port3_tx_clk_src = {
+	.cmd_rcgr = 0x2814c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_cc_uni0rx_uni0tx_map,
+	.freq_multi_tbl = ftbl_nss_cc_port1234_tx_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_port3_tx_clk_src",
+		.parent_data = x0_bias_pll_cc_uni0rx_uni0tx,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_cc_uni0rx_uni0tx),
+		.flags = CLK_SET_RATE_NO_REPARENT,
+		.ops = &clk_rcg2_fm_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_port4_rx_clk_src = {
+	.cmd_rcgr = 0x28158,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_cc_uni0rx_uni0tx_map,
+	.freq_multi_tbl = ftbl_nss_cc_port1234_rx_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_port4_rx_clk_src",
+		.parent_data = x0_bias_pll_cc_uni0rx_uni0tx,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_cc_uni0rx_uni0tx),
+		.flags = CLK_SET_RATE_NO_REPARENT,
+		.ops = &clk_rcg2_fm_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_port4_tx_clk_src = {
+	.cmd_rcgr = 0x28164,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_cc_uni0rx_uni0tx_map,
+	.freq_multi_tbl = ftbl_nss_cc_port1234_tx_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_port4_tx_clk_src",
+		.parent_data = x0_bias_pll_cc_uni0rx_uni0tx,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_cc_uni0rx_uni0tx),
+		.flags = CLK_SET_RATE_NO_REPARENT,
+		.ops = &clk_rcg2_fm_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_port5_rx_clk_src = {
+	.cmd_rcgr = 0x28170,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_cc_uni0rx_uni0tx_uni1rx_uni1tx_map,
+	.freq_multi_tbl = ftbl_nss_cc_port5_rx_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_port5_rx_clk_src",
+		.parent_data = x0_bias_pll_cc_uni0rx_uni0tx_uni1rx_uni1tx,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_cc_uni0rx_uni0tx_uni1rx_uni1tx),
+		.flags = CLK_SET_RATE_NO_REPARENT,
+		.ops = &clk_rcg2_fm_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_port5_tx_clk_src = {
+	.cmd_rcgr = 0x2817c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_cc_uni0rx_uni0tx_uni1rx_uni1tx_map,
+	.freq_multi_tbl = ftbl_nss_cc_port5_tx_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_port5_tx_clk_src",
+		.parent_data = x0_bias_pll_cc_uni0rx_uni0tx_uni1rx_uni1tx,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_cc_uni0rx_uni0tx_uni1rx_uni1tx),
+		.flags = CLK_SET_RATE_NO_REPARENT,
+		.ops = &clk_rcg2_fm_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_port6_rx_clk_src = {
+	.cmd_rcgr = 0x28188,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_cc_uni2rx_uni2tx_map,
+	.freq_multi_tbl = ftbl_nss_cc_port6_rx_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_port6_rx_clk_src",
+		.parent_data = x0_bias_pll_cc_uni2rx_uni2tx,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_cc_uni2rx_uni2tx),
+		.flags = CLK_SET_RATE_NO_REPARENT,
+		.ops = &clk_rcg2_fm_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_port6_tx_clk_src = {
+	.cmd_rcgr = 0x28194,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_cc_uni2rx_uni2tx_map,
+	.freq_multi_tbl = ftbl_nss_cc_port6_tx_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_port6_tx_clk_src",
+		.parent_data = x0_bias_pll_cc_uni2rx_uni2tx,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_cc_uni2rx_uni2tx),
+		.flags = CLK_SET_RATE_NO_REPARENT,
+		.ops = &clk_rcg2_fm_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_ppe_clk_src = {
+	.cmd_rcgr = 0x28204,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_ubi_gpll0_out_aux_bias_pll_cc_map,
+	.freq_tbl = ftbl_nss_cc_ce_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_ppe_clk_src",
+		.parent_data = x0_bias_pll_ubi_gpll0_out_aux_bias_pll_cc,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_ubi_gpll0_out_aux_bias_pll_cc),
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_nss_cc_ubi0_clk_src[] = {
+	F(24000000, P_XO, 1, 0, 0),
+	F(187200000, P_UBI32_PLL_OUT_MAIN, 8, 0, 0),
+	F(748800000, P_UBI32_PLL_OUT_MAIN, 2, 0, 0),
+	F(1497600000, P_UBI32_PLL_OUT_MAIN, 1, 0, 0),
+	F(1689600000, P_UBI32_PLL_OUT_MAIN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 nss_cc_ubi0_clk_src = {
+	.cmd_rcgr = 0x28704,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_ubi32_pll_out_main_gcc_gpll0_out_aux_map,
+	.freq_tbl = ftbl_nss_cc_ubi0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_ubi0_clk_src",
+		.parent_data = x0_ubi32_pll_out_main_gcc_gpll0_out_aux,
+		.num_parents = ARRAY_SIZE(x0_ubi32_pll_out_main_gcc_gpll0_out_aux),
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_ubi1_clk_src = {
+	.cmd_rcgr = 0x2870c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_ubi32_pll_out_main_gcc_gpll0_out_aux_map,
+	.freq_tbl = ftbl_nss_cc_ubi0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_ubi1_clk_src",
+		.parent_data = x0_ubi32_pll_out_main_gcc_gpll0_out_aux,
+		.num_parents = ARRAY_SIZE(x0_ubi32_pll_out_main_gcc_gpll0_out_aux),
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_ubi2_clk_src = {
+	.cmd_rcgr = 0x28714,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_ubi32_pll_out_main_gcc_gpll0_out_aux_map,
+	.freq_tbl = ftbl_nss_cc_ubi0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_ubi2_clk_src",
+		.parent_data = x0_ubi32_pll_out_main_gcc_gpll0_out_aux,
+		.num_parents = ARRAY_SIZE(x0_ubi32_pll_out_main_gcc_gpll0_out_aux),
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_ubi3_clk_src = {
+	.cmd_rcgr = 0x2871c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_ubi32_pll_out_main_gcc_gpll0_out_aux_map,
+	.freq_tbl = ftbl_nss_cc_ubi0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_ubi3_clk_src",
+		.parent_data = x0_ubi32_pll_out_main_gcc_gpll0_out_aux,
+		.num_parents = ARRAY_SIZE(x0_ubi32_pll_out_main_gcc_gpll0_out_aux),
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_ubi_axi_clk_src = {
+	.cmd_rcgr = 0x28724,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_ubi32_pll_out_main_gcc_gpll0_out_aux_bias_pll_cc_map,
+	.freq_tbl = ftbl_nss_cc_clc_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_ubi_axi_clk_src",
+		.parent_data = x0_ubi32_pll_out_main_gcc_gpll0_out_aux_bias_pll_cc,
+		.num_parents = ARRAY_SIZE(x0_ubi32_pll_out_main_gcc_gpll0_out_aux_bias_pll_cc),
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static struct clk_rcg2 nss_cc_ubi_nc_axi_bfdcd_clk_src = {
+	.cmd_rcgr = 0x2872c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = x0_bias_pll_ubi_gpll0_out_aux_bias_pll_cc_map,
+	.freq_tbl = ftbl_nss_cc_ce_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "nss_cc_ubi_nc_axi_bfdcd_clk_src",
+		.parent_data = x0_bias_pll_ubi_gpll0_out_aux_bias_pll_cc,
+		.num_parents = ARRAY_SIZE(x0_bias_pll_ubi_gpll0_out_aux_bias_pll_cc),
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+/*
+ * Post-dividers for the Ethernet port1..port6 RX/TX clocks.  Each is a
+ * 9-bit divider field at bit 0 of its register, parented to the
+ * corresponding nss_cc_portN_{rx,tx}_clk_src RCG; CLK_SET_RATE_PARENT
+ * forwards rate requests up to that RCG.
+ */
+static struct clk_regmap_div nss_cc_port1_rx_div_clk_src = {
+	.reg = 0x28118,
+	.shift = 0,
+	.width = 9,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_port1_rx_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_port1_rx_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_port1_tx_div_clk_src = {
+	.reg = 0x28124,
+	.shift = 0,
+	.width = 9,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_port1_tx_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_port1_tx_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_port2_rx_div_clk_src = {
+	.reg = 0x28130,
+	.shift = 0,
+	.width = 9,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_port2_rx_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_port2_rx_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_port2_tx_div_clk_src = {
+	.reg = 0x2813c,
+	.shift = 0,
+	.width = 9,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_port2_tx_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_port2_tx_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_port3_rx_div_clk_src = {
+	.reg = 0x28148,
+	.shift = 0,
+	.width = 9,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_port3_rx_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_port3_rx_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_port3_tx_div_clk_src = {
+	.reg = 0x28154,
+	.shift = 0,
+	.width = 9,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_port3_tx_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_port3_tx_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_port4_rx_div_clk_src = {
+	.reg = 0x28160,
+	.shift = 0,
+	.width = 9,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_port4_rx_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_port4_rx_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_port4_tx_div_clk_src = {
+	.reg = 0x2816c,
+	.shift = 0,
+	.width = 9,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_port4_tx_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_port4_tx_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_port5_rx_div_clk_src = {
+	.reg = 0x28178,
+	.shift = 0,
+	.width = 9,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_port5_rx_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_port5_rx_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_port5_tx_div_clk_src = {
+	.reg = 0x28184,
+	.shift = 0,
+	.width = 9,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_port5_tx_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_port5_tx_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_port6_rx_div_clk_src = {
+	.reg = 0x28190,
+	.shift = 0,
+	.width = 9,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_port6_rx_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_port6_rx_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_port6_tx_div_clk_src = {
+	.reg = 0x2819c,
+	.shift = 0,
+	.width = 9,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_port6_tx_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_port6_tx_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ops,
+	},
+};
+
+/*
+ * Per-core dividers for the UBI32 cores 0..3, fed by the matching
+ * nss_cc_ubiN_clk_src.  4-bit divider field at bit 0; read-only ops,
+ * so Linux observes but does not program the divider (presumably
+ * firmware-owned — confirm against the NSS firmware docs).
+ */
+static struct clk_regmap_div nss_cc_ubi0_div_clk_src = {
+	.reg = 0x287a4,
+	.shift = 0,
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_ubi0_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_ubi0_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ro_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_ubi1_div_clk_src = {
+	.reg = 0x287a8,
+	.shift = 0,
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_ubi1_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_ubi1_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ro_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_ubi2_div_clk_src = {
+	.reg = 0x287ac,
+	.shift = 0,
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_ubi2_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_ubi2_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ro_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_ubi3_div_clk_src = {
+	.reg = 0x287b0,
+	.shift = 0,
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_ubi3_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_ubi3_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ro_ops,
+	},
+};
+
+/*
+ * PTP reference dividers for XGMAC instances 0..5.  All six divide the
+ * shared nss_cc_ppe_clk_src; 4-bit read-only divider field at bit 0
+ * (registers 0x28214..0x28228, one per XGMAC).
+ */
+static struct clk_regmap_div nss_cc_xgmac0_ptp_ref_div_clk_src = {
+	.reg = 0x28214,
+	.shift = 0,
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_xgmac0_ptp_ref_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_ppe_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ro_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_xgmac1_ptp_ref_div_clk_src = {
+	.reg = 0x28218,
+	.shift = 0,
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_xgmac1_ptp_ref_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_ppe_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ro_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_xgmac2_ptp_ref_div_clk_src = {
+	.reg = 0x2821c,
+	.shift = 0,
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_xgmac2_ptp_ref_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_ppe_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ro_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_xgmac3_ptp_ref_div_clk_src = {
+	.reg = 0x28220,
+	.shift = 0,
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_xgmac3_ptp_ref_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_ppe_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ro_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_xgmac4_ptp_ref_div_clk_src = {
+	.reg = 0x28224,
+	.shift = 0,
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_xgmac4_ptp_ref_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_ppe_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ro_ops,
+	},
+};
+
+static struct clk_regmap_div nss_cc_xgmac5_ptp_ref_div_clk_src = {
+	.reg = 0x28228,
+	.shift = 0,
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "nss_cc_xgmac5_ptp_ref_div_clk_src",
+		.parent_data = &(const struct clk_parent_data){
+			.hw = &nss_cc_ppe_clk_src.clkr.hw,
+		},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_regmap_div_ro_ops,
+	},
+};
+
+/* APB interface clock for the crypto engine, gated at 0x2840c. */
+static struct clk_branch nss_cc_ce_apb_clk = {
+	.halt_reg = 0x2840c,
+	/*
+	 * Every sibling branch in this file sets .halt_check explicitly;
+	 * this one relied on the implicit default.  BRANCH_HALT is that
+	 * default, so adding it changes nothing at runtime — it only
+	 * makes the table uniform and the intent visible.
+	 */
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2840c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ce_apb_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ce_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/*
+ * Branch gates for the crypto engine (CE), CLC, crypto block, HAQ and
+ * IMEM.  Each gate uses the same register for enable and halt status
+ * (bit 0) and inherits its rate from the named RCG parent.
+ */
+static struct clk_branch nss_cc_ce_axi_clk = {
+	.halt_reg = 0x28410,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28410,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ce_axi_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ce_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_clc_axi_clk = {
+	.halt_reg = 0x2860c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2860c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_clc_axi_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_clc_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_crypto_clk = {
+	.halt_reg = 0x1601c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1601c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_crypto_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_crypto_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_crypto_ppe_clk = {
+	.halt_reg = 0x28240,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28240,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_crypto_ppe_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ppe_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_haq_ahb_clk = {
+	.halt_reg = 0x2830c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2830c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_haq_ahb_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_haq_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_haq_axi_clk = {
+	.halt_reg = 0x28310,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28310,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_haq_axi_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_haq_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_imem_ahb_clk = {
+	.halt_reg = 0xe018,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xe018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_imem_ahb_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_cfg_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_imem_qsb_clk = {
+	.halt_reg = 0xe010,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xe010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_imem_qsb_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_imem_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/*
+ * CSR register clock plus the NSSNOC-side gates: the nss_cc_nssnoc_*
+ * branches gate the fabric/interconnect view of the same blocks gated
+ * above (CE, CLC, crypto, HAQ, IMEM, PPE, UBI32), each parented to the
+ * same RCG as its device-side counterpart.
+ */
+static struct clk_branch nss_cc_nss_csr_clk = {
+	.halt_reg = 0x281d0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x281d0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_nss_csr_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_cfg_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_nssnoc_ce_apb_clk = {
+	.halt_reg = 0x28414,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28414,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_nssnoc_ce_apb_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ce_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_nssnoc_ce_axi_clk = {
+	.halt_reg = 0x28418,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28418,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_nssnoc_ce_axi_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ce_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_nssnoc_clc_axi_clk = {
+	.halt_reg = 0x28610,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28610,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_nssnoc_clc_axi_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_clc_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_nssnoc_crypto_clk = {
+	.halt_reg = 0x16020,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x16020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_nssnoc_crypto_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_crypto_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_nssnoc_haq_ahb_clk = {
+	.halt_reg = 0x28314,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28314,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_nssnoc_haq_ahb_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_haq_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_nssnoc_haq_axi_clk = {
+	.halt_reg = 0x28318,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28318,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_nssnoc_haq_axi_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_haq_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_nssnoc_imem_ahb_clk = {
+	.halt_reg = 0xe01c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xe01c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_nssnoc_imem_ahb_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_cfg_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_nssnoc_imem_qsb_clk = {
+	.halt_reg = 0xe014,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xe014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_nssnoc_imem_qsb_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_imem_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_nssnoc_nss_csr_clk = {
+	.halt_reg = 0x281d4,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x281d4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_nssnoc_nss_csr_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_cfg_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_nssnoc_ppe_cfg_clk = {
+	.halt_reg = 0x28248,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28248,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_nssnoc_ppe_cfg_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ppe_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_nssnoc_ppe_clk = {
+	.halt_reg = 0x28244,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28244,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_nssnoc_ppe_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ppe_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_nssnoc_ubi32_ahb0_clk = {
+	.halt_reg = 0x28788,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28788,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_nssnoc_ubi32_ahb0_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_cfg_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_nssnoc_ubi32_axi0_clk = {
+	.halt_reg = 0x287a0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x287a0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_nssnoc_ubi32_axi0_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_nssnoc_ubi32_int0_ahb_clk = {
+	.halt_reg = 0x2878c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2878c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_nssnoc_ubi32_int0_ahb_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_int_cfg_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_nssnoc_ubi32_nc_axi0_1_clk = {
+	.halt_reg = 0x287bc,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x287bc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_nssnoc_ubi32_nc_axi0_1_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_nssnoc_ubi32_nc_axi0_clk = {
+	.halt_reg = 0x28764,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28764,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_nssnoc_ubi32_nc_axi0_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/*
+ * Per-port branch gates for Ethernet ports 1..6.  Each port has a MAC
+ * gate (parented to the shared PPE clock) and RX/TX gates (parented to
+ * that port's post-divider defined earlier in this file).
+ */
+static struct clk_branch nss_cc_port1_mac_clk = {
+	.halt_reg = 0x2824c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2824c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port1_mac_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ppe_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_port1_rx_clk = {
+	.halt_reg = 0x281a0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x281a0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port1_rx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port1_rx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_port1_tx_clk = {
+	.halt_reg = 0x281a4,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x281a4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port1_tx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port1_tx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_port2_mac_clk = {
+	.halt_reg = 0x28250,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28250,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port2_mac_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ppe_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_port2_rx_clk = {
+	.halt_reg = 0x281a8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x281a8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port2_rx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port2_rx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_port2_tx_clk = {
+	.halt_reg = 0x281ac,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x281ac,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port2_tx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port2_tx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_port3_mac_clk = {
+	.halt_reg = 0x28254,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28254,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port3_mac_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ppe_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_port3_rx_clk = {
+	.halt_reg = 0x281b0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x281b0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port3_rx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port3_rx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_port3_tx_clk = {
+	.halt_reg = 0x281b4,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x281b4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port3_tx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port3_tx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_port4_mac_clk = {
+	.halt_reg = 0x28258,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28258,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port4_mac_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ppe_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_port4_rx_clk = {
+	.halt_reg = 0x281b8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x281b8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port4_rx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port4_rx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_port4_tx_clk = {
+	.halt_reg = 0x281bc,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x281bc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port4_tx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port4_tx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_port5_mac_clk = {
+	.halt_reg = 0x2825c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2825c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port5_mac_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ppe_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_port5_rx_clk = {
+	.halt_reg = 0x281c0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x281c0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port5_rx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port5_rx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_port5_tx_clk = {
+	.halt_reg = 0x281c4,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x281c4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port5_tx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port5_tx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_port6_mac_clk = {
+	.halt_reg = 0x28260,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28260,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port6_mac_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ppe_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_port6_rx_clk = {
+	.halt_reg = 0x281c8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x281c8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port6_rx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port6_rx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_port6_tx_clk = {
+	.halt_reg = 0x281cc,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x281cc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_port6_tx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port6_tx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/*
+ * PPE (packet processing engine) branch gates: EDMA data/config paths
+ * and the internal switch (core, cfg, BTQ queue, IPE).  All share
+ * nss_cc_ppe_clk_src as parent.
+ */
+static struct clk_branch nss_cc_ppe_edma_cfg_clk = {
+	.halt_reg = 0x2823c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2823c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ppe_edma_cfg_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ppe_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ppe_edma_clk = {
+	.halt_reg = 0x28238,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28238,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ppe_edma_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ppe_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ppe_switch_btq_clk = {
+	.halt_reg = 0x2827c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2827c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ppe_switch_btq_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ppe_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ppe_switch_cfg_clk = {
+	.halt_reg = 0x28234,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28234,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ppe_switch_cfg_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ppe_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ppe_switch_clk = {
+	.halt_reg = 0x28230,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28230,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ppe_switch_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ppe_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ppe_switch_ipe_clk = {
+	.halt_reg = 0x2822c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2822c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ppe_switch_ipe_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ppe_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/*
+ * UBI32 bus-interface gates: one AHB gate per core 0..3 (parented to
+ * the config clock) and one AXI gate per core 0..3 (parented to the
+ * shared UBI AXI clock).
+ */
+static struct clk_branch nss_cc_ubi32_ahb0_clk = {
+	.halt_reg = 0x28768,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28768,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_ahb0_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_cfg_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_ahb1_clk = {
+	.halt_reg = 0x28770,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28770,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_ahb1_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_cfg_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_ahb2_clk = {
+	.halt_reg = 0x28778,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28778,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_ahb2_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_cfg_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_ahb3_clk = {
+	.halt_reg = 0x28780,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28780,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_ahb3_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_cfg_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_axi0_clk = {
+	.halt_reg = 0x28790,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28790,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_axi0_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_axi1_clk = {
+	.halt_reg = 0x28794,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28794,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_axi1_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_axi2_clk = {
+	.halt_reg = 0x28798,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28798,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_axi2_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_axi3_clk = {
+	.halt_reg = 0x2879c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2879c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_axi3_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_core0_clk = {
+	.halt_reg = 0x28734,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28734,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_core0_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi0_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_core1_clk = {
+	.halt_reg = 0x28738,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28738,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_core1_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi1_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_core2_clk = {
+	.halt_reg = 0x2873c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2873c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_core2_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi2_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_core3_clk = {
+	.halt_reg = 0x28740,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28740,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_core3_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi3_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_intr0_ahb_clk = {
+	.halt_reg = 0x2876c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2876c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_intr0_ahb_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_int_cfg_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_intr1_ahb_clk = {
+	.halt_reg = 0x28774,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28774,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_intr1_ahb_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_int_cfg_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_intr2_ahb_clk = {
+	.halt_reg = 0x2877c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2877c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_intr2_ahb_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_int_cfg_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_intr3_ahb_clk = {
+	.halt_reg = 0x28784,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28784,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_intr3_ahb_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_int_cfg_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_nc_axi0_clk = {
+	.halt_reg = 0x28744,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28744,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_nc_axi0_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_nc_axi1_clk = {
+	.halt_reg = 0x2874c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2874c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_nc_axi1_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_nc_axi2_clk = {
+	.halt_reg = 0x28754,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28754,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_nc_axi2_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_nc_axi3_clk = {
+	.halt_reg = 0x2875c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2875c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_nc_axi3_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_utcm0_clk = {
+	.halt_reg = 0x28748,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28748,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_utcm0_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_utcm1_clk = {
+	.halt_reg = 0x28750,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28750,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_utcm1_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_utcm2_clk = {
+	.halt_reg = 0x28758,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28758,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_utcm2_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_ubi32_utcm3_clk = {
+	.halt_reg = 0x28760,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28760,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_ubi32_utcm3_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_uniphy_port1_rx_clk = {
+	.halt_reg = 0x28904,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28904,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_uniphy_port1_rx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port1_rx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_uniphy_port1_tx_clk = {
+	.halt_reg = 0x28908,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28908,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_uniphy_port1_tx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port1_tx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_uniphy_port2_rx_clk = {
+	.halt_reg = 0x2890c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2890c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_uniphy_port2_rx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port2_rx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_uniphy_port2_tx_clk = {
+	.halt_reg = 0x28910,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28910,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_uniphy_port2_tx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port2_tx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_uniphy_port3_rx_clk = {
+	.halt_reg = 0x28914,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28914,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_uniphy_port3_rx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port3_rx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_uniphy_port3_tx_clk = {
+	.halt_reg = 0x28918,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28918,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_uniphy_port3_tx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port3_tx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_uniphy_port4_rx_clk = {
+	.halt_reg = 0x2891c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2891c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_uniphy_port4_rx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port4_rx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_uniphy_port4_tx_clk = {
+	.halt_reg = 0x28920,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28920,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_uniphy_port4_tx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port4_tx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_uniphy_port5_rx_clk = {
+	.halt_reg = 0x28924,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28924,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_uniphy_port5_rx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port5_rx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_uniphy_port5_tx_clk = {
+	.halt_reg = 0x28928,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28928,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_uniphy_port5_tx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port5_tx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_uniphy_port6_rx_clk = {
+	.halt_reg = 0x2892c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2892c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_uniphy_port6_rx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port6_rx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_uniphy_port6_tx_clk = {
+	.halt_reg = 0x28930,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28930,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_uniphy_port6_tx_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_port6_tx_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/*
+ * XGMAC0..XGMAC5 PTP reference branch clocks, each fed by its matching
+ * *_ptp_ref_div_clk_src divider.  Formatted like the other branch
+ * clocks in this file (single-line .hw initializer).
+ */
+static struct clk_branch nss_cc_xgmac0_ptp_ref_clk = {
+	.halt_reg = 0x28264,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28264,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_xgmac0_ptp_ref_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_xgmac0_ptp_ref_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_xgmac1_ptp_ref_clk = {
+	.halt_reg = 0x28268,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28268,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_xgmac1_ptp_ref_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_xgmac1_ptp_ref_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_xgmac2_ptp_ref_clk = {
+	.halt_reg = 0x2826c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2826c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_xgmac2_ptp_ref_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_xgmac2_ptp_ref_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_xgmac3_ptp_ref_clk = {
+	.halt_reg = 0x28270,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28270,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_xgmac3_ptp_ref_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_xgmac3_ptp_ref_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_xgmac4_ptp_ref_clk = {
+	.halt_reg = 0x28274,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28274,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_xgmac4_ptp_ref_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_xgmac4_ptp_ref_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch nss_cc_xgmac5_ptp_ref_clk = {
+	.halt_reg = 0x28278,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x28278,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "nss_cc_xgmac5_ptp_ref_clk",
+			.parent_data = &(const struct clk_parent_data){
+				.hw = &nss_cc_xgmac5_ptp_ref_div_clk_src.clkr.hw,
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap *nss_cc_ipq9574_clocks[] = {
+	[NSS_CC_CE_APB_CLK] = &nss_cc_ce_apb_clk.clkr,
+	[NSS_CC_CE_AXI_CLK] = &nss_cc_ce_axi_clk.clkr,
+	[NSS_CC_CE_CLK_SRC] = &nss_cc_ce_clk_src.clkr,
+	[NSS_CC_CFG_CLK_SRC] = &nss_cc_cfg_clk_src.clkr,
+	[NSS_CC_CLC_AXI_CLK] = &nss_cc_clc_axi_clk.clkr,
+	[NSS_CC_CLC_CLK_SRC] = &nss_cc_clc_clk_src.clkr,
+	[NSS_CC_CRYPTO_CLK] = &nss_cc_crypto_clk.clkr,
+	[NSS_CC_CRYPTO_CLK_SRC] = &nss_cc_crypto_clk_src.clkr,
+	[NSS_CC_CRYPTO_PPE_CLK] = &nss_cc_crypto_ppe_clk.clkr,
+	[NSS_CC_HAQ_AHB_CLK] = &nss_cc_haq_ahb_clk.clkr,
+	[NSS_CC_HAQ_AXI_CLK] = &nss_cc_haq_axi_clk.clkr,
+	[NSS_CC_HAQ_CLK_SRC] = &nss_cc_haq_clk_src.clkr,
+	[NSS_CC_IMEM_AHB_CLK] = &nss_cc_imem_ahb_clk.clkr,
+	[NSS_CC_IMEM_CLK_SRC] = &nss_cc_imem_clk_src.clkr,
+	[NSS_CC_IMEM_QSB_CLK] = &nss_cc_imem_qsb_clk.clkr,
+	[NSS_CC_INT_CFG_CLK_SRC] = &nss_cc_int_cfg_clk_src.clkr,
+	[NSS_CC_NSS_CSR_CLK] = &nss_cc_nss_csr_clk.clkr,
+	[NSS_CC_NSSNOC_CE_APB_CLK] = &nss_cc_nssnoc_ce_apb_clk.clkr,
+	[NSS_CC_NSSNOC_CE_AXI_CLK] = &nss_cc_nssnoc_ce_axi_clk.clkr,
+	[NSS_CC_NSSNOC_CLC_AXI_CLK] = &nss_cc_nssnoc_clc_axi_clk.clkr,
+	[NSS_CC_NSSNOC_CRYPTO_CLK] = &nss_cc_nssnoc_crypto_clk.clkr,
+	[NSS_CC_NSSNOC_HAQ_AHB_CLK] = &nss_cc_nssnoc_haq_ahb_clk.clkr,
+	[NSS_CC_NSSNOC_HAQ_AXI_CLK] = &nss_cc_nssnoc_haq_axi_clk.clkr,
+	[NSS_CC_NSSNOC_IMEM_AHB_CLK] = &nss_cc_nssnoc_imem_ahb_clk.clkr,
+	[NSS_CC_NSSNOC_IMEM_QSB_CLK] = &nss_cc_nssnoc_imem_qsb_clk.clkr,
+	[NSS_CC_NSSNOC_NSS_CSR_CLK] = &nss_cc_nssnoc_nss_csr_clk.clkr,
+	[NSS_CC_NSSNOC_PPE_CFG_CLK] = &nss_cc_nssnoc_ppe_cfg_clk.clkr,
+	[NSS_CC_NSSNOC_PPE_CLK] = &nss_cc_nssnoc_ppe_clk.clkr,
+	[NSS_CC_NSSNOC_UBI32_AHB0_CLK] = &nss_cc_nssnoc_ubi32_ahb0_clk.clkr,
+	[NSS_CC_NSSNOC_UBI32_AXI0_CLK] = &nss_cc_nssnoc_ubi32_axi0_clk.clkr,
+	[NSS_CC_NSSNOC_UBI32_INT0_AHB_CLK] =
+		&nss_cc_nssnoc_ubi32_int0_ahb_clk.clkr,
+	[NSS_CC_NSSNOC_UBI32_NC_AXI0_1_CLK] =
+		&nss_cc_nssnoc_ubi32_nc_axi0_1_clk.clkr,
+	[NSS_CC_NSSNOC_UBI32_NC_AXI0_CLK] =
+		&nss_cc_nssnoc_ubi32_nc_axi0_clk.clkr,
+	[NSS_CC_PORT1_MAC_CLK] = &nss_cc_port1_mac_clk.clkr,
+	[NSS_CC_PORT1_RX_CLK] = &nss_cc_port1_rx_clk.clkr,
+	[NSS_CC_PORT1_RX_CLK_SRC] = &nss_cc_port1_rx_clk_src.clkr,
+	[NSS_CC_PORT1_RX_DIV_CLK_SRC] = &nss_cc_port1_rx_div_clk_src.clkr,
+	[NSS_CC_PORT1_TX_CLK] = &nss_cc_port1_tx_clk.clkr,
+	[NSS_CC_PORT1_TX_CLK_SRC] = &nss_cc_port1_tx_clk_src.clkr,
+	[NSS_CC_PORT1_TX_DIV_CLK_SRC] = &nss_cc_port1_tx_div_clk_src.clkr,
+	[NSS_CC_PORT2_MAC_CLK] = &nss_cc_port2_mac_clk.clkr,
+	[NSS_CC_PORT2_RX_CLK] = &nss_cc_port2_rx_clk.clkr,
+	[NSS_CC_PORT2_RX_CLK_SRC] = &nss_cc_port2_rx_clk_src.clkr,
+	[NSS_CC_PORT2_RX_DIV_CLK_SRC] = &nss_cc_port2_rx_div_clk_src.clkr,
+	[NSS_CC_PORT2_TX_CLK] = &nss_cc_port2_tx_clk.clkr,
+	[NSS_CC_PORT2_TX_CLK_SRC] = &nss_cc_port2_tx_clk_src.clkr,
+	[NSS_CC_PORT2_TX_DIV_CLK_SRC] = &nss_cc_port2_tx_div_clk_src.clkr,
+	[NSS_CC_PORT3_MAC_CLK] = &nss_cc_port3_mac_clk.clkr,
+	[NSS_CC_PORT3_RX_CLK] = &nss_cc_port3_rx_clk.clkr,
+	[NSS_CC_PORT3_RX_CLK_SRC] = &nss_cc_port3_rx_clk_src.clkr,
+	[NSS_CC_PORT3_RX_DIV_CLK_SRC] = &nss_cc_port3_rx_div_clk_src.clkr,
+	[NSS_CC_PORT3_TX_CLK] = &nss_cc_port3_tx_clk.clkr,
+	[NSS_CC_PORT3_TX_CLK_SRC] = &nss_cc_port3_tx_clk_src.clkr,
+	[NSS_CC_PORT3_TX_DIV_CLK_SRC] = &nss_cc_port3_tx_div_clk_src.clkr,
+	[NSS_CC_PORT4_MAC_CLK] = &nss_cc_port4_mac_clk.clkr,
+	[NSS_CC_PORT4_RX_CLK] = &nss_cc_port4_rx_clk.clkr,
+	[NSS_CC_PORT4_RX_CLK_SRC] = &nss_cc_port4_rx_clk_src.clkr,
+	[NSS_CC_PORT4_RX_DIV_CLK_SRC] = &nss_cc_port4_rx_div_clk_src.clkr,
+	[NSS_CC_PORT4_TX_CLK] = &nss_cc_port4_tx_clk.clkr,
+	[NSS_CC_PORT4_TX_CLK_SRC] = &nss_cc_port4_tx_clk_src.clkr,
+	[NSS_CC_PORT4_TX_DIV_CLK_SRC] = &nss_cc_port4_tx_div_clk_src.clkr,
+	[NSS_CC_PORT5_MAC_CLK] = &nss_cc_port5_mac_clk.clkr,
+	[NSS_CC_PORT5_RX_CLK] = &nss_cc_port5_rx_clk.clkr,
+	[NSS_CC_PORT5_RX_CLK_SRC] = &nss_cc_port5_rx_clk_src.clkr,
+	[NSS_CC_PORT5_RX_DIV_CLK_SRC] = &nss_cc_port5_rx_div_clk_src.clkr,
+	[NSS_CC_PORT5_TX_CLK] = &nss_cc_port5_tx_clk.clkr,
+	[NSS_CC_PORT5_TX_CLK_SRC] = &nss_cc_port5_tx_clk_src.clkr,
+	[NSS_CC_PORT5_TX_DIV_CLK_SRC] = &nss_cc_port5_tx_div_clk_src.clkr,
+	[NSS_CC_PORT6_MAC_CLK] = &nss_cc_port6_mac_clk.clkr,
+	[NSS_CC_PORT6_RX_CLK] = &nss_cc_port6_rx_clk.clkr,
+	[NSS_CC_PORT6_RX_CLK_SRC] = &nss_cc_port6_rx_clk_src.clkr,
+	[NSS_CC_PORT6_RX_DIV_CLK_SRC] = &nss_cc_port6_rx_div_clk_src.clkr,
+	[NSS_CC_PORT6_TX_CLK] = &nss_cc_port6_tx_clk.clkr,
+	[NSS_CC_PORT6_TX_CLK_SRC] = &nss_cc_port6_tx_clk_src.clkr,
+	[NSS_CC_PORT6_TX_DIV_CLK_SRC] = &nss_cc_port6_tx_div_clk_src.clkr,
+	[NSS_CC_PPE_CLK_SRC] = &nss_cc_ppe_clk_src.clkr,
+	[NSS_CC_PPE_EDMA_CFG_CLK] = &nss_cc_ppe_edma_cfg_clk.clkr,
+	[NSS_CC_PPE_EDMA_CLK] = &nss_cc_ppe_edma_clk.clkr,
+	[NSS_CC_PPE_SWITCH_BTQ_CLK] = &nss_cc_ppe_switch_btq_clk.clkr,
+	[NSS_CC_PPE_SWITCH_CFG_CLK] = &nss_cc_ppe_switch_cfg_clk.clkr,
+	[NSS_CC_PPE_SWITCH_CLK] = &nss_cc_ppe_switch_clk.clkr,
+	[NSS_CC_PPE_SWITCH_IPE_CLK] = &nss_cc_ppe_switch_ipe_clk.clkr,
+	[NSS_CC_UBI0_CLK_SRC] = &nss_cc_ubi0_clk_src.clkr,
+	[NSS_CC_UBI0_DIV_CLK_SRC] = &nss_cc_ubi0_div_clk_src.clkr,
+	[NSS_CC_UBI1_CLK_SRC] = &nss_cc_ubi1_clk_src.clkr,
+	[NSS_CC_UBI1_DIV_CLK_SRC] = &nss_cc_ubi1_div_clk_src.clkr,
+	[NSS_CC_UBI2_CLK_SRC] = &nss_cc_ubi2_clk_src.clkr,
+	[NSS_CC_UBI2_DIV_CLK_SRC] = &nss_cc_ubi2_div_clk_src.clkr,
+	[NSS_CC_UBI32_AHB0_CLK] = &nss_cc_ubi32_ahb0_clk.clkr,
+	[NSS_CC_UBI32_AHB1_CLK] = &nss_cc_ubi32_ahb1_clk.clkr,
+	[NSS_CC_UBI32_AHB2_CLK] = &nss_cc_ubi32_ahb2_clk.clkr,
+	[NSS_CC_UBI32_AHB3_CLK] = &nss_cc_ubi32_ahb3_clk.clkr,
+	[NSS_CC_UBI32_AXI0_CLK] = &nss_cc_ubi32_axi0_clk.clkr,
+	[NSS_CC_UBI32_AXI1_CLK] = &nss_cc_ubi32_axi1_clk.clkr,
+	[NSS_CC_UBI32_AXI2_CLK] = &nss_cc_ubi32_axi2_clk.clkr,
+	[NSS_CC_UBI32_AXI3_CLK] = &nss_cc_ubi32_axi3_clk.clkr,
+	[NSS_CC_UBI32_CORE0_CLK] = &nss_cc_ubi32_core0_clk.clkr,
+	[NSS_CC_UBI32_CORE1_CLK] = &nss_cc_ubi32_core1_clk.clkr,
+	[NSS_CC_UBI32_CORE2_CLK] = &nss_cc_ubi32_core2_clk.clkr,
+	[NSS_CC_UBI32_CORE3_CLK] = &nss_cc_ubi32_core3_clk.clkr,
+	[NSS_CC_UBI32_INTR0_AHB_CLK] = &nss_cc_ubi32_intr0_ahb_clk.clkr,
+	[NSS_CC_UBI32_INTR1_AHB_CLK] = &nss_cc_ubi32_intr1_ahb_clk.clkr,
+	[NSS_CC_UBI32_INTR2_AHB_CLK] = &nss_cc_ubi32_intr2_ahb_clk.clkr,
+	[NSS_CC_UBI32_INTR3_AHB_CLK] = &nss_cc_ubi32_intr3_ahb_clk.clkr,
+	[NSS_CC_UBI32_NC_AXI0_CLK] = &nss_cc_ubi32_nc_axi0_clk.clkr,
+	[NSS_CC_UBI32_NC_AXI1_CLK] = &nss_cc_ubi32_nc_axi1_clk.clkr,
+	[NSS_CC_UBI32_NC_AXI2_CLK] = &nss_cc_ubi32_nc_axi2_clk.clkr,
+	[NSS_CC_UBI32_NC_AXI3_CLK] = &nss_cc_ubi32_nc_axi3_clk.clkr,
+	[NSS_CC_UBI32_UTCM0_CLK] = &nss_cc_ubi32_utcm0_clk.clkr,
+	[NSS_CC_UBI32_UTCM1_CLK] = &nss_cc_ubi32_utcm1_clk.clkr,
+	[NSS_CC_UBI32_UTCM2_CLK] = &nss_cc_ubi32_utcm2_clk.clkr,
+	[NSS_CC_UBI32_UTCM3_CLK] = &nss_cc_ubi32_utcm3_clk.clkr,
+	[NSS_CC_UBI3_CLK_SRC] = &nss_cc_ubi3_clk_src.clkr,
+	[NSS_CC_UBI3_DIV_CLK_SRC] = &nss_cc_ubi3_div_clk_src.clkr,
+	[NSS_CC_UBI_AXI_CLK_SRC] = &nss_cc_ubi_axi_clk_src.clkr,
+	[NSS_CC_UBI_NC_AXI_BFDCD_CLK_SRC] =
+		&nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr,
+	[NSS_CC_UNIPHY_PORT1_RX_CLK] = &nss_cc_uniphy_port1_rx_clk.clkr,
+	[NSS_CC_UNIPHY_PORT1_TX_CLK] = &nss_cc_uniphy_port1_tx_clk.clkr,
+	[NSS_CC_UNIPHY_PORT2_RX_CLK] = &nss_cc_uniphy_port2_rx_clk.clkr,
+	[NSS_CC_UNIPHY_PORT2_TX_CLK] = &nss_cc_uniphy_port2_tx_clk.clkr,
+	[NSS_CC_UNIPHY_PORT3_RX_CLK] = &nss_cc_uniphy_port3_rx_clk.clkr,
+	[NSS_CC_UNIPHY_PORT3_TX_CLK] = &nss_cc_uniphy_port3_tx_clk.clkr,
+	[NSS_CC_UNIPHY_PORT4_RX_CLK] = &nss_cc_uniphy_port4_rx_clk.clkr,
+	[NSS_CC_UNIPHY_PORT4_TX_CLK] = &nss_cc_uniphy_port4_tx_clk.clkr,
+	[NSS_CC_UNIPHY_PORT5_RX_CLK] = &nss_cc_uniphy_port5_rx_clk.clkr,
+	[NSS_CC_UNIPHY_PORT5_TX_CLK] = &nss_cc_uniphy_port5_tx_clk.clkr,
+	[NSS_CC_UNIPHY_PORT6_RX_CLK] = &nss_cc_uniphy_port6_rx_clk.clkr,
+	[NSS_CC_UNIPHY_PORT6_TX_CLK] = &nss_cc_uniphy_port6_tx_clk.clkr,
+	[NSS_CC_XGMAC0_PTP_REF_CLK] = &nss_cc_xgmac0_ptp_ref_clk.clkr,
+	[NSS_CC_XGMAC0_PTP_REF_DIV_CLK_SRC] =
+		&nss_cc_xgmac0_ptp_ref_div_clk_src.clkr,
+	[NSS_CC_XGMAC1_PTP_REF_CLK] = &nss_cc_xgmac1_ptp_ref_clk.clkr,
+	[NSS_CC_XGMAC1_PTP_REF_DIV_CLK_SRC] =
+		&nss_cc_xgmac1_ptp_ref_div_clk_src.clkr,
+	[NSS_CC_XGMAC2_PTP_REF_CLK] = &nss_cc_xgmac2_ptp_ref_clk.clkr,
+	[NSS_CC_XGMAC2_PTP_REF_DIV_CLK_SRC] =
+		&nss_cc_xgmac2_ptp_ref_div_clk_src.clkr,
+	[NSS_CC_XGMAC3_PTP_REF_CLK] = &nss_cc_xgmac3_ptp_ref_clk.clkr,
+	[NSS_CC_XGMAC3_PTP_REF_DIV_CLK_SRC] =
+		&nss_cc_xgmac3_ptp_ref_div_clk_src.clkr,
+	[NSS_CC_XGMAC4_PTP_REF_CLK] = &nss_cc_xgmac4_ptp_ref_clk.clkr,
+	[NSS_CC_XGMAC4_PTP_REF_DIV_CLK_SRC] =
+		&nss_cc_xgmac4_ptp_ref_div_clk_src.clkr,
+	[NSS_CC_XGMAC5_PTP_REF_CLK] = &nss_cc_xgmac5_ptp_ref_clk.clkr,
+	[NSS_CC_XGMAC5_PTP_REF_DIV_CLK_SRC] =
+		&nss_cc_xgmac5_ptp_ref_div_clk_src.clkr,
+	[UBI32_PLL] = &ubi32_pll.clkr,
+	[UBI32_PLL_MAIN] = &ubi32_pll_main.clkr,
+};
+
+/*
+ * Reset map for the NSS clock controller.  Entries are { reg, bit } for
+ * single-bit resets; the trailing entries use the 4-field form
+ * { reg, bit, udelay, bitmask } to pulse several reset bits at once
+ * (bitmask values taken from the downstream QCA driver -- verify
+ * against the IPQ9574 register documentation).
+ *
+ * Fix vs. original: [NSS_CSR_ARES] was missing its '=' and only
+ * compiled through an obsolete GCC designated-initializer extension.
+ */
+static const struct qcom_reset_map nss_cc_ipq9574_resets[] = {
+	[NSS_CC_CE_BCR] = { 0x28400, 0 },
+	[NSS_CC_CLC_BCR] = { 0x28600, 0 },
+	[NSS_CC_EIP197_BCR] = { 0x16004, 0 },
+	[NSS_CC_HAQ_BCR] = { 0x28300, 0 },
+	[NSS_CC_IMEM_BCR] = { 0xe004, 0 },
+	[NSS_CC_MAC_BCR] = { 0x28100, 0 },
+	[NSS_CC_PPE_BCR] = { 0x28200, 0 },
+	[NSS_CC_UBI_BCR] = { 0x28700, 0 },
+	[NSS_CC_UNIPHY_BCR] = { 0x28900, 0 },
+	[UBI3_CLKRST_CLAMP_ENABLE] = { 0x28A04, 9 },
+	[UBI3_CORE_CLAMP_ENABLE] = { 0x28A04, 8 },
+	[UBI2_CLKRST_CLAMP_ENABLE] = { 0x28A04, 7 },
+	[UBI2_CORE_CLAMP_ENABLE] = { 0x28A04, 6 },
+	[UBI1_CLKRST_CLAMP_ENABLE] = { 0x28A04, 5 },
+	[UBI1_CORE_CLAMP_ENABLE] = { 0x28A04, 4 },
+	[UBI0_CLKRST_CLAMP_ENABLE] = { 0x28A04, 3 },
+	[UBI0_CORE_CLAMP_ENABLE] = { 0x28A04, 2 },
+	[NSSNOC_NSS_CSR_ARES] = { 0x28A04, 1 },
+	[NSS_CSR_ARES] = { 0x28A04, 0 },
+	[PPE_BTQ_ARES] = { 0x28A08, 20 },
+	[PPE_IPE_ARES] = { 0x28A08, 19 },
+	[PPE_ARES] = { 0x28A08, 18 },
+	[PPE_CFG_ARES] = { 0x28A08, 17 },
+	[PPE_EDMA_ARES] = { 0x28A08, 16 },
+	[PPE_EDMA_CFG_ARES] = { 0x28A08, 15 },
+	[CRY_PPE_ARES] = { 0x28A08, 14 },
+	[NSSNOC_PPE_ARES] = { 0x28A08, 13 },
+	[NSSNOC_PPE_CFG_ARES] = { 0x28A08, 12 },
+	[PORT1_MAC_ARES] = { 0x28A08, 11 },
+	[PORT2_MAC_ARES] = { 0x28A08, 10 },
+	[PORT3_MAC_ARES] = { 0x28A08, 9 },
+	[PORT4_MAC_ARES] = { 0x28A08, 8 },
+	[PORT5_MAC_ARES] = { 0x28A08, 7 },
+	[PORT6_MAC_ARES] = { 0x28A08, 6 },
+	[XGMAC0_PTP_REF_ARES] = { 0x28A08, 5 },
+	[XGMAC1_PTP_REF_ARES] = { 0x28A08, 4 },
+	[XGMAC2_PTP_REF_ARES] = { 0x28A08, 3 },
+	[XGMAC3_PTP_REF_ARES] = { 0x28A08, 2 },
+	[XGMAC4_PTP_REF_ARES] = { 0x28A08, 1 },
+	[XGMAC5_PTP_REF_ARES] = { 0x28A08, 0 },
+	[HAQ_AHB_ARES] = { 0x28A0C, 3 },
+	[HAQ_AXI_ARES] = { 0x28A0C, 2 },
+	[NSSNOC_HAQ_AHB_ARES] = { 0x28A0C, 1 },
+	[NSSNOC_HAQ_AXI_ARES] = { 0x28A0C, 0 },
+	[CE_APB_ARES] = { 0x28A10, 3 },
+	[CE_AXI_ARES] = { 0x28A10, 2 },
+	[NSSNOC_CE_APB_ARES] = { 0x28A10, 1 },
+	[NSSNOC_CE_AXI_ARES] = { 0x28A10, 0 },
+	[CRYPTO_ARES] = { 0x28A14, 1 },
+	[NSSNOC_CRYPTO_ARES] = { 0x28A14, 0 },
+	[NSSNOC_NC_AXI0_1_ARES] = { 0x28A1C, 28 },
+	[UBI0_CORE_ARES] = { 0x28A1C, 27 },
+	[UBI1_CORE_ARES] = { 0x28A1C, 26 },
+	[UBI2_CORE_ARES] = { 0x28A1C, 25 },
+	[UBI3_CORE_ARES] = { 0x28A1C, 24 },
+	[NC_AXI0_ARES] = { 0x28A1C, 23 },
+	[UTCM0_ARES] = { 0x28A1C, 22 },
+	[NC_AXI1_ARES] = { 0x28A1C, 21 },
+	[UTCM1_ARES] = { 0x28A1C, 20 },
+	[NC_AXI2_ARES] = { 0x28A1C, 19 },
+	[UTCM2_ARES] = { 0x28A1C, 18 },
+	[NC_AXI3_ARES] = { 0x28A1C, 17 },
+	[UTCM3_ARES] = { 0x28A1C, 16 },
+	[NSSNOC_NC_AXI0_ARES] = { 0x28A1C, 15 },
+	[AHB0_ARES] = { 0x28A1C, 14 },
+	[INTR0_AHB_ARES] = { 0x28A1C, 13 },
+	[AHB1_ARES] = { 0x28A1C, 12 },
+	[INTR1_AHB_ARES] = { 0x28A1C, 11 },
+	[AHB2_ARES] = { 0x28A1C, 10 },
+	[INTR2_AHB_ARES] = { 0x28A1C, 9 },
+	[AHB3_ARES] = { 0x28A1C, 8 },
+	[INTR3_AHB_ARES] = { 0x28A1C, 7 },
+	[NSSNOC_AHB0_ARES] = { 0x28A1C, 6 },
+	[NSSNOC_INT0_AHB_ARES] = { 0x28A1C, 5 },
+	[AXI0_ARES] = { 0x28A1C, 4 },
+	[AXI1_ARES] = { 0x28A1C, 3 },
+	[AXI2_ARES] = { 0x28A1C, 2 },
+	[AXI3_ARES] = { 0x28A1C, 1 },
+	[NSSNOC_AXI0_ARES] = { 0x28A1C, 0 },
+	[IMEM_QSB_ARES] = { 0x28A20, 3 },
+	[NSSNOC_IMEM_QSB_ARES] = { 0x28A20, 2 },
+	[IMEM_AHB_ARES] = { 0x28A20, 1 },
+	[NSSNOC_IMEM_AHB_ARES] = { 0x28A20, 0 },
+	[UNIPHY_PORT1_RX_ARES] = { 0x28A24, 23 },
+	[UNIPHY_PORT1_TX_ARES] = { 0x28A24, 22 },
+	[UNIPHY_PORT2_RX_ARES] = { 0x28A24, 21 },
+	[UNIPHY_PORT2_TX_ARES] = { 0x28A24, 20 },
+	[UNIPHY_PORT3_RX_ARES] = { 0x28A24, 19 },
+	[UNIPHY_PORT3_TX_ARES] = { 0x28A24, 18 },
+	[UNIPHY_PORT4_RX_ARES] = { 0x28A24, 17 },
+	[UNIPHY_PORT4_TX_ARES] = { 0x28A24, 16 },
+	[UNIPHY_PORT5_RX_ARES] = { 0x28A24, 15 },
+	[UNIPHY_PORT5_TX_ARES] = { 0x28A24, 14 },
+	[UNIPHY_PORT6_RX_ARES] = { 0x28A24, 13 },
+	[UNIPHY_PORT6_TX_ARES] = { 0x28A24, 12 },
+	[PORT1_RX_ARES] = { 0x28A24, 11 },
+	[PORT1_TX_ARES] = { 0x28A24, 10 },
+	[PORT2_RX_ARES] = { 0x28A24, 9 },
+	[PORT2_TX_ARES] = { 0x28A24, 8 },
+	[PORT3_RX_ARES] = { 0x28A24, 7 },
+	[PORT3_TX_ARES] = { 0x28A24, 6 },
+	[PORT4_RX_ARES] = { 0x28A24, 5 },
+	[PORT4_TX_ARES] = { 0x28A24, 4 },
+	[PORT5_RX_ARES] = { 0x28A24, 3 },
+	[PORT5_TX_ARES] = { 0x28A24, 2 },
+	[PORT6_RX_ARES] = { 0x28A24, 1 },
+	[PORT6_TX_ARES] = { 0x28A24, 0 },
+	[PPE_FULL_RESET] = { 0x28A08, 0, 0, 0x1E0000 },
+	[UNIPHY0_SOFT_RESET] = { 0x28A24, 0, 0, 0xFFC000 },
+	[UNIPHY1_SOFT_RESET] = { 0x28A24, 0, 0, 0x00C000 },
+	[UNIPHY2_SOFT_RESET] = { 0x28A24, 0, 0, 0x003000 },
+	[UNIPHY_PORT1_ARES] = { 0x28A24, 0, 0, 0xC00000 },
+	[UNIPHY_PORT2_ARES] = { 0x28A24, 0, 0, 0x300000 },
+	[UNIPHY_PORT3_ARES] = { 0x28A24, 0, 0, 0x0C0000 },
+	[UNIPHY_PORT4_ARES] = { 0x28A24, 0, 0, 0x030000 },
+	[UNIPHY_PORT5_ARES] = { 0x28A24, 0, 0, 0x00C000 },
+	[UNIPHY_PORT6_ARES] = { 0x28A24, 0, 0, 0x003000 },
+	[NSSPORT1_RESET] = { 0x28A24, 0, 0, 0x000C00 },
+	[NSSPORT2_RESET] = { 0x28A24, 0, 0, 0x000300 },
+	[NSSPORT3_RESET] = { 0x28A24, 0, 0, 0x0000C0 },
+	[NSSPORT4_RESET] = { 0x28A24, 0, 0, 0x000030 },
+	[NSSPORT5_RESET] = { 0x28A24, 0, 0, 0x00000C },
+	[NSSPORT6_RESET] = { 0x28A24, 0, 0, 0x000003 },
+	[EDMA_HW_RESET] = { 0x28A08, 0, 0, 0x18000 },
+};
+
+/* Register layout of the NSS CC block: 32-bit registers, 4-byte stride,
+ * last register at 0x28a34; fast_io selects spinlock-based regmap locking. */
+static const struct regmap_config nss_cc_ipq9574_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= 0x28a34,
+	.fast_io	= true,
+};
+
+/* Ties together the regmap config, clock table and reset map for the
+ * common qcom_cc_really_probe() registration helper. */
+static const struct qcom_cc_desc nss_cc_ipq9574_desc = {
+	.config = &nss_cc_ipq9574_regmap_config,
+	.clks = nss_cc_ipq9574_clocks,
+	.num_clks = ARRAY_SIZE(nss_cc_ipq9574_clocks),
+	.resets = nss_cc_ipq9574_resets,
+	.num_resets = ARRAY_SIZE(nss_cc_ipq9574_resets),
+};
+
+/* Device-tree match table; must stay in sync with the DT binding. */
+static const struct of_device_id nss_cc_ipq9574_match_table[] = {
+	{ .compatible = "qcom,nsscc-ipq9574" },
+	{ }
+};
+
+/*
+ * Parent-domain clocks that must be running before the NSS CC block can
+ * be accessed.  Template only: probe() copies this into a mutable array
+ * before handing it to devm_clk_bulk_get().
+ */
+static const struct clk_bulk_data nsscc_reqclks[] = {
+	{ .id = "noc_snoc" },
+	{ .id = "noc_snoc1" },
+	{ .id = "noc_nsscc" },
+	{ .id = "nsscc" },
+};
+
+/*
+ * Probe: acquire and rate-set the interconnect/CC clocks, enable them,
+ * map the controller, apply the UBI Huayra PLL workaround, configure the
+ * UBI32 PLL and register all clocks/resets.
+ *
+ * Fix vs. original: the bulk clocks were left prepared+enabled when
+ * qcom_cc_map() or qcom_cc_really_probe() failed; they are now disabled
+ * on every error path after the enable.
+ */
+static int nss_cc_ipq9574_probe(struct platform_device *pdev)
+{
+	struct qcom_cc_desc nsscc_ipq9574_desc = nss_cc_ipq9574_desc;
+	struct clk_bulk_data reqclks[ARRAY_SIZE(nsscc_reqclks)];
+	/* Rates indexed like nsscc_reqclks: noc_snoc, noc_snoc1 at
+	 * 342.857 MHz; noc_nsscc, nsscc at 100 MHz. */
+	static const unsigned long reqclk_rates[ARRAY_SIZE(nsscc_reqclks)] = {
+		342857143, 342857143, 100000000, 100000000,
+	};
+	struct regmap *regmap;
+	int i, ret;
+
+	memcpy(reqclks, nsscc_reqclks, sizeof(reqclks));
+	ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(reqclks), reqclks);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(reqclks); i++) {
+		ret = clk_set_rate(reqclks[i].clk, reqclk_rates[i]);
+		if (ret)
+			return ret;
+	}
+
+	ret = clk_bulk_prepare_enable(ARRAY_SIZE(reqclks), reqclks);
+	if (ret)
+		return ret;
+
+	regmap = qcom_cc_map(pdev, &nsscc_ipq9574_desc);
+	if (IS_ERR(regmap)) {
+		ret = PTR_ERR(regmap);
+		goto err_disable_clks;
+	}
+
+	/* SW Workaround for UBI Huayra PLL */
+	regmap_update_bits(regmap, 0x2800C, BIT(26), BIT(26));
+
+	clk_alpha_pll_configure(&ubi32_pll_main, regmap, &ubi32_pll_config);
+
+	ret = qcom_cc_really_probe(pdev, &nsscc_ipq9574_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register NSS CC clocks\n");
+		goto err_disable_clks;
+	}
+
+	return 0;
+
+err_disable_clks:
+	clk_bulk_disable_unprepare(ARRAY_SIZE(reqclks), reqclks);
+	return ret;
+}
+
+static struct platform_driver nss_cc_ipq9574_driver = {
+	.probe = nss_cc_ipq9574_probe,
+	.driver = {
+		.name = "qcom,nsscc-ipq9574",
+		/* .owner is set by platform_driver_register(); drivers
+		 * must not set it themselves. */
+		.of_match_table = nss_cc_ipq9574_match_table,
+	},
+};
+
+/* Register at subsys_initcall level (rather than module_init) so the
+ * NSS clocks are available to early consumers. */
+static int __init nss_cc_ipq9574_init(void)
+{
+	return platform_driver_register(&nss_cc_ipq9574_driver);
+}
+subsys_initcall(nss_cc_ipq9574_init);
+
+/* Module unload: unregister the platform driver. */
+static void __exit nss_cc_ipq9574_exit(void)
+{
+	platform_driver_unregister(&nss_cc_ipq9574_driver);
+}
+module_exit(nss_cc_ipq9574_exit);
+
+MODULE_DESCRIPTION("QTI NSS_CC IPQ9574 Driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-6.4-fbx/drivers/fbxgpio./Kconfig linux-6.4-fbx/drivers/fbxgpio/Kconfig
--- linux-6.4-fbx/drivers/fbxgpio./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/fbxgpio/Kconfig	2023-02-27 17:08:58.870666225 +0100
@@ -0,0 +1,7 @@
+config FREEBOX_GPIO
+	tristate "Freebox GPIO control interface"
+	default n
+
+config FREEBOX_GPIO_DT
+	tristate "Freebox GPIO DT binding."
+	default n
diff -Nruw linux-6.4-fbx/drivers/fbxgpio./Makefile linux-6.4-fbx/drivers/fbxgpio/Makefile
--- linux-6.4-fbx/drivers/fbxgpio./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/fbxgpio/Makefile	2023-02-27 17:08:58.870666225 +0100
@@ -0,0 +1,2 @@
+obj-$(CONFIG_FREEBOX_GPIO)	+= fbxgpio_core.o
+obj-$(CONFIG_FREEBOX_GPIO_DT)	+= fbxgpio_dt.o
diff -Nruw linux-6.4-fbx/drivers/fbxgpio./fbxgpio_core.c linux-6.4-fbx/drivers/fbxgpio/fbxgpio_core.c
--- linux-6.4-fbx/drivers/fbxgpio./fbxgpio_core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/fbxgpio/fbxgpio_core.c	2023-05-22 20:06:38.183692610 +0200
@@ -0,0 +1,334 @@
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/fbxgpio_core.h>
+#include <linux/of.h>
+#include <linux/gpio.h>
+
+#define PFX	"fbxgpio_core: "
+
+/* #define DEBUG */
+#ifdef DEBUG
+#define dprint(Fmt, Arg...)	printk(PFX Fmt, Arg)
+#else
+#define dprint(Fmt, Arg...)	do { } while (0)
+#endif
+
+static struct class *fbxgpio_class;
+
+/*
+ * retrieval of a struct fbxgpio_pin from a phandle in the device
+ * tree.
+ *
+ * can be removed when fbxjtag uses standard gpio library instead of
+ * fbxgpio
+ */
+struct fbxgpio_of_mach_data {
+	struct fbxgpio_pin *match;
+	struct device_node *np;
+};
+
+static int match_fbxgpio_of_node(struct device *dev, void *data)
+{
+	struct fbxgpio_of_mach_data *md = data;
+	struct fbxgpio_pin *pin = dev_get_drvdata(dev);
+
+	if (pin->of_node == md->np) {
+		md->match = pin;
+		return 1;
+	}
+	return 0;
+}
+
+struct fbxgpio_pin *fbxgpio_of_get(struct device_node *np, const char *propname,
+				   int index)
+{
+	struct fbxgpio_of_mach_data md;
+
+	/*
+	 * get the pin device_node.
+	 */
+	md.match = NULL;
+	md.np = of_parse_phandle(np, propname, index);
+	if (!md.np)
+		return ERR_PTR(-ENOENT);
+
+	/*
+	 * find the struct fbxgpio_pin behind that device_node.
+	 */
+	class_for_each_device(fbxgpio_class, NULL, &md,
+			      match_fbxgpio_of_node);
+
+	return md.match ? md.match : ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(fbxgpio_of_get);
+
+/*
+ * can be removed when fbxjtag uses standard gpio library instead of
+ * fbxgpio
+ */
+int fbxgpio_set_data_out(struct fbxgpio_pin *p, int val)
+{
+	struct gpio_desc *desc;
+
+	p->cur_dataout = val;
+
+	if (p->use_desc) {
+		desc = p->request_desc(p);
+		if (IS_ERR(desc))
+			return PTR_ERR(desc);
+		gpiod_set_value_cansleep(desc, val);
+		p->release_desc(p);
+	} else {
+		if (p->flags & FBXGPIO_PIN_REVERSE_POL)
+			val = 1 - val;
+		gpio_set_value_cansleep(p->pin_num, val);
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(fbxgpio_set_data_out);
+
+/*
+ * can be removed when fbxjtag uses standard gpio library instead of
+ * fbxgpio
+ */
+int fbxgpio_get_data_in(struct fbxgpio_pin *p)
+{
+	const struct gpio_desc *desc;
+	int val;
+
+	if (p->use_desc) {
+		desc = p->request_desc(p);
+		if (IS_ERR(desc))
+			return PTR_ERR(desc);
+
+		val = gpiod_get_value_cansleep(desc);
+		p->release_desc(p);
+	} else {
+		val = gpio_get_value_cansleep(p->pin_num);
+		if (p->flags & FBXGPIO_PIN_REVERSE_POL)
+			val = 1 - val;
+	}
+
+	return val;
+}
+
+EXPORT_SYMBOL(fbxgpio_get_data_in);
+
+/*
+ * show direction in for gpio associated with class_device dev.
+ */
+static ssize_t show_direction(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct fbxgpio_pin *p;
+	int dir, ret = 0;
+
+	p = dev_get_drvdata(dev);
+
+	dir = p->direction;
+
+	switch (dir) {
+	case GPIO_DIR_IN:
+		ret += sprintf(buf, "input\n");
+		break;
+	case GPIO_DIR_OUT:
+		ret += sprintf(buf, "output\n");
+		break;
+	default:
+		ret += sprintf(buf, "unknown\n");
+		break;
+	}
+	return ret;
+}
+
+/*
+ * show input data for input gpio pins.
+ */
+static ssize_t show_datain(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	struct fbxgpio_pin *p;
+	int val;
+
+	p = dev_get_drvdata(dev);
+	if (p->direction == GPIO_DIR_OUT)
+		return -EINVAL;
+
+	val = fbxgpio_get_data_in(p);
+	return sprintf(buf, "%i\n", val);
+}
+
+/*
+ * show output data for output gpio pins.
+ */
+static ssize_t show_dataout(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	int val;
+	struct fbxgpio_pin *p;
+
+	p = dev_get_drvdata(dev);
+	if (p->direction == GPIO_DIR_IN)
+		return -EINVAL;
+
+	val = p->cur_dataout;
+	return sprintf(buf, "%i\n", val);
+}
+
+/*
+ * store new dataout value for output gpio pins.
+ */
+static ssize_t store_dataout(struct device *dev,
+	    struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct fbxgpio_pin *p;
+	int val;
+
+	if (*buf == ' ' || *buf == '\t' || *buf == '\r' || *buf == '\n')
+		/* silently eat any spaces/tab/linefeed/carriagereturn */
+		return 1;
+
+	p = dev_get_drvdata(dev);
+
+	if (p->direction != GPIO_DIR_OUT)
+		return -EINVAL;
+
+	switch (*buf) {
+	case '0':
+		val = 0;
+		break ;
+	case '1':
+		val = 1;
+		break ;
+	default:
+		return -EINVAL;
+	}
+
+	fbxgpio_set_data_out(p, val);
+	return 1;
+}
+
+/*
+ * attribute list associated with each class device.
+ */
+static struct device_attribute gpio_attributes[] = {
+	__ATTR(direction, 0400, show_direction, NULL),
+	__ATTR(data_in,   0400, show_datain, NULL),
+	__ATTR(data_out,  0600, show_dataout, store_dataout),
+};
+
+static int fbxgpio_register_pin(struct platform_device *ppdev,
+				struct fbxgpio_pin *pin)
+{
+	struct device *dev;
+	int i, ret;
+
+	if (pin->use_desc && (!pin->request_desc || !pin->release_desc))
+		return -EINVAL;
+
+	dprint("registering pin %s\n", pin->pin_name);
+
+	dev = device_create(fbxgpio_class, &ppdev->dev, 0, pin,
+			    "%s", pin->pin_name);
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+
+	for (i = 0; i < ARRAY_SIZE(gpio_attributes); i++) {
+		ret = device_create_file(dev, &gpio_attributes[i]);
+		if (ret)
+			goto err_out;
+	}
+
+	pin->dev = dev;
+	return 0;
+
+err_out:
+	for (; i >= 0; i--)
+		device_remove_file(dev, &gpio_attributes[i]);
+	device_unregister(dev);
+	return ret;
+}
+
+static void fbxgpio_unregister_pin(struct fbxgpio_pin *pin)
+{
+	struct device *dev;
+	int i;
+
+	dprint("unregistering pin %s\n", pin->pin_name);
+	dev = pin->dev;
+	pin->dev = NULL;
+
+	for (i = 0; i < ARRAY_SIZE(gpio_attributes); i++)
+		device_remove_file(dev, &gpio_attributes[i]);
+	device_unregister(dev);
+}
+
+static int fbxgpio_platform_probe(struct platform_device *pdev)
+{
+	struct fbxgpio_pin *p;
+	int err = 0;
+
+	p = pdev->dev.platform_data;
+	while (p->pin_name) {
+		err = fbxgpio_register_pin(pdev, p);
+		if (err)
+			return err;
+		++p;
+	}
+	return 0;
+}
+
+static int fbxgpio_platform_remove(struct platform_device *pdev)
+{
+	struct fbxgpio_pin *p;
+
+	p = pdev->dev.platform_data;
+	while (p->pin_name) {
+		fbxgpio_unregister_pin(p);
+		++p;
+	}
+	return 0;
+}
+
+static struct platform_driver fbxgpio_platform_driver =
+{
+	.probe	= fbxgpio_platform_probe,
+	.remove	= fbxgpio_platform_remove,
+	.driver	= {
+		.name	= "fbxgpio",
+	}
+};
+
+static int __init fbxgpio_init(void)
+{
+	int ret;
+
+	fbxgpio_class = class_create("fbxgpio");
+	if (IS_ERR(fbxgpio_class))
+		return PTR_ERR(fbxgpio_class);
+
+	ret = platform_driver_register(&fbxgpio_platform_driver);
+	if (ret) {
+		printk(KERN_ERR PFX "unable to register fbxgpio driver.\n");
+		class_destroy(fbxgpio_class);
+		return ret;
+	}
+	return 0;
+}
+
+static void __exit fbxgpio_exit(void)
+{
+	platform_driver_unregister(&fbxgpio_platform_driver);
+	class_destroy(fbxgpio_class);
+}
+
+subsys_initcall(fbxgpio_init);
+module_exit(fbxgpio_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Schichan <nicolas.schichan@freebox.fr>");
diff -Nruw linux-6.4-fbx/drivers/fbxgpio./fbxgpio_dt.c linux-6.4-fbx/drivers/fbxgpio/fbxgpio_dt.c
--- linux-6.4-fbx/drivers/fbxgpio./fbxgpio_dt.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/fbxgpio/fbxgpio_dt.c	2023-05-22 20:06:38.183692610 +0200
@@ -0,0 +1,256 @@
+/*
+ * fbxgpio_dt.c for fbxgpio
+ * Created by <nschichan@freebox.fr> on Tue Aug  1 14:01:01 2017
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/fbxgpio_core.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_gpio.h>
+
+static atomic_t last_id = ATOMIC_INIT(0);
+
+/*
+ * fbxgpio driver fetching gpios names and configuration from
+ * device-tree.
+ */
+
+struct fbxgpio_dt_priv {
+	struct fbxgpio_pin *pins;
+	unsigned int npins;
+
+	/* dynamically created platform_device for fbxgpio_core */
+	struct platform_device *top_pdev;
+};
+
+/*
+ *
+ */
+static struct gpio_desc *request_desc_cb(struct fbxgpio_pin *pin)
+{
+	struct gpio_desc *desc;
+	int ret;
+
+	if (!pin->dt.no_claim) {
+		/* was requested earlier */
+		return pin->dt.desc;
+	}
+
+	/* try to request it for real first */
+	desc = fwnode_gpiod_get_index(of_fwnode_handle(pin->of_node),
+				      NULL, 0, pin->dt.flags,
+				      pin->dt.pin_name);
+	if (!IS_ERR(desc)) {
+		/* we want to release it later */
+		pin->dt.desc = desc;
+		return desc;
+	}
+
+	ret = PTR_ERR(desc);
+	if (ret != -EBUSY)
+		return desc;
+
+	/* device is busy, which is expected for no-claim, just fetch
+	 * a "light" reference, which we won't need to put */
+	return fwnode_gpiod_get_index(of_fwnode_handle(pin->of_node),
+				      NULL, 0, GPIOD_ASIS |
+				      GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+				      pin->dt.pin_name);
+}
+
+/*
+ *
+ */
+static void release_desc_cb(struct fbxgpio_pin *pin)
+{
+	if (pin->dt.no_claim && pin->dt.desc) {
+		gpiod_put(pin->dt.desc);
+		pin->dt.desc = NULL;
+	}
+}
+
+/*
+ * fill an fbxgpio_pin with the configuration found in a device tree
+ * node.
+ *
+ * required properties are:
+ * - gpio: a phandle to a standard linux gpio.
+ *
+ * - the name of the node: the name of the gpio as it will appear under
+ *   /sys/class/fbxgpio/
+ *
+ * - <input>/<output-high>/<output-low>: how to declare gpio and
+ *   actually setup it unless no-claim is given
+ *
+ * - <no-claim>: just declare gpio, but don't request & setup it
+ */
+static int fbxgpio_dt_fill_gpio(struct platform_device *pdev,
+				struct device_node *np,
+				struct fbxgpio_pin *pin)
+{
+	enum gpiod_flags flags;
+	int error;
+
+	error = of_property_read_string(np, "name", &pin->pin_name);
+	if (error) {
+		dev_err(&pdev->dev, "gpio has no name.\n");
+		return error;
+	}
+
+	if (of_property_read_bool(np, "input")) {
+		pin->direction = GPIO_DIR_IN;
+		flags = GPIOD_IN;
+	} else if (of_property_read_bool(np, "output-low")) {
+		pin->direction = GPIO_DIR_OUT;
+		pin->cur_dataout = 0;
+		flags = GPIOD_OUT_LOW;
+	} else if (of_property_read_bool(np, "output-high")) {
+		pin->direction = GPIO_DIR_OUT;
+		pin->cur_dataout = 1;
+		flags = GPIOD_OUT_HIGH;
+	} else {
+		dev_err(&pdev->dev,
+			"no state specified for %s\n",
+			pin->pin_name);
+		return -EINVAL;
+	}
+
+	pin->use_desc = true;
+	pin->of_node = np;
+	pin->dt.flags = flags;
+	pin->request_desc = request_desc_cb;
+	pin->release_desc = release_desc_cb;
+	scnprintf(pin->dt.pin_name, sizeof (pin->dt.pin_name),
+		  "fbxgpio-dt/%s", pin->pin_name);
+
+	if (of_property_read_bool(np, "no-claim")) {
+		/* will be requested on demand */
+		pin->dt.no_claim = true;
+		return 0;
+	}
+
+	pin->dt.desc = devm_fwnode_gpiod_get(&pdev->dev, of_fwnode_handle(np),
+					     NULL, flags, pin->dt.pin_name);
+	if (IS_ERR(pin->dt.desc)) {
+		int ret = PTR_ERR(pin->dt.desc);
+
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"unable to get gpio desc for %s: %d.\n",
+				pin->pin_name, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int fbxgpio_dt_probe(struct platform_device *pdev)
+{
+	struct fbxgpio_dt_priv *priv;
+	struct device_node *fbxgpio_node;
+	u32 cur_gpio;
+	int error = 0;
+	size_t priv_alloc_size;
+	int i;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof (*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	dev_set_drvdata(&pdev->dev, priv);
+
+	/*
+	 * first pass to get the number of struct fbxgpio_pin to
+	 * allocate.
+	 */
+	for_each_available_child_of_node(pdev->dev.of_node, fbxgpio_node) {
+		++priv->npins;
+	}
+
+	/*
+	 * allocate pins: use npins + 1 for zeroed end sentinel.
+	 */
+	priv_alloc_size = (priv->npins + 1) * sizeof (struct fbxgpio_pin);
+	priv->pins = devm_kzalloc(&pdev->dev, priv_alloc_size, GFP_KERNEL);
+	if (!priv->pins)
+		return -ENOMEM;
+
+	/*
+	 * second pass to fill the priv->pins array.
+	 */
+	cur_gpio = 0;
+	for_each_available_child_of_node(pdev->dev.of_node, fbxgpio_node) {
+		error = fbxgpio_dt_fill_gpio(pdev, fbxgpio_node,
+					     &priv->pins[cur_gpio]);
+		if (error)
+			return error;
+		++cur_gpio;
+	}
+
+	dev_info(&pdev->dev, "%u gpios.\n", priv->npins);
+
+	/*
+	 * create and register a platform device for fbxgpio_core.
+	 */
+	priv->top_pdev = platform_device_register_data(&pdev->dev,
+						       "fbxgpio",
+						       atomic_inc_return(&last_id),
+						       priv->pins,
+						       priv_alloc_size);
+
+	if (IS_ERR(priv->top_pdev)) {
+		dev_err(&pdev->dev, "unable to register fbxgpio platform "
+			"device: %ld\n", PTR_ERR(priv->top_pdev));
+		return PTR_ERR(priv->top_pdev);
+	}
+
+	for (i = 0; i < priv->npins; i++) {
+		struct fbxgpio_pin *pin = &priv->pins[i];
+
+		if (pin->direction == GPIO_DIR_OUT)
+			dev_dbg(&pdev->dev,
+				"%sgpio %s is output, default %d\n",
+				pin->dt.no_claim ? "unclaimed " : "",
+				pin->pin_name, pin->cur_dataout);
+		else
+			dev_dbg(&pdev->dev,
+				"%sgpio %s is input\n",
+				pin->dt.no_claim ? "unclaimed " : "",
+				pin->pin_name);
+	}
+
+	return 0;
+}
+
+static int fbxgpio_dt_remove(struct platform_device *pdev)
+{
+	struct fbxgpio_dt_priv *priv = dev_get_drvdata(&pdev->dev);
+	platform_device_unregister(priv->top_pdev);
+	return 0;
+}
+
+static const struct of_device_id fbxgpio_dt_of_match_table[] = {
+	{ .compatible = "fbx,fbxgpio" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, fbxgpio_dt_of_match_table);
+
+static struct platform_driver fbxgpio_dt_platform_driver = {
+	.probe		= fbxgpio_dt_probe,
+	.remove		= fbxgpio_dt_remove,
+	.driver		= {
+		.name		= "fbxgpio-dt",
+		.of_match_table	= fbxgpio_dt_of_match_table,
+	},
+};
+
+module_platform_driver(fbxgpio_dt_platform_driver);
+
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
+MODULE_DESCRIPTION("DT Freebox GPIO Driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-6.4-fbx/drivers/fbxjtag./Kconfig linux-6.4-fbx/drivers/fbxjtag/Kconfig
--- linux-6.4-fbx/drivers/fbxjtag./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/fbxjtag/Kconfig	2023-03-09 15:06:11.348233797 +0100
@@ -0,0 +1,3 @@
+config FREEBOX_JTAG
+	tristate "Freebox JTAG control interface"
+	default n
diff -Nruw linux-6.4-fbx/drivers/fbxjtag./Makefile linux-6.4-fbx/drivers/fbxjtag/Makefile
--- linux-6.4-fbx/drivers/fbxjtag./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/fbxjtag/Makefile	2023-03-09 15:06:11.348233797 +0100
@@ -0,0 +1 @@
+obj-$(CONFIG_FREEBOX_JTAG)	+= fbxjtag.o
diff -Nruw linux-6.4-fbx/drivers/fbxprocfs./Kconfig linux-6.4-fbx/drivers/fbxprocfs/Kconfig
--- linux-6.4-fbx/drivers/fbxprocfs./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/fbxprocfs/Kconfig	2023-02-27 19:50:21.524231407 +0100
@@ -0,0 +1,2 @@
+config FREEBOX_PROCFS
+	tristate "Freebox procfs interface"
diff -Nruw linux-6.4-fbx/drivers/fbxprocfs./Makefile linux-6.4-fbx/drivers/fbxprocfs/Makefile
--- linux-6.4-fbx/drivers/fbxprocfs./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/fbxprocfs/Makefile	2023-02-27 19:50:21.524231407 +0100
@@ -0,0 +1 @@
+obj-$(CONFIG_FREEBOX_PROCFS) += fbxprocfs.o
diff -Nruw linux-6.4-fbx/drivers/fbxprocfs./fbxprocfs.c linux-6.4-fbx/drivers/fbxprocfs/fbxprocfs.c
--- linux-6.4-fbx/drivers/fbxprocfs./fbxprocfs.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/fbxprocfs/fbxprocfs.c	2023-05-22 20:06:38.183692610 +0200
@@ -0,0 +1,299 @@
+/*
+ * Freebox ProcFs interface
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/sizes.h>
+
+#include <linux/fbxprocfs.h>
+
+#define PFX	"fbxprocfs: "
+
+
+static struct list_head clients;
+static struct mutex clients_mutex;
+
+static struct proc_dir_entry *root;
+
+/*
+ * register an fbxprocfs client with the given dirname; caller should
+ * consider the returned struct opaque
+ */
+struct fbxprocfs_client *fbxprocfs_add_client(const char *dirname,
+					      struct module *owner)
+{
+	struct fbxprocfs_client *ret, *p;
+
+	ret = NULL;
+	mutex_lock(&clients_mutex);
+
+	/* check for duplicate */
+	list_for_each_entry(p, &clients, list) {
+		if (!strcmp(dirname, p->dirname))
+			goto out;
+	}
+
+	if (!(ret = kmalloc(sizeof (*ret), GFP_KERNEL))) {
+		printk(KERN_ERR PFX "kmalloc failed\n");
+		goto out;
+	}
+
+	/* try to create client directory */
+	if (!(ret->dir = proc_mkdir(dirname, root))) {
+		printk(KERN_ERR PFX "can't create %s dir\n", dirname);
+		kfree(ret);
+		ret = NULL;
+		goto out;
+	}
+
+	atomic_set(&ret->refcount, 1);
+	ret->dirname = dirname;
+	list_add(&ret->list, &clients);
+
+out:
+	mutex_unlock(&clients_mutex);
+	return ret;
+}
+
+/*
+ * unregister an fbxprocfs client; make sure its usage count is zero
+ */
+int fbxprocfs_remove_client(struct fbxprocfs_client *client)
+{
+	int ret;
+
+	mutex_lock(&clients_mutex);
+
+	ret = 0;
+	if (atomic_read(&client->refcount) > 1) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	remove_proc_entry(client->dirname, root);
+	list_del(&client->list);
+	kfree(client);
+
+out:
+	mutex_unlock(&clients_mutex);
+	return ret;
+}
+
+/*
+ * remove given entries from client directory
+ */
+static int
+__remove_entries(struct fbxprocfs_client *client,
+		 const struct fbxprocfs_desc *ro_desc,
+		 const struct fbxprocfs_desc *rw_desc)
+{
+	int i;
+
+	for (i = 0; ro_desc && ro_desc[i].name; i++) {
+		remove_proc_entry(ro_desc[i].name, client->dir);
+		atomic_dec(&client->refcount);
+	}
+
+	for (i = 0; rw_desc && rw_desc[i].name; i++) {
+		remove_proc_entry(rw_desc[i].name, client->dir);
+		atomic_dec(&client->refcount);
+	}
+
+	return 0;
+}
+
+/*
+ * replacement for NULL rfunc.
+ */
+static int bad_rfunc(struct seq_file *m, void *ptr)
+{
+	return -EACCES;
+}
+
+/*
+ * fbxprocfs write path is now handled by seq_file code. this
+ * simplifies client code greatly.
+ */
+static int fbxprocfs_open(struct inode *inode, struct file *file)
+{
+	const struct fbxprocfs_desc *desc = pde_data(inode);
+
+	return single_open(file, desc->rfunc ? desc->rfunc : bad_rfunc,
+			   (void*)desc->id);
+}
+
+/*
+ * no particular help from kernel in the write path, fetch user buffer
+ * in a kernel buffer and call write func.
+ */
+static ssize_t fbxprocfs_write(struct file *file, const char __user *ubuf,
+			       size_t len, loff_t *off)
+{
+	/*
+	 * get fbxprocfs desc via the proc_dir_entry in file inode
+	 */
+	struct fbxprocfs_desc *d = pde_data(file_inode(file));
+	char *kbuf;
+	int ret;
+
+	/*
+	 * must have a wfunc callback.
+	 */
+	if (!d->wfunc)
+		return -EACCES;
+
+	/*
+	 * allow up to SZ_4K bytes to be written.
+	 */
+	if (len > SZ_4K)
+		return -EOVERFLOW;
+
+	/*
+	 * alloc and fetch kernel buffer containing user data.
+	 */
+	kbuf = kmalloc(SZ_4K, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	ret = -EFAULT;
+	if (copy_from_user(kbuf, ubuf, len))
+		goto kfree;
+
+	ret = d->wfunc(file, kbuf, len, (void*)d->id);
+
+kfree:
+	kfree(kbuf);
+	return ret;
+}
+
+/*
+ * fbxprocfs file operations, read stuff is handled by seq_file code.
+ */
+static const struct proc_ops fbxprocfs_fops = {
+	.proc_open	= fbxprocfs_open,
+	.proc_lseek	= seq_lseek,
+	.proc_read	= seq_read,
+	.proc_release	= single_release,
+	.proc_write	= fbxprocfs_write,
+};
+
+/*
+ * replaces create_proc_read_entry removed in latest kernels.
+ */
+static struct proc_dir_entry *__create_proc_read_entry(
+				       const struct fbxprocfs_desc *desc,
+				       struct proc_dir_entry *base)
+{
+	return proc_create_data(desc->name, 0, base, &fbxprocfs_fops,
+				(void*)desc);
+}
+
+/*
+ * replaces create_proc_entry removed in latest kernels.
+ */
+static struct proc_dir_entry *__create_proc_entry(
+					const struct fbxprocfs_desc *desc,
+					struct proc_dir_entry *base)
+{
+	return proc_create_data(desc->name, S_IFREG | S_IWUSR | S_IRUGO,
+				base, &fbxprocfs_fops, (void*)desc);
+}
+
+/*
+ * create given entries in client directory
+ */
+static int
+__create_entries(struct fbxprocfs_client *client,
+		 const struct fbxprocfs_desc *ro_desc,
+		 const struct fbxprocfs_desc *rw_desc)
+{
+	struct proc_dir_entry	*proc;
+	int			i;
+
+	for (i = 0; ro_desc && ro_desc[i].name; i++) {
+		if (!(proc = __create_proc_read_entry(&ro_desc[i],
+						      client->dir))) {
+			printk(KERN_ERR PFX "can't create %s/%s entry\n",
+			       client->dirname, ro_desc[i].name);
+			goto err;
+		}
+		atomic_inc(&client->refcount);
+	}
+
+	for (i = 0; rw_desc && rw_desc[i].name; i++) {
+		if (!(proc = __create_proc_entry(&rw_desc[i], client->dir))) {
+			printk(KERN_ERR PFX "can't create %s/%s entry\n",
+			       client->dirname, ro_desc[i].name);
+			goto err;
+		}
+		atomic_inc(&client->refcount);
+	}
+
+	return 0;
+
+err:
+	__remove_entries(client, ro_desc, rw_desc);
+	return -1;
+}
+
+int
+fbxprocfs_create_entries(struct fbxprocfs_client *client,
+			 const struct fbxprocfs_desc *ro_desc,
+			 const struct fbxprocfs_desc *rw_desc)
+{
+	int	ret;
+
+	ret = __create_entries(client, ro_desc, rw_desc);
+	return ret;
+}
+
+int
+fbxprocfs_remove_entries(struct fbxprocfs_client *client,
+			 const struct fbxprocfs_desc *ro_desc,
+			 const struct fbxprocfs_desc *rw_desc)
+{
+	int	ret;
+
+	ret = __remove_entries(client, ro_desc, rw_desc);
+	return ret;
+}
+
+
+static int __init
+fbxprocfs_init(void)
+{
+	INIT_LIST_HEAD(&clients);
+	mutex_init(&clients_mutex);
+
+	/* create freebox directory */
+	if (!(root = proc_mkdir("freebox", NULL))) {
+		printk(KERN_ERR PFX "can't create freebox/ dir\n");
+		return -EIO;
+	}
+	return 0;
+}
+
+static void __exit
+fbxprocfs_exit(void)
+{
+	remove_proc_entry("freebox", NULL);
+}
+
+module_init(fbxprocfs_init);
+module_exit(fbxprocfs_exit);
+
+EXPORT_SYMBOL(fbxprocfs_create_entries);
+EXPORT_SYMBOL(fbxprocfs_remove_entries);
+EXPORT_SYMBOL(fbxprocfs_add_client);
+EXPORT_SYMBOL(fbxprocfs_remove_client);
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+
diff -Nruw linux-6.4-fbx/drivers/fbxwatchdog./Kconfig linux-6.4-fbx/drivers/fbxwatchdog/Kconfig
--- linux-6.4-fbx/drivers/fbxwatchdog./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/fbxwatchdog/Kconfig	2024-03-18 14:40:14.839741005 +0100
@@ -0,0 +1,28 @@
+menuconfig FREEBOX_WATCHDOG
+	tristate "Freebox Watchdog"
+	default n
+
+if FREEBOX_WATCHDOG
+
+config FREEBOX_WATCHDOG_CHAR
+	bool "Freebox Watchdog char device interface."
+	default n
+
+config FREEBOX_WATCHDOG_ORION
+	tristate "Marvell Orion support"
+	depends on PLAT_ORION
+
+config FREEBOX_WATCHDOG_BCM63XX
+	tristate "Broadcom 63xx Freebox Watchdog support"
+	depends on BCM63XX
+	default n
+
+config FREEBOX_WATCHDOG_BCM63XX_OF
+	tristate "Broadcom 63xx Freebox Watchdog support (generic)"
+	depends on OF && !FREEBOX_WATCHDOG_BCM63XX
+
+config FREEBOX_WATCHDOG_FBXGWR_PMU
+	tristate "Freebox PMU Watchdog support"
+	depends on MFD_FBXGWR_PMU
+
+endif
diff -Nruw linux-6.4-fbx/drivers/fbxwatchdog./Makefile linux-6.4-fbx/drivers/fbxwatchdog/Makefile
--- linux-6.4-fbx/drivers/fbxwatchdog./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/fbxwatchdog/Makefile	2024-03-18 14:40:14.839741005 +0100
@@ -0,0 +1,11 @@
+obj-$(CONFIG_FREEBOX_WATCHDOG) += fbxwatchdog.o
+
+fbxwatchdog-objs = fbxwatchdog_core.o
+ifeq ($(CONFIG_FREEBOX_WATCHDOG_CHAR),y)
+fbxwatchdog-objs += fbxwatchdog_char.o
+endif
+
+obj-$(CONFIG_FREEBOX_WATCHDOG_ORION)	+= fbxwatchdog_orion.o
+obj-$(CONFIG_FREEBOX_WATCHDOG_BCM63XX)	+= fbxwatchdog_bcm63xx.o
+obj-$(CONFIG_FREEBOX_WATCHDOG_BCM63XX_OF)	+= fbxwatchdog_bcm63xx_of.o
+obj-$(CONFIG_FREEBOX_WATCHDOG_FBXGWR_PMU)	+= fbxwatchdog_gwr_pmu.o
diff -Nruw linux-6.4-fbx/drivers/fbxwatchdog./fbxwatchdog.h linux-6.4-fbx/drivers/fbxwatchdog/fbxwatchdog.h
--- linux-6.4-fbx/drivers/fbxwatchdog./fbxwatchdog.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/fbxwatchdog/fbxwatchdog.h	2024-03-18 14:40:14.839741005 +0100
@@ -0,0 +1,49 @@
+#ifndef FBXWATCHDOG_H
+# define FBXWATCHDOG_H
+
+struct fbxwatchdog {
+	const char *name;
+	void *priv;
+
+	int enabled;
+	int countdown;
+	int countdown_min;
+
+	int (*wdt_init)(struct fbxwatchdog *wdt);
+	int (*wdt_cleanup)(struct fbxwatchdog *wdt);
+
+	/*
+	 * wdt_start and wdt_stop are called with wdt->lock held and irq
+	 * disabled.
+	 */
+	int (*wdt_start)(struct fbxwatchdog *wdt);
+	int (*wdt_stop)(struct fbxwatchdog *wdt);
+
+	/*
+	 * cb is called from interrupt/softirq context (depends on the
+	 * underlying driver/hardware).
+	 */
+	void (*cb)(struct fbxwatchdog *wdt);
+
+	struct timer_list timer;
+
+	struct device *dev;
+
+	/*
+	 * protect interrupt handlers & start/stop methods running in
+	 * thread context.
+	 */
+	spinlock_t	lock;
+	struct mutex	mutex;
+	bool		use_mutex;
+};
+
+int fbxwatchdog_register(struct fbxwatchdog *wdt);
+int fbxwatchdog_unregister(struct fbxwatchdog *wdt);
+
+#ifdef CONFIG_FREEBOX_WATCHDOG_CHAR
+int fbxwatchdog_char_add(struct fbxwatchdog *wdt);
+void fbxwatchdog_char_remove(struct fbxwatchdog *wdt);
+#endif
+
+#endif /* !FBXWATCHDOG_H */
diff -Nruw linux-6.4-fbx/drivers/fbxwatchdog./fbxwatchdog_core.c linux-6.4-fbx/drivers/fbxwatchdog/fbxwatchdog_core.c
--- linux-6.4-fbx/drivers/fbxwatchdog./fbxwatchdog_core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/fbxwatchdog/fbxwatchdog_core.c	2024-03-18 14:40:14.839741005 +0100
@@ -0,0 +1,317 @@
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/reboot.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+
+#include "fbxwatchdog.h"
+
+#define SOFTTIMER_FREQ	(HZ / 10)
+
+#define PFX "fbxwatchdog: "
+
+static struct class *fbxwatchdog_class;
+
+static void wdt_lock(struct fbxwatchdog *wdt, unsigned long *flags)
+{
+	if (wdt->use_mutex)
+		mutex_lock(&wdt->mutex);
+	else
+		spin_lock_irqsave(&wdt->lock, *flags);
+}
+
+static void wdt_unlock(struct fbxwatchdog *wdt, unsigned long *flags)
+{
+	if (wdt->use_mutex)
+		mutex_unlock(&wdt->mutex);
+	else
+		spin_unlock_irqrestore(&wdt->lock, *flags);
+}
+
+static ssize_t
+show_enabled(struct device *dev,
+	     struct device_attribute *attr, char *buf)
+{
+	struct fbxwatchdog *wdt;
+
+	wdt = dev_get_drvdata(dev);
+	if (!wdt) {
+		printk(KERN_DEBUG "ignoring request to dead watchdog.\n");
+		return -ENODEV;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%i\n", wdt->enabled);
+}
+
+/*
+ * start/stop watchdog depending on the value of the first character
+ * of buf. set countdown_min to a sane value.
+ */
+static ssize_t
+store_enabled(struct device *dev,
+	      struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct fbxwatchdog *wdt;
+	unsigned long flags;
+
+	wdt = dev_get_drvdata(dev);
+	if (!wdt) {
+		printk(KERN_DEBUG "ignoring request to dead watchdog.\n");
+		return -ENODEV;
+	}
+
+	if (size == 0)
+		return 0;
+
+
+	wdt_lock(wdt, &flags);
+	switch (*buf) {
+	case '0':
+		if (wdt->enabled) {
+			wdt->enabled = 0;
+			wdt->wdt_stop(wdt);
+		}
+		break;
+
+	case '1':
+		if (!wdt->enabled) {
+			wdt->enabled = 1;
+			wdt->wdt_start(wdt);
+			wdt->countdown_min = INT_MAX;
+		}
+		break;
+
+	default:
+		break;
+	}
+	wdt_unlock(wdt, &flags);
+
+	return size;
+}
+
+static ssize_t
+show_countdown(struct device *dev,
+	       struct device_attribute *attr, char *buf)
+{
+	struct fbxwatchdog *wdt;
+
+	wdt = dev_get_drvdata(dev);
+	if (!wdt) {
+		printk(KERN_DEBUG "ignoring request to dead watchdog.\n");
+		return -ENODEV;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%i\n", wdt->countdown);
+}
+
+/*
+ * update watchdog countdown with the userland value given in buf.
+ */
+static ssize_t
+store_countdown(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct fbxwatchdog *wdt;
+	int countdown;
+	char *ptr;
+
+	wdt = dev_get_drvdata(dev);
+	if (!wdt) {
+		printk(KERN_DEBUG "ignoring request to dead watchdog.\n");
+		return -ENODEV;
+	}
+
+	if (size == 0)
+		return 0;
+
+	ptr = kzalloc(size + 1, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+	strlcpy(ptr, buf, size + 1);
+
+	countdown = simple_strtoul(ptr, NULL, 10);
+	wdt->countdown = countdown;
+	kfree(ptr);
+
+	return size;
+}
+
+static ssize_t
+show_countdown_min(struct device *dev,
+		   struct device_attribute *attr, char *buf)
+{
+	struct fbxwatchdog *wdt;
+
+	wdt = dev_get_drvdata(dev);
+	if (!wdt) {
+		printk(KERN_DEBUG "ignoring request to dead watchdog.\n");
+		return -ENODEV;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%i\n", wdt->countdown_min);
+}
+
+static struct device_attribute wdt_attributes[] = {
+	__ATTR(enabled, 0600, show_enabled, store_enabled),
+	__ATTR(countdown, 0600, show_countdown, store_countdown),
+	__ATTR(countdown_min, 0400, show_countdown_min, NULL),
+};
+
+/*
+ * software timer callback: decrement countdown and update
+ * countdown_min if needed. this is called 10 times per second.
+ */
+static void fbxwatchdog_timer_cb(struct timer_list *t)
+{
+	struct fbxwatchdog *wdt = from_timer(wdt, t, timer);
+
+	if (wdt->enabled) {
+		wdt->countdown -= jiffies_to_msecs(SOFTTIMER_FREQ);
+		if (wdt->countdown < wdt->countdown_min)
+			wdt->countdown_min = wdt->countdown;
+	}
+
+	wdt->timer.expires = jiffies + SOFTTIMER_FREQ;
+	add_timer(&wdt->timer);
+}
+
+/*
+ * called from the half-life interrupt handler; panic if countdown is
+ * too low (i.e. if userland has not reset the countdown before it
+ * reached 0).
+ */
+static void fbxwatchdog_halflife_cb(struct fbxwatchdog *wdt)
+{
+	if (wdt->countdown <= 0) {
+		wdt->wdt_stop(wdt);
+		panic("software fbxwatchdog triggered");
+	}
+}
+
+/*
+ * register a new watchdog device.
+ */
+int fbxwatchdog_register(struct fbxwatchdog *wdt)
+{
+	struct device *dev;
+	int i = 0, err = 0;
+
+	if (wdt == NULL)
+		return -EFAULT;
+
+	printk(KERN_INFO PFX "registering watchdog %s\n", wdt->name);
+
+	dev = device_create(fbxwatchdog_class, NULL, 0, wdt, "%s", wdt->name);
+	if (IS_ERR(dev)) {
+		printk(KERN_ERR PFX "unable to allocate device.\n");
+		err = PTR_ERR(dev);
+		goto out_error;
+	}
+	wdt->dev = dev;
+
+	for (i = 0; i < ARRAY_SIZE(wdt_attributes); i++) {
+		err = device_create_file(dev, &wdt_attributes[i]);
+		if (err)
+			goto out_error;
+	}
+
+	/* start countdown soft timer */
+	timer_setup(&wdt->timer, fbxwatchdog_timer_cb, 0);
+	wdt->timer.expires = jiffies + SOFTTIMER_FREQ;
+	add_timer(&wdt->timer);
+
+	if (wdt->use_mutex)
+		mutex_init(&wdt->mutex);
+	else
+		spin_lock_init(&wdt->lock);
+
+	wdt->cb = fbxwatchdog_halflife_cb;
+	err = wdt->wdt_init(wdt);
+	if (err) {
+		printk(KERN_ERR PFX "unable to do low level init of "
+		       "watchdog %s.\n", wdt->name);
+		goto out_del_timer;
+	}
+
+#ifdef CONFIG_FREEBOX_WATCHDOG_CHAR
+	err = fbxwatchdog_char_add(wdt);
+	if (err) {
+		printk(KERN_ERR PFX "unable to add %s to the fbxwatchdog char "
+		       "device interface.\n", wdt->name);
+		goto out_wdt_cleanup;
+	}
+#endif
+
+	return 0;
+
+#ifdef CONFIG_FREEBOX_WATCHDOG_CHAR
+out_wdt_cleanup:
+	wdt->wdt_cleanup(wdt);
+#endif
+
+out_del_timer:
+	del_timer_sync(&wdt->timer);
+out_error:
+	if (wdt->dev) {
+		for (; i >= 0; i--)
+			device_remove_file(dev, &wdt_attributes[i]);
+		device_unregister(dev);
+	}
+	return err;
+}
+
+int fbxwatchdog_unregister(struct fbxwatchdog *wdt)
+{
+	int i;
+
+	printk(KERN_INFO PFX "unregistering watchdog %s\n", wdt->name);
+
+	if (wdt->enabled) {
+		unsigned long flags;
+
+		printk(KERN_WARNING "removing enabled watchdog.\n");
+		wdt_lock(wdt, &flags);
+		wdt->wdt_stop(wdt);
+		wdt_unlock(wdt, &flags);
+	}
+
+#ifdef CONFIG_FREEBOX_WATCHDOG_CHAR
+	fbxwatchdog_char_remove(wdt);
+#endif
+	wdt->wdt_cleanup(wdt);
+	del_timer_sync(&wdt->timer);
+	for (i = 0; i < ARRAY_SIZE(wdt_attributes); i++)
+		device_remove_file(wdt->dev, &wdt_attributes[i]);
+	device_unregister(wdt->dev);
+	wdt->dev = NULL;
+	return 0;
+}
+
+static int __init fbxwatchdog_init(void)
+{
+	printk(KERN_INFO PFX "2007, Freebox SA.\n");
+	fbxwatchdog_class = class_create("fbxwatchdog");
+	if (IS_ERR(fbxwatchdog_class))
+		return PTR_ERR(fbxwatchdog_class);
+	return 0;
+}
+
+static void __exit fbxwatchdog_exit(void)
+{
+	class_destroy(fbxwatchdog_class);
+}
+
+
+EXPORT_SYMBOL_GPL(fbxwatchdog_register);
+EXPORT_SYMBOL_GPL(fbxwatchdog_unregister);
+
+module_init(fbxwatchdog_init);
+module_exit(fbxwatchdog_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
+MODULE_DESCRIPTION("Freebox Watchdog Core - www.freebox.fr");
diff -Nruw linux-6.4-fbx/drivers/fbxwatchdog./fbxwatchdog_gwr_pmu.c linux-6.4-fbx/drivers/fbxwatchdog/fbxwatchdog_gwr_pmu.c
--- linux-6.4-fbx/drivers/fbxwatchdog./fbxwatchdog_gwr_pmu.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/fbxwatchdog/fbxwatchdog_gwr_pmu.c	2024-03-27 19:01:35.028950553 +0100
@@ -0,0 +1,240 @@
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/fbxgwr_pmu.h>
+
+#include "fbxwatchdog.h"
+
+struct fbxgwr_pmu_wdt {
+	struct regmap		*regmap;
+	struct device		*dev;
+	int			irq;
+};
+
+/*
+ * IRQ handler, called when half the hw countdown is reached
+ */
+static irqreturn_t fbxgwr_pmu_wdt_irq(int irq, void *dev_id)
+{
+	struct fbxwatchdog *wdt = dev_id;
+	struct fbxgwr_pmu_wdt *priv;
+	irqreturn_t irqret = IRQ_NONE;
+	int ret;
+	u32 val;
+
+
+	priv = wdt->priv;
+	mutex_lock(&wdt->mutex);
+
+	if (!wdt->enabled) {
+		dev_crit(priv->dev,
+			 "watchdog is still enabled, stopping !\n");
+		regmap_write(priv->regmap, PMU_REG_WDT_CTL, 0);
+		goto out;
+	}
+
+	/* read status */
+	ret = regmap_read(priv->regmap, PMU_REG_WDT_STS, &val);
+	if (ret)
+		goto out_hwerr;
+
+	if (!(val & PMU_WDT_STS_INT_STS))
+		goto out;
+
+	/* clear interrupt and refresh */
+	irqret = IRQ_HANDLED;
+	ret = regmap_write(priv->regmap, PMU_REG_WDT_STS, val);
+	if (ret)
+		goto out_hwerr;
+
+	ret = regmap_write(priv->regmap, PMU_REG_WDT_REFRESH,
+			   PMU_WDT_REFRESH_VAL);
+	if (ret)
+		goto out_hwerr;
+
+	if (wdt->cb)
+		wdt->cb(wdt);
+
+out:
+	mutex_unlock(&wdt->mutex);
+	return irqret;
+
+out_hwerr:
+	mutex_unlock(&wdt->mutex);
+	/* since we cannot refresh the hardware watchdog, give us a
+	 * chance to soft-panic */
+	if (wdt->cb)
+		wdt->cb(wdt);
+	return irqret;
+}
+
+static int fbxgwr_pmu_wdt_init(struct fbxwatchdog *wdt)
+{
+	struct fbxgwr_pmu_wdt *priv = wdt->priv;
+	u32 countdown;
+	int ret;
+
+	ret = request_threaded_irq(priv->irq, NULL, fbxgwr_pmu_wdt_irq,
+				   IRQF_SHARED |
+				   IRQF_ONESHOT |
+				   IRQF_TRIGGER_LOW,
+				   "fbxwatchdog_gwr_pmu", wdt);
+	if (ret) {
+		dev_err(priv->dev, "request_irq failed: %d\n", ret);
+		return ret;
+	}
+
+	/* install a 60 sec watchdog, to have enough time to
+	 * gather info in case of panic() */
+	countdown = 60;
+	ret = regmap_write(priv->regmap, PMU_REG_WDT_TIMEOUT, countdown);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int fbxgwr_pmu_wdt_cleanup(struct fbxwatchdog *wdt)
+{
+	struct fbxgwr_pmu_wdt *priv = wdt->priv;
+
+	free_irq(priv->irq, wdt);
+	return 0;
+}
+
+static int fbxgwr_pmu_wdt_start(struct fbxwatchdog *wdt)
+{
+	struct fbxgwr_pmu_wdt *priv = wdt->priv;
+	int ret;
+	u32 val;
+
+	val = PMU_WDT_CTL_EN | PMU_WDT_CTL_INT_EN;
+	ret = regmap_write(priv->regmap, PMU_REG_WDT_CTL, val);
+	if (ret) {
+		dev_err(priv->dev, "enable failed: %d\n", ret);
+		return ret;
+	}
+
+	dev_info(priv->dev, "watchdog enabled\n");
+	return 0;
+}
+
+static int fbxgwr_pmu_wdt_stop(struct fbxwatchdog *wdt)
+{
+	struct fbxgwr_pmu_wdt *priv = wdt->priv;
+	int ret;
+
+	ret = regmap_write(priv->regmap, PMU_REG_WDT_CTL, 0);
+	if (ret) {
+		dev_err(priv->dev, "disable failed: %d\n", ret);
+		return ret;
+	}
+
+	dev_info(priv->dev, "watchdog disabled\n");
+
+	return 0;
+}
+
+/*
+ *
+ */
+static int fbxgwr_pmu_wdt_probe(struct platform_device *pdev)
+{
+	struct fbxgwr_pmu_wdt *priv;
+	struct fbxwatchdog *wdt;
+	int ret;
+	u32 val;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof (*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!priv->regmap)
+		return -ENODEV;
+
+	ret = regmap_read(priv->regmap, PMU_REG_FW_CAPABILITIES, &val);
+	if (ret)
+		return ret;
+
+	if (!(val & PMU_FW_CAP_WDT)) {
+		dev_warn(&pdev->dev, "watchdog cap unavailable\n");
+		return -ENODEV;
+	}
+
+	priv->irq = platform_get_irq_optional(pdev, 0);
+	if (priv->irq < 0) {
+		dev_err(&pdev->dev, "cannot get watchdog irq\n");
+		return priv->irq;
+	}
+
+	priv->dev = &pdev->dev;
+
+	wdt = devm_kzalloc(&pdev->dev, sizeof (*wdt), GFP_KERNEL);
+	if (!wdt) {
+		dev_err(&pdev->dev, "unable allocate memory for watchdog.\n");
+		return -ENOMEM;
+	}
+
+	wdt->name = pdev->name;
+	wdt->priv = priv;
+	wdt->wdt_init = fbxgwr_pmu_wdt_init;
+	wdt->wdt_cleanup = fbxgwr_pmu_wdt_cleanup;
+	wdt->wdt_start = fbxgwr_pmu_wdt_start;
+	wdt->wdt_stop = fbxgwr_pmu_wdt_stop;
+	wdt->use_mutex = true;
+
+	ret = fbxwatchdog_register(wdt);
+	if (ret) {
+		dev_warn(&pdev->dev, "unable to register watchdog %s\n",
+			 wdt->name);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, wdt);
+
+	return 0;
+}
+
+/*
+ *
+ */
+static int fbxgwr_pmu_wdt_remove(struct platform_device *pdev)
+{
+	struct fbxwatchdog *wdt;
+
+	wdt = platform_get_drvdata(pdev);
+	if (!wdt)
+		return -ENODEV;
+
+	fbxwatchdog_unregister(wdt);
+
+	return 0;
+}
+
+static const struct of_device_id fbxgwr_pmu_wdt_of_id[] = {
+	{ .compatible = "freebox,fbxgwr-pmu-watchdog", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, fbxgwr_pmu_wdt_of_id);
+
+static struct platform_driver fbxgwr_pmu_wdt_driver = {
+	.probe		= fbxgwr_pmu_wdt_probe,
+	.remove		= fbxgwr_pmu_wdt_remove,
+	.driver = {
+		.name	= "fbxgwr-pmu-watchdog",
+		.of_match_table	= fbxgwr_pmu_wdt_of_id,
+	},
+};
+
+module_platform_driver(fbxgwr_pmu_wdt_driver);
+
+MODULE_AUTHOR("Marios Makassikis");
+MODULE_LICENSE("GPL");
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/gpio/gpio-fbxgwr-pmu.c	2024-03-18 14:40:14.839741005 +0100
@@ -0,0 +1,453 @@
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/mfd/fbxgwr_pmu.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/interrupt.h>
+
+#define PMU_MAX_GPIOS		32
+#define PMU_BANK_SZ		8
+
+/*
+ * private context
+ */
+struct fbxgwr_pmu_gpio {
+	struct regmap		*regmap;
+	struct gpio_chip	chip;
+
+	int			irq;
+	struct mutex		irq_lock;
+
+	DECLARE_BITMAP(irq_enabled, PMU_MAX_GPIOS);
+	DECLARE_BITMAP(irq_enabled_new, PMU_MAX_GPIOS);
+};
+
+/*
+ * local functions
+ */
+static int fbxgwr_pmu_gpio_get_direction(struct gpio_chip *chip,
+					 unsigned int offset);
+static int fbxgwr_pmu_gpio_direction_input(struct gpio_chip *chip,
+					   unsigned int offset);
+static int fbxgwr_pmu_gpio_direction_output(struct gpio_chip *chip,
+					    unsigned int offset, int value);
+static int fbxgwr_pmu_gpio_get(struct gpio_chip *chip, unsigned int offset);
+static void fbxgwr_pmu_gpio_set(struct gpio_chip *chip, unsigned int offset,
+				int value);
+
+static const struct gpio_chip fbxgwr_pmu_gpio_chip = {
+	.label			= "fbxgwr_pmu_gpio",
+	.owner			= THIS_MODULE,
+	.get_direction		= fbxgwr_pmu_gpio_get_direction,
+	.direction_input	= fbxgwr_pmu_gpio_direction_input,
+	.direction_output	= fbxgwr_pmu_gpio_direction_output,
+	.get			= fbxgwr_pmu_gpio_get,
+	.set			= fbxgwr_pmu_gpio_set,
+	.base			= -1,
+	.can_sleep		= 1,
+};
+
+static int fbxgwr_pmu_gpio_get_direction(struct gpio_chip *chip,
+					 unsigned int offset)
+{
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(chip);
+	u8 dirreg, bit;
+	u32 reg_val;
+	int ret;
+
+	dirreg = PMU_REG_GPIO_DIR_GET_0 + offset / PMU_BANK_SZ;
+	bit = BIT(offset % PMU_BANK_SZ);
+
+	ret = regmap_read(priv->regmap, dirreg, &reg_val);
+	if (ret)
+		return ret;
+
+	if (!(reg_val & bit))
+		return GPIO_LINE_DIRECTION_IN;
+
+	return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int fbxgwr_pmu_gpio_direction_input(struct gpio_chip *chip,
+					   unsigned int offset)
+{
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(chip);
+	u8 dirreg, bit;
+
+	dirreg = PMU_REG_GPIO_DIR_CLR_0 + offset / PMU_BANK_SZ;
+	bit = BIT(offset % PMU_BANK_SZ);
+
+	return regmap_write(priv->regmap, dirreg, bit);
+}
+
+static int fbxgwr_pmu_gpio_direction_output(struct gpio_chip *chip,
+					    unsigned int offset, int value)
+{
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(chip);
+	u8 dirreg, outreg, bit;
+	int ret;
+
+	bit = BIT(offset % PMU_BANK_SZ);
+
+	if (value)
+		outreg = PMU_REG_GPIO_OUT_SET_0 + offset / PMU_BANK_SZ;
+	else
+		outreg = PMU_REG_GPIO_OUT_CLR_0 + offset / PMU_BANK_SZ;
+
+	ret = regmap_write(priv->regmap, outreg, bit);
+	if (ret)
+		return ret;
+
+	dirreg = PMU_REG_GPIO_DIR_SET_0 + offset / PMU_BANK_SZ;
+
+	return regmap_write(priv->regmap, dirreg, bit);
+}
+
+static int fbxgwr_pmu_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(chip);
+	u8 inreg, bit;
+	u32 reg_val;
+	int ret;
+
+	inreg = PMU_REG_GPIO_IN_0 + offset / PMU_BANK_SZ;
+	bit = BIT(offset % PMU_BANK_SZ);
+
+	ret = regmap_read(priv->regmap, inreg, &reg_val);
+	if (ret)
+		return ret;
+
+	return !!(reg_val & bit);
+}
+
+static void fbxgwr_pmu_gpio_set(struct gpio_chip *chip, unsigned int offset,
+				int value)
+{
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(chip);
+	u8 outreg, bit;
+
+	bit = BIT(offset % PMU_BANK_SZ);
+
+	if (value)
+		outreg = PMU_REG_GPIO_OUT_SET_0 + offset / PMU_BANK_SZ;
+	else
+		outreg = PMU_REG_GPIO_OUT_CLR_0 + offset / PMU_BANK_SZ;
+
+	regmap_write(priv->regmap, outreg, bit);
+}
+
+static void fbxgwr_pmu_irq_enable(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(gc);
+
+	/* delay manipulation of registers to bus_sync_unlock()
+	 * callback */
+	gpiochip_enable_irq(gc, irqd_to_hwirq(d));
+	set_bit(d->hwirq, priv->irq_enabled_new);
+}
+
+static void fbxgwr_pmu_irq_disable(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(gc);
+
+	/* delay manipulation of registers to bus_sync_unlock()
+	 * callback */
+	clear_bit(d->hwirq, priv->irq_enabled_new);
+	gpiochip_disable_irq(gc, irqd_to_hwirq(d));
+}
+
+static void fbxgwr_pmu_irq_bus_lock(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(gc);
+
+	mutex_lock(&priv->irq_lock);
+	bitmap_copy(priv->irq_enabled_new, priv->irq_enabled, PMU_MAX_GPIOS);
+}
+
+static int do_irqcmd(struct fbxgwr_pmu_gpio *priv,
+		     u32 cmd, u32 nr)
+{
+	int ret, i;
+	u32 val;
+
+	regmap_write(priv->regmap, PMU_REG_GPIO_IRQ_CMD_NR, nr);
+	regmap_write(priv->regmap, PMU_REG_GPIO_IRQ_CMD, cmd);
+
+	for (i = 0; i < 100; i++) {
+		ret = regmap_read(priv->regmap, PMU_REG_GPIO_IRQ_CMD_STAT,
+				  &val);
+		if (ret) {
+			dev_err(priv->chip.parent, "regmap read: %d\n", ret);
+			return ret;
+		}
+
+		if (val & PMU_GPIOIRQCMD_RES_BUSY) {
+			msleep(1);
+			continue;
+		}
+
+		if ((val & PMU_GPIOIRQCMD_RES_SUCCESS))
+			return 0;
+
+		dev_err(priv->chip.parent, "failed to start/stop interrupt\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void fbxgwr_pmu_irq_bus_sync_unlock(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(gc);
+	size_t i;
+
+	/* update enabled mask */
+	if (!bitmap_equal(priv->irq_enabled, priv->irq_enabled_new,
+			  PMU_MAX_GPIOS)) {
+		unsigned int p;
+		DECLARE_BITMAP(irq_changed, PMU_MAX_GPIOS);
+
+		bitmap_xor(irq_changed,
+			   priv->irq_enabled, priv->irq_enabled_new,
+			   PMU_MAX_GPIOS);
+
+		for_each_set_bit(p, irq_changed, PMU_MAX_GPIOS) {
+			unsigned int cmd;
+
+			if (test_bit(p, priv->irq_enabled_new))
+				cmd = PMU_GPIOIRQCMD_ENABLE;
+			else
+				cmd = PMU_GPIOIRQCMD_DISABLE;
+			do_irqcmd(priv, cmd, p);
+		}
+
+		/* update changed mask */
+		for (i = 0; i < PMU_MAX_GPIOS / PMU_BANK_SZ; i++) {
+			u8 mask;
+
+			mask = bitmap_get_value8(priv->irq_enabled_new, i * 8);
+			regmap_write(priv->regmap,
+				     PMU_REG_GPIO_IRQ_MASK_BASE + i,
+				     mask);
+		}
+
+		bitmap_copy(priv->irq_enabled,
+			    priv->irq_enabled_new, PMU_MAX_GPIOS);
+	}
+
+	mutex_unlock(&priv->irq_lock);
+}
+
+static irqreturn_t fbxgwr_pmu_irq_handler(int irq, void *devid)
+{
+	struct fbxgwr_pmu_gpio *priv = devid;
+	DECLARE_BITMAP(pending, PMU_MAX_GPIOS);
+	irqreturn_t irqret = IRQ_NONE;
+	unsigned int p;
+	size_t i;
+
+	bitmap_zero(pending, PMU_MAX_GPIOS);
+
+	for (i = 0; i < PMU_MAX_GPIOS / PMU_BANK_SZ; i++) {
+		u32 bank_pending;
+		u8 mask;
+		int ret;
+
+		mask = bitmap_get_value8(priv->irq_enabled, i * 8);
+		if (!mask)
+			continue;
+
+		ret = regmap_read(priv->regmap, PMU_REG_GPIO_IRQ_STAT_BASE + i,
+				  &bank_pending);
+		if (ret)
+			break;
+
+		if (!bank_pending)
+			continue;
+
+		irqret = IRQ_HANDLED;
+
+		/* clear interrupt status */
+		ret = regmap_write(priv->regmap,
+				   PMU_REG_GPIO_IRQ_STAT_BASE + i,
+				   bank_pending);
+		if (ret)
+			break;
+
+		bitmap_set_value8(pending, bank_pending, i * 8);
+	}
+
+	for_each_set_bit(p, pending, PMU_MAX_GPIOS) {
+		int child_irq;
+		child_irq = irq_find_mapping(priv->chip.irq.domain, p);
+		handle_nested_irq(child_irq);
+	}
+
+	return irqret;
+}
+
+static int fbxgwr_pmu_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+	/* we only support both edges */
+	if ((type & IRQ_TYPE_EDGE_BOTH) != IRQ_TYPE_EDGE_BOTH) {
+		dev_err(gc->parent, "irq %d: unsupported type %d\n",
+			d->irq, type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void fbxgwr_pmu_init_irq_valid_mask(struct gpio_chip *gc,
+					   unsigned long *valid_mask,
+					   unsigned int ngpios)
+{
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(gc);
+	DECLARE_BITMAP(irq_valid, PMU_MAX_GPIOS);
+	unsigned int i;
+
+	for (i = 0; i < PMU_MAX_GPIOS / PMU_BANK_SZ; i++) {
+		u32 val;
+		int ret;
+
+		ret = regmap_read(priv->regmap, PMU_REG_GPIO_IRQ_CAP_BASE + i,
+				  &val);
+		if (ret) {
+			dev_err(gc->parent, "regmap read failed: %d\n", ret);
+			return;
+		}
+
+		bitmap_set_value8(irq_valid, val, i * 8);
+	}
+
+	bitmap_copy(valid_mask, irq_valid, ngpios);
+}
+
+static const struct irq_chip fbxgwr_pmu_irq_chip = {
+	.name			= "fbxgwr_pmu_gpio",
+	.irq_bus_lock		= fbxgwr_pmu_irq_bus_lock,
+	.irq_bus_sync_unlock	= fbxgwr_pmu_irq_bus_sync_unlock,
+	.irq_set_type		= fbxgwr_pmu_irq_set_type,
+	.irq_enable		= fbxgwr_pmu_irq_enable,
+	.irq_disable		= fbxgwr_pmu_irq_disable,
+	.flags			= IRQCHIP_IMMUTABLE,
+	 GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static int fbxgwr_pmu_gpio_irq_setup(struct platform_device *pdev,
+				     struct fbxgwr_pmu_gpio *priv)
+{
+	struct gpio_irq_chip *girq;
+	u32 val;
+	int ret;
+
+	if (!priv->irq)
+		return 0;
+
+	ret = regmap_read(priv->regmap, PMU_REG_FW_CAPABILITIES, &val);
+	if (ret)
+		return -EIO;
+
+	if (!(val & PMU_FW_CAP_GPIO_IRQ))
+		return 0;
+
+	mutex_init(&priv->irq_lock);
+
+	ret = devm_request_threaded_irq(&pdev->dev, priv->irq,
+					NULL, fbxgwr_pmu_irq_handler,
+					IRQF_ONESHOT |
+					IRQF_SHARED |
+					IRQF_TRIGGER_LOW,
+					dev_name(&pdev->dev), priv);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request irq %d\n", ret);
+		return ret;
+	}
+
+	girq = &priv->chip.irq;
+
+	gpio_irq_chip_set_chip(girq, &fbxgwr_pmu_irq_chip);
+	girq->parent_handler = NULL;
+	girq->num_parents = 0;
+	girq->parents = NULL;
+	girq->default_type = IRQ_TYPE_NONE;
+	girq->handler = handle_bad_irq;
+	girq->init_valid_mask = fbxgwr_pmu_init_irq_valid_mask;
+	girq->threaded = true;
+
+	return 0;
+}
+
+static int fbxgwr_pmu_gpio_probe(struct platform_device *pdev)
+{
+	struct fbxgwr_pmu_gpio *priv;
+	u32 ngpios;
+	int ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(struct fbxgwr_pmu_gpio),
+			     GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!priv->regmap)
+		return -ENODEV;
+
+	priv->irq = platform_get_irq_optional(pdev, 0);
+	if (priv->irq < 0)
+		return priv->irq;
+
+	priv->chip = fbxgwr_pmu_gpio_chip;
+	priv->chip.parent = &pdev->dev;
+
+	ret = of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios);
+	if (ret) {
+		dev_err(&pdev->dev, "missing ngpios property in DT\n");
+		return ret;
+	}
+
+	/* reset io expander internal state, in case something else
+	 * enabled any irq before */
+	ret = regmap_write(priv->regmap, PMU_REG_GPIO_REINIT, 1);
+	if (ret)
+		return -EIO;
+
+	priv->chip.ngpio = ngpios;
+
+	ret = fbxgwr_pmu_gpio_irq_setup(pdev, priv);
+	if (ret)
+		return ret;
+
+	ret = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static const struct of_device_id fbxgwr_pmu_gpio_of_id[] = {
+	{ .compatible = "freebox,fbxgwr-pmu-gpio", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, fbxgwr_pmu_gpio_of_id);
+
+static struct platform_driver fbxgwr_pmu_gpio_driver = {
+	.probe		= fbxgwr_pmu_gpio_probe,
+	.driver = {
+		.name		= "fbxgwr-pmu-gpio",
+		.of_match_table	= fbxgwr_pmu_gpio_of_id,
+	},
+};
+
+module_platform_driver(fbxgwr_pmu_gpio_driver);
+
+MODULE_AUTHOR("Marios Makassikis");
+MODULE_LICENSE("GPL");
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/hwmon/fbxgwr_pmu_hwmon.c	2023-08-10 16:54:11.558117447 +0200
@@ -0,0 +1,281 @@
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/mfd/fbxgwr_pmu.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+
+struct fbxgwr_pmu_hwmon {
+	struct regmap	*regmap;
+	u8		 *in_map[hwmon_max];
+};
+
+static inline int __pow10(u8 x)
+{
+	int r = 1;
+
+	while (x--)
+		r *= 10;
+
+	return r;
+}
+
+static int fbxgwr_pmu_hwmon_read(struct device *dev,
+				 enum hwmon_sensor_types type,
+				 u32 attr, int channel, long *val)
+{
+	struct fbxgwr_pmu_hwmon *priv = dev_get_drvdata(dev);
+	u32 reg_val;
+	int ret;
+
+	switch (type) {
+	case hwmon_in:
+	case hwmon_curr:
+	case hwmon_fan:
+	case hwmon_temp:
+	case hwmon_power:
+	{
+		int in_off = priv->in_map[type][channel] * 3;
+		u32 high, low;
+		u16 val16;
+		int div;
+
+		ret = regmap_read(priv->regmap,
+				  PMU_REG_IN_BASE + in_off + 0,
+				  &reg_val);
+		if (ret)
+			return ret;
+
+		ret = regmap_read(priv->regmap,
+				  PMU_REG_IN_BASE + in_off + 1,
+				  &low);
+		if (ret)
+			return ret;
+
+		ret = regmap_read(priv->regmap,
+				  PMU_REG_IN_BASE + in_off + 2,
+				  &high);
+		if (ret)
+			return ret;
+
+		val16 = (high << 8) | low;
+		if (reg_val & PMU_IN_SIGNED_MASK)
+			*val = sign_extend32(val16, 15);
+		else
+			*val = val16;
+
+		div = (reg_val & PMU_IN_DIVIDER_MASK) >> PMU_IN_DIVIDER_SHIFT;
+		if (div)
+			*val *= __pow10(div);
+
+		break;
+	}
+
+	case hwmon_pwm:
+		ret = regmap_read(priv->regmap, PMU_REG_FAN_PWM_BASE + channel,
+				  &reg_val);
+		if (ret)
+			return ret;
+		*val = reg_val;
+		break;
+
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	return ret;
+}
+
+static int fbxgwr_pmu_hwmon_write(struct device *dev,
+				  enum hwmon_sensor_types type,
+				  u32 attr, int channel, long val)
+{
+	struct fbxgwr_pmu_hwmon *priv = dev_get_drvdata(dev);
+	int ret;
+
+	switch (type) {
+	case hwmon_pwm:
+		val = clamp_val(val, 0, 255);
+		ret = regmap_write(priv->regmap,
+				   PMU_REG_FAN_PWM_BASE + channel, val);
+		if (ret)
+			return ret;
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
+static umode_t fbxgwr_pmu_hwmon_is_visible(const void *_data,
+					   enum hwmon_sensor_types type,
+					   u32 attr, int channel)
+{
+	switch (type) {
+	case hwmon_in:
+	case hwmon_curr:
+	case hwmon_fan:
+	case hwmon_temp:
+	case hwmon_power:
+		return 0444;
+	case hwmon_pwm:
+		return 0644;
+	default:
+		return 0;
+	}
+}
+
+static const struct hwmon_ops fbxgwr_pmu_hwmon_ops = {
+	.is_visible = fbxgwr_pmu_hwmon_is_visible,
+	.read = fbxgwr_pmu_hwmon_read,
+	.write = fbxgwr_pmu_hwmon_write,
+};
+
+static u32 hwmon_attributes[hwmon_max] = {
+	[hwmon_temp] = HWMON_T_INPUT,
+	[hwmon_in] = HWMON_I_INPUT,
+	[hwmon_curr] = HWMON_C_INPUT,
+	[hwmon_power] = HWMON_P_INPUT,
+	[hwmon_pwm] = HWMON_PWM_INPUT,
+	[hwmon_fan] = HWMON_F_INPUT,
+};
+
+static int fbxgwr_pmu_hwmon_probe(struct platform_device *pdev)
+{
+	struct fbxgwr_pmu_hwmon *hwmon;
+	struct device *hwmon_dev;
+	enum hwmon_sensor_types type;
+	u32 fan_count, in_reg_count, in_count, cinfo_count, i;
+	int ret, nr_count[hwmon_max] = {0};
+	const struct hwmon_channel_info **ptr_cinfos;
+	struct hwmon_channel_info *cinfos;
+	struct hwmon_chip_info pmu_hwmon_chip_info = {
+		.ops = &fbxgwr_pmu_hwmon_ops,
+	};
+
+	hwmon = devm_kzalloc(&pdev->dev, sizeof(struct fbxgwr_pmu_hwmon),
+			     GFP_KERNEL);
+	if (!hwmon)
+		return -ENOMEM;
+
+	hwmon->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!hwmon->regmap)
+		return -ENODEV;
+
+	/* enumerate hardware channels and build channel infos
+	 * dynamically */
+	ret = regmap_read(hwmon->regmap, PMU_REG_IN_COUNT, &in_reg_count);
+	if (ret)
+		return ret;
+
+	/* get actual sensor type from hardware */
+	in_count = 0;
+	for (i = 0; i < in_reg_count; i++) {
+		u32 in_type;
+
+		ret = regmap_read(hwmon->regmap, PMU_REG_IN_BASE + i * 3,
+				  &in_type);
+		if (ret)
+			return ret;
+
+		in_type &= PMU_IN_TYPE_MASK;
+		switch (in_type) {
+		case PMU_IN_TYPE_UNUSED:
+			continue;
+		case PMU_IN_TYPE_CURRENT:
+			type = hwmon_curr;
+			break;
+		case PMU_IN_TYPE_VOLTAGE:
+			type = hwmon_in;
+			break;
+		case PMU_IN_TYPE_POWER:
+			type = hwmon_power;
+			break;
+		case PMU_IN_TYPE_TEMPERATURE:
+			type = hwmon_temp;
+			break;
+		case PMU_IN_TYPE_FAN_INPUT:
+			type = hwmon_fan;
+			break;
+		default:
+			dev_warn(&pdev->dev, "unknown in-type, "
+				 "assume voltage\n");
+			type = hwmon_in;
+			break;
+		}
+
+		if (!hwmon->in_map[type]) {
+			hwmon->in_map[type] = devm_kcalloc(&pdev->dev,
+							   in_reg_count,
+							   sizeof (u8),
+							   GFP_KERNEL);
+			if (!hwmon->in_map[type])
+				return -ENOMEM;
+		}
+
+		hwmon->in_map[type][nr_count[type]] = i;
+		nr_count[type]++;
+		in_count++;
+	}
+
+	ret = regmap_read(hwmon->regmap, PMU_REG_FAN_PWM_COUNT, &fan_count);
+	if (ret)
+		return ret;
+	nr_count[hwmon_pwm] = fan_count;
+
+	/* finally allocate channel info */
+	cinfo_count = fan_count + in_count;
+
+	cinfos = devm_kcalloc(&pdev->dev, cinfo_count,
+			      sizeof (*cinfos), GFP_KERNEL);
+	if (!cinfos)
+		return -ENOMEM;
+
+	ptr_cinfos = devm_kcalloc(&pdev->dev, cinfo_count + 1,
+				  sizeof (*ptr_cinfos), GFP_KERNEL);
+	if (!ptr_cinfos)
+		return -ENOMEM;
+
+	pmu_hwmon_chip_info.info = ptr_cinfos;
+
+	for (type = 0; type < hwmon_max; type++) {
+		u32 *cfg;
+
+		if (!nr_count[type])
+			continue;
+
+		cfg = devm_kcalloc(&pdev->dev, nr_count[type] + 1,
+				   sizeof (*cfg), GFP_KERNEL);
+		if (!cfg)
+			return -ENOMEM;
+
+		for (i = 0; i < nr_count[type]; i++)
+			cfg[i] = hwmon_attributes[type];
+
+		cinfos->type = type;
+		cinfos->config = cfg;
+		*ptr_cinfos++ = cinfos++;
+	}
+
+	hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+							 "fbxgwr_pmu_hwmon",
+							 hwmon,
+							 &pmu_hwmon_chip_info,
+							 NULL);
+	return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct platform_driver fbxgwr_pmu_hwmon_driver = {
+	.probe		= fbxgwr_pmu_hwmon_probe,
+	.driver = {
+		.name	= "fbxgwr-pmu-hwmon",
+	},
+};
+
+module_platform_driver(fbxgwr_pmu_hwmon_driver);
+
+MODULE_AUTHOR("Marios Makassikis");
+MODULE_LICENSE("GPL");
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/leds/leds-fbxgwr-pmu.c	2023-06-22 21:43:23.074921233 +0200
@@ -0,0 +1,117 @@
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/mfd/fbxgwr_pmu.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+struct fbxgwr_pmu_led;
+
+struct pmu_led {
+	struct led_classdev	ldev;
+	u32			reg;
+	struct fbxgwr_pmu_led	*priv;
+};
+
+struct fbxgwr_pmu_led {
+	struct regmap		*regmap;
+	struct pmu_led		*leds;
+	size_t			nleds;
+};
+
+static int fbxgwr_pmu_led_brightness_set(struct led_classdev *led_cdev,
+					 enum led_brightness brightness)
+{
+	struct pmu_led *led = container_of(led_cdev, struct pmu_led, ldev);
+
+	return regmap_write(led->priv->regmap, led->reg, brightness);
+}
+
+static int fbxgwr_pmu_led_probe(struct platform_device *pdev)
+{
+	struct device_node *node, *child;
+	struct fbxgwr_pmu_led *priv;
+	struct pmu_led *led;
+	u32 nleds;
+	int ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(struct fbxgwr_pmu_led),
+			    GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!priv->regmap)
+		return -ENODEV;
+
+	ret = of_property_read_u32(pdev->dev.of_node, "nleds", &nleds);
+	if (ret) {
+		dev_err(&pdev->dev, "missing nleds property in DT\n");
+		return ret;
+	}
+
+	led = devm_kcalloc(&pdev->dev, nleds, sizeof(struct pmu_led),
+			   GFP_KERNEL);
+	if (!led)
+		return -ENOMEM;
+
+	priv->nleds = nleds;
+	priv->leds = led;
+
+	node = pdev->dev.of_node;
+
+	for_each_child_of_node(node, child) {
+		u32 reg;
+
+		led->ldev.name =
+			of_get_property(child, "label", NULL) ?: child->name;
+		led->ldev.flags = 0;
+		led->ldev.brightness_set_blocking =
+				fbxgwr_pmu_led_brightness_set;
+		led->ldev.max_brightness = LED_FULL;
+
+		ret = of_property_read_u32(child, "reg", &reg);
+		if (ret || reg >= nleds) {
+			of_node_put(child);
+			return -EINVAL;
+		}
+
+		led->reg = PMU_REG_LED0_PWM + reg;
+
+		ret = regmap_read(priv->regmap, led->reg,
+				  &led->ldev.brightness);
+		if (ret)
+			return ret;
+
+		ret = devm_led_classdev_register(&pdev->dev, &led->ldev);
+		if (ret)
+			return ret;
+
+		led->priv = priv;
+
+		led++;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id fbxgwr_pmu_led_of_id[] = {
+	{ .compatible = "freebox,fbxgwr-pmu-led", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, fbxgwr_pmu_led_of_id);
+
+static struct platform_driver fbxgwr_pmu_led_driver = {
+	.probe		= fbxgwr_pmu_led_probe,
+	.driver = {
+		.name	= "fbxgwr-pmu-led",
+		.of_match_table = fbxgwr_pmu_led_of_id,
+	},
+};
+
+module_platform_driver(fbxgwr_pmu_led_driver);
+
+MODULE_AUTHOR("Marios Makassikis");
+MODULE_LICENSE("GPL");
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/mfd/fbxgwr-pmu.c	2024-03-18 14:40:14.839741005 +0100
@@ -0,0 +1,671 @@
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/fbxgwr_pmu.h>
+#include <linux/regmap.h>
+#include <linux/module.h>
+
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+
+/*
+ * private context
+ */
+struct fbxgwr_pmu {
+	struct i2c_client 	*i2c_client;
+	struct regmap		*regmap;
+	u32			board_id;
+	u32			app_ver_rev;
+	u32			api_major;
+	u32			api_minor;
+};
+
+/* XXX dummy func to disable regmap caching */
+static bool volatile_reg(struct device *dev, unsigned int reg)
+{
+	return true;
+}
+
+static const struct regmap_config fbxgwr_pmu_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = 0xff,
+
+	.volatile_reg = volatile_reg,
+};
+
+static const struct mfd_cell fbxgwr_pmu_devs[] = {
+	MFD_CELL_OF("fbxgwr-pmu-gpio", NULL,
+		    NULL, 0, 0, "freebox,fbxgwr-pmu-gpio"),
+	MFD_CELL_OF("fbxgwr-pmu-led", NULL,
+		    NULL, 0, 0, "freebox,fbxgwr-pmu-led"),
+	MFD_CELL_NAME("fbxgwr-pmu-hwmon"),
+	MFD_CELL_OF("fbxgwr-pmu-watchdog", NULL,
+		    NULL, 0, 0, "freebox,fbxgwr-pmu-watchdog"),
+};
+
+static ssize_t pmu_board_id_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", priv->board_id);
+}
+
+static ssize_t pmu_app_version_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u.%u\n",
+		       priv->app_ver_rev >> 16,
+		       priv->app_ver_rev & 0xffff);
+}
+
+static ssize_t pmu_app_iversion_show(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", priv->app_ver_rev);
+}
+
+static ssize_t pmu_api_major_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", priv->api_major);
+}
+
+static ssize_t pmu_api_minor_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", priv->api_minor);
+}
+
+static ssize_t pmu_test_mode_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	int ret;
+	u32 val;
+
+	ret = regmap_read(priv->regmap, PMU_REG_TEST_MODE, &val);
+	if (ret)
+		return -EIO;
+
+	return sprintf(buf, "0x%x\n", val);
+}
+
+static ssize_t pmu_cur_app_bank_show(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	int ret;
+	u32 val;
+
+	ret = regmap_read(priv->regmap, PMU_REG_CUR_APP_BANK, &val);
+	if (ret)
+		return -EIO;
+
+	return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t pmu_fw_capabilities_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	size_t len;
+	u32 val;
+	int ret;
+
+	ret = regmap_read(priv->regmap, PMU_REG_FW_CAPABILITIES, &val);
+	if (ret)
+		return -EIO;
+
+	buf[0] = 0;
+	len = 0;
+	if (val & PMU_FW_CAP_FWUPGRADE)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "fw_upgrade ");
+	if (val & PMU_FW_CAP_BANK_SWITCH)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "bank_switch ");
+	if (val & PMU_FW_CAP_RTC)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "rtc ");
+	if (val & PMU_FW_CAP_STANDBY)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "standby ");
+	if (val & PMU_FW_CAP_WDT)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "watchdog ");
+	if (val & PMU_FW_CAP_GPIO_IRQ)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "gpio-irq");
+
+	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+	return len;
+}
+
+static ssize_t pmu_rtc_reg_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf,
+				int base)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	u32 val[4], rtc;
+	int ret, i;
+
+	for (i = 0; i < 4; i++) {
+		ret = regmap_read(priv->regmap, base + i, val + i);
+		if (ret)
+			return -EIO;
+	}
+
+	rtc = (val[3] << 24) | (val[2] << 16) | (val[1] << 8) | val[0];
+	return sprintf(buf, "%u\n", rtc);
+}
+
+static ssize_t pmu_rtc_show(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	return pmu_rtc_reg_show(dev, attr, buf, PMU_REG_RTC_VALUE_0);
+}
+
+static ssize_t pmu_rtc_cmp_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	return pmu_rtc_reg_show(dev, attr, buf, PMU_REG_RTC_CMP_VALUE_0);
+}
+
+static ssize_t pmu_rtc_cmp_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t len)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	int rtc;
+	int ret, i;
+
+	rtc = simple_strtoul(buf, NULL, 10);
+	if (rtc < 0)
+		return -EINVAL;
+
+	for (i = 0; i < 4; i++) {
+		ret = regmap_write(priv->regmap, PMU_REG_RTC_CMP_VALUE_0 + i,
+				   (rtc >> (i * 8)) & 0xff);
+		if (ret)
+			return -EIO;
+	}
+	return len;
+}
+
+static ssize_t pmu_board_reset_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t len)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	int val, ret;
+
+	val = simple_strtoul(buf, NULL, 10);
+	if (val < 0)
+		return val;
+	if (val > 0) {
+		ret = regmap_write(priv->regmap, PMU_REG_BOARD_RESET,
+				   PMU_RESET_MAGIC);
+		if (ret)
+			return -EIO;
+	}
+	return len;
+}
+
+static ssize_t pmu_fake_dgasp_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t len)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	int val, ret;
+
+	val = simple_strtoul(buf, NULL, 10);
+	if (val < 0)
+		return val;
+	if (val > 0) {
+		ret = regmap_write(priv->regmap, PMU_REG_FAKE_DGASP,
+				   PMU_FAKE_DGASP_MAGIC);
+		if (ret)
+			return -EIO;
+	}
+	return len;
+}
+
+/*
+ * sysfs store: a strictly positive value requests standby.  The wake
+ * reason register is cleared first so the next wakeup reports a fresh
+ * reason, then the standby magic is written.
+ */
+static ssize_t pmu_enter_standby_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t len)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	int val, ret;
+
+	val = simple_strtoul(buf, NULL, 10);
+	if (val < 0)
+		return val;
+	if (val > 0) {
+		/* clear wake reason */
+		ret = regmap_write(priv->regmap, PMU_REG_WAKE_REASON_MASK,
+				   0xff);
+		if (ret)
+			return -EIO;
+		ret = regmap_write(priv->regmap, PMU_REG_ENTER_STANDBY,
+				   PMU_STANDBY_MAGIC);
+		if (ret)
+			return -EIO;
+	}
+	return len;
+}
+
+/* sysfs show: current PON wake interval as an unsigned decimal */
+static ssize_t pmu_wake_pon_interval_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	u32 interval;
+	int err;
+
+	err = regmap_read(priv->regmap, PMU_REG_WAKE_PON_INTERVAL, &interval);
+	if (err)
+		return -EIO;
+
+	return sprintf(buf, "%u\n", interval);
+}
+
+/*
+ * sysfs store: parse a decimal PON wake interval and write it to
+ * PMU_REG_WAKE_PON_INTERVAL.  Returns @len on success, the negative
+ * parsed value, or -EIO on write failure.
+ */
+static ssize_t pmu_wake_pon_interval_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t len)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	int val, ret;
+
+	val = simple_strtoul(buf, NULL, 10);
+	if (val < 0)
+		return val;
+
+	/* dropped the old "if (val >= 0)" guard: always true here */
+	ret = regmap_write(priv->regmap, PMU_REG_WAKE_PON_INTERVAL, val);
+	if (ret)
+		return -EIO;
+
+	return len;
+}
+
+/*
+ * sysfs show: space-separated list of enabled wake sources, derived
+ * from the bits set in PMU_REG_WAKE_SRC_MASK, terminated by '\n'.
+ */
+static ssize_t pmu_wake_src_enabled_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	static const struct {
+		u32		mask;
+		const char	*name;
+	} srcs[] = {
+		{ PMU_WAKE_R_RTC_MASK,		"rtc " },
+		{ PMU_WAKE_R_PWRBTN_MASK,	"button " },
+		{ PMU_WAKE_R_WAKEPON_MASK,	"pon " },
+	};
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	size_t pos, i;
+	u32 mask;
+	int ret;
+
+	ret = regmap_read(priv->regmap, PMU_REG_WAKE_SRC_MASK, &mask);
+	if (ret)
+		return -EIO;
+
+	buf[0] = 0;
+	pos = 0;
+	for (i = 0; i < ARRAY_SIZE(srcs); i++) {
+		if (mask & srcs[i].mask)
+			pos += scnprintf(buf + pos, PAGE_SIZE - pos, "%s",
+					 srcs[i].name);
+	}
+
+	pos += scnprintf(buf + pos, PAGE_SIZE - pos, "\n");
+	return pos;
+}
+
+/*
+ * sysfs store: parse a whitespace-separated list of wake source names
+ * ("rtc", "button", "pon"), build the corresponding mask and write it
+ * to PMU_REG_WAKE_SRC_MASK.  An unknown token yields -EINVAL.
+ */
+static ssize_t pmu_wake_src_enabled_store(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t len)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	char **argv;
+	int argc, i, ret;
+	u32 mask = 0;
+
+	argv = argv_split(GFP_KERNEL, buf, &argc);
+	if (!argv)
+		return -ENOMEM;
+
+	for (i = 0; i < argc; i++) {
+		if (!strcmp(argv[i], "rtc")) {
+			mask |= PMU_WAKE_R_RTC_MASK;
+		} else if (!strcmp(argv[i], "button")) {
+			mask |= PMU_WAKE_R_PWRBTN_MASK;
+		} else if (!strcmp(argv[i], "pon")) {
+			mask |= PMU_WAKE_R_WAKEPON_MASK;
+		} else {
+			argv_free(argv);
+			return -EINVAL;
+		}
+	}
+	argv_free(argv);
+
+	ret = regmap_write(priv->regmap, PMU_REG_WAKE_SRC_MASK, mask);
+	if (ret)
+		return -EIO;
+
+	return len;
+}
+
+/* sysfs show: fixed list of wake source names understood by
+ * pmu_wake_src_enabled_store() */
+static ssize_t pmu_wake_src_available_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	return sprintf(buf, "rtc button pon\n");
+}
+
+
+/*
+ * sysfs show: decode PMU_REG_WAKE_REASON_MASK into a single reason
+ * string.  A zero register means power-on reset; otherwise the first
+ * matching bit wins, in the priority order of the table below.
+ */
+static ssize_t pmu_last_wake_reason_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	/* ordered by decreasing priority (same order as the old chain) */
+	static const struct {
+		u32		mask;
+		const char	*name;
+	} reasons[] = {
+		{ PMU_WAKE_R_RTC_MASK,		"rtc" },
+		{ PMU_WAKE_R_PWRBTN_MASK,	"button" },
+		{ PMU_WAKE_R_WAKEPON_MASK,	"pon" },
+		{ PMU_WAKE_R_WDT_RST_MASK,	"wdt-reset" },
+		{ PMU_WAKE_R_SOC_RST_MASK,	"soc-reset" },
+	};
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	const char *reason = "unknown";
+	size_t i;
+	u32 val;
+	int ret;
+
+	ret = regmap_read(priv->regmap, PMU_REG_WAKE_REASON_MASK, &val);
+	if (ret)
+		return -EIO;
+
+	if (!val) {
+		reason = "power-on-reset";
+	} else {
+		for (i = 0; i < ARRAY_SIZE(reasons); i++) {
+			if (val & reasons[i].mask) {
+				reason = reasons[i].name;
+				break;
+			}
+		}
+	}
+
+	return sprintf(buf, "%s\n", reason);
+}
+
+/* sysfs store: any write clears the latched wake reason bits */
+static ssize_t pmu_last_wake_reason_store(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t len)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+
+	if (regmap_write(priv->regmap, PMU_REG_WAKE_REASON_MASK, ~0))
+		return -EIO;
+
+	return len;
+}
+
+/*
+ * sysfs show: translate the PMU microcontroller's own reset reason
+ * code (PMU_REG_PMU_RESET_REASON) into a human-readable string;
+ * unrecognized codes print as "unknown".
+ */
+static ssize_t pmu_mcu_reboot_reason_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	static const struct {
+		u32		code;
+		const char	*name;
+	} reasons[] = {
+		{ PMU_RESET_R_POR,		"power-on-reset" },
+		{ PMU_RESET_R_SWRESET,		"swreset" },
+		{ PMU_RESET_R_VDROP,		"vdrop" },
+		{ PMU_RESET_R_HWRESET,		"hwreset" },
+		{ PMU_RESET_R_WATCHDOG,		"watchdog" },
+		{ PMU_RESET_R_BUS_ERROR,	"bus_error" },
+		{ PMU_RESET_R_SRAM_PARITY,	"sram_parity" },
+		{ PMU_RESET_R_BOOTSTRAP,	"bootstrap" },
+	};
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	const char *name = "unknown";
+	size_t i;
+	u32 val;
+	int ret;
+
+	ret = regmap_read(priv->regmap, PMU_REG_PMU_RESET_REASON, &val);
+	if (ret)
+		return -EIO;
+
+	for (i = 0; i < ARRAY_SIZE(reasons); i++) {
+		if (val == reasons[i].code) {
+			name = reasons[i].name;
+			break;
+		}
+	}
+
+	return sprintf(buf, "%s\n", name);
+}
+
+/*
+ * sysfs show: report the detected USB-C CC pin polarity ("cc1",
+ * "cc2" or "unknown") read from PMU_REG_CC_POLARITY.
+ */
+static ssize_t pmu_cc_polarity_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	const char *polarity;
+	u32 val;
+	int ret;
+
+	ret = regmap_read(priv->regmap, PMU_REG_CC_POLARITY, &val);
+	if (ret)
+		return -EIO;
+
+	switch (val) {
+	case PMU_CC_POLARITY_CC1:
+		polarity = "cc1";
+		break;
+	case PMU_CC_POLARITY_CC2:
+		polarity = "cc2";
+		break;
+	case PMU_CC_POLARITY_UNKNOWN:
+	default:
+		polarity = "unknown";
+		break;
+	}
+
+	return sprintf(buf, "%s\n", polarity);
+}
+
+/* sysfs attribute declarations; DEVICE_ATTR_RO/RW/WO bind each
+ * attribute to its pmu_*_show/_store handler by name convention */
+static DEVICE_ATTR_RO(pmu_board_id);
+static DEVICE_ATTR_RO(pmu_app_iversion);
+static DEVICE_ATTR_RO(pmu_app_version);
+static DEVICE_ATTR_RO(pmu_api_major);
+static DEVICE_ATTR_RO(pmu_api_minor);
+static DEVICE_ATTR_RO(pmu_test_mode);
+static DEVICE_ATTR_RO(pmu_cur_app_bank);
+static DEVICE_ATTR_RO(pmu_fw_capabilities);
+static DEVICE_ATTR_RO(pmu_rtc);
+static DEVICE_ATTR_RW(pmu_rtc_cmp);
+static DEVICE_ATTR_WO(pmu_board_reset);
+static DEVICE_ATTR_WO(pmu_enter_standby);
+static DEVICE_ATTR_WO(pmu_fake_dgasp);
+static DEVICE_ATTR_RO(pmu_wake_src_available);
+static DEVICE_ATTR_RW(pmu_wake_src_enabled);
+static DEVICE_ATTR_RW(pmu_wake_pon_interval);
+static DEVICE_ATTR_RW(pmu_last_wake_reason);
+static DEVICE_ATTR_RO(pmu_mcu_reboot_reason);
+static DEVICE_ATTR_RO(pmu_cc_polarity);
+
+/* NULL-terminated attribute list backing pmu_attribute_group */
+static struct attribute *sysfs_attrs_ctrl[] = {
+	&dev_attr_pmu_board_id.attr,
+	&dev_attr_pmu_app_iversion.attr,
+	&dev_attr_pmu_app_version.attr,
+	&dev_attr_pmu_api_major.attr,
+	&dev_attr_pmu_api_minor.attr,
+	&dev_attr_pmu_test_mode.attr,
+	&dev_attr_pmu_cur_app_bank.attr,
+	&dev_attr_pmu_fw_capabilities.attr,
+	&dev_attr_pmu_rtc.attr,
+	&dev_attr_pmu_rtc_cmp.attr,
+	&dev_attr_pmu_board_reset.attr,
+	&dev_attr_pmu_enter_standby.attr,
+	&dev_attr_pmu_fake_dgasp.attr,
+	&dev_attr_pmu_wake_src_available.attr,
+	&dev_attr_pmu_wake_src_enabled.attr,
+	&dev_attr_pmu_wake_pon_interval.attr,
+	&dev_attr_pmu_last_wake_reason.attr,
+	&dev_attr_pmu_mcu_reboot_reason.attr,
+	&dev_attr_pmu_cc_polarity.attr,
+	NULL,
+};
+
+/* single anonymous group; declared as a one-element array so it can
+ * be passed directly (array-to-pointer decay) to sysfs_create_group()
+ * and sysfs_remove_group() */
+static const struct attribute_group pmu_attribute_group[] = {
+	{ .attrs = sysfs_attrs_ctrl },
+};
+
+/*
+ * Probe: validate the PMU magic registers, cache firmware version /
+ * API level / board id, then expose the sysfs attributes and register
+ * the MFD sub-devices.  Returns 0 or a negative errno.
+ */
+static int fbxgwr_pmu_i2c_probe(struct i2c_client *i2c)
+{
+	struct fbxgwr_pmu *priv;
+	u32 magic0, magic1, val;
+	int ret;
+
+	priv = devm_kzalloc(&i2c->dev, sizeof(struct fbxgwr_pmu), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	i2c_set_clientdata(i2c, priv);
+	priv->i2c_client = i2c;
+	priv->regmap = devm_regmap_init_i2c(i2c, &fbxgwr_pmu_regmap_config);
+	if (IS_ERR(priv->regmap))
+		return PTR_ERR(priv->regmap);
+
+	/* read magic: make sure a PMU firmware really answers here */
+	ret = regmap_read(priv->regmap, PMU_REG_MAGIC0, &magic0);
+	if (ret)
+		return ret;
+
+	ret = regmap_read(priv->regmap, PMU_REG_MAGIC1, &magic1);
+	if (ret)
+		return ret;
+
+	if (magic0 != PMU_MAGIC0_VAL || magic1 != PMU_MAGIC1_VAL) {
+		dev_err(&i2c->dev, "invalid magic\n");
+		return -EINVAL;
+	}
+
+	/* assemble app_ver_rev from four byte-wide registers:
+	 * version in the high 16 bits, revision in the low 16 bits */
+	ret = regmap_read(priv->regmap, PMU_REG_APP_REVISION_LO, &val);
+	if (ret)
+		return -EIO;
+	priv->app_ver_rev = (val << 0);
+
+	ret = regmap_read(priv->regmap, PMU_REG_APP_REVISION_HI, &val);
+	if (ret)
+		return -EIO;
+	priv->app_ver_rev |= (val << 8);
+
+	ret = regmap_read(priv->regmap, PMU_REG_APP_VERSION_LO, &val);
+	if (ret)
+		return -EIO;
+	priv->app_ver_rev |= (val << 16);
+
+	ret = regmap_read(priv->regmap, PMU_REG_APP_VERSION_HI, &val);
+	if (ret)
+		return -EIO;
+	priv->app_ver_rev |= (val << 24);
+
+	ret = regmap_read(priv->regmap, PMU_REG_API_MAJOR, &priv->api_major);
+	if (ret)
+		return -EIO;
+
+	ret = regmap_read(priv->regmap, PMU_REG_API_MINOR, &priv->api_minor);
+	if (ret)
+		return -EIO;
+
+	ret = regmap_read(priv->regmap, PMU_REG_BOARD_ID, &priv->board_id);
+	if (ret)
+		return -EIO;
+
+	ret = sysfs_create_group(&i2c->dev.kobj, pmu_attribute_group);
+	if (ret < 0) {
+		dev_err(&i2c->dev, "Sysfs registration failed\n");
+		return ret;
+	}
+
+	ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_NONE,
+				   fbxgwr_pmu_devs,
+				   ARRAY_SIZE(fbxgwr_pmu_devs), NULL, 0, 0);
+	if (ret) {
+		dev_err(&i2c->dev, "failed to register subdevices\n");
+		/* the sysfs group is not devm-managed: undo it manually
+		 * (was leaked on this error path before) */
+		sysfs_remove_group(&i2c->dev.kobj, pmu_attribute_group);
+		return ret;
+	}
+
+	dev_info(&i2c->dev, "Freebox PMU driver (fwver:%d.%d api:%d.%d)\n",
+		 priv->app_ver_rev >> 16, priv->app_ver_rev & 0xffff,
+		 priv->api_major, priv->api_minor);
+
+	return 0;
+}
+
+/* remove: tear down the sysfs group created in probe; everything else
+ * (regmap, MFD children, priv) is devm-managed */
+static void fbxgwr_pmu_i2c_remove(struct i2c_client *i2c)
+{
+	sysfs_remove_group(&i2c->dev.kobj, pmu_attribute_group);
+}
+/* DT match table; static (file-local) and exported to module tooling
+ * so the module can be auto-loaded from the DT compatible string */
+static const struct of_device_id fbxgwr_pmu_match[] = {
+	{ .compatible = "freebox,fbxgwr-pmu" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, fbxgwr_pmu_match);
+
+/* i2c driver glue; .probe_new is the i2c_device_id-less probe form */
+static struct i2c_driver fbxgwr_pmu_driver = {
+	.driver = {
+		.name = "fbxgwr_pmu",
+		.of_match_table = of_match_ptr(fbxgwr_pmu_match),
+	},
+	.probe_new = fbxgwr_pmu_i2c_probe,
+	.remove = fbxgwr_pmu_i2c_remove,
+};
+
+/*
+ * Module init: register the i2c driver.  Propagates the registration
+ * error (the old code logged it but still returned 0, so a failed
+ * insmod looked successful).
+ */
+static int __init fbxgwr_pmu_i2c_init(void)
+{
+	int ret;
+
+	ret = i2c_add_driver(&fbxgwr_pmu_driver);
+	if (ret != 0)
+		pr_err("Failed to register Freebox PMU driver: %d\n", ret);
+
+	return ret;
+}
+
+/* module exit: unregister the i2c driver */
+static void __exit fbxgwr_pmu_i2c_exit(void)
+{
+	i2c_del_driver(&fbxgwr_pmu_driver);
+}
+
+/* custom init/exit pair (rather than module_i2c_driver) so the init
+ * path can log registration failures */
+module_init(fbxgwr_pmu_i2c_init);
+module_exit(fbxgwr_pmu_i2c_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marios Makassikis");
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/misc/fbxserial_of.c	2023-02-27 19:50:19.752184110 +0100
@@ -0,0 +1,38 @@
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/fbxserial.h>
+#include <linux/random.h>
+
+/* serial info filled in from DT by fbxserial_of_read(); stays zeroed
+ * if no "fbx,serialinfo" property exists */
+static struct fbx_serial serial_info;
+
+/* accessor for the cached serial info; never returns NULL */
+const struct fbx_serial *arch_get_fbxserial(void)
+{
+	return &serial_info;
+}
+
+EXPORT_SYMBOL(arch_get_fbxserial);
+
+/*
+ * Parse the "fbx,serialinfo" property of the /chosen DT node into the
+ * static serial_info, and feed it to the entropy pool.  Always
+ * returns 0: a missing node/property just leaves serial_info zeroed.
+ */
+static __init int fbxserial_of_read(void)
+{
+	struct device_node *np;
+	const void *fbxserial_data;
+	int len;
+
+	np = of_find_node_by_path("/chosen");
+	if (!np)
+		return 0;
+
+	fbxserial_data = of_get_property(np, "fbx,serialinfo", &len);
+	if (!fbxserial_data) {
+		/* drop the node reference taken by of_find_node_by_path()
+		 * (was leaked on every exit path before) */
+		of_node_put(np);
+		return 0;
+	}
+
+	fbxserialinfo_read(fbxserial_data, &serial_info);
+	add_device_randomness(&serial_info, sizeof (serial_info));
+	of_node_put(np);
+
+	return 0;
+}
+
+arch_initcall(fbxserial_of_read);
diff -Nruw linux-6.4-fbx/drivers/misc/hdmi-cec./Kconfig linux-6.4-fbx/drivers/misc/hdmi-cec/Kconfig
--- linux-6.4-fbx/drivers/misc/hdmi-cec./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/misc/hdmi-cec/Kconfig	2023-03-09 15:06:11.352233904 +0100
@@ -0,0 +1,15 @@
+menu "HDMI CEC support"
+
+config HDMI_CEC
+	tristate "HDMI CEC (Consumer Electronics Control) support"
+	help
+	   HDMI Consumer Electronics Control support.
+
+config HDMI_CEC_REMOTI
+	tristate "RemoTI CEC driver"
+	depends on HDMI_CEC
+	select REMOTI
+	help
+	   HDMI CEC driver using RemoTI IPCs.
+
+endmenu
diff -Nruw linux-6.4-fbx/drivers/misc/hdmi-cec./Makefile linux-6.4-fbx/drivers/misc/hdmi-cec/Makefile
--- linux-6.4-fbx/drivers/misc/hdmi-cec./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/misc/hdmi-cec/Makefile	2023-03-09 15:06:11.352233904 +0100
@@ -0,0 +1,6 @@
+obj-$(CONFIG_HDMI_CEC)		+= hdmi-cec.o
+hdmi-cec-objs			+= core.o dev.o
+
+# drivers
+obj-$(CONFIG_HDMI_CEC_REMOTI)	+= remoti-cec.o
+remoti-cec-objs			:= remoti.o
diff -Nruw linux-6.4-fbx/drivers/misc/remoti./Kconfig linux-6.4-fbx/drivers/misc/remoti/Kconfig
--- linux-6.4-fbx/drivers/misc/remoti./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/misc/remoti/Kconfig	2023-03-09 15:06:11.352233904 +0100
@@ -0,0 +1,26 @@
+menu "RemoTI support"
+
+config REMOTI
+	tristate "RemoTI support"
+	depends on FBX6HD
+	help
+	  Texas Instruments RemoTI stack.
+
+config REMOTI_LEDS
+	tristate "RemoTI LEDS support"
+	depends on REMOTI
+	depends on LEDS_CLASS
+	help
+	  RemoTI LEDS class driver support.
+
+config REMOTI_GPIO
+	tristate "RemoTI gpio support"
+	depends on REMOTI
+	help
+	  Gpiochip driver for the RemoTI RNP.
+
+config REMOTI_USER
+	tristate "RemoTI userspace access"
+	depends on REMOTI
+
+endmenu
diff -Nruw linux-6.4-fbx/drivers/misc/remoti./Makefile linux-6.4-fbx/drivers/misc/remoti/Makefile
--- linux-6.4-fbx/drivers/misc/remoti./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/misc/remoti/Makefile	2023-03-09 15:06:11.352233904 +0100
@@ -0,0 +1,9 @@
+obj-$(CONFIG_REMOTI)		+= remoti.o
+obj-$(CONFIG_REMOTI_GPIO)	+= remoti-gpio.o
+obj-$(CONFIG_REMOTI_LEDS)	+= remoti-leds.o
+obj-$(CONFIG_REMOTI_USER)	+= remoti-user.o
+
+remoti-objs			:= core.o core-sysfs.o
+remoti-gpio-objs		:= gpio.o
+remoti-leds-objs		:= leds.o
+remoti-user-objs		:= user.o
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/broadcom/bcm63158./Makefile linux-6.4-fbx/drivers/net/ethernet/broadcom/bcm63158/Makefile
--- linux-6.4-fbx/drivers/net/ethernet/broadcom/bcm63158./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/broadcom/bcm63158/Makefile	2023-05-22 20:06:41.435779111 +0200
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BCM63158_SF2) 		+= sf2/
+obj-$(CONFIG_BCM63158_ENET_RUNNER) 	+= enet/
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/Makefile linux-6.4-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/Makefile
--- linux-6.4-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/Makefile	2023-03-09 15:06:11.356234011 +0100
@@ -0,0 +1,11 @@
+obj-$(CONFIG_BCM63158_ENET_RUNNER) 	+= bcm63158_enet_runner.o
+
+bcm63158_enet_runner-y	:= \
+	ethtool.o \
+	main.o \
+	port_unimac.o \
+	port_xport.o \
+	port_xport_serdes.o \
+	port_xport_epon.o \
+	port_xport_epon_dbg.o \
+	port_xport_xlmac.o
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/Makefile linux-6.4-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/Makefile
--- linux-6.4-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/Makefile	2023-03-09 15:06:11.364234225 +0100
@@ -0,0 +1,4 @@
+obj-$(CONFIG_BCM63158_SF2) 		+= bcm63158_sf2.o
+
+bcm63158_sf2-y 				+= sf2_main.o sf2_fdb.o
+bcm63158_sf2-$(CONFIG_DEBUG_FS) 	+= sf2_debug.o
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/broadcom/bcm63xx_enet_runner./Makefile linux-6.4-fbx/drivers/net/ethernet/broadcom/bcm63xx_enet_runner/Makefile
--- linux-6.4-fbx/drivers/net/ethernet/broadcom/bcm63xx_enet_runner./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/broadcom/bcm63xx_enet_runner/Makefile	2023-03-09 15:06:11.368234332 +0100
@@ -0,0 +1,5 @@
+obj-$(CONFIG_BCM63XX_ENET_RUNNER) 	+= bcm63xx_enet_runner_mod.o
+obj-$(CONFIG_BCM63XX_ENET_RUNNER) 	+= bcm63xx_sf2.o
+
+bcm63xx_enet_runner_mod-y 			+= bcm63xx_enet_runner.o
+bcm63xx_enet_runner_mod-$(CONFIG_DEBUG_FS) 	+= bcm63xx_enet_runner_debug.o
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./Kconfig linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/Kconfig
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/Kconfig	2023-12-21 17:30:06.441516398 +0100
@@ -0,0 +1,14 @@
+config IPQ95XX_ESS
+	tristate "IPQ95XX ESS driver (edma+ppe)"
+	select NET_SWITCHDEV
+	select PHYLINK
+	select MII
+
+config IPQ95XX_FBX_FF
+	bool "fastpath support for freebox boards"
+	depends on IPQ95XX_ESS
+	select IP_FFN
+	select IPV6_FFN
+	select IPV6_SIT_6RD
+	select BRIDGE
+	select FBXBRIDGE
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./Makefile linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/Makefile
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/Makefile	2024-01-04 16:55:44.208658048 +0100
@@ -0,0 +1,13 @@
+obj-$(CONFIG_IPQ95XX_ESS) 	+= ipq95xx_ess.o
+
+ipq95xx_ess-objs		+= \
+				clocks.o \
+				debug.o \
+				hwdesc.o \
+				fdb.o \
+				ipo.o \
+				main.o \
+				port.o \
+				port_ethtool.o \
+				port_phylink.o \
+				uniphy.o
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./clocks.c linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/clocks.c
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./clocks.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/clocks.c	2023-06-05 19:37:48.384778310 +0200
@@ -0,0 +1,403 @@
+#include <linux/clk.h>
+#include "ipq95xx_ess.h"
+
+#define to_clk_uniphy(_hw)	container_of(_hw, struct clk_uniphy, hw)
+
+/* only valid rates for serdes */
+#define UNIPHY_CLK_RATE_125M		125000000
+#define UNIPHY_CLK_RATE_156M		156250000
+#define UNIPHY_CLK_RATE_250M		250000000
+#define UNIPHY_CLK_RATE_312M		312500000
+
+/* true iff @clk is one of the four rates the serdes supports */
+static bool is_valid_uniphy_rate(unsigned int clk)
+{
+	return clk == UNIPHY_CLK_RATE_125M ||
+	       clk == UNIPHY_CLK_RATE_156M ||
+	       clk == UNIPHY_CLK_RATE_250M ||
+	       clk == UNIPHY_CLK_RATE_312M;
+}
+
+/* .recalc_rate: the rate is pure software state cached in clk_uniphy
+ * (no hardware parent), so just return it */
+static unsigned long
+uniphy_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+	struct clk_uniphy *uclk = to_clk_uniphy(hw);
+	return uclk->rate;
+}
+
+/* .determine_rate: only the four fixed serdes rates are accepted,
+ * and the requested rate is never adjusted */
+static int
+uniphy_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+	return is_valid_uniphy_rate(req->rate) ? 0 : -EINVAL;
+}
+
+/* .set_rate: validate then record the new serdes rate (software only) */
+static int
+uniphy_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+		    unsigned long parent_rate)
+{
+	struct clk_uniphy *uclk = to_clk_uniphy(hw);
+
+	if (!is_valid_uniphy_rate(rate))
+		return -EINVAL;
+
+	uclk->rate = rate;
+	return 0;
+}
+
+/* ops for the software-defined uniphy serdes clocks */
+static const struct clk_ops clk_uniphy_ops = {
+	.recalc_rate	= uniphy_clk_recalc_rate,
+	.determine_rate	= uniphy_clk_determine_rate,
+	.set_rate	= uniphy_clk_set_rate,
+};
+
+/*
+ * Devm action: drop the clk reference, unregister the hw clock and
+ * free the clk_uniphy allocated by uniphy_clock_register().
+ */
+static void uniphy_clock_release_cb(void *data)
+{
+	struct clk_hw *hw = data;
+	struct clk_uniphy *uclk = to_clk_uniphy(hw);
+
+	clk_put(uclk->clk);
+	clk_hw_unregister(hw);
+	kfree(uclk);
+}
+
+/*
+ * Register one rx or tx uniphy serdes clock for @uniphy_id and return
+ * the clk_uniphy; all resources are released through a devm action.
+ * Returns an ERR_PTR() on failure.
+ */
+static struct clk_uniphy *uniphy_clock_register(struct device *dev,
+						size_t uniphy_id, bool is_tx)
+{
+	struct clk_uniphy *uclk;
+	struct clk_init_data cinit;
+	struct clk *clk;
+	char buf[128], con_id[32];
+	int ret;
+
+	uclk = kzalloc(sizeof (*uclk), GFP_KERNEL);
+	if (!uclk)
+		return ERR_PTR(-ENOMEM);
+
+	snprintf(con_id, sizeof (con_id), "uniphy%zu_gcc_%s_clk",
+		 uniphy_id, is_tx ? "tx" : "rx");
+	snprintf(buf, sizeof(buf), "%s_%s", dev_name(dev), con_id);
+
+	/* cinit may live on the stack: clk_hw_register() only uses it
+	 * during registration */
+	memset(&cinit, 0, sizeof (cinit));
+	cinit.name = buf;
+	cinit.ops = &clk_uniphy_ops;
+
+	uclk->hw.init = &cinit;
+	uclk->rate = UNIPHY_CLK_RATE_125M;
+
+	ret = clk_hw_register(dev, &uclk->hw);
+	if (ret) {
+		/* uclk was leaked here before */
+		kfree(uclk);
+		return ERR_PTR(ret);
+	}
+
+	clk = clk_hw_get_clk(&uclk->hw, con_id);
+	if (IS_ERR(clk)) {
+		/* previously returned ERR_PTR(ret) with ret == 0 */
+		ret = PTR_ERR(clk);
+		clk_hw_unregister(&uclk->hw);
+		kfree(uclk);
+		return ERR_PTR(ret);
+	}
+
+	uclk->clk = clk;
+
+	/* on failure the action runs immediately and frees everything */
+	ret = devm_add_action_or_reset(dev, uniphy_clock_release_cb, &uclk->hw);
+	if (ret) {
+		dev_err(dev, "failed to setup devm action: %d\n", ret);
+		return ERR_PTR(ret);
+	}
+
+	return uclk;
+}
+
+/*
+ * Devm reset_control lookup that logs failures; returns whatever
+ * devm_reset_control_get() returned (possibly an ERR_PTR()).
+ */
+static struct reset_control *lgetrst(struct device *dev,
+				     const char *name)
+{
+	struct reset_control *rst = devm_reset_control_get(dev, name);
+
+	if (IS_ERR(rst))
+		dev_err(dev, "failed to get reset %s: %ld\n",
+			name, PTR_ERR(rst));
+
+	return rst;
+}
+
+/* fetch a named reset or make the enclosing function return its
+ * error.  Previously returned the caller's stale "ret" variable,
+ * which at the call sites held 0 (success) — so a missing reset was
+ * silently ignored.  Only usable in a function returning int. */
+#define GETRST(d, clkname)				\
+	({						\
+		struct reset_control *rst;		\
+		rst = lgetrst(d, clkname);		\
+		if (IS_ERR(rst))			\
+			return PTR_ERR(rst);		\
+		rst;					\
+	})
+
+/*
+ * Devm action: disable/unprepare a clock enabled by lgetclk() and
+ * drop its reference.
+ */
+static void lclk_devm_disable_release_cb(void *data)
+{
+	struct clk *clk = data;
+	clk_disable_unprepare(clk);
+	clk_put(clk);
+}
+
+/*
+ * Devm action: drop the reference of a clock obtained (but not
+ * enabled) by lgetclk().
+ */
+static void lclk_devm_release_cb(void *data)
+{
+	struct clk *clk = data;
+	clk_put(clk);
+}
+
+/*
+ * Get a named clock, optionally set its rate and enable it, and
+ * arrange for it to be released (and disabled, if enabled here) via a
+ * devm action.  Returns the clock or an ERR_PTR().
+ */
+static struct clk *lgetclk(struct device *dev,
+			   const char *name,
+			   bool enable,
+			   unsigned int new_rate)
+{
+	struct clk *clk;
+	int ret;
+
+	clk = clk_get(dev, name);
+	if (IS_ERR(clk)) {
+		dev_err(dev, "failed to get clock %s: %ld\n",
+			name, PTR_ERR(clk));
+		return clk;
+	}
+
+	/* new_rate == 0 means "leave the rate alone" */
+	if (new_rate) {
+		ret = clk_set_rate(clk, new_rate);
+		if (ret) {
+			dev_err(dev, "failed to set clock %s rate: %d\n",
+				name, ret);
+			clk_put(clk);
+			return ERR_PTR(ret);
+		}
+	}
+
+	if (enable) {
+		ret = clk_prepare_enable(clk);
+		if (ret) {
+			dev_err(dev, "failed to enable clock %s: %d\n",
+				name, ret);
+			clk_put(clk);
+			return ERR_PTR(ret);
+		}
+	}
+
+	/* pick the matching teardown; on failure the action runs
+	 * immediately, so no manual cleanup is needed here */
+	ret = devm_add_action_or_reset(dev,
+				       enable ? lclk_devm_disable_release_cb :
+				       lclk_devm_release_cb, clk);
+	if (ret) {
+		dev_err(dev, "failed to setup devm action: %d\n", ret);
+		return ERR_PTR(ret);
+	}
+
+	return clk;
+}
+
+/* lgetclk() wrappers; each makes the enclosing function return the
+ * error on failure, so they are only usable where "return PTR_ERR()"
+ * is valid.  Variants: plain get / get+enable / get+set-rate /
+ * get+set-rate+enable. */
+#define GETCLK(d, clkname)				\
+	({						\
+		struct clk *clk;			\
+		clk = lgetclk(d, clkname, false, 0);	\
+		if (IS_ERR(clk))			\
+			return PTR_ERR(clk);		\
+		clk;					\
+	})
+
+#define GETCLK_EN(d, clkname)				\
+	({						\
+		struct clk *clk;			\
+		clk = lgetclk(d, clkname, true, 0);	\
+		if (IS_ERR(clk))			\
+			return PTR_ERR(clk);		\
+		clk;					\
+	})
+
+#define GETCLK_RATE(d, clkname, r)			\
+	({						\
+		struct clk *clk;			\
+		clk = lgetclk(d, clkname, false, r);	\
+		if (IS_ERR(clk))			\
+			return PTR_ERR(clk);		\
+		clk;					\
+	})
+
+#define GETCLK_RATE_EN(d, clkname, r)			\
+	({						\
+		struct clk *clk;			\
+		clk = lgetclk(d, clkname, true, r);	\
+		if (IS_ERR(clk))			\
+			return PTR_ERR(clk);		\
+		clk;					\
+	})
+
+
+/*
+ * ess_clock_init - set up every clock and reset the PPE/EDMA needs:
+ * register the per-uniphy rx/tx serdes clocks as a DT clock provider,
+ * enable the mandatory PPE and EDMA clocks at their required rates,
+ * grab per-port clocks/resets, and reparent each used port's rx/tx
+ * clock source onto its uniphy clock.  All resources are devm-managed
+ * (the GETCLK*/GETRST macros return from this function on error).
+ */
+int ess_clock_init(struct ipq95xx_ess_priv *priv,
+		   const struct ess_config *config)
+{
+	struct device *dev = &priv->pdev->dev;
+	struct clk_hw_onecell_data *prov_clocks;
+	size_t i, prov_clocks_size;
+	int ret;
+
+	/* register as a clock provider, so the clock framework can
+	 * find the clock we are providing through DT lookup instead
+	 * of name */
+	prov_clocks_size = struct_size(prov_clocks, hws,
+				       ESS_UNIPHY_MAX * 2);
+	prov_clocks = devm_kzalloc(dev, prov_clocks_size, GFP_KERNEL);
+	if (!prov_clocks)
+		return -ENOMEM;
+
+	/* register the uniphy clocks that we "provide" back to nsscc,
+	 * those are the actual serdes link clocks */
+	for (i = 0; i < priv->hw_uniphy_count; i++) {
+		struct clk_uniphy *uclk;
+
+		uclk = uniphy_clock_register(dev, i, false);
+		if (IS_ERR(uclk))
+			return PTR_ERR(uclk);
+
+		priv->uniphy_clocks[i].rx = uclk;
+		prov_clocks->hws[prov_clocks->num++] = &uclk->hw;
+
+		uclk = uniphy_clock_register(dev, i, true);
+		if (IS_ERR(uclk))
+			return PTR_ERR(uclk);
+
+		priv->uniphy_clocks[i].tx = uclk;
+		prov_clocks->hws[prov_clocks->num++] = &uclk->hw;
+	}
+
+	ret = devm_of_clk_add_hw_provider(dev,
+					  of_clk_hw_onecell_get, prov_clocks);
+	if (ret)
+		return ret;
+
+	/* enable other mandatory clocks for PPE */
+	GETCLK_EN(dev, "cmn_ahb_clk");
+	GETCLK_EN(dev, "cmn_sys_clk");
+	GETCLK_RATE_EN(dev, "nss_ppe_switch_clk", 353000000);
+	GETCLK_RATE_EN(dev, "nss_ppe_cfg_clk", 353000000);
+	GETCLK_RATE_EN(dev, "nssnoc_ppe_clk", 353000000);
+	GETCLK_RATE_EN(dev, "nssnoc_ppe_cfg_clk", 353000000);
+	GETCLK_RATE_EN(dev, "nss_edma_clk", 353000000);
+	GETCLK_RATE_EN(dev, "nss_edma_cfg_clk", 353000000);
+	GETCLK_RATE_EN(dev, "nss_ppe_ipe_clk", 353000000);
+	GETCLK_RATE_EN(dev, "nss_ppe_btq_clk", 353000000);
+
+	/* per-uniphy register interface clocks (not enabled here) */
+	for (i = 0; i < priv->hw_uniphy_count; i++) {
+		char name[32];
+
+		scnprintf(name, sizeof (name), "uniphy%zu_sys_clk", i);
+		priv->uniphy_clocks[i].sys = GETCLK_RATE(dev, name, 24000000);
+		scnprintf(name, sizeof (name), "uniphy%zu_ahb_clk", i);
+		priv->uniphy_clocks[i].ahb = GETCLK_RATE(dev, name, 100000000);
+	}
+
+	/* per-port MAC clocks and resets; port names are 1-based */
+	for (i = 0; i < priv->hw_port_count; i++) {
+		char name[32];
+
+		scnprintf(name, sizeof (name), "port%zu_mac_clk", i + 1);
+		GETCLK_RATE_EN(dev, name, 353000000);
+
+		scnprintf(name, sizeof (name), "uniphy_port%zu_rx_clk", i + 1);
+		priv->port_clocks[i].uniphy_rx_clk = GETCLK(dev, name);
+		scnprintf(name, sizeof (name), "uniphy_port%zu_tx_clk", i + 1);
+		priv->port_clocks[i].uniphy_tx_clk = GETCLK(dev, name);
+		scnprintf(name, sizeof (name), "nss_port%zu_rx_clk", i + 1);
+		priv->port_clocks[i].mac_rx_clk = GETCLK(dev, name);
+		scnprintf(name, sizeof (name), "nss_port%zu_tx_clk", i + 1);
+		priv->port_clocks[i].mac_tx_clk = GETCLK(dev, name);
+
+		scnprintf(name, sizeof (name), "nss_port%zu_rst", i + 1);
+		priv->port_clocks[i].port_rst = GETRST(dev, name);
+		scnprintf(name, sizeof (name), "nss_port%zu_mac_rst", i + 1);
+		priv->port_clocks[i].mac_rst = GETRST(dev, name);
+	}
+
+	/* skipped "common block init" setup to 48Mhz since it seems
+	 * to be the default value */
+
+	/* enable other mandatory clocks for EDMA */
+	GETCLK_RATE_EN(dev, "nss_cc_nss_csr_clk", 100000000);
+	GETCLK_RATE_EN(dev, "nss_cc_nssnoc_nss_csr_clk", 100000000);
+	GETCLK_RATE_EN(dev, "nss_cc_imem_qsb_clk", 353000000);
+	GETCLK_RATE_EN(dev, "nss_cc_nssnoc_imem_qsb_clk", 353000000);
+	GETCLK_RATE_EN(dev, "nss_cc_imem_ahb_clk", 100000000);
+	GETCLK_RATE_EN(dev, "nss_cc_nssnoc_imem_ahb_clk", 100000000);
+	GETCLK_RATE_EN(dev, "gcc_mem_noc_nssnoc_clk", 533333333);
+	GETCLK_RATE_EN(dev, "gcc_nss_tbu_clk", 533333333);
+	GETCLK_RATE_EN(dev, "gcc_nss_ts_clk", 24000000);
+	GETCLK_RATE_EN(dev, "gcc_nsscc_clk", 100000000);
+	GETCLK_RATE_EN(dev, "gcc_nsscfg_clk", 100000000);
+	GETCLK_RATE_EN(dev, "gcc_nssnoc_atb_clk", 240000000);
+	GETCLK_RATE_EN(dev, "gcc_nssnoc_mem_noc_1_clk", 533333333);
+	GETCLK_RATE_EN(dev, "gcc_nssnoc_memnoc_clk", 533333333);
+	GETCLK_RATE_EN(dev, "gcc_nssnoc_nsscc_clk", 100000000);
+	GETCLK_RATE_EN(dev, "gcc_nssnoc_pcnoc_1_clk", 100000000);
+	GETCLK_RATE_EN(dev, "gcc_nssnoc_qosgen_ref_clk", 6000000);
+	GETCLK_RATE_EN(dev, "gcc_nssnoc_snoc_1_clk", 342857143);
+	GETCLK_RATE_EN(dev, "gcc_nssnoc_snoc_clk", 342857143);
+	GETCLK_RATE_EN(dev, "gcc_nssnoc_timeout_ref_clk", 6000000);
+	GETCLK_RATE_EN(dev, "gcc_nssnoc_xo_dcd_clk", 24000000);
+
+	/* setup correct routing of source clock depending on
+	 * configuration */
+	for (i = 0; i < ARRAY_SIZE(config->ports); i++) {
+		const struct ess_port_config *epc = &config->ports[i];
+		size_t dir;
+
+		if (!epc->used)
+			continue;
+
+		/* dir 0 = rx, dir 1 = tx */
+		for (dir = 0; dir < 2; dir++) {
+			char src_name[32];
+			struct clk *clk_parent, *clk_src;
+
+			scnprintf(src_name, sizeof (src_name),
+				  "nss_port%zu_%s_clk_src", i + 1,
+				  dir == 0 ? "rx" : "tx");
+
+			clk_src = clk_get(dev, src_name);
+			if (IS_ERR(clk_src)) {
+				dev_err(dev, "failed to get clock %s: %ld\n",
+					src_name, PTR_ERR(clk_src));
+				return PTR_ERR(clk_src);
+			}
+
+			clk_parent = (dir == 0) ?
+				priv->uniphy_clocks[epc->uniphy_id].rx->clk :
+				priv->uniphy_clocks[epc->uniphy_id].tx->clk;
+
+			ret = clk_set_parent(clk_src, clk_parent);
+			clk_put(clk_src);
+			if (ret) {
+				dev_err(dev,
+					"failed to set clock %s parent: %d\n",
+					src_name, ret);
+				return ret;
+			}
+
+			/* NOTE(review): format string lacks a trailing \n */
+			dev_dbg(dev, "clocks: set %s parent to uniphy %d",
+				src_name, epc->uniphy_id);
+		}
+	}
+
+        return 0;
+}
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./debug.c linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/debug.c
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./debug.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/debug.c	2023-12-21 17:30:06.441516398 +0100
@@ -0,0 +1,1317 @@
+#include "port_priv.h"
+#include "uniphy_priv.h"
+#include "regs/edma_regs.h"
+
+/* address-space selector for a reg_desc entry: uniphy descriptors are
+ * read through the core or XPCS register window, port descriptors
+ * through the GMAC or XGMAC window */
+enum {
+	/* uniphy */
+	REG_CORE,
+	REG_XPCS,
+
+	/* port */
+	REG_GMAC,
+	REG_XGMAC,
+};
+
+/* describes one register -- or one register table -- to dump */
+struct reg_desc {
+	const char	*name;		/* label printed in the dump */
+	u32		offset;		/* register offset inside its block */
+	unsigned int	type;		/* REG_* address-space selector */
+
+	unsigned int	repeat;		/* nr of table entries (0 => scalar) */
+	unsigned int	repeat_offset;	/* byte stride between table entries */
+	unsigned int	consec_count;	/* consecutive 32-bit words per entry
+					 * (0 => single word) */
+};
+
+/* PPE (switch core) registers and tables dumped through the "ppe_regs"
+ * debugfs file.  Initializer order follows struct reg_desc:
+ * { name, offset, type, repeat, repeat_offset, consec_count } */
+static const struct reg_desc ppe_regs[] = {
+	{ "PORT_MUX_CTRL", PORT_MUX_CTRL_REG, 0, },
+	{ "PORT_PHY_STS0", PORT_PHY_STS0_REG, 0, },
+	{ "PORT_PHY_STS1", PORT_PHY_STS1_REG, 0, },
+
+	{ "TDM_CTRL_REG", TDM_CTRL_REG, 0, },
+	{ "TDM_CFG_REG", TDM_CFG_REG(0), 0, 128, 0x10, },
+
+	{ "L2_GLOB_CFG", L2_GLOB_CFG_REG, 0 },
+	{ "IN_L2_SERVICE_TBL", IN_L2_SERVICE_TBL_REG(0, 0), 0, 256, 16, 2 },
+	{ "L2_VP_PORT_TBL", L2_VP_PORT_TBL_REG(0, 0), 0, 256, 16, 4 },
+
+	{ "VSI_TBL", VSI_TBLx_REG(0, 0), 0, 64, 16, 2 },
+
+	{ "VSI_REMAP_TBL", VSI_REMAP_TBL_REG(0, 0), 0, 64, 0x40, 9 },
+
+	{ "MRU_MTU_CTRL", MRU_MTU_CTRL_TBL_REG(0, 0), 0, 8, 0x10, 3 },
+
+	{ "AC_UNI_QUEUE_CFG", AC_UNI_QUEUE_CFG_TBL_REG(0, 0), 0, 256, 0x10, 4 },
+	{ "AC_MUL_QUEUE_CFG", AC_MUL_QUEUE_CFG_TBL_REG(0, 0), 0, 44, 0x10, 4 },
+	{ "AC_GRP_CFG", AC_GRP_CFG_TBL_REG(0, 0), 0, 4, 0x10, 4 },
+
+	{ "PORT_FC_MODE", PORT_FC_MODE_REG(0), 0, 15, 4 },
+	{ "PORT_FC_STAT", PORT_FC_STAT_REG(0), 0, 15, 4 },
+	{ "PORT_FC_GRP_ID", PORT_FC_GRP_ID_REG(0), 0, 15, 4 },
+	{ "PORT_FC_CNT", PORT_FC_CNT_REG(0), 0, 15, 4 },
+	{ "PORT_FC_REACTED_CNT", PORT_FC_REACTED_CNT_REG(0), 0, 15, 4 },
+	{ "PORT_FC_SHARED_GRP_CNT", PORT_FC_SHARED_GRP_CNT_REG(0), 0, 4, 4 },
+	{ "PORT_FC_SHARED_GRP_CFG", PORT_FC_SHARED_GRP_CFG_REG(0), 0, 4, 4 },
+	{ "PORT_FC_CFG", PORT_FC_CFG1_REG(0), 0, 15, 16, 2 },
+
+	{ "PORT_VSI_ENQUEUE_MAP", PORT_VSI_ENQUEUE_MAP_REG(0), 0, 512, 4 },
+
+	{ "UCAST_HASH_MAP_TBL", UCAST_HASH_MAP_TBL_REG(0), 0, 256, 16 },
+
+	{ "PSCH_TDM_DEPTH_CF", PSCH_TDM_DEPTH_CFG_REG, 0, },
+	{ "PSCH_TDM_CFG_TBL", PSCH_TDM_CFG_TBL_REG(0), 0, 128, 16 },
+};
+
+/* EDMA (ethernet DMA) registers dumped through the "edma_regs" debugfs
+ * file; ring registers repeat per tx-desc (32), rx-fill (8), rx-desc (24)
+ * and tx-cmpl (32) ring at a 0x1000 stride */
+static const struct reg_desc edma_regs[] = {
+	{ "EDMA_PORT_CTRL", EDMA_PORT_CTRL_REG, 0 },
+	{ "EDMA_VLAN_CTRL", EDMA_VLAN_CTRL_REG, 0 },
+	{ "EDMA_RXD2FILL_MAPx[0]", EDMA_RXD2FILL_MAPx_REG(0), 0 },
+	{ "EDMA_RXD2FILL_MAPx[1]", EDMA_RXD2FILL_MAPx_REG(1), 0 },
+	{ "EDMA_RXD2FILL_MAPx[2]", EDMA_RXD2FILL_MAPx_REG(2), 0 },
+	{ "EDMA_TXQ_CTRL", EDMA_TXQ_CTRL_REG, 0 },
+	{ "EDMA_TXQ_CTRL2", EDMA_TXQ_CTRL2_REG, 0 },
+	{ "EDMA_TXQ_FC0", EDMA_TXQ_FC0_REG, 0 },
+	{ "EDMA_TXQ_FC1", EDMA_TXQ_FC1_REG, 0 },
+	{ "EDMA_RXQ_CTRL", EDMA_RXQ_CTRL_REG, 0 },
+	{ "EDMA_MISC_ERR_QID1", EDMA_MISC_ERR_QID1_REG, 0 },
+	{ "EDMA_RXQ_FC_TRHE", EDMA_RXQ_FC_TRHE_REG, 0 },
+	{ "EDMA_DMAR_CTRL", EDMA_DMAR_CTRL_REG, 0 },
+	{ "EDMA_AXIR_CTRL", EDMA_AXIR_CTRL_REG, 0 },
+	{ "EDMA_AXIW_CTRL", EDMA_AXIW_CTRL_REG, 0 },
+	{ "EDMA_MIN_MSS", EDMA_MIN_MSS_REG, 0 },
+	{ "EDMA_LOOPBACK_CTRL", EDMA_LOOPBACK_CTRL_REG, 0 },
+	{ "EDMA_MISC_INT_STAT", EDMA_MISC_INT_STAT_REG, 0 },
+	{ "EDMA_MISC_INT_MASK", EDMA_MISC_INT_MASK_REG, 0 },
+	{ "EDMA_REQ0_FIFO_THRESH", EDMA_REQ0_FIFO_THRESH_REG, 0 },
+	{ "EDMA_WP_OS_THRESH", EDMA_WP_OS_THRESH_REG, 0 },
+	{ "EDMA_MISC_ERR_QID2", EDMA_MISC_ERR_QID2_REG, 0 },
+	{ "EDMA_TXDESC2CMPL_MAPx[0]", EDMA_TXDESC2CMPL_MAPx_REG(0), 0 },
+	{ "EDMA_TXDESC2CMPL_MAPx[1]", EDMA_TXDESC2CMPL_MAPx_REG(1), 0 },
+	{ "EDMA_TXDESC2CMPL_MAPx[2]", EDMA_TXDESC2CMPL_MAPx_REG(2), 0 },
+	{ "EDMA_TXDESC2CMPL_MAPx[3]", EDMA_TXDESC2CMPL_MAPx_REG(3), 0 },
+	{ "EDMA_TXDESC2CMPL_MAPx[4]", EDMA_TXDESC2CMPL_MAPx_REG(4), 0 },
+	{ "EDMA_TXDESC2CMPL_MAPx[5]", EDMA_TXDESC2CMPL_MAPx_REG(5), 0 },
+	{ "EDMA_TXDESC_BAx", EDMA_TXDESC_BAx_REG(0), 0, 32, 0x1000 },
+	{ "EDMA_TXDESC_PRODx_IDX", EDMA_TXDESC_PRODx_IDX_REG(0), 0, 32, 0x1000 },
+	{ "EDMA_TXDESC_CONSx_IDX", EDMA_TXDESC_CONSx_IDX_REG(0), 0, 32, 0x1000 },
+	{ "EDMA_TXDESC_RING_SIZEx", EDMA_TXDESC_RING_SIZEx_REG(0), 0, 32, 0x1000 },
+	{ "EDMA_TXDESC_CTRLx", EDMA_TXDESC_CTRLx_REG(0), 0, 32, 0x1000 },
+	{ "EDMA_TXDESC_PH_BAx_REG", EDMA_TXDESC_PH_BAx_REG(0), 0, 32, 0x1000 },
+
+	{ "EDMA_RXFILL_BAx", EDMA_RXFILL_BAx_REG(0), 0, 8, 0x1000 },
+	{ "EDMA_RXFILL_PRODx_IDX", EDMA_RXFILL_PRODx_IDX_REG(0), 0, 8, 0x1000 },
+	{ "EDMA_RXFILL_CONSx_IDX", EDMA_RXFILL_CONSx_IDX_REG(0), 0, 8, 0x1000 },
+	{ "EDMA_RXFILL_RING_SIZEx", EDMA_RXFILL_RING_SIZEx_REG(0), 0, 8, 0x1000 },
+	{ "EDMA_RXFILL_BUF1_SIZEx", EDMA_RXFILL_BUF1_SIZEx_REG(0), 0, 8, 0x1000 },
+	{ "EDMA_RXFILL_FC_THRESHx", EDMA_RXFILL_FC_THRESHx_REG(0), 0, 8, 0x1000 },
+	{ "EDMA_RXFILL_UGT_THRESHx", EDMA_RXFILL_UGT_THRESHx_REG(0), 0, 8, 0x1000 },
+	{ "EDMA_RXFILL_ENx", EDMA_RXFILL_ENx_REG(0), 0, 8, 0x1000 },
+	{ "EDMA_RXFILL_DISABLEx", EDMA_RXFILL_DISABLEx_REG(0), 0, 8, 0x1000 },
+	{ "EDMA_RXFILL_DISABLE_DONEx", EDMA_RXFILL_DISABLE_DONEx_REG(0), 0, 8, 0x1000 },
+	{ "EDMA_RXFILL_INT_STATx", EDMA_RXFILL_INT_STATx_REG(0), 0, 8, 0x1000 },
+	{ "EDMA_RXFILL_INT_MASKx", EDMA_RXFILL_INT_MASKx_REG(0), 0, 8, 0x1000 },
+	{ "EDMA_RXDESC_BAx", EDMA_RXDESC_BAx_REG(0), 0, 24, 0x1000 },
+	{ "EDMA_RXDESC_PRODx_IDX", EDMA_RXDESC_PRODx_IDX_REG(0), 0, 24, 0x1000 },
+	{ "EDMA_RXDESC_CONSx_IDX", EDMA_RXDESC_CONSx_IDX_REG(0), 0, 24, 0x1000 },
+	{ "EDMA_RXDESC_RING_SIZEx", EDMA_RXDESC_RING_SIZEx_REG(0), 0, 24, 0x1000 },
+	{ "EDMA_RXDESC_THRESHx", EDMA_RXDESC_THRESHx_REG(0), 0, 24, 0x1000 },
+	{ "EDMA_RXDESC_UGT_THRESHx", EDMA_RXDESC_UGT_THRESHx_REG(0), 0, 24, 0x1000 },
+	{ "EDMA_RXDESC_CTRLx", EDMA_RXDESC_CTRLx_REG(0), 0, 24, 0x1000 },
+	{ "EDMA_RXDESC_BPCx", EDMA_RXDESC_BPCx_REG(0), 0, 24, 0x1000 },
+	{ "EDMA_RXDESC_DISABLEx", EDMA_RXDESC_DISABLEx_REG(0), 0, 24, 0x1000 },
+	{ "EDMA_RXDESC_DISABLE_DONEx", EDMA_RXDESC_DISABLE_DONEx_REG(0), 0, 24, 0x1000 },
+	{ "EDMA_RXDESC_PH_BAx", EDMA_RXDESC_PH_BAx_REG(0), 0, 24, 0x1000 },
+	{ "EDMA_RXDESC_INT_STATx", EDMA_RXDESC_INT_STATx_REG(0), 0, 24, 0x1000 },
+	{ "EDMA_RXDESC_INT_MASKx", EDMA_RXDESC_INT_MASKx_REG(0), 0, 24, 0x1000 },
+	{ "EDMA_RX_MOD_TIMERx", EDMA_RX_MOD_TIMERx_REG(0), 0, 24, 0x1000 },
+	{ "EDMA_RX_INT_CTRLx", EDMA_RX_INT_CTRLx_REG(0), 0, 24, 0x1000 },
+	{ "EDMA_TXCMPL_BA", EDMA_TXCMPL_BAx_REG(0), 0, 32, 0x1000 },
+	{ "EDMA_TXCMPL_PRODx_IDX", EDMA_TXCMPL_PRODx_IDX_REG(0), 0, 32, 0x1000 },
+	{ "EDMA_TXCMPL_CONSx_IDX", EDMA_TXCMPL_CONSx_IDX_REG(0), 0, 32, 0x1000 },
+	{ "EDMA_TXCMPL_RING_SIZEx", EDMA_TXCMPL_RING_SIZEx_REG(0), 0, 32, 0x1000 },
+	{ "EDMA_TXCMPL_UGT_THRESHx", EDMA_TXCMPL_UGT_THRESHx_REG(0), 0, 32, 0x1000 },
+	{ "EDMA_TXCMPL_CTRLx", EDMA_TXCMPL_CTRLx_REG(0), 0, 32, 0x1000 },
+	{ "EDMA_TXCMPL_BPCx", EDMA_TXCMPL_BPCx_REG(0), 0, 32, 0x1000 },
+	{ "EDMA_TX_INT_STATx", EDMA_TX_INT_STATx_REG(0), 0, 32, 0x1000 },
+	{ "EDMA_TX_INT_MASKx", EDMA_TX_INT_MASKx_REG(0), 0, 32, 0x1000 },
+	{ "EDMA_TX_MOD_TIMERx", EDMA_TX_MOD_TIMERx_REG(0), 0, 32, 0x1000 },
+	{ "EDMA_TX_INT_CTRLx", EDMA_TX_INT_CTRLx_REG(0), 0, 32, 0x1000 },
+	{ "EDMA_QID2RIDx_TABLE_MEM", EDMA_QID2RIDx_TABLE_MEM(0), 0, (256+44) / 4, 4 },
+};
+
+/* per-port MAC registers dumped through the "portN_regs" debugfs files;
+ * REG_GMAC entries are read with gmac_readl(), REG_XGMAC entries with
+ * xgmac_readl() (see port_regs_dump_seq_show()) */
+static const struct reg_desc port_regs[] = {
+	{ "GMAC_ENABLE", GMAC_ENABLE_REG, REG_GMAC, },
+	{ "GMAC_SPEED", GMAC_SPEED_REG, REG_GMAC, },
+	{ "GMAC_PAUSE_MAC_ADDR0", GMAC_PAUSE_MAC_ADDR0_REG, REG_GMAC, },
+	{ "GMAC_PAUSE_MAC_ADDR1", GMAC_PAUSE_MAC_ADDR1_REG, REG_GMAC, },
+	{ "GMAC_MISC", GMAC_MISC_REG, REG_GMAC, },
+	{ "GMAC_MIB_CTRL", GMAC_MIB_CTRL_REG, REG_GMAC, },
+
+	{ "XGMAC_TX_CFG", XGMAC_TX_CFG_REG, REG_XGMAC, },
+	{ "XGMAC_RX_CFG", XGMAC_RX_CFG_REG, REG_XGMAC, },
+	{ "XGMAC_PKT_FLT", XGMAC_PKT_FLT_REG, REG_XGMAC, },
+	{ "XGMAC_Q0_TX_FLOW_CTRL", XGMAC_Q0_TX_FLOW_CTRL_REG, REG_XGMAC, },
+	{ "XGMAC_LPI_CS", XGMAC_LPI_CS_REG, REG_XGMAC, },
+	{ "XGMAC_MMC_CTRL", XGMAC_MMC_CTRL_REG, REG_XGMAC, },
+};
+
+/* uniphy (SerDes) registers dumped through the "uniphyN_regs" debugfs
+ * files; REG_XPCS entries are only readable when the XPCS block is out
+ * of reset (see uniphy_regs_dump_seq_show()) */
+static const struct reg_desc uniphy_regs[] = {
+	{ "CALIB_4", UPHY_CALIB_4_REG, REG_CORE },
+	{ "MISC2", UPHY_MISC2_REG, REG_CORE },
+	{ "MODE_CTRL", UPHY_MODE_CTRL_REG, REG_CORE },
+	{ "SGMII_LPA_CHx[0]", UPHY_SGMII_LPA_CHx_REG(0), REG_CORE },
+	{ "SGMII_ST_CHx[0]", UPHY_SGMII_ST_CHx_REG(0), REG_CORE },
+	{ "SGMII_CTRL_CH[0]", UPHY_SGMII_CTRL_CHx_REG(0), REG_CORE },
+	{ "SGMII_ANEGC_CH[0]", UPHY_SGMII_ANEGC_CHx_REG(0), REG_CORE },
+	{ "SGMII_ANEG_CH[0]", UPHY_SGMII_ANEG_CHx_REG(0), REG_CORE },
+	{ "SGMII_LPA_CHx[1]", UPHY_SGMII_LPA_CHx_REG(1), REG_CORE },
+	{ "SGMII_ST_CHx[1]", UPHY_SGMII_ST_CHx_REG(1), REG_CORE },
+	{ "SGMII_CTRL_CH[1]", UPHY_SGMII_CTRL_CHx_REG(1), REG_CORE },
+	{ "SGMII_ANEGC_CH[1]", UPHY_SGMII_ANEGC_CHx_REG(1), REG_CORE },
+	{ "SGMII_ANEG_CH[1]", UPHY_SGMII_ANEG_CHx_REG(1), REG_CORE },
+	{ "SGMII_LPA_CHx[2]", UPHY_SGMII_LPA_CHx_REG(2), REG_CORE },
+	{ "SGMII_ST_CHx[2]", UPHY_SGMII_ST_CHx_REG(2), REG_CORE },
+	{ "SGMII_CTRL_CH[2]", UPHY_SGMII_CTRL_CHx_REG(2), REG_CORE },
+	{ "SGMII_ANEGC_CH[2]", UPHY_SGMII_ANEGC_CHx_REG(2), REG_CORE },
+	{ "SGMII_ANEG_CH[2]", UPHY_SGMII_ANEG_CHx_REG(2), REG_CORE },
+	{ "SGMII_LPA_CHx[3]", UPHY_SGMII_LPA_CHx_REG(3), REG_CORE },
+	{ "SGMII_ST_CHx[3]", UPHY_SGMII_ST_CHx_REG(3), REG_CORE },
+	{ "SGMII_CTRL_CH[3]", UPHY_SGMII_CTRL_CHx_REG(3), REG_CORE },
+	{ "SGMII_ANEGC_CH[3]", UPHY_SGMII_ANEGC_CHx_REG(3), REG_CORE },
+	{ "SGMII_ANEG_CH[3]", UPHY_SGMII_ANEG_CHx_REG(3), REG_CORE },
+	/* NOTE(review): channel 4 has no SGMII_LPA_CHx entry, unlike
+	 * channels 0-3 -- confirm this is intentional */
+	{ "SGMII_ST_CHx[4]", UPHY_SGMII_ST_CHx_REG(4), REG_CORE },
+	{ "SGMII_CTRL_CH[4]", UPHY_SGMII_CTRL_CHx_REG(4), REG_CORE },
+	{ "SGMII_ANEGC_CH[4]", UPHY_SGMII_ANEGC_CHx_REG(4), REG_CORE },
+	{ "SGMII_ANEG_CH[4]", UPHY_SGMII_ANEG_CHx_REG(4), REG_CORE },
+	{ "USXG_OPT_REG", UPHY_USXG_OPT_REG, REG_CORE },
+	{ "LINK_DET", UPHY_LINK_DET_REG, REG_CORE },
+	{ "PLL", UPHY_PLL_REG, REG_CORE },
+
+	{ "SR_XS_PCS_CTRL1", UPHY_SR_XS_PCS_CTRL1_REG, REG_XPCS },
+	{ "SR_XS_PCS_STS1", UPHY_SR_XS_PCS_STS1_REG, REG_XPCS },
+	{ "SR_XS_PCS_SPD_ABL", UPHY_SR_XS_PCS_SPD_ABL_REG, REG_XPCS },
+	{ "SR_XS_PCS_DEV_PKG1", UPHY_SR_XS_PCS_DEV_PKG1_REG, REG_XPCS },
+	{ "SR_XS_PCS_DEV_PKG2", UPHY_SR_XS_PCS_DEV_PKG2_REG, REG_XPCS },
+	{ "SR_XS_PCS_CTRL2", UPHY_SR_XS_PCS_CTRL2_REG, REG_XPCS },
+	{ "SR_XS_PCS_STS2", UPHY_SR_XS_PCS_STS2_REG, REG_XPCS },
+	{ "SR_XS_PCS_STS3", UPHY_SR_XS_PCS_STS3_REG, REG_XPCS },
+	{ "SR_XS_PCS_EEE_ABL", UPHY_SR_XS_PCS_EEE_ABL_REG, REG_XPCS },
+	{ "SR_XS_PCS_EEE_ABL2", UPHY_SR_XS_PCS_EEE_ABL2_REG, REG_XPCS },
+	{ "SR_XS_PCS_EEE_WKERR", UPHY_SR_XS_PCS_EEE_WKERR_REG, REG_XPCS },
+	{ "SR_XS_PCS_LSTS", UPHY_SR_XS_PCS_LSTS_REG, REG_XPCS },
+	{ "SR_XS_PCS_KR_STS1", UPHY_SR_XS_PCS_KR_STS1_REG, REG_XPCS },
+	{ "SR_XS_PCS_KR_STS2", UPHY_SR_XS_PCS_KR_STS2_REG, REG_XPCS },
+	{ "SR_XS_PCS_TP_Ax[0]", UPHY_SR_XS_PCS_TP_Ax_REG(0), REG_XPCS },
+	{ "SR_XS_PCS_TP_Ax[1]", UPHY_SR_XS_PCS_TP_Ax_REG(1), REG_XPCS },
+	{ "SR_XS_PCS_TP_Ax[2]", UPHY_SR_XS_PCS_TP_Ax_REG(2), REG_XPCS },
+	{ "SR_XS_PCS_TP_Ax[3]", UPHY_SR_XS_PCS_TP_Ax_REG(3), REG_XPCS },
+	{ "SR_XS_PCS_TP_Bx[0]", UPHY_SR_XS_PCS_TP_Bx_REG(0), REG_XPCS },
+	{ "SR_XS_PCS_TP_Bx[1]", UPHY_SR_XS_PCS_TP_Bx_REG(1), REG_XPCS },
+	{ "SR_XS_PCS_TP_Bx[2]", UPHY_SR_XS_PCS_TP_Bx_REG(2), REG_XPCS },
+	{ "SR_XS_PCS_TP_Bx[3]", UPHY_SR_XS_PCS_TP_Bx_REG(3), REG_XPCS },
+	{ "SR_XS_PCS_TP_CTRL", UPHY_SR_XS_PCS_TP_CTRL_REG, REG_XPCS },
+	{ "SR_XS_PCS_TP_ERRCTR", UPHY_SR_XS_PCS_TP_ERRCTR_REG, REG_XPCS },
+	{ "VR_XS_PCS_DIG_CTRL1", UPHY_VR_XS_PCS_DIG_CTRL1_REG, REG_XPCS },
+	{ "VR_XS_PCS_DIG_CTRL2", UPHY_VR_XS_PCS_DIG_CTRL2_REG, REG_XPCS },
+	{ "VR_XS_PCS_DEBUG_CTRL", UPHY_VR_XS_PCS_DEBUG_CTRL_REG, REG_XPCS },
+	{ "VR_XS_PCS_EEE_MCTRL0", UPHY_VR_XS_PCS_EEE_MCTRL0_REG, REG_XPCS },
+	{ "VR_XS_PCS_KR_CTRL", UPHY_VR_XS_PCS_KR_CTRL_REG, REG_XPCS },
+	{ "VR_XS_PCS_EEE_TXTIMER", UPHY_VR_XS_PCS_EEE_TXTIMER_REG, REG_XPCS },
+	{ "VR_XS_PCS_EEE_RXTIMER", UPHY_VR_XS_PCS_EEE_RXTIMER_REG, REG_XPCS },
+	{ "VR_XS_PCS_DIG_STS", UPHY_VR_XS_PCS_DIG_STS_REG, REG_XPCS },
+	{ "VR_XS_PCS_EEE_MCTRL1", UPHY_VR_XS_PCS_EEE_MCTRL1_REG, REG_XPCS },
+	{ "VR_XS_PCS_NSNPS_EEE_CTRL", UPHY_VR_XS_PCS_NSNPS_EEE_CTRL_REG, REG_XPCS },
+	{ "VR_XS_PCS_DIG_STS_ALT", UPHY_VR_XS_PCS_DIG_STS_ALT_REG, REG_XPCS },
+	{ "VR_XS_PCS_CDT_STS", UPHY_VR_XS_PCS_CDT_STS_REG, REG_XPCS },
+	{ "VR_XS_PCS_MISC_STS", UPHY_VR_XS_PCS_MISC_STS_REG, REG_XPCS },
+
+
+	{ "SR_MII_CTRL_CHAN[0]", UPHY_SR_MII_CTRL_CHANx_REG(0), REG_XPCS },
+	{ "SR_MII_EXPN_CHAN[0]", UPHY_SR_MII_EXPN_CHANx_REG(0), REG_XPCS },
+	{ "VR_MII_DIG_CTRL1_CHAN[0]", UPHY_VR_MII_DIG_CTRL1_CHANx_REG(0), REG_XPCS },
+	{ "VR_MII_AN_CTRL_CHAN[0]", UPHY_VR_MII_AN_CTRL_CHANx_REG(0), REG_XPCS },
+	{ "VR_MII_AN_INTR_STS_CHAN[0]", UPHY_VR_MII_AN_INTR_STS_CHANx_REG(0), REG_XPCS },
+	{ "VR_XAUI_MODE_CTRL_CHAN[0]", UPHY_VR_XAUI_MODE_CTRL_CHANx_REG(0), REG_XPCS },
+	{ "VR_MII_LINK_TIMER_CTRL_CHAN[0]", UPHY_VR_MII_LINK_TIMER_CTRL_CHANx_REG(0), REG_XPCS },
+
+	{ "SR_MII_CTRL_CHAN[1]", UPHY_SR_MII_CTRL_CHANx_REG(1), REG_XPCS },
+	{ "SR_MII_EXPN_CHAN[1]", UPHY_SR_MII_EXPN_CHANx_REG(1), REG_XPCS },
+	{ "VR_MII_DIG_CTRL1_CHAN[1]", UPHY_VR_MII_DIG_CTRL1_CHANx_REG(1), REG_XPCS },
+	{ "VR_MII_AN_CTRL_CHAN[1]", UPHY_VR_MII_AN_CTRL_CHANx_REG(1), REG_XPCS },
+	{ "VR_MII_AN_INTR_STS_CHAN[1]", UPHY_VR_MII_AN_INTR_STS_CHANx_REG(1), REG_XPCS },
+	{ "VR_XAUI_MODE_CTRL_CHAN[1]", UPHY_VR_XAUI_MODE_CTRL_CHANx_REG(1), REG_XPCS },
+	{ "VR_MII_LINK_TIMER_CTRL_CHAN[1]", UPHY_VR_MII_LINK_TIMER_CTRL_CHANx_REG(1), REG_XPCS },
+
+	{ "SR_MII_CTRL_CHAN[2]", UPHY_SR_MII_CTRL_CHANx_REG(2), REG_XPCS },
+	{ "SR_MII_EXPN_CHAN[2]", UPHY_SR_MII_EXPN_CHANx_REG(2), REG_XPCS },
+	{ "VR_MII_DIG_CTRL1_CHAN[2]", UPHY_VR_MII_DIG_CTRL1_CHANx_REG(2), REG_XPCS },
+	{ "VR_MII_AN_CTRL_CHAN[2]", UPHY_VR_MII_AN_CTRL_CHANx_REG(2), REG_XPCS },
+	{ "VR_MII_AN_INTR_STS_CHAN[2]", UPHY_VR_MII_AN_INTR_STS_CHANx_REG(2), REG_XPCS },
+	{ "VR_XAUI_MODE_CTRL_CHAN[2]", UPHY_VR_XAUI_MODE_CTRL_CHANx_REG(2), REG_XPCS },
+	{ "VR_MII_LINK_TIMER_CTRL_CHAN[2]", UPHY_VR_MII_LINK_TIMER_CTRL_CHANx_REG(2), REG_XPCS },
+
+
+	{ "SR_MII_CTRL_CHAN[3]", UPHY_SR_MII_CTRL_CHANx_REG(3), REG_XPCS },
+	{ "SR_MII_EXPN_CHAN[3]", UPHY_SR_MII_EXPN_CHANx_REG(3), REG_XPCS },
+	{ "VR_MII_DIG_CTRL1_CHAN[3]", UPHY_VR_MII_DIG_CTRL1_CHANx_REG(3), REG_XPCS },
+	{ "VR_MII_AN_CTRL_CHAN[3]", UPHY_VR_MII_AN_CTRL_CHANx_REG(3), REG_XPCS },
+	{ "VR_MII_AN_INTR_STS_CHAN[3]", UPHY_VR_MII_AN_INTR_STS_CHANx_REG(3), REG_XPCS },
+	{ "VR_XAUI_MODE_CTRL_CHAN[3]", UPHY_VR_XAUI_MODE_CTRL_CHANx_REG(3), REG_XPCS },
+	{ "VR_MII_LINK_TIMER_CTRL_CHAN[3]", UPHY_VR_MII_LINK_TIMER_CTRL_CHANx_REG(3), REG_XPCS },
+
+};
+
+
+/*
+ * port regs dump functions
+ */
+/* seq_file iterator over port_regs[]: one descriptor per step, the
+ * iterator state is the index itself */
+static void *port_regs_dump_seq_start(struct seq_file *s, loff_t *pos)
+{
+	if (*pos >= ARRAY_SIZE(port_regs))
+		return NULL;
+	return pos;
+}
+
+static void *port_regs_dump_seq_next(struct seq_file *s,
+				       void __always_unused *v,
+				       loff_t *pos)
+{
+	(*pos)++;
+	if (*pos >= ARRAY_SIZE(port_regs))
+		return NULL;
+	return pos;
+}
+
+static void port_regs_dump_seq_stop(struct seq_file __always_unused *s,
+				    void __always_unused *v)
+{
+	/* nothing to release: iterator state lives in *pos */
+}
+
+/* emit one line per port_regs[] entry: name, offset, current value */
+static int port_regs_dump_seq_show(struct seq_file *s, void *v)
+{
+	struct ess_port *port = s->private;
+	const struct reg_desc *rdesc = &port_regs[*(loff_t *)v];
+	u32 val;
+
+	switch (rdesc->type) {
+	case REG_GMAC:
+		val = gmac_readl(port, rdesc->offset);
+		break;
+	case REG_XGMAC:
+		val = xgmac_readl(port, rdesc->offset);
+		break;
+	default:
+		/* port_regs[] only contains (X)GMAC entries */
+		BUG();
+		break;
+	}
+
+	seq_printf(s, "%-35s\t0x%-6x\t0x%08x\n",
+		   rdesc->name, rdesc->offset, val);
+	return 0;
+}
+
+static const struct seq_operations port_regs_dump_seq_ops = {
+	.start = port_regs_dump_seq_start,
+	.next  = port_regs_dump_seq_next,
+	.stop  = port_regs_dump_seq_stop,
+	.show  = port_regs_dump_seq_show,
+};
+
+static int port_regs_dump_open(struct inode *inode, struct file *filep)
+{
+	struct seq_file *sf;
+	int ret;
+
+	ret = seq_open(filep, &port_regs_dump_seq_ops);
+	if (ret)
+		return ret;
+
+	/* stash the port for the show() callback */
+	sf = filep->private_data;
+	sf->private = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations port_regs_dump_fops = {
+	.owner   = THIS_MODULE,
+	.open    = port_regs_dump_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/* create <debugfs>/<driver root>/portN_regs (root-readable register dump) */
+void port_dbg_init(struct ess_port *port)
+{
+	char fname[32];
+
+	snprintf(fname, sizeof(fname), "port%d_regs", port->id);
+	port->regs_dbg = debugfs_create_file(fname, 0400,
+					     port->priv->dbg_root,
+					     port,
+					     &port_regs_dump_fops);
+}
+
+/*
+ * Remove the per-port debugfs dump file created by port_dbg_init().
+ */
+void port_dbg_release(struct ess_port *port)
+{
+	/* debugfs_remove() is a no-op on NULL and error pointers, so the
+	 * handle can be passed unconditionally */
+	debugfs_remove(port->regs_dbg);
+}
+
+/*
+ * uniphy regs dump functions
+ */
+/* seq_file iterator over uniphy_regs[]: one descriptor per step */
+static void *uniphy_regs_dump_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return (*pos < ARRAY_SIZE(uniphy_regs)) ? pos : NULL;
+}
+
+static void *uniphy_regs_dump_seq_next(struct seq_file *s,
+				       void __always_unused *v,
+				       loff_t *pos)
+{
+	return (++(*pos) < ARRAY_SIZE(uniphy_regs)) ? pos : NULL;
+}
+
+static void uniphy_regs_dump_seq_stop(struct seq_file __always_unused *s,
+				      void __always_unused *v)
+{
+	/* nothing to release: iterator state lives in *pos */
+}
+
+/* print one uniphy register: name, offset, value; XPCS registers are
+ * skipped unless the current interface mode keeps the XPCS block out
+ * of reset */
+static int uniphy_regs_dump_seq_show(struct seq_file *s, void *v)
+{
+	struct ess_uniphy *uniphy = s->private;
+	const struct reg_desc *rdesc;
+	int i = *(loff_t *)v;
+	u32 val;
+	int width;	/* printed hex digits: 8 for core regs, 4 for XPCS
+			 * regs (16 significant bits) */
+
+	rdesc = &uniphy_regs[i];
+	switch (rdesc->type) {
+	case REG_CORE:
+		val = uphy_readl(uniphy, rdesc->offset);
+		width = 8;
+		break;
+	case REG_XPCS:
+		switch (uniphy->cur_interface) {
+		case PHY_INTERFACE_MODE_USXGMII:
+		case PHY_INTERFACE_MODE_10G_QXGMII:
+		case PHY_INTERFACE_MODE_10GBASER:
+			break;
+		default:
+			/* xpcs block is in reset */
+			return 0;
+		}
+		/* NOTE(review): XPCS registers go through the same
+		 * uphy_readl() accessor as core registers -- confirm the
+		 * XPCS window is reachable at these offsets */
+		val = uphy_readl(uniphy, rdesc->offset);
+		width = 4;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	seq_printf(s, "%-35s\t0x%-6x\t0x%.*x\n",
+		   rdesc->name, rdesc->offset, width, val);
+
+	return 0;
+}
+
+static const struct seq_operations uniphy_regs_dump_seq_ops = {
+	.start = uniphy_regs_dump_seq_start,
+	.next  = uniphy_regs_dump_seq_next,
+	.stop  = uniphy_regs_dump_seq_stop,
+	.show  = uniphy_regs_dump_seq_show,
+};
+
+static int uniphy_regs_dump_open(struct inode *inode, struct file *filep)
+{
+	struct ess_uniphy *uniphy = inode->i_private;
+	int ret;
+
+	/* FIXME: we should hold mutex during dump operation */
+	if (!uniphy->channels_used)
+		return -ENETDOWN;
+
+	ret = seq_open(filep, &uniphy_regs_dump_seq_ops);
+	if (ret)
+		return ret;
+
+	/* stash the uniphy for the show() callback */
+	((struct seq_file *)filep->private_data)->private = uniphy;
+	return 0;
+}
+
+static const struct file_operations uniphy_regs_dump_fops = {
+	.owner   = THIS_MODULE,
+	.open    = uniphy_regs_dump_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/* create <debugfs>/<driver root>/uniphyN_regs (root-readable register dump) */
+void uniphy_dbg_init(struct ess_uniphy *uniphy)
+{
+	char fname[32];
+
+	snprintf(fname, sizeof(fname), "uniphy%d_regs", uniphy->id);
+	uniphy->regs_dbg = debugfs_create_file(fname, 0400,
+					       uniphy->priv->dbg_root,
+					       uniphy,
+					       &uniphy_regs_dump_fops);
+}
+
+/*
+ * Remove the per-uniphy debugfs dump file created by uniphy_dbg_init().
+ */
+void uniphy_dbg_release(struct ess_uniphy *uniphy)
+{
+	/* debugfs_remove() is a no-op on NULL and error pointers, so the
+	 * handle can be passed unconditionally */
+	debugfs_remove(uniphy->regs_dbg);
+}
+
+/*
+ *
+ */
+/*
+ * Print one reg_desc entry of the PPE block to a seq_file: a single
+ * line for a scalar register, or one line per table entry (and per
+ * consecutive 32-bit word of an entry) for a repeated register.
+ */
+static int generic_dump_show(struct ipq95xx_ess_priv *priv,
+			     const struct reg_desc *rdesc,
+			     struct seq_file *s)
+{
+	unsigned int r;
+
+	/* scalar register: single read, single line */
+	if (!rdesc->repeat) {
+		seq_printf(s, "%-35s\t0x%06x\t0x%.*x\n",
+			   rdesc->name, rdesc->offset, 8,
+			   ppe_readl(priv, rdesc->offset));
+		return 0;
+	}
+
+	for (r = 0; r < rdesc->repeat; r++) {
+		unsigned int nr_words = rdesc->consec_count;
+		unsigned int c;
+
+		for (c = 0; c < (nr_words ? nr_words : 1); c++) {
+			char label[64];
+			char word_sfx[32];
+			unsigned int off;
+			u32 val;
+
+			off = rdesc->offset + r * rdesc->repeat_offset +
+				c * 0x4;
+			val = ppe_readl(priv, off);
+
+			/* append "_<word>" only for multi-word entries */
+			if (nr_words)
+				snprintf(word_sfx, sizeof(word_sfx),
+					 "_%d", c);
+			else
+				word_sfx[0] = 0;
+
+			snprintf(label, sizeof(label), "%s%s[%d]",
+				 rdesc->name, word_sfx, r);
+			seq_printf(s, "%-35s\t0x%06x\t0x%.*x\n",
+				   label, off, 8, val);
+		}
+	}
+	return 0;
+}
+
+/*
+ * ppe regs ops
+ */
+static void *ppe_regs_dump_seq_start(struct seq_file *s, loff_t *pos)
+{
+	if (*pos >= ARRAY_SIZE(ppe_regs))
+		return NULL;
+	return pos;
+}
+
+static void *ppe_regs_dump_seq_next(struct seq_file *s,
+				    void __always_unused *v,
+				    loff_t *pos)
+{
+	(*pos)++;
+	if (*pos >= ARRAY_SIZE(ppe_regs))
+		return NULL;
+	return pos;
+}
+
+static void ppe_regs_dump_seq_stop(struct seq_file __always_unused *s,
+				   void __always_unused *v)
+{
+	/* nothing to release: iterator state lives in *pos */
+}
+
+/* one ppe_regs[] descriptor per step; tables expanded by the helper */
+static int ppe_regs_dump_seq_show(struct seq_file *s, void *v)
+{
+	loff_t idx = *(loff_t *)v;
+
+	return generic_dump_show(s->private, &ppe_regs[idx], s);
+}
+
+static const struct seq_operations ppe_regs_dump_seq_ops = {
+	.start = ppe_regs_dump_seq_start,
+	.next  = ppe_regs_dump_seq_next,
+	.stop  = ppe_regs_dump_seq_stop,
+	.show  = ppe_regs_dump_seq_show,
+};
+
+static int ppe_regs_dump_open(struct inode *inode, struct file *filep)
+{
+	struct seq_file *sf;
+	int ret;
+
+	ret = seq_open(filep, &ppe_regs_dump_seq_ops);
+	if (ret)
+		return ret;
+
+	/* stash driver private data for the show() callback */
+	sf = filep->private_data;
+	sf->private = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations ppe_regs_dump_fops = {
+	.owner   = THIS_MODULE,
+	.open    = ppe_regs_dump_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * edma regs ops
+ */
+/* seq_file iterator over edma_regs[]: one descriptor per step */
+static void *edma_regs_dump_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return (*pos < ARRAY_SIZE(edma_regs)) ? pos : NULL;
+}
+
+static void *edma_regs_dump_seq_next(struct seq_file *s,
+				    void __always_unused *v,
+				    loff_t *pos)
+{
+	return (++(*pos) < ARRAY_SIZE(edma_regs)) ? pos : NULL;
+}
+
+static void edma_regs_dump_seq_stop(struct seq_file __always_unused *s,
+				   void __always_unused *v)
+{
+	/* nothing to release: iterator state lives in *pos */
+}
+
+/* one edma_regs[] descriptor per step; tables expanded by the helper */
+static int edma_regs_dump_seq_show(struct seq_file *s, void *v)
+{
+	struct ipq95xx_ess_priv *priv = s->private;
+	int i = *(loff_t *)v;
+	return generic_dump_show(priv, &edma_regs[i], s);
+}
+
+static const struct seq_operations edma_regs_dump_seq_ops = {
+	.start = edma_regs_dump_seq_start,
+	.next  = edma_regs_dump_seq_next,
+	.stop  = edma_regs_dump_seq_stop,
+	.show  = edma_regs_dump_seq_show,
+};
+
+static int edma_regs_dump_open(struct inode *inode, struct file *filep)
+{
+	struct ipq95xx_ess_priv *priv = inode->i_private;
+	int ret;
+
+	ret = seq_open(filep, &edma_regs_dump_seq_ops);
+	if (ret)
+		return ret;
+
+	/* stash driver private data for the show() callback */
+	((struct seq_file *)filep->private_data)->private = priv;
+	return 0;
+}
+
+static const struct file_operations edma_regs_dump_fops = {
+	.owner   = THIS_MODULE,
+	.open    = edma_regs_dump_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * queue_counters ops
+ */
+#define MAX_QUEUE_COUNTERS	256 /* only show unicast queues */
+
+/* seq_file iterator: one unicast queue per step */
+static void *queue_counters_dump_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return (*pos < MAX_QUEUE_COUNTERS) ? pos : NULL;
+}
+
+static void *queue_counters_dump_seq_next(struct seq_file *s,
+				    void __always_unused *v,
+				    loff_t *pos)
+{
+	return (++(*pos) < MAX_QUEUE_COUNTERS) ? pos : NULL;
+}
+
+static void queue_counters_dump_seq_stop(struct seq_file __always_unused *s,
+				   void __always_unused *v)
+{
+	/* nothing to release: iterator state lives in *pos */
+}
+
+/* print tx packet/byte/pending/drop counters for one unicast queue */
+static int queue_counters_dump_seq_show(struct seq_file *s, void *v)
+{
+	struct ipq95xx_ess_priv *priv = s->private;
+	int queue = *(loff_t *)v;
+	u32 val;
+	u64 count;
+	int i;
+
+	val = ppe_readl(priv, QUEUE_TX_COUNTER_TBL_REG(queue, 0));
+	seq_printf(s, "queue[%-3d]: tx packets:    %u\n", queue, val);
+
+	/* 64-bit byte counter: word 1 holds the low 32 bits, word 2 the
+	 * high 32 bits */
+	val = ppe_readl(priv, QUEUE_TX_COUNTER_TBL_REG(queue, 1));
+	count = val;
+	val = ppe_readl(priv, QUEUE_TX_COUNTER_TBL_REG(queue, 2));
+	count |= (u64)val << 32;
+
+	seq_printf(s, "queue[%-3d]: tx bytes:      %llu\n", queue, count);
+
+	val = ppe_readl(priv, AC_UNI_QUEUE_CNT_TBL_REG(queue));
+	seq_printf(s, "queue[%-3d]: tx pending:    %u\n", queue, val);
+
+	/* per-drop-class packet counters */
+	for (i = 0; i < UNI_DROP_CNT_TBL_MAX_ITEMS; i++) {
+		val = ppe_readl(priv, UNI_DROP_CNT_TBL_REG(queue, i, 0));
+		seq_printf(s, "queue[%-3d]: tx drop%d pkts: %u\n",
+			   queue, i, val);
+	}
+
+	return 0;
+}
+
+static const struct seq_operations queue_counters_dump_seq_ops = {
+	.start = queue_counters_dump_seq_start,
+	.next  = queue_counters_dump_seq_next,
+	.stop  = queue_counters_dump_seq_stop,
+	.show  = queue_counters_dump_seq_show,
+};
+
+static int queue_counters_dump_open(struct inode *inode, struct file *filep)
+{
+	struct ipq95xx_ess_priv *priv = inode->i_private;
+	int ret;
+
+	ret = seq_open(filep, &queue_counters_dump_seq_ops);
+	if (ret)
+		return ret;
+
+	/* stash driver private data for the show() callback */
+	((struct seq_file *)filep->private_data)->private = priv;
+	return 0;
+}
+
+static const struct file_operations queue_counters_dump_fops = {
+	.owner   = THIS_MODULE,
+	.open    = queue_counters_dump_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * port_counters ops
+ */
+#define MAX_VPORT_COUNTERS	92 /* max value to 256 */
+
+/* seq_file iterator: one virtual port per step */
+static void *port_counters_dump_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return (*pos < MAX_VPORT_COUNTERS) ? pos : NULL;
+}
+
+static void *port_counters_dump_seq_next(struct seq_file *s,
+				    void __always_unused *v,
+				    loff_t *pos)
+{
+	return (++(*pos) < MAX_VPORT_COUNTERS) ? pos : NULL;
+}
+
+static void port_counters_dump_seq_stop(struct seq_file __always_unused *s,
+				   void __always_unused *v)
+{
+	/* nothing to release: iterator state lives in *pos */
+}
+
+/* print rx/tx packet, byte and drop counters for one virtual port */
+static int port_counters_dump_seq_show(struct seq_file *s, void *v)
+{
+	struct ipq95xx_ess_priv *priv = s->private;
+	int port = *(loff_t *)v;
+	u32 val[4], rx_packets, rx_dropped, tx_packets;
+	u64 rx_bytes, tx_bytes;
+	int i;
+
+	/* VP RX stage */
+	for (i = 0; i < 4; i++)
+		val[i] = ppe_readl(priv, PORT_RX_CNT_TBL_REG(port, i));
+
+	rx_packets = (val[0] & PORT_RX_CNT0_PKTS_MASK) >>
+		PORT_RX_CNT0_PKTS_SHIFT;
+
+	/* 64-bit byte counter: low part in word 1, high part in word 2 */
+	rx_bytes = (val[1] & PORT_RX_CNT1_BYTES_LO_MASK) >>
+		PORT_RX_CNT1_BYTES_LO_SHIFT;
+	rx_bytes |= ((u64)val[2] & PORT_RX_CNT2_BYTES_HI_MASK) << 32;
+
+	/* drop counter straddles words 2 and 3.
+	 * NOTE(review): assumes the low drop field is 24 bits wide and
+	 * the assembled counter fits in 32 bits -- confirm reg layout */
+	rx_dropped = (val[2] & PORT_RX_CNT2_DROP_PKT_LO_MASK) >>
+		PORT_RX_CNT2_DROP_PKT_LO_SHIFT;
+	rx_dropped |= (val[3] & PORT_RX_CNT3_DROP_PKT_HI_MASK) << 24;
+
+	seq_printf(s, "vport[%-2d]: rx packets: %u\n", port, rx_packets);
+	seq_printf(s, "vport[%-2d]: rx bytes:   %llu\n", port, rx_bytes);
+	seq_printf(s, "vport[%-2d]: rx drop:    %u\n", port, rx_dropped);
+
+	for (i = 0; i < 3; i++)
+		val[i] = ppe_readl(priv, PORT_TX_CNT_TBL_REG(port, i));
+
+	/* Post-L2 stage */
+	tx_packets = (val[0] & PORT_TX_CNT0_PKTS_MASK) >>
+		PORT_TX_CNT0_PKTS_SHIFT;
+
+	tx_bytes = (val[1] & PORT_TX_CNT1_BYTES_LO_MASK) >>
+		PORT_TX_CNT1_BYTES_LO_SHIFT;
+	tx_bytes |= ((u64)val[2] & PORT_TX_CNT2_BYTES_HI_MASK) << 32;
+
+	seq_printf(s, "vport[%-2d]: tx packets: %u\n", port, tx_packets);
+	seq_printf(s, "vport[%-2d]: tx bytes:   %llu\n", port, tx_bytes);
+
+	return 0;
+}
+
+static const struct seq_operations port_counters_dump_seq_ops = {
+	.start = port_counters_dump_seq_start,
+	.next  = port_counters_dump_seq_next,
+	.stop  = port_counters_dump_seq_stop,
+	.show  = port_counters_dump_seq_show,
+};
+
+static int port_counters_dump_open(struct inode *inode, struct file *filep)
+{
+	struct ipq95xx_ess_priv *priv = inode->i_private;
+	int ret;
+
+	ret = seq_open(filep, &port_counters_dump_seq_ops);
+	if (ret)
+		return ret;
+
+	/* stash driver private data for the show() callback */
+	((struct seq_file *)filep->private_data)->private = priv;
+	return 0;
+}
+
+static const struct file_operations port_counters_dump_fops = {
+	.owner   = THIS_MODULE,
+	.open    = port_counters_dump_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * phys_port_counters ops
+ */
+#define MAX_PHYS_PORT_COUNTERS	8
+
+/* seq_file iterator: one physical port per step */
+static void *phys_port_counters_dump_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return (*pos < MAX_PHYS_PORT_COUNTERS) ? pos : NULL;
+}
+
+static void *phys_port_counters_dump_seq_next(struct seq_file *s,
+				    void __always_unused *v,
+				    loff_t *pos)
+{
+	return (++(*pos) < MAX_PHYS_PORT_COUNTERS) ? pos : NULL;
+}
+
+static void phys_port_counters_dump_seq_stop(struct seq_file __always_unused *s,
+				   void __always_unused *v)
+{
+	/* nothing to release: iterator state lives in *pos */
+}
+
+/* print rx packet/byte/drop counters for one physical port */
+static int phys_port_counters_dump_seq_show(struct seq_file *s, void *v)
+{
+	struct ipq95xx_ess_priv *priv = s->private;
+	int port = *(loff_t *)v;
+	u32 val[4], rx_packets, rx_dropped;
+	u64 rx_bytes;
+	int i;
+
+	/* RX MAC stage */
+	for (i = 0; i < 4; i++)
+		val[i] = ppe_readl(priv, PHYS_PORT_RX_CNT_TBL_REG(port, i));
+
+	rx_packets = (val[0] & PORT_RX_CNT0_PKTS_MASK) >>
+		PORT_RX_CNT0_PKTS_SHIFT;
+
+	/* 64-bit byte counter: low part in word 1, high part in word 2 */
+	rx_bytes = (val[1] & PORT_RX_CNT1_BYTES_LO_MASK) >>
+		PORT_RX_CNT1_BYTES_LO_SHIFT;
+	rx_bytes |= ((u64)val[2] & PORT_RX_CNT2_BYTES_HI_MASK) << 32;
+
+	/* drop counter straddles words 2 and 3 (low 24 bits + high part) */
+	rx_dropped = (val[2] & PORT_RX_CNT2_DROP_PKT_LO_MASK) >>
+		PORT_RX_CNT2_DROP_PKT_LO_SHIFT;
+	rx_dropped |= (val[3] & PORT_RX_CNT3_DROP_PKT_HI_MASK) << 24;
+
+	seq_printf(s, "physport[%-2d]: rx packets: %u\n", port, rx_packets);
+	seq_printf(s, "physport[%-2d]: rx bytes:   %llu\n", port, rx_bytes);
+	seq_printf(s, "physport[%-2d]: rx drop:    %u\n", port, rx_dropped);
+
+	return 0;
+}
+
+static const struct seq_operations phys_port_counters_dump_seq_ops = {
+	.start = phys_port_counters_dump_seq_start,
+	.next  = phys_port_counters_dump_seq_next,
+	.stop  = phys_port_counters_dump_seq_stop,
+	.show  = phys_port_counters_dump_seq_show,
+};
+
+static int phys_port_counters_dump_open(struct inode *inode, struct file *filep)
+{
+	struct ipq95xx_ess_priv *priv = inode->i_private;
+	int ret;
+
+	ret = seq_open(filep, &phys_port_counters_dump_seq_ops);
+	if (ret)
+		return ret;
+
+	/* stash driver private data for the show() callback */
+	((struct seq_file *)filep->private_data)->private = priv;
+	return 0;
+}
+
+static const struct file_operations phys_port_counters_dump_fops = {
+	.owner   = THIS_MODULE,
+	.open    = phys_port_counters_dump_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * drop_stats ops
+ */
+#define MAX_DROP_STATS_PORT	14
+
+/* seq_file iterator: one port per step */
+static void *drop_stats_dump_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return (*pos < MAX_DROP_STATS_PORT) ? pos : NULL;
+}
+
+static void *drop_stats_dump_seq_next(struct seq_file *s,
+				    void __always_unused *v,
+				    loff_t *pos)
+{
+	return (++(*pos) < MAX_DROP_STATS_PORT) ? pos : NULL;
+}
+
+static void drop_stats_dump_seq_stop(struct seq_file __always_unused *s,
+				   void __always_unused *v)
+{
+	/* nothing to release: iterator state lives in *pos */
+}
+
+/* print per-port dropped packet and byte counters, once for the
+ * admission-control (ac) bank and once for the flow-control (fc) bank */
+static int drop_stats_dump_seq_show(struct seq_file *s, void *v)
+{
+	struct ipq95xx_ess_priv *priv = s->private;
+	int port = *(loff_t *)v;
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		u32 val;
+		u64 count;
+
+		/* FIXME: the register offsets below do not depend on 'i',
+		 * so the "ac" and "fc" lines currently print the same
+		 * counters -- verify DROP_STAT_REG() indexing for the fc
+		 * bank */
+		val = ppe_readl(priv, DROP_STAT_REG(port, 0));
+		seq_printf(s, "port[%-2d]: %s dropped pkts:   %u\n",
+			   port,
+			   (i == 0) ? "ac" : "fc",
+			   val & DROP_STAT0_PKTS_MASK);
+
+		/* 64-bit byte counter: low part in word 1, high in word 2 */
+		val = ppe_readl(priv, DROP_STAT_REG(port, 1));
+		count = val & DROP_STAT1_BYTES_LO_MASK;
+
+		val = ppe_readl(priv, DROP_STAT_REG(port, 2));
+		/* was DROP_STAT2_BYTES_HI_SHIFT: a shift count is not a
+		 * bit mask, which corrupted the high word */
+		count |= (u64)(val & DROP_STAT2_BYTES_HI_MASK) << 32;
+
+		seq_printf(s, "port[%-2d]: %s dropped bytes:  %llu\n",
+			   port,
+			   (i == 0) ? "ac" : "fc",
+			   count);
+	}
+
+	return 0;
+}
+
+static const struct seq_operations drop_stats_dump_seq_ops = {
+	.start = drop_stats_dump_seq_start,
+	.next  = drop_stats_dump_seq_next,
+	.stop  = drop_stats_dump_seq_stop,
+	.show  = drop_stats_dump_seq_show,
+};
+
+static int drop_stats_dump_open(struct inode *inode, struct file *filep)
+{
+	struct ipq95xx_ess_priv *priv = inode->i_private;
+	int ret;
+
+	ret = seq_open(filep, &drop_stats_dump_seq_ops);
+	if (ret)
+		return ret;
+
+	/* stash driver private data for the show() callback */
+	((struct seq_file *)filep->private_data)->private = priv;
+	return 0;
+}
+
+static const struct file_operations drop_stats_dump_fops = {
+	.owner   = THIS_MODULE,
+	.open    = drop_stats_dump_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * drop_reason ops
+ */
+#define MAX_DROP_REASON_CODE	128
+#define MAX_DROP_REASON_PORTS	8
+#define MAX_DROP_REASON_ITEMS	(MAX_DROP_REASON_CODE * MAX_DROP_REASON_PORTS)
+
+/* seq_file iterator: one flat (drop code, port) pair per step */
+static void *drop_reason_dump_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return (*pos < MAX_DROP_REASON_ITEMS) ? pos : NULL;
+}
+
+static void *drop_reason_dump_seq_next(struct seq_file *s,
+				    void __always_unused *v,
+				    loff_t *pos)
+{
+	return (++(*pos) < MAX_DROP_REASON_ITEMS) ? pos : NULL;
+}
+
+static void drop_reason_dump_seq_stop(struct seq_file __always_unused *s,
+				   void __always_unused *v)
+{
+	/* nothing to release: iterator state lives in *pos */
+}
+
+/* print the per-port packet drop counter of one drop reason code */
+static int drop_reason_dump_seq_show(struct seq_file *s, void *v)
+{
+	struct ipq95xx_ess_priv *priv = s->private;
+	u32 val[3];
+	int index = *(loff_t *)v;
+	int i, port, code;
+
+	/* decompose the flat iterator index into (code, port) */
+	code = (index / MAX_DROP_REASON_PORTS);
+	port = (index - code * MAX_DROP_REASON_PORTS);
+
+	/* NOTE(review): all three table words are read but only word 0
+	 * (packet count) is printed */
+	for (i = 0; i < 3; i++)
+		val[i] = ppe_readl(priv, DROP_CPU_CNT_TBL_REG(port, code, i));
+
+	seq_printf(s, "code[%-2d] port[%-2d]: dropped pkts:   %u\n",
+		   code, port, val[0]);
+
+	return 0;
+}
+
+static const struct seq_operations drop_reason_dump_seq_ops = {
+	.start = drop_reason_dump_seq_start,
+	.next  = drop_reason_dump_seq_next,
+	.stop  = drop_reason_dump_seq_stop,
+	.show  = drop_reason_dump_seq_show,
+};
+
+static int drop_reason_dump_open(struct inode *inode, struct file *filep)
+{
+	struct ipq95xx_ess_priv *priv = inode->i_private;
+	int ret;
+
+	ret = seq_open(filep, &drop_reason_dump_seq_ops);
+	if (ret)
+		return ret;
+
+	/* stash driver private data for the show() callback */
+	((struct seq_file *)filep->private_data)->private = priv;
+	return 0;
+}
+
+static const struct file_operations drop_reason_dump_fops = {
+	.owner   = THIS_MODULE,
+	.open    = drop_reason_dump_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * FDB
+ */
+#define MAX_FDB_IDX	2040
+
+/* randomly choosen to check match between request and response */
+#define FDB_CMD_ID	4
+
+static void *fdb_dump_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return (*pos < MAX_FDB_IDX) ? pos : NULL;
+}
+
+static void *fdb_dump_seq_next(struct seq_file *s,
+				    void __always_unused *v,
+				    loff_t *pos)
+{
+	return (++(*pos) < MAX_FDB_IDX) ? pos : NULL;
+}
+
+static void fdb_dump_seq_stop(struct seq_file __always_unused *s,
+				   void __always_unused *v)
+{
+}
+
+static int fdb_dump_seq_show(struct seq_file *s, void *v)
+{
+	struct ipq95xx_ess_priv *priv = s->private;
+	int i, index = *(loff_t *)v;
+	u32 val[3];
+	unsigned int res_count;
+
+	/* clear any previous results */
+	(void)ppe_readl(priv, FDB_TBL_RD_OP_RESULT_REG);
+
+	/* zero input data register */
+	for (i = 0; i < 3; i++)
+		ppe_writel(priv, FDB_TBL_RD_OP_DATAx_REG(i), 0);
+
+	/* send command */
+	val[0] = (FDB_CMD_ID << FDB_TBL_OP_CMD_ID_SHIFT) |
+		(FDB_TBL_OP_TYPE_ID_GET << FDB_TBL_OP_TYPE_SHIFT) |
+		 (3 << FDB_TBL_OP_HASH_BLOCK_BMP_SHIFT) |
+		 FDB_TBL_OP_MODE_INDEX_MASK |
+		(index << FDB_TBL_OP_ENTRY_INDEX_SHIFT);
+	ppe_writel(priv, FDB_TBL_RD_OP_REG, val[0]);
+
+	/* check result */
+	val[0] = ppe_readl(priv, FDB_TBL_RD_OP_RESULT_REG);
+	if ((val[0] & FDB_TBL_OPR_CMD_ID_MASK) != FDB_CMD_ID) { /* response must echo our command id */
+		printk("unexpected fdb command id\n"); /* NOTE(review): missing KERN_* / pr_* level */
+		return 1; /* 1 == SEQ_SKIP: this entry is skipped, dump continues */
+	}
+
+	res_count = (val[0] & FDB_TBL_OPR_VALID_CNT_MASK) >>
+		FDB_TBL_OPR_VALID_CNT_SHIFT;
+	if (!res_count || res_count > 8) { /* hw returns 1..8 valid results */
+		printk("unexpected fdb result count\n");
+		return 1;
+	}
+
+	if (!!(val[0] & FDB_TBL_OPR_OP_RES_MASK)) {
+		printk("fdb get command failed\n");
+		return 1;
+	}
+
+	/* fetch result data */
+	for (i = 0; i < 3; i++)
+		val[i] = ppe_readl(priv, FDB_TBL_RD_OP_RESULTx_REG(i));
+
+	if (val[1] & FDB_ENT1_ENTRY_VALID_MASK) {
+		unsigned int vsi, dst_info, info_type, port;
+		bool is_static;
+		u8 mac[6];
+
+		mac[0] = (val[1] & 0xff00) >> 8; /* MAC is packed across val[1]:val[0], high bytes first */
+		mac[1] = (val[1] & 0xff);
+		mac[2] = (val[0] & 0xff000000) >> 24;
+		mac[3] = (val[0] & 0xff0000) >> 16;
+		mac[4] = (val[0] & 0xff00) >> 8;
+		mac[5] = val[0] & 0xff;
+
+		vsi = (val[1] & FDB_ENT1_VSI_MASK) >> FDB_ENT1_VSI_SHIFT;
+		dst_info = (val[1] & FDB_ENT1_DST_INFO_LO_MASK) >>
+			FDB_ENT1_DST_INFO_LO_SHIFT; /* dst_info is split low/high across val[1]/val[2] */
+		dst_info |= ((val[2] & FDB_ENT2_DST_INFO_HI_MASK) >>
+			     FDB_ENT2_DST_INFO_HI_SHIFT) << 8;
+
+		info_type = (dst_info >> 12) & 0x3; /* only port-id encoding (0x2) is handled here */
+		if (info_type != 0x2) {
+			printk("fdb unsupported dst info type %x\n",
+			       info_type);
+			return 1;
+		}
+
+		port = dst_info & 0xfff;
+		is_static = ((val[2] & FDB_ENT2_HIT_AGE_MASK) >>
+			     FDB_ENT2_HIT_AGE_SHIFT) == 0x3; /* age 0x3 == static, matches fdb_setup_data */
+		seq_printf(s, "%pM vsi:%d port:%u %s\n",
+			   mac, vsi, port,
+			   is_static ? "static" : "dynamic");
+	}
+
+	return 0;
+}
+
+static const struct seq_operations fdb_dump_seq_ops = {
+	.start = fdb_dump_seq_start,
+	.next  = fdb_dump_seq_next,
+	.stop  = fdb_dump_seq_stop,
+	.show  = fdb_dump_seq_show,
+};
+
+static int fdb_dump_open(struct inode *inode, struct file *filep)
+{
+	struct ipq95xx_ess_priv *priv = inode->i_private;
+	int ret;
+
+	ret = seq_open(filep, &fdb_dump_seq_ops);
+	if (ret)
+		return ret;
+
+	((struct seq_file *)filep->private_data)->private = priv;
+	return 0;
+}
+
+static const struct file_operations fdb_dump_fops = {
+	.owner   = THIS_MODULE,
+	.open    = fdb_dump_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * VSI
+ */
+#define MAX_VSI_IDX	64
+
+static void *vsi_dump_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return (*pos < MAX_VSI_IDX) ? pos : NULL;
+}
+
+static void *vsi_dump_seq_next(struct seq_file *s,
+				    void __always_unused *v,
+				    loff_t *pos)
+{
+	return (++(*pos) < MAX_VSI_IDX) ? pos : NULL;
+}
+
+static void vsi_dump_seq_stop(struct seq_file __always_unused *s,
+				   void __always_unused *v)
+{
+}
+
+static int vsi_dump_seq_show(struct seq_file *s, void *v)
+{
+	struct ipq95xx_ess_priv *priv = s->private;
+	int index = *(loff_t *)v;
+	char port_list[32], vport_list[512];
+	size_t len;
+	u32 val[2], members;
+	int i;
+
+	for (i = 0; i < 2; i++)
+		val[i] = ppe_readl(priv, VSI_TBLx_REG(index, i));
+
+	len = 0;
+	len += scnprintf(port_list + len,
+			 sizeof (port_list) - len,
+			 "[");
+	members = val[0] & VSI_TBL0_MEMB_MAP_MASK; /* physical port membership bitmap */
+	for (i = 0; i < 8; i++) {
+		if (!(members & (1 << i)))
+			continue;
+		len += scnprintf(port_list + len,
+				 sizeof (port_list) - len,
+				 "%s%d", len == 1 ? "" : ",", /* len == 1: only "[" written so far, no separator */
+				 i);
+	}
+	len += scnprintf(port_list + len,
+			 sizeof (port_list) - len,
+			 "]");
+
+	len = 0;
+	len += scnprintf(vport_list + len,
+			 sizeof (vport_list) - len,
+			 "[");
+	for (i = 0; i < 256 / 32; i++) { /* 256 virtual ports, 32 membership bits per remap register */
+		u32 vmembers;
+		int j;
+
+		vmembers = ppe_readl(priv, VSI_REMAP_TBL_REG(index, i));
+		for (j = 0; j < 32; j++) {
+			if (!(vmembers & (1 << j)))
+				continue;
+			len += scnprintf(vport_list + len,
+					 sizeof (vport_list) - len,
+					 "%s%d", len == 1 ? "" : ",",
+					 j + i * 32);
+		}
+	}
+	len += scnprintf(vport_list + len, sizeof (vport_list) - len,
+			 "]");
+
+	seq_printf(s, "vsi:%d ports:%s vports:%s\n",
+		   index, port_list, vport_list);
+
+	return 0;
+}
+
+static const struct seq_operations vsi_dump_seq_ops = {
+	.start = vsi_dump_seq_start,
+	.next  = vsi_dump_seq_next,
+	.stop  = vsi_dump_seq_stop,
+	.show  = vsi_dump_seq_show,
+};
+
+static int vsi_dump_open(struct inode *inode, struct file *filep)
+{
+	struct ipq95xx_ess_priv *priv = inode->i_private;
+	int ret;
+
+	ret = seq_open(filep, &vsi_dump_seq_ops);
+	if (ret)
+		return ret;
+
+	((struct seq_file *)filep->private_data)->private = priv;
+	return 0;
+}
+
+static const struct file_operations vsi_dump_fops = {
+	.owner   = THIS_MODULE,
+	.open    = vsi_dump_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * init
+ */
+void ess_dbg_init(struct ipq95xx_ess_priv *priv)
+{
+	priv->regs_ppe_dbg = debugfs_create_file("ppe_regs", 0400,
+						 priv->dbg_root,
+						 priv,
+						 &ppe_regs_dump_fops);
+	priv->regs_edma_dbg = debugfs_create_file("edma_regs", 0400,
+						  priv->dbg_root,
+						  priv,
+						  &edma_regs_dump_fops);
+	priv->phys_port_counters_dbg =
+		debugfs_create_file("phys_port_counters", 0400,
+				    priv->dbg_root,
+				    priv,
+				    &phys_port_counters_dump_fops);
+	priv->port_counters_dbg = debugfs_create_file("port_counters", 0400,
+						      priv->dbg_root,
+						      priv,
+						      &port_counters_dump_fops);
+	priv->queue_counters_dbg = debugfs_create_file("queue_counters", 0400,
+						      priv->dbg_root,
+						      priv,
+						      &queue_counters_dump_fops);
+	priv->drop_dbg = debugfs_create_file("drop_fc", 0400,
+					     priv->dbg_root,
+					     priv,
+					     &drop_stats_dump_fops);
+	priv->drop_reason_dbg = debugfs_create_file("drop_per_reason", 0400,
+						    priv->dbg_root,
+						    priv,
+						    &drop_reason_dump_fops);
+	priv->fdb_dbg = debugfs_create_file("fdb_dump", 0400,
+					    priv->dbg_root,
+					    priv,
+					    &fdb_dump_fops);
+	priv->vsi_dbg = debugfs_create_file("vsi_dump", 0400,
+					    priv->dbg_root,
+					    priv,
+					    &vsi_dump_fops);
+}
+
+void ess_dbg_release(struct ipq95xx_ess_priv *priv)
+{
+	debugfs_remove(priv->regs_ppe_dbg);
+	debugfs_remove(priv->regs_edma_dbg);
+	debugfs_remove(priv->phys_port_counters_dbg);
+	debugfs_remove(priv->port_counters_dbg);
+	debugfs_remove(priv->queue_counters_dbg);
+	debugfs_remove(priv->drop_dbg);
+	debugfs_remove(priv->drop_reason_dbg);
+	debugfs_remove(priv->fdb_dbg);
+	debugfs_remove(priv->vsi_dbg);
+}
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./fdb.c linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/fdb.c
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./fdb.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/fdb.c	2023-12-21 17:30:06.441516398 +0100
@@ -0,0 +1,152 @@
+#include "regs/ppe_regs.h"
+#include "ipq95xx_ess.h"
+
+/*
+ *
+ */
+#define FDB_CMD_ID	4
+
+static int fdb_do_command(struct ipq95xx_ess_priv *priv,
+			  bool is_read,
+			  unsigned int cmd_type,
+			  bool op_mode_is_index,
+			  int index_entry,
+			  int *ret_index)
+{
+	unsigned int res_count, ret_cmd_id;
+	u32 val;
+
+	/* clear any previous results */
+	(void)ppe_readl(priv, FDB_TBL_RD_OP_RESULT_REG);
+
+	/* send command */
+	val = (FDB_CMD_ID << FDB_TBL_OP_CMD_ID_SHIFT) |
+		(cmd_type << FDB_TBL_OP_TYPE_SHIFT) |
+		(3 << FDB_TBL_OP_HASH_BLOCK_BMP_SHIFT) |
+		(index_entry << FDB_TBL_OP_ENTRY_INDEX_SHIFT);
+
+	if (op_mode_is_index)
+		val |= FDB_TBL_OP_MODE_INDEX_MASK;
+
+	if (is_read) { /* read/get path uses the RD_OP register pair */
+		ppe_writel(priv, FDB_TBL_RD_OP_REG, val);
+		val = ppe_readl(priv, FDB_TBL_RD_OP_RESULT_REG);
+	} else { /* add/modify path uses the OP register pair */
+		ppe_writel(priv, FDB_TBL_OP_REG, val);
+		val = ppe_readl(priv, FDB_TBL_OP_RESULT_REG);
+	}
+
+	ret_cmd_id = val & FDB_TBL_OPR_CMD_ID_MASK;
+	if (ret_cmd_id != FDB_CMD_ID) { /* response must echo the command id we sent */
+		printk("unexpected fdb command id: %x vs %x\n",
+		       ret_cmd_id, FDB_CMD_ID); /* NOTE(review): missing KERN_* / pr_* level */
+		return 1;
+	}
+
+	res_count = (val & FDB_TBL_OPR_VALID_CNT_MASK) >>
+		FDB_TBL_OPR_VALID_CNT_SHIFT;
+	if (!res_count || res_count > 8) { /* hw returns 1..8 valid results */
+		printk("unexpected fdb result count\n");
+		return 1;
+	}
+
+	if (!!(val & FDB_TBL_OPR_OP_RES_MASK)) { /* op failed: success return, index -1 means "no entry" */
+		*ret_index = -1;
+		return 0;
+	}
+
+	*ret_index = (val & FDB_TBL_OPR_ENTRY_INDEX_MASK) >>
+		FDB_TBL_OPR_ENTRY_INDEX_SHIFT;
+	return 0;
+}
+
+/*
+ *
+ */
+static void fdb_setup_read_data(struct ipq95xx_ess_priv *priv,
+				unsigned int vsi, const u8 *mac)
+{
+	u32 val[3];
+	int i;
+
+	val[0] = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
+	val[1] = mac[1] | (mac[0] << 8);
+	val[1] |= vsi << FDB_ENT1_VSI_SHIFT;
+	val[2] = 0;
+
+	for (i = 0; i < 3; i++)
+		ppe_writel(priv, FDB_TBL_RD_OP_DATAx_REG(i), val[i]);
+}
+
+/*
+ *
+ */
+static void fdb_setup_data(struct ipq95xx_ess_priv *priv,
+			   unsigned int vsi, const u8 *mac,
+			   unsigned int port_id, bool is_static)
+{
+	unsigned int dst_info;
+	u32 val[3];
+	int i;
+
+	dst_info = (DST_INFO_ENC_TYPE_PORT_ID << DST_INFO_ENC_TYPE_SHIFT) |
+		port_id;
+
+	val[0] = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
+
+	val[1] = mac[1] | (mac[0] << 8);
+	val[1] |= FDB_ENT1_ENTRY_VALID_MASK;
+	val[1] |= FDB_ENT1_LOOKUP_VALID_MASK;
+	val[1] |= vsi << FDB_ENT1_VSI_SHIFT;
+	val[1] |= (dst_info << FDB_ENT1_DST_INFO_LO_SHIFT) &
+		FDB_ENT1_DST_INFO_LO_MASK;
+
+	val[2] = (dst_info >> (32 - FDB_ENT1_DST_INFO_LO_SHIFT)) <<
+		  FDB_ENT2_DST_INFO_HI_SHIFT;
+	if (is_static)
+		val[2] |= 0x3 << FDB_ENT2_HIT_AGE_SHIFT;
+	else
+		val[2] |= 0x2 << FDB_ENT2_HIT_AGE_SHIFT;
+
+	for (i = 0; i < 3; i++)
+		ppe_writel(priv, FDB_TBL_OP_DATAx_REG(i), val[i]);
+}
+
+/*
+ *
+ */
+int
+ess_ppe_fdb_get_entry_index(struct ipq95xx_ess_priv *priv,
+			    unsigned int vsi, const u8 *mac,
+			    int *index)
+{
+	fdb_setup_read_data(priv, vsi, mac);
+	if (fdb_do_command(priv, true, FDB_TBL_OP_TYPE_ID_GET, false, 0,
+			   index))
+		return 1;
+
+	return 0;
+}
+
+/*
+ *
+ */
+int ess_ppe_fdb_add(struct ipq95xx_ess_priv *priv,
+		    unsigned int vsi,
+		    const u8 *mac, unsigned int port)
+{
+	int index;
+
+	/* program the entry data registers, then issue an indexless ADD:
+	 * the hardware picks a free slot and returns its index */
+	fdb_setup_data(priv, vsi, mac, port, true);
+	if (fdb_do_command(priv, false, FDB_TBL_OP_TYPE_ID_ADD, false, 0,
+			   &index))
+		return 1;
+
+	if (index < 0)
+		return 1;
+
+	printk("ok fdb added at index %d\n", index); /* bugfix: %d, index is a signed int */
+	return 0;
+}
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./hwdesc.c linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/hwdesc.c
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./hwdesc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/hwdesc.c	2023-12-21 17:30:06.441516398 +0100
@@ -0,0 +1,445 @@
+#include "ipq95xx_ess.h"
+
+/*
+ *
+ */
+unsigned int hwdesc_get_port_count(enum ipq95xx_ess_type type)
+{
+	switch (type) {
+	case ESS_TYPE_IPQ95XX:
+		return 6;
+	}
+	return 0;
+}
+
+/*
+ *
+ */
+unsigned int hwdesc_get_uniphy_count(enum ipq95xx_ess_type type)
+{
+	switch (type) {
+	case ESS_TYPE_IPQ95XX:
+		return 3;
+	}
+	return 0;
+}
+
+/*
+ *
+ */
+void hwdesc_get_queues_count(enum ipq95xx_ess_type type,
+			     unsigned int *rxfr,
+			     unsigned int *rxdr,
+			     unsigned int *txdr)
+{
+	/* note that the current code assumes same number of available
+	 * tx desc queues as tx completion queues */
+	switch (type) {
+	case ESS_TYPE_IPQ95XX:
+		*rxfr = 8;	/* bugfix: was ',' (comma operator typo) */
+		*rxdr = 24;
+		*txdr = 32;
+		break;
+	}
+}
+
+/*
+ *
+ */
+static int
+hwdesc_port_get_supported_phy_modes_ipq95xx(unsigned int ppe_port_id,
+					    int uniphy_id,
+					    unsigned long *modes)
+{
+	phy_interface_zero(modes);
+
+	switch (ppe_port_id) {
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+		if (uniphy_id != -1 && uniphy_id != 0)
+			return -ENOTSUPP;
+		break;
+	case 4:
+		if (uniphy_id != -1 && uniphy_id != 0 && uniphy_id != 1)
+			return -ENOTSUPP;
+		break;
+	case 5:
+		if (uniphy_id != -1 && uniphy_id != 2)
+			return -ENOTSUPP;
+		break;
+	default:
+		WARN(1, "invalid port");
+		return -EINVAL;
+	}
+
+	switch (ppe_port_id) {
+	case 0:
+	case 1:
+		/* when muxed to uniphy0 */
+		__set_bit(PHY_INTERFACE_MODE_1000BASEX, modes);
+		__set_bit(PHY_INTERFACE_MODE_SGMII, modes);
+		__set_bit(PHY_INTERFACE_MODE_2500BASEX, modes);
+		__set_bit(PHY_INTERFACE_MODE_QSGMII, modes);
+		__set_bit(PHY_INTERFACE_MODE_PSGMII, modes);
+		__set_bit(PHY_INTERFACE_MODE_10G_QXGMII, modes);
+		break;
+	case 2:
+	case 3:
+		/* when muxed to uniphy0 */
+		__set_bit(PHY_INTERFACE_MODE_QSGMII, modes);
+		__set_bit(PHY_INTERFACE_MODE_PSGMII, modes);
+		__set_bit(PHY_INTERFACE_MODE_10G_QXGMII, modes);
+		break;
+	case 4:
+		/* when muxed to uniphy0 */
+		if (uniphy_id == -1 || uniphy_id == 0) {
+			__set_bit(PHY_INTERFACE_MODE_PSGMII, modes);
+		}
+
+		/* when muxed to uniphy1 */
+		if (uniphy_id == -1 || uniphy_id == 1) {
+			__set_bit(PHY_INTERFACE_MODE_10GBASER, modes);
+			__set_bit(PHY_INTERFACE_MODE_USXGMII, modes);
+		}
+
+		/* when muxed to uniphy0 or uniphy1 */
+		__set_bit(PHY_INTERFACE_MODE_1000BASEX, modes);
+		__set_bit(PHY_INTERFACE_MODE_SGMII, modes);
+		__set_bit(PHY_INTERFACE_MODE_2500BASEX, modes);
+		break;
+	case 5:
+		/* when muxed to uniphy2 */
+		__set_bit(PHY_INTERFACE_MODE_1000BASEX, modes);
+		__set_bit(PHY_INTERFACE_MODE_SGMII, modes);
+		__set_bit(PHY_INTERFACE_MODE_2500BASEX, modes);
+		__set_bit(PHY_INTERFACE_MODE_10GBASER, modes);
+		__set_bit(PHY_INTERFACE_MODE_USXGMII, modes);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ *
+ */
+int hwdesc_port_get_supported_phy_modes(enum ipq95xx_ess_type type,
+					unsigned int ppe_port_id,
+					int uniphy_id,
+					unsigned long *modes)
+{
+	switch (type) {
+	case ESS_TYPE_IPQ95XX:
+		return hwdesc_port_get_supported_phy_modes_ipq95xx(ppe_port_id,
+								   uniphy_id,
+								   modes);
+	}
+	return -EINVAL;
+}
+
+/*
+ *
+ */
+static int
+hwdesc_port_select_uniphy_ipq95xx(unsigned int ppe_port_id,
+				  phy_interface_t mode)
+{
+	switch (ppe_port_id) {
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+		return 0;
+	case 4:
+		switch (mode) {
+		case PHY_INTERFACE_MODE_PSGMII:
+			return 0;
+		case PHY_INTERFACE_MODE_SGMII:
+		case PHY_INTERFACE_MODE_1000BASEX:
+		case PHY_INTERFACE_MODE_2500BASEX:
+			/* can't decide, both uniphy can do this */
+			return -1;
+		default:
+			return 1;
+		}
+		break;
+	case 5:
+		return 2;
+	default:
+		return -1;
+	}
+}
+
+/*
+ *
+ */
+int hwdesc_port_select_uniphy(enum ipq95xx_ess_type type,
+			      unsigned int ppe_port_id,
+			      phy_interface_t mode)
+{
+	switch (type) {
+	case ESS_TYPE_IPQ95XX:
+		return hwdesc_port_select_uniphy_ipq95xx(ppe_port_id, mode);
+	}
+	return -1;
+}
+
+/*
+ *
+ */
+static int
+hwdesc_port_get_uniphy_channel_id_ipq95xx(unsigned int ppe_port_id,
+					  unsigned int uniphy_id)
+{
+	switch (ppe_port_id) {
+	case 0:
+		return 0;
+	case 1:
+		return 1;
+	case 2:
+		return 2;
+	case 3:
+		return 3;
+	case 4:
+		if (uniphy_id == 0)
+			return 4;
+		else
+			return 0;
+		break;
+	case 5:
+		return 0;
+	default:
+		return -1;
+	}
+}
+
+/*
+ *
+ */
+int hwdesc_port_get_uniphy_channel_id(enum ipq95xx_ess_type type,
+				      unsigned int ppe_port_id,
+				      unsigned int uniphy_id)
+{
+	switch (type) {
+	case ESS_TYPE_IPQ95XX:
+		return hwdesc_port_get_uniphy_channel_id_ipq95xx(ppe_port_id,
+								 uniphy_id);
+	}
+	return -1;
+}
+
+/*
+ *
+ */
+#define TDM_EGRESS(port) { .enabled = true, .is_egress = true, .port_id = port }
+#define TDM_INGRESS(port) { .enabled = true, .is_egress = false, .port_id = port }
+
+static const struct ess_tdm_config_entry ipq95xx_tdm_config[] = {
+	TDM_INGRESS(0),
+	TDM_EGRESS(0),
+	TDM_INGRESS(5),
+	TDM_EGRESS(5),
+	TDM_INGRESS(6),
+	TDM_EGRESS(6),
+	TDM_INGRESS(1),
+	TDM_EGRESS(1),
+	TDM_INGRESS(0),
+	TDM_EGRESS(0),
+	TDM_INGRESS(5),
+	TDM_EGRESS(5),
+	TDM_INGRESS(6),
+	TDM_EGRESS(6),
+	TDM_INGRESS(7),
+	TDM_EGRESS(7),
+	TDM_INGRESS(0),
+	TDM_EGRESS(0),
+	TDM_INGRESS(1),
+	TDM_EGRESS(1),
+	TDM_INGRESS(5),
+	TDM_EGRESS(5),
+	TDM_INGRESS(6),
+	TDM_EGRESS(6),
+	TDM_INGRESS(2),
+	TDM_EGRESS(2),
+	TDM_INGRESS(0),
+	TDM_EGRESS(0),
+	TDM_INGRESS(5),
+	TDM_EGRESS(5),
+	TDM_INGRESS(6),
+	TDM_EGRESS(6),
+	TDM_INGRESS(1),
+	TDM_EGRESS(1),
+	TDM_INGRESS(3),
+	TDM_EGRESS(3),
+	TDM_INGRESS(0),
+	TDM_EGRESS(0),
+	TDM_INGRESS(5),
+	TDM_EGRESS(5),
+	TDM_INGRESS(6),
+	TDM_EGRESS(6),
+	TDM_INGRESS(7),
+	TDM_EGRESS(7),
+	TDM_INGRESS(0),
+	TDM_EGRESS(0),
+	TDM_INGRESS(1),
+	TDM_EGRESS(1),
+	TDM_INGRESS(5),
+	TDM_EGRESS(5),
+	TDM_INGRESS(6),
+	TDM_EGRESS(6),
+	TDM_INGRESS(4),
+	TDM_EGRESS(4),
+	TDM_INGRESS(0),
+	TDM_EGRESS(0),
+	TDM_INGRESS(5),
+	TDM_EGRESS(5),
+	TDM_INGRESS(6),
+	TDM_EGRESS(6),
+	TDM_INGRESS(1),
+	TDM_EGRESS(1),
+	TDM_INGRESS(0),
+	TDM_EGRESS(0),
+	TDM_INGRESS(5),
+	TDM_EGRESS(5),
+	TDM_INGRESS(6),
+	TDM_EGRESS(6),
+	TDM_INGRESS(2),
+	TDM_EGRESS(2),
+	TDM_INGRESS(0),
+	TDM_EGRESS(0),
+	TDM_INGRESS(7),
+	TDM_EGRESS(7),
+	TDM_INGRESS(5),
+	TDM_EGRESS(5),
+	TDM_INGRESS(6),
+	TDM_EGRESS(6),
+	TDM_INGRESS(1),
+	TDM_EGRESS(1),
+	TDM_INGRESS(0),
+	TDM_EGRESS(0),
+	TDM_INGRESS(5),
+	TDM_EGRESS(5),
+	TDM_INGRESS(6),
+	TDM_EGRESS(6),
+	TDM_INGRESS(3),
+	TDM_EGRESS(3),
+	TDM_INGRESS(1),
+	TDM_EGRESS(1),
+	TDM_INGRESS(0),
+	TDM_EGRESS(0),
+	TDM_INGRESS(5),
+	TDM_EGRESS(5),
+	TDM_INGRESS(6),
+	TDM_EGRESS(6),
+	TDM_INGRESS(4),
+	TDM_EGRESS(4),
+	TDM_INGRESS(7),
+	TDM_EGRESS(7),
+};
+
+/*
+ *
+ */
+int hwdesc_get_tdm_config(enum ipq95xx_ess_type type,
+			  const struct ess_tdm_config_entry **plist,
+			  size_t *pcount)
+{
+	switch (type) {
+	case ESS_TYPE_IPQ95XX:
+		*plist = ipq95xx_tdm_config;
+		*pcount = ARRAY_SIZE(ipq95xx_tdm_config);
+		return 0;
+	}
+	return -1;
+}
+
+#define PSCHED_ENTRY(_pb, _esp, _dsp, _dssp, _dssp_en) {	\
+		.port_bitmap = _pb,				\
+		.en_sched_port = _esp,				\
+		.de_sched_port = _dsp,				\
+		.de_sched_sec_port = _dssp,			\
+		.de_sched_sec_port_en = _dssp_en,		\
+	}
+
+
+static const struct ess_psched_config_entry ipq95xx_psched_config[] = {
+	PSCHED_ENTRY(0x98, 6, 0, 0x1, true),
+	PSCHED_ENTRY(0x94, 5, 6, 0x3, true),
+	PSCHED_ENTRY(0x86, 0, 5, 0x4, true),
+	PSCHED_ENTRY(0x8C, 1, 6, 0x0, true),
+	PSCHED_ENTRY(0x1C, 7, 5, 0x1, true),
+	PSCHED_ENTRY(0x98, 2, 6, 0x0, true),
+	PSCHED_ENTRY(0x1C, 5, 7, 0x1, true),
+	PSCHED_ENTRY(0x34, 3, 6, 0x0, true),
+	PSCHED_ENTRY(0x8C, 4, 5, 0x1, true),
+	PSCHED_ENTRY(0x98, 2, 6, 0x0, true),
+	PSCHED_ENTRY(0x8C, 5, 4, 0x1, true),
+	PSCHED_ENTRY(0xA8, 0, 6, 0x2, true),
+	PSCHED_ENTRY(0x98, 5, 1, 0x0, true),
+	PSCHED_ENTRY(0x98, 6, 5, 0x2, true),
+	PSCHED_ENTRY(0x89, 1, 6, 0x4, true),
+	PSCHED_ENTRY(0xA4, 3, 0, 0x1, true),
+	PSCHED_ENTRY(0x8C, 5, 6, 0x4, true),
+	PSCHED_ENTRY(0xA8, 0, 2, 0x1, true),
+	PSCHED_ENTRY(0x98, 6, 5, 0x0, true),
+	PSCHED_ENTRY(0xC4, 4, 3, 0x1, true),
+	PSCHED_ENTRY(0x94, 6, 5, 0x0, true),
+	PSCHED_ENTRY(0x1C, 7, 6, 0x1, true),
+	PSCHED_ENTRY(0x98, 2, 5, 0x0, true),
+	PSCHED_ENTRY(0x1C, 6, 7, 0x1, true),
+	PSCHED_ENTRY(0x1C, 5, 6, 0x0, true),
+	PSCHED_ENTRY(0x94, 3, 5, 0x1, true),
+	PSCHED_ENTRY(0x8C, 4, 6, 0x0, true),
+	PSCHED_ENTRY(0x94, 1, 5, 0x3, true),
+	PSCHED_ENTRY(0x94, 6, 1, 0x0, true),
+	PSCHED_ENTRY(0xD0, 3, 5, 0x2, true),
+	PSCHED_ENTRY(0x98, 6, 0, 0x1, true),
+	PSCHED_ENTRY(0x94, 5, 6, 0x3, true),
+	PSCHED_ENTRY(0x94, 1, 5, 0x0, true),
+	PSCHED_ENTRY(0x98, 2, 6, 0x1, true),
+	PSCHED_ENTRY(0x8C, 4, 5, 0x0, true),
+	PSCHED_ENTRY(0x1C, 7, 6, 0x1, true),
+	PSCHED_ENTRY(0x8C, 0, 5, 0x4, true),
+	PSCHED_ENTRY(0x89, 1, 6, 0x2, true),
+	PSCHED_ENTRY(0x98, 5, 0, 0x1, true),
+	PSCHED_ENTRY(0x94, 6, 5, 0x3, true),
+	PSCHED_ENTRY(0x92, 0, 6, 0x2, true),
+	PSCHED_ENTRY(0x98, 1, 5, 0x0, true),
+	PSCHED_ENTRY(0x98, 6, 2, 0x1, true),
+	PSCHED_ENTRY(0xD0, 0, 5, 0x3, true),
+	PSCHED_ENTRY(0x94, 6, 0, 0x1, true),
+	PSCHED_ENTRY(0x8C, 5, 6, 0x4, true),
+	PSCHED_ENTRY(0x8C, 1, 5, 0x0, true),
+	PSCHED_ENTRY(0x1C, 6, 7, 0x1, true),
+	PSCHED_ENTRY(0x1C, 5, 6, 0x0, true),
+	PSCHED_ENTRY(0xB0, 2, 3, 0x1, true),
+	PSCHED_ENTRY(0xC4, 4, 5, 0x0, true),
+	PSCHED_ENTRY(0x8C, 6, 4, 0x1, true),
+	PSCHED_ENTRY(0xA4, 3, 6, 0x0, true),
+	PSCHED_ENTRY(0x1C, 5, 7, 0x1, true),
+	PSCHED_ENTRY(0x4C, 0, 5, 0x4, true),
+	PSCHED_ENTRY(0x8C, 6, 0, 0x1, true),
+	PSCHED_ENTRY(0x34, 7, 6, 0x3, true),
+	PSCHED_ENTRY(0x94, 5, 0, 0x1, true),
+	PSCHED_ENTRY(0x98, 6, 5, 0x2, true),
+};
+
+/*
+ *
+ */
+int hwdesc_get_psched_config(enum ipq95xx_ess_type type,
+			     const struct ess_psched_config_entry **plist,
+			     size_t *pcount)
+{
+	switch (type) {
+	case ESS_TYPE_IPQ95XX:
+		*plist = ipq95xx_psched_config;
+		*pcount = ARRAY_SIZE(ipq95xx_psched_config);
+		return 0;
+	}
+	return -1;
+}
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./ipo.c linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/ipo.c
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./ipo.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/ipo.c	2024-01-04 16:55:44.208658048 +0100
@@ -0,0 +1,330 @@
+#include "ipq95xx_ess.h"
+#include "regs/ppe_regs.h"
+
+/*
+ * helper to write IPO rules
+ */
+static void ppe_ipo_rule_reg_set(struct ipq95xx_ess_priv *priv,
+				 union ipo_rule_reg_u *hw_reg, int rule_id)
+{
+	int i;
+
+	for (i = 0; i < 3; i++) /* write all 3 rule words into the IPO rule table */
+		ppe_writel(priv, IPO_RULE_TBL_REG(rule_id) + i * 4,
+			   hw_reg->val[i]);
+}
+
+static void ppe_ipo_mask_reg_set(struct ipq95xx_ess_priv *priv,
+				 union ipo_mask_reg_u *hw_mask, int rule_id)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) /* write both mask words into the IPO mask table */
+		ppe_writel(priv, IPO_MASK_TBL_REG(rule_id) + i * 4,
+			   hw_mask->val[i]);
+}
+
+static void ppe_ipo_action_set(struct ipq95xx_ess_priv *priv,
+			       union ipo_action_u *hw_act, int rule_id)
+{
+	int i;
+
+	for (i = 0; i < 5; i++) /* write all 5 action words into the IPO action table */
+		ppe_writel(priv, IPO_ACTION_TBL_REG(rule_id) + i * 4,
+			   hw_act->val[i]);
+}
+
+/*
+ * install IPO rule to force virtual_port_id destination on all
+ * traffic coming from port_id
+ */
+void
+ppe_ipo_rule_src_port_force_vp_id_set(struct ipq95xx_ess_priv *priv,
+				      int rule_id,
+				      int port_id,
+				      int out_vp_id)
+{
+	union ipo_rule_reg_u hw_reg;
+	union ipo_mask_reg_u hw_mask;
+	union ipo_action_u hw_act;
+	unsigned int port_mask;
+
+	memset(&hw_reg, 0, sizeof(hw_reg));
+	memset(&hw_mask, 0, sizeof(hw_mask));
+	memset(&hw_act, 0, sizeof(hw_act));
+
+	/* use match by MAC DA, type does not matter since we are
+	 * using a zero mask for wildcard match */
+	hw_reg.bf.rule_type = IPO_RULE_TYPE_MAC_DA;
+
+	hw_reg.bf.src_type = IPO_SRC_TYPE_PORTBITMAP;
+	port_mask = (1 << port_id);
+	hw_reg.bf.src_0 = port_mask;
+	hw_reg.bf.src_1 = port_mask >> 1;
+	hw_reg.bf.pri = 1;
+
+	hw_act.bf.fwd_cmd = 0; /* 0 = forward, 1 = drop */
+	hw_act.bf.dest_info_change_en = 1;
+	hw_act.bf.dest_info =
+		(DEST_INFO_TYPE_PORT_ID << DEST_INFO_TYPE_SHIFT) |
+		out_vp_id;
+
+	ppe_ipo_rule_reg_set(priv, &hw_reg, rule_id);
+	ppe_ipo_mask_reg_set(priv, &hw_mask, rule_id);
+	ppe_ipo_action_set(priv, &hw_act, rule_id);
+}
+
+/*
+ * setup IPv6 dest match
+ *
+ * cannot match whole 128 bits at once, 3 rules have to be setup and
+ * chained together
+ *
+ * offset ranges from 0 to 2
+ */
+static void
+ppe_ipo_rule_setup_dst_ipv6_match(const u8 *dip6,
+				  unsigned int offset,
+				  union ipo_rule_reg_u *hw_reg,
+				  union ipo_mask_reg_u *hw_mask)
+{
+	u32 val;
+
+	memset(hw_reg, 0, sizeof(*hw_reg));
+	memset(hw_mask, 0, sizeof(*hw_mask));
+
+	switch (offset) {
+	case 0:
+		hw_reg->bf.rule_type = IPO_RULE_TYPE_IPV6_DIP0;
+
+		/* 12 => 15 */
+		memcpy(&val, &dip6[12], 4);
+		val = be32_to_cpu(val);
+		hw_reg->bf.rule_field_0 = val;
+
+		/* 10 => 11 */
+		memcpy(&val, &dip6[8], 4);
+		val = be32_to_cpu(val);
+		val &= 0xffff;
+		hw_reg->bf.rule_field_1 = val;
+
+		/* only match IP section, not fragment/ip type */
+		hw_mask->bf.maskfield_0 = 0xffffffff;
+		hw_mask->bf.maskfield_1 = 0x0000ffff;
+		break;
+
+	case 1:
+		hw_reg->bf.rule_type = IPO_RULE_TYPE_IPV6_DIP1;
+
+		/* 6 => 9 */
+		memcpy(&val, &dip6[6], 4);
+		val = be32_to_cpu(val);
+		hw_reg->bf.rule_field_0 = val;
+
+		/* 4 => 5 */
+		memcpy(&val, &dip6[4], 4);
+		val = be32_to_cpu(val);
+		val >>= 16;
+		hw_reg->bf.rule_field_1 = val;
+
+		/* only match IP section, not fragment/ip type */
+		hw_mask->bf.maskfield_0 = 0xffffffff;
+		hw_mask->bf.maskfield_1 = 0x0000ffff;
+		break;
+
+	case 2:
+		hw_reg->bf.rule_type = IPO_RULE_TYPE_IPV6_DIP2;
+
+		/* 2 => 3 */
+		memcpy(&val, &dip6[0], 4);
+		val = be32_to_cpu(val);
+		hw_reg->bf.rule_field_0 = (val << 16);
+
+		/* 0 => 1 */
+		memcpy(&val, &dip6[0], 4);
+		val = be32_to_cpu(val);
+		val >>= 16;
+		hw_reg->bf.rule_field_1 = val;
+
+		/* only match IP section, not port */
+		hw_mask->bf.maskfield_0 = 0xffff0000;
+		hw_mask->bf.maskfield_1 = 0x0000ffff;
+		break;
+	}
+
+}
+
+/*
+ * install IPO rules to force virtual_port_id queue id on traffic
+ * matching partial dest ipv6
+ */
+void
+ppe_ipo_rule_dst_ip6_flow_force_queue_id(struct ipq95xx_ess_priv *priv,
+					 int rule_id,
+					 int port_id,
+					 const u8 *dip6,
+					 unsigned int dip_off,
+					 int out_queue_id)
+{
+	union ipo_rule_reg_u hw_reg;
+	union ipo_mask_reg_u hw_mask;
+	union ipo_action_u hw_act;
+	unsigned int port_mask;
+
+	memset(&hw_reg, 0, sizeof(hw_reg));
+	memset(&hw_mask, 0, sizeof(hw_mask));
+	memset(&hw_act, 0, sizeof(hw_act));
+
+	ppe_ipo_rule_setup_dst_ipv6_match(dip6, dip_off, &hw_reg, &hw_mask);
+
+	/* match on port id */
+	hw_reg.bf.src_type = IPO_SRC_TYPE_PORTBITMAP;
+	port_mask = (1 << port_id);
+	hw_reg.bf.src_0 = port_mask;
+	hw_reg.bf.src_1 = port_mask >> 1;
+
+	/* set higher prio than default rules to assign VP port */
+	hw_reg.bf.pri = 2;
+
+	hw_act.bf.fwd_cmd = 0; /* 0 = forward, 1 = drop */
+
+	if (out_queue_id > 0) {
+		hw_act.bf.qid_en = 1;
+		hw_act.bf.qid = out_queue_id;
+	}
+
+	ppe_ipo_rule_reg_set(priv, &hw_reg, rule_id);
+	ppe_ipo_mask_reg_set(priv, &hw_mask, rule_id);
+	ppe_ipo_action_set(priv, &hw_act, rule_id);
+}
+
+/*
+ * install IPO rules to match UDF0
+ */
+void
+ppe_ipo_rule_udf0_match(struct ipq95xx_ess_priv *priv,
+			int rule_id,
+			int port_id,
+			u16 match_mask,
+			u16 match_value)
+{
+	union ipo_rule_reg_u hw_reg;
+	union ipo_mask_reg_u hw_mask;
+	union ipo_action_u hw_act;
+	unsigned int port_mask;
+
+	memset(&hw_reg, 0, sizeof(hw_reg));
+	memset(&hw_mask, 0, sizeof(hw_mask));
+	memset(&hw_act, 0, sizeof(hw_act));
+
+	/* match on port id */
+	hw_reg.bf.src_type = IPO_SRC_TYPE_PORTBITMAP;
+	port_mask = (1 << port_id);
+	hw_reg.bf.src_0 = port_mask;
+	hw_reg.bf.src_1 = port_mask >> 1;
+
+	/* set higher prio than default rules to assign VP port */
+	hw_reg.bf.pri = 2;
+
+	/* match udf0 from profile0 */
+	hw_reg.bf.rule_type = IPO_RULE_TYPE_WINDOW0;
+
+	hw_reg.bf.rule_field_0 = match_value;
+
+	/* udf0 valid */
+	hw_reg.bf.rule_field_1 |= (1 << 16);
+
+	/* is_ipv6 */
+	hw_reg.bf.rule_field_1 |= (1 << 19);
+
+	/* is_ip */
+	hw_reg.bf.rule_field_1 |= (1 << 20);
+
+	/* matched udf0 portion */
+	hw_mask.bf.maskfield_0 = match_mask;
+
+	/* udf0 valid mask */
+	hw_mask.bf.maskfield_1 |= (1 << 16);
+	hw_mask.bf.maskfield_1 |= (1 << 19);
+	hw_mask.bf.maskfield_1 |= (1 << 20);
+
+	ppe_ipo_rule_reg_set(priv, &hw_reg, rule_id);
+	ppe_ipo_mask_reg_set(priv, &hw_mask, rule_id);
+	ppe_ipo_action_set(priv, &hw_act, rule_id);
+}
+
+/*
+ *
+ */
+void
+ppe_ipo_chain_rules(struct ipq95xx_ess_priv *priv,
+		    unsigned int first_rule, unsigned int count)
+{
+	unsigned int i, rule_row, rule_off, align;
+	union ipo_rule_ext_1_u ext1;
+	union ipo_rule_ext_2_u ext2;
+	union ipo_rule_ext_4_u ext4;
+	bool spanned[8];
+
+	BUG_ON(count <= 1);
+
+	/* check first rule alignment */
+	align = __roundup_pow_of_two(count);
+	BUG_ON(first_rule % align);
+
+	rule_row = first_rule / 8;
+	rule_off = first_rule % 8;
+
+	/* rules must be on same row */
+	BUG_ON((first_rule + count - 1) / 8 != rule_row);
+
+	memset(spanned, 0, sizeof (spanned));
+	for (i = 0; i < count; i++)
+		spanned[rule_off + i] = true;
+
+	ext1.val = ppe_readl(priv, IPO_RULE_EXT1_TBL_REG(rule_row));
+	if (spanned[0] && spanned[1])
+		ext1.bf.ext2_0 = 1;
+	if (spanned[2] && spanned[3])
+		ext1.bf.ext2_1 = 1;
+	if (spanned[4] && spanned[5])
+		ext1.bf.ext2_2 = 1;
+	if (spanned[6] && spanned[7]) /* bugfix: was spanned[6] twice (copy-paste) */
+		ext1.bf.ext2_3 = 1;
+	ppe_writel(priv, IPO_RULE_EXT1_TBL_REG(rule_row), ext1.val);
+
+	ext2.val = ppe_readl(priv, IPO_RULE_EXT2_TBL_REG(rule_row));
+	if (spanned[0] && spanned[2])
+		ext2.bf.ext4_0 = 1;
+	if (spanned[4] && spanned[6])
+		ext2.bf.ext4_1 = 1;
+	ppe_writel(priv, IPO_RULE_EXT2_TBL_REG(rule_row), ext2.val);
+
+	ext4.val = ppe_readl(priv, IPO_RULE_EXT4_TBL_REG(rule_row));
+	if (spanned[0] && spanned[4])
+		ext4.bf.ext8 = 1;
+	ppe_writel(priv, IPO_RULE_EXT4_TBL_REG(rule_row), ext4.val);
+}
+
+/*
+ *
+ */
+int ppe_ipo_allocate_rules_id(struct ipq95xx_ess_priv *priv,
+			      unsigned int count)
+{
+	int align_count, rule;
+
+	align_count = count;
+	if (count > 1)
+		align_count = __roundup_pow_of_two(count);
+
+	rule = priv->ipo_rule_id;
+	if (rule % align_count) /* align start so chained rules can share a row */
+		rule += align_count - (rule % align_count);
+
+	if (rule + count > 512) /* bugfix: was >=, rejecting a valid block ending at id 511 */
+		return -1;
+
+	priv->ipo_rule_id = rule + count;
+	return rule;
+}
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./ipq95xx_ess.h linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/ipq95xx_ess.h
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./ipq95xx_ess.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/ipq95xx_ess.h	2024-01-19 17:01:19.845846484 +0100
@@ -0,0 +1,640 @@
+#ifndef IPQ95XX_ESS_H_
+#define IPQ95XX_ESS_H_
+
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/cpu.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/reset.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/clk-provider.h>
+#include <linux/netdevice.h>
+#include <linux/of_mdio.h>
+#include <net/page_pool.h>
+
+#include "regs/edma_desc.h"
+
+#define RATE_1D25MHZ			1250000
+#define RATE_2D5MHZ			2500000
+#define RATE_12D5MHZ			12500000
+#define RATE_25MHZ			25000000
+#define RATE_78MHZ			78125000
+#define RATE_125MHZ			125000000
+#define RATE_156MHZ			156250000
+#define RATE_250MHZ			250000000
+#define RATE_312MHZ			312500000
+
+/*
+ * worst case for all hw, used to size static arrays/bitmaps
+ */
+#define ESS_PORT_MAX			6
+#define ESS_UNIPHY_MAX			3
+#define ESS_QUEUES_RXFR_MAX		8
+#define ESS_QUEUES_RXDR_MAX		24
+#define ESS_QUEUES_TXDR_MAX		32
+
+/*
+ * switch hardware port id range
+ */
+#define ESS_PHYS_CPU_PORT_ID		0
+#define ESS_PHYS_PORT_COUNT		8
+#define ESS_VIRT_PORT_COUNT		256
+
+/*
+ * start using virtual port id from this offset
+ */
+#define ESS_ALLOC_VIRT_PORT_OFFSET	64
+
+/*
+ * profile ID used
+ */
+#define ESS_NO_RSS_PROFILE_ID		15
+
+/*
+ * hardware freezes with more than 32 TSO segs
+ */
+#define ESS_TSO_MAX_SEGS		32
+#define ESS_TSO_MIN_MSS			256
+#define ESS_TSO_MAX_MSS			10240
+
+/*
+ * hardware prefetches 32 descriptors in tx queue, so there could be
+ * more descriptors inside hardware than what we push in tx queue
+ */
+#define ESS_TX_PREFETCH_COUNT		32
+
+struct ipq95xx_ess_priv;
+
+/*
+ * config extracted from device tree
+ */
+struct ess_cpu_port_config {
+	unsigned int			rx_qm_qid_first;
+	unsigned int			rx_qm_qid_count;
+};
+
+struct ess_port_config {
+	bool				used;
+	struct device_node		*np;
+	unsigned int			ppe_port_id;
+	unsigned int			virt_port_id;
+
+	int				uniphy_id;
+	int				uniphy_channel_id;
+	phy_interface_t			phy_mode;
+	const char			*name;
+	u8				mac_addr[ETH_ALEN];
+};
+
+struct ess_config {
+	struct ess_cpu_port_config	cpu_port;
+	struct ess_port_config		ports[ESS_PORT_MAX];
+};
+
+/*
+ * This driver provides two clocks per uniphy, which are the clocks of
+ * the raw serdes link. The NSSCC dividers then adjusts their
+ * frequencies before they are fed from/to the PPE mac.
+ */
+struct clk_uniphy {
+	unsigned int	rate;
+	struct clk_hw	hw;
+	struct clk	*clk;
+};
+
+struct ess_uniphy_clocks {
+	struct clk_uniphy		*rx;
+	struct clk_uniphy		*tx;
+
+	struct clk			*sys;
+	struct clk			*ahb;
+};
+
+enum usxsgmii_aneg_state {
+	USX_ANEG_INIT,
+	USX_ANEG_RPCS_DOWN,
+	USX_ANEG_SETUP,
+	USX_ANEG_WAIT,
+	USX_ANEG_DONE_LINK_DOWN,
+	USX_ANEG_DONE_LINK_UP,
+};
+
+struct ess_uniphy_channel {
+	unsigned int			id;
+	bool				aneg_error_reported;
+	unsigned long			aneg_next_restart;
+	enum usxsgmii_aneg_state	usxgmii_aneg_state;
+	int				usxgmii_aneg_speed;
+	int				usxgmii_aneg_duplex;
+	struct ess_uniphy		*uniphy;
+};
+
+/* max channels is 5 for PSGMII mode */
+#define ESS_MAX_UNIPHY_CHANNELS		5
+
+struct ess_uniphy {
+	unsigned int			id;
+	void __iomem			*base;
+	bool				powered_up;
+	struct ess_uniphy_clocks	*clocks;
+	struct reset_control		*xpcs_rst;
+	struct reset_control		*soft_rst;
+	struct reset_control		*sys_rst;
+	struct mutex			lock;
+
+	struct ess_uniphy_channel	channels[ESS_MAX_UNIPHY_CHANNELS];
+	unsigned int			channels_used;
+
+	phy_interface_t			cur_interface;
+	int				cur_mode;
+
+	char				debounce_wq_name[30];
+	struct workqueue_struct		*debounce_wq;
+	struct delayed_work		debounce_work;
+	bool				debounced_link;
+	bool				debounced_link_down_latch;
+	unsigned int			debounced_consec_up;
+
+	struct dentry			*regs_dbg;
+	struct ipq95xx_ess_priv		*priv;
+};
+
+struct ess_port_clocks {
+	struct clk			*uniphy_rx_clk;
+	struct clk			*uniphy_tx_clk;
+	struct clk			*mac_rx_clk;
+	struct clk			*mac_tx_clk;
+
+	struct reset_control		*mac_rst;
+	struct reset_control		*port_rst;
+};
+
+enum ess_mac_type {
+	ESS_MAC_GMAC,
+	ESS_MAC_XGMAC,
+};
+
+/*
+ * RX DMA ring definitions
+ */
+struct rx_fill_ring {
+	spinlock_t			lock;
+	int				index;
+	int				hw_index;
+	int				ring_size;
+
+	struct page_pool		*pp;
+	struct napi_struct		napi;
+	unsigned int			descs_avail_cache;
+	int				prod_idx;
+	unsigned int			rx_desc_area_size;
+	struct device			*dev;
+	struct edma_rxfill_desc		*rx_desc_area;
+	dma_addr_t			rx_desc_dma;
+
+	struct ess_cpu_port		*cport;
+	int				irq;
+	char				irq_name[32];
+	struct cpumask			*mask;
+} ____cacheline_internodealigned_in_smp;
+
+struct rx_done_ring {
+	int				index;
+	int				hw_index;
+	int				ring_size;
+
+	struct page_pool		*pp;
+	struct napi_struct		napi;
+	int				cons_idx;
+
+	unsigned int			rx_desc_area_size;
+	struct device			*dev;
+	struct edma_rxdesc_desc		*rx_desc_area;
+	dma_addr_t			rx_desc_dma;
+
+	unsigned int			rx_sec_desc_area_size;
+	struct edma_rx_sec_desc		*rx_sec_desc_area;
+	dma_addr_t			rx_sec_desc_dma;
+
+	struct ess_cpu_port		*cport;
+	int				irq;
+	char				irq_name[32];
+	struct cpumask			*mask;
+} ____cacheline_internodealigned_in_smp;
+
+/*
+ * tx descriptor does not allow us to get back enough information
+ * after transmission, we need both the skb/page virtual address and
+ * the dma_addr_t
+ *
+ * resort to using out of band information
+ */
+struct tx_meta_info {
+	u32				paddr;
+	u32				map_len;
+	union {
+		struct sk_buff		*skb;
+		struct page		*page;
+	};
+};
+
+/*
+ * TX DMA ring definitions
+ *
+ * uses both 1 tx desc and 1 tx completion queue
+ */
+struct tx_ring {
+	struct ipq95xx_ess_priv		*priv;
+	bool				setup_done;
+	u32				last_use_time;
+
+	int				index;
+	int				hw_index;
+	int				ring_size;
+	int				meta_ring_size;
+
+	spinlock_t			lock;
+	int				desc_prod_idx;
+	bool				desc_prod_dirty;
+	unsigned int			descs_avail_cache;
+
+	int				cmpl_cons_idx;
+	unsigned int			descs_sent_cache;
+
+	int				meta_idx;
+
+	struct device			*dev;
+	unsigned int			tx_desc_area_size;
+	struct edma_txdesc_desc		*tx_desc_area;
+	dma_addr_t			tx_desc_dma;
+
+	struct tx_meta_info		*tx_meta_info;
+
+	unsigned int			tx_sec_desc_area_size;
+	struct edma_tx_sec_desc		*tx_sec_desc_area;
+	dma_addr_t			tx_sec_desc_dma;
+
+	unsigned int			tx_cmpl_desc_area_size;
+	struct edma_txcmpl_desc		*tx_cmpl_desc_area;
+	dma_addr_t			tx_cmpl_desc_dma;
+
+	/* only valid for queues with IRQ assigned */
+	struct napi_struct		napi;
+	struct ess_port			*port;
+	int				irq;
+	char				irq_name[32];
+	struct cpumask			*mask;
+} ____cacheline_internodealigned_in_smp;
+
+/*
+ * per-port data
+ */
+struct ess_port {
+	struct device_node		*np;
+
+	/* physical port id (range from 0 to 6), used in device
+	 * tree */
+	unsigned int			id;
+
+	/* physical port id using PPE naming (0 is CPU port, 7 is
+	 * security engine, physical ports starts at 1) */
+	unsigned int			ppe_port_id;
+
+	/* bitmap of assigned done descriptor rx/tx rings (per
+	 * hw_index) */
+	DECLARE_BITMAP(assigned_txdr, ESS_QUEUES_TXDR_MAX);
+	unsigned int			txq_count;
+
+	/* buf & queue sizes */
+	size_t				txq_buf_count;
+
+	struct net_device		*netdev;
+
+	/*
+	 * EDMA data
+	 */
+	struct tx_ring			*tx_rings;
+
+	/* clocks for this port */
+	struct ess_port_clocks		*clocks;
+	bool				clocks_enabled;
+
+	/* uniphy associated to this port */
+	unsigned int			uniphy_id;
+	unsigned int			uniphy_channel_id;
+
+	DECLARE_PHY_INTERFACE_MASK(supported_phy_modes);
+	struct phylink			*phylink;
+	struct phylink_config		phylink_config;
+	struct phylink_pcs		phylink_pcs;
+	struct ess_uniphy_channel	*cur_uniphy_channel;
+	enum ess_mac_type		cur_mac;
+
+	/* irq coalescing config*/
+	unsigned int			tx_irq_coalesce_us;
+	unsigned int			tx_irq_coalesce_pkts;
+
+	/* eee status */
+	bool				eee_enabled;
+	bool				eee_active;
+	bool				eee_tx_lpi_enabled;
+
+	struct dentry			*regs_dbg;
+	struct list_head		next;
+	struct ipq95xx_ess_priv		*priv;
+	struct mii_bus			*mii_bus;
+};
+
+/*
+ * cpu port data, only for rx
+ */
+struct ess_cpu_port_pcpu {
+
+	/* active ports per physical id, used in rx path */
+	struct ess_port			*ports_by_ppe_id[ESS_PHYS_PORT_COUNT];
+};
+
+struct ess_cpu_port {
+	bool				active;
+	int				ref_count;
+
+	/* assigned PPE QM queue to this port for rx traffic (PPE to
+	 * CPU) */
+	unsigned int			rx_qm_qid_first;
+	unsigned int			rx_qm_qid_count;
+	unsigned int			rxq_max_count;
+
+	DECLARE_BITMAP(assigned_rxdr, ESS_QUEUES_RXDR_MAX);
+	unsigned int			rxq_count;
+
+	size_t				rx_pkt_size;
+	size_t				rx_frag_mapped_size;
+	size_t				rx_frag_size;
+	size_t				rxq_buf_count;
+
+	struct net_device		dummy_netdev;
+
+	struct ess_cpu_port_pcpu __percpu	*percpu_priv;
+	struct rx_fill_ring		*rxf_rings;
+	struct rx_done_ring		*rxd_rings;
+
+	unsigned int			rx_irq_coalesce_us;
+	unsigned int			rx_irq_coalesce_pkts;
+
+	struct ipq95xx_ess_priv		*priv;
+};
+
+enum ipq95xx_ess_type {
+	ESS_TYPE_IPQ95XX,
+};
+
+struct ipq95xx_ess_priv {
+	struct platform_device		*pdev;
+	enum ipq95xx_ess_type		ess_type;
+
+	/* debugfs files */
+	struct dentry			*dbg_root;
+	struct dentry			*regs_ppe_dbg;
+	struct dentry			*regs_edma_dbg;
+	struct dentry			*phys_port_counters_dbg;
+	struct dentry			*port_counters_dbg;
+	struct dentry			*queue_counters_dbg;
+	struct dentry			*drop_dbg;
+	struct dentry			*drop_reason_dbg;
+	struct dentry			*fdb_dbg;
+	struct dentry			*vsi_dbg;
+
+	struct reset_control		*ppe_rst;
+	struct reset_control		*edma_rst;
+
+	/* PPE/edma/uniphy registers area */
+	void __iomem			*regs[3];
+
+	/* uniphy data */
+	size_t				hw_uniphy_count;
+	struct ess_uniphy		uniphys[ESS_UNIPHY_MAX];
+	struct ess_uniphy_clocks	uniphy_clocks[ESS_UNIPHY_MAX];
+
+	/* ports data */
+	struct ess_cpu_port		cpu_port;
+	struct ess_port_clocks		port_clocks[ESS_PORT_MAX];
+	size_t				hw_port_count;
+	struct list_head		ports;
+
+	/* bitmask of allocated descriptor rings (per hw_index), hold
+	 * rtnl lock to modify */
+	DECLARE_BITMAP(used_rxfr, ESS_QUEUES_RXFR_MAX);
+	unsigned int			rxfr_count;
+	DECLARE_BITMAP(used_rxdr, ESS_QUEUES_RXDR_MAX);
+	unsigned int			rxdr_count;
+	DECLARE_BITMAP(used_txdr, ESS_QUEUES_TXDR_MAX);
+	unsigned int			txdr_count;
+	unsigned int			txdr_reserved_start;
+	unsigned int			txdr_reserved_count;
+
+	int				ipo_rule_id;
+};
+
+struct ess_tdm_config_entry {
+	bool	enabled;
+	bool	is_egress;
+	u8	port_id;
+};
+
+struct ess_psched_config_entry {
+	u8	port_bitmap;
+	u8	en_sched_port;
+	u8	de_sched_port;
+	u8	de_sched_sec_port;
+	bool	de_sched_sec_port_en;
+};
+
+/*
+ * io helpers (regs[0] = PPE, regs[1] = EDMA register windows)
+ */
+static inline u32 ppe_readl(struct ipq95xx_ess_priv *priv, u32 reg)
+{
+	void __iomem *base = priv->regs[0];
+	return readl(base + reg);
+}
+
+static inline void ppe_writel(struct ipq95xx_ess_priv *priv, u32 reg, u32 val)
+{
+	void __iomem *base = priv->regs[0];
+	writel(val, base + reg);
+}
+
+static inline u32 edma_readl(struct ipq95xx_ess_priv *priv, u32 reg)
+{
+	void __iomem *base = priv->regs[1];
+	return readl(base + reg);
+}
+
+static inline u32 edma_readl_relaxed(struct ipq95xx_ess_priv *priv, u32 reg)
+{
+	void __iomem *base = priv->regs[1];
+	return readl_relaxed(base + reg);
+}
+
+static inline void edma_writel(struct ipq95xx_ess_priv *priv, u32 reg, u32 val)
+{
+	void __iomem *base = priv->regs[1];
+	writel(val, base + reg);
+}
+
+static inline void edma_writel_relaxed(struct ipq95xx_ess_priv *priv,
+				       u32 reg, u32 val)
+{
+	void __iomem *base = priv->regs[1];
+	writel_relaxed(val, base + reg);
+}
+
+/*
+ * clocks.c
+ */
+int ess_clock_init(struct ipq95xx_ess_priv *priv,
+		   const struct ess_config *config);
+
+/*
+ * debug.c
+ */
+void port_dbg_init(struct ess_port *port);
+
+void port_dbg_release(struct ess_port *port);
+
+void ess_dbg_init(struct ipq95xx_ess_priv *priv);
+
+void ess_dbg_release(struct ipq95xx_ess_priv *priv);
+
+/*
+ * fdb.c
+ */
+int
+ess_ppe_fdb_get_entry_index(struct ipq95xx_ess_priv *priv,
+			    unsigned int vsi, const u8 *mac,
+			    int *index);
+
+int ess_ppe_fdb_add(struct ipq95xx_ess_priv *priv,
+		    unsigned int vsi,
+		    const u8 *mac, unsigned int port);
+
+
+/*
+ * ipo.c
+ */
+int ppe_ipo_allocate_rules_id(struct ipq95xx_ess_priv *priv,
+			      unsigned int count);
+
+void
+ppe_ipo_chain_rules(struct ipq95xx_ess_priv *priv,
+		    unsigned int first_rule, unsigned int count);
+
+void
+ppe_ipo_rule_src_port_force_vp_id_set(struct ipq95xx_ess_priv *priv,
+				      int rule_id,
+				      int port_id,
+				      int out_vp_id);
+
+void
+ppe_ipo_rule_udf0_match(struct ipq95xx_ess_priv *priv,
+			int rule_id,
+			int port_id,
+			u16 match_mask,
+			u16 match_value);
+
+void
+ppe_ipo_rule_dst_ip6_flow_force_queue_id(struct ipq95xx_ess_priv *priv,
+					 int rule_id,
+					 int port_id,
+					 const u8 *dip6,
+					 unsigned int dip_off,
+					 int out_queue_id);
+
+/*
+ * hwdesc.c
+ */
+unsigned int hwdesc_get_port_count(enum ipq95xx_ess_type type);
+
+unsigned int hwdesc_get_uniphy_count(enum ipq95xx_ess_type type);
+
+void hwdesc_get_queues_count(enum ipq95xx_ess_type type,
+			     unsigned int *rxfr,
+			     unsigned int *rxdr,
+			     unsigned int *txdr);
+
+int hwdesc_port_get_supported_phy_modes(enum ipq95xx_ess_type type,
+					unsigned int ppe_port_id,
+					int uniphy_id,
+					unsigned long *modes);
+
+int hwdesc_port_select_uniphy(enum ipq95xx_ess_type type,
+			      unsigned int ppe_port_id,
+			      phy_interface_t mode);
+
+int hwdesc_port_get_uniphy_channel_id(enum ipq95xx_ess_type type,
+				      unsigned int ppe_port_id,
+				      unsigned int uniphy_id);
+
+int hwdesc_get_tdm_config(enum ipq95xx_ess_type type,
+			  const struct ess_tdm_config_entry **plist,
+			  size_t *pcount);
+
+int hwdesc_get_psched_config(enum ipq95xx_ess_type type,
+			     const struct ess_psched_config_entry **plist,
+			     size_t *pcount);
+
+/*
+ * uniphy.c
+ */
+bool ess_uniphy_sync_ok_get(struct ess_uniphy_channel *uc);
+
+void
+ess_uniphy_channel_config_get(struct ess_uniphy_channel *uc,
+			      phy_interface_t *interface, int *mode);
+
+void ess_uniphy_channel_status_get(struct ess_uniphy_channel *uc,
+				   struct phylink_link_state *state);
+
+void ess_uniphy_channel_an_restart(struct ess_uniphy_channel *uc);
+
+void ess_uniphy_channel_link_up(struct ess_uniphy_channel *uc,
+				int speed, int duplex);
+
+struct ess_uniphy_channel *
+ess_uniphy_channel_get(struct ipq95xx_ess_priv *priv,
+		       unsigned int uniphy_id,
+		       unsigned int channel,
+		       phy_interface_t interface, int mode,
+		       const unsigned long *advertising);
+
+void ess_uniphy_channel_put(struct ess_uniphy_channel *uc);
+
+int ess_uniphy_init(struct ipq95xx_ess_priv *priv, unsigned int uniphy_id);
+
+void ess_uniphy_release(struct ess_uniphy *uniphy);
+
+
+/*
+ * ports.c
+ */
+int ess_port_init(struct platform_device *pdev,
+		  struct ipq95xx_ess_priv *priv,
+		  const struct ess_port_config *pcfg,
+		  int port_id,
+		  unsigned int txdr_count);
+
+void ess_port_release(struct ess_port *port);
+
+int ess_cpu_port_init(struct platform_device *pdev,
+		      const struct ess_cpu_port_config *cfg,
+		      struct ipq95xx_ess_priv *priv);
+
+void ess_cpu_port_release(struct ipq95xx_ess_priv *priv);
+
+#endif /* !IPQ95XX_ESS_H_ */
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./main.c linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/main.c
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./main.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/main.c	2024-01-04 16:55:44.208658048 +0100
@@ -0,0 +1,1156 @@
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+
+#include "regs/ppe_regs.h"
+#include "regs/edma_regs.h"
+
+#include "ipq95xx_ess.h"
+
+/*
+ * TDM configuration: program the per-slot port schedule and enable it
+ */
+static int ppe_tdm_init(struct ipq95xx_ess_priv *priv)
+{
+	const struct ess_tdm_config_entry *list;
+	size_t count, i;
+	u32 val;
+
+	if (hwdesc_get_tdm_config(priv->ess_type, &list, &count))
+		return -ENODEV;
+
+	for (i = 0; i < count; ++i) {
+		const struct ess_tdm_config_entry *e = &list[i];
+
+		val = 0;
+		if (e->enabled)
+			val |= TDM_CFG_PORT_VALID_MASK;
+		if (e->is_egress)
+			val |= TDM_CFG_PORT_DIR_EGRESS_MASK;
+		val |= e->port_id << TDM_CFG_PORT_NUM_SHIFT;
+		ppe_writel(priv, TDM_CFG_REG(i), val);
+	}
+
+	/* zero remaining entries to ease debug */
+	for (; i < 128; ++i)
+		ppe_writel(priv, TDM_CFG_REG(i), 0);
+
+	val = ppe_readl(priv, TDM_CTRL_REG);
+	val &= ~TDM_CTRL_DEPTH_MASK;
+	val &= ~TDM_CTRL_OFFSET_MASK;
+	val |= count << TDM_CTRL_DEPTH_SHIFT;
+	val |= TDM_CTRL_EN_MASK;
+	ppe_writel(priv, TDM_CTRL_REG, val);
+
+	return 0;
+}
+
+/*
+ * port scheduler init: program the enqueue/dequeue port TDM table
+ */
+static int ppe_psched_init(struct ipq95xx_ess_priv *priv)
+{
+	const struct ess_psched_config_entry *list;
+	size_t count, i;
+	u32 val;
+
+	if (hwdesc_get_psched_config(priv->ess_type, &list, &count))
+		return -ENODEV;
+
+	for (i = 0; i < count; ++i) {
+		const struct ess_psched_config_entry *e = &list[i];
+
+		val = 0;
+		val |= e->de_sched_port << PSCH_TDM_CFG_DES_PORT_SHIFT;
+		val |= e->en_sched_port << PSCH_TDM_CFG_ENS_PORT_SHIFT;
+		val |= e->port_bitmap << PSCH_TDM_CFG_ENS_PORT_BM_SHIFT;
+		if (e->de_sched_sec_port_en)
+			val |= PSCH_TDM_CFG_DES_SEC_PORT_EN_MASK;
+		val |= e->de_sched_sec_port << PSCH_TDM_CFG_DES_SEC_PORT_SHIFT;
+		ppe_writel(priv, PSCH_TDM_CFG_TBL_REG(i), val);
+	}
+
+	/* zero remaining entries to ease debug */
+	for (; i < 128; ++i)
+		ppe_writel(priv, PSCH_TDM_CFG_TBL_REG(i), 0);
+
+	val = ppe_readl(priv, PSCH_TDM_DEPTH_CFG_REG);
+	val &= ~PSCH_TDM_DEPTH_CFG_MASK;
+	val |= count << PSCH_TDM_DEPTH_CFG_SHIFT;
+	ppe_writel(priv, PSCH_TDM_DEPTH_CFG_REG, val);
+
+
+	return 0;
+}
+
+/*
+ * initialize buffer manager flow control
+ *
+ * numerical values extracted from QSDK 12.4
+ */
+static void ppe_bm_fc_init(struct ipq95xx_ess_priv *priv)
+{
+	size_t i;
+
+	/* setup shared group 0 number of buffers */
+	ppe_writel(priv, PORT_FC_SHARED_GRP_CFG_REG(0), 1550);
+
+	for (i = 0; i < BM_PORT_COUNT; i++) {
+		u32 val;
+
+		/* enable flow control for virtual ports only (packets
+		 * coming from CPU TX DMA) */
+		val = ppe_readl(priv, PORT_FC_MODE_REG(i));
+		if (BM_PORT_IS_VIRTUAL(i))
+			val |= PORT_FC_MODE_EN_MASK;
+		else
+			val &= ~PORT_FC_MODE_EN_MASK;
+		ppe_writel(priv, PORT_FC_MODE_REG(i), val);
+	}
+
+	/* setup per-port buffer limits (react, shared ceiling, weight, resume offset) */
+	for (i = 0; i < BM_PORT_COUNT; i++) {
+		u32 react_buf, share_ceiling, weight, resume_off;
+		u32 val[2];
+
+		/* make all ports use shared group 0 */
+		ppe_writel(priv, PORT_FC_GRP_ID_REG(i), 0);
+
+		if (BM_PORT_IS_CPU(i) || BM_PORT_IS_VIRTUAL(i))
+			react_buf = 100;
+		else if (BM_PORT_IS_PHYSICAL(i))
+			react_buf = 128;
+		else
+			react_buf = 0;
+
+		if (BM_PORT_IS_CPU(i)) {
+			share_ceiling = 1146;
+			resume_off = 8;
+			/* shared pool weight x1 */
+			weight = 7;
+		} else {
+			share_ceiling = 250;
+			resume_off = 36;
+			/* shared pool weight x1/4 */
+			weight = 4;
+		}
+
+		/* CFG1/CFG2 need to be read/written back to back */
+		val[0] = ppe_readl(priv, PORT_FC_CFG1_REG(i));
+		val[1] = ppe_readl(priv, PORT_FC_CFG2_REG(i));
+
+		val[0] &= ~PORT_FC_CFG_REACT_LIM_MASK;
+		val[0] |= react_buf << PORT_FC_CFG_REACT_LIM_SHIFT;
+		val[0] &= ~PORT_FC_CFG_RESM_FLOOR_MASK;
+		val[0] &= ~PORT_FC_CFG_RESM_OFF_MASK;
+		val[0] |= resume_off << PORT_FC_CFG_RESM_OFF_SHIFT;
+		val[0] &= ~PORT_FC_CFG_SH_CEIL0_MASK;
+		val[0] |= ((share_ceiling << PORT_FC_CFG_SH_CEIL0_SHIFT)) &
+			PORT_FC_CFG_SH_CEIL0_MASK;
+
+		val[1] &= ~PORT_FC_CFG2_PREALLOC_MASK;
+		val[1] &= ~PORT_FC_CFG2_SH_CEIL1_MASK;
+		val[1] |= (share_ceiling >> 3) << PORT_FC_CFG2_SH_CEIL1_SHIFT;
+		val[1] &= ~PORT_FC_CFG2_SH_WEIGHT_MASK;
+		val[1] |= weight << PORT_FC_CFG2_SH_WEIGHT_SHIFT;
+		val[1] |= PORT_FC_CFG2_SH_DYN_MASK;
+
+		ppe_writel(priv, PORT_FC_CFG1_REG(i), val[0]);
+		ppe_writel(priv, PORT_FC_CFG2_REG(i), val[1]);
+	}
+}
+
+/*
+ * setup queue access control (AC): per-queue buffer thresholds for unicast/multicast queues and the shared AC group 0 limit
+ */
+static void nss_ppe_setup_ac(struct ipq95xx_ess_priv *priv)
+{
+	u32 vals[4];
+	int i, queue;
+
+	/* ucast queues, each entry spans 4 config words */
+	for (queue = 0; queue < 256; queue++) {
+		vals[0] = (AC_UNI_CFG0_AC_EN_MASK |
+			   AC_UNI_CFG0_SHRD_DYN_EN_MASK |
+			   (6 << AC_UNI_CFG0_SHRD_WEIGHT_SHIFT) |
+			   (800 << AC_UNI_CFG0_SHRD_CEIL_SHIFT));
+		vals[1] = 0;
+		vals[2] = 0;
+		vals[3] = (36 << AC_UNI_CFG3_GRN_RESUME_OFF_HI_SHIFT);
+
+		for (i = 0; i < 4; i++)
+			ppe_writel(priv, AC_UNI_QUEUE_CFG_TBL_REG(queue, i),
+				   vals[i]);
+	}
+
+	/* mcast queue, each entry spans 3 config words */
+	for (queue = 0; queue < 44; queue++) {
+		vals[0] = (AC_MUL_CFG0_AC_EN_MASK |
+			   (250 << AC_MUL_CFG0_SHR_CEIL_SHIFT));
+		vals[1] = 0;
+		vals[2] = 36 << AC_MUL_CFG2_GRN_RESUME_OFF_HI_SHIFT;
+
+		for (i = 0; i < 3; i++)
+			ppe_writel(priv, AC_MUL_QUEUE_CFG_TBL_REG(queue, i),
+				   vals[i]);
+	}
+
+	/* assign the ac group 0 with buffer number */
+	vals[0] = 0;
+	vals[1] = (2000 << AC_GRP_CFG1_GRP_LIMIT_SHIFT);
+	vals[2] = 0;
+	vals[3] = 0;
+
+	for (i = 0; i < 4; i++)
+		ppe_writel(priv, AC_GRP_CFG_TBL_REG(0, i), vals[i]);
+}
+
+/*
+ * set default VSI for port, used to assign VSI on traffic received on
+ * the port; only table word 1 carries the VSI, the others are zeroed
+ */
+static void ppe_vsi_port_set(struct ipq95xx_ess_priv *priv,
+			     int port, int vsi)
+{
+	ppe_writel(priv, L3_VP_PORT_TBL0_REG(port), 0x0);
+	ppe_writel(priv, L3_VP_PORT_TBL1_REG(port),
+		   L3_VP_PORT_TBL1_VSI_VALID_MASK |
+		   (vsi << L3_VP_PORT_TBL1_VSI_SHIFT));
+	ppe_writel(priv, L3_VP_PORT_TBL2_REG(port), 0x0);
+	ppe_writel(priv, L3_VP_PORT_TBL3_REG(port), 0x0);
+}
+
+/*
+ * set VSI member ports: unknown unicast/multicast and broadcast are flooded to the same member set, address learning is enabled on the VSI
+ */
+static void ppe_vsi_set_members(struct ipq95xx_ess_priv *priv, int vsi,
+				u8 port_mask)
+{
+	u32 val;
+
+	val = (port_mask << VSI_TBL0_MEMB_MAP_SHIFT) |
+		(port_mask << VSI_TBL0_UNK_UNI_MAP_SHIFT) |
+		(port_mask << VSI_TBL0_UNK_MULTI_MAP_SHIFT) |
+		(port_mask << VSI_TBL0_BCAST_MAP_SHIFT);
+	ppe_writel(priv, VSI_TBLx_REG(vsi, 0), val);
+
+	ppe_writel(priv, VSI_TBLx_REG(vsi, 1),
+		   VSI_TBL1_NEW_ADDR_LEARN_EN_MASK |
+		   VSI_TBL1_ADDR_MOVE_EN_MASK);
+}
+
+
+/*
+ * static queue scheduler mapping: one L0/L1 scheduler path per port, plus steering of physical port traffic to CPU virtual ports
+ */
+static void ppe_setup_scheduler(struct ipq95xx_ess_priv *priv,
+				struct ess_config *cfg)
+{
+	int i, port, queue_id, cpu_base_queue_id;
+	int l0_drr_id, l1_drr_id, l0_sp_id, l1_sp_id;
+	u32 val;
+
+	/*
+	 * Setup queue scheduler, static mapping is used.
+	 *
+	 * 1 unicast queue per ethernet port and 4 per enabled
+	 * physical port on the CPU side
+	 */
+	l0_drr_id = l1_drr_id = 0;
+	l0_sp_id = l1_sp_id = 0;
+	queue_id = 0;
+
+	/*
+	 * setup L0/L1 for CPU port
+	 *
+	 * 4 queues in DRR
+	 *
+	 * allocate two L1 DRR (one for C path, one for E path) on
+	 * current L1 SP for prio 0
+	 */
+	ppe_writel(priv, L1_C_SP_CFG_TBL_REG(l1_sp_id, 0),
+		   l1_drr_id++);
+	ppe_writel(priv, L1_E_SP_CFG_TBL_REG(l1_sp_id, 0),
+		   l1_drr_id++);
+
+	/*
+	 * allocate two DRR (one for C path, one for E path) on
+	 * current L0 SP
+	 */
+	ppe_writel(priv, L0_C_SP_CFG_TBL_REG(l0_sp_id, 0),
+		   l0_drr_id++);
+	ppe_writel(priv, L0_E_SP_CFG_TBL_REG(l0_sp_id, 0),
+		   l0_drr_id++);
+
+	cpu_base_queue_id = queue_id;
+	cfg->cpu_port.rx_qm_qid_first = queue_id;
+	cfg->cpu_port.rx_qm_qid_count = 4;
+
+	for (i = 0; i < cfg->cpu_port.rx_qm_qid_count; i++) {
+		/* associate this level0 queue to the correct port */
+		ppe_writel(priv,
+			   L0_FLOW_PORT_TBL_REG(queue_id),
+			   ESS_PHYS_CPU_PORT_ID);
+
+		/* setup on prio 0 */
+		val = (l0_sp_id << L0_FLOW_MAP_SP_ID_SHIFT) |
+			(0 << L0_FLOW_MAP_C_PRI_SHIFT) |
+			(0 << L0_FLOW_MAP_E_PRI_SHIFT) |
+			(1 << L0_FLOW_MAP_C_DRR_WT_SHIFT) |
+			(1 << L0_FLOW_MAP_E_DRR_WT_SHIFT);
+		ppe_writel(priv, L0_FLOW_MAP_TBL_REG(queue_id), val);
+		queue_id++;
+	}
+
+	/* associate this level1 "flow" to the correct port */
+	ppe_writel(priv, L1_FLOW_PORT_TBL_REG(l0_sp_id),
+			   ESS_PHYS_CPU_PORT_ID);
+
+	/* setup on L1 prio 0 */
+	val = (l1_sp_id << L1_FLOW_MAP_SP_ID_SHIFT) |
+		(0 << L1_FLOW_MAP_C_PRI_SHIFT) |
+		(0 << L1_FLOW_MAP_E_PRI_SHIFT) |
+		(1 << L1_FLOW_MAP_C_DRR_WT_SHIFT) |
+		(1 << L1_FLOW_MAP_E_DRR_WT_SHIFT);
+	ppe_writel(priv, L1_FLOW_MAP_TBL_REG(l0_sp_id), val);
+	l0_sp_id++;
+
+	/* force traffic coming from each external port to CPU queues,
+	 * with an individual profile id so we can change each port
+	 * RSS indirection mapping */
+	for (i = 0; i < ARRAY_SIZE(cfg->ports); i++) {
+		struct ess_port_config *pcfg = &cfg->ports[i];
+		unsigned int virt_port;
+		int port, rule_id; /* shadows outer "port" */
+
+		if (!pcfg->used)
+			continue;
+
+		port = pcfg->ppe_port_id;
+		virt_port = port + ESS_ALLOC_VIRT_PORT_OFFSET;
+		pcfg->virt_port_id = virt_port;
+
+		/*
+		 * force all traffic incoming from that physical port
+		 * to be sent to the CPU corresponding virtual port. NOTE(review): the -1 exhaustion return of ppe_ipo_allocate_rules_id() is not checked.
+		 */
+		rule_id = ppe_ipo_allocate_rules_id(priv, 1);
+		ppe_ipo_rule_src_port_force_vp_id_set(priv, rule_id,
+						      port, virt_port);
+
+		/* make all outgoing unicast traffic to the CPU
+		 * virtual port corresponding to that physical port
+		 * use that base queue id, use dedicated profile for
+		 * each port so we can have different RSS hash
+		 * table */
+		val = (cpu_base_queue_id << UCAST_QUEUE_MAP_QID_SHIFT) |
+			(pcfg->ppe_port_id << UCAST_QUEUE_MAP_PROF_SHIFT);
+		ppe_writel(priv, UCAST_QUEUE_MAP_TBL_REG(virt_port),
+			   val);
+
+	}
+	l1_sp_id++;
+
+	/*
+	 * setup per-ethernet port L0/L1
+	 *
+	 * FIXME: only 1 unicast queue per port is scheduled for now
+	 */
+	for (port = 1; port < 7; port++) {
+		/*
+		 * setup l0
+		 */
+
+		/* associate the queue to the correct port */
+		ppe_writel(priv, L0_FLOW_PORT_TBL_REG(queue_id), port);
+
+		/* setup single prio */
+		val = (l0_sp_id << L0_FLOW_MAP_SP_ID_SHIFT) |
+			(0 << L0_FLOW_MAP_C_PRI_SHIFT) |
+			(0 << L0_FLOW_MAP_E_PRI_SHIFT) |
+			(1 << L0_FLOW_MAP_C_DRR_WT_SHIFT) |
+			(1 << L0_FLOW_MAP_E_DRR_WT_SHIFT);
+		ppe_writel(priv, L0_FLOW_MAP_TBL_REG(queue_id), val);
+
+		/* allocate two DRR per prio (one for C path, one for E
+		 * path)
+		 * number of resources for level0:
+		 *  - 64 SP
+		 *  - 160 * 2 DRR
+		 */
+		ppe_writel(priv, L0_C_SP_CFG_TBL_REG(l0_sp_id, 0),
+			   l0_drr_id++);
+		ppe_writel(priv, L0_E_SP_CFG_TBL_REG(l0_sp_id, 0),
+			   l0_drr_id++);
+
+		/*
+		 * setup l1
+		 */
+		/* associate this level1 "flow" to the correct port */
+		ppe_writel(priv, L1_FLOW_PORT_TBL_REG(l0_sp_id), port);
+
+		/* setup strict prio */
+		val = (l1_sp_id << L1_FLOW_MAP_SP_ID_SHIFT) |
+			(0 << L1_FLOW_MAP_C_PRI_SHIFT) |
+			(0 << L1_FLOW_MAP_E_PRI_SHIFT) |
+			(1 << L1_FLOW_MAP_C_DRR_WT_SHIFT) |
+			(1 << L1_FLOW_MAP_E_DRR_WT_SHIFT);
+		ppe_writel(priv, L1_FLOW_MAP_TBL_REG(l0_sp_id), val);
+		ppe_writel(priv, L1_C_SP_CFG_TBL_REG(l1_sp_id, 0),
+			   l1_drr_id++);
+		ppe_writel(priv, L1_E_SP_CFG_TBL_REG(l1_sp_id, 0),
+			   l1_drr_id++);
+
+		/* make all outgoing unicast traffic on this port use
+		 * that queue id, and a dedicated profile id so that
+		 * it has a distinct RSS/prio table */
+		val = (queue_id << UCAST_QUEUE_MAP_QID_SHIFT) |
+			(ESS_NO_RSS_PROFILE_ID << UCAST_QUEUE_MAP_PROF_SHIFT);
+		ppe_writel(priv, UCAST_QUEUE_MAP_TBL_REG(port), val);
+
+		queue_id++;
+		l0_sp_id++;
+		l1_sp_id++;
+	}
+
+	/* zero RSS indir table for "no-rss" profile */
+	for (i = 0; i < 256; i++) {
+		int idx = (i << UCAST_HASH_MAP_HASH_SHIFT) |
+			(ESS_NO_RSS_PROFILE_ID << UCAST_HASH_MAP_PROF_SHIFT);
+		ppe_writel(priv, UCAST_HASH_MAP_TBL_REG(idx), 0);
+	}
+
+	/*
+	 * we don't want any queue_id decision to be modified by
+	 * packet priority, so we zero the mapping table so that
+	 * queue_id is unchanged whatever priority the packet is
+	 */
+	for (i = 0; i < 256; i++)
+		ppe_writel(priv, UCAST_PRIO_MAP_TBL_REG(i), 0);
+}
+
+/*
+ * program RSS hashing: hash bit mask, random seed, and per-field mix/fin parameters for both the IPv4 and the generic (IPv6) register sets
+ */
+static void ppe_rss_init(struct ipq95xx_ess_priv *priv)
+{
+	u32 seed;
+
+	/* setup RSS hash */
+#define RSS_HASH_MASK 0xFFF
+
+#define RSS_HASH_FIN_INNER_OUTER_0 0x205
+#define RSS_HASH_FIN_INNER_OUTER_1 0x264
+#define RSS_HASH_FIN_INNER_OUTER_2 0x227
+#define RSS_HASH_FIN_INNER_OUTER_3 0x245
+#define RSS_HASH_FIN_INNER_OUTER_4 0x201
+
+#define RSS_HASH_PROTOCOL_MIX 0x13
+#define RSS_HASH_DPORT_MIX 0xb
+#define RSS_HASH_SPORT_MIX 0x13
+
+#define RSS_HASH_SIPV4_MIX 0x13
+#define RSS_HASH_DIPV4_MIX 0xb
+#define RSS_HASH_SIPV6_MIX_0 0x13
+#define RSS_HASH_SIPV6_MIX_1 0xb
+#define RSS_HASH_SIPV6_MIX_2 0x13
+#define RSS_HASH_SIPV6_MIX_3 0xb
+#define RSS_HASH_DIPV6_MIX_0 0x13
+#define RSS_HASH_DIPV6_MIX_1 0xb
+#define RSS_HASH_DIPV6_MIX_2 0x13
+#define RSS_HASH_DIPV6_MIX_3 0xb
+
+	get_random_bytes(&seed, sizeof (seed));
+
+	ppe_writel(priv, RSS_HASH_MASK_IPV4_REG, RSS_HASH_MASK);
+	ppe_writel(priv, RSS_HASH_SEED_IPV4_REG, seed);
+	ppe_writel(priv, RSS_HASH_MIX_IPV4_REG(0), RSS_HASH_SIPV4_MIX);
+	ppe_writel(priv, RSS_HASH_MIX_IPV4_REG(1), RSS_HASH_DIPV4_MIX);
+	ppe_writel(priv, RSS_HASH_MIX_IPV4_REG(2), RSS_HASH_PROTOCOL_MIX);
+	ppe_writel(priv, RSS_HASH_MIX_IPV4_REG(3), RSS_HASH_DPORT_MIX);
+	ppe_writel(priv, RSS_HASH_MIX_IPV4_REG(4), RSS_HASH_SPORT_MIX);
+	ppe_writel(priv, RSS_HASH_FIN_IPV4_REG(0), RSS_HASH_FIN_INNER_OUTER_0);
+	ppe_writel(priv, RSS_HASH_FIN_IPV4_REG(1), RSS_HASH_FIN_INNER_OUTER_1);
+	ppe_writel(priv, RSS_HASH_FIN_IPV4_REG(2), RSS_HASH_FIN_INNER_OUTER_2);
+	ppe_writel(priv, RSS_HASH_FIN_IPV4_REG(3), RSS_HASH_FIN_INNER_OUTER_3);
+	ppe_writel(priv, RSS_HASH_FIN_IPV4_REG(4), RSS_HASH_FIN_INNER_OUTER_4);
+
+	ppe_writel(priv, RSS_HASH_MASK_REG, RSS_HASH_MASK);
+	ppe_writel(priv, RSS_HASH_SEED_REG, seed);
+	ppe_writel(priv, RSS_HASH_MIX_REG(0), RSS_HASH_SIPV6_MIX_0);
+	ppe_writel(priv, RSS_HASH_MIX_REG(1), RSS_HASH_SIPV6_MIX_1);
+	ppe_writel(priv, RSS_HASH_MIX_REG(2), RSS_HASH_SIPV6_MIX_2);
+	ppe_writel(priv, RSS_HASH_MIX_REG(3), RSS_HASH_SIPV6_MIX_3);
+	ppe_writel(priv, RSS_HASH_MIX_REG(4), RSS_HASH_DIPV6_MIX_0);
+	ppe_writel(priv, RSS_HASH_MIX_REG(5), RSS_HASH_DIPV6_MIX_1);
+	ppe_writel(priv, RSS_HASH_MIX_REG(6), RSS_HASH_DIPV6_MIX_2);
+	ppe_writel(priv, RSS_HASH_MIX_REG(7), RSS_HASH_DIPV6_MIX_3);
+	ppe_writel(priv, RSS_HASH_MIX_REG(8), RSS_HASH_PROTOCOL_MIX);
+	ppe_writel(priv, RSS_HASH_MIX_REG(9), RSS_HASH_DPORT_MIX);
+	ppe_writel(priv, RSS_HASH_MIX_REG(10), RSS_HASH_SPORT_MIX);
+
+	ppe_writel(priv, RSS_HASH_FIN_REG(0), RSS_HASH_FIN_INNER_OUTER_0);
+	ppe_writel(priv, RSS_HASH_FIN_REG(1), RSS_HASH_FIN_INNER_OUTER_1);
+	ppe_writel(priv, RSS_HASH_FIN_REG(2), RSS_HASH_FIN_INNER_OUTER_2);
+	ppe_writel(priv, RSS_HASH_FIN_REG(3), RSS_HASH_FIN_INNER_OUTER_3);
+	ppe_writel(priv, RSS_HASH_FIN_REG(4), RSS_HASH_FIN_INNER_OUTER_4);
+
+}
+
+/*
+ * one-time PPE setup: flow control, RSS, schedulers, TDM, AC, VSI membership, bridge state and counters
+ */
+static int ess_ppe_init(struct ipq95xx_ess_priv *priv)
+{
+	int ret, i, port;
+
+	/* the edma QID2RID registers are not affected by reset, set
+	 * them to a known value */
+	for (i = 0; i < EDMA_QID2RID_TABLE_SIZE; i++)
+		edma_writel(priv, EDMA_QID2RIDx_TABLE_MEM(i), 0);
+
+	ppe_bm_fc_init(priv);
+	ppe_rss_init(priv);
+
+	ret = ppe_psched_init(priv);
+	if (ret)
+		return ret;
+
+	ret = ppe_tdm_init(priv);
+	if (ret)
+		return ret;
+
+	/* setup queue access control */
+	nss_ppe_setup_ac(priv);
+
+	/* add all physical ports to a dedicated VSI */
+	for (port = 1; port < ESS_PHYS_PORT_COUNT; port++) {
+		int vsi = port + 1;
+		ppe_vsi_port_set(priv, port, vsi);
+	}
+
+	/* setup port bridging control, only enable forwarding for CPU
+	 * port, for other ports it will be enabled/disabled depending
+	 * on port actual link status */
+	for (port = 0; port < ESS_PHYS_PORT_COUNT; port++) {
+		u32 val;
+
+		val = ppe_readl(priv, PPE_PORT_BRCTL_REG(port));
+
+		/* FIXME: understand how isolation mask works wrt VSI
+		 * membership */
+		val &= ~PORT_BRCTL_PORT_ISOL_MASK;
+		val |= (0x7f << PORT_BRCTL_PORT_ISOL_SHIFT);
+
+		/* don't attempt to match incoming mac addresses */
+		val |= PORT_BRCTL_PROMISC_EN_MASK;
+
+		/* no learning */
+		val &= ~(PORT_BRCTL_NEW_ADDR_LRN_EN_MASK |
+			 PORT_BRCTL_STA_MOVE_LRN_EN_MASK);
+
+		if (port == 0)
+			val |= PORT_BRCTL_TXMAC_EN_MASK;
+		else
+			val &= ~PORT_BRCTL_TXMAC_EN_MASK;
+
+		ppe_writel(priv, PPE_PORT_BRCTL_REG(port), val);
+	}
+
+	/* setup port mask of each VSI  */
+	for (port = 1; port < ESS_PHYS_PORT_COUNT; port++) {
+		int vsi;
+		u8 port_mask;
+
+		vsi = port + 1;
+		port_mask = (1 << 0) | (1 << port);
+
+		ppe_vsi_set_members(priv, vsi, port_mask);
+	}
+
+	/* Port 0-7 STP */
+	for (port = 0; port < ESS_PHYS_PORT_COUNT; port++)
+		ppe_writel(priv, PPE_STP_STATE_REG(port),
+			   PPE_STP_STATE_FORWARDING);
+
+	/* enable queues rx/tx counters */
+	ppe_writel(priv, EG_BRIDGE_CFG_REG, EG_BRIDGE_CFG_CNT_EN_MASK);
+
+	/* enable per-virtport rx/tx counters */
+	for (port = 0; port < ESS_VIRT_PORT_COUNT; port++) {
+		u32 vals[3];
+		u32 val;
+
+		vals[0] = ppe_readl(priv, MRU_MTU_CTRL_TBL_REG(port, 0));
+		vals[1] = ppe_readl(priv, MRU_MTU_CTRL_TBL_REG(port, 1));
+		vals[2] = ppe_readl(priv, MRU_MTU_CTRL_TBL_REG(port, 2));
+
+		vals[1] |= MRU_MTU_CTRL1_CNT_RX_EN_MASK;
+		vals[1] |= MRU_MTU_CTRL1_CNT_TX_EN_MASK;
+		ppe_writel(priv, MRU_MTU_CTRL_TBL_REG(port, 0), vals[0]);
+		ppe_writel(priv, MRU_MTU_CTRL_TBL_REG(port, 1), vals[1]);
+		ppe_writel(priv, MRU_MTU_CTRL_TBL_REG(port, 2), vals[2]);
+
+		val = ppe_readl(priv, MC_MTU_CTRL_TBL_REG(port));
+		val |= MC_MTU_CTRL_CNT_EN_MASK;
+		ppe_writel(priv, MC_MTU_CTRL_TBL_REG(port), val);
+	}
+
+	/* also needed for tx counters to work on physport */
+	for (port = 0; port < ESS_PHYS_PORT_COUNT; port++) {
+		u32 val;
+
+		val = ppe_readl(priv, PORT_EG_VLAN_REG(port));
+		val |= PORT_EG_VLAN_TX_COUNTING_MASK;
+		ppe_writel(priv, PORT_EG_VLAN_REG(port), val);
+	}
+
+	return 0;
+}
+
+
+/*
+ * Validate the parsed port configuration: all ports sharing the same
+ * uniphy must be configured with the same phy mode.  Returns 0 when
+ * consistent, -EINVAL otherwise.
+ */
+static int check_ess_config(struct platform_device *pdev,
+			    int hw_uniphy_count,
+			    const struct ess_config *cfg)
+{
+	int uniphy_id;
+
+	/*
+	 * ports sharing the same uniphy id should be using the same
+	 * phy mode
+	 *
+	 * this would be caught at runtime, but report it now if
+	 * possible
+	 */
+	for (uniphy_id = 0; uniphy_id < hw_uniphy_count; uniphy_id++) {
+		phy_interface_t mode;
+		size_t i, port_id;
+		bool used;
+
+		used = false;
+		for (i = 0; i < ARRAY_SIZE(cfg->ports); i++) {
+			const struct ess_port_config *pcfg = &cfg->ports[i];
+
+			if (!pcfg->used ||
+			    pcfg->uniphy_id == -1 ||
+			    pcfg->phy_mode == PHY_INTERFACE_MODE_NA)
+				continue;
+
+			/* only look at ports attached to the uniphy
+			 * currently being checked: without this test
+			 * ports on *different* uniphys were compared
+			 * against each other, rejecting perfectly
+			 * valid configurations */
+			if (pcfg->uniphy_id != uniphy_id)
+				continue;
+
+			if (!used) {
+				/* first port found on this uniphy:
+				 * remember its mode as the reference */
+				used = true;
+				mode = pcfg->phy_mode;
+				port_id = i;
+				continue;
+			}
+
+			if (mode == pcfg->phy_mode)
+				continue;
+
+			dev_err(&pdev->dev, "port %zu shares uniphy %d with "
+				"port %zu, but have different configured "
+				"phy mode\n", port_id, uniphy_id, i);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Parse the "ports" device tree node into an ess_config: for each
+ * enabled port, resolve its mac address, PPE port id, name, phy mode
+ * and uniphy/channel assignment, then cross-check the result with
+ * check_ess_config().  Returns 0 or a negative errno.
+ */
+static int of_read_ess_config(enum ipq95xx_ess_type ess_type,
+			      struct platform_device *pdev,
+			      struct device_node *ports_np,
+			      struct ess_config *config)
+{
+	struct device_node *port_np;
+	unsigned int hw_port_count, hw_uniphy_count;
+	int ret;
+
+	/*
+	 * fill config from device tree
+	 */
+	memset(config, 0, sizeof (*config));
+	hw_port_count = hwdesc_get_port_count(ess_type);
+	hw_uniphy_count = hwdesc_get_uniphy_count(ess_type);
+
+	ret = 0;
+	for_each_available_child_of_node(ports_np, port_np) {
+		DECLARE_PHY_INTERFACE_MASK(supported_phy_modes);
+		struct ess_port_config *pcfg;
+		const char *name;
+		phy_interface_t mode;
+		int port_id, uniphy_id, channel_id;
+
+		ret = of_property_read_u32(port_np, "reg", &port_id);
+		if (ret)
+			break;
+
+		if (port_id >= hw_port_count) {
+			ret = -EINVAL;
+			break;
+		}
+
+		pcfg = &config->ports[port_id];
+		pcfg->np = port_np;
+		pcfg->phy_mode = PHY_INTERFACE_MODE_NA;
+		pcfg->uniphy_id = -1;
+		pcfg->used = of_device_is_available(port_np);
+		if (!pcfg->used)
+			continue;
+
+		ret = of_get_mac_address(port_np, pcfg->mac_addr);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to find port %d "
+				"mac address\n", port_id);
+			break;
+		}
+
+		if (of_property_read_u32(port_np, "ess,ppe-port-id",
+					 &pcfg->ppe_port_id)) {
+			dev_err(&pdev->dev, "missing ess,ppe-port-id "
+				"on port %d\n", port_id);
+			ret = -EINVAL;
+			break;
+		}
+
+		if (of_property_read_string(port_np, "label", &name))
+			name = "eth%d";
+
+		pcfg->name = devm_kstrdup(&pdev->dev, name, GFP_KERNEL);
+		if (!pcfg->name) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		/*
+		 * for ports that can be dynamically muxed to
+		 * different uniphy, the uniphy-id has to be
+		 * specified because it cannot be guessed, though
+		 * since it can be implied from the phy-mode, we can
+		 * try to guess first
+		 */
+		ret = of_property_read_u32(port_np, "ess,uniphy-id",
+					   &uniphy_id);
+		if (!ret && uniphy_id >= hw_uniphy_count) {
+			dev_err(&pdev->dev, "invalid uniphy-id given\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		if (ret) {
+			if (ret == -EINVAL) {
+				/* no value given, default to
+				 * autoselect */
+				uniphy_id = -1;
+				ret = 0;
+			} else {
+				dev_err(&pdev->dev, "bad uniphy-id given\n");
+				ret = -EINVAL;
+				break;
+			}
+		}
+
+		ret = hwdesc_port_get_supported_phy_modes(ess_type,
+							  port_id,
+							  uniphy_id,
+							  supported_phy_modes);
+		if (ret) {
+			/* this is where we catch inappropriate
+			 * uniphy-id for the given port */
+			dev_err(&pdev->dev, "cannot use this uniphy-id %d on "
+				"port %d\n", uniphy_id, port_id);
+			ret = -EINVAL;
+			break;
+		}
+
+		/* if a phy-mode is forced, it must be valid, we can
+		 * check that now instead of waiting for further
+		 * phylink failure */
+		ret = of_get_phy_mode(port_np, &mode);
+		if (!ret) {
+			if (!test_bit(mode, supported_phy_modes)) {
+				dev_err(&pdev->dev,
+					"port id %d does not support "
+					"requested phy mode %s",
+					port_id, phy_modes(mode));
+				ret = -EINVAL;
+				break;
+			}
+		} else {
+			if (ret != -EINVAL)
+				break;
+			/* phy-mode absent is not an error: continue
+			 * with mode unset so uniphy selection below
+			 * relies on an explicit uniphy-id (previously
+			 * an unconditional break here aborted the
+			 * whole parse with ret == -EINVAL) */
+			mode = PHY_INTERFACE_MODE_NA;
+			ret = 0;
+		}
+
+		/* record the parsed mode so check_ess_config() can
+		 * compare ports sharing a uniphy (previously left at
+		 * PHY_INTERFACE_MODE_NA, making that check a no-op) */
+		pcfg->phy_mode = mode;
+
+		/* finally decide which uniphy to use if not forced */
+		if (uniphy_id == -1)
+			uniphy_id = hwdesc_port_select_uniphy(ess_type,
+							      port_id, mode);
+
+		if (uniphy_id == -1) {
+			dev_err(&pdev->dev,
+				"cannot infer which uniphy to "
+				"use for port id %d from phy-mode", port_id);
+			ret = -EINVAL;
+			break;
+		}
+
+		channel_id = hwdesc_port_get_uniphy_channel_id(ess_type,
+							       port_id,
+							       uniphy_id);
+		if (channel_id == -1) {
+			dev_err(&pdev->dev,
+				"cannot determine which uniphy channel to "
+				"use for port id %d", port_id);
+			ret = -EINVAL;
+			break;
+		}
+
+		dev_dbg(&pdev->dev,
+			"port%d: using uniphy %d / chan %d\n",
+			port_id, uniphy_id, channel_id);
+		pcfg->uniphy_id = uniphy_id;
+		pcfg->uniphy_channel_id = channel_id;
+	}
+
+	if (ret) {
+		/* breaking out of for_each_available_child_of_node()
+		 * leaves a reference held on the current child, drop
+		 * it */
+		of_node_put(port_np);
+		return ret;
+	}
+
+	return check_ess_config(pdev, hw_uniphy_count, config);
+}
+
+/*
+ * EDMA "misc" interrupt handler: only fires for hardware error
+ * conditions.  Logs the cause along with the two error QID registers,
+ * and masks the interrupt to avoid an irq storm.
+ *
+ * NOTE(review): the mask is never re-enabled here, so after the first
+ * error no further misc interrupts are reported until re-armed
+ * elsewhere -- confirm this is intended.
+ */
+static irqreturn_t edma_misc_isr(int irq, void *dev_id)
+{
+	struct ipq95xx_ess_priv *priv = dev_id;
+	u32 val, qid1, qid2;
+
+	val = edma_readl(priv, EDMA_MISC_INT_STAT_REG);
+
+	/* hardware error may repeat itself, make sure we don't enter
+	 * infinite irq loop, disable IRQ */
+	edma_writel(priv, EDMA_MISC_INT_MASK_REG, 0);
+
+	/* queue ids associated with the error, dumped for diagnosis */
+	qid1 = edma_readl(priv, EDMA_MISC_ERR_QID1_REG);
+	qid2 = edma_readl(priv, EDMA_MISC_ERR_QID2_REG);
+
+	if (val & MISC_INT_AXI_RD_ERR_MASK)
+		dev_err(&priv->pdev->dev, "AXI read error (%08x/%08x)\n",
+			qid1, qid2);
+	if (val & MISC_INT_AXI_WR_ERR_MASK)
+		dev_err(&priv->pdev->dev, "AXI write error (%08x/%08x)\n",
+			qid1, qid2);
+	if (val & MISC_RX_DESC_FIFO_FULL_MASK)
+		dev_err_ratelimited(&priv->pdev->dev,
+				    "hwerr: rx desc fifo full (%08x/%08x)\n",
+				    qid1, qid2);
+	if (val & MISC_RX_ERR_BUF_SIZE_MASK)
+		dev_err_ratelimited(&priv->pdev->dev,
+				    "hwerr: rx buf size error (%08x/%08x)\n",
+				    qid1, qid2);
+	if (val & MISC_TX_SRAM_FULL_MASK)
+		dev_err_ratelimited(&priv->pdev->dev,
+				    "hwerr: tx sram full error (%08x/%08x)\n",
+				    qid1, qid2);
+	if (val & MISC_TX_CMPL_BUF_FULL_MASK)
+		dev_err_ratelimited(&priv->pdev->dev,
+				    "hwerr: tx comp buffer full (%08x/%08x)\n",
+				    qid1, qid2);
+	if (val & MISC_TX_DATA_LEN_ERR_MASK)
+		dev_err_ratelimited(&priv->pdev->dev,
+				    "hwerr: tx data len err (%08x/%08x)\n",
+				    qid1, qid2);
+	if (val & MISC_TX_TIMEOUT_MASK)
+		dev_err_ratelimited(&priv->pdev->dev,
+				    "hwerr: tx timeout (%08x/%08x)\n",
+				    qid1, qid2);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe: parse the DT port configuration, map registers, reset and
+ * initialize the PPE/EDMA blocks and the uniphys, then register the
+ * CPU port and each front port.
+ */
+static int ipq95xx_ess_probe(struct platform_device *pdev)
+{
+	const char *const reg_name[] = { "ppe", "edma", "uniphy" };
+	struct device_node *ports_np;
+	struct ipq95xx_ess_priv *priv;
+	struct ess_port *port, *tmp_port;
+	struct ess_config config;
+	size_t i, ports_used_count;
+	unsigned int per_port_txdr;
+	int ret, irq;
+	u32 val;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof (*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->ess_type = (enum ipq95xx_ess_type)
+		device_get_match_data(&pdev->dev);
+	INIT_LIST_HEAD(&priv->ports);
+	priv->hw_uniphy_count = hwdesc_get_uniphy_count(priv->ess_type);
+	priv->hw_port_count = hwdesc_get_port_count(priv->ess_type);
+	priv->pdev = pdev;
+
+	ports_np = of_get_child_by_name(pdev->dev.of_node, "ports");
+	if (!ports_np) {
+		dev_err(&pdev->dev, "missing ports property");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	ret = of_read_ess_config(priv->ess_type, pdev, ports_np, &config);
+	if (ret)
+		goto fail;
+
+	/* gather memory resources */
+	for (i = 0; i < ARRAY_SIZE(reg_name); i++) {
+		priv->regs[i] =
+			devm_platform_ioremap_resource_byname(pdev,
+							      reg_name[i]);
+		if (IS_ERR(priv->regs[i])) {
+			dev_err(&pdev->dev, "unable to ioremap %s regs: %ld\n",
+				reg_name[i], PTR_ERR(priv->regs[i]));
+			/* goto fail (not return) so ports_np is put */
+			ret = PTR_ERR(priv->regs[i]);
+			goto fail;
+		}
+	}
+
+	/* gather resets */
+	priv->ppe_rst = devm_reset_control_get(&pdev->dev, "ppe_rst");
+	if (IS_ERR(priv->ppe_rst)) {
+		dev_err(&pdev->dev, "failed to get ppe_rst: %ld\n",
+			PTR_ERR(priv->ppe_rst));
+		/* was "return ret" with ret still 0 here: propagate
+		 * the real error instead of pretending success */
+		ret = PTR_ERR(priv->ppe_rst);
+		goto fail;
+	}
+
+	priv->edma_rst = devm_reset_control_get(&pdev->dev, "edma_rst");
+	if (IS_ERR(priv->edma_rst)) {
+		dev_err(&pdev->dev, "failed to get edma_rst: %ld\n",
+			PTR_ERR(priv->edma_rst));
+		ret = PTR_ERR(priv->edma_rst);
+		goto fail;
+	}
+
+	priv->dbg_root = debugfs_create_dir("ipq95xx_ess", NULL);
+	if (!priv->dbg_root) {
+		/* ret was previously left at 0 on this path */
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* get and enable main clocks */
+	ret = ess_clock_init(priv, &config);
+	if (ret)
+		goto fail;
+
+	/* full reset of PPE */
+	/* FIXME: check those timings */
+	reset_control_assert(priv->ppe_rst);
+	msleep(100);
+	reset_control_deassert(priv->ppe_rst);
+	msleep(100);
+
+	reset_control_assert(priv->edma_rst);
+	msleep(1);
+	reset_control_deassert(priv->edma_rst);
+	msleep(1);
+
+	ret = ess_ppe_init(priv);
+	if (ret)
+		goto fail;
+
+	/* register global edma misc interrupt so we can report DMA
+	 * errors loudly */
+	irq = platform_get_irq_byname(pdev, "edma_misc");
+	if (irq < 0) {
+		dev_err(&pdev->dev, "failed to find edma_misc irq\n");
+		ret = irq;
+		goto fail;
+	}
+
+	ret = devm_request_irq(&pdev->dev, irq, edma_misc_isr,
+			       0, "edma-misc", priv);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request edma_irq: %d\n", ret);
+		goto fail;
+	}
+
+	/* setup edma global config */
+	val = (DMAR_CTRL_REQ_PRI_RR << DMAR_CTRL_REQ_PRI_SHIFT) |
+		(DMAR_CTRL_BURST_LEN_256 << DMAR_CTRL_BURST_LEN_SHIFT) |
+		(31 << DMAR_CTRL_TXDATA_OST_NUM_SHIFT) |
+		(7 << DMAR_CTRL_TXDESC_OST_NUM_SHIFT) |
+		(7 << DMAR_CTRL_RXFILL_OST_NUM_SHIFT);
+	edma_writel(priv, EDMA_DMAR_CTRL_REG, val);
+
+	/* edma global enable */
+	val = PORT_CTRL_PAD_EN_MASK | PORT_CTRL_GLOB_EN_MASK;
+	edma_writel(priv, EDMA_PORT_CTRL_REG, val);
+
+	/* init uniphys */
+	for (i = 0; i < ARRAY_SIZE(priv->uniphys); i++) {
+		ret = ess_uniphy_init(priv, i);
+		if (ret)
+			goto fail;
+	}
+
+	/* prepare bitfields of available edma rings  */
+	hwdesc_get_queues_count(priv->ess_type,
+				&priv->rxfr_count,
+				&priv->rxdr_count,
+				&priv->txdr_count);
+
+	/* first loop to count used ports */
+	ports_used_count = 0;
+	for (i = 0; i < ARRAY_SIZE(config.ports); i++) {
+		if (config.ports[i].used)
+			ports_used_count++;
+	}
+
+	/* small sanity check: also reject zero used ports, which
+	 * would divide by zero when computing per_port_txdr below */
+	if (!ports_used_count || ports_used_count > priv->txdr_count) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* setup scheduler for those ports, this will assign PPE QM
+	 * queue */
+	ppe_setup_scheduler(priv, &config);
+
+	if (IS_ENABLED(CONFIG_IPQ95XX_FBX_FF)) {
+		/* reserve tx one queue per CPU for ff */
+		priv->txdr_reserved_count = num_possible_cpus();
+		priv->txdr_count -= priv->txdr_reserved_count;
+		priv->txdr_reserved_start = priv->txdr_count;
+	}
+
+	/* init cpu port */
+	ret = ess_cpu_port_init(pdev, &config.cpu_port, priv);
+	if (ret)
+		goto fail;
+
+	/* allocate as many tx queues that we can to each port by
+	 * default, this can be tuned via ethtool later if needed */
+	per_port_txdr = priv->txdr_count / ports_used_count;
+
+	/* init each ports */
+	for (i = 0; i < ARRAY_SIZE(config.ports); i++) {
+		const struct ess_port_config *pcfg = &config.ports[i];
+
+		if (!pcfg->used)
+			continue;
+		/*
+		 * some ports can be associated to multiple uniphys,
+		 * so the associated mux has to be configured to
+		 * reflect the selected one
+		 *
+		 * on 95xx, only port 4 has such a toggle
+		 */
+		if (i == 4) {
+			u32 val;
+
+			val = ppe_readl(priv, PORT_MUX_CTRL_REG);
+			if (pcfg->uniphy_id == 0)
+				val &= ~PORT_MUX_CTRL_PORTx_PCS_SEL_MASK(4);
+			else
+				val |= PORT_MUX_CTRL_PORTx_PCS_SEL_MASK(4);
+			ppe_writel(priv, PORT_MUX_CTRL_REG, val);
+		}
+
+		ret = ess_port_init(pdev, priv, pcfg, i, per_port_txdr);
+		if (ret)
+			goto fail_cpu_port;
+	}
+
+	of_node_put(ports_np);
+	dev_set_drvdata(&pdev->dev, priv);
+	ess_dbg_init(priv);
+
+	/* enable all misc interrupts */
+	edma_writel(priv, EDMA_MISC_INT_MASK_REG,
+		    (MISC_INT_AXI_RD_ERR_MASK |
+		     MISC_INT_AXI_WR_ERR_MASK |
+		     MISC_RX_DESC_FIFO_FULL_MASK |
+		     MISC_RX_ERR_BUF_SIZE_MASK |
+		     MISC_TX_SRAM_FULL_MASK |
+		     MISC_TX_CMPL_BUF_FULL_MASK |
+		     MISC_TX_DATA_LEN_ERR_MASK |
+		     MISC_TX_TIMEOUT_MASK));
+
+	return 0;
+
+fail_cpu_port:
+	list_for_each_entry_safe(port, tmp_port, &priv->ports, next)
+		ess_port_release(port);
+
+	ess_cpu_port_release(priv);
+
+fail:
+	/* uniphys is zero-initialized (devm_kzalloc), releasing
+	 * never-initialized entries is expected to be a no-op */
+	for (i = 0; i < ARRAY_SIZE(priv->uniphys); i++)
+		ess_uniphy_release(&priv->uniphys[i]);
+
+	if (priv->dbg_root)
+		debugfs_remove_recursive(priv->dbg_root);
+
+	/* of_node_put(NULL) is a no-op, safe for the early failure */
+	of_node_put(ports_np);
+	return ret;
+}
+
+/*
+ * Remove: tear down everything probe set up -- front ports first,
+ * then the CPU port, uniphys and debugfs entries.
+ */
+static int ipq95xx_ess_remove(struct platform_device *pdev)
+{
+	struct ipq95xx_ess_priv *priv = dev_get_drvdata(&pdev->dev);
+	struct ess_port *p, *tmp;
+	size_t idx;
+
+	/* release every registered front port (safe variant, release
+	 * unlinks the entry) */
+	list_for_each_entry_safe(p, tmp, &priv->ports, next)
+		ess_port_release(p);
+
+	ess_cpu_port_release(priv);
+
+	for (idx = 0; idx < ARRAY_SIZE(priv->uniphys); idx++)
+		ess_uniphy_release(&priv->uniphys[idx]);
+
+	ess_dbg_release(priv);
+	debugfs_remove_recursive(priv->dbg_root);
+
+	return 0;
+}
+
+/*
+ * Device tree match table.
+ */
+static const struct of_device_id ipq95xx_ess_dt_match[] = {
+	{
+		.compatible = "qcom,ipq9574-ess",
+		/* match data carries the SoC flavour */
+		.data = (void *)ESS_TYPE_IPQ95XX,
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, ipq95xx_ess_dt_match);
+
+/*
+ * Platform driver glue.
+ */
+static struct platform_driver ipq95xx_ess_platform_driver = {
+	.probe	= ipq95xx_ess_probe,
+	.remove	= ipq95xx_ess_remove,
+	.driver = {
+		.name		= "ipq95xx-net",
+		.of_match_table = ipq95xx_ess_dt_match,
+	},
+};
+
+module_platform_driver(ipq95xx_ess_platform_driver);
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./port.c linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/port.c
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./port.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/port.c	2024-03-08 17:37:03.604237263 +0100
@@ -0,0 +1,5198 @@
+#include "port_priv.h"
+
+#undef DEBUG_FF
+
+/* not tested with higher value, but no structural reason it should
+ * not work */
+#define ESS_MAX_MTU		2000
+
+#define RX_OFFSET		(ALIGN(NET_SKB_PAD, SMP_CACHE_BYTES) + 2)
+
+/*
+ * ff related stuff
+ */
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/arp.h>
+#include <net/ip_ffn.h>
+#include <net/ip_tunnels.h>
+#include <net/ip6_ffn.h>
+#include <net/ip6_route.h>
+#include <net/ip6_tunnel.h>
+
+#include "../../../net/bridge/br_private.h"
+#include "../../../net/fbxbridge/fbxbr_private.h"
+#include "../../../net/dsa/slave.h"
+
+/* how deep ff will look inside packet */
+#define FF_MAX_LOOKAHEAD						\
+	(								\
+		VLAN_ETH_HLEN +						\
+		sizeof (struct ipv6hdr) +				\
+		sizeof (struct iphdr) +					\
+		max(sizeof (struct udphdr), sizeof (struct tcphdr))	\
+	)
+
+/* forward declarations: tx/rx ring helpers implemented later in this
+ * file, needed by the fast-forward code below */
+static int txr_setup_bare(struct ipq95xx_ess_priv *priv,
+			  struct tx_ring *txr,
+			  int index, int hw_index, int buf_count);
+static void txr_release(struct tx_ring *txr);
+static void txr_enable(struct tx_ring *txr);
+static void txr_kick(struct tx_ring *txr);
+static int txr_stop(struct tx_ring *txr);
+
+static inline bool txr_has_room(struct tx_ring *txr);
+static inline bool txr_can_reclaim(struct tx_ring *txr);
+static unsigned int txr_compute_avail_desc(struct ipq95xx_ess_priv *priv,
+					   struct tx_ring *txr);
+static unsigned int txr_compute_sent_descs(struct ipq95xx_ess_priv *priv,
+					   struct tx_ring *txr);
+static int txr_reclaim_bare(struct tx_ring *txr, int budget,
+			    unsigned int *pdone_len);
+static int txr_reclaim(struct tx_ring *txr, int budget);
+
+static int rxfr_refill(struct rx_fill_ring *rxfr, int budget, bool only_ff);
+
+/* result of ff_parse_packet(): minimal l2/l3 classification */
+struct ff_pkt_info {
+	__be16	vlan_id;	/* raw TCI from the tag, 0 if untagged */
+	u8	is_ipv4:1;	/* l3 payload is IPv4 (else IPv6) */
+	u8	l3_hdr_offset;	/* l3 header offset from start of frame */
+	u16	l3_plen;	/* frame length from the l3 header on */
+};
+
+/* static description of a device eligible for fast forward */
+struct ff_dev_desc {
+	bool			is_hardware;	/* ESS port vs software dev */
+	const char		*netdev_name;
+	const char		*bridge_name;	/* unset for WAN devices */
+};
+
+/* runtime state of one fast-forward device slot */
+struct ff_dev_ctx {
+	__be16			vlan_id;	/* matched against rx TCI */
+	uint8_t			is_hardware:1;
+	uint8_t			hw_id:3;	/* PPE port id of the device */
+	uint8_t			active:1;	/* slot currently usable */
+	u8			hwaddr[6];	/* device mac address */
+
+	struct net_device	*netdev;
+	struct net_device	*br_netdev;	/* bridge master, if bridged */
+	struct net_bridge_port	*br_port;
+	struct fbxbr_port	*fbxbr_port;	/* fbxbridge port, if any */
+	struct net_device	*real_netdev;
+};
+
+/* state of the v4/v6 tunnel used by fast forward (sit/6rd or MAP) */
+struct ff_tun_ctx {
+	struct net_device	*netdev;
+	u16			mtu;
+	u8			active:1;
+
+	/* sit parameters */
+	union ff_tun_params {
+		struct {
+			u32		src;		/* local v4 address */
+			u32		s6rd_prefix;	/* 6rd prefix bits */
+			u32		s6rd_pmask;	/* 6rd prefix mask */
+			u8		s6rd_plen;	/* 6rd prefix length */
+		} sit;
+
+		struct {
+			/* map parameters */
+			u32		ipv4_prefix;
+			u32		ipv4_pmask;
+			u8		ipv4_plen;
+			u8		ipv6_plen;
+			struct in6_addr	src;
+			/* presumably the MAP border relay address --
+			 * confirm against the tunnel setup code */
+			struct in6_addr	br;
+
+			u64		ipv6_prefix;
+			u32		ea_addr_mask;	/* EA bits taken from v4 addr */
+			u16		ea_port_mask;	/* EA bits taken from l4 port */
+			u8		psid_len;
+			u8		ea_lshift;
+		} map;
+	} u;
+};
+
+/* number of buffers on each per-cpu ff tx ring */
+#define FF_TXQ_BUF_COUNT	1024
+
+/* ff devices backed by an ESS hardware port */
+enum {
+	FF_HWDEV_ID_SWP1,
+	FF_HWDEV_ID_SWP2,
+	FF_HWDEV_ID_SWP3,
+	FF_HWDEV_ID_SWP4,
+	FF_HWDEV_ID_FTTH,
+	FF_HWDEV_ID_SFPLAN0,
+
+	FF_HWDEV_ID_LAST = FF_HWDEV_ID_SFPLAN0,
+};
+
+/* all ff device slots: LAN devices first (up to FF_DEV_LAN_LAST),
+ * then the WAN devices */
+enum {
+	FF_DEV_WLAN0,
+	FF_DEV_WLAN1,
+	FF_DEV_WLAN2,
+	FF_DEV_WLAN3,
+	FF_DEV_SWP1,
+	FF_DEV_SWP2,
+	FF_DEV_SWP3,
+	FF_DEV_SWP4,
+	FF_DEV_SFPLAN0,
+
+	FF_DEV_WAN,
+	FF_DEV_LANWAN0,
+	FF_DEV_LANWAN1,
+
+	FF_DEV_LAST = FF_DEV_LANWAN1,
+	FF_DEV_LAN_LAST = FF_DEV_SFPLAN0,
+};
+
+/* software LAN device (e.g. wifi), member of bridge br0 */
+#define MK_LAN_DEV(_enum, _name) [_enum]  = {	\
+	.is_hardware		= false,	\
+	.netdev_name		= _name,	\
+	.bridge_name		= "br0",	\
+	}
+
+/* hardware (ESS) LAN device, member of bridge br0 */
+#define MK_LAN_HW_DEV(_enum, _name) [_enum]  = {		\
+	.is_hardware		= true,	\
+	.netdev_name		= _name,	\
+	.bridge_name		= "br0",	\
+	}
+
+/* WAN device, never bridged (bridge_name left NULL) */
+#define MK_WAN_DEV(_enum, _name) [_enum]  = {	\
+	.is_hardware		= true,		\
+	.netdev_name		= _name,	\
+	}
+
+
+/*
+ * Global fast-forward state: one ff_dev_ctx per FF_DEV_* slot plus
+ * the tunnel context.  devs_desc is filled statically below; devs is
+ * presumably populated at runtime via the netdevice notifier --
+ * confirm against the notifier code.
+ */
+static struct ff_ctx  {
+	struct ipq95xx_ess_priv		*priv;
+	struct ff_dev_ctx		devs[FF_DEV_LAST + 1];
+	struct ff_tun_ctx		tun;
+	u32				jiffies;
+
+	struct ff_dev_desc		devs_desc[FF_DEV_LAST + 1];
+	int				wan_active_dev;	/* devs index, -1 if none */
+	char				tun_netdev_name[IFNAMSIZ];
+	char				wan_netdev_name[IFNAMSIZ];
+
+} ff = {
+	.devs_desc = {
+		MK_WAN_DEV(FF_DEV_WAN, "ftthpub0"),
+		MK_LAN_HW_DEV(FF_DEV_SFPLAN0, "sfplan0"),
+
+		MK_LAN_HW_DEV(FF_DEV_SWP1, "swp1"),
+		MK_LAN_HW_DEV(FF_DEV_SWP2, "swp2"),
+		MK_LAN_HW_DEV(FF_DEV_SWP3, "swp3"),
+		MK_LAN_HW_DEV(FF_DEV_SWP4, "swp4"),
+
+		MK_WAN_DEV(FF_DEV_LANWAN0, "lanwanpub0"),
+		MK_WAN_DEV(FF_DEV_LANWAN1, "lanwanpub1"),
+
+		MK_LAN_DEV(FF_DEV_WLAN0, "wlan0"),
+		MK_LAN_DEV(FF_DEV_WLAN1, "wlan1"),
+		MK_LAN_DEV(FF_DEV_WLAN2, "wlan2"),
+		MK_LAN_DEV(FF_DEV_WLAN3, "wlan3"),
+	},
+};
+
+/* per-cpu ff context: a dedicated tx ring and its reclaim thread */
+struct ff_pcpu_ctx {
+	struct tx_ring			tx_ring;
+	struct task_struct		*tx_reclaim_thread;
+};
+
+static DEFINE_PER_CPU(struct ff_pcpu_ctx, ff_pcpu);
+/* per-cpu lock guarding the cpu's ff state */
+static DEFINE_PER_CPU(spinlock_t, ff_plock);
+
+/* presumably tracks ff netdevices coming and going -- registered
+ * elsewhere in this file */
+static struct notifier_block ff_notifier;
+static DEFINE_MUTEX(ff_notifier_mutex);
+/* global ff switch, tested on every received packet in ff_receive() */
+static bool ff_enabled;
+
+/* true when the given ff device slot is one of the WAN slots */
+static bool ff_idx_is_wan(size_t idx)
+{
+	switch (idx) {
+	case FF_DEV_WAN:
+	case FF_DEV_LANWAN0:
+	case FF_DEV_LANWAN1:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/*
+ * Build a network byte order IPv4 netmask from a prefix length.
+ * Uses an unsigned literal and guards len == 0: the original
+ * expression shifted a signed 1 by 31 (len == 1) or by 32 (len == 0),
+ * both undefined behaviour in C.
+ */
+static inline u32 ff_gen_netmask(u8 len)
+{
+	if (!len)
+		return 0;
+	return htonl(~((1u << (32 - len)) - 1));
+}
+
+/*
+ * ipv4 forward cache private data, stored inline in the private area
+ * of ffn / fbxbridge forward cache entries.  Holds a reference on the
+ * tunnel route (tun_dst) when the flow goes through the v6 tunnel.
+ */
+struct ff_priv {
+	struct in6_addr		tun_dest_ip6;
+	struct dst_entry	*tun_dst;
+};
+
+/* drop the dst reference held by the private data */
+static void ff_priv_release(const struct ff_priv *priv)
+{
+	dst_release(priv->tun_dst);
+}
+
+/* destructor installed on entries whose private area we own */
+static void ff_priv_destructor_cb(void *data)
+{
+	const struct ff_priv *priv = (const struct ff_priv *)data;
+	ff_priv_release(priv);
+}
+
+/* return the entry's private area as a ff_priv, or NULL if the area
+ * is owned by someone else (destructor mismatch) */
+static const struct ff_priv *ffn_get_ro_priv(const struct ffn_lookup_entry *e)
+{
+	if (e->manip.priv_destructor != ff_priv_destructor_cb)
+		return NULL;
+
+	return (const struct ff_priv *)e->manip.ffn_priv_area;
+}
+
+/* writable flavour: also accepts a virgin (destructor-less) area so
+ * the caller can claim it */
+static struct ff_priv *ffn_get_rw_priv(struct ffn_lookup_entry *e)
+{
+	BUILD_BUG_ON(sizeof (e->manip.ffn_priv_area) <
+		     sizeof (struct ff_priv));
+
+	if (e->manip.priv_destructor &&
+	    e->manip.priv_destructor != ff_priv_destructor_cb)
+		return NULL;
+
+	return (struct ff_priv *)e->manip.ffn_priv_area;
+}
+
+/* fbxbridge forward cache flavours of the two accessors above */
+static const struct ff_priv *fwc_get_ro_priv(const struct fbxbr_fwcache *fwc)
+{
+	if (fwc->priv_destructor != ff_priv_destructor_cb)
+		return NULL;
+
+	return (const struct ff_priv *)fwc->priv_area;
+}
+
+static struct ff_priv *fwc_get_rw_priv(const struct fbxbr_fwcache *fwc)
+{
+	BUILD_BUG_ON(sizeof (fwc->priv_area) < sizeof (struct ff_priv));
+
+	if (fwc->priv_destructor &&
+	    fwc->priv_destructor != ff_priv_destructor_cb)
+		return NULL;
+
+	return (struct ff_priv *)fwc->priv_area;
+}
+
+/*
+ * ipv6 forward cache private data, mirror of ff_priv for v6 flows
+ * carried over a v4 tunnel endpoint.
+ */
+struct ff6_priv {
+	u32			tun_dest_ip;
+	struct dst_entry	*tun_dst;
+};
+
+/* drop the dst reference held by the private data */
+static void ff6_priv_release(const struct ff6_priv *priv)
+{
+	dst_release(priv->tun_dst);
+}
+
+/* destructor installed on ffn6 entries whose private area we own */
+static void ff6_priv_destructor_cb(void *data)
+{
+	const struct ff6_priv *priv = (const struct ff6_priv *)data;
+	ff6_priv_release(priv);
+}
+
+/* return the entry's private area as a ff6_priv, or NULL if owned by
+ * someone else (destructor mismatch) */
+static const struct ff6_priv *ffn6_get_ro_priv(const struct ffn6_lookup_entry *e6)
+{
+	if (e6->manip.priv_destructor != ff6_priv_destructor_cb)
+		return NULL;
+
+	return (const struct ff6_priv *)e6->manip.ffn_priv_area;
+}
+
+/* writable flavour: also accepts a virgin (destructor-less) area */
+static struct ff6_priv *ffn6_get_rw_priv(struct ffn6_lookup_entry *e6)
+{
+	BUILD_BUG_ON(sizeof (e6->manip.ffn_priv_area) <
+		     sizeof (struct ff6_priv));
+
+	if (e6->manip.priv_destructor &&
+	    e6->manip.priv_destructor != ff6_priv_destructor_cb)
+		return NULL;
+
+	return (struct ff6_priv *)e6->manip.ffn_priv_area;
+}
+
+/*
+ * ff per-cpu lock helpers.
+ */
+static void ff_lock_this_cpu(void)
+{
+	spin_lock(&per_cpu(ff_plock, raw_smp_processor_id()));
+}
+
+static void ff_unlock_this_cpu(void)
+{
+	spin_unlock(&per_cpu(ff_plock, raw_smp_processor_id()));
+}
+
+static void ff_lock_cpu_bh(int cpu)
+{
+	spin_lock_bh(&per_cpu(ff_plock, cpu));
+}
+
+static void ff_unlock_cpu_bh(int cpu)
+{
+	spin_unlock_bh(&per_cpu(ff_plock, cpu));
+}
+
+/* take (resp. release) every online cpu's ff lock */
+static void ff_lock_all_cpu_bh(void)
+{
+	int c;
+
+	for_each_online_cpu(c)
+		ff_lock_cpu_bh(c);
+}
+
+static void ff_unlock_all_cpu_bh(void)
+{
+	int c;
+
+	for_each_online_cpu(c)
+		ff_unlock_cpu_bh(c);
+}
+
+/*
+ * Extract the IPv4 address embedded in a 6rd destination address:
+ * the v4 address follows the 6rd prefix and may straddle the first
+ * two 32bit words of the v6 address.
+ * NOTE(review): word 0 is masked with ~s6rd_pmask and word 1 with
+ * s6rd_pmask, which only works for s6rd_plen < 32 -- confirm against
+ * the code that fills ff.tun.u.sit.
+ */
+static u32 ff_tun_extract_6rd_addr(const struct in6_addr *d)
+{
+	u32 a1, a2;
+
+	/* high bits of the v4 address, from the tail of word 0 */
+	a1 = ntohl(d->s6_addr32[0] & ~ff.tun.u.sit.s6rd_pmask);
+	a1 <<= ff.tun.u.sit.s6rd_plen;
+
+	/* low bits, from the head of word 1 */
+	a2 = ntohl(d->s6_addr32[1] & ff.tun.u.sit.s6rd_pmask);
+	a2 >>= (32 - ff.tun.u.sit.s6rd_plen);
+	return htonl(a1 | a2);
+}
+
+/*
+ * Build the MAP IPv6 address for a given public IPv4 address and l4
+ * port: EA bits (address suffix + PSID) are inserted after the
+ * configured v6 prefix, and the full v4 address plus PSID form the
+ * interface identifier (RFC 7597 style layout).
+ */
+static void ff_tun_gen_mape_addr(u32 addr, u16 port, struct in6_addr *dest)
+{
+	u32 eabits;
+	u16 psid;
+
+	/* EA bits: v4 address suffix, shifted up to leave room for
+	 * the PSID */
+	eabits = ntohl(addr & ff.tun.u.map.ea_addr_mask) << ff.tun.u.map.psid_len;
+	psid = 0;
+	if (ff.tun.u.map.psid_len) {
+		/* PSID taken from the top bits of the port */
+		psid = ntohs(port & ff.tun.u.map.ea_port_mask) >>
+			(16 - ff.tun.u.map.psid_len);
+		eabits |= psid;
+	}
+
+	/* 64bit configured prefix, then EA bits */
+	memcpy(dest, &ff.tun.u.map.ipv6_prefix, 8);
+	dest->s6_addr32[1] |= htonl(eabits << ff.tun.u.map.ea_lshift);
+
+	/* interface id: 16 zero bits, v4 address, then PSID */
+	dest->s6_addr32[2] = htonl(ntohl(addr) >> 16);
+	dest->s6_addr32[3] = htonl((ntohl(addr) << 16) | psid);
+}
+
+/*
+ * Queue one page for transmission on this cpu's ff tx ring towards
+ * the given PPE port.  The caller must already have verified
+ * txr_has_room() (hence the BUG_ON below); the descriptor is only
+ * staged here -- the hardware doorbell is rung later via txr_kick()
+ * (desc_prod_dirty flags the pending update).
+ */
+static void ff_send(unsigned int ppe_port_id,
+		    struct page *page,
+		    unsigned int send_offset,
+		    unsigned int send_len,
+		    unsigned int clean_len)
+{
+	struct ff_pcpu_ctx *ffc = this_cpu_ptr(&ff_pcpu);
+	struct tx_ring *txr = &ffc->tx_ring;
+	struct edma_txdesc_desc *desc;
+	struct tx_meta_info *mi;
+	dma_addr_t page_dma_addr;
+	u32 prod_idx;
+	int meta_idx;
+	void *clean_start, *clean_end;
+
+	/* pad small packets to the ethernet minimum frame size */
+	if (send_len < 60)
+		send_len = 60;
+
+	/* cache clean the whole part, so it's ready to be recycled */
+	clean_start = (void *)page_address(page) + send_offset;
+	clean_end = clean_start + clean_len;
+	dmac_clean_range_no_dsb(clean_start, clean_end);
+
+	/* we checked that before */
+	BUG_ON(unlikely(!txr_has_room(txr)));
+
+	/* point to the next available desc */
+	page_dma_addr = page_pool_get_dma_addr(page);
+	prod_idx = txr->desc_prod_idx;
+	meta_idx = txr->meta_idx;
+	desc = &txr->tx_desc_area[prod_idx];
+	mi = &txr->tx_meta_info[meta_idx];
+	/* tdes0: buffer dma address, tdes2: cookie to find our meta
+	 * info at completion, tdes4/5: destination port and length */
+	desc->tdes0 = cpu_to_le32(page_dma_addr + send_offset);
+	desc->tdes1 = 0;
+	desc->tdes2 = meta_idx;
+	desc->tdes4 = EDMA_TXDESC_MK_DPORT_ID(ppe_port_id);
+	desc->tdes5 = send_len << EDMA_TXDESC_DATA_LEN_SET_SHIFT;
+	desc->tdes6 = 0;
+	desc->tdes7 = 0;
+
+	mi->paddr = page_dma_addr;
+	/* filling mi->map_len is not needed for page */
+	mi->page = page;
+
+	/* advance meta index (plain wrap: meta ring size need not be
+	 * a power of two) */
+	++meta_idx;
+	if (unlikely(meta_idx >= txr->meta_ring_size))
+		meta_idx = 0;
+
+	/* advance producer index (ring_size is a power of two) */
+	prod_idx++;
+	prod_idx &= (txr->ring_size - 1);
+
+	txr->meta_idx = meta_idx;
+	txr->desc_prod_idx = prod_idx;
+	txr->descs_avail_cache--;
+	txr->desc_prod_dirty = true;
+}
+
+/*
+ * Per-cpu kthread reclaiming sent buffers from the ff tx ring when
+ * the ring has been idle for a while (the hot path reclaims by
+ * itself while traffic flows).  Reclaims in small batches with the
+ * cpu's bh disabled to avoid racing the forwarding path.
+ *
+ * NOTE(review): "nfct_time_stamp < last_use_time + HZ" compares
+ * jiffies-derived values directly rather than with time_before(), so
+ * behaviour around counter wrap should be confirmed.
+ */
+static int ff_reclaim_threadfn(void *data)
+{
+	struct ff_pcpu_ctx *ffc = this_cpu_ptr(&ff_pcpu);
+
+	set_user_nice(current, MAX_NICE);
+
+	while (!kthread_should_stop()) {
+		int todo;
+
+		/* ring used less than a second ago: let the hot path
+		 * do the reclaiming and check back later */
+		if (nfct_time_stamp < ffc->tx_ring.last_use_time + HZ) {
+			msleep_interruptible(100);
+			continue;
+		}
+
+		todo = 128;
+		while (todo > 0 && !kthread_should_stop()) {
+			int done = 0;
+
+			local_bh_disable();
+			done = txr_reclaim_bare(&ffc->tx_ring, 1, NULL);
+			local_bh_enable();
+
+			if (!done)
+				break;
+			schedule();
+			todo--;
+		}
+
+		/* nothing (or little) left to reclaim: sleep */
+		if (todo)
+			msleep_interruptible(100);
+	}
+
+	return 0;
+}
+
+/*
+ * Light parse of an ethernet frame sitting in a page: fills vlan id
+ * (0 if untagged), l3 header offset/length and the ipv4/ipv6 flag.
+ * Returns 0 when the frame carries a complete IPv4 or IPv6 header,
+ * 1 when it cannot be fast forwarded.
+ */
+static int ff_parse_packet(struct ff_pkt_info *info,
+			   struct page *page,
+			   size_t offset,
+			   size_t eth_len)
+{
+	const struct ethhdr *eth;
+	const uint16_t *proto;
+
+	eth = (const struct ethhdr *)((uint8_t *)page_address(page) + offset);
+	proto = (const uint16_t *)&eth->h_proto;
+
+	if (*proto == htons(ETH_P_8021Q)) {
+		const struct vlan_hdr *vhdr;
+
+		/* the caller only guarantees eth_len >= ETH_HLEN:
+		 * without this check, the u16 l3_plen computation
+		 * below would underflow on a runt tagged frame and
+		 * defeat the header length checks */
+		if (unlikely(eth_len < VLAN_ETH_HLEN))
+			return 1;
+
+		vhdr = (const struct vlan_hdr *)(proto + 1);
+		info->vlan_id = vhdr->h_vlan_TCI;
+
+		proto = (const uint16_t *)&vhdr->h_vlan_encapsulated_proto;
+		info->l3_hdr_offset = (const void *)(vhdr + 1) -
+			(const void *)eth;
+		info->l3_plen = eth_len - VLAN_ETH_HLEN;
+	} else {
+		info->vlan_id = 0;
+		info->l3_hdr_offset = (const void *)(proto + 1) -
+			(const void *)eth;
+		info->l3_plen = eth_len - ETH_HLEN;
+	}
+
+	if (*proto == htons(ETH_P_IP)) {
+		if (info->l3_plen < sizeof (struct iphdr))
+			return 1;
+		info->is_ipv4 = 1;
+		return 0;
+	}
+
+	info->is_ipv4 = 0;
+	if (*proto == htons(ETH_P_IPV6)) {
+		if (info->l3_plen < sizeof (struct ipv6hdr))
+			return 1;
+		return 0;
+	}
+
+	/* neither IPv4 nor IPv6: not eligible for fast forward */
+	return 1;
+}
+
+/* transmit encapsulation chosen by the fast-forward receive path */
+enum ff_xmit_mode {
+	FF_XMIT_IPV4,
+	FF_XMIT_IPV6,
+	FF_XMIT_IPV6_IN_IPV4,
+	FF_XMIT_IPV4_IN_IPV6,
+};
+
+/*
+ *
+ */
+static bool ff_receive(struct device *dev,
+		       struct ess_port *rx_port,
+		       struct sk_buff_head *ff_list,
+		       struct page_pool *pp,
+		       struct page *page,
+		       size_t offset,
+		       size_t eth_len)
+{
+	struct ff_pcpu_ctx *ffc = this_cpu_ptr(&ff_pcpu);
+	struct ff_pkt_info pinfo;
+	struct ethhdr *eth;
+	struct net_device *last_rx_dev, *next_tx_dev;
+	struct ffn_lookup_entry *e = NULL;
+	struct ffn6_lookup_entry *e6 = NULL;
+	struct nf_conn *ct = NULL;
+	enum ff_xmit_mode xmit_mode;
+	const struct in6_addr *tun_v6_pdest = NULL;
+	struct net_device *tx_dev;
+	struct ff_dev_ctx *tx_ff_dev, *rx_ff_dev;
+	u32 tun_v4_dest = 0;
+	u8 dest_hw[6];
+	unsigned int timeout;
+	unsigned int clean_len;
+	void *l2_hdr, *l3_hdr, *l4_hdr;
+	bool l3_is_ipv4, l4_is_tcp;
+	u16 proto;
+	u16 *pproto;
+	size_t i, rx_ff_dev_idx, tx_ff_dev_idx;
+	bool parsed;
+
+	/* make sure we have headroom for the worst case scenario */
+	BUILD_BUG_ON(NET_SKB_PAD < (sizeof (struct ipv6hdr) + VLAN_HLEN));
+
+	if (unlikely(!ff_enabled))
+		return false;
+
+	if (unlikely(eth_len < ETH_HLEN))
+		return false;
+
+	/* locate rx ff device */
+	parsed = false;
+	for (i = 0; i < ARRAY_SIZE(ff.devs); i++) {
+		if (!ff.devs[i].active)
+			continue;
+
+		if (ff.devs[i].hw_id != rx_port->ppe_port_id)
+			continue;
+
+		/* candidate, fully parse packet */
+		if (!parsed &&
+		    likely(ff_parse_packet(&pinfo, page, offset, eth_len)))
+			return false;
+
+		parsed = true;
+
+		/* make sure this is the right device */
+		if (ff.devs[i].vlan_id != pinfo.vlan_id)
+			continue;
+
+		/* device match! */
+		break;
+	}
+
+	if (unlikely(i == ARRAY_SIZE(ff.devs)))
+		return false;
+
+	rx_ff_dev_idx = i;
+	rx_ff_dev = &ff.devs[rx_ff_dev_idx];
+	last_rx_dev = rx_ff_dev->netdev;
+
+	/* find opposing device */
+	if (ff_idx_is_wan(rx_ff_dev_idx)) {
+		if (!ff.devs[FF_DEV_LAN_LAST].active)
+			return false;
+
+		/* XXX: to get bridge/fbxbridge device, assume to be
+		 * the same on all devices, real tx dev not yet
+		 * known */
+		if (!ff.devs[FF_DEV_LAN_LAST].fbxbr_port)
+			tx_dev = ff.devs[FF_DEV_LAN_LAST].br_netdev;
+		else
+			tx_dev = ff.devs[FF_DEV_LAN_LAST].netdev;
+	} else {
+		if (likely(ff.wan_active_dev != -1))
+			tx_dev = ff.devs[ff.wan_active_dev].netdev;
+		else
+			return false;
+	}
+
+	if (WARN_ON(!tx_dev))
+		return false;
+
+	/* make sure packet is for our mac address */
+	eth = (struct ethhdr *)((uint8_t *)page_address(page) + offset);
+	if (unlikely(memcmp(eth->h_dest, ff.devs[i].hwaddr, 6)))
+		return false;
+
+	l3_is_ipv4 = pinfo.is_ipv4;
+	l3_hdr = (u8 *)eth + pinfo.l3_hdr_offset;
+
+	if (l3_is_ipv4) {
+		struct iphdr *iph;
+		struct fbxbr_fwcache *fwc;
+		struct fbxbr *fbxbr = NULL;
+		struct fbxbr_port *fbxbr_fwd_port = NULL;
+		u16 sport, dport;
+		u8 ip_proto;
+
+handle_ipv4:
+		iph = (struct iphdr *)l3_hdr;
+
+		/* lookup IP ffn entry */
+		if (unlikely(iph->ihl > 5 ||
+			     (iph->frag_off & htons(IP_MF | IP_OFFSET))))
+			return false;
+
+		if (unlikely(iph->ttl <= 1))
+			return false;
+
+		ip_proto = iph->protocol;
+		if (ip_proto == IPPROTO_TCP) {
+			struct tcphdr *tcph;
+
+			if (unlikely(pinfo.l3_plen < sizeof (*iph) +
+				     sizeof (*tcph)))
+				return false;
+
+			tcph = (struct tcphdr *)((u8 *)iph + 20);
+			if (unlikely(tcph->fin ||
+				     tcph->syn ||
+				     tcph->rst ||
+				     !tcph->ack)) {
+				return false;
+			}
+
+			sport = tcph->source;
+			dport = tcph->dest;
+			l4_hdr = tcph;
+			l4_is_tcp = true;
+
+		} else if (ip_proto == IPPROTO_UDP) {
+			struct udphdr *udph;
+
+			if (unlikely(pinfo.l3_plen < sizeof (*iph) +
+				     sizeof (*udph)))
+				return false;
+
+			udph = (struct udphdr *)((u8 *)iph + 20);
+			sport = udph->source;
+			dport = udph->dest;
+			l4_hdr = udph;
+			l4_is_tcp = false;
+
+		} else if (ip_proto == IPPROTO_IPV6) {
+			struct ipv6hdr *ip6hdr;
+			u32 ip6rd_daddr;
+
+			if (!unlikely(ff.tun.active))
+				return false;
+
+			/* must be for us */
+			if (unlikely(iph->daddr != ff.tun.u.sit.src))
+				return false;
+
+			/* check len */
+			if (unlikely(pinfo.l3_plen < sizeof (struct iphdr) +
+				     sizeof (struct ipv6hdr)))
+				return false;
+
+			ip6hdr = (struct ipv6hdr *)(iph + 1);
+
+			/* must belong to 6rd prefix */
+			if (unlikely((ip6hdr->daddr.s6_addr32[0] &
+				      ff.tun.u.sit.s6rd_pmask) != ff.tun.u.sit.s6rd_prefix))
+				return false;
+
+			/* 6rd address */
+			ip6rd_daddr = ff_tun_extract_6rd_addr(&ip6hdr->daddr);
+			if (unlikely(ip6rd_daddr != ff.tun.u.sit.src))
+				return false;
+
+			/* TODO: should check for spoofing here */
+			l3_hdr = ip6hdr;
+			pinfo.l3_plen -= 20;
+			l3_is_ipv4 = false;
+			goto handle_ipv6;
+
+		} else
+			return false;
+
+		if (netif_is_fbxbridge_port(last_rx_dev)) {
+			struct fbxbr_fwcache_key k;
+			struct fbxbr_port *p;
+			u32 hash;
+
+			p = fbxbr_port_get_rcu(last_rx_dev);
+			fbxbr = p->br;
+
+			if (p->is_wan) {
+				k.wan_ip = iph->saddr;
+				k.lan_ip = iph->daddr;
+				k.wan_port = sport;
+				k.lan_port = dport;
+				fbxbr_fwd_port = fbxbr->lan_port;
+			} else {
+				k.lan_ip = iph->saddr;
+				k.wan_ip = iph->daddr;
+				k.lan_port = sport;
+				k.wan_port = dport;
+				fbxbr_fwd_port = fbxbr->wan_port;
+			}
+			k.is_tcp = l4_is_tcp;
+
+			if (!unlikely(fbxbr_fwd_port))
+				return false;
+
+			hash = fbxbr_fwcache_hash(&k);
+			fwc = __fbxbr_fwcache_lookup_rcu(p->br, hash, &k);
+			if (!fwc)
+				return false;
+
+			next_tx_dev = fbxbr_fwd_port->dev;
+			e = NULL;
+		} else {
+			struct ffn_lookup_key k;
+
+			k.sip = iph->saddr;
+			k.dip = iph->daddr;
+			k.sport = sport;
+			k.dport = dport;
+			k.is_tcp = l4_is_tcp;
+
+			e = __ffn_get_rcu(&k);
+			if (unlikely(!e))
+				return false;
+
+			if (unlikely(e->manip.dst->obsolete > 0))
+				return false;
+
+			ct = e->manip.ct;
+
+			/* only fast forward TCP connections in established state */
+			if (unlikely(l4_is_tcp &&
+				     ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED))
+				return false;
+
+			next_tx_dev = e->manip.dst->dev;
+			fwc = NULL;
+		}
+
+		/* find out if the packet is to be sent as-is or
+		 * tunneled */
+		if (ff.tun.netdev && next_tx_dev == ff.tun.netdev) {
+			const struct ff_priv *ff_priv;
+			struct ff_priv *ff_wpriv;
+			struct dst_entry *v6_dst;
+			const struct in6_addr *pdest, *nexthop;
+			struct in6_addr dest;
+			struct rt6_info *rt6;
+			struct neighbour *neigh;
+
+			/* IPv4 tunneled into MAP-E device */
+			if (!unlikely(ff.tun.active)) {
+				return false;
+			}
+
+			if (unlikely(pinfo.l3_plen > ff.tun.mtu))
+				return false;
+
+			/* lookup ipv6 route cache */
+			if (e)
+				ff_priv = ffn_get_ro_priv(e);
+			else
+				ff_priv = fwc_get_ro_priv(fwc);
+
+			if (ff_priv) {
+				if (likely(ff_priv->tun_dst->obsolete < 0)) {
+					/* valid route found */
+					v6_dst = ff_priv->tun_dst;
+					pdest = &ff_priv->tun_dest_ip6;
+					goto cached_ipv6_route;
+				}
+
+				ff_priv_release(ff_priv);
+				if (e)
+					e->manip.priv_destructor = NULL;
+				else
+					fwc->priv_destructor = NULL;
+			}
+
+			/* cache miss, compute IPv6 destination */
+			if (ff.tun.u.map.ipv4_prefix &&
+			    (iph->daddr & ff.tun.u.map.ipv4_pmask) ==
+			    ff.tun.u.map.ipv4_prefix) {
+				/* compute dest using FMR */
+				ff_tun_gen_mape_addr(iph->daddr, dport, &dest);
+				pdest = &dest;
+			} else {
+				/* next hop is BR */
+				pdest = &ff.tun.u.map.br;
+			}
+
+			/* v6 route lookup */
+			rt6 = rt6_lookup(&init_net, pdest, NULL, 0, NULL, 0);
+			if (unlikely(!rt6))
+				return false;
+
+			if (e)
+				ff_wpriv = ffn_get_rw_priv(e);
+			else
+				ff_wpriv = fwc_get_rw_priv(fwc);
+			if (unlikely(!ff_wpriv))
+				return false;
+
+			/* cache this inside FFN private area */
+			ff_wpriv->tun_dst = (struct dst_entry *)rt6;
+			memcpy(&ff_wpriv->tun_dest_ip6, pdest, 16);
+			if (e)
+				e->manip.priv_destructor = ff_priv_destructor_cb;
+			else
+				fwc->priv_destructor = ff_priv_destructor_cb;
+			ff_priv = ff_wpriv;
+
+			v6_dst = (struct dst_entry *)rt6;
+
+cached_ipv6_route:
+			if (unlikely(v6_dst->dev != tx_dev)) {
+				return false;
+			}
+
+			/* is the neighboor ready ? */
+			rt6 = (struct rt6_info *)v6_dst;
+			nexthop = rt6_nexthop(rt6, (struct in6_addr *)pdest);
+			if (unlikely(!nexthop)) {
+				return false;
+			}
+
+			neigh = __ipv6_neigh_lookup_noref(tx_dev, nexthop);
+			if (unlikely(!neigh || !(neigh->nud_state & NUD_VALID)))
+				return false;
+			memcpy(dest_hw, neigh->ha, 6);
+
+			xmit_mode = FF_XMIT_IPV4_IN_IPV6;
+			tun_v6_pdest = &ff_priv->tun_dest_ip6;
+
+		} else if (next_tx_dev == tx_dev) {
+			struct neighbour *neigh;
+			const struct rtable *rt;
+
+			/* is the neighboor ready ? */
+			if (likely(e)) {
+				u32 nexthop;
+
+				rt = (const struct rtable *)e->manip.dst;
+				nexthop = (__force u32)rt_nexthop(rt,
+							   e->manip.new_dip);
+				neigh = __ipv4_neigh_lookup_noref(tx_dev,
+								  nexthop);
+				if (!neigh || !(neigh->nud_state & NUD_VALID))
+					return false;
+
+				memcpy(dest_hw, neigh->ha, 6);
+			} else {
+				if (!fbxbr_fwd_port->is_wan) {
+					if (!fbxbr->have_hw_addr)
+						return false;
+					memcpy(dest_hw, fbxbr->lan_hwaddr, 6);
+				} else {
+					__be32 nh;
+
+					nh = iph->daddr;
+					if ((nh & fbxbr->wan_netmask) !=
+					    (fbxbr->wan_ipaddr &
+					     fbxbr->wan_netmask)) {
+						rt = fbxbr_fwd_port->rt;
+						if (!rt ||
+						    rt->dst.obsolete > 0)
+							return false;
+
+						nh = rt_nexthop(rt, nh);
+					}
+
+					neigh = __ipv4_neigh_lookup_noref(
+						tx_dev, nh);
+					if (!neigh ||
+					    !(neigh->nud_state & NUD_VALID))
+						return false;
+
+					memcpy(dest_hw, neigh->ha, 6);
+				}
+			}
+
+			xmit_mode = FF_XMIT_IPV4;
+		} else
+			return false;
+
+	} else {
+		struct ffn6_lookup_key k;
+		struct ipv6hdr *ip6hdr;
+		u16 sport, dport;
+		u8 ip_proto;
+
+handle_ipv6:
+		ip6hdr = (struct ipv6hdr *)l3_hdr;
+
+		if (unlikely(ip6hdr->hop_limit <= 1 || !ip6hdr->payload_len))
+			return false;
+
+		if (unlikely(ntohs(ip6hdr->payload_len) > pinfo.l3_plen))
+			return false;
+
+		ip_proto = ip6hdr->nexthdr;
+
+		if (ip_proto == IPPROTO_TCP) {
+			struct tcphdr *tcph;
+
+			if (unlikely(pinfo.l3_plen < sizeof (*ip6hdr) +
+				     sizeof (*tcph)))
+				return false;
+
+			tcph = (struct tcphdr *)((u8 *)ip6hdr +
+						 sizeof (*ip6hdr));
+
+			if (unlikely(tcph->fin ||
+				     tcph->syn ||
+				     tcph->rst ||
+				     !tcph->ack)) {
+				return false;
+			}
+
+			sport = tcph->source;
+			dport = tcph->dest;
+			l4_hdr = tcph;
+			l4_is_tcp = true;
+
+		} else if (ip_proto == IPPROTO_UDP) {
+			struct udphdr *udph;
+
+			if (unlikely(pinfo.l3_plen < sizeof (*ip6hdr) +
+				     sizeof (*udph)))
+				return false;
+
+			udph = (struct udphdr *)((u8 *)ip6hdr +
+						 sizeof (*ip6hdr));
+			sport = udph->source;
+			dport = udph->dest;
+			l4_hdr = udph;
+			l4_is_tcp = false;
+
+		} else if (ip_proto == IPPROTO_IPIP) {
+			struct iphdr *iph;
+
+			if (unlikely(!ff.tun.active))
+				return false;
+
+			/* must be for us */
+			if (unlikely(memcmp(&ip6hdr->daddr,
+					    &ff.tun.u.map.src, 16)))
+				return false;
+
+			/* check len */
+			if (unlikely(pinfo.l3_plen < sizeof (struct iphdr) +
+				     sizeof (struct ipv6hdr)))
+				return false;
+
+			iph = (struct iphdr *)(ip6hdr + 1);
+
+			/* does it come from BR ? */
+			if (memcmp(&ip6hdr->saddr, &ff.tun.u.map.br, 16)) {
+				struct in6_addr exp_src_addr;
+
+				/* no, check FMR for spoofing */
+				if (unlikely(!ff.tun.u.map.ipv4_prefix))
+					return false;
+
+				/* check up to PSID to reduce lookup
+				 * depth */
+				ff_tun_gen_mape_addr(iph->saddr, 0,
+						     &exp_src_addr);
+				if (unlikely(!ipv6_prefix_equal(
+						     &ip6hdr->saddr,
+						     &exp_src_addr,
+						     ff.tun.u.map.ipv6_plen +
+						     ff.tun.u.map.ipv4_plen)))
+					return false;
+			}
+
+			last_rx_dev = ff.tun.netdev;
+			if (unlikely(!last_rx_dev))
+				return false;
+
+			l3_hdr = iph;
+			pinfo.l3_plen -= sizeof (*ip6hdr);
+			l3_is_ipv4 = true;
+			goto handle_ipv4;
+
+		} else
+			return false;
+
+		k.sip = ip6hdr->saddr.s6_addr32;
+		k.dip = ip6hdr->daddr.s6_addr32;
+		k.sport = sport;
+		k.dport = dport;
+		k.is_tcp = l4_is_tcp;
+
+		e6 = __ffn6_get_rcu(&k);
+		if (unlikely(!e6)) {
+			return false;
+		}
+
+		if (unlikely(e6->manip.dst->obsolete > 0)) {
+			return false;
+		}
+
+		ct = e6->manip.ct;
+
+		/* only fast forward TCP connections in established state */
+		if (unlikely(l4_is_tcp &&
+			     ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)) {
+			return false;
+		}
+
+		/* find out if the packet is to be sent as-is or
+		 * tunneled */
+		if (ff.tun.netdev && e6->manip.dst->dev == ff.tun.netdev) {
+			const struct ff6_priv *ff6_priv;
+			struct ff6_priv *ff6_wpriv;
+			struct dst_entry *v4_dst;
+			struct flowi4 fl4;
+			struct rtable *rt;
+			struct neighbour *neigh;
+			u32 dest, nexthop;
+
+			/* IPv6 tunneled into SIT device using 6rd */
+			if (unlikely(!ff.tun.active)) {
+				return false;
+			}
+
+			if (unlikely(pinfo.l3_plen > ff.tun.mtu))
+				return false;
+
+			/* lookup ipv4 route cache */
+			ff6_priv = ffn6_get_ro_priv(e6);
+			if (ff6_priv) {
+				if (likely(!ff6_priv->tun_dst->obsolete)) {
+					/* valid route found */
+					v4_dst = ff6_priv->tun_dst;
+					dest = ff6_priv->tun_dest_ip;
+					goto cached_ipv4_route;
+				}
+
+				ff6_priv_release(ff6_priv);
+				e6->manip.priv_destructor = NULL;
+			}
+
+			/* cache miss, compute IPv4 destination */
+			if ((ip6hdr->daddr.s6_addr32[0] &
+			     ff.tun.u.sit.s6rd_pmask) == ff.tun.u.sit.s6rd_prefix) {
+				/* next hop via prefix */
+				dest = ff_tun_extract_6rd_addr(&ip6hdr->daddr);
+			} else {
+				const struct in6_addr *nh6;
+				struct rt6_info *rt6;
+
+				/* next hop via route */
+				rt6 = (struct rt6_info *)e6->manip.dst;
+				nh6 = rt6_nexthop(rt6,
+				      (struct in6_addr *)e6->manip.new_dip);
+				if (!nh6) {
+					return false;
+				}
+
+				/* should be a v4 mapped */
+				if (nh6->s6_addr32[0] != 0 ||
+				    nh6->s6_addr32[1] != 0 ||
+				    nh6->s6_addr32[2] != 0) {
+					return false;
+				}
+
+				dest = nh6->s6_addr32[3];
+			}
+
+			/* v4 route lookup */
+			rt = ip_route_output_ports(&init_net, &fl4, NULL,
+						   dest, ff.tun.u.sit.src,
+						   0, 0,
+						   IPPROTO_IPV6, 0,
+						   0);
+			if (IS_ERR(rt) ||
+			    rt->rt_type != RTN_UNICAST)
+				return false;
+
+			ff6_wpriv = ffn6_get_rw_priv(e6);
+			if (!ff6_wpriv)
+				return false;
+
+			/* cache this inside FFN private area */
+			ff6_wpriv->tun_dst = (struct dst_entry *)rt;
+			ff6_wpriv->tun_dest_ip = dest;
+			e6->manip.priv_destructor = ff6_priv_destructor_cb;
+
+			v4_dst = (struct dst_entry *)rt;
+			ff6_priv = ff6_wpriv;
+
+cached_ipv4_route:
+			if (unlikely(v4_dst->dev != tx_dev)) {
+				return false;
+			}
+
+			/* is the neighboor ready ? */
+			rt = (struct rtable *)v4_dst;
+			nexthop = (__force u32)rt_nexthop(rt, dest);
+			neigh = __ipv4_neigh_lookup_noref(tx_dev, nexthop);
+			if (unlikely(!neigh || !(neigh->nud_state & NUD_VALID)))
+				return false;
+			memcpy(dest_hw, neigh->ha, 6);
+
+			tun_v4_dest = dest;
+			xmit_mode = FF_XMIT_IPV6_IN_IPV4;
+
+		} else if (e6->manip.dst->dev == tx_dev) {
+			const struct in6_addr *nexthop;
+			struct rt6_info *rt6;
+			struct neighbour *neigh;
+
+			/* is the neighboor ready ? */
+			rt6 = (struct rt6_info *)e6->manip.dst;
+
+			nexthop = rt6_nexthop(rt6,
+				      (struct in6_addr *)e6->manip.new_dip);
+			if (unlikely(!nexthop))
+				return false;
+
+			neigh = __ipv6_neigh_lookup_noref(tx_dev, nexthop);
+			if (unlikely(!neigh || !(neigh->nud_state & NUD_VALID)))
+				return false;
+			memcpy(dest_hw, neigh->ha, 6);
+
+			xmit_mode = FF_XMIT_IPV6;
+		} else
+			return false;
+	}
+
+	/* compute outgoing device */
+	if (!ff_idx_is_wan(rx_ff_dev_idx)) {
+		tx_ff_dev = &ff.devs[ff.wan_active_dev];
+		tx_ff_dev_idx = ff.wan_active_dev;
+
+	} else if (ff.devs[FF_DEV_LAN_LAST].br_port) {
+		struct net_bridge_port *br_port;
+		struct net_bridge_fdb_entry *fdb;
+
+		/* XXX get reference to bridge using last lan port */
+		br_port = ff.devs[FF_DEV_LAN_LAST].br_port;
+		fdb = br_fdb_find_rcu(br_port->br, dest_hw, 0);
+		if (unlikely(!fdb))
+			return false;
+
+		tx_ff_dev = NULL;
+		for (i = 0; i < ARRAY_SIZE(ff.devs); i++) {
+			if (!ff.devs[i].active)
+				continue;
+			if (ff.devs[i].br_port == fdb->dst) {
+				tx_ff_dev = &ff.devs[i];
+				break;
+			}
+		}
+
+		if (unlikely(!tx_ff_dev)) {
+			return false;
+		}
+
+		tx_ff_dev_idx = i;
+
+	} else if (ff.devs[FF_DEV_LAN_LAST].fbxbr_port) {
+		tx_ff_dev = &ff.devs[FF_DEV_LAN_LAST];
+		tx_ff_dev_idx = FF_DEV_LAN_LAST;
+	} else
+		return false;
+
+
+	/* update rx statistics */
+	if (!ff_idx_is_wan(rx_ff_dev_idx) && rx_ff_dev->br_port) {
+		struct net_bridge *br;
+		struct net_bridge_port *p;
+
+		/* packet comes from a bridge, make sure we are
+		 * allowed to ingress it */
+		p = rx_ff_dev->br_port;
+		if (unlikely(p->state != BR_STATE_FORWARDING))
+			return false;
+
+		/* refresh FDB entry for this source */
+		br = netdev_priv(rx_ff_dev->br_netdev);
+		if (unlikely(!br_fdb_update_only(br, p, eth->h_source)))
+			return false;
+
+		dev_sw_netstats_rx_add(br->dev, eth_len);
+	}
+
+	if (rx_ff_dev->vlan_id) {
+		struct vlan_dev_priv *vlan = vlan_dev_priv(rx_ff_dev->netdev);
+		struct vlan_pcpu_stats *stats;
+		stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
+		u64_stats_inc(&stats->rx_packets);
+		u64_stats_add(&stats->rx_bytes, eth_len);
+	}
+
+	/* do we have room in the tx queue ? */
+	if (tx_ff_dev->is_hardware) {
+		struct tx_ring *txr = &ffc->tx_ring;
+
+		/* check if we have room in txq */
+		if (unlikely(!txr_has_room(txr)))
+			txr->descs_avail_cache =
+				txr_compute_avail_desc(rx_port->priv, txr);
+
+		if (unlikely(!txr_has_room(txr))) {
+			/* FIXME: we can reclaim that descriptor in
+			 * rx fill */
+			page_pool_recycle_direct(pp, page);
+			return true;
+		}
+	}
+
+	if (ct && l4_is_tcp) {
+		/* don't try to track window anymore on this
+		 * connection */
+		ct->proto.tcp.no_window_track = 1;
+	}
+
+	/* alter l3 & l4 content if needed (routing only) */
+	if (l3_is_ipv4 && e) {
+		struct iphdr *iph = (struct iphdr *)l3_hdr;
+
+		if (e->manip.alter) {
+			if (l4_is_tcp) {
+				struct tcphdr *tcph = (struct tcphdr *)l4_hdr;
+				tcph->source = e->manip.new_sport;
+				tcph->dest = e->manip.new_dport;
+				tcph->check = csum16_sub(tcph->check,
+						 e->manip.l4_adjustment);
+			} else {
+				struct udphdr *udph = (struct udphdr *)l4_hdr;
+				udph->source = e->manip.new_sport;
+				udph->dest = e->manip.new_dport;
+				if (udph->check) {
+					u16 tcheck;
+
+					tcheck = csum16_sub(udph->check,
+						    e->manip.l4_adjustment);
+					udph->check = tcheck ? tcheck : 0xffff;
+				}
+			}
+
+			iph->saddr = e->manip.new_sip;
+			iph->daddr = e->manip.new_dip;
+		}
+
+		iph->ttl--;
+		iph->check = csum16_sub(iph->check,
+					e->manip.ip_adjustment);
+
+	} else if (!l3_is_ipv4 && e6) {
+		struct ipv6hdr *ip6hdr = (struct ipv6hdr *)l3_hdr;
+
+		if (e6->manip.alter) {
+			if (l4_is_tcp) {
+				struct tcphdr *tcph = (struct tcphdr *)l4_hdr;
+				tcph->source = e6->manip.new_sport;
+				tcph->dest = e6->manip.new_dport;
+				tcph->check = csum16_sub(tcph->check,
+							 e6->manip.adjustment);
+			} else {
+				struct udphdr *udph = (struct udphdr *)l4_hdr;
+				udph->source = e6->manip.new_sport;
+				udph->dest = e6->manip.new_dport;
+
+				if (udph->check) {
+					u16 tcheck;
+
+					tcheck = csum16_sub(udph->check,
+						    e6->manip.adjustment);
+					udph->check = tcheck ? tcheck : 0xffff;
+				}
+			}
+
+			memcpy(ip6hdr->saddr.s6_addr32, e6->manip.new_sip, 16);
+			memcpy(ip6hdr->daddr.s6_addr32, e6->manip.new_dip, 16);
+		}
+
+		ip6hdr->hop_limit--;
+	}
+
+	/* packet is ready to xmit */
+	switch (xmit_mode) {
+	case FF_XMIT_IPV4:
+		proto = ETH_P_IP;
+		clean_len = sizeof (struct iphdr);
+		break;
+
+	case FF_XMIT_IPV6:
+		clean_len = sizeof (struct ipv6hdr);
+		proto = ETH_P_IPV6;
+		break;
+
+	case FF_XMIT_IPV6_IN_IPV4:
+	{
+		struct iphdr *tun_hdr;
+		/* prepend IPv4 */
+		tun_hdr = (struct iphdr *)((u8 *)l3_hdr - sizeof (*tun_hdr));
+		tun_hdr->ihl = 5;
+		tun_hdr->version = 4;
+		tun_hdr->tos = 0;
+		tun_hdr->tot_len = htons(pinfo.l3_plen + sizeof (*tun_hdr));
+		tun_hdr->id = 0;
+		tun_hdr->frag_off = 0;
+		tun_hdr->check = 0;
+		tun_hdr->ttl = 64;
+		tun_hdr->protocol = IPPROTO_IPV6;
+		tun_hdr->saddr = ff.tun.u.sit.src;
+		tun_hdr->daddr = tun_v4_dest;
+		tun_hdr->check = ip_fast_csum((u8 *)tun_hdr, 5);
+
+		l3_hdr = (u8 *)tun_hdr;
+		pinfo.l3_plen += sizeof (*tun_hdr);
+
+		clean_len = sizeof (struct iphdr) + sizeof (struct ipv6hdr);
+		proto = ETH_P_IP;
+		break;
+	}
+
+	case FF_XMIT_IPV4_IN_IPV6:
+	{
+		struct ipv6hdr *tun_6hdr;
+
+		/* prepend IPv6 */
+		tun_6hdr = (struct ipv6hdr *)((u8 *)l3_hdr - sizeof (*tun_6hdr));
+		tun_6hdr->version = 6;
+		tun_6hdr->priority = 0;
+		memset(tun_6hdr->flow_lbl, 0, sizeof (tun_6hdr->flow_lbl));
+		tun_6hdr->payload_len = htons(pinfo.l3_plen);
+		tun_6hdr->nexthdr = IPPROTO_IPIP;
+		tun_6hdr->hop_limit = 64;
+		tun_6hdr->saddr = ff.tun.u.map.src;
+		tun_6hdr->daddr = *tun_v6_pdest;
+
+		l3_hdr = (u8 *)tun_6hdr;
+		pinfo.l3_plen += sizeof (*tun_6hdr);
+
+		clean_len = sizeof (struct ipv6hdr) + sizeof (struct iphdr);
+		proto = ETH_P_IPV6;
+		break;
+	}
+	}
+
+	if (e || e6) {
+		if (l4_is_tcp)
+			clean_len += sizeof (struct tcphdr);
+		else
+			clean_len += sizeof (struct udphdr);
+	}
+
+	/* add vlan header if any */
+	l2_hdr = l3_hdr;
+	if (tx_ff_dev->vlan_id) {
+		struct vlan_hdr *vhdr;
+
+		l2_hdr -= VLAN_HLEN;
+		vhdr = (struct vlan_hdr *)l2_hdr;
+		vhdr->h_vlan_TCI = tx_ff_dev->vlan_id;
+		vhdr->h_vlan_encapsulated_proto = htons(proto);
+		proto = ETH_P_8021Q;
+		clean_len += VLAN_HLEN;
+	}
+
+	/* add protocol */
+	l2_hdr -= sizeof (*pproto);
+	pproto = (u16 *)l2_hdr;
+	*pproto = htons(proto);
+
+	/* finally add eth dst/src */
+	l2_hdr -= ETH_ALEN * 2;
+	eth = (struct ethhdr *)l2_hdr;
+	memcpy(eth->h_dest, dest_hw, 6);
+	memcpy(eth->h_source, tx_ff_dev->hwaddr, 6);
+	clean_len += ETH_HLEN;
+
+	/* compute final len */
+	eth_len = pinfo.l3_plen + (l3_hdr - l2_hdr);
+
+	/* refresh conntrack */
+	if (ct) {
+		if (l4_is_tcp)
+			timeout = HZ * 3600 * 24 * 5;
+		else
+			timeout = HZ * 180;
+
+		if (ct->timeout - ff.jiffies < timeout - 10 * HZ) {
+			unsigned long newtime = ff.jiffies + timeout;
+			ct->timeout = newtime;
+		}
+	}
+
+	if (tx_ff_dev->is_hardware) {
+		ff_send(tx_ff_dev->hw_id,
+			page,
+			(void *)eth - page_address(page),
+			eth_len,
+			clean_len);
+
+		if (tx_ff_dev->vlan_id) {
+			struct vlan_dev_priv *vlan = vlan_dev_priv(tx_ff_dev->netdev);
+			struct vlan_pcpu_stats *stats;
+			stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
+			u64_stats_inc(&stats->tx_packets);
+			u64_stats_add(&stats->tx_bytes, eth_len);
+			dev_sw_netstats_tx_add(tx_ff_dev->real_netdev, 1, eth_len);
+		} else
+			dev_sw_netstats_tx_add(tx_ff_dev->netdev, 1, eth_len);
+
+	} else {
+		struct sk_buff *skb;
+		unsigned int data_offset = (void *)eth - page_address(page);
+		unsigned int sync_done, sync_todo;
+
+		/* compute dma area size to sync, only complete what
+		 * has been done before */
+		sync_done = offset + FF_MAX_LOOKAHEAD;
+		sync_todo = data_offset + eth_len;
+
+		if (sync_todo > sync_done) {
+			dma_addr_t addr = page_pool_get_dma_addr(page);
+
+			dma_sync_single_for_cpu(dev,
+						addr + sync_done,
+						sync_todo - sync_done,
+						DMA_FROM_DEVICE);
+		}
+
+		skb = napi_build_skb(page_address(page), PAGE_SIZE);
+		if (unlikely(!skb)) {
+			page_pool_recycle_direct(pp, page);
+			return true;
+		}
+
+		skb_reserve(skb, data_offset);
+		skb_put(skb, eth_len);
+		skb->protocol = eth->h_proto;
+		skb_set_network_header(skb, l3_hdr - l2_hdr);
+		skb->dev = tx_ff_dev->netdev;
+		skb_mark_for_recycle(skb);
+		skb->ffn_ff_done = 1;
+		skb->ffn_ff_dirty_len = clean_len;
+		__skb_queue_tail(ff_list, skb);
+	}
+
+	if (!ff_idx_is_wan(tx_ff_dev_idx) && tx_ff_dev->br_port) {
+		struct net_bridge *br;
+		br = netdev_priv(tx_ff_dev->br_netdev);
+
+		dev_sw_netstats_tx_add(br->dev, 1, eth_len);
+	}
+
+	return true;
+}
+
+/*
+ * setup manual RSS for MAP-E incoming packets using flow-label as
+ * hash key
+ */
+static void ff_setup_rss_mape_ipo(void)
+{
+	struct ipq95xx_ess_priv *priv = ff.priv;
+	struct ess_cpu_port *cport = &priv->cpu_port;
+	/* one-shot: rules are programmed once and kept for the
+	 * lifetime of the driver */
+	static bool done;
+	int cpu, port_idx;
+
+	if (done)
+		return;
+
+	/* rules match on the wan hardware port, so a selected wan is
+	 * required; caller is expected to retry later otherwise */
+	if (ff.wan_active_dev == -1)
+		return;
+
+	port_idx = ff.devs[ff.wan_active_dev].hw_id;
+
+	/* install one chain of rules per online cpu, steering matching
+	 * traffic to that cpu's rx queue */
+	for_each_online_cpu(cpu) {
+		const u8 *dip6 = (const u8 *)&ff.tun.u.map.src;
+		int queue = cport->rx_qm_qid_first + cpu;
+		int rule_id, i;
+
+		/* need to allocate a chain of 4 rules, align */
+		rule_id = ppe_ipo_allocate_rules_id(priv, 4);
+		if (WARN_ON(rule_id < 0))
+			return;
+
+		/* rules 0..2: match the MAP-E local ipv6 destination
+		 * address word by word (word index i) and force the
+		 * destination queue */
+		for (i = 0; i < 3; i++)
+			ppe_ipo_rule_dst_ip6_flow_force_queue_id(ff.priv,
+								 rule_id + i,
+								 port_idx,
+								 dip6,
+								 i,
+								 queue);
+		/* rule 3: udf0 match with value 0x3, selecting cpu --
+		 * NOTE(review): udf0 is presumably configured to carry
+		 * flow-label bits; confirm against the ppe driver */
+		ppe_ipo_rule_udf0_match(priv,
+					rule_id + 3,
+					port_idx,
+					0x3,
+					cpu);
+
+		ppe_ipo_chain_rules(priv, rule_id, 4);
+	}
+	done = true;
+}
+
+/*
+ * Update the cached tunnel state.
+ *
+ * @active: whether the tunnel should be usable by the fastpath
+ * @mtu: tunnel device mtu (ignored when @active is false)
+ * @tp: new parameter block (may be NULL when @active is false)
+ *
+ * Deactivation only clears the active flag.  Activation copies the
+ * parameters (unless unchanged), logs the transition and installs
+ * the MAP-E RSS rules.
+ */
+static void __ff_tun_set_params(bool active,
+				unsigned int mtu,
+				const union ff_tun_params *tp)
+{
+	bool was_active = ff.tun.active;
+
+	if (!active) {
+		/* nothing to do when already inactive */
+		if (was_active) {
+			printk(KERN_DEBUG "ff: tunnel now NOT active\n");
+			ff.tun.active = 0;
+		}
+		return;
+	}
+
+	/* ignore redundant updates carrying identical parameters */
+	if (was_active &&
+	    ff.tun.mtu == mtu &&
+	    !memcmp(tp, &ff.tun.u, sizeof (*tp)))
+		return;
+
+	ff.tun.mtu = mtu;
+	memcpy(&ff.tun.u, tp, sizeof (*tp));
+
+	if (was_active)
+		printk(KERN_DEBUG "ff: tunnel params updated\n");
+	else
+		printk(KERN_DEBUG "ff: tunnel now active\n");
+
+	ff_setup_rss_mape_ipo();
+	ff.tun.active = true;
+}
+
+/*
+ * Re-read tunnel parameters from the captured tunnel netdev and push
+ * them into the fastpath via __ff_tun_set_params().
+ *
+ * Deactivates the tunnel when no wan device is selected or when the
+ * tunnel configuration is unsupported.  Handles SIT (6rd) and
+ * TUNNEL6 (MAP-E) device types; other types leave the state untouched.
+ */
+static void __ff_tun_read_params(void)
+{
+	union ff_tun_params tp;
+	const struct ff_dev_ctx *wan_ff_dev;
+
+	if (!ff.tun.netdev)
+		return;
+
+	/* no selected wan: the tunnel cannot be used */
+	if (ff.wan_active_dev == -1) {
+		__ff_tun_set_params(false, 0, NULL);
+		return;
+	}
+
+	wan_ff_dev = &ff.devs[ff.wan_active_dev];
+
+	memset(&tp, 0, sizeof (tp));
+
+	if (ff.tun.netdev->type == ARPHRD_SIT) {
+		/* 6rd over SIT */
+		const struct ip_tunnel *tun = netdev_priv(ff.tun.netdev);
+		const struct ip_tunnel_6rd_parm *ip6rd = &tun->ip6rd;
+
+		/* fastpath only handles 6rd prefixes fitting in the
+		 * first 32-bit word of the address */
+		if (!ip6rd->prefixlen || ip6rd->prefixlen > 32) {
+			printk(KERN_DEBUG "ff: unsupported 6rd plen\n");
+			__ff_tun_set_params(false, 0, NULL);
+			return;
+		}
+
+		/* encapsulated packets must fit in the wan mtu */
+		if (ff.tun.netdev->mtu + sizeof (struct iphdr) > wan_ff_dev->netdev->mtu) {
+			printk(KERN_DEBUG "ff: WAN mtu too "
+			       "small for tunnel (%u => %u)\n",
+			       ff.tun.netdev->mtu, wan_ff_dev->netdev->mtu);
+			__ff_tun_set_params(false, 0, NULL);
+			return;
+		}
+
+		tp.sit.src = tun->parms.iph.saddr;
+		tp.sit.s6rd_prefix = ip6rd->prefix.s6_addr32[0];
+		tp.sit.s6rd_pmask = ff_gen_netmask(ip6rd->prefixlen);
+		tp.sit.s6rd_plen = ip6rd->prefixlen;
+		__ff_tun_set_params(true, ff.tun.netdev->mtu, &tp);
+		return;
+	}
+
+	if (ff.tun.netdev->type == ARPHRD_TUNNEL6) {
+		/* MAP-E over ip6tnl */
+		const struct ip6_tnl *t = netdev_priv(ff.tun.netdev);
+		const struct __ip6_tnl_parm *prm = &t->parms;
+		const struct __ip6_tnl_fmr *fmr;
+
+		/* encapsulated packets must fit in the wan mtu */
+		if (ff.tun.netdev->mtu + sizeof (struct ipv6hdr) >
+		    wan_ff_dev->netdev->mtu) {
+			printk(KERN_DEBUG "ff: WAN mtu too "
+			       "small for tunnel (%u => %u)\n",
+			       ff.tun.netdev->mtu, wan_ff_dev->netdev->mtu);
+			__ff_tun_set_params(false, 0, NULL);
+			return;
+		}
+
+		tp.map.src = prm->laddr;
+		tp.map.br = prm->raddr;
+
+		/* without an FMR, only the BR path is possible */
+		fmr = prm->fmrs;
+		if (!fmr) {
+			tp.map.ipv4_prefix = 0;
+			__ff_tun_set_params(true, ff.tun.netdev->mtu, &tp);
+			return;
+		}
+
+		/* reject mappings whose EA bits would span more than
+		 * 32 bits of the ipv6 address */
+		if (fmr->ip6_prefix_len < 32 ||
+		    (fmr->ip6_prefix_len + 32 - fmr->ip4_prefix_len > 64)) {
+			printk(KERN_DEBUG "ff: unsupp MAP-E: eabits "
+			       "span 32 bits\n");
+			__ff_tun_set_params(false, 0, NULL);
+			return;
+		}
+
+		/* non-zero PSID offset is not handled by the fastpath */
+		if (fmr->offset) {
+			printk(KERN_DEBUG "ff: unsupp MAP-E: non zero "
+			       "PSID offset\n");
+			__ff_tun_set_params(false, 0, NULL);
+			return;
+		}
+
+		tp.map.ipv4_prefix = fmr->ip4_prefix.s_addr;
+		tp.map.ipv4_pmask = ff_gen_netmask(fmr->ip4_prefix_len);
+		tp.map.ipv4_plen = fmr->ip4_prefix_len;
+		tp.map.ipv6_plen = fmr->ip6_prefix_len;
+		memcpy(&tp.map.ipv6_prefix, &fmr->ip6_prefix, 8);
+
+		/* precompute masks/shift used to derive the MAP-E
+		 * address from an ipv4 address + port */
+		tp.map.ea_addr_mask = ~ff_gen_netmask(fmr->ip4_prefix_len);
+		if (fmr->ea_len <= 32 - fmr->ip4_prefix_len) {
+			/* v4 prefix or full IP */
+			u32 addr_bits;
+
+			addr_bits = fmr->ip4_prefix_len + fmr->ea_len;
+			if (addr_bits != 32)
+				tp.map.ea_addr_mask &= ff_gen_netmask(addr_bits);
+			tp.map.psid_len = 0;
+		} else {
+			/* shared ipv4 address: EA bits carry a PSID */
+			u8 psid_len;
+
+			psid_len = fmr->ea_len - (32 - fmr->ip4_prefix_len);
+			tp.map.psid_len = psid_len;
+			tp.map.ea_port_mask = ff_gen_netmask(psid_len);
+		}
+
+		tp.map.ea_lshift = 32 - (fmr->ip6_prefix_len - 32) -
+			fmr->ea_len;
+
+		__ff_tun_set_params(true, ff.tun.netdev->mtu, &tp);
+		return;
+	}
+}
+
+/*
+ * Try to capture the configured tunnel netdev by name.
+ *
+ * On success, a reference to the device is kept in ff.tun.netdev and
+ * its parameters are read.  The reference obtained from
+ * dev_get_by_name() is dropped on every failure path (the original
+ * code leaked it on the type-mismatch path and mistakenly called
+ * dev_put() on the still-NULL ff.tun.netdev on the !IFF_UP path).
+ */
+static void __ff_tun_capture(void)
+{
+	struct net_device *dev;
+
+	if (ff.tun.netdev) {
+		printk(KERN_ERR "ff: error: tun already registered\n");
+		return;
+	}
+
+	/* takes a reference on the device, dropped below on failure */
+	dev = dev_get_by_name(&init_net, ff.tun_netdev_name);
+	if (!dev)
+		return;
+
+	/* only SIT (6rd) and ip6tnl (MAP-E) devices are supported */
+	if (dev->type != ARPHRD_SIT && dev->type != ARPHRD_TUNNEL6) {
+		dev_put(dev);
+		return;
+	}
+
+	if (!(dev->flags & IFF_UP)) {
+		dev_put(dev);
+		return;
+	}
+
+	ff.tun.netdev = dev;
+	__ff_tun_read_params();
+	printk(KERN_INFO "ff: tun dev grabbed\n");
+}
+
+/*
+ * Drop the reference on the captured tunnel netdev, if any, and log
+ * the release.
+ */
+static void __ff_tun_release(void)
+{
+	if (!ff.tun.netdev)
+		return;
+
+	dev_put(ff.tun.netdev);
+	ff.tun.netdev = NULL;
+	printk(KERN_INFO "ff: tun dev released\n");
+}
+
+/*
+ * Handle a netdevice notifier event for the tunnel device: capture
+ * on UP, refresh parameters on CHANGE/CHANGEMTU, release on
+ * down/unregister.  Runs with all per-cpu ff locks held.
+ */
+static void ff_notifier_event_tunnel(struct net_device *dev,
+				     unsigned long event)
+{
+	ff_lock_all_cpu_bh();
+
+	if (event == NETDEV_UP) {
+		if (!ff.tun.netdev)
+			__ff_tun_capture();
+	} else if (event == NETDEV_CHANGE ||
+		   event == NETDEV_CHANGEMTU) {
+		if (ff.tun.netdev == dev)
+			__ff_tun_read_params();
+	} else if (event == NETDEV_GOING_DOWN ||
+		   event == NETDEV_DOWN ||
+		   event == NETDEV_UNREGISTER) {
+		if (ff.tun.netdev == dev)
+			__ff_tun_release();
+	}
+
+	ff_unlock_all_cpu_bh();
+}
+
+/*
+ * Resolve the bridge membership of @ff_dev->netdev against
+ * @bridge_name and refresh the cached bridge pointers
+ * (br_port/fbxbr_port/br_netdev/hwaddr).
+ *
+ * Returns 0 when the device is a port of the named bridge and that
+ * bridge is administratively up, 1 otherwise.
+ *
+ * Fix: the fbxbridge branch called fbxbr_port_get_rcu() a second
+ * time to store the port; reuse the pointer already fetched.
+ */
+static int ff_dev_resolve_bridge(struct ff_dev_ctx *ff_dev,
+				 const char *bridge_name)
+{
+	bool ok = false;
+
+	rcu_read_lock();
+
+	if (netif_is_bridge_port(ff_dev->netdev)) {
+		struct net_bridge_port *br_port;
+		struct net_bridge *br;
+
+		/* a device cannot be on both bridge types */
+		ff_dev->fbxbr_port = NULL;
+		ff_dev->br_port = NULL;
+
+		br_port = br_port_get_rcu(ff_dev->netdev);
+		if (!br_port)
+			goto done;
+
+		br = br_port->br;
+		if (!br || strcmp(br->dev->name, bridge_name))
+			goto done;
+
+		ff_dev->br_port = br_port;
+		if (br->dev->flags & IFF_UP) {
+			/* cache the bridge mac: fastpath packets are
+			 * accepted/sent with the bridge address */
+			memcpy(ff_dev->hwaddr, br->dev->dev_addr, 6);
+			ff_dev->br_netdev = br->dev;
+			ok = true;
+		}
+	}
+
+	if (netif_is_fbxbridge_port(ff_dev->netdev)) {
+		struct fbxbr_port *fbxbr_port;
+		struct fbxbr *fbxbr;
+
+		ff_dev->br_port = NULL;
+		ff_dev->fbxbr_port = NULL;
+
+		fbxbr_port = fbxbr_port_get_rcu(ff_dev->netdev);
+		if (!fbxbr_port)
+			goto done;
+
+		fbxbr = fbxbr_port->br;
+		if (strcmp(fbxbr->dev->name, bridge_name))
+			goto done;
+
+		/* reuse the port pointer fetched above instead of a
+		 * redundant second RCU lookup */
+		ff_dev->fbxbr_port = fbxbr_port;
+		if (fbxbr->dev->flags & IFF_UP)
+			ok = true;
+	}
+
+done:
+	rcu_read_unlock();
+	return ok ? 0 : 1;
+}
+
+/*
+ * Return true when the bridge (regular or fbxbridge) this ff device
+ * belongs to is administratively up; false when it belongs to no
+ * bridge.
+ */
+static bool ff_dev_bridge_is_up(struct ff_dev_ctx *ff_dev)
+{
+	struct net_bridge *br;
+
+	if (ff_dev->br_port) {
+		br = ff_dev->br_port->br;
+		return br ? !!(br->dev->flags & IFF_UP) : false;
+	}
+
+	if (ff_dev->fbxbr_port)
+		return !!(ff_dev->fbxbr_port->br->dev->flags & IFF_UP);
+
+	return false;
+}
+
+/*
+ * Re-elect the active wan ff device: the first active wan slot whose
+ * netdev name equals the configured wan netdev name.  Logs every
+ * transition and refreshes the tunnel parameters, which depend on
+ * the selected wan.
+ */
+static void __ff_select_active_wan(void)
+{
+	int matching = -1;
+	size_t idx;
+
+	for (idx = 0; idx < FF_DEV_LAST + 1; idx++) {
+		if (!ff_idx_is_wan(idx))
+			continue;
+		if (!ff.devs[idx].active)
+			continue;
+		if (!strcmp(ff.devs[idx].netdev->name,
+			    ff.wan_netdev_name)) {
+			matching = (int)idx;
+			break;
+		}
+	}
+
+	if (ff.wan_active_dev == -1) {
+		if (matching != -1) {
+			printk(KERN_INFO "ff: selected wan now %s\n",
+			       ff.wan_netdev_name);
+			ff.wan_active_dev = matching;
+		}
+	} else if (matching == -1) {
+		printk(KERN_INFO "ff: no more selected wan\n");
+		ff.wan_active_dev = -1;
+	} else if (matching != ff.wan_active_dev) {
+		printk(KERN_INFO "ff: selected wan changed\n");
+		ff.wan_active_dev = matching;
+	}
+
+	__ff_tun_read_params();
+}
+
+/*
+ * Mark one ff device slot active under the per-cpu ff locks; a wan
+ * slot becoming active may change the wan election.
+ */
+static void ff_dev_mark_active(struct ff_dev_ctx *ff_dev, size_t dev_idx)
+{
+	ff_lock_all_cpu_bh();
+
+	ff_dev->active = true;
+	if (ff_idx_is_wan(dev_idx))
+		__ff_select_active_wan();
+
+	ff_unlock_all_cpu_bh();
+
+	printk(KERN_INFO "ff: ff_dev %s: now active\n", ff_dev->netdev->name);
+}
+
+/*
+ * Mark one ff device slot inactive under the per-cpu ff locks; a wan
+ * slot becoming inactive may change the wan election.  Only logs
+ * when the slot was actually active before.
+ */
+static void ff_dev_mark_inactive(struct ff_dev_ctx *ff_dev, size_t dev_idx)
+{
+	bool prev;
+
+	ff_lock_all_cpu_bh();
+
+	prev = ff_dev->active;
+	ff_dev->active = false;
+	if (ff_idx_is_wan(dev_idx))
+		__ff_select_active_wan();
+
+	ff_unlock_all_cpu_bh();
+
+	if (prev)
+		printk(KERN_INFO "ff: ff_dev %s: now inactive\n",
+		       ff_dev->netdev->name);
+}
+
+/*
+ * Handle a netdevice notifier event for one configured ff device
+ * slot (@dev_idx indexes both ff.devs and ff.devs_desc; @ptr is the
+ * raw notifier payload, only used for NETDEV_CHANGEUPPER).
+ *
+ * UP/CHANGE captures the device (holding a reference) and marks the
+ * slot active; CHANGEUPPER re-resolves bridge membership; the down
+ * events release the reference and clear all cached pointers.
+ */
+static void ff_notifier_event_dev(struct net_device *netdev,
+				  unsigned long event,
+				  unsigned int dev_idx,
+				  void *ptr)
+{
+	const struct ff_dev_desc *desc = &ff.devs_desc[dev_idx];
+	struct ff_dev_ctx *ff_dev = &ff.devs[dev_idx];
+	struct net_device *real_netdev = netdev;
+
+	switch (event) {
+	case NETDEV_UP:
+	case NETDEV_CHANGE:
+	{
+		bool found;
+
+		if (ff_dev->active) {
+			/* ignore up event while already active */
+			return;
+		}
+
+		/* for vlan devices, also track the underlying real
+		 * device */
+		if (is_vlan_dev(netdev)) {
+			ff_dev->vlan_id = ntohs(vlan_dev_vlan_id(netdev));
+			real_netdev = vlan_dev_upper_dev(netdev);
+		} else
+			ff_dev->vlan_id = 0;
+
+		if (!(netdev->flags & IFF_UP))
+			return;
+
+		/* vlan case: the real device must be up as well */
+		if (real_netdev != netdev && !(real_netdev->flags & IFF_UP))
+			return;
+
+		/* does this device matches one hardware port */
+		if (!desc->is_hardware)
+			ff_dev->is_hardware = 0;
+		else {
+			struct ess_port *port;
+
+			found = false;
+			list_for_each_entry(port, &ff.priv->ports, next) {
+				if (port->netdev != real_netdev)
+					continue;
+
+				found = true;
+				break;
+			}
+
+			if (!found)
+				return;
+
+			ff_dev->hw_id = port->ppe_port_id;
+			ff_dev->is_hardware = 1;
+		}
+
+		/* drop the reference on any previously captured
+		 * netdev before replacing it */
+		if (ff_dev->netdev)
+			dev_put(ff_dev->netdev);
+
+		ff_dev->netdev = netdev;
+		dev_hold(netdev);
+		ff_dev->real_netdev = real_netdev;
+		memcpy(ff_dev->hwaddr, netdev->dev_addr, 6);
+
+		/* resolve bridge */
+		if (desc->bridge_name) {
+			if (ff_dev_resolve_bridge(ff_dev, desc->bridge_name))
+				return;
+		}
+
+		ff_dev_mark_active(ff_dev, dev_idx);
+		break;
+	}
+
+	case NETDEV_CHANGEUPPER:
+	{
+		const struct netdev_notifier_changeupper_info *info = ptr;
+
+		/* only relevant for devices expected inside a bridge,
+		 * and only once the device has been captured */
+		if (!desc->bridge_name || !ff_dev->netdev)
+			return;
+
+		if (!ff_dev->active) {
+			/* device may have just joined its bridge */
+			if (!ff_dev_resolve_bridge(ff_dev, desc->bridge_name))
+				ff_dev_mark_active(ff_dev, dev_idx);
+		} else {
+			/* device may have left its bridge */
+			if (!info->linking ||
+			    ff_dev_resolve_bridge(ff_dev, desc->bridge_name))
+				ff_dev_mark_inactive(ff_dev, dev_idx);
+		}
+		break;
+	}
+
+	case NETDEV_GOING_DOWN:
+	case NETDEV_DOWN:
+	case NETDEV_UNREGISTER:
+		if (!ff_dev->netdev)
+			return;
+
+		ff_dev_mark_inactive(ff_dev, dev_idx);
+
+		/* remove all references */
+		dev_put(netdev);
+		ff_dev->netdev = NULL;
+		ff_dev->real_netdev = NULL;
+		ff_dev->br_port = NULL;
+		ff_dev->fbxbr_port = NULL;
+		ff_dev->br_netdev = NULL;
+		break;
+	}
+}
+
+/*
+ * Dispatch a netdevice notifier event to the interested ff handlers.
+ *
+ * Matching is done, in order, against: the tunnel device name, the
+ * configured ff device names (both exclusive), bridge/fbxbridge
+ * master devices, and finally the real (lower) device of vlan ff
+ * devices.  Always returns 0.
+ *
+ * Cleanup: the inner loop previously redeclared a shadowing
+ * `size_t i`, and the unlock+return sequence was triplicated; use a
+ * single exit label instead.
+ */
+static int ff_notifier_event(struct net_device *dev, unsigned long event,
+			     void *ptr)
+{
+	size_t i;
+
+	mutex_lock(&ff_notifier_mutex);
+
+	/* tunnel device match: handled exclusively */
+	if (!strcmp(dev->name, ff.tun_netdev_name)) {
+		ff_notifier_event_tunnel(dev, event);
+		goto out;
+	}
+
+	/* ff device name match: handled exclusively */
+	for (i = 0; i < ARRAY_SIZE(ff.devs_desc); i++) {
+		if (!strcmp(dev->name, ff.devs_desc[i].netdev_name)) {
+			ff_notifier_event_dev(dev, event, i, ptr);
+			goto out;
+		}
+	}
+
+	/*
+	 * bridge/fbxbridge match
+	 *
+	 * a bridge can change up/down status, but the lower netdev
+	 * will not get CHANGE_UPPER, so recheck every ff device
+	 * attached to a bridge
+	 */
+	if (netif_is_bridge_master(dev) || netif_is_fbxbridge_master(dev)) {
+		for (i = 0; i < ARRAY_SIZE(ff.devs_desc); i++) {
+			struct ff_dev_ctx *ff_dev = &ff.devs[i];
+			const char *bridge_name = ff.devs_desc[i].bridge_name;
+
+			if (!bridge_name)
+				continue;
+
+			if (!ff_dev->netdev)
+				continue;
+
+			if (!ff_dev->active) {
+				if (!ff_dev_resolve_bridge(ff_dev,
+							   bridge_name))
+					ff_dev_mark_active(ff_dev, i);
+			} else {
+				if (!ff_dev_bridge_is_up(ff_dev))
+					ff_dev_mark_inactive(ff_dev, i);
+			}
+		}
+	}
+
+	/* real (lower) device match for vlan ff devices */
+	for (i = 0; i < ARRAY_SIZE(ff.devs); i++) {
+		if (dev == ff.devs[i].real_netdev)
+			ff_notifier_event_dev(ff.devs[i].netdev, event, i, ptr);
+	}
+
+out:
+	mutex_unlock(&ff_notifier_mutex);
+	return 0;
+}
+
+/*
+ * netdevice notifier entry point: ignore events coming from other
+ * network namespaces, then hand the event to ff_notifier_event().
+ */
+static int ff_notifier_event_cb(struct notifier_block *this,
+				unsigned long event, void *ptr)
+{
+	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+	/* the fastpath only tracks devices in the initial namespace */
+	if (net_eq(dev_net(netdev), &init_net))
+		ff_notifier_event(netdev, event, ptr);
+
+	return 0;
+}
+
+/*
+ * sysfs "ff_enabled" show handler: report the current fastpath
+ * enable state as a decimal value followed by a newline.
+ */
+static ssize_t ff_show_enabled(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	/* sysfs_emit() is the recommended helper for show callbacks:
+	 * unlike bare sprintf() it bounds the output to PAGE_SIZE */
+	return sysfs_emit(buf, "%u\n", ff_enabled);
+}
+
+/*
+ * sysfs "ff_enabled" store handler: parse a decimal value and
+ * switch the fastpath on (non-zero) or off (zero), logging only
+ * actual state changes.
+ */
+static ssize_t ff_store_enabled(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t len)
+{
+	unsigned long want;
+
+	if (kstrtoul(buf, 10, &want))
+		return -EINVAL;
+
+	/* nothing to do (and nothing to log) if unchanged */
+	if (ff_enabled == want)
+		return len;
+
+	printk(KERN_NOTICE "ff: fastpath now %s\n",
+	       want ? "enabled" : "disabled");
+	ff_enabled = want;
+	return len;
+}
+
+/* sysfs attribute "ff_enabled": 0644 == S_IRUGO | S_IWUSR */
+static struct device_attribute dev_attr_ff = {
+	.show	= ff_show_enabled,
+	.store	= ff_store_enabled,
+	.attr	= { .name = "ff_enabled", .mode = 0644 },
+};
+
+/*
+ * sysfs "ff_tun_dev" show handler: print the name of the tunnel
+ * netdevice captured by the fastpath (empty string when unset).
+ */
+static ssize_t ff_show_tun_dev(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	/* sysfs_emit() bounds the output to PAGE_SIZE, unlike the
+	 * previous unchecked sprintf() */
+	return sysfs_emit(buf, "%s\n", ff.tun_netdev_name);
+}
+
+/*
+ * sysfs "ff_tun_dev" store handler: set (or clear, on empty input)
+ * the name of the tunnel netdevice the fastpath should capture.
+ *
+ * Fix: the previous strncpy(ff.tun_netdev_name, buf, len) used the
+ * caller-controlled length, so it could overflow the destination
+ * array and could leave it without a NUL terminator before strim()
+ * walks it.  strscpy() bounded by the destination size fixes both
+ * (sysfs guarantees buf is NUL-terminated).
+ */
+static ssize_t ff_store_tun_dev(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t len)
+{
+	if (!len || buf[0] == '\n') {
+		/* empty input: forget the tunnel device */
+		ff.tun_netdev_name[0] = 0;
+		ff_lock_all_cpu_bh();
+		__ff_tun_release();
+		ff_unlock_all_cpu_bh();
+		printk(KERN_NOTICE "ff: tun dev unset\n");
+		return len;
+	}
+
+	ff_lock_all_cpu_bh();
+	__ff_tun_release();
+	strscpy(ff.tun_netdev_name, buf, sizeof(ff.tun_netdev_name));
+	strim(ff.tun_netdev_name);
+	printk(KERN_NOTICE "ff: tun dev set to %s\n", ff.tun_netdev_name);
+	__ff_tun_capture();
+	ff_unlock_all_cpu_bh();
+	return len;
+}
+
+/* sysfs attribute "ff_tun_dev": 0644 == S_IRUGO | S_IWUSR */
+static struct device_attribute dev_attr_tun = {
+	.show	= ff_show_tun_dev,
+	.store	= ff_store_tun_dev,
+	.attr	= { .name = "ff_tun_dev", .mode = 0644 },
+};
+
+/*
+ * sysfs "ff_wan_dev" show handler: print the requested wan
+ * netdevice name (empty string when unset).
+ */
+static ssize_t ff_show_wan_dev(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	/* sysfs_emit() bounds the output to PAGE_SIZE, unlike the
+	 * previous unchecked sprintf() */
+	return sysfs_emit(buf, "%s\n", ff.wan_netdev_name);
+}
+
+/*
+ * sysfs "ff_wan_dev" store handler: set (or clear, on empty input)
+ * the requested wan device name, re-running wan selection around
+ * the change.
+ *
+ * Fix: the previous strncpy(ff.wan_netdev_name, buf, len) used the
+ * caller-controlled length, so it could overflow the destination
+ * array and could leave it without a NUL terminator before strim()
+ * walks it.  strscpy() bounded by the destination size fixes both
+ * (sysfs guarantees buf is NUL-terminated).
+ */
+static ssize_t ff_store_wan_dev(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t len)
+{
+	if (!len || buf[0] == '\n') {
+		/* empty input: drop the requested wan device */
+		ff.wan_netdev_name[0] = 0;
+		ff_lock_all_cpu_bh();
+		__ff_select_active_wan();
+		ff_unlock_all_cpu_bh();
+		printk(KERN_NOTICE "ff: requested wan dev unset\n");
+		return len;
+	}
+
+	ff_lock_all_cpu_bh();
+	/* clear first so selection runs without a stale request */
+	ff.wan_netdev_name[0] = 0;
+	__ff_select_active_wan();
+	strscpy(ff.wan_netdev_name, buf, sizeof(ff.wan_netdev_name));
+	strim(ff.wan_netdev_name);
+	printk(KERN_NOTICE "ff: requested wan dev set to %s\n",
+	       ff.wan_netdev_name);
+	__ff_select_active_wan();
+	ff_unlock_all_cpu_bh();
+	return len;
+}
+
+/* sysfs attribute "ff_wan_dev": 0644 == S_IRUGO | S_IWUSR */
+static struct device_attribute dev_attr_wan = {
+	.show	= ff_show_wan_dev,
+	.store	= ff_store_wan_dev,
+	.attr	= { .name = "ff_wan_dev", .mode = 0644 },
+};
+
+/*
+ * One-time fastpath initialization: per-cpu lock setup, sysfs
+ * attributes, netdevice notifier registration, one dedicated tx
+ * ring plus reclaim kthread per reserved ring index, and UDF
+ * (user-defined field) profile 0 programming so the parser extracts
+ * the 2 bytes at offset 2 from L3 start of IPv6 packets (the low 16
+ * bits of the flow label).
+ *
+ * Returns 0 on success, or a negative errno when a tx ring or its
+ * reclaim thread could not be set up.
+ *
+ * NOTE(review): the failure path tears down tx rings and threads
+ * but leaves the sysfs files and the netdevice notifier registered
+ * — confirm callers treat ff_init() failure as fatal.
+ */
+static int ff_init(struct ipq95xx_ess_priv *priv)
+{
+	union ipr_udf_ctrl_u udfp;
+	union ipr_udf_profile_base_u udfb;
+	union ipr_udf_profile_offset_u udfo;
+	struct device *dev = &priv->pdev->dev;
+	int i, ret;
+
+	for_each_possible_cpu(i) {
+                spinlock_t *lock = &per_cpu(ff_plock, i);
+                spin_lock_init(lock);
+        }
+
+	/* -1 means no wan device currently active */
+	ff.priv = priv;
+	ff.wan_active_dev = -1;
+	device_create_file(dev, &dev_attr_ff);
+	device_create_file(dev, &dev_attr_wan);
+	device_create_file(dev, &dev_attr_tun);
+
+	ff_notifier.notifier_call = ff_notifier_event_cb;
+	register_netdevice_notifier(&ff_notifier);
+
+	/* allocated dedicated tx queues */
+	for (i = 0; i < priv->txdr_reserved_count; i++) {
+		int rindex = priv->txdr_reserved_start + i;
+		struct ff_pcpu_ctx *ffc = per_cpu_ptr(&ff_pcpu, i);
+		struct tx_ring *txr = &ffc->tx_ring;
+		struct task_struct *t;
+
+		/* "bare" ring: no netdev port attached (txr->port NULL) */
+		ret = txr_setup_bare(priv, txr, i, rindex, FF_TXQ_BUF_COUNT);
+		if (ret)
+			goto fail;
+
+		/* one reclaim thread pinned per cpu */
+		t = kthread_create_on_cpu(ff_reclaim_threadfn,
+					  priv, i, "ff_reclaim%d");
+		if (IS_ERR(t)) {
+			ret = PTR_ERR(t);
+			goto fail;
+		}
+
+		ffc->tx_reclaim_thread = t;
+		wake_up_process(t);
+	}
+
+	/* enable rings only once all of them are set up */
+	for (i = 0; i < priv->txdr_reserved_count; i++) {
+		struct ff_pcpu_ctx *ffc = per_cpu_ptr(&ff_pcpu, i);
+		struct tx_ring *txr = &ffc->tx_ring;
+		txr_enable(txr);
+	}
+
+	/* prepare UDF (user defined field) profile 0 to match last
+	 * two bytes of IPv6 flow label (2 bytes from l3 start) */
+	memset(&udfp, 0, sizeof (udfp));
+	udfp.bf.valid = 1;
+	udfp.bf.l3_type = 3; /* 0:other 1:ipv4 2:arp 3:ipv6 */
+	udfp.bf.l3_type_incl = 1;
+	udfp.bf.udf_profile = 0;
+	/* put it last in table as QCA does */
+	ppe_writel(priv, IPR_UDF_CTRL_REG(15), udfp.val);
+
+	/* match last two bytes of the flow-label field, that
+	 * is 2 bytes offset from L3 start */
+	memset(&udfb, 0, sizeof (udfb));
+	udfb.bf.udf0_base = 1; /* 0:L2 1:L3: 2:L4 */
+	ppe_writel(priv, IPR_UDF_PROFILE_BASE_REG(0), udfb.val);
+
+	memset(&udfo, 0, sizeof (udfo));
+	udfo.bf.udf0_offset = 2 / 2; /* offset is in 2-byte units */
+	ppe_writel(priv, IPR_UDF_PROFILE_OFFSET_REG(0), udfo.val);
+
+	return 0;
+
+fail:
+	/* NOTE(review): this iterates over ALL reserved rings, including
+	 * ones the loop above never set up — presumably txr_release() is
+	 * safe on a zeroed ring; confirm */
+	for (i = 0; i < priv->txdr_reserved_count; i++) {
+		struct ff_pcpu_ctx *ffc = per_cpu_ptr(&ff_pcpu, i);
+		struct tx_ring *txr = &ffc->tx_ring;
+
+		if (ffc->tx_reclaim_thread) {
+			kthread_stop(ffc->tx_reclaim_thread);
+			ffc->tx_reclaim_thread = NULL;
+		}
+
+		txr_release(txr);
+	}
+	return ret;
+}
+
+/*
+ * Tear down everything ff_init() set up: per-cpu reclaim threads
+ * and dedicated tx rings first, then the sysfs attributes and the
+ * netdevice notifier.
+ */
+static void ff_release(struct ipq95xx_ess_priv *priv)
+{
+	struct device *dev = &priv->pdev->dev;
+	int i;
+
+	for (i = 0; i < priv->txdr_reserved_count; i++) {
+		struct ff_pcpu_ctx *ffc = per_cpu_ptr(&ff_pcpu, i);
+		struct tx_ring *txr = &ffc->tx_ring;
+
+		/* stop the reclaim thread before stopping the ring so it
+		 * cannot touch a dead ring */
+		if (ffc->tx_reclaim_thread) {
+			kthread_stop(ffc->tx_reclaim_thread);
+			ffc->tx_reclaim_thread = NULL;
+		}
+
+		txr_stop(txr);
+		txr_release(txr);
+	}
+
+	device_remove_file(dev, &dev_attr_ff);
+	device_remove_file(dev, &dev_attr_wan);
+	device_remove_file(dev, &dev_attr_tun);
+	unregister_netdevice_notifier(&ff_notifier);
+}
+
+/*
+ * Release the buffer attached to one tx completion descriptor.
+ *
+ * For ff rings (txr->port == NULL) the buffer is a page_pool page
+ * and is returned to its pool (no skb involved); for port rings the
+ * buffer is DMA-unmapped and the skb, when this descriptor carried
+ * one, is freed.
+ *
+ * Returns the freed skb's byte length, or 0 when no skb was
+ * attached.  Fix: @len was previously returned uninitialized on the
+ * ff-ring path (and by the DEBUG_FF early return), feeding garbage
+ * into the caller's done_len accumulation.
+ */
+static unsigned int txr_release_desc(struct tx_ring *txr, int idx)
+{
+	struct tx_meta_info *mi;
+	unsigned int len = 0;
+	struct edma_txcmpl_desc *cdesc;
+	int meta_idx;
+
+	/* tdes0 of the completion descriptor is the index into the
+	 * software meta-info ring */
+	cdesc = &txr->tx_cmpl_desc_area[idx];
+	meta_idx = cdesc->tdes0;
+#ifdef DEBUG_FF
+	cdesc->tdes0 = 0x11111;
+#endif
+	mi = &txr->tx_meta_info[meta_idx];
+
+#ifdef DEBUG_FF
+	if (meta_idx == 0x11111) {
+		printk("FFR[%d/%d] BAD meta:%x\n",
+		       txr->index, idx, meta_idx);
+	}
+#endif
+
+	if (!txr->port) {
+		/* ff queue: page_pool backed buffer */
+#ifdef DEBUG_FF
+		if (mi->page == (void *)0xdeadbeef ||
+		    mi->page == (void *)0xdeadbeee) {
+			printk("BAD FRAG, idx:%u meta_id:%u\n", idx, meta_idx);
+			return len;
+		}
+#endif
+		page_pool_put_full_page(mi->page->pp, mi->page, false);
+	} else {
+		struct sk_buff *skb;
+
+		dma_unmap_single(txr->dev, mi->paddr, mi->map_len, DMA_TO_DEVICE);
+		/* only the last buffer of a packet carries the skb */
+		skb = mi->skb;
+		if (skb) {
+			len = skb->len;
+			dev_kfree_skb(skb);
+		}
+	}
+
+#ifdef DEBUG_FF
+	/* poison the entry so reuse is detectable */
+	mi->page = (void *)0xdeadbeef;
+	mi->paddr = 0x42424242;
+	mi->map_len = 16;
+#endif
+
+	return len;
+}
+
+/*
+ * Release the buffer attached to a tx descriptor that was armed but
+ * never handed to hardware (ring teardown path).  Same buffer
+ * ownership rules as txr_release_desc(): page_pool page for ff
+ * rings, DMA mapping + optional skb for port rings.
+ *
+ * Fix: dropped the dead local `len` — it was computed but never
+ * used (the function returns void).
+ */
+static void txr_release_unsent_desc(struct tx_ring *txr, int idx)
+{
+	struct tx_meta_info *mi;
+	struct edma_txdesc_desc *desc;
+	int meta_idx;
+
+	/* tdes2 of the tx descriptor holds the meta-info ring index */
+	desc = &txr->tx_desc_area[idx];
+	meta_idx = desc->tdes2;
+	mi = &txr->tx_meta_info[meta_idx];
+
+	if (!txr->port) {
+		/* this is an ff queue */
+		page_pool_put_full_page(mi->page->pp, mi->page, false);
+	} else {
+		dma_unmap_single(txr->dev, mi->paddr, mi->map_len,
+				 DMA_TO_DEVICE);
+		/* only the last buffer of a packet carries the skb */
+		if (mi->skb)
+			dev_kfree_skb(mi->skb);
+	}
+}
+
+
+/*
+ * Push the cached tx descriptor producer index to hardware so it
+ * starts fetching the newly armed descriptors.  No-op when nothing
+ * was queued (desc_prod_dirty unset).
+ *
+ * NOTE(review): desc_prod_dirty is not cleared after the write —
+ * presumably redundant kicks are harmless or the flag is managed by
+ * the enqueue path; confirm.
+ */
+static void txr_kick(struct tx_ring *txr)
+{
+	if (unlikely(!txr->desc_prod_dirty))
+		return;
+
+	/* technically slower than writel(), but gives better
+	 * results */
+	wmb();
+	edma_writel_relaxed(txr->priv,
+			    EDMA_TXDESC_PRODx_IDX_REG(txr->hw_index),
+			    txr->desc_prod_idx);
+}
+
+/*
+ * Check whether the tx ring can accept one worst-case packet: the
+ * maximum number of TSO segments the hardware supports, plus one
+ * extra descriptor for the header.
+ */
+static inline bool txr_has_room(struct tx_ring *txr)
+{
+	const unsigned int needed = ESS_TSO_MAX_SEGS + 1;
+
+	return txr->descs_avail_cache >= needed;
+}
+
+/*
+ * Read the hardware tx consumer index and compute how many
+ * descriptors are free in the ring.  One slot is always kept empty
+ * so a full ring can be told apart from an empty one.
+ */
+static unsigned int txr_compute_avail_desc(struct ipq95xx_ess_priv *priv,
+					   struct tx_ring *txr)
+{
+	unsigned int used;
+	u32 hw_cons;
+
+	hw_cons = edma_readl(priv, EDMA_TXDESC_CONSx_IDX_REG(txr->hw_index));
+	used = (txr->desc_prod_idx - hw_cons) & (txr->ring_size - 1);
+	return txr->ring_size - 1 - used;
+}
+
+/*
+ * True when the cached completion count says there is at least one
+ * sent descriptor waiting to be reclaimed.
+ */
+static inline bool txr_can_reclaim(struct tx_ring *txr)
+{
+	return txr->descs_sent_cache != 0;
+}
+
+/*
+ * Read the hardware tx completion producer index and return how
+ * many completed descriptors are waiting between our completion
+ * consumer index and it.
+ */
+static unsigned int txr_compute_sent_descs(struct ipq95xx_ess_priv *priv,
+					   struct tx_ring *txr)
+{
+	u32 hw_prod;
+
+	hw_prod = edma_readl(priv, EDMA_TXCMPL_PRODx_IDX_REG(txr->hw_index));
+	return (hw_prod - txr->cmpl_cons_idx) & (txr->ring_size - 1);
+}
+
+/*
+ * Reclaim up to @budget completed tx descriptors without touching
+ * any netdev queue state (works for both port rings and ff rings).
+ *
+ * The total byte count of freed skbs is returned through @pdone_len
+ * when non-NULL; the return value is the number of descriptors
+ * reclaimed.
+ */
+static int txr_reclaim_bare(struct tx_ring *txr, int budget,
+			    unsigned int *pdone_len)
+{
+	struct ipq95xx_ess_priv *priv = txr->priv;
+	u32 cons_idx;
+	unsigned int done_len;
+	int done, todo;
+
+	/* only hit the hardware register when the cached completion
+	 * count ran dry */
+	if (unlikely(!txr_can_reclaim(txr)))
+		txr->descs_sent_cache = txr_compute_sent_descs(priv, txr);
+
+	if (unlikely(!txr_can_reclaim(txr))) {
+		if (pdone_len)
+			*pdone_len = 0;
+		return 0;
+	}
+
+	todo = min_t(int, budget, txr->descs_sent_cache);
+
+	cons_idx = txr->cmpl_cons_idx;
+	done = 0;
+	done_len = 0;
+	while (done < todo) {
+		done_len += txr_release_desc(txr, cons_idx);
+		cons_idx++;
+		cons_idx &= (txr->ring_size - 1);
+		done++;
+	}
+
+	txr->cmpl_cons_idx = cons_idx;
+	txr->descs_sent_cache -= done;
+	/* acknowledge the consumed completions to hardware */
+	edma_writel(priv, EDMA_TXCMPL_CONSx_IDX_REG(txr->hw_index), cons_idx);
+
+	if (pdone_len)
+		*pdone_len = done_len;
+
+	return done;
+}
+
+/*
+ * Reclaim completed tx descriptors of a port ring, update the BQL
+ * accounting, and wake the netdev tx queue when enough room is back
+ * for a worst-case packet.
+ *
+ * NOTE(review): txr->port is dereferenced unconditionally —
+ * presumably this path never runs for the port-less ff rings (they
+ * go through txr_reclaim_bare()); confirm.
+ */
+static int txr_reclaim(struct tx_ring *txr, int budget)
+{
+	struct ipq95xx_ess_priv *priv = txr->priv;
+	struct ess_port *port = txr->port;
+	struct netdev_queue *netdev_txq;
+	unsigned int done, done_len;
+
+	spin_lock(&txr->lock);
+
+	done = txr_reclaim_bare(txr, budget, &done_len);
+	netdev_txq = netdev_get_tx_queue(port->netdev, txr->index);
+	netdev_tx_completed_queue(netdev_txq, done, done_len);
+
+	if (likely(!netif_tx_queue_stopped(netdev_txq)))
+		goto end;
+
+	/* make sure we have actual room in tx queue before waking it
+	 * up */
+	if (!txr_has_room(txr))
+		txr->descs_avail_cache = txr_compute_avail_desc(priv, txr);
+
+	if (likely(txr_has_room(txr)))
+		netif_tx_wake_queue(netdev_txq);
+
+end:
+	spin_unlock(&txr->lock);
+	return done;
+}
+
+/*
+ * NAPI poll for a tx completion ring: reclaim sent packets and
+ * re-enable the completion interrupt once under budget.
+ *
+ * Fix: napi_complete_done() must be passed the amount of work
+ * actually done, not the budget — the value feeds the kernel's
+ * gro_flush_timeout / napi_defer_hard_irqs heuristics.  Also moved
+ * the branch-prediction hint to cover the whole condition
+ * (unlikely(!budget) instead of !unlikely(budget)).
+ */
+static int txr_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct tx_ring *txr;
+	int work_done;
+
+	txr = container_of(napi, struct tx_ring, napi);
+
+	/* netpoll can invoke us with a zero budget */
+	if (unlikely(!budget))
+		return 0;
+
+	work_done = txr_reclaim(txr, budget);
+
+	if (work_done < budget) {
+		napi_complete_done(napi, work_done);
+
+		/* enable interrupt again */
+		edma_writel(txr->priv,
+			    EDMA_TX_INT_MASKx_REG(txr->hw_index),
+			    TX_INT_PKT_DONE_MASK);
+	}
+
+	return work_done;
+}
+
+/*
+ * tx completion interrupt handler: schedule the ring's NAPI poller
+ * and mask this ring's interrupt; the poller unmasks it when it
+ * finishes under budget.
+ */
+static irqreturn_t edma_tx_compl_isr(int irq, void *dev_id)
+{
+	struct tx_ring *txr = dev_id;
+	struct ipq95xx_ess_priv *priv = txr->priv;
+
+	/* mask tx irq */
+	if (likely(napi_schedule_prep(&txr->napi))) {
+		__napi_schedule(&txr->napi);
+		edma_writel(priv, EDMA_TX_INT_MASKx_REG(txr->hw_index), 0);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * ndo_start_xmit handler: map the skb (linear head plus page frags)
+ * onto the per-queue tx descriptor ring and hand it to the EDMA.
+ *
+ * Descriptor layout per buffer: tdes0 = DMA address, tdes2 = index
+ * into the software meta-info ring, tdes5 = length + csum flags,
+ * tdes4 = destination PPE port (first buffer only), tdes6 = MSS for
+ * TSO.  Only the meta entry of the LAST buffer keeps the skb
+ * pointer, so the completion path frees the skb exactly once.
+ *
+ * NOTE(review): when dma_map_single() fails on a fragment, the
+ * producer index is not committed (hardware never sees the partial
+ * packet) but the head/previous mappings are not unmapped before
+ * the skb is freed — DMA mapping leak on that error path; confirm.
+ */
+static netdev_tx_t ess_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ess_port *port = netdev_priv(dev);
+	struct ipq95xx_ess_priv *priv = port->priv;
+	struct netdev_queue *netdev_txq;
+	struct tx_ring *txr;
+	struct tx_meta_info *mi;
+	struct edma_txdesc_desc *desc;
+	dma_addr_t address;
+	u32 prod_idx, desc_count;
+	u16 queue;
+	unsigned int i, nr_frags;
+	int meta_idx;
+
+	queue = skb_get_queue_mapping(skb);
+	netdev_txq = netdev_get_tx_queue(dev, queue);
+	txr = &port->tx_rings[queue];
+
+	spin_lock(&txr->lock);
+
+	/* make sure we have room */
+	if (unlikely(!txr_has_room(txr)))
+		txr->descs_avail_cache = txr_compute_avail_desc(priv, txr);
+
+	if (unlikely(!txr_has_room(txr))) {
+		/* queue was already full */
+		netif_tx_stop_queue(netdev_txq);
+		if (net_ratelimit())
+			netdev_err(dev, "txq[%d] full unexpected\n",
+				   txr->index);
+		spin_unlock(&txr->lock);
+		return NETDEV_TX_BUSY;
+	}
+
+	/* sanity check, make sure kernel respect tso_max_segs */
+	if (unlikely(skb_shinfo(skb)->nr_frags > ESS_TSO_MAX_SEGS)) {
+		/* NOTE(review): error message looks copy-pasted from the
+		 * ring-full case above */
+		if (net_ratelimit())
+			netdev_err(dev, "txq[%d] full unexpected\n",
+				   txr->index);
+		spin_unlock(&txr->lock);
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* map buffer */
+	address = dma_map_single(dev->dev.parent,
+				 skb->data,
+				 skb_headlen(skb),
+				 DMA_TO_DEVICE);
+
+	if (unlikely(dma_mapping_error(dev->dev.parent, address))) {
+		netdev_err(dev, "dma_map_single failed\n");
+		dev_kfree_skb(skb);
+		spin_unlock(&txr->lock);
+		return NETDEV_TX_OK;
+	}
+
+	/* point to the next available desc */
+	meta_idx = txr->meta_idx;
+	prod_idx = txr->desc_prod_idx;
+	desc_count = 0;
+
+	/* first descriptor: head of the skb, carries destination port
+	 * and checksum/TSO flags */
+	desc = &txr->tx_desc_area[prod_idx];
+	mi = &txr->tx_meta_info[meta_idx];
+	desc->tdes0 = cpu_to_le32(address);
+	desc->tdes1 = 0;
+	desc->tdes2 = meta_idx;
+	desc->tdes4 = EDMA_TXDESC_MK_DPORT_ID(port->ppe_port_id);
+	desc->tdes5 = skb_headlen(skb) << EDMA_TXDESC_DATA_LEN_SET_SHIFT;
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		desc->tdes5 |= EDMA_TXDESC_GEN_IP_CSUM_MASK |
+			EDMA_TXDESC_GEN_L4_CSUM_MASK |
+			EDMA_TXDESC_ADV_OFFSET_MASK;
+	}
+
+	if (skb_is_gso(skb)) {
+		u32 mss;
+
+		/* clamp MSS to the range the hardware supports */
+		desc->tdes5 |= EDMA_TXDESC_TSO_EN_MASK;
+		mss = skb_shinfo(skb)->gso_size;
+		if (unlikely(mss < ESS_TSO_MIN_MSS))
+			mss = ESS_TSO_MIN_MSS;
+		else if (unlikely(mss > ESS_TSO_MAX_MSS))
+			mss = ESS_TSO_MAX_MSS;
+
+		desc->tdes6 = mss << EDMA_TXDESC_MSS_SHIFT;
+	} else
+		desc->tdes6 = 0;
+
+	desc->tdes7 = 0;
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	mi->paddr = address;
+	mi->map_len = skb_headlen(skb);
+	mi->skb = skb;
+
+	i = 0;
+	for (i = 0; i < nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		u32 frag_len;
+
+		frag_len = skb_frag_size(frag);
+		if (unlikely(!frag_len)) {
+			netdev_err(port->netdev, "zero-len frag skipped\n");
+			continue;
+		}
+
+		/* map fragment */
+		address = dma_map_single(dev->dev.parent,
+					 skb_frag_address(frag),
+					 frag_len,
+					 DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev->dev.parent, address))) {
+			netdev_err(dev, "dma_map_single failed\n");
+			dev_kfree_skb(skb);
+			spin_unlock(&txr->lock);
+			return NETDEV_TX_OK;
+		}
+
+		/* set more bit on previous desc */
+		desc->tdes1 |= EDMA_TXDESC_MORE_BIT_MASK;
+
+		/* remove skb from previous tx meta data */
+		mi->skb = NULL;
+
+		/* switch to next descriptor */
+		prod_idx++;
+		prod_idx &= (txr->ring_size - 1);
+
+		++meta_idx;
+		if (unlikely(meta_idx >= txr->meta_ring_size))
+			meta_idx = 0;
+
+		desc_count++;
+
+		/* fragment descriptors carry only address/length; the
+		 * current (last-so-far) meta entry keeps the skb */
+		mi = &txr->tx_meta_info[meta_idx];
+		desc = &txr->tx_desc_area[prod_idx];
+		desc->tdes0 = cpu_to_le32(address);
+		desc->tdes1 = 0;
+		desc->tdes2 = meta_idx;
+		desc->tdes4 = 0;
+		desc->tdes5 = frag_len << EDMA_TXDESC_DATA_LEN_SET_SHIFT;
+		desc->tdes6 = 0;
+		desc->tdes7 = 0;
+
+		mi->paddr = address;
+		mi->map_len = frag_len;
+		mi->skb = skb;
+	}
+
+	++meta_idx;
+	if (unlikely(meta_idx >= txr->meta_ring_size))
+		meta_idx = 0;
+
+	prod_idx++;
+	prod_idx &= (txr->ring_size - 1);
+	desc_count++;
+
+	/* commit: hardware only sees the packet after txr_kick() */
+	txr->meta_idx = meta_idx;
+	txr->desc_prod_idx = prod_idx;
+	txr->desc_prod_dirty = true;
+	txr->descs_avail_cache -= desc_count;
+
+	/* delay the doorbell while the stack promises more packets */
+	if (!netdev_xmit_more() || unlikely(!txr_has_room(txr)))
+		txr_kick(txr);
+
+	/* check if queue is now full */
+	if (unlikely(!txr_has_room(txr))) {
+		netif_tx_stop_queue(netdev_txq);
+	}
+
+	netdev_tx_sent_queue(netdev_txq, skb->len);
+	spin_unlock(&txr->lock);
+
+	dev_sw_netstats_tx_add(port->netdev, 1, skb->len);
+
+	return NETDEV_TX_OK;
+}
+
+/*
+ * Set skb->ip_summed from the hardware checksum status bits in
+ * rdes6: CHECKSUM_UNNECESSARY when L4 validated (IPv6), or when
+ * both L3 and L4 validated (IPv4); CHECKSUM_NONE otherwise.
+ */
+static void rxdr_do_rx_csum(u32 rdes6, struct sk_buff *skb)
+{
+	skb_checksum_none_assert(skb);
+
+	/* nothing to claim without a good L4 checksum */
+	if (!(rdes6 & EDMA_RXDESC_L4CSUM_STATUS_MASK))
+		return;
+
+	if (skb->protocol == htons(ETH_P_IPV6)) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else if (skb->protocol == htons(ETH_P_IP)) {
+		/* IPv4 additionally needs a good L3 (header) checksum */
+		if (rdes6 & EDMA_RXDESC_L3CSUM_STATUS_MASK)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+}
+
+/* per-packet snapshot gathered while walking the rx done ring, so
+ * skb construction can happen after the descriptors are consumed */
+struct rx_page {
+	struct page *page;	/* backing page; NULL once stolen by ff */
+	u32 rdes6;		/* checksum status bits from descriptor */
+	u16 src_id;		/* source PPE port id */
+	u16 pkt_len;		/* packet length in bytes */
+};
+
+/*
+ * Process up to @budget received packets from an rx done ring.
+ *
+ * Three phases:
+ *  1) walk the hardware descriptors, collect the backing pages into
+ *     rx_pages[] and start dcache invalidation of the payload (only
+ *     the first FF_MAX_LOOKAHEAD bytes when the fastpath is built
+ *     in, since that is all ff_receive() inspects);
+ *  2) with CONFIG_IPQ95XX_FBX_FF, offer every packet to
+ *     ff_receive(); stolen pages are marked by clearing
+ *     rx_page->page, the rest get the remainder of their payload
+ *     invalidated for the slow path;
+ *  3) build skbs for the packets the fastpath did not consume and
+ *     feed them to GRO; finally flush the ff tx doorbell and
+ *     transmit any skbs the fastpath queued on ff_list.
+ *
+ * Returns the number of descriptors consumed.
+ */
+static int rxdr_process(struct napi_struct *napi,
+			struct rx_done_ring *rxdr, int budget)
+{
+	struct ess_cpu_port *cport = rxdr->cport;
+	struct ipq95xx_ess_priv *priv = cport->priv;
+	struct device *dev = &priv->pdev->dev;
+	const struct ess_cpu_port_pcpu *cport_pdata;
+	const size_t rx_dma_desc_size = sizeof (*rxdr->rx_desc_area);
+	struct sk_buff_head ff_list;
+	u32 prod_idx, cons_idx, dcount, dcount2;
+	int todo, done, page_count, ff_done, i;
+	struct rx_page rx_pages[64];
+
+	cport_pdata = this_cpu_ptr(cport->percpu_priv);
+
+	prod_idx = edma_readl(priv, EDMA_RXDESC_PRODx_IDX_REG(rxdr->hw_index));
+	cons_idx = rxdr->cons_idx;
+
+	ff_done = 0;
+	if (IS_ENABLED(CONFIG_IPQ95XX_FBX_FF)) {
+		struct rx_fill_ring *rxfr = &cport->rxf_rings[rxdr->index];
+
+		/* recycle ff tx completions into the fill ring first */
+		rxfr_refill(rxfr, -1, true);
+		__skb_queue_head_init(&ff_list);
+		ff_lock_this_cpu();
+	}
+
+	/* rx_pages[] is the hard per-poll limit */
+	if (unlikely(budget > ARRAY_SIZE(rx_pages)))
+		budget = ARRAY_SIZE(rx_pages);
+
+	/* do cache management of devices rx dma descriptors:
+	 * dcount covers [cons_idx, end-of-span), dcount2 the wrapped
+	 * part at the start of the ring (if any) */
+	dcount2 = 0;
+	if (prod_idx >= cons_idx) {
+		dcount = prod_idx - cons_idx;
+		if (dcount > budget)
+			dcount = budget;
+	} else {
+		dcount = rxdr->ring_size - cons_idx;
+		if (dcount > budget)
+			dcount = budget;
+		else {
+			dcount2 = prod_idx;
+			if (dcount2 > budget - dcount)
+				dcount2 = budget - dcount;
+		}
+	}
+
+	if (dcount)
+		dma_sync_single_for_cpu(dev,
+					rxdr->rx_desc_dma +
+					cons_idx * rx_dma_desc_size,
+					rx_dma_desc_size * dcount,
+					DMA_FROM_DEVICE);
+
+	if (dcount2)
+		dma_sync_single_for_cpu(dev,
+					rxdr->rx_desc_dma,
+					rx_dma_desc_size * dcount2,
+					DMA_FROM_DEVICE);
+
+	todo = dcount + dcount2;
+	done = 0;
+	page_count = 0;
+
+	/* gather all pages */
+	while (likely(todo--)) {
+		struct edma_rxdesc_desc *desc;
+		struct ess_port *port;
+		unsigned long page_vaddr;
+		unsigned int pkt_len;
+		struct page *page;
+		void *inval_start;
+		u32 src_id, src_type;
+
+		desc = &rxdr->rx_desc_area[cons_idx];
+		pkt_len = EDMA_RXDESC_PACKET_LEN_GET(desc);
+		src_id = (desc->rdes4 & EDMA_RXDESC_SRC_PORT_ID_MASK) >>
+			EDMA_RXDESC_SRC_PORT_ID_SHIFT;
+		src_type = (desc->rdes4 & EDMA_RXDESC_SRC_PORT_TYPE_MASK) >>
+			EDMA_RXDESC_SRC_PORT_TYPE_SHIFT;
+
+		/* find back frag: rdes2/rdes3 hold the low/high 32 bits
+		 * of the page pointer stored at fill time */
+		page_vaddr = ((unsigned long)desc->rdes2 |
+			      ((unsigned long)desc->rdes3 << 32));
+		page = (void *)page_vaddr;
+
+		/* find associated netdevice to this port */
+		if (unlikely(src_type != DEST_INFO_TYPE_PORT_ID ||
+			     src_id == ESS_PHYS_CPU_PORT_ID ||
+			     src_id >= ESS_PHYS_PORT_COUNT)) {
+			if (net_ratelimit())
+				dev_err(&priv->pdev->dev,
+					"unexpected src %u/%u on rx packet\n",
+					src_type, src_id);
+			page_pool_recycle_direct(rxdr->pp, page);
+			goto next_pkt;
+		}
+
+		port = cport_pdata->ports_by_ppe_id[src_id];
+		if (!unlikely(port)) {
+			/* port is not up, should not happen because
+			 * physical port mac is down, but there may be
+			 * traffic still in-flight when we shutdown a
+			 * port */
+			page_pool_recycle_direct(rxdr->pp, page);
+			goto next_pkt;
+		}
+
+		rx_pages[page_count].page = page;
+		rx_pages[page_count].src_id = src_id;
+		rx_pages[page_count].pkt_len = pkt_len;
+		rx_pages[page_count].rdes6 = le32_to_cpu(desc->rdes6);
+		page_count++;
+
+		inval_start = page_address(page) + RX_OFFSET;
+		if (IS_ENABLED(CONFIG_IPQ95XX_FBX_FF)) {
+			unsigned int inval_size;
+
+			/* invalidate cache lines of the area ff will
+			 * be only looking at */
+			inval_size = min_t(unsigned int,
+					   FF_MAX_LOOKAHEAD, pkt_len);
+
+			dmac_inv_range_no_dsb(inval_start,
+					      inval_start + inval_size);
+		} else {
+			dmac_inv_range_no_dsb(inval_start,
+					      inval_start + pkt_len);
+		}
+
+	next_pkt:
+		cons_idx++;
+		cons_idx &= (rxdr->ring_size - 1);
+		done++;
+	}
+
+	/* first pass for ff: barrier completes the pending batched
+	 * invalidations before the data is read */
+	if (IS_ENABLED(CONFIG_IPQ95XX_FBX_FF))
+		dsb(st);
+
+	for (i = 0; IS_ENABLED(CONFIG_IPQ95XX_FBX_FF) && i < page_count; i++) {
+		struct rx_page *rx_page = &rx_pages[i];
+		unsigned int inval_size, inval_done;
+		void *inval_start;
+		struct ess_port *port;
+		struct page *page = rx_page->page;
+		unsigned int src_id = rx_page->src_id;
+		unsigned int pkt_len = rx_page->pkt_len;
+		int ret;
+
+		/* prefetch next skb header */
+		if (i < page_count - 1) {
+			void *next = page_address(rx_pages[i + 1].page);
+			prefetch(next + RX_OFFSET);
+			prefetch(next + RX_OFFSET + 64);
+		}
+
+		port = cport_pdata->ports_by_ppe_id[src_id];
+		rcu_read_lock();
+		ret = ff_receive(dev,
+				 port,
+				 &ff_list,
+				 rxdr->pp,
+				 page,
+				 RX_OFFSET,
+				 pkt_len);
+		rcu_read_unlock();
+
+		if (ret) {
+			dev_sw_netstats_rx_add(port->netdev, pkt_len);
+			ff_done++;
+			/* mark as stolen */
+			rx_pages[i].page = NULL;
+			continue;
+		}
+
+		/* invalidate remaining part for slow receive path */
+		inval_done = min_t(unsigned int, FF_MAX_LOOKAHEAD, pkt_len);
+		inval_start = page_address(page) + RX_OFFSET + inval_done;
+		inval_size = pkt_len - inval_done;
+
+		if (likely(inval_size))
+			dmac_inv_range_no_dsb(inval_start,
+					      inval_start + inval_size);
+	}
+
+	dsb(st);
+
+	/* slow path: skip entirely when ff stole every packet */
+	for (i = 0; i < page_count && ff_done != page_count; i++) {
+		struct rx_page *rx_page = &rx_pages[i];
+		struct page *page = rx_page->page;
+		unsigned int pkt_len = rx_page->pkt_len;
+		struct ess_port *port;
+		struct net_device *netdev;
+		struct sk_buff *skb;
+
+		if (IS_ENABLED(CONFIG_IPQ95XX_FBX_FF)) {
+			if (!rx_page->page)
+				continue;
+		} else	if (i < page_count - 1) {
+			/* prefetch next skb header */
+			void *next = page_address(rx_pages[i + 1].page);
+			prefetch(next + RX_OFFSET);
+			prefetch(next + RX_OFFSET + 64);
+		}
+
+		port = cport_pdata->ports_by_ppe_id[rx_page->src_id];
+		netdev = port->netdev;
+
+		skb = napi_build_skb(page_address(page), PAGE_SIZE);
+		if (unlikely(!skb)) {
+			page_pool_recycle_direct(rxdr->pp, page);
+			/* FIXME increment dropped stats */
+			continue;
+		}
+
+		skb_mark_for_recycle(skb);
+		skb_reserve(skb, RX_OFFSET);
+		skb_put(skb, pkt_len);
+		skb->protocol = eth_type_trans(skb, netdev);
+		if (likely((netdev->features & NETIF_F_RXCSUM)))
+			rxdr_do_rx_csum(rx_page->rdes6, skb);
+
+		dev_sw_netstats_rx_add(netdev, pkt_len);
+		napi_gro_receive(napi, skb);
+	}
+
+	if (IS_ENABLED(CONFIG_IPQ95XX_FBX_FF)) {
+		struct ff_pcpu_ctx *ffc = this_cpu_ptr(&ff_pcpu);
+		struct sk_buff *skb, *tmp;
+
+		ff_unlock_this_cpu();
+
+		/* ring the ff tx doorbell once for the whole batch */
+		if (ff_done)
+			txr_kick(&ffc->tx_ring);
+
+		/* transmit packets the fastpath redirected to netdevs */
+		skb_queue_walk_safe(&ff_list, skb, tmp) {
+			__skb_unlink(skb, &ff_list);
+			dev_queue_xmit(skb);
+		}
+	}
+
+	rxdr->cons_idx = cons_idx;
+	edma_writel(priv, EDMA_RXDESC_CONSx_IDX_REG(rxdr->hw_index), cons_idx);
+	return done;
+}
+
+/*
+ * NAPI poll for an rx done ring: process received packets and
+ * re-enable the rx interrupt once under budget.
+ *
+ * Fix: napi_complete_done() must be passed the amount of work
+ * actually done, not the budget — the value feeds the kernel's
+ * gro_flush_timeout / napi_defer_hard_irqs heuristics.  Also moved
+ * the branch-prediction hint to cover the whole condition.
+ */
+static int rxdr_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct rx_done_ring *rxdr;
+	int work_done;
+
+	rxdr = container_of(napi, struct rx_done_ring, napi);
+
+	/* netpoll can invoke us with a zero budget */
+	if (unlikely(!budget))
+		return 0;
+
+	work_done = rxdr_process(napi, rxdr, budget);
+
+	if (work_done < budget) {
+		napi_complete_done(napi, work_done);
+
+		/* enable interrupt again */
+		edma_writel(rxdr->cport->priv,
+			    EDMA_RXDESC_INT_MASKx_REG(rxdr->hw_index),
+			    RXDESC_INT_PKT_DONE_MASK);
+	}
+
+	return work_done;
+}
+
+/*
+ * rx done interrupt handler: schedule the ring's NAPI poller and
+ * mask this ring's interrupt; the poller unmasks it when it
+ * finishes under budget.
+ */
+static irqreturn_t edma_rx_done_isr(int irq, void *dev_id)
+{
+	struct rx_done_ring *rxdr = dev_id;
+	struct ess_cpu_port *cport = rxdr->cport;
+	struct ipq95xx_ess_priv *priv = cport->priv;
+
+	/* mask rx irq */
+	if (likely(napi_schedule_prep(&rxdr->napi))) {
+		__napi_schedule(&rxdr->napi);
+		edma_writel(priv, EDMA_RXDESC_INT_MASKx_REG(rxdr->hw_index), 0);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * alloc buffer & setup single descriptor in the refill ring
+ *
+ * Pages that came back untouched through the ff fastpath skip the
+ * dcache clean (they carry the recycle flag); fresh or slow-path
+ * pages are synced for device.  rdes2/rdes3 stash the page pointer
+ * (low/high 32 bits) so the rx done path can find it again.
+ *
+ * Returns 0 on success, -ENOMEM when the pool is exhausted.
+ */
+static int rxfr_refill_desc(struct rx_fill_ring *rxfr, int desc_idx)
+{
+	struct ess_cpu_port *cport = rxfr->cport;
+	struct edma_rxfill_desc *desc;
+	struct page *page;
+	dma_addr_t addr;
+
+	page = page_pool_dev_alloc_pages(rxfr->pp);
+	if (!page)
+		return -ENOMEM;
+
+	desc = &rxfr->rx_desc_area[desc_idx];
+	addr = page_pool_get_dma_addr(page);
+
+	/* on ARM64, this will dcache clean only (not invalidate) the
+	 * area */
+	if (page_pool_is_recycled(page)) {
+		/* this page came back to us through ff fastpath and
+		 * should have been untouched, no need to clean
+		 * because no line should be dirty */
+		page_pool_clear_recycle_flag(page);
+	} else {
+		dma_sync_single_for_device(rxfr->dev,
+					   addr + RX_OFFSET,
+					   cport->rx_pkt_size,
+					   DMA_FROM_DEVICE);
+	}
+
+	desc->rdes0 = cpu_to_le32(addr + RX_OFFSET);
+	desc->rdes1 = cpu_to_le32(cport->rx_pkt_size << 16);
+	desc->rdes2 = (unsigned long)page & 0xffffffff;
+	desc->rdes3 = (unsigned long)page >> 32;
+
+	return 0;
+}
+
+/*
+ * True when at least one descriptor slot is free in the fill ring
+ * (according to the cached availability count).
+ */
+static inline bool rxfr_has_room(struct rx_fill_ring *rxfr)
+{
+	return rxfr->descs_avail_cache != 0;
+}
+
+/*
+ * Read the hardware fill-ring consumer index and compute how many
+ * descriptors are free.  One slot is always kept empty so a full
+ * ring can be told apart from an empty one.
+ */
+static unsigned int rxfr_compute_avail_desc(struct ipq95xx_ess_priv *priv,
+					    struct rx_fill_ring *rxfr)
+{
+	unsigned int used;
+	u32 hw_cons;
+
+	hw_cons = edma_readl(priv, EDMA_RXFILL_CONSx_IDX_REG(rxfr->hw_index));
+	used = (rxfr->prod_idx - hw_cons) & (rxfr->ring_size - 1);
+	return rxfr->ring_size - 1 - used;
+}
+
+/*
+ * refill rx fill queue with fresh fragments
+ *
+ * Descriptors are armed preferentially with pages reclaimed from
+ * the per-cpu ff tx completion ring (still DMA-mapped and clean);
+ * otherwise fresh pages are allocated via rxfr_refill_desc().  With
+ * @only_ff set, the amount of work is capped by the number of
+ * pending ff tx completions, so the allocation fallback is never
+ * reached.
+ *
+ * Returns the number of descriptors armed.
+ */
+static int rxfr_refill(struct rx_fill_ring *rxfr, int budget, bool only_ff)
+{
+	struct ff_pcpu_ctx *ffc = this_cpu_ptr(&ff_pcpu);
+	struct tx_ring *txr = &ffc->tx_ring;
+	struct ipq95xx_ess_priv *priv = rxfr->cport->priv;
+	u32 prod_idx;
+	int done, done_ff, todo;
+
+	/* make sure we have room */
+	if (unlikely(!rxfr_has_room(rxfr)))
+		rxfr->descs_avail_cache = rxfr_compute_avail_desc(priv, rxfr);
+
+	if (unlikely(!rxfr_has_room(rxfr)))
+		return 0;
+
+	if (IS_ENABLED(CONFIG_IPQ95XX_FBX_FF)) {
+		/* refresh pending ff tx completion count when dry */
+		if (unlikely(!txr_can_reclaim(txr)))
+			txr->descs_sent_cache = txr_compute_sent_descs(priv, txr);
+	}
+
+	if (IS_ENABLED(CONFIG_IPQ95XX_FBX_FF) && only_ff)
+		todo = min_t(int, txr->descs_sent_cache,
+			     rxfr->descs_avail_cache);
+	else
+		todo = min_t(int, budget, rxfr->descs_avail_cache);
+
+	prod_idx = rxfr->prod_idx;
+	done = 0;
+	done_ff = 0;
+
+	while (done < todo) {
+		/* try to reclaim sent frags from ff tx queue */
+		if (IS_ENABLED(CONFIG_IPQ95XX_FBX_FF) &&
+		    txr->descs_sent_cache) {
+			struct tx_meta_info *mi;
+			struct edma_txcmpl_desc *cdesc;
+			struct edma_rxfill_desc *desc;
+			int tcons_idx;
+			int meta_idx;
+
+			/* completion descriptor's tdes0 is the meta index */
+			tcons_idx = txr->cmpl_cons_idx;
+			cdesc = &txr->tx_cmpl_desc_area[tcons_idx];
+			meta_idx = cdesc->tdes0;
+			mi = &txr->tx_meta_info[meta_idx];
+#ifdef DEBUG_FF
+			cdesc->tdes0 = 0x11111;
+
+			if (mi->page == (void *)0xdeadbeef ||
+			    mi->page == (void *)0xdeadbeee) {
+				printk("BAD FRAG2, idx:%u meta_id:%u\n",
+				       tcons_idx, meta_idx);
+			}
+#endif
+
+			tcons_idx++;
+			tcons_idx &= (txr->ring_size - 1);
+			txr->descs_sent_cache--;
+			txr->cmpl_cons_idx = tcons_idx;
+			txr->last_use_time = ff.jiffies;
+
+			/* rearm the page directly: it is still mapped */
+			desc = &rxfr->rx_desc_area[prod_idx];
+			desc->rdes0 = cpu_to_le32(mi->paddr + RX_OFFSET);
+			desc->rdes1 = cpu_to_le32(rxfr->cport->rx_pkt_size << 16);
+			desc->rdes2 = (unsigned long)mi->page & 0xffffffff;
+			desc->rdes3 = (unsigned long)mi->page >> 32;
+
+#ifdef DEBUG_FF
+			/* poison the meta entry so reuse is detectable */
+			mi->page = (void *)0xdeadbeee;
+			mi->paddr = 0x42424243;
+			mi->map_len = 16;
+#endif
+			done_ff++;
+		} else {
+			if (rxfr_refill_desc(rxfr, prod_idx))
+				break;
+		}
+
+		prod_idx++;
+		prod_idx &= (rxfr->ring_size - 1);
+		done++;
+	}
+
+	/* acknowledge the ff tx completions we consumed */
+	if (IS_ENABLED(CONFIG_IPQ95XX_FBX_FF) && done_ff)
+		edma_writel(priv, EDMA_TXCMPL_CONSx_IDX_REG(txr->hw_index),
+			    txr->cmpl_cons_idx);
+
+	rxfr->prod_idx = prod_idx;
+	WARN_ON(done > rxfr->descs_avail_cache);
+	rxfr->descs_avail_cache -= done;
+	edma_writel(priv, EDMA_RXFILL_PRODx_IDX_REG(rxfr->hw_index), prod_idx);
+
+	return done;
+}
+
+/*
+ * NAPI poll for an rx fill ring: refill with fresh buffers and
+ * re-enable the urgent-refill interrupt once under budget.
+ *
+ * Fix: napi_complete_done() must be passed the amount of work
+ * actually done, not the budget — the value feeds the kernel's
+ * gro_flush_timeout / napi_defer_hard_irqs heuristics.  Also moved
+ * the branch-prediction hint to cover the whole condition.
+ */
+static int rxfr_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct rx_fill_ring *rxfr;
+	int work_done;
+
+	/* refresh the fastpath's coarse timestamp */
+	if (IS_ENABLED(CONFIG_IPQ95XX_FBX_FF))
+		ff.jiffies = nfct_time_stamp;
+
+	rxfr = container_of(napi, struct rx_fill_ring, napi);
+
+	/* netpoll can invoke us with a zero budget */
+	if (unlikely(!budget))
+		return 0;
+
+	work_done = rxfr_refill(rxfr, budget, false);
+
+	if (work_done < budget) {
+		napi_complete_done(napi, work_done);
+
+		/* enable interrupt again */
+		edma_writel(rxfr->cport->priv,
+			    EDMA_RXFILL_INT_MASKx_REG(rxfr->hw_index),
+			    RXFILL_INT_URG_MASK);
+	}
+
+	return work_done;
+}
+
+/*
+ * rx fill (urgent refill) interrupt handler: schedule the ring's
+ * NAPI poller and mask this ring's interrupt; the poller unmasks it
+ * when it finishes under budget.
+ */
+static irqreturn_t edma_rx_fill_isr(int irq, void *dev_id)
+{
+	struct rx_fill_ring *rxfr = dev_id;
+	struct ess_cpu_port *cport = rxfr->cport;
+	struct ipq95xx_ess_priv *priv = cport->priv;
+
+	/* sanity check: each fill ring is expected to fire on its own
+	 * cpu (irq affinity), warn loudly otherwise */
+	if (raw_smp_processor_id() != rxfr->index) {
+		printk("WTF fill isr ring %d on cpu %d\n",
+		       rxfr->index, raw_smp_processor_id());
+	}
+
+	/* mask irq */
+	if (likely(napi_schedule_prep(&rxfr->napi))) {
+		__napi_schedule(&rxfr->napi);
+		edma_writel(priv, EDMA_RXFILL_INT_MASKx_REG(rxfr->hw_index), 0);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Enable an rx fill ring in hardware.
+ */
+static void rxfr_enable(struct rx_fill_ring *rxfr)
+{
+	edma_writel(rxfr->cport->priv,
+		    EDMA_RXFILL_ENx_REG(rxfr->hw_index), 1);
+}
+
+/*
+ * Stop an rx fill ring: request the hardware disable and poll the
+ * DISABLE_DONE status until it acknowledges.
+ *
+ * Returns 0 when the ring was already disabled or stops in time,
+ * -ETIMEDOUT when the hardware never acknowledges.
+ */
+static int rxfr_stop(struct rx_fill_ring *rxfr)
+{
+	struct ess_cpu_port *cport = rxfr->cport;
+	struct ipq95xx_ess_priv *priv = cport->priv;
+	u32 val;
+	int i;
+
+	/* nothing to do if the ring is not enabled */
+	val = edma_readl(priv, EDMA_RXFILL_ENx_REG(rxfr->hw_index));
+	if (!val)
+		return 0;
+
+	edma_writel(priv, EDMA_RXFILL_DISABLEx_REG(rxfr->hw_index), 1);
+	msleep(10);
+
+	/* poll completion: up to 100 iterations of 100-200us */
+	for (i = 0; i < 100; i++) {
+		val = edma_readl(priv,
+				 EDMA_RXFILL_DISABLE_DONEx_REG(rxfr->hw_index));
+		if (val)
+			break;
+
+		usleep_range(100, 200);
+	}
+
+	if (!val) {
+		dev_err(rxfr->dev, "failed to disable rx fill ring %d\n",
+			rxfr->hw_index);
+		return -ETIMEDOUT;
+	}
+
+	/* enable bit does not self clear */
+	edma_writel(priv, EDMA_RXFILL_ENx_REG(rxfr->hw_index), 0);
+	edma_writel(priv, EDMA_RXFILL_DISABLEx_REG(rxfr->hw_index), 0);
+	edma_writel(priv, EDMA_RXFILL_DISABLE_DONEx_REG(rxfr->hw_index), 0);
+
+	return 0;
+}
+
+/*
+ * give the page stashed in fill descriptor @idx back to the page pool
+ */
+static void rxfr_release_desc(struct rx_fill_ring *rxfr, int idx)
+{
+	struct edma_rxfill_desc *desc = &rxfr->rx_desc_area[idx];
+	unsigned long vaddr;
+
+	/* the opaque cookie (a struct page pointer) was split across
+	 * rdes2 (low 32 bits) and rdes3 (high 32 bits) when the
+	 * descriptor was armed */
+	vaddr = (unsigned long)desc->rdes2;
+	vaddr |= (unsigned long)desc->rdes3 << 32;
+	page_pool_put_full_page(rxfr->pp, (struct page *)vaddr, false);
+}
+
+/*
+ * tear down an rx fill ring whose hardware has already been stopped
+ * (see rxfr_stop()): give every still-armed fragment back to the
+ * page pool, then free the pool, the descriptor area, the NAPI
+ * context and the interrupt.
+ */
+static void rxfr_release(struct rx_fill_ring *rxfr)
+{
+	struct ess_cpu_port *cport = rxfr->cport;
+	struct ipq95xx_ess_priv *priv = cport->priv;
+	u32 prod_idx, cons_idx;
+
+	/* hardware is stopped, release all armed descriptors */
+	cons_idx = edma_readl(priv, EDMA_RXFILL_CONSx_IDX_REG(rxfr->hw_index));
+	prod_idx = rxfr->prod_idx;
+
+	/* walk [cons, prod) — ring size is a power of two */
+	while (cons_idx != prod_idx) {
+		rxfr_release_desc(rxfr, cons_idx);
+		cons_idx++;
+		cons_idx &= (rxfr->ring_size - 1);
+	}
+
+	page_pool_destroy(rxfr->pp);
+	rxfr->pp = NULL;
+	dma_free_coherent(rxfr->dev,
+			  rxfr->rx_desc_area_size,
+			  rxfr->rx_desc_area,
+			  rxfr->rx_desc_dma);
+	netif_napi_del(&rxfr->napi);
+	irq_set_affinity_and_hint(rxfr->irq, NULL);
+	free_irq(rxfr->irq, rxfr);
+	kfree(rxfr->mask);
+	rxfr->mask = NULL;
+}
+
+/*
+ * allocate and program one rx fill ring: descriptor area, dedicated
+ * page pool, initial page fragments, interrupt and NAPI context.
+ *
+ * @index is the logical (cpu) index, @hw_index the hardware ring
+ * number, @buf_count the ring size in descriptors.
+ *
+ * Returns 0 on success or a negative errno; on failure everything
+ * allocated so far is released.
+ */
+static int rxfr_setup(struct ess_cpu_port *cport,
+		      struct rx_fill_ring *rxfr,
+		      int index, int hw_index, int irq, int buf_count)
+{
+	struct ipq95xx_ess_priv *priv = cport->priv;
+	struct device *dev = &priv->pdev->dev;
+	struct page_pool_params pp_params = { 0 };
+	size_t size;
+	int i, ret;
+	u32 cons_idx;
+
+	memset(rxfr, 0, sizeof (*rxfr));
+	spin_lock_init(&rxfr->lock);
+	rxfr->dev = dev;
+	rxfr->cport = cport;
+	rxfr->index = index;
+	rxfr->hw_index = hw_index;
+	rxfr->ring_size = buf_count;
+
+	size = rxfr->ring_size * sizeof (*rxfr->rx_desc_area);
+	rxfr->rx_desc_area_size = size;
+	rxfr->rx_desc_area = dma_alloc_coherent(dev,
+						size, &rxfr->rx_desc_dma,
+						GFP_KERNEL);
+	if (rxfr->rx_desc_area == NULL) {
+		dev_err(dev,
+			"can't allocate rx desc ring (%zu bytes)\n", size);
+		return -ENOMEM;
+	}
+
+	/* allocate dedicated page pool */
+	pp_params.order = 0;
+	pp_params.flags = PP_FLAG_DMA_MAP;
+	pp_params.nid = NUMA_NO_NODE;
+	pp_params.dev = dev;
+	pp_params.napi = &rxfr->napi;
+	pp_params.dma_dir = DMA_FROM_DEVICE;
+
+	rxfr->pp = page_pool_create(&pp_params);
+	if (IS_ERR(rxfr->pp)) {
+		ret = PTR_ERR(rxfr->pp);
+		rxfr->pp = NULL;
+		/* no fragment armed yet, fail path must not unwind any */
+		i = 0;
+		goto fail;
+	}
+
+	/* the hardware (consumer) index of the ring cannot be reset
+	 * when queue is start/stopped, so we have to resume from it's
+	 * previous location */
+	cons_idx = edma_readl(priv, EDMA_RXFILL_CONSx_IDX_REG(rxfr->hw_index));
+	rxfr->prod_idx = cons_idx;
+
+	/* arm all descriptors but one; use the outer ret so a refill
+	 * failure is propagated to the caller */
+	for (i = 0; i < rxfr->ring_size - 1; i++) {
+		ret = rxfr_refill_desc(rxfr, rxfr->prod_idx);
+		if (ret)
+			goto fail;
+
+		rxfr->prod_idx++;
+		rxfr->prod_idx &= (rxfr->ring_size - 1);
+	}
+
+	snprintf(rxfr->irq_name, sizeof (rxfr->irq_name),
+		 "%s-rxf%d", dev_name(dev), index);
+
+	rxfr->mask = kzalloc(cpumask_size(), GFP_KERNEL);
+	if (!rxfr->mask) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = request_irq(irq, edma_rx_fill_isr, 0, rxfr->irq_name, rxfr);
+	if (ret) {
+		dev_err(dev, "failed to request irq %s: %d\n",
+			rxfr->irq_name, ret);
+		goto fail;
+	}
+	rxfr->irq = irq;
+	netif_napi_add(&cport->dummy_netdev, &rxfr->napi, rxfr_napi_poll);
+
+	/* pin the fill interrupt to its cpu */
+	irq_set_status_flags(rxfr->irq, IRQ_NO_BALANCING);
+	if (index < num_possible_cpus()) {
+		cpumask_set_cpu(index, rxfr->mask);
+		irq_set_affinity_and_hint(rxfr->irq, rxfr->mask);
+	}
+
+	/* setup hardware */
+	edma_writel(priv, EDMA_RXFILL_BAx_REG(rxfr->hw_index),
+		    rxfr->rx_desc_dma);
+	edma_writel(priv, EDMA_RXFILL_RING_SIZEx_REG(rxfr->hw_index),
+		    rxfr->ring_size);
+	edma_writel(priv, EDMA_RXFILL_PRODx_IDX_REG(rxfr->hw_index),
+		    rxfr->prod_idx);
+
+	/* FIXME: pickup better threshold */
+	edma_writel(priv, EDMA_RXFILL_UGT_THRESHx_REG(rxfr->hw_index),
+		    rxfr->ring_size / 2);
+	return 0;
+
+fail:
+	/* release the fragments armed so far, walking prod_idx back */
+	while (i > 0) {
+		if (rxfr->prod_idx > 0)
+			rxfr->prod_idx--;
+		else
+			rxfr->prod_idx = rxfr->ring_size - 1;
+		rxfr_release_desc(rxfr, rxfr->prod_idx);
+		i--;
+	}
+
+	kfree(rxfr->mask);
+	rxfr->mask = NULL;
+	page_pool_destroy(rxfr->pp);
+	rxfr->pp = NULL;
+	dma_free_coherent(dev,
+			  rxfr->rx_desc_area_size,
+			  rxfr->rx_desc_area,
+			  rxfr->rx_desc_dma);
+	return ret;
+}
+
+/*
+ * disable the hardware rx done ring and wait for the hardware to ack
+ * the disable request.
+ *
+ * Returns 0 on success, -ETIMEDOUT if the done flag never raises
+ * within the poll budget.
+ */
+static int rxdr_stop(struct rx_done_ring *rxdr)
+{
+	struct ess_cpu_port *cport = rxdr->cport;
+	struct ipq95xx_ess_priv *priv = cport->priv;
+	u32 val;
+	int i;
+
+	/* request disable, then poll the done flag */
+	edma_writel(priv, EDMA_RXDESC_DISABLEx_REG(rxdr->hw_index), 1);
+
+	for (i = 0; i < 100; i++) {
+		val = edma_readl(priv,
+				 EDMA_RXDESC_DISABLE_DONEx_REG(rxdr->hw_index));
+		if (val)
+			break;
+
+		usleep_range(100, 200);
+	}
+
+	if (!val) {
+		dev_err(rxdr->dev, "failed to disable rx done ring %d\n",
+			rxdr->hw_index);
+		return -ETIMEDOUT;
+	}
+
+	/* enable bit does not self clear */
+	val = edma_readl(priv, EDMA_RXDESC_CTRLx_REG(rxdr->hw_index));
+	val &= ~RXDESC_CTRL_EN_MASK;
+	edma_writel(priv, EDMA_RXDESC_CTRLx_REG(rxdr->hw_index), val);
+	edma_writel(priv, EDMA_RXDESC_DISABLEx_REG(rxdr->hw_index), 0);
+	edma_writel(priv, EDMA_RXDESC_DISABLE_DONEx_REG(rxdr->hw_index), 0);
+
+	return 0;
+}
+
+/*
+ * return the page referenced by done descriptor @idx to the page pool
+ */
+static void rxdr_release_desc(struct rx_done_ring *rxdr, int idx)
+{
+	struct edma_rxdesc_desc *desc = &rxdr->rx_desc_area[idx];
+	unsigned long vaddr;
+
+	/* the opaque cookie (a struct page pointer) is split across
+	 * rdes2 (low 32 bits) and rdes3 (high 32 bits) */
+	vaddr = (unsigned long)desc->rdes2;
+	vaddr |= (unsigned long)desc->rdes3 << 32;
+	page_pool_put_full_page(rxdr->pp, (struct page *)vaddr, false);
+}
+
+/*
+ * tear down an rx done ring: delete the NAPI context and interrupt,
+ * return all completed-but-unprocessed pages to the page pool, free
+ * the descriptor areas.
+ *
+ * Safe to call on a ring that was never (fully) set up: each step is
+ * guarded by its own field, and rxdr_setup() memsets the structure
+ * before doing anything.
+ */
+static void rxdr_release(struct rx_done_ring *rxdr)
+{
+	/* ring was never set up at all */
+	if (!rxdr->cport)
+		return;
+
+	if (rxdr->irq) {
+		netif_napi_del(&rxdr->napi);
+		irq_set_affinity_and_hint(rxdr->irq, NULL);
+		free_irq(rxdr->irq, rxdr);
+		rxdr->irq = 0;
+	}
+
+	if (rxdr->rx_desc_area) {
+		const size_t rx_dma_desc_size = sizeof (*rxdr->rx_desc_area);
+		u32 prod_idx, cons_idx;
+
+		/* hardware is stopped, release all done
+		 * descriptors */
+		prod_idx = edma_readl(rxdr->cport->priv,
+				      EDMA_RXDESC_PRODx_IDX_REG(rxdr->hw_index));
+		cons_idx = rxdr->cons_idx;
+
+		/* the area is streaming-mapped: sync each descriptor
+		 * for cpu before reading the opaque page cookie */
+		while (cons_idx != prod_idx) {
+			dma_sync_single_for_cpu(rxdr->dev,
+						rxdr->rx_desc_dma +
+						cons_idx * rx_dma_desc_size,
+						rx_dma_desc_size,
+						DMA_FROM_DEVICE);
+			rxdr_release_desc(rxdr, cons_idx);
+			cons_idx++;
+			cons_idx &= (rxdr->ring_size - 1);
+		}
+
+		dma_unmap_single(rxdr->dev,
+				 rxdr->rx_desc_dma,
+				 rxdr->rx_desc_area_size,
+				 DMA_FROM_DEVICE);
+		kfree(rxdr->rx_desc_area);
+		rxdr->rx_desc_area = NULL;
+	}
+
+	if (rxdr->rx_sec_desc_area) {
+		dma_free_coherent(rxdr->dev,
+				  rxdr->rx_sec_desc_area_size,
+				  rxdr->rx_sec_desc_area,
+				  rxdr->rx_sec_desc_dma);
+		rxdr->rx_sec_desc_area = NULL;
+	}
+
+	kfree(rxdr->mask);
+	rxdr->mask = NULL;
+}
+
+/*
+ * set the enable bit of the hardware rx done ring
+ */
+static void rxdr_enable(struct rx_done_ring *rxdr)
+{
+	struct ipq95xx_ess_priv *priv = rxdr->cport->priv;
+	u32 ctrl;
+
+	ctrl = edma_readl(priv, EDMA_RXDESC_CTRLx_REG(rxdr->hw_index));
+	edma_writel(priv, EDMA_RXDESC_CTRLx_REG(rxdr->hw_index),
+		    ctrl | RXDESC_CTRL_EN_MASK);
+}
+
+/*
+ * program rx interrupt moderation (timer and packet threshold) from
+ * the cpu port settings
+ */
+static void rxdr_update_rx_coalesce(struct rx_done_ring *rxdr)
+{
+	struct ess_cpu_port *cport = rxdr->cport;
+	struct ipq95xx_ess_priv *priv = cport->priv;
+	u32 timer, thresh;
+
+	/* rx interrupt coalescing timer */
+	timer = EDMA_US_TO_TIMER_TICKS(cport->rx_irq_coalesce_us) <<
+		RX_MOD_TIMER_VAL_SHIFT;
+	edma_writel(priv, EDMA_RX_MOD_TIMERx_REG(rxdr->hw_index), timer);
+
+	/* rx interrupt coalescing max packets */
+	thresh = cport->rx_irq_coalesce_pkts << RXDESC_UGT_THRESHx_LOW_SHIFT;
+	edma_writel(priv, EDMA_RXDESC_UGT_THRESHx_REG(rxdr->hw_index), thresh);
+}
+
+/*
+ * allocate and program one rx done ring: primary (streaming-mapped)
+ * and secondary descriptor areas, interrupt and NAPI context; the
+ * page pool is shared with the matching fill ring.
+ *
+ * Returns 0 on success or a negative errno; on failure everything
+ * allocated so far is released through rxdr_release().
+ */
+static int rxdr_setup(struct ess_cpu_port *cport,
+		      struct rx_done_ring *rxdr,
+		      struct page_pool *pp,
+		      int index, int hw_index, int irq, int buf_count)
+{
+	struct ipq95xx_ess_priv *priv = cport->priv;
+	struct device *dev = &priv->pdev->dev;
+	size_t size;
+	u32 val, prod_idx;
+	int ret;
+
+	memset(rxdr, 0, sizeof (*rxdr));
+	rxdr->dev = dev;
+	rxdr->cport = cport;
+	rxdr->index = index;
+	rxdr->hw_index = hw_index;
+	rxdr->ring_size = buf_count;
+	rxdr->pp = pp;
+
+	/* resume from the current hardware producer index right away,
+	 * so that a failure unwind via rxdr_release() sees an empty
+	 * ring instead of walking never-armed descriptors */
+	rxdr->cons_idx = edma_readl(priv,
+				    EDMA_RXDESC_PRODx_IDX_REG(rxdr->hw_index));
+
+	size = rxdr->ring_size * sizeof (*rxdr->rx_desc_area);
+	rxdr->rx_desc_area_size = size;
+	/* kcalloc() takes (count, size) */
+	rxdr->rx_desc_area = kcalloc(rxdr->ring_size,
+				     sizeof (*rxdr->rx_desc_area),
+				     GFP_KERNEL);
+	/* check the allocation, not the (always non-zero) size */
+	if (!rxdr->rx_desc_area) {
+		dev_err(dev,
+			"can't allocate rx desc ring (%zu bytes)\n", size);
+		return -ENOMEM;
+	}
+
+	rxdr->rx_desc_dma = dma_map_single(dev,
+					   rxdr->rx_desc_area,
+					   rxdr->rx_desc_area_size,
+					   DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, rxdr->rx_desc_dma)) {
+		dev_err(dev,
+			"can't allocate rx done ring (%zu bytes)\n", size);
+		kfree(rxdr->rx_desc_area);
+		return -ENOMEM;
+	}
+
+	size = rxdr->ring_size * sizeof (*rxdr->rx_sec_desc_area);
+	rxdr->rx_sec_desc_area_size = size;
+	rxdr->rx_sec_desc_area = dma_alloc_coherent(dev, size,
+						    &rxdr->rx_sec_desc_dma,
+						    GFP_KERNEL);
+	if (rxdr->rx_sec_desc_area == NULL) {
+		dev_err(dev, "can't allocate rx done sec "
+			"ring (%zu bytes)\n", size);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	snprintf(rxdr->irq_name, sizeof (rxdr->irq_name),
+		 "%s-rxd%d", dev_name(dev), index);
+
+	rxdr->mask = kzalloc(cpumask_size(), GFP_KERNEL);
+	if (!rxdr->mask) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = request_irq(irq, edma_rx_done_isr, 0, rxdr->irq_name, rxdr);
+	if (ret) {
+		/* propagate the real request_irq() error */
+		dev_err(dev, "failed to request irq %s: %d\n",
+			rxdr->irq_name, ret);
+		goto fail;
+	}
+	rxdr->irq = irq;
+	netif_napi_add(&cport->dummy_netdev, &rxdr->napi, rxdr_napi_poll);
+
+	/* pin the done interrupt to its cpu */
+	irq_set_status_flags(rxdr->irq, IRQ_NO_BALANCING);
+	if (index < num_possible_cpus()) {
+		cpumask_set_cpu(index, rxdr->mask);
+		irq_set_affinity_and_hint(rxdr->irq, rxdr->mask);
+	}
+
+	/* setup hardware */
+	val = (RXDESC_CTRL_RET_MODE_OPAQUE << RXDESC_CTRL_RET_MODE_SHIFT) |
+		(RXDESC_CTRL_PH_ADDR_SEL_INRING << RXDESC_CTRL_PH_ADDR_SEL_SHIFT) |
+		(RXDESC_CTRL_PH_LEN_SEL_24B << RXDESC_CTRL_PH_LEN_SEL_SHIFT);
+	edma_writel(priv, EDMA_RXDESC_CTRLx_REG(rxdr->hw_index), val);
+
+	edma_writel(priv, EDMA_RXDESC_BAx_REG(rxdr->hw_index),
+		    rxdr->rx_desc_dma);
+	edma_writel(priv, EDMA_RXDESC_PH_BAx_REG(rxdr->hw_index),
+		    rxdr->rx_sec_desc_dma);
+	edma_writel(priv, EDMA_RXDESC_RING_SIZEx_REG(rxdr->hw_index),
+		    rxdr->ring_size);
+
+	rxdr_update_rx_coalesce(rxdr);
+
+	/* the hardware (producer) index of the ring cannot be reset
+	 * when queue is start/stopped, so we have to resume from it's
+	 * previous location */
+	prod_idx = edma_readl(priv, EDMA_RXDESC_PRODx_IDX_REG(rxdr->hw_index));
+	rxdr->cons_idx = prod_idx;
+	edma_writel(priv, EDMA_RXDESC_CONSx_IDX_REG(rxdr->hw_index), prod_idx);
+
+	/* setup ring interrupt */
+	edma_writel(priv, EDMA_RX_INT_CTRLx_REG(rxdr->hw_index),
+		    RX_INT_CTRL_NE_EN_MASK);
+
+	return 0;
+
+fail:
+	rxdr_release(rxdr);
+	return ret;
+}
+
+
+/*
+ * clear the enable bit of the hardware tx descriptor ring
+ */
+static int txr_stop(struct tx_ring *txr)
+{
+	struct ipq95xx_ess_priv *priv = txr->priv;
+	u32 ctrl;
+
+	ctrl = edma_readl(priv, EDMA_TXDESC_CTRLx_REG(txr->hw_index));
+	edma_writel(priv, EDMA_TXDESC_CTRLx_REG(txr->hw_index),
+		    ctrl & ~TXDESC_CTRL_EN_MASK);
+
+	return 0;
+}
+
+/*
+ * true while the hardware has not consumed every queued tx descriptor
+ */
+static bool txr_has_unsent_buffers(struct tx_ring *txr)
+{
+	u32 hw_cons;
+
+	hw_cons = edma_readl(txr->priv,
+			     EDMA_TXDESC_CONSx_IDX_REG(txr->hw_index));
+	return hw_cons != txr->desc_prod_idx;
+}
+
+/*
+ * tear down a tx ring: delete the NAPI context and interrupt (if
+ * any), reclaim completed buffers, release unsent buffers, free the
+ * descriptor areas and the meta data.
+ *
+ * No-op unless setup completed (setup_done); reads the hardware
+ * consumer indices, so the hardware ring is assumed stopped.
+ */
+static void txr_release(struct tx_ring *txr)
+{
+	if (!txr->setup_done)
+		return;
+
+	/* bare rings (txr_setup_bare) have no irq/napi */
+	if (txr->irq) {
+		netif_napi_del(&txr->napi);
+		irq_set_affinity_and_hint(txr->irq, NULL);
+		free_irq(txr->irq, txr);
+		txr->irq = 0;
+	}
+
+	if (txr->tx_cmpl_desc_area) {
+		/* drain the completion ring before freeing it */
+		txr_reclaim_bare(txr, txr->ring_size, NULL);
+		dma_free_coherent(txr->dev,
+				  txr->tx_cmpl_desc_area_size,
+				  txr->tx_cmpl_desc_area,
+				  txr->tx_cmpl_desc_dma);
+		txr->tx_cmpl_desc_area = NULL;
+	}
+
+	if (txr->tx_desc_area) {
+		struct ipq95xx_ess_priv *priv = txr->priv;
+		u32 cons_idx;
+
+		/* reclaim unsent buffers */
+		cons_idx = edma_readl(priv,
+				      EDMA_TXDESC_CONSx_IDX_REG(txr->hw_index));
+		/* walk the producer index back down to the hardware
+		 * consumer index, releasing each queued descriptor */
+		while (cons_idx != txr->desc_prod_idx) {
+			if (txr->desc_prod_idx > 0)
+				txr->desc_prod_idx--;
+			else
+				txr->desc_prod_idx = (txr->ring_size - 1);
+			txr_release_unsent_desc(txr, txr->desc_prod_idx);
+		}
+
+		dma_free_coherent(txr->dev,
+				  txr->tx_desc_area_size,
+				  txr->tx_desc_area,
+				  txr->tx_desc_dma);
+		txr->tx_desc_area = NULL;
+	}
+
+	if (txr->tx_sec_desc_area) {
+		dma_free_coherent(txr->dev,
+				  txr->tx_sec_desc_area_size,
+				  txr->tx_sec_desc_area,
+				  txr->tx_sec_desc_dma);
+		txr->tx_sec_desc_area = NULL;
+	}
+
+	kfree(txr->tx_meta_info);
+	kfree(txr->mask);
+	txr->mask = NULL;
+	txr->setup_done = false;
+}
+
+/*
+ * set the enable bit of the hardware tx descriptor ring
+ */
+static void txr_enable(struct tx_ring *txr)
+{
+	struct ipq95xx_ess_priv *priv = txr->priv;
+	u32 ctrl;
+
+	ctrl = edma_readl(priv, EDMA_TXDESC_CTRLx_REG(txr->hw_index));
+	edma_writel(priv, EDMA_TXDESC_CTRLx_REG(txr->hw_index),
+		    ctrl | TXDESC_CTRL_EN_MASK);
+}
+
+/*
+ * common part of tx ring setup: allocate meta data, descriptor,
+ * secondary descriptor and completion areas, then program the
+ * hardware base/size registers, resuming indices from their current
+ * hardware values.
+ *
+ * Returns 0 on success or a negative errno; on failure everything
+ * allocated so far is freed here (txr_release() cannot be used yet,
+ * it bails out while setup_done is false).
+ */
+static int txr_setup_common(struct ipq95xx_ess_priv *priv,
+			    struct tx_ring *txr,
+			    int index, int hw_index,
+			    int buf_count)
+{
+	struct device *dev = &priv->pdev->dev;
+	size_t size;
+	u32 val, cons_idx, prod_idx;
+	int reg_idx, ret;
+
+	memset(txr, 0, sizeof (*txr));
+	spin_lock_init(&txr->lock);
+	txr->priv = priv;
+	txr->dev = dev;
+	txr->index = index;
+	txr->hw_index = hw_index;
+	txr->ring_size = buf_count;
+
+	/* Though we use 1:1 send/completion queue mapping, the
+	 * hardware can fetch more descriptors than what it can push
+	 * back in completion queue (they are still released in same
+	 * order they are queued). Allocate more descriptors meta
+	 * data to account for this. */
+	txr->meta_ring_size = buf_count * 2 + ESS_TX_PREFETCH_COUNT;
+	/* kcalloc() takes (count, size) */
+	txr->tx_meta_info = kcalloc(txr->meta_ring_size,
+				    sizeof (*txr->tx_meta_info),
+				    GFP_KERNEL);
+	if (!txr->tx_meta_info) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	size = txr->ring_size * sizeof (*txr->tx_desc_area);
+	txr->tx_desc_area_size = size;
+	txr->tx_desc_area = dma_alloc_coherent(dev, size,
+					       &txr->tx_desc_dma,
+					       GFP_KERNEL);
+	if (txr->tx_desc_area == NULL) {
+		dev_err(dev,
+			"can't allocate tx desc ring (%zu bytes)\n", size);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	size = txr->ring_size * sizeof (*txr->tx_sec_desc_area);
+	txr->tx_sec_desc_area_size = size;
+	txr->tx_sec_desc_area = dma_alloc_coherent(dev, size,
+						   &txr->tx_sec_desc_dma,
+						   GFP_KERNEL);
+	if (txr->tx_sec_desc_area == NULL) {
+		dev_err(dev,  "can't allocate tx desc sec "
+			"ring (%zu bytes)\n", size);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	size = txr->ring_size * sizeof (*txr->tx_cmpl_desc_area);
+	txr->tx_cmpl_desc_area_size = size;
+	txr->tx_cmpl_desc_area = dma_alloc_coherent(dev, size,
+						    &txr->tx_cmpl_desc_dma,
+						    GFP_KERNEL);
+	if (txr->tx_cmpl_desc_area == NULL) {
+		dev_err(dev,
+			"can't allocate tx compl ring (%zu bytes)\n", size);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* setup hardware, tx desc ring */
+	edma_writel(priv, EDMA_TXDESC_BAx_REG(txr->hw_index),
+		    txr->tx_desc_dma);
+	edma_writel(priv, EDMA_TXDESC_PH_BAx_REG(txr->hw_index),
+		    txr->tx_sec_desc_dma);
+	edma_writel(priv, EDMA_TXDESC_RING_SIZEx_REG(txr->hw_index),
+		    txr->ring_size);
+
+	/* the hardware (consumer) index of the tx ring cannot be
+	 * reset when queue is start/stopped, so we have to resume
+	 * from it's previous location */
+	cons_idx = edma_readl(priv, EDMA_TXDESC_CONSx_IDX_REG(txr->hw_index));
+	edma_writel(priv, EDMA_TXDESC_PRODx_IDX_REG(txr->hw_index), cons_idx);
+	txr->desc_prod_idx = cons_idx;
+	txr->descs_avail_cache = txr->ring_size - 1;
+
+	/* setup hardware, tx completion ring */
+	val = (TXCMPL_CTRL_RET_MODE_OPAQUE << TXCMPL_CTRL_RET_MODE_SHIFT);
+	edma_writel(priv, EDMA_TXCMPL_CTRLx_REG(txr->hw_index), val);
+	edma_writel(priv, EDMA_TXCMPL_BAx_REG(txr->hw_index),
+		    txr->tx_cmpl_desc_dma);
+	edma_writel(priv, EDMA_TXCMPL_RING_SIZEx_REG(txr->hw_index),
+		    txr->ring_size);
+
+	/* the hardware (producer) index of the completion ring cannot
+	 * be reset when queue is start/stopped, so we have to resume
+	 * from it's previous location */
+	prod_idx = edma_readl(priv, EDMA_TXCMPL_PRODx_IDX_REG(txr->hw_index));
+	edma_writel(priv, EDMA_TXCMPL_CONSx_IDX_REG(txr->hw_index), prod_idx);
+	txr->cmpl_cons_idx = prod_idx;
+
+	/* associate tx desc queue to the correct completion ring */
+	reg_idx = TXDESC2CMP_Q2REG(hw_index);
+	val = edma_readl(priv, EDMA_TXDESC2CMPL_MAPx_REG(reg_idx));
+	val &= ~TXDESC2CMPL_MAP_IDx_MASK(hw_index);
+	val |= hw_index << TXDESC2CMPL_MAP_IDx_SHIFT(hw_index);
+	edma_writel(priv, EDMA_TXDESC2CMPL_MAPx_REG(reg_idx), val);
+
+	return 0;
+
+fail:
+	/* free directly what was allocated so far: no buffer has been
+	 * queued yet and the hardware was not (re)programmed, so the
+	 * reclaim logic of txr_release() is neither needed nor safe */
+	if (txr->tx_sec_desc_area) {
+		dma_free_coherent(dev, txr->tx_sec_desc_area_size,
+				  txr->tx_sec_desc_area,
+				  txr->tx_sec_desc_dma);
+		txr->tx_sec_desc_area = NULL;
+	}
+	if (txr->tx_desc_area) {
+		dma_free_coherent(dev, txr->tx_desc_area_size,
+				  txr->tx_desc_area,
+				  txr->tx_desc_dma);
+		txr->tx_desc_area = NULL;
+	}
+	kfree(txr->tx_meta_info);
+	txr->tx_meta_info = NULL;
+	return ret;
+}
+
+/*
+ * set up a tx ring with no irq and no napi context registered here,
+ * placed in flow control group 0 (flow control disabled)
+ */
+static int txr_setup_bare(struct ipq95xx_ess_priv *priv,
+			  struct tx_ring *txr,
+			  int index, int hw_index, int buf_count)
+{
+	int ret = txr_setup_common(priv, txr, index, hw_index, buf_count);
+
+	if (ret)
+		return ret;
+
+	/* assign tx queue to flow control group 0, for which flow
+	 * control is disabled, so there is no backpressure on this
+	 * port */
+	edma_writel(priv, EDMA_TXDESC_CTRLx_REG(txr->hw_index),
+		    0 << TXDESC_CTRL_FC_GRP_ID_SHIFT);
+
+	txr->setup_done = true;
+	return 0;
+}
+
+/*
+ * program tx interrupt moderation (timer and packet threshold) from
+ * the port settings
+ */
+static void txr_update_tx_coalesce(struct tx_ring *txr)
+{
+	struct ipq95xx_ess_priv *priv = txr->priv;
+	struct ess_port *port = txr->port;
+	u32 timer, thresh;
+
+	/* tx interrupt coalescing timer */
+	timer = EDMA_US_TO_TIMER_TICKS(port->tx_irq_coalesce_us) <<
+		TX_MOD_TIMER_VAL_SHIFT;
+	edma_writel(priv, EDMA_TX_MOD_TIMERx_REG(txr->hw_index), timer);
+
+	/* tx interrupt coalescing max packets */
+	thresh = port->tx_irq_coalesce_pkts << TXCMPL_UGT_THRESH_LOW_SHIFT;
+	edma_writel(priv, EDMA_TXCMPL_UGT_THRESHx_REG(txr->hw_index), thresh);
+}
+
+/*
+ * set up a tx ring bound to a netdevice: common ring setup, per-port
+ * flow control group, completion irq, NAPI context, cpu affinity and
+ * XPS mapping.
+ *
+ * Returns 0 on success or a negative errno; on failure the ring is
+ * fully torn down again.
+ */
+static int txr_setup_port(struct ess_port *port,
+			  struct tx_ring *txr,
+			  int index, int hw_index, int irq, int buf_count)
+{
+	struct ipq95xx_ess_priv *priv = port->priv;
+	int ret;
+
+	ret = txr_setup_common(priv, txr, index, hw_index, buf_count);
+	if (ret)
+		return ret;
+
+	txr->port = port;
+	txr->mask = kzalloc(cpumask_size(), GFP_KERNEL);
+	if (!txr->mask) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* assign tx queue to the per-port flow control group, this
+	 * allows hardware backpressure from the switch to EDMA so we
+	 * don't schedule packet faster than the outgoing port */
+	edma_writel(priv, EDMA_TXDESC_CTRLx_REG(txr->hw_index),
+		    port->ppe_port_id << TXDESC_CTRL_FC_GRP_ID_SHIFT);
+
+	snprintf(txr->irq_name, sizeof (txr->irq_name),
+		 "%s-txd%d", port->netdev->name, index);
+
+	ret = request_irq(irq, edma_tx_compl_isr, 0, txr->irq_name, txr);
+	if (ret) {
+		/* propagate the real request_irq() error */
+		dev_err(&priv->pdev->dev, "failed to request irq %s: %d\n",
+			txr->irq_name, ret);
+		goto fail;
+	}
+
+	txr->irq = irq;
+	netif_napi_add_tx(port->netdev, &txr->napi, txr_napi_poll);
+
+	/* pin the completion interrupt to its cpu */
+	irq_set_status_flags(txr->irq, IRQ_NO_BALANCING);
+	if (index < num_possible_cpus()) {
+		cpumask_set_cpu(index, txr->mask);
+		irq_set_affinity_and_hint(txr->irq, txr->mask);
+	}
+
+	/* setup ring interrupt */
+	edma_writel(priv, EDMA_TX_INT_CTRLx_REG(txr->hw_index),
+		    TX_INT_CTRL_NE_EN_MASK);
+
+	/* Assign this queue to a CPU */
+	if (index < num_possible_cpus())
+		netif_set_xps_queue(port->netdev, cpumask_of(index), index);
+
+	txr_update_tx_coalesce(txr);
+	txr->setup_done = true;
+	return 0;
+
+fail:
+	/* common setup succeeded and programmed the hardware: mark
+	 * the ring set up so txr_release() actually tears it down
+	 * (it is a no-op while setup_done is false) */
+	txr->setup_done = true;
+	txr_release(txr);
+	return ret;
+}
+
+/*
+ * re-apply tx interrupt coalescing parameters on every tx ring of
+ * the port
+ */
+void ess_port_update_tx_coalesce(struct ess_port *port)
+{
+	int n;
+
+	for (n = 0; n < port->txq_count; n++)
+		txr_update_tx_coalesce(&port->tx_rings[n]);
+}
+
+/*
+ * work item used to update the per-cpu port lookup table on every
+ * online cpu (see __cpu_port_mark_port_state())
+ */
+struct pcpu_action {
+	struct work_struct work;	/* scheduled on one specific cpu */
+	struct ess_cpu_port *cport;	/* cpu port owning the percpu data */
+	struct ess_port *port;		/* port being marked */
+	bool state;			/* true: publish port, false: clear */
+};
+
+/*
+ * per-cpu work callback: update this cpu's ports_by_ppe_id[] slot for
+ * the port described by the action (runs on the cpu the work was
+ * scheduled on)
+ */
+static void __cpu_port_mark_action(struct work_struct *ws)
+{
+	const struct pcpu_action *action =
+		container_of(ws, struct pcpu_action, work);
+	struct ess_cpu_port_pcpu *pdata;
+	void *entry;
+
+	entry = action->state ? (void *)action->port : NULL;
+
+	pdata = this_cpu_ptr(action->cport->percpu_priv);
+	local_bh_disable();
+	pdata->ports_by_ppe_id[action->port->ppe_port_id] = entry;
+	local_bh_enable();
+}
+
+/*
+ * update ports_by_ppe_id[] in the cpu port per-cpu data on every
+ * online cpu, by scheduling a work item on each cpu and waiting for
+ * all of them to complete (each slot is only written by its owning
+ * cpu).
+ *
+ * NOTE(review): if alloc_percpu() fails this silently does nothing
+ * and the caller is not told — confirm this is acceptable.
+ */
+static void __cpu_port_mark_port_state(struct ipq95xx_ess_priv *priv,
+				       struct ess_port *port,
+				       bool state)
+{
+	struct ess_cpu_port *cport = &priv->cpu_port;
+	struct pcpu_action __percpu *acts;
+	int cpu;
+
+	acts = alloc_percpu(struct pcpu_action);
+	if (!acts)
+		return;
+
+	/* hold off cpu hotplug while iterating online cpus */
+	cpus_read_lock();
+
+	for_each_online_cpu(cpu) {
+		struct pcpu_action *act = per_cpu_ptr(acts, cpu);
+
+		act->cport = cport;
+		act->port = port;
+		act->state = state;
+		INIT_WORK(&act->work, __cpu_port_mark_action);
+		schedule_work_on(cpu, &act->work);
+	}
+
+	/* wait for every per-cpu update before freeing the actions */
+	for_each_online_cpu(cpu)
+		flush_work(&per_cpu_ptr(acts, cpu)->work);
+
+	cpus_read_unlock();
+	free_percpu(acts);
+}
+
+/*
+ * publish @port in the per-cpu lookup tables
+ */
+static void cpu_port_mark_port_active(struct ipq95xx_ess_priv *priv,
+				      struct ess_port *port)
+{
+	__cpu_port_mark_port_state(priv, port, true);
+}
+
+/*
+ * remove @port from the per-cpu lookup tables
+ */
+static void cpu_port_mark_port_inactive(struct ipq95xx_ess_priv *priv,
+					struct ess_port *port)
+{
+	__cpu_port_mark_port_state(priv, port, false);
+}
+
+/*
+ * re-apply rx interrupt coalescing parameters on every rx done ring
+ * of the cpu port
+ */
+void ess_cpu_port_update_rx_coalesce(struct ess_cpu_port *cport)
+{
+	int n;
+
+	for (n = 0; n < cport->rxq_count; n++)
+		rxdr_update_rx_coalesce(&cport->rxd_rings[n]);
+}
+
+/*
+ * pickup highest MTU among all ports, with a floor of 1700 bytes
+ */
+static unsigned int compute_max_ports_mtu(struct ipq95xx_ess_priv *priv)
+{
+	struct ess_port *port;
+	unsigned int max_mtu;
+
+	max_mtu = 0;
+	list_for_each_entry(port, &priv->ports, next)
+		max_mtu = max_t(unsigned int, max_mtu, port->netdev->mtu);
+
+	/* enforce the 1700 bytes floor; this also covers the "no port
+	 * registered yet" case where max_mtu is still 0 (the previous
+	 * intermediate clamp to 1500 was dead code) */
+	if (max_mtu < 1700)
+		max_mtu = 1700;
+
+	return max_mtu;
+}
+
+/*
+ * bring up the shared cpu port datapath: compute rx buffer geometry
+ * from the highest port MTU, reserve hardware rx rings, set up the
+ * fill/done ring pairs, map PPE QM queues to the rings, then enable
+ * the rings and unmask their interrupts.
+ *
+ * Returns 0 on success or a negative errno; on failure everything is
+ * unwound.
+ */
+static int __ess_cpu_port_open(struct ipq95xx_ess_priv *priv)
+{
+	struct ess_cpu_port *cport = &priv->cpu_port;
+	unsigned int i, hw_index;
+	int irq, ret, qid;
+	unsigned int max_mtu;
+	u32 val;
+
+	BUG_ON(cport->active);
+
+	/*
+	 * Reserve 14 bytes for an ethernet header + 8 bytes for up
+	 * to two VLAN tags
+	 */
+	max_mtu = compute_max_ports_mtu(priv);
+	cport->rx_pkt_size = max_mtu + ETH_HLEN + 4 * 2;
+
+	/*
+	 * add NET_SKB_PAD per build_skb() requirement, make sure we
+	 * have room to align data to cache size after reserving
+	 */
+	cport->rx_frag_mapped_size = cport->rx_pkt_size + RX_OFFSET;
+
+	/*
+	 * per build_skb() requirement
+	 */
+	cport->rx_frag_size =
+		(SKB_DATA_ALIGN(cport->rx_frag_mapped_size) +
+		 SKB_DATA_ALIGN(sizeof (struct skb_shared_info)));
+
+	/*
+	 * assign rx dma queues index
+	 */
+	bitmap_zero(cport->assigned_rxdr, ESS_QUEUES_RXDR_MAX);
+	for (i = 0; i < cport->rxq_count; i++) {
+		int index;
+
+		index = bitmap_find_free_region(priv->used_rxdr,
+						priv->rxdr_count, 0);
+		if (index < 0) {
+			dev_err(&priv->pdev->dev,
+				"cannot find %u available "
+				"rx dma ring\n", cport->rxq_count);
+			ret = -ENOSPC;
+			goto fail;
+		}
+		set_bit(index, cport->assigned_rxdr);
+	}
+
+	/* allocate rx fill/done rings */
+	cport->rxf_rings = kcalloc(cport->rxq_count,
+				   sizeof (*cport->rxf_rings),
+				   GFP_KERNEL);
+	if (!cport->rxf_rings) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	cport->rxd_rings = kcalloc(cport->rxq_count,
+				   sizeof (*cport->rxd_rings),
+				   GFP_KERNEL);
+	if (!cport->rxd_rings) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* set up one fill/done ring pair per assigned hardware ring */
+	i = 0;
+	for_each_set_bit(hw_index, cport->assigned_rxdr, ESS_QUEUES_RXDR_MAX) {
+		char irq_buf[32];
+		int reg_idx;
+
+		/* allocate rx fill ring */
+		snprintf(irq_buf, sizeof (irq_buf), "edma_rx_fill_ring_%u",
+			 hw_index);
+
+		irq = platform_get_irq_byname(priv->pdev, irq_buf);
+		if (irq < 0) {
+			dev_err(&priv->pdev->dev,
+				"failed to find rx fill irq %s\n",
+				irq_buf);
+			ret = irq;
+			goto fail;
+		}
+
+		ret = rxfr_setup(cport, &cport->rxf_rings[i], i, hw_index,
+				 irq, cport->rxq_buf_count);
+		if (ret)
+			goto fail;
+
+
+		/* allocate the matching rx done ring, which shares
+		 * the fill ring page pool */
+		snprintf(irq_buf, sizeof (irq_buf), "edma_rx_desc_ring_%u",
+			 hw_index);
+
+		irq = platform_get_irq_byname(priv->pdev, irq_buf);
+		if (irq < 0) {
+			dev_err(&priv->pdev->dev,
+				"failed to find rx done irq %s\n",
+				irq_buf);
+			ret = irq;
+			goto fail;
+		}
+
+		ret = rxdr_setup(cport, &cport->rxd_rings[i],
+				 cport->rxf_rings[i].pp, i, hw_index,
+				 irq, cport->rxq_buf_count);
+		if (ret)
+			goto fail;
+
+		/* associate rx done queue to the correct fill ring */
+		reg_idx = RXD2FILL_Q2REG(hw_index);
+		val = edma_readl(priv, EDMA_RXD2FILL_MAPx_REG(reg_idx));
+		val &= ~RXD2FILL_MAP_IDx_MASK(hw_index);
+		val |= hw_index << RXD2FILL_MAP_IDx_SHIFT(hw_index);
+		edma_writel(priv, EDMA_RXD2FILL_MAPx_REG(reg_idx), val);
+
+		/* setup PPE QM queue to edma rx ring mapping */
+		qid = cport->rx_qm_qid_first + i;
+		BUG_ON(i >= cport->rx_qm_qid_count);
+		val = edma_readl(priv,
+				 EDMA_QID2RIDx_TABLE_MEM(QID2RID_Q2REG(qid)));
+		val &= ~QID2REG_RIDx_MASK(qid);
+		val |= hw_index << QID2REG_RIDx_SHIFT(qid);
+		edma_writel(priv,
+			    EDMA_QID2RIDx_TABLE_MEM(QID2RID_Q2REG(qid)), val);
+		i++;
+	}
+
+	BUG_ON(i != cport->rxq_count);
+
+	/* enable RX */
+	for (i = 0; i < cport->rxq_count; i++) {
+		struct rx_fill_ring *rxfr = &cport->rxf_rings[i];
+		struct rx_done_ring *rxdr = &cport->rxd_rings[i];
+
+		napi_enable(&rxfr->napi);
+		rxfr_enable(rxfr);
+		napi_enable(&rxdr->napi);
+		rxdr_enable(rxdr);
+	}
+
+	/* unmask RX interrupts */
+	for (i = 0; i < cport->rxq_count; i++) {
+		struct rx_fill_ring *rxfr = &cport->rxf_rings[i];
+		struct rx_done_ring *rxdr = &cport->rxd_rings[i];
+
+		edma_writel(priv, EDMA_RXFILL_INT_MASKx_REG(rxfr->hw_index),
+			    RXFILL_INT_URG_MASK);
+		edma_writel(priv, EDMA_RXDESC_INT_MASKx_REG(rxdr->hw_index),
+			    RXDESC_INT_PKT_DONE_MASK);
+	}
+
+	cport->active = true;
+	return 0;
+
+fail:
+	/* NOTE(review): rings past the last successfully set up one
+	 * are only zero-initialized by kcalloc(); rxdr_release()
+	 * guards on rxdr->cport == NULL but rxfr_release()
+	 * dereferences rxfr->cport unconditionally — verify this
+	 * cannot run on a never-setup fill ring */
+	if (cport->rxd_rings) {
+		for (i = 0; i < cport->rxq_count; i++)
+			rxdr_release(&cport->rxd_rings[i]);
+		kfree(cport->rxd_rings);
+		cport->rxd_rings = NULL;
+	}
+
+	if (cport->rxf_rings) {
+		for (i = 0; i < cport->rxq_count; i++)
+			rxfr_release(&cport->rxf_rings[i]);
+		kfree(cport->rxf_rings);
+		cport->rxf_rings = NULL;
+	}
+
+	/* give the reserved hardware rings back */
+	for_each_set_bit(hw_index, cport->assigned_rxdr, ESS_QUEUES_RXDR_MAX)
+		clear_bit(hw_index, priv->used_rxdr);
+	bitmap_zero(cport->assigned_rxdr, ESS_QUEUES_RXDR_MAX);
+
+	return ret;
+}
+
+/*
+ * tear down the shared cpu port datapath: mask interrupts and disable
+ * NAPI, quiesce forwarding from the PPE to the cpu port, wait for rx
+ * dma to drain, stop and release all rx fill/done rings, then give
+ * the reserved hardware rings back.
+ */
+static void __ess_cpu_port_stop(struct ipq95xx_ess_priv *priv)
+{
+	struct ess_cpu_port *cport = &priv->cpu_port;
+	unsigned int hw_index;
+	u32 val;
+	int i;
+
+	BUG_ON(!cport->active);
+
+	/* prevent poll() from being called & mask all interrupts */
+	for (i = 0; i < cport->rxq_count; i++) {
+		struct rx_done_ring *rxdr = &cport->rxd_rings[i];
+		struct rx_fill_ring *rxfr = &cport->rxf_rings[i];
+
+		disable_irq(rxdr->irq);
+		napi_disable(&rxdr->napi);
+		edma_writel(priv, EDMA_RXDESC_INT_MASKx_REG(rxdr->hw_index), 0);
+
+		disable_irq(rxfr->irq);
+		napi_disable(&rxfr->napi);
+		edma_writel(priv, EDMA_RXFILL_INT_MASKx_REG(rxfr->hw_index), 0);
+	}
+
+	/*
+	 * before stopping rx dma, we need to make sure the ESS does
+	 * not try to send any packet to us, or the DMA queue can fail to
+	 * stop and in-flight buffers will be lost
+	 *
+	 * disable forwarding to CPU port and restore it later to
+	 * previous value
+	 */
+	val = ppe_readl(priv, PPE_PORT_BRCTL_REG(0));
+	val &= ~PORT_BRCTL_TXMAC_EN_MASK;
+	ppe_writel(priv, PPE_PORT_BRCTL_REG(0), val);
+
+	/* stop RX DMA */
+	for (i = 0; i < cport->rxq_count; i++) {
+		struct rx_done_ring *rxdr = &cport->rxd_rings[i];
+		u32 last_idx;
+		int j;
+
+		/* Wait for rx dma done queue to be inactive. This is
+		 * not perfect, we should poll ESS queue counters
+		 * instead */
+		last_idx = edma_readl(rxdr->cport->priv,
+				      EDMA_RXDESC_PRODx_IDX_REG(
+					      rxdr->hw_index));
+		for (j = 0; j < 1000; j++) {
+			u32 prod_idx;
+
+			udelay(100);
+			prod_idx = edma_readl(rxdr->cport->priv,
+					      EDMA_RXDESC_PRODx_IDX_REG(
+						      rxdr->hw_index));
+			if (prod_idx == last_idx)
+				break;
+			last_idx = prod_idx;
+		}
+
+		rxfr_stop(&cport->rxf_rings[i]);
+		rxdr_stop(rxdr);
+	}
+
+	/* release RX dma queues */
+	for (i = 0; i < cport->rxq_count; i++) {
+		rxfr_release(&cport->rxf_rings[i]);
+		rxdr_release(&cport->rxd_rings[i]);
+	}
+
+	kfree(cport->rxf_rings);
+	cport->rxf_rings = NULL;
+	kfree(cport->rxd_rings);
+	cport->rxd_rings = NULL;
+
+	/* restore previous port forwarding state */
+	/* NOTE(review): this sets TXMAC_EN unconditionally instead of
+	 * restoring the value saved above — confirm the bit is always
+	 * supposed to be set when the cpu port was active */
+	val = ppe_readl(priv, PPE_PORT_BRCTL_REG(0));
+	val |= PORT_BRCTL_TXMAC_EN_MASK;
+	ppe_writel(priv, PPE_PORT_BRCTL_REG(0), val);
+
+	/* unassigned rings to this port */
+	for_each_set_bit(hw_index, cport->assigned_rxdr, ESS_QUEUES_RXDR_MAX)
+		clear_bit(hw_index, priv->used_rxdr);
+	bitmap_zero(cport->assigned_rxdr, ESS_QUEUES_RXDR_MAX);
+	cport->active = false;
+}
+
+/*
+ * refcounted open of the shared cpu port: the datapath is only
+ * brought up for the first user
+ */
+static int ess_cpu_port_open(struct ipq95xx_ess_priv *priv)
+{
+	struct ess_cpu_port *cport = &priv->cpu_port;
+	int ret = 0;
+
+	if (!cport->active)
+		ret = __ess_cpu_port_open(priv);
+
+	if (!ret)
+		++cport->ref_count;
+
+	return ret;
+}
+
+/*
+ * refcounted stop of the shared cpu port: the datapath is torn down
+ * when the last user goes away
+ */
+static void ess_cpu_port_stop(struct ipq95xx_ess_priv *priv)
+{
+	struct ess_cpu_port *cport = &priv->cpu_port;
+
+	/* guard against unbalanced stop: a stray decrement would wrap
+	 * the refcount and keep the cpu port alive forever */
+	if (WARN_ON(!cport->ref_count))
+		return;
+
+	--cport->ref_count;
+	if (cport->ref_count)
+		return;
+
+	if (cport->active)
+		__ess_cpu_port_stop(priv);
+}
+
+/*
+ * ndo_open: bring up a front panel port.
+ *
+ * Takes a reference on the shared CPU port, reserves and initializes
+ * the port's TX DMA rings, programs the RSS hash->queue mapping,
+ * connects phylink and configures both MACs (GMAC and XGMAC) before
+ * enabling TX and making the port visible to the RX path.
+ */
+static int ess_netdev_open(struct net_device *dev)
+{
+	struct ess_port *port = netdev_priv(dev);
+	struct ipq95xx_ess_priv *priv = port->priv;
+	char irq_buf[32];
+	unsigned int i, hw_index, rx_pkt_size;
+	int irq, ret;
+	u32 val;
+
+	/* in case ethtool adjusted the number of tx queues, set it
+	 * again */
+	ret = netif_set_real_num_tx_queues(dev, port->txq_count);
+	if (ret)
+		return ret;
+
+	/* open cpu port */
+	ret = ess_cpu_port_open(priv);
+	if (ret)
+		return ret;
+
+	/* assign hardware index for each ring, this may fail if
+	 * other ports have used too many tx queues */
+	bitmap_zero(port->assigned_txdr, ESS_QUEUES_TXDR_MAX);
+
+	for (i = 0; i < port->txq_count; i++) {
+		int index;
+
+		index = bitmap_find_free_region(priv->used_txdr,
+						priv->txdr_count, 0);
+		if (index < 0) {
+			/* fixed: removed BUG_ON(index < 0) that made
+			 * this recovery path unreachable and crashed
+			 * the kernel on tx ring exhaustion */
+			netdev_err(dev, "cannot find %u available "
+				   "tx dma ring\n", port->txq_count);
+			ret = -ENOSPC;
+			goto fail;
+		}
+		set_bit(index, port->assigned_txdr);
+	}
+
+	/* allocate tx rings */
+	port->tx_rings = kcalloc(port->txq_count, sizeof (*port->tx_rings),
+				 GFP_KERNEL);
+	if (!port->tx_rings) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	i = 0;
+	for_each_set_bit(hw_index, port->assigned_txdr, ESS_QUEUES_TXDR_MAX) {
+		snprintf(irq_buf, sizeof (irq_buf), "edma_tx_compl_ring_%u",
+			 hw_index);
+		irq = platform_get_irq_byname(port->priv->pdev, irq_buf);
+		if (irq < 0) {
+			netdev_err(dev, "failed to find tx compl irq %s\n",
+				   irq_buf);
+			ret = irq;
+			goto fail;
+		}
+
+		ret = txr_setup_port(port, &port->tx_rings[i], i, hw_index,
+				     irq, port->txq_buf_count);
+		if (ret)
+			goto fail;
+		i++;
+	}
+
+	BUG_ON(i != port->txq_count);
+
+	/* init RSS indir table for this port, default is to balance
+	 * on all queues, up to max number of CPUs */
+	for (i = 0; i < 256; i++) {
+		unsigned int maxq = min_t(unsigned int,
+					  num_possible_cpus(),
+					  port->priv->cpu_port.rxq_count);
+
+		int idx = (i << UCAST_HASH_MAP_HASH_SHIFT) |
+			(port->ppe_port_id << UCAST_HASH_MAP_PROF_SHIFT);
+
+		ppe_writel(priv, UCAST_HASH_MAP_TBL_REG(idx), i % maxq);
+	}
+
+	ret = phylink_of_phy_connect(port->phylink, port->np, 0);
+	if (ret) {
+		netdev_err(dev, "failed to connect to phy: %d\n", ret);
+		goto fail;
+	}
+
+	/* clear & enable mib for both macs */
+	val = gmac_readl(port, GMAC_MIB_CTRL_REG);
+	val |= GMAC_MIB_CTRL_EN_MASK;
+	val |= GMAC_MIB_CTRL_RESET_MASK;
+	val &= ~GMAC_MIB_CTRL_CLR_ON_RD_MASK;
+	gmac_writel(port, GMAC_MIB_CTRL_REG, val);
+	val &= ~GMAC_MIB_CTRL_RESET_MASK;
+	gmac_writel(port, GMAC_MIB_CTRL_REG, val);
+
+	val = xgmac_readl(port, XGMAC_MMC_CTRL_REG);
+	val &= ~XGMAC_MMC_CTRL_CLR_ON_RD_MASK;
+	val |= XGMAC_MMC_CTRL_CNT_RST_MASK;
+	xgmac_writel(port, XGMAC_MMC_CTRL_REG, val);
+
+	/* setup xgmac in promiscuous mode */
+	val = xgmac_readl(port, XGMAC_PKT_FLT_REG);
+	val |= XGMAC_PKT_FLT_PR_MASK |
+		(0x2 << XGMAC_PKT_FLT_PCF_SHIFT) |
+		XGMAC_PKT_FLT_RA_MASK;
+	xgmac_writel(port, XGMAC_PKT_FLT_REG, val);
+
+	/* setup default eee wake-up timer */
+	xgmac_writel(port, XGMAC_LPI_ENTRY_TIMER_REG, 352);
+
+	/* disable rx flow control (pause frame reception) */
+	val = xgmac_readl(port, XGMAC_RX_FLOW_CTRL_REG);
+	val &= ~XGMAC_RX_FLOW_CTRL_RFE_MASK;
+	xgmac_writel(port, XGMAC_RX_FLOW_CTRL_REG, val);
+
+	/* disable txfc (pause frame generation)
+	 *
+	 * fixed: the register offset passed to xgmac_readl/writel was
+	 * XGMAC_Q0_TX_FLOW_CTRL_PT_SHIFT (a bit-shift constant), not
+	 * the register address */
+	val = xgmac_readl(port, XGMAC_Q0_TX_FLOW_CTRL_REG);
+	val |= 0xffff << XGMAC_Q0_TX_FLOW_CTRL_PT_SHIFT;
+	val &= ~XGMAC_Q0_TX_FLOW_CTRL_TFE_MASK;
+	xgmac_writel(port, XGMAC_Q0_TX_FLOW_CTRL_REG, val);
+
+	/*
+	 * setup MRU for GMAC
+	 *
+	 * this register will filter packets when packet size is
+	 * higher, rx_bad_bytes MIB counter will increment
+	 */
+	rx_pkt_size = dev->mtu + ETH_HLEN + 4 * 2;
+	val = gmac_readl(port, GMAC_CTRL2_REG);
+	val &= ~GMAC_CTRL2_MAXLEN_MASK;
+	val |= (rx_pkt_size + ETH_FCS_LEN) << GMAC_CTRL2_MAXLEN_SHIFT;
+	gmac_writel(port, GMAC_CTRL2_REG, val);
+
+	/* this register only has an effect on MIB rx_too_big, but
+	 * since the packet is dropped by previous filter, it will
+	 * never actually increment */
+	val = gmac_readl(port, GMAC_MISC_REG);
+	val &= ~GMAC_MISC_JUMBO_SIZE_MASK;
+	val |= (rx_pkt_size + ETH_FCS_LEN) << GMAC_MISC_JUMBO_SIZE_SHIFT;
+	gmac_writel(port, GMAC_MISC_REG, val);
+
+	/*
+	 * setup MRU for XGMAC
+	 *
+	 * GPSLCE needs to be set for GPSL value to be taken into
+	 * account, otherwise hardware defaults to 1518, note that
+	 * GPSL cannot be lower than 1518 either
+	 *
+	 * hardware automatically adds 4 bytes to this value for
+	 * incoming vlan tagged frames
+	 *
+	 * fixed: this XGMAC register was read through gmac_readl()
+	 */
+	val = xgmac_readl(port, XGMAC_RX_CFG_REG);
+	val &= ~XGMAC_RX_CFG_GPSL_MASK;
+	val |= XGMAC_RX_CFG_GPSLCE_MASK;
+	val |= (dev->mtu + ETH_HLEN + ETH_FCS_LEN) << XGMAC_RX_CFG_GPSL_SHIFT;
+	xgmac_writel(port, XGMAC_RX_CFG_REG, val);
+
+	phylink_start(port->phylink);
+
+	/* enable TX */
+	for (i = 0; i < port->txq_count; i++) {
+		struct tx_ring *txr = &port->tx_rings[i];
+		napi_enable(&txr->napi);
+		txr_enable(txr);
+	}
+
+	/* unmask TX interrupts */
+	for (i = 0; i < port->txq_count; i++) {
+		struct tx_ring *txr = &port->tx_rings[i];
+		edma_writel(priv, EDMA_TX_INT_MASKx_REG(txr->hw_index),
+			    TX_INT_PKT_DONE_MASK);
+	}
+
+	netif_tx_start_all_queues(dev);
+
+	/* make us visible to RX path */
+	cpu_port_mark_port_active(priv, port);
+
+	return 0;
+
+fail:
+	ess_cpu_port_stop(priv);
+
+	if (port->tx_rings) {
+		/* rings past the failure point are zero-filled by
+		 * kcalloc; txr_release() is assumed to tolerate them */
+		for (i = 0; i < port->txq_count; i++)
+			txr_release(&port->tx_rings[i]);
+		kfree(port->tx_rings);
+		port->tx_rings = NULL;
+	}
+
+	for_each_set_bit(hw_index, port->assigned_txdr, ESS_QUEUES_TXDR_MAX)
+		clear_bit(hw_index, priv->used_txdr);
+
+	bitmap_zero(port->assigned_txdr, ESS_QUEUES_TXDR_MAX);
+
+	return ret;
+}
+
+/*
+ * ndo_stop: tear down a front panel port, reverse order of
+ * ess_netdev_open().
+ */
+static int ess_netdev_close(struct net_device *dev)
+{
+	struct ess_port *port = netdev_priv(dev);
+	struct ipq95xx_ess_priv *priv = port->priv;
+	unsigned int hw_index;
+	int i;
+
+	/* make us invisible to RX */
+	cpu_port_mark_port_inactive(priv, port);
+
+	/* prevent poll() from being called & mask all interrupts.
+	 * NOTE(review): no matching enable_irq() in this function;
+	 * presumably the irq is freed/re-requested by
+	 * txr_release()/txr_setup_port() -- confirm */
+	for (i = 0; i < port->txq_count; i++) {
+		struct tx_ring *txr = &port->tx_rings[i];
+		disable_irq(txr->irq);
+		napi_disable(&txr->napi);
+		edma_writel(priv, EDMA_TX_INT_MASKx_REG(txr->hw_index), 0);
+	}
+
+	/* prevent hard_start_xmit from being called */
+	netif_tx_disable(dev);
+
+	/* stop TX DMA */
+	for (i = 0; i < port->txq_count; i++) {
+		struct tx_ring *txr = &port->tx_rings[i];
+		int j;
+
+		/*
+		 * hardware does not like DMA being stopped when tx
+		 * queue is not empty and TSO is used, let queue empty
+		 * before stopping, it should not take long
+		 * (up to 1000 x 10us = 10ms busy-wait)
+		 */
+		for (j = 0; j < 1000; j++) {
+			if (!txr_has_unsent_buffers(txr))
+				break;
+			udelay(10);
+		}
+
+		if (j == 1000)
+			netdev_err(dev, "failed to flush tx queue\n");
+		txr_stop(txr);
+	}
+
+	/* release TX dma queues */
+	for (i = 0; i < port->txq_count; i++) {
+		txr_release(&port->tx_rings[i]);
+		netdev_tx_reset_queue(netdev_get_tx_queue(dev, i));
+	}
+	kfree(port->tx_rings);
+	port->tx_rings = NULL;
+
+	/* unassign rings from this port */
+	for_each_set_bit(hw_index, port->assigned_txdr, ESS_QUEUES_TXDR_MAX)
+		clear_bit(hw_index, priv->used_txdr);
+	bitmap_zero(port->assigned_txdr, ESS_QUEUES_TXDR_MAX);
+
+	/* drop our reference on the shared cpu port */
+	ess_cpu_port_stop(priv);
+
+	phylink_stop(port->phylink);
+	phylink_disconnect_phy(port->phylink);
+
+	if (port->cur_uniphy_channel) {
+		ess_uniphy_channel_put(port->cur_uniphy_channel);
+		port->cur_uniphy_channel = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Change the interface's MTU
+ *
+ * If the new MTU exceeds the current maximum over all ports and the
+ * shared CPU port is active, the CPU port is restarted so its RX
+ * buffers can hold the larger frames.
+ */
+static int ess_netdev_change_mtu(struct net_device *dev,
+				 int new_mtu)
+{
+	bool running = netif_running(dev);
+	struct ess_port *port = netdev_priv(dev);
+	struct ess_cpu_port *cport = &port->priv->cpu_port;
+	unsigned int cur_max_mtu;
+	int ret = 0;
+
+	if (running) {
+		ret = ess_netdev_close(dev);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* maximum MTU across all ports before applying the change */
+	cur_max_mtu = compute_max_ports_mtu(port->priv);
+	dev->mtu = new_mtu;
+
+	if (new_mtu > cur_max_mtu && cport->active) {
+		/* need to change cpu port config */
+		__ess_cpu_port_stop(port->priv);
+		if (__ess_cpu_port_open(port->priv)) {
+			/* NOTE(review): ret is still 0 here, so this
+			 * error path returns success while the device
+			 * is left closed -- confirm this is intended */
+			netdev_err(dev, "cannot reopen cpu port, "
+				   "rx stalled\n");
+			return ret;
+		}
+	}
+
+	if (running)
+		ret = ess_netdev_open(dev);
+
+	return ret;
+}
+
+/*
+ * Change the interface's mac address.
+ *
+ * Only allowed while the interface is down; rejects multicast and
+ * all-zero addresses like the generic eth_mac_addr() helper does.
+ */
+static int ess_netdev_set_mac_address(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+
+	if (netif_running(dev))
+		return -EBUSY;
+
+	/* fixed: previously any byte pattern was accepted */
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	dev_addr_mod(dev, 0, addr->sa_data, ETH_ALEN);
+	return 0;
+}
+
+/*
+ * Free everything attached to a port's private data: the phylink
+ * instance, the OF node reference and the percpu stats.  Safe to
+ * call on a partially initialized port (every member is checked).
+ */
+static void ess_port_release_data(struct ess_port *port)
+{
+	if (port->phylink)
+		phylink_destroy(port->phylink);
+	if (port->np)
+		of_node_put(port->np);
+	if (port->netdev->tstats)
+		free_percpu(port->netdev->tstats);
+}
+
+/*
+ * Split a clause-45 encoded mii_id into its PHY address and MMD
+ * (device) number using the MDIO_PHY_ID_* field layout.
+ */
+static void get_phyaddr_mmd(int mii_id, int *phyaddr, int *mmd)
+{
+	*phyaddr = FIELD_GET(MDIO_PHY_ID_PRTAD, mii_id);
+	*mmd = FIELD_GET(MDIO_PHY_ID_DEVAD, mii_id);
+}
+
+/*
+ * mii_if_info read accessor: forward a C22 or C45 register read to
+ * the port's backing mii_bus.
+ */
+static int mii_bus_read(struct net_device *dev, int mii_id, int regnum)
+{
+	struct ess_port *port = netdev_priv(dev);
+	struct mii_bus *bus = port->mii_bus;
+	int phyaddr, mmd;
+
+	/* plain clause-22 access */
+	if (!(mii_id & MDIO_PHY_ID_C45))
+		return bus->read(bus, mii_id, regnum);
+
+	get_phyaddr_mmd(mii_id, &phyaddr, &mmd);
+	return bus->read_c45(bus, phyaddr, mmd, regnum);
+}
+
+/*
+ * mii_if_info write accessor: forward a C22 or C45 register write to
+ * the port's backing mii_bus.
+ */
+static void mii_bus_write(struct net_device *dev, int mii_id, int regnum,
+			  int value)
+{
+	struct ess_port *port = netdev_priv(dev);
+	struct mii_bus *bus = port->mii_bus;
+	int phyaddr, mmd;
+
+	/* plain clause-22 access */
+	if (!(mii_id & MDIO_PHY_ID_C45)) {
+		bus->write(bus, mii_id, regnum, value);
+		return;
+	}
+
+	get_phyaddr_mmd(mii_id, &phyaddr, &mmd);
+	bus->write_c45(bus, phyaddr, mmd, regnum, value);
+}
+
+/*
+ * ndo_eth_ioctl: expose raw MII register access (SIOCGMIIREG /
+ * SIOCSMIIREG) through a temporary mii_if_info bound to the port's
+ * mii_bus.  All other commands return -ENOTTY.
+ *
+ * NOTE(review): ENOTSUPP is a kernel-internal errno; userspace
+ * conventionally receives EOPNOTSUPP -- confirm before changing.
+ */
+static int ess_netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct ess_port *port = netdev_priv(dev);
+
+	switch (cmd) {
+	case SIOCGMIIREG:
+	case SIOCSMIIREG: {
+		struct mii_if_info mii;
+
+		/* port has no "mdio-bus" phandle */
+		if (!port->mii_bus)
+			return -ENOTSUPP;
+
+		if (if_mii(ifr)->phy_id & MDIO_PHY_ID_C45 &&
+		    (!port->mii_bus->read_c45 || !port->mii_bus->write_c45))
+			/*
+			 * fail if a C45 access is requested and either
+			 * read_c45 or write_c45 is unsupported by the
+			 * mii_bus.
+			 */
+			return -ENOTSUPP;
+
+		mii.dev = dev;
+		mii.mdio_read = mii_bus_read;
+		mii.mdio_write = mii_bus_write;
+		mii.phy_id = 0;
+		mii.phy_id_mask = MDIO_PHY_ID_C45_MASK;
+		mii.reg_num_mask = 0xffff;
+		return generic_mii_ioctl(&mii, if_mii(ifr), cmd, NULL);
+	}
+	}
+	return -ENOTTY;
+}
+
+/*
+ * net_device callbacks for front panel ports.
+ */
+const struct net_device_ops ess_port_netdev_ops = {
+	.ndo_open	 	= ess_netdev_open,
+	.ndo_stop		= ess_netdev_close,
+	.ndo_start_xmit		= ess_netdev_xmit,
+	.ndo_get_stats64	= dev_get_tstats64,
+	.ndo_set_mac_address	= ess_netdev_set_mac_address,
+	.ndo_change_mtu		= ess_netdev_change_mtu,
+	.ndo_eth_ioctl		= ess_netdev_ioctl,
+};
+
+
+/*
+ * Create and register the net_device for one front panel port.
+ *
+ * Allocates the netdev with ESS_QUEUES_TXDR_MAX tx queues and a
+ * single rx queue, fills the per-port private data, creates the
+ * phylink instance, optionally resolves the "mdio-bus" phandle and
+ * finally registers the interface and links it into priv->ports.
+ *
+ * Returns 0 on success or a negative errno (-EPROBE_DEFER when the
+ * referenced mdio bus is not available yet).
+ */
+int ess_port_init(struct platform_device *pdev,
+		  struct ipq95xx_ess_priv *priv,
+		  const struct ess_port_config *pcfg,
+		  int port_id,
+		  unsigned int txdr_count)
+{
+	struct device_node *mdio_node;
+	struct ess_port *port;
+	struct net_device *dev;
+	struct phylink *phylink;
+	phy_interface_t phy_mode;
+	int ret;
+	netdev_features_t features;
+
+	dev = alloc_netdev_mqs(sizeof (*port), pcfg->name,
+			       NET_NAME_UNKNOWN, ether_setup,
+			       ESS_QUEUES_TXDR_MAX, 1);
+	if (!dev)
+		return -ENOMEM;
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	/* note: xgmac cannot hardware filter below 1518 bytes
+	 * including FCS, make sure we don't lower mtu below that
+	 * value */
+	dev->min_mtu = ETH_DATA_LEN;
+	dev->max_mtu = ESS_MAX_MTU;
+	dev->netdev_ops = &ess_port_netdev_ops;
+	dev->ethtool_ops = &ess_port_ethtool_ops;
+	dev->dev_port = port_id;
+	dev_addr_mod(dev, 0, pcfg->mac_addr, ETH_ALEN);
+	netif_set_tso_max_segs(dev, ESS_TSO_MAX_SEGS);
+
+	features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+		NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
+	dev->features = features;
+	dev->hw_features = features;
+
+	port = netdev_priv(dev);
+	port->priv = priv;
+	port->txq_buf_count = 1024;
+	port->id = port_id;
+	port->np = of_node_get(pcfg->np);
+	port->clocks = &priv->port_clocks[port_id];
+	port->netdev = dev;
+	port->ppe_port_id = pcfg->ppe_port_id;
+	port->uniphy_id = pcfg->uniphy_id;
+	port->uniphy_channel_id = pcfg->uniphy_channel_id;
+	port->tx_irq_coalesce_us = 100;
+	port->tx_irq_coalesce_pkts = 16;
+
+	/* default to using EEE if PHY supports it */
+	port->eee_enabled = true;
+	port->eee_tx_lpi_enabled = true;
+	port->txq_count = txdr_count;
+
+	/* default to using GMAC, it will be changed as needed */
+	ess_port_select_mac(port, ESS_MAC_GMAC);
+	port->cur_mac = ESS_MAC_GMAC;
+
+	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+	if (!dev->tstats) {
+		/* fixed: ret was used uninitialized on this path */
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	netif_set_real_num_rx_queues(dev, 0);
+	netif_set_real_num_tx_queues(dev, port->txq_count);
+
+	port->phylink_pcs.ops = &ess_port_phylink_pcs_ops;
+	port->phylink_pcs.poll = true;
+
+	port->phylink_config.dev = &dev->dev;
+	port->phylink_config.type = PHYLINK_NETDEV;
+	port->phylink_config.mac_capabilities =
+		MAC_100 | MAC_10 | MAC_1000 |
+		MAC_2500FD | MAC_5000FD | MAC_10000FD;
+
+	ret = of_get_phy_mode(port->np, &phy_mode);
+	if (ret) {
+		dev_err(&dev->dev, "incorrect phy-mode\n");
+		goto fail;
+	}
+
+	ret = hwdesc_port_get_supported_phy_modes(
+		port->priv->ess_type,
+		port->id,
+		port->uniphy_id,
+		port->phylink_config.supported_interfaces);
+	if (ret)
+		goto fail;
+
+	phylink = phylink_create(&port->phylink_config,
+				 of_fwnode_handle(port->np),
+				 phy_mode, &ess_port_phylink_mac_ops);
+	if (IS_ERR(phylink)) {
+		ret = PTR_ERR(phylink);
+		goto fail;
+	}
+
+	port->phylink = phylink;
+
+	mdio_node = of_parse_phandle(port->np, "mdio-bus", 0);
+	if (mdio_node) {
+		port->mii_bus = of_mdio_find_bus(mdio_node);
+		of_node_put(mdio_node);
+		if (!port->mii_bus) {
+			ret = -EPROBE_DEFER;
+			goto fail;
+		}
+	}
+
+	ret = register_netdev(dev);
+	if (ret) {
+		netdev_err(dev, "error %d registering interface %s\n",
+			   ret, dev->name);
+		goto fail;
+	}
+
+	port_dbg_init(port);
+	list_add(&port->next, &priv->ports);
+	return 0;
+
+fail:
+	/* fixed: drop the device reference taken by
+	 * of_mdio_find_bus(), previously leaked when
+	 * register_netdev() failed (ess_port_release() only puts it
+	 * on the normal teardown path) */
+	if (port->mii_bus)
+		put_device(&port->mii_bus->dev);
+	ess_port_release_data(port);
+	free_netdev(dev);
+	return ret;
+}
+
+/*
+ * Undo ess_port_init(): unregister and free the netdev and drop the
+ * references it held.
+ */
+void ess_port_release(struct ess_port *port)
+{
+	struct net_device *netdev = port->netdev;
+
+	/* drop the reference taken by of_mdio_find_bus() */
+	if (port->mii_bus)
+		put_device(&port->mii_bus->dev);
+	port_dbg_release(port);
+	unregister_netdev(netdev);
+	ess_port_release_data(port);
+	list_del(&port->next);
+	free_netdev(netdev);
+}
+
+/*
+ * One-time setup of the shared CPU port software state; the hardware
+ * rings themselves are only brought up by __ess_cpu_port_open() when
+ * the first port opens.
+ *
+ * Returns 0 or a negative errno.
+ */
+int ess_cpu_port_init(struct platform_device *pdev,
+		      const struct ess_cpu_port_config *cfg,
+		      struct ipq95xx_ess_priv *priv)
+{
+	struct ess_cpu_port *cport = &priv->cpu_port;
+
+	/* devm_alloc_percpu() takes a type expression, hence the
+	 * dereference of the (not yet valid) pointer */
+	cport->percpu_priv = devm_alloc_percpu(&pdev->dev,
+					       *cport->percpu_priv);
+	if (!cport->percpu_priv)
+		return -ENOMEM;
+
+	cport->priv = priv;
+	cport->rx_qm_qid_first = cfg->rx_qm_qid_first;
+	cport->rx_qm_qid_count = cfg->rx_qm_qid_count;
+	cport->rxq_buf_count = 4096;
+	cport->rx_irq_coalesce_us = 25;
+	cport->rx_irq_coalesce_pkts = 16;
+
+	/* assign hardware index for each ring, we want one rx-fill
+	 * ring per rx-done ring */
+	cport->rxq_max_count = min_t(unsigned int,
+				     priv->rxfr_count,
+				     priv->rxdr_count);
+
+	/* limit maximum number of rx queues to assigned number of
+	 * PPE QM queues */
+	cport->rxq_max_count = min_t(unsigned int,
+				     cport->rxq_max_count,
+				     cport->rx_qm_qid_count);
+
+	/* default to use all available queues */
+	cport->rxq_count = cport->rxq_max_count;
+
+	/* backing netdev for the cpu port NAPI contexts */
+	init_dummy_netdev(&cport->dummy_netdev);
+
+	if (IS_ENABLED(CONFIG_IPQ95XX_FBX_FF)) {
+		int ret;
+		ret = ff_init(priv);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Undo ess_cpu_port_init(): tear down the fast-forward state; the
+ * percpu data is devm-managed and freed with the device.
+ */
+void ess_cpu_port_release(struct ipq95xx_ess_priv *priv)
+{
+	if (IS_ENABLED(CONFIG_IPQ95XX_FBX_FF))
+		ff_release(priv);
+}
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./port_ethtool.c linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/port_ethtool.c
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./port_ethtool.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/port_ethtool.c	2024-03-18 14:40:14.839741005 +0100
@@ -0,0 +1,505 @@
+#include "port_priv.h"
+
+/* identification strings reported through ethtool get_drvinfo */
+static char ess_driver_name[] = "ipq95xx_ess";
+static char ess_driver_version[] = "1.0";
+
+/* fixed-width name entry for the ETH_SS_PRIV_FLAGS string set */
+struct ethtool_priv_flags_strings {
+	const char string[ETH_GSTRING_LEN];
+};
+
+/* bit 0: uniphy "st-sync-ok" status, see ess_ethtool_get_priv_flags() */
+static const struct ethtool_priv_flags_strings ess_priv_flags_strings[] = {
+	{ .string = "st-sync-ok", },
+};
+
+/*
+ * ethtool get_drvinfo: report static driver identification strings.
+ */
+static void ess_ethtool_get_drvinfo(struct net_device *netdev,
+				    struct ethtool_drvinfo *drvinfo)
+{
+	/* strscpy() instead of the deprecated strlcpy(): identical
+	 * truncation behavior for these short NUL-terminated sources,
+	 * without strlcpy()'s full read of the source string */
+	strscpy(drvinfo->driver, ess_driver_name, sizeof(drvinfo->driver));
+	strscpy(drvinfo->version, ess_driver_version,
+		sizeof(drvinfo->version));
+	strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+	strscpy(drvinfo->bus_info, "internal", sizeof(drvinfo->bus_info));
+}
+
+/*
+ * Descriptor for one hardware MIB counter: register offset within
+ * the MAC's register block, counter width in bytes (4 or 8) and the
+ * name exported through "ethtool -S".
+ */
+struct ess_mac_stat {
+	int		offset;
+	int		size;
+	const char	name[ETH_GSTRING_LEN];
+};
+
+/* GMAC MIB counters */
+static const struct ess_mac_stat ess_gmac_mib[] = {
+	{ 0x0040, 4, "rx_broadcast_good" },
+	{ 0x0044, 4, "rx_pause" },
+	{ 0x0048, 4, "rx_multicast_good" },
+	{ 0x004c, 4, "rx_crc" },
+	{ 0x0050, 4, "rx_align_error" },
+	{ 0x0054, 4, "rx_runt" },
+	{ 0x0058, 4, "rx_frag" },
+	{ 0x005c, 4, "rx_jumbo_fcs_error" },
+	{ 0x0060, 4, "rx_jumbo_align_error" },
+	{ 0x0064, 4, "rx_pkts_64" },
+	{ 0x0068, 4, "rx_pkts_65_127" },
+	{ 0x006c, 4, "rx_pkts_128_255" },
+	{ 0x0070, 4, "rx_pkts_256_511" },
+	{ 0x0074, 4, "rx_pkts_512_1023" },
+	{ 0x0078, 4, "rx_pkts_1024_1517" },
+	{ 0x007c, 4, "rx_pkts_1518_more" },
+	{ 0x0080, 4, "rx_too_big" },
+	{ 0x0084, 8, "rx_bytes_good" },
+	{ 0x008c, 8, "rx_bad_bytes" },
+	{ 0x0094, 4, "rx_unicast" },
+
+	{ 0x00a0, 4, "tx_broadcast" },
+	{ 0x00a4, 4, "tx_pause" },
+	{ 0x00a8, 4, "tx_multicast" },
+	{ 0x00ac, 4, "tx_underrun" },
+	{ 0x00b0, 4, "tx_pkts_64" },
+	{ 0x00b4, 4, "tx_pkts_65_127" },
+	{ 0x00b8, 4, "tx_pkts_128_255" },
+	{ 0x00bc, 4, "tx_pkts_256_511" },
+	{ 0x00c0, 4, "tx_pkts_512_1023" },
+	{ 0x00c4, 4, "tx_pkts_1024_1517" },
+	{ 0x00c8, 4, "tx_pkts_1518_more" },
+	{ 0x00cc, 8, "tx_bytes" },
+	{ 0x00d4, 4, "tx_collisions" },
+	{ 0x00d8, 4, "tx_abortcol" },
+	{ 0x00dc, 4, "tx_multicol" },
+	{ 0x00e0, 4, "tx_singlecol" },
+	{ 0x00e4, 4, "tx_excess_defer" },
+	{ 0x00e8, 4, "tx_defer" },
+	{ 0x00ec, 4, "tx_late_col" },
+	{ 0x00f0, 4, "tx_unicast" },
+};
+
+/* XGMAC MMC counters */
+static const struct ess_mac_stat ess_xgmac_mib[] = {
+	{ 0x0814, 8, "tx_bytes" },
+	{ 0x081c, 8, "tx_pkts" },
+	{ 0x0824, 8, "tx_broadcast_good" },
+	{ 0x082c, 8, "tx_multicast_good" },
+	{ 0x0834, 8, "tx_pkts_64" },
+	{ 0x083c, 8, "tx_pkts_65_127" },
+	{ 0x0844, 8, "tx_pkts_128_255" },
+	{ 0x084c, 8, "tx_pkts_256_511" },
+	{ 0x0854, 8, "tx_pkts_512_1023" },
+	{ 0x085c, 8, "tx_pkts_1024_1517" },
+	{ 0x0864, 8, "tx_unicast" },
+	{ 0x086c, 8, "tx_multicast" },
+	{ 0x0874, 8, "tx_broadcast" },
+	{ 0x087c, 8, "tx_underflow" },
+	{ 0x0884, 8, "tx_bytes_good" },
+	{ 0x088c, 8, "tx_pkts_good" },
+	{ 0x0894, 8, "tx_pause" },
+	{ 0x089c, 8, "tx_vlan" },
+	{ 0x08a4, 4, "tx_lpi_usecs" },
+	{ 0x08a8, 4, "tx_lpi_transitions" },
+
+	{ 0x0900, 8, "rx_pkts" },
+	{ 0x0908, 8, "rx_bytes" },
+	{ 0x0910, 8, "rx_bytes_good" },
+	{ 0x0918, 8, "rx_broadcast_good" },
+	{ 0x0920, 8, "rx_multicast_good" },
+	{ 0x0928, 8, "rx_crc" },
+	{ 0x0930, 4, "rx_runt" },
+	{ 0x0934, 4, "rx_jabber" },
+	{ 0x0938, 4, "rx_undersize" },
+	{ 0x093c, 4, "rx_oversize" },
+	{ 0x0940, 8, "rx_pkts_64" },
+	{ 0x0948, 8, "rx_pkts_65_127" },
+	{ 0x0950, 8, "rx_pkts_128_255" },
+	{ 0x0958, 8, "rx_pkts_256_511" },
+	{ 0x0960, 8, "rx_pkts_512_1023" },
+	{ 0x0968, 8, "rx_pkts_1024_1517" },
+	{ 0x0970, 8, "rx_unicast" },
+	{ 0x0978, 8, "rx_length_err" },
+	{ 0x0980, 8, "rx_range_err" },
+	{ 0x0988, 8, "rx_pause" },
+	{ 0x0990, 8, "rx_fifo_over" },
+	{ 0x0998, 8, "rx_vlan" },
+	{ 0x09a0, 4, "rx_watchdog" },
+	{ 0x09a4, 4, "rx_lpi_usecs" },
+	{ 0x09a8, 4, "rx_lpi_transitions" },
+	{ 0x09ac, 8, "rx_discard" },
+	{ 0x09b4, 8, "rx_discard_bytes" },
+};
+
+/*
+ * ethtool get_sset_count: number of entries in a string set.  For
+ * ETH_SS_STATS this is the MIB table of the currently selected MAC
+ * plus the page_pool statistics.
+ */
+static int ess_ethtool_get_sset_count(struct net_device *netdev,
+				      int string_set)
+{
+	struct ess_port *port = netdev_priv(netdev);
+	unsigned int mib_count;
+
+	switch (string_set) {
+	case ETH_SS_STATS:
+		mib_count = (port->cur_mac == ESS_MAC_GMAC) ?
+			ARRAY_SIZE(ess_gmac_mib) : ARRAY_SIZE(ess_xgmac_mib);
+		return page_pool_ethtool_stats_get_count() + mib_count;
+
+	case ETH_SS_PRIV_FLAGS:
+		return ARRAY_SIZE(ess_priv_flags_strings);
+
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * ethtool get_strings: emit the MIB counter names of the currently
+ * selected MAC followed by the page_pool stat names, or the private
+ * flags names.  Order must match ess_ethtool_get_stats() and
+ * ess_ethtool_get_priv_flags().
+ */
+static void ess_ethtool_get_strings(struct net_device *netdev,
+				    u32 stringset, u8 *data)
+{
+	struct ess_port *port = netdev_priv(netdev);
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+	{
+		const struct ess_mac_stat *s;
+		size_t count;
+
+		if (port->cur_mac == ESS_MAC_GMAC) {
+			count = ARRAY_SIZE(ess_gmac_mib);
+			s = ess_gmac_mib;
+		} else {
+			count = ARRAY_SIZE(ess_xgmac_mib);
+			s = ess_xgmac_mib;
+		}
+
+		/* ethtool_sprintf() advances p by ETH_GSTRING_LEN */
+		for (i = 0; i < count; i++)
+			ethtool_sprintf(&p, "%s", s[i].name);
+
+		page_pool_ethtool_stats_get_strings(p);
+		break;
+	}
+
+	case ETH_SS_PRIV_FLAGS:
+		memcpy(data, ess_priv_flags_strings,
+		       sizeof (ess_priv_flags_strings));
+		break;
+	}
+}
+
+/*
+ * ethtool get_ringparam: RX ring size lives on the shared cpu port,
+ * TX ring size is per-port.  Both are capped at 4096 entries.
+ */
+static void
+ess_ethtool_get_ringparam(struct net_device *netdev,
+			  struct ethtool_ringparam *er,
+			  struct kernel_ethtool_ringparam *ker,
+			  struct netlink_ext_ack *extack)
+{
+	struct ess_port *p = netdev_priv(netdev);
+
+	er->rx_max_pending = 4096;
+	er->rx_pending = p->priv->cpu_port.rxq_buf_count;
+	er->tx_max_pending = 4096;
+	er->tx_pending = p->txq_buf_count;
+}
+
+/*
+ * ethtool set_ringparam: ring sizes must be powers of two, the TX
+ * ring must hold at least a full TSO burst, and nothing may change
+ * while the interface is running (or, for RX, while the shared cpu
+ * port is active).
+ */
+static int
+ess_ethtool_set_ringparam(struct net_device *netdev,
+			  struct ethtool_ringparam *er,
+			  struct kernel_ethtool_ringparam *ker,
+			  struct netlink_ext_ack *extack)
+{
+	struct ess_port *p = netdev_priv(netdev);
+	struct ess_cpu_port *cp = &p->priv->cpu_port;
+
+	/* no mini/jumbo rings on this hardware */
+	if (er->rx_mini_pending || er->rx_jumbo_pending)
+		return -EINVAL;
+
+	if (!is_power_of_2(er->rx_pending))
+		return -EINVAL;
+	if (!is_power_of_2(er->tx_pending))
+		return -EINVAL;
+
+	if (er->tx_pending < ESS_TSO_MAX_SEGS)
+		return -EINVAL;
+
+	if (netif_running(netdev))
+		return -EBUSY;
+
+	/* cannot change rx while cpu port active */
+	if (cp->active && er->rx_pending != cp->rxq_buf_count)
+		return -EBUSY;
+
+	cp->rxq_buf_count = er->rx_pending;
+	p->txq_buf_count = er->tx_pending;
+
+	return 0;
+}
+
+/*
+ * ethtool get_channels: RX queues belong to the shared cpu port, TX
+ * queues are per-port.  No "other" or combined channels exist.
+ */
+static void
+ess_ethtool_get_channels(struct net_device *netdev,
+			 struct ethtool_channels *c)
+{
+	struct ess_port *p = netdev_priv(netdev);
+	struct ess_cpu_port *cp = &p->priv->cpu_port;
+
+	c->max_rx = cp->rxq_max_count;
+	c->rx_count = cp->rxq_count;
+	c->max_tx = ESS_QUEUES_TXDR_MAX;
+	c->tx_count = p->txq_count;
+	c->max_other = 0;
+	c->other_count = 0;
+	c->combined_count = 0;
+}
+
+/*
+ * ethtool set_channels: only allowed while the interface is down;
+ * the RX queue count additionally requires the shared cpu port to
+ * be inactive.
+ */
+static int
+ess_ethtool_set_channels(struct net_device *netdev,
+			 struct ethtool_channels *c)
+{
+	struct ess_port *p = netdev_priv(netdev);
+	struct ess_cpu_port *cp = &p->priv->cpu_port;
+
+	if (netif_running(netdev))
+		return -EBUSY;
+
+	if (c->tx_count < 1 || c->tx_count > ESS_QUEUES_TXDR_MAX)
+		return -EINVAL;
+
+	/* cannot change rx while cpu port active */
+	if (cp->active && c->rx_count != cp->rxq_count)
+		return -EINVAL;
+
+	cp->rxq_count = c->rx_count;
+	p->txq_count = c->tx_count;
+
+	return 0;
+}
+
+/*
+ * ethtool get_coalesce: RX coalescing settings live on the shared
+ * cpu port, TX coalescing settings are per-port.
+ */
+static int
+ess_ethtool_get_coalesce(struct net_device *netdev,
+			 struct ethtool_coalesce *ec,
+			 struct kernel_ethtool_coalesce *kernel_coal,
+			 struct netlink_ext_ack *extack)
+{
+	struct ess_port *p = netdev_priv(netdev);
+	struct ess_cpu_port *cp = &p->priv->cpu_port;
+
+	ec->rx_coalesce_usecs = cp->rx_irq_coalesce_us;
+	ec->rx_max_coalesced_frames = cp->rx_irq_coalesce_pkts;
+	ec->tx_coalesce_usecs = p->tx_irq_coalesce_us;
+	ec->tx_max_coalesced_frames = p->tx_irq_coalesce_pkts;
+
+	return 0;
+}
+
+/*
+ * ethtool set_coalesce: store the new settings (RX on the shared cpu
+ * port, TX on the port) and push them down to the hardware.
+ */
+static int
+ess_ethtool_set_coalesce(struct net_device *netdev,
+			 struct ethtool_coalesce *ec,
+			 struct kernel_ethtool_coalesce *kernel_coal,
+			 struct netlink_ext_ack *extack)
+{
+	struct ess_port *p = netdev_priv(netdev);
+	struct ess_cpu_port *cp = &p->priv->cpu_port;
+
+	cp->rx_irq_coalesce_us = ec->rx_coalesce_usecs;
+	cp->rx_irq_coalesce_pkts = ec->rx_max_coalesced_frames;
+	p->tx_irq_coalesce_us = ec->tx_coalesce_usecs;
+	p->tx_irq_coalesce_pkts = ec->tx_max_coalesced_frames;
+
+	ess_cpu_port_update_rx_coalesce(cp);
+	ess_port_update_tx_coalesce(p);
+
+	return 0;
+}
+
+/*
+ * ethtool get_stats: dump the selected MAC's MIB counters followed
+ * by the aggregated page_pool statistics of the cpu port rx rings.
+ * Order matches ess_ethtool_get_strings().
+ */
+static void ess_ethtool_get_stats(struct net_device *netdev,
+				  struct ethtool_stats *stats,
+				  u64 *data)
+{
+	struct ess_port *port = netdev_priv(netdev);
+	struct ess_cpu_port *cport = &port->priv->cpu_port;
+	const struct ess_mac_stat *hmib;
+	u32 (*read_cb)(struct ess_port *, u32 reg);
+	struct page_pool_stats pp_stats = {};
+	size_t i, count, off;
+
+	if (port->cur_mac == ESS_MAC_GMAC) {
+		count = ARRAY_SIZE(ess_gmac_mib);
+		hmib = ess_gmac_mib;
+		read_cb = gmac_readl;
+	} else {
+		count = ARRAY_SIZE(ess_xgmac_mib);
+		hmib = ess_xgmac_mib;
+		read_cb = xgmac_readl;
+	}
+
+	off = 0;
+	for (i = 0; i < count; i++) {
+		const struct ess_mac_stat *s;
+		u64 val;
+
+		s = &hmib[i];
+
+		/* 64-bit counters span two consecutive 32-bit
+		 * registers, low word first.
+		 * NOTE(review): the two halves are read
+		 * non-atomically; confirm the hardware latches the
+		 * high word on low-word read, otherwise a carry
+		 * between the reads can glitch the value */
+		val = read_cb(port, s->offset);
+		if (s->size == sizeof(u64)) {
+			u32 hval = read_cb(port, s->offset + 4);
+			val |= (u64)hval << 32;
+		}
+
+		data[off++] = val;
+	}
+
+	for (i = 0; i < cport->rxq_count; i++)
+		page_pool_get_stats(cport->rxf_rings[i].pp, &pp_stats);
+	page_pool_ethtool_stats_get(&data[off], &pp_stats);
+}
+
+/*
+ * ethtool nway_reset: restart autonegotiation, delegated to phylink.
+ */
+static int ess_ethtool_nway_reset(struct net_device *dev)
+{
+	struct ess_port *port = netdev_priv(dev);
+	return phylink_ethtool_nway_reset(port->phylink);
+}
+
+/*
+ * ethtool get_link_ksettings, delegated to phylink.
+ */
+static int
+ess_ethtool_get_link_ksettings(struct net_device *dev,
+			       struct ethtool_link_ksettings *cmd)
+{
+	struct ess_port *port = netdev_priv(dev);
+	return phylink_ethtool_ksettings_get(port->phylink, cmd);
+}
+
+/*
+ * ethtool set_link_ksettings, delegated to phylink.
+ */
+static int
+ess_ethtool_set_link_ksettings(struct net_device *dev,
+			       const struct ethtool_link_ksettings *cmd)
+{
+	struct ess_port *port = netdev_priv(dev);
+	return phylink_ethtool_ksettings_set(port->phylink, cmd);
+}
+
+/*
+ * ethtool get_eee: report the locally tracked EEE admin state merged
+ * with phylink's view.  The LPI entry timer is only readable while
+ * the XGMAC is the selected MAC.
+ */
+static int ess_ethtool_get_eee(struct net_device *dev,
+			       struct ethtool_eee *eee)
+{
+	struct ess_port *port = netdev_priv(dev);
+
+	eee->eee_enabled = port->eee_enabled;
+	eee->eee_active = port->eee_active;
+	eee->tx_lpi_enabled = port->eee_tx_lpi_enabled;
+
+	if (port->cur_mac == ESS_MAC_XGMAC)
+		eee->tx_lpi_timer = xgmac_readl(port, XGMAC_LPI_ENTRY_TIMER_REG);
+	else
+		eee->tx_lpi_timer = 0;
+
+	return phylink_ethtool_get_eee(port->phylink, eee);
+}
+
+/*
+ * ethtool set_eee: record the new EEE admin state, program the LPI
+ * entry timer when the XGMAC is selected, then hand the request to
+ * phylink.
+ */
+static int ess_ethtool_set_eee(struct net_device *dev,
+			       struct ethtool_eee *eee)
+{
+	struct ess_port *port = netdev_priv(dev);
+
+	port->eee_enabled = eee->eee_enabled;
+	port->eee_tx_lpi_enabled = eee->tx_lpi_enabled;
+
+	if (port->cur_mac == ESS_MAC_XGMAC)
+		xgmac_writel(port, XGMAC_LPI_ENTRY_TIMER_REG,
+			     eee->tx_lpi_timer);
+
+	return phylink_ethtool_set_eee(port->phylink, eee);
+}
+
+/*
+ * ethtool get_priv_flags: bit 0 ("st-sync-ok") reports the uniphy
+ * channel sync status; 0 when no uniphy channel is currently in use.
+ */
+static u32 ess_ethtool_get_priv_flags(struct net_device *dev)
+{
+	struct ess_port *port = netdev_priv(dev);
+	u32 ret = 0;
+
+	if (port->cur_uniphy_channel)
+		ret |= ess_uniphy_sync_ok_get(port->cur_uniphy_channel) << 0;
+
+	return ret;
+}
+
+/*
+ * Expose the port's phylink instance to the ethtool core.
+ */
+static struct phylink *ess_ethtool_get_phylink(struct net_device *dev)
+{
+	struct ess_port *port = netdev_priv(dev);
+	return port->phylink;
+}
+
+/*
+ * ethtool callbacks for front panel ports.
+ */
+const struct ethtool_ops ess_port_ethtool_ops = {
+	.get_drvinfo		= ess_ethtool_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+
+	.get_ethtool_stats	= ess_ethtool_get_stats,
+	.get_sset_count		= ess_ethtool_get_sset_count,
+	.get_strings		= ess_ethtool_get_strings,
+
+	.get_ringparam		= ess_ethtool_get_ringparam,
+	.set_ringparam		= ess_ethtool_set_ringparam,
+
+	.get_channels		= ess_ethtool_get_channels,
+	.set_channels		= ess_ethtool_set_channels,
+
+	.supported_coalesce_params = (ETHTOOL_COALESCE_USECS |
+				      ETHTOOL_COALESCE_MAX_FRAMES),
+	.get_coalesce		= ess_ethtool_get_coalesce,
+	.set_coalesce		= ess_ethtool_set_coalesce,
+
+	.nway_reset		= ess_ethtool_nway_reset,
+	.get_link_ksettings	= ess_ethtool_get_link_ksettings,
+	.set_link_ksettings	= ess_ethtool_set_link_ksettings,
+
+	.get_eee		= ess_ethtool_get_eee,
+	.set_eee		= ess_ethtool_set_eee,
+
+	.get_phylink		= ess_ethtool_get_phylink,
+	.get_priv_flags		= ess_ethtool_get_priv_flags,
+};
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./port_phylink.c linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/port_phylink.c
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./port_phylink.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/port_phylink.c	2023-12-05 17:14:42.299715016 +0100
@@ -0,0 +1,566 @@
+#include "port_priv.h"
+
+/*
+ * mux the port to the GMAC or XGMAC block via PORT_MUX_CTRL (bit clear = GMAC)
+ */
+void ess_port_select_mac(struct ess_port *port, enum ess_mac_type mac)
+{
+	u32 val;
+
+	val = ppe_readl(port->priv, PORT_MUX_CTRL_REG);
+	if (mac == ESS_MAC_GMAC)
+		val &= ~PORT_MUX_CTRL_PORTx_MAC_SEL_MASK(port->id);
+	else
+		val |= PORT_MUX_CTRL_PORTx_MAC_SEL_MASK(port->id);
+	ppe_writel(port->priv, PORT_MUX_CTRL_REG, val);
+}
+
+/*
+ * assert then deassert the port's mac/port resets, 100ms settle each way; always returns 0
+ */
+static int toggle_resets(struct ess_port *port)
+{
+	struct reset_control *rsts[] = {
+		port->clocks->mac_rst,
+		port->clocks->port_rst,
+	};
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(rsts); i++)
+		reset_control_assert(rsts[i]);
+	msleep(100);
+
+	for (i = 0; i < ARRAY_SIZE(rsts); i++)
+		reset_control_deassert(rsts[i]);
+	msleep(100);
+
+	return 0;
+}
+
+/*
+ * enable or disable the uniphy/mac rx/tx clocks for this port,
+ * optionally setting their rate first.  No-op when the cached state
+ * already matches -- note this means the rate is only applied on an
+ * actual off<->on transition.
+ */
+static int ess_port_clk_set(struct ess_port *port, bool enabled,
+			    unsigned int rate)
+{
+	struct clk *clks[] = {
+		port->clocks->uniphy_rx_clk,
+		port->clocks->uniphy_tx_clk,
+		port->clocks->mac_rx_clk,
+		port->clocks->mac_tx_clk,
+	};
+	int i, ret;
+
+	if (port->clocks_enabled == enabled)
+		return 0;
+
+	if (rate) {
+		for (i = 0; i < ARRAY_SIZE(clks); i++) {
+			ret = clk_set_rate(clks[i], rate);
+			if (ret)
+				return ret;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(clks); i++) {
+		if (enabled) {
+			ret = clk_prepare_enable(clks[i]);
+			if (ret) {
+				/* unwind with clk_disable_unprepare()
+				 * to balance clk_prepare_enable();
+				 * plain clk_disable() would leak a
+				 * prepare count */
+				while (--i >= 0)
+					clk_disable_unprepare(clks[i]);
+				return ret;
+			}
+		} else
+			clk_disable_unprepare(clks[i]);
+	}
+
+	port->clocks_enabled = enabled;
+	return 0;
+}
+
+/*
+ * gate the PPE bridge TXMAC enable bit, starting/stopping traffic toward the MAC
+ */
+static void ess_port_set_bridge_forwarding(struct ess_port *port, bool enabled)
+{
+	u32 val;
+
+	val = ppe_readl(port->priv, PPE_PORT_BRCTL_REG(port->ppe_port_id));
+	if (enabled)
+		val |= PORT_BRCTL_TXMAC_EN_MASK;
+	else
+		val &= ~PORT_BRCTL_TXMAC_EN_MASK;
+	ppe_writel(port->priv, PPE_PORT_BRCTL_REG(port->ppe_port_id), val);
+}
+
+/*
+ * phylink pcs_get_state: link down when no uniphy channel is bound, else query it
+ */
+static void ess_pl_pcs_get_state(struct phylink_pcs *pcs,
+				 struct phylink_link_state *state)
+{
+	struct ess_port *port = container_of(pcs, struct ess_port, phylink_pcs);
+	/* struct net_device *ndev = port->netdev; */
+	/* netdev_info(ndev, "ess_pl_pcs_get_state\n"); */
+
+	if (!port->cur_uniphy_channel) {
+		state->link = 0;
+		return;
+	}
+
+	ess_uniphy_channel_status_get(port->cur_uniphy_channel, state);
+}
+
+/*
+ * phylink pcs_config: (re)acquire a uniphy channel for the requested
+ * interface/mode, releasing a previously bound channel whose
+ * configuration no longer matches.
+ */
+static int ess_pl_pcs_config(struct phylink_pcs *pcs,
+			     unsigned int mode,
+			     phy_interface_t interface,
+			     const unsigned long *advertising,
+			     bool permit_pause_to_mac)
+{
+	struct ess_port *port = container_of(pcs, struct ess_port, phylink_pcs);
+	struct net_device *ndev = port->netdev;
+
+	/* netdev_info(ndev, "ess_pl_pcs_config\n"); */
+
+	if (port->cur_uniphy_channel) {
+		int cur_mode;
+		phy_interface_t cur_interface;
+
+		ess_uniphy_channel_config_get(port->cur_uniphy_channel,
+					      &cur_interface, &cur_mode);
+
+		if (cur_interface != interface || cur_mode != mode) {
+			netdev_info(ndev,
+				    "ess_pl_pcs_config: need to change "
+				    "interface from %s to %s",
+				    phy_modes(cur_interface),
+				    phy_modes(interface));
+			ess_uniphy_channel_put(port->cur_uniphy_channel);
+			port->cur_uniphy_channel = NULL;
+		}
+	}
+
+	if (!port->cur_uniphy_channel) {
+		struct ess_uniphy_channel *uc;
+
+		uc = ess_uniphy_channel_get(port->priv,
+					    port->uniphy_id,
+					    port->uniphy_channel_id,
+					    interface, mode,
+					    advertising);
+		if (IS_ERR(uc)) {
+			netdev_err(ndev, "failed to get uniphy channel: %ld\n",
+				   PTR_ERR(uc));
+			return PTR_ERR(uc);
+		}
+
+		port->cur_uniphy_channel = uc;
+	}
+
+	return 0;
+}
+
+/*
+ * phylink pcs_an_restart: restart autoneg on the bound uniphy channel, if any
+ */
+static void ess_pl_pcs_an_restart(struct phylink_pcs *pcs)
+{
+	struct ess_port *port = container_of(pcs, struct ess_port, phylink_pcs);
+	if (!port->cur_uniphy_channel)
+		return;
+	ess_uniphy_channel_an_restart(port->cur_uniphy_channel);
+}
+
+/*
+ * phylink pcs_link_up: forward the resolved speed/duplex to the bound
+ * uniphy channel.  Guard against a missing channel, consistent with
+ * the other pcs callbacks (pcs_get_state / pcs_an_restart).
+ */
+static void ess_pl_pcs_link_up(struct phylink_pcs *pcs,
+			      unsigned int mode,
+			      phy_interface_t interface,
+			      int speed, int duplex)
+{
+	struct ess_port *port = container_of(pcs, struct ess_port, phylink_pcs);
+
+	if (!port->cur_uniphy_channel)
+		return;
+	ess_uniphy_channel_link_up(port->cur_uniphy_channel, speed, duplex);
+}
+
+/*
+ * phylink mac_prepare: choose GMAC vs XGMAC from the PHY interface and switch the port mux if needed
+ */
+static int ess_pl_mac_prepare(struct phylink_config *config, unsigned int mode,
+			      phy_interface_t interface)
+{
+	struct net_device *ndev = to_net_dev(config->dev);
+	struct ess_port *port = netdev_priv(ndev);
+	enum ess_mac_type needed_mac;
+
+	/* catch bad phy interface returned from aquantia PHY */
+	if (interface == PHY_INTERFACE_MODE_NA)
+		return -ENOTSUPP;
+
+	/* select correct mac depending on interface */
+	switch (interface) {
+	case PHY_INTERFACE_MODE_USXGMII:
+	case PHY_INTERFACE_MODE_10G_QXGMII:
+	case PHY_INTERFACE_MODE_10GBASER:
+		needed_mac = ESS_MAC_XGMAC;
+		break;
+	default:
+		needed_mac = ESS_MAC_GMAC;
+		break;
+	}
+
+	if (port->cur_mac != needed_mac) {
+		netdev_info(ndev, "ess_pl_mac_prepare, switching to mac %s\n",
+			    needed_mac == ESS_MAC_XGMAC ? "XGMAC" : "GMAC");
+		ess_port_select_mac(port, needed_mac);
+		port->cur_mac = needed_mac;
+	}
+
+	return 0;
+}
+
+/*
+ * phylink mac_select_pcs: a single embedded PCS handles every interface mode
+ */
+static struct phylink_pcs *ess_pl_mac_select_pcs(struct phylink_config *config,
+						 phy_interface_t interface)
+{
+	struct net_device *ndev = to_net_dev(config->dev);
+	struct ess_port *port = netdev_priv(ndev);
+
+	return &port->phylink_pcs;
+}
+
+/*
+ * phylink mac_config: intentionally empty, work is done in mac_prepare/pcs_config/mac_link_up
+ */
+static void ess_pl_mac_config(struct phylink_config *config, unsigned int mode,
+			      const struct phylink_link_state *state)
+{
+	/* struct net_device *ndev = to_net_dev(config->dev); */
+	/* struct ess_port *port = netdev_priv(ndev); */
+	/* netdev_info(ndev, "ess_pl_mac_config\n"); */
+
+}
+
+/*
+ * phylink mac_finish: nothing to do after reconfiguration, always succeeds
+ */
+static int ess_pl_mac_finish(struct phylink_config *config, unsigned int mode,
+			     phy_interface_t interface)
+{
+	/* struct net_device *ndev = to_net_dev(config->dev); */
+	/* netdev_info(ndev, "ess_pl_mac_finish\n"); */
+	return 0;
+}
+
+/*
+ * enable or disable the GMAC rx and tx paths together
+ */
+static void ess_mac_gmac_control(struct ess_port *port, bool enable)
+{
+	u32 val;
+
+	val = gmac_readl(port, GMAC_ENABLE_REG);
+	if (enable)
+		val |= GMAC_ENABLE_RX_MASK | GMAC_ENABLE_TX_MASK;
+	else
+		val &= ~(GMAC_ENABLE_RX_MASK | GMAC_ENABLE_TX_MASK);
+	gmac_writel(port, GMAC_ENABLE_REG, val);
+}
+
+/*
+ * enable/disable XGMAC rx+tx; unconditionally disables tx jabber and strips FCS on rx
+ */
+static void ess_mac_xgmac_control(struct ess_port *port, bool enable)
+{
+	u32 val;
+
+	val = xgmac_readl(port, XGMAC_TX_CFG_REG);
+	val |= XGMAC_TX_CFG_JABBER_DIS_MASK;
+	if (enable)
+		val |= XGMAC_TX_CFG_TX_EN_MASK;
+	else
+		val &= ~XGMAC_TX_CFG_TX_EN_MASK;
+	xgmac_writel(port, XGMAC_TX_CFG_REG, val);
+
+	val = xgmac_readl(port, XGMAC_RX_CFG_REG);
+
+	/* strip FCS */
+	val |= XGMAC_RX_CFG_ACS_MASK;
+	val |= XGMAC_RX_CFG_CST_MASK;
+
+	if (enable)
+		val |= XGMAC_RX_CFG_RX_EN_MASK;
+	else
+		val &= ~XGMAC_RX_CFG_RX_EN_MASK;
+
+	xgmac_writel(port, XGMAC_RX_CFG_REG, val);
+}
+
+/*
+ * enable/disable whichever MAC (GMAC or XGMAC) the port is currently muxed to
+ */
+static void ess_mac_control(struct ess_port *port, bool enable)
+{
+	if (port->cur_mac == ESS_MAC_GMAC)
+		ess_mac_gmac_control(port, enable);
+	else
+		ess_mac_xgmac_control(port, enable);
+}
+
+/*
+ * program GMAC speed (10/100/1000; unhandled values leave the cleared field = GMAC_SPEED_10) and duplex
+ */
+static void ess_mac_gmac_set_speed(struct ess_port *port,
+				   int speed, int duplex)
+{
+	u32 val;
+
+	val = gmac_readl(port, GMAC_SPEED_REG);
+	val &= ~GMAC_SPEED_MASK;
+	switch (speed) {
+	case SPEED_10:
+		val |= GMAC_SPEED_10 << GMAC_SPEED_SHIFT;
+		break;
+	case SPEED_100:
+		val |= GMAC_SPEED_100 << GMAC_SPEED_SHIFT;
+		break;
+	case SPEED_1000:
+		val |= GMAC_SPEED_1000 << GMAC_SPEED_SHIFT;
+		break;
+	}
+	gmac_writel(port, GMAC_SPEED_REG, val);
+
+	val = gmac_readl(port, GMAC_ENABLE_REG);
+	if (duplex)
+		val |= GMAC_ENABLE_FULL_DPX_MASK;
+	else
+		val &= ~GMAC_ENABLE_FULL_DPX_MASK;
+	gmac_writel(port, GMAC_ENABLE_REG, val);
+}
+
+/*
+ * program the XGMAC tx-config speed field and the USXGMII scaling (USS) bit
+ */
+static void ess_mac_xgmac_set_speed(struct ess_port *port,
+				    phy_interface_t interface,
+				    int speed)
+{
+	bool is_usx;
+	u32 val;
+
+	val = xgmac_readl(port, XGMAC_TX_CFG_REG);
+	val &= ~XGMAC_TX_CFG_SPEED_MASK;
+	switch (speed) {
+	case SPEED_10:
+	case SPEED_100:
+	case SPEED_1000:
+		/* FIXME, this is strange, how come it's the same
+		 * value */
+		val |= XGMAC_TX_CFG_SPEED_1000 << XGMAC_TX_CFG_SPEED_SHIFT;
+		break;
+	case SPEED_2500:
+		val |= XGMAC_TX_CFG_SPEED_2500 << XGMAC_TX_CFG_SPEED_SHIFT;
+		break;
+	case SPEED_5000:
+		val |= XGMAC_TX_CFG_SPEED_5000 << XGMAC_TX_CFG_SPEED_SHIFT;
+		break;
+	case SPEED_10000:
+		val |= XGMAC_TX_CFG_SPEED_10000 << XGMAC_TX_CFG_SPEED_SHIFT;
+		break;
+	}
+
+	/* USS applies only to USXGMII-family interfaces at >= 2.5G */
+	is_usx = false;
+	switch (speed) {
+	case SPEED_2500:
+	case SPEED_5000:
+	case SPEED_10000:
+		switch (interface) {
+		case PHY_INTERFACE_MODE_USXGMII:
+		case PHY_INTERFACE_MODE_10G_QXGMII:
+			is_usx = true;
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	if (is_usx)
+		val |= XGMAC_TX_CFG_USS_MASK;
+	else
+		val &= ~XGMAC_TX_CFG_USS_MASK;
+
+	xgmac_writel(port, XGMAC_TX_CFG_REG, val);
+}
+
+/*
+ * apply negotiated speed to the active MAC; duplex only matters on the GMAC path
+ */
+static void ess_mac_set_speed(struct ess_port *port,
+			      phy_interface_t interface,
+			      int speed, int duplex)
+{
+	if (port->cur_mac == ESS_MAC_GMAC)
+		ess_mac_gmac_set_speed(port, speed, duplex);
+	else
+		ess_mac_xgmac_set_speed(port, interface, speed);
+}
+
+/*
+ * enable/disable TX LPI (EEE) on the XGMAC and program its LPI timers; GMAC returns -ENOTSUPP
+ */
+static int ess_port_set_eee(struct ess_port *port, bool enable)
+{
+	u32 val;
+
+	if (port->cur_mac == ESS_MAC_GMAC) {
+		/* do-able, but not implemented for lack of testing
+		 * hardware */
+		return -ENOTSUPP;
+	}
+
+	val = xgmac_readl(port, XGMAC_LPI_CS_REG);
+	if (enable)
+		val |= XGMAC_LPI_CS_TX_LPI_EN_MASK;
+	else
+		val &= ~XGMAC_LPI_CS_TX_LPI_EN_MASK;
+	val |= XGMAC_LPI_CS_PLS_MASK;
+	val |= XGMAC_LPI_CS_LPI_TXA_MASK;
+	val |= XGMAC_LPI_CS_LPI_TIMER_EN_MASK;
+	xgmac_writel(port, XGMAC_LPI_CS_REG, val);
+
+	/* set tick counter to 353 Mhz */
+	xgmac_writel(port, XGMAC_1US_TIC_CTR_REG, 353 - 1);
+
+	/* wakeup timer */
+	val = xgmac_readl(port, XGMAC_LPI_TMR_CTL_REG);
+	val &= ~XGMAC_LPI_TMR_CTL_TW_MASK;
+	val |= 32 << XGMAC_LPI_TMR_CTL_TW_SHIFT;	/* 32us LPI exit wakeup time */
+	xgmac_writel(port, XGMAC_LPI_TMR_CTL_REG, val);
+
+	return 0;
+}
+
+/*
+ * phylink mac_link_up: set the uniphy/mac clock rates for the
+ * negotiated speed, program MAC speed/duplex, pulse the resets, then
+ * enable the MAC and PPE bridge forwarding.  EEE is (re)applied when
+ * a PHY is present and the user enabled it.
+ */
+static void ess_pl_mac_link_up(struct phylink_config *config,
+			       struct phy_device *phy,
+			       unsigned int mode, phy_interface_t interface,
+			       int speed, int duplex,
+			       bool tx_pause, bool rx_pause)
+{
+	struct net_device *ndev = to_net_dev(config->dev);
+	struct ess_port *port = netdev_priv(ndev);
+	/* initialized so an unexpected speed value cannot leave "rate"
+	 * uninitialized (the switch below has no default case) */
+	unsigned int rate = RATE_125MHZ;
+	int ret;
+
+	/* netdev_info(ndev, "ess_pl_mac_link_up\n"); */
+
+	switch (speed) {
+	case 10:
+		if (interface == PHY_INTERFACE_MODE_10G_QXGMII ||
+		    interface == PHY_INTERFACE_MODE_USXGMII)
+			rate = RATE_1D25MHZ;
+		else
+			rate = RATE_2D5MHZ;
+		break;
+	case 100:
+		if (interface == PHY_INTERFACE_MODE_10G_QXGMII ||
+		    interface == PHY_INTERFACE_MODE_USXGMII)
+			rate = RATE_12D5MHZ;
+		else
+			rate = RATE_25MHZ;
+		break;
+	case 1000:
+		rate = RATE_125MHZ;
+		break;
+	case 2500:
+		switch (interface) {
+		case PHY_INTERFACE_MODE_USXGMII:
+		case PHY_INTERFACE_MODE_10G_QXGMII:
+			rate = RATE_78MHZ;
+			break;
+		case PHY_INTERFACE_MODE_2500BASEX:
+		default:
+			rate = RATE_312MHZ;
+			break;
+		}
+		break;
+	case 5000:
+		rate = RATE_156MHZ;
+		break;
+	case 10000:
+		rate = RATE_312MHZ;
+		break;
+	}
+
+	ret = ess_port_clk_set(port, true, rate);
+	if (ret)
+		netdev_err(ndev, "failed to set port clocks to rate: %d %d\n",
+			   rate, ret);
+
+	ess_mac_set_speed(port, interface, speed, duplex);
+	toggle_resets(port);
+	ess_mac_control(port, true);
+	ess_port_set_bridge_forwarding(port, true);
+
+	if (phy && port->eee_enabled) {
+		port->eee_active = phy_init_eee(phy, false) >= 0;
+		if (ess_port_set_eee(port, port->eee_active &&
+				     port->eee_tx_lpi_enabled))
+			port->eee_active = false;
+	}
+}
+
+/*
+ * phylink mac_link_down: stop bridge forwarding, quiesce the MAC, drop port clocks and disable EEE
+ */
+static void ess_pl_mac_link_down(struct phylink_config *config,
+				 unsigned int mode, phy_interface_t interface)
+{
+	struct net_device *ndev = to_net_dev(config->dev);
+	struct ess_port *port = netdev_priv(ndev);
+
+	/* netdev_info(ndev, "ess_pl_mac_link_down\n"); */
+
+	ess_port_set_bridge_forwarding(port, false);
+	/* FIXME: we are supposed to wait here to let the internal PPE
+	 * queues schedule all packets toward us, but this may never
+	 * finish because of backpressure from flow control, revisit
+	 * that */
+	udelay(100);
+	ess_mac_control(port, false);
+	ess_port_clk_set(port, false, 0);
+	ess_port_set_eee(port, false);	/* NOTE(review): runs after clocks are off -- confirm XGMAC CSR access does not depend on them */
+}
+
+/*
+ * PCS callbacks handed to phylink, backed by the uniphy channel code
+ */
+const struct phylink_pcs_ops ess_port_phylink_pcs_ops = {
+	.pcs_get_state = ess_pl_pcs_get_state,
+	.pcs_config = ess_pl_pcs_config,
+	.pcs_an_restart = ess_pl_pcs_an_restart,
+	.pcs_link_up = ess_pl_pcs_link_up,
+};
+
+/*
+ * MAC callbacks handed to phylink for ESS ports
+ */
+const struct phylink_mac_ops ess_port_phylink_mac_ops = {
+	.mac_prepare = ess_pl_mac_prepare,
+	.mac_select_pcs = ess_pl_mac_select_pcs,
+	.mac_config = ess_pl_mac_config,
+	.mac_finish = ess_pl_mac_finish,
+	.mac_link_up = ess_pl_mac_link_up,
+	.mac_link_down = ess_pl_mac_link_down,
+};
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./port_priv.h linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/port_priv.h
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./port_priv.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/port_priv.h	2023-12-21 17:30:06.445516508 +0100
@@ -0,0 +1,57 @@
+#ifndef PORT_PRIV_H_
+#define PORT_PRIV_H_
+
+#include "regs/ppe_regs.h"
+#include "regs/edma_regs.h"
+#include "ipq95xx_ess.h"
+
+/*
+ * IO helpers: register accessors relative to each port's GMAC/XGMAC window
+ */
+static inline u32 gmac_readl(struct ess_port *port, u32 reg)
+{
+	void *base = port->priv->regs[0] + GMAC_REGS_OFFSET(port->id);
+	WARN_ON(reg > 0x100);	/* sanity: stay inside the per-port GMAC window */
+	return readl(base + reg);
+}
+
+static inline void gmac_writel(struct ess_port *port, u32 reg, u32 val)
+{
+	void *base = port->priv->regs[0] + GMAC_REGS_OFFSET(port->id);
+	WARN_ON(reg > 0x100);	/* sanity: stay inside the per-port GMAC window */
+	writel(val, base + reg);
+}
+
+static inline u32 xgmac_readl(struct ess_port *port, u32 reg)
+{
+	void *base = port->priv->regs[0] + XGMAC_REGS_OFFSET(port->id);
+	WARN_ON(reg > 0xfff);	/* sanity: stay inside the per-port XGMAC window */
+	return readl(base + reg);
+}
+
+static inline void xgmac_writel(struct ess_port *port, u32 reg, u32 val)
+{
+	void *base = port->priv->regs[0] + XGMAC_REGS_OFFSET(port->id);
+	WARN_ON(reg > 0xfff);	/* sanity: stay inside the per-port XGMAC window */
+	writel(val, base + reg);
+}
+
+/*
+ * ports.c
+ */
+void ess_port_update_tx_coalesce(struct ess_port *port);
+void ess_cpu_port_update_rx_coalesce(struct ess_cpu_port *cport);
+
+/*
+ * port_ethtool.c
+ */
+extern const struct ethtool_ops ess_port_ethtool_ops;
+void ess_port_select_mac(struct ess_port *port, enum ess_mac_type mac);
+
+/*
+ * port_phylink.c
+ */
+extern const struct phylink_pcs_ops ess_port_phylink_pcs_ops;
+extern const struct phylink_mac_ops ess_port_phylink_mac_ops;
+
+#endif /* PORT_PRIV_H_ */
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./regs/edma_desc.h linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/regs/edma_desc.h
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./regs/edma_desc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/regs/edma_desc.h	2023-12-21 17:30:06.445516508 +0100
@@ -0,0 +1,135 @@
+#ifndef EDMA_DESC_H_
+#define EDMA_DESC_H_
+
+/*
+ * RxFill descriptor: free buffer handed to hardware (address, size, 64-bit opaque cookie)
+ */
+struct edma_rxfill_desc {
+	__le32 rdes0; /* Contains buffer address */
+	__le32 rdes1; /* Contains buffer size */
+	__le32 rdes2; /* Contains opaque */
+	__le32 rdes3; /* Contains opaque high bits */
+};
+
+/*
+ * RxDesc descriptor: completed rx packet, written back by hardware
+ */
+struct edma_rxdesc_desc {
+	__le32 rdes0; /* Contains buffer address */
+	__le32 rdes1; /* Contains more bit, priority bit, service code */
+	__le32 rdes2; /* Contains opaque */
+	__le32 rdes3; /* Contains opaque high bits */
+	__le32 rdes4; /* Contains destination and source information */
+	__le32 rdes5; /* Contains WiFi QoS, data length */
+	__le32 rdes6; /* Contains hash value, check sum status */
+	__le32 rdes7; /* Contains DSCP, packet offsets */
+};
+
+/* rdes4 definitions */
+#define EDMA_RXDESC_SRC_PORT_ID_SHIFT	0
+#define EDMA_RXDESC_SRC_PORT_ID_MASK	(0xfff << EDMA_RXDESC_SRC_PORT_ID_SHIFT)
+#define EDMA_RXDESC_SRC_PORT_TYPE_SHIFT	12
+#define EDMA_RXDESC_SRC_PORT_TYPE_MASK	(0xf << EDMA_RXDESC_SRC_PORT_TYPE_SHIFT)
+#define EDMA_RXDESC_DST_PORT_ID_SHIFT	16
+#define EDMA_RXDESC_DST_PORT_ID_MASK	(0xfff << EDMA_RXDESC_DST_PORT_ID_SHIFT)
+#define EDMA_RXDESC_DST_PORT_TYPE_SHIFT	28
+#define EDMA_RXDESC_DST_PORT_TYPE_MASK	(0xf << EDMA_RXDESC_DST_PORT_TYPE_SHIFT)
+
+/* rdes5 definitions */
+#define EDMA_RXDESC_PACKET_LEN_MASK	0x3FFFF
+#define EDMA_RXDESC_PACKET_LEN_GET(desc)				\
+	((le32_to_cpu((desc)->rdes5)) &	EDMA_RXDESC_PACKET_LEN_MASK)
+
+/* rdes6 definitions */
+#define EDMA_RXDESC_L4CSUM_STATUS_MASK		BIT(12)
+#define EDMA_RXDESC_L3CSUM_STATUS_MASK		BIT(13)
+
+
+/*
+ * EDMA Rx Secondary (preheader) Descriptor, optionally written to a dedicated ring
+ */
+struct edma_rx_sec_desc {
+	__le32 rx_sec0; /* Contains timestamp */
+	__le32 rx_sec1; /* Contains secondary checksum status */
+	__le32 rx_sec2; /* Contains QoS tag */
+	__le32 rx_sec3; /* Contains flow index details */
+	__le32 rx_sec4; /* Contains secondary packet offsets */
+	__le32 rx_sec5; /* Contains multicast bit, checksum */
+	__le32 rx_sec6; /* Contains SVLAN, CVLAN */
+	__le32 rx_sec7; /* Contains secondary SVLAN, CVLAN */
+};
+
+/*
+ * TxDesc descriptor: tx packet descriptor, read by hardware
+ */
+struct edma_txdesc_desc {
+	__le32 tdes0; /* Low 32-bit of buffer address */
+	__le32 tdes1; /* Buffer recycling, PTP tag flag, PRI valid flag */
+	__le32 tdes2; /* Low 32-bit of opaque value */
+	__le32 tdes3; /* High 32-bit of opaque value */
+	__le32 tdes4; /* Source/Destination port info */
+	__le32 tdes5; /* VLAN offload, csum_mode, ip_csum_en, tso_en,
+		      data length */
+	__le32 tdes6; /* MSS/hash_value/PTP tag, data offset */
+	__le32 tdes7; /* L4/L3 offset, PROT type, L2 type, CVLAN/SVLAN
+		      tag, service code */
+};
+
+/* tdes1 definitions */
+#define EDMA_TXDESC_MORE_BIT_MASK	(1 << 30)
+
+/* tdes4 definitions */
+#define EDMA_TXDESC_SRC_PORT_ID_SHIFT	0
+#define EDMA_TXDESC_SRC_PORT_ID_MASK	(0xfff << EDMA_TXDESC_SRC_PORT_ID_SHIFT)
+#define EDMA_TXDESC_SRC_PORT_TYPE_SHIFT	12
+#define EDMA_TXDESC_SRC_PORT_TYPE_MASK	(0xf << EDMA_TXDESC_SRC_PORT_TYPE_SHIFT)
+#define EDMA_TXDESC_DST_PORT_ID_SHIFT	16
+#define EDMA_TXDESC_DST_PORT_ID_MASK	(0xfff << EDMA_TXDESC_DST_PORT_ID_SHIFT)
+#define EDMA_TXDESC_DST_PORT_TYPE_SHIFT	28
+#define EDMA_TXDESC_DST_PORT_TYPE_MASK	(0xf << EDMA_TXDESC_DST_PORT_TYPE_SHIFT)
+#define EDMA_TXDESC_PTYPE_PORT		2
+#define EDMA_TXDESC_MK_DPORT_ID(pid)				       \
+	((EDMA_TXDESC_PTYPE_PORT << EDMA_TXDESC_SRC_PORT_TYPE_SHIFT) | \
+	 (0 << EDMA_TXDESC_SRC_PORT_ID_SHIFT) |			       \
+	 (EDMA_TXDESC_PTYPE_PORT << EDMA_TXDESC_DST_PORT_TYPE_SHIFT) | \
+	 ((pid) << EDMA_TXDESC_DST_PORT_ID_SHIFT))
+
+
+/* tdes5 definition */
+#define EDMA_TXDESC_DATA_LEN_SET_SHIFT	0
+#define EDMA_TXDESC_DATA_LEN_SET_MASK	(0x1ffff << EDMA_TXDESC_DATA_LEN_SET_SHIFT)
+#define EDMA_TXDESC_TSO_EN_MASK		(1 << 24)
+#define EDMA_TXDESC_GEN_IP_CSUM_MASK	(1 << 25)
+#define EDMA_TXDESC_GEN_L4_CSUM_MASK	(1 << 26)
+#define EDMA_TXDESC_ADV_OFFSET_MASK	(1 << 31)
+
+/* tdes6 definition */
+#define EDMA_TXDESC_MSS_SHIFT		16
+#define EDMA_TXDESC_MSS_MASK		(0xffff << EDMA_TXDESC_MSS_SHIFT)
+
+
+/*
+ * EDMA Tx Secondary Descriptor.  NOTE(review): the rx_sec* field names below look copy-pasted from the Rx variant -- confirm intended
+ */
+struct edma_tx_sec_desc {
+	__le32 tx_sec0; /* Reserved */
+	__le32 tx_sec1; /* Custom csum offset, payload offset, TTL/NAT action */
+	__le32 rx_sec2; /* NAPT translated port, DSCP value, TTL value */
+	__le32 rx_sec3; /* Flow index value and valid flag */
+	__le32 rx_sec4; /* Reserved */
+	__le32 rx_sec5; /* Reserved */
+	__le32 rx_sec6; /* CVLAN/SVLAN command */
+	__le32 rx_sec7; /* CVLAN/SVLAN tag value */
+};
+
+/*
+ * TxCmpl descriptor: tx completion, written back by hardware
+ */
+struct edma_txcmpl_desc {
+	__le32 tdes0; /* Low 32-bit opaque value */
+	__le32 tdes1; /* High 32-bit opaque value */
+	__le32 tdes2; /* More fragment, transmit ring id, pool id */
+	__le32 tdes3; /* Error indications */
+};
+
+#endif /* !EDMA_DESC_H_ */
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./regs/edma_regs.h linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/regs/edma_regs.h
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./regs/edma_regs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/regs/edma_regs.h	2023-12-21 17:30:06.445516508 +0100
@@ -0,0 +1,381 @@
+#ifndef EDMA_REGS_H_
+#define EDMA_REGS_H_
+
+/* global dma control */
+#define EDMA_PORT_CTRL_REG		(0x4)
+#define PORT_CTRL_PAD_EN_MASK		(1 << 0)
+#define PORT_CTRL_GLOB_EN_MASK		(1 << 1)
+#define PORT_CTRL_TX_DROP_EN_MASK	(1 << 2)
+
+/* global vlan insertion */
+#define EDMA_VLAN_CTRL_REG		(0x8)
+#define VLAN_CTRL_SVLAN_ID_SHIFT	0
+/* fixed: was shifted by ..._MASK (self-reference, cannot expand) */
+#define VLAN_CTRL_SVLAN_ID_MASK		(0xffff << VLAN_CTRL_SVLAN_ID_SHIFT)
+#define VLAN_CTRL_CVLAN_ID_SHIFT	16
+#define VLAN_CTRL_CVLAN_ID_MASK		(0xffff << VLAN_CTRL_CVLAN_ID_SHIFT)
+
+/* mapping between rx descriptor ring & rx fill ring */
+#define EDMA_RXD2FILL_MAPx_REG(x)	(0x14 + (x) * 0x4)
+#define RXD2FILL_Q2REG(q)		((q) / 10)
+#define RXD2FILL_MAP_IDx_SHIFT(q)	(((q) % 10) * 3)
+#define RXD2FILL_MAP_IDx_MASK(q)	(0x7 << RXD2FILL_MAP_IDx_SHIFT(q))
+
+/* tx configuration */
+#define EDMA_TXQ_CTRL_REG		(0x20)
+#define TXQ_CTRL_TXD_PREF_THRESH_SHIFT	0
+#define TXQ_CTRL_TXD_PREF_THRESH_MASK	(0xf << TXQ_CTRL_TXD_PREF_THRESH_SHIFT)
+#define TXQ_CTRL_TXCMPL_WB_THRESH_SHIFT	4
+#define TXQ_CTRL_TXCMPL_WB_THRESH_MASK	(0xf << TXQ_CTRL_TXCMPL_WB_THRESH_SHIFT)
+#define TXQ_CTRL_PKT_SRAM_THRESH_SHIFT	8
+#define TXQ_CTRL_PKT_SRAM_THRESH_MASK	(0xff << TXQ_CTRL_PKT_SRAM_THRESH_SHIFT)
+#define TXQ_CTRL_TXCMPL_WB_TIMER_SHIFT	16
+#define TXQ_CTRL_TXCMPL_WB_TIMER_MASK	(0xffff << TXQ_CTRL_TXCMPL_WB_TIMER_SHIFT)
+
+/* tx configuration, second register */
+#define EDMA_TXQ_CTRL2_REG		(0x24)
+#define TXQ_CTRL2_PKT_DRAM_BURSTL_SHIFT	0
+#define TXQ_CTRL2_PKT_DRAM_BURSTL_MASK	(0x1f << TXQ_CTRL2_PKT_DRAM_BURSTL_SHIFT)
+#define TXQ_CTRL2_TXCMPL_AFULL_TRESH_SHIFT	5
+#define TXQ_CTRL2_TXCMPL_AFULL_TRESH_MASK	(0x3f << TXQ_CTRL2_TXCMPL_AFULL_TRESH_SHIFT)
+#define TXQ_CTRL2_TX_CREDIT_MAX_SHIFT	11
+#define TXQ_CTRL2_TX_CREDIT_MAX_MASK	(0xf << TXQ_CTRL2_TX_CREDIT_MAX_SHIFT)
+
+#define TXQ_CTRL2_TX_CREDIT_FUN_DIS_MASK	(1 << 15)
+
+/* tx credit configuration */
+#define EDMA_TXQ_FC0_REG		(0x28)
+#define TXQ_FC0_CREDIT_PORT0_SHIFT	0
+#define TXQ_FC0_CREDIT_PORT0_MASK	(0xff << TXQ_FC0_CREDIT_PORT0_SHIFT)
+#define TXQ_FC0_CREDIT_PORT1_SHIFT	8
+#define TXQ_FC0_CREDIT_PORT1_MASK	(0xff << TXQ_FC0_CREDIT_PORT1_SHIFT)
+#define EDMA_TXQ_FC1_REG		(0x30)
+#define TXQ_FC1_CREDIT_PORT0_SHIFT	0
+#define TXQ_FC1_CREDIT_PORT0_MASK	(0xff << TXQ_FC1_CREDIT_PORT0_SHIFT)
+#define TXQ_FC1_CREDIT_PORT1_SHIFT	8
+#define TXQ_FC1_CREDIT_PORT1_MASK	(0xff << TXQ_FC1_CREDIT_PORT1_SHIFT)
+
+/* rx configuration */
+#define EDMA_RXQ_CTRL_REG		(0x3c)
+
+/* after a misc interrupt for any queue error, this register holds the
+ * qid of the faulty queue */
+#define EDMA_MISC_ERR_QID1_REG		(0x40)
+
+#define EDMA_RXQ_FC_TRHE_REG		(0x44)
+
+/* global dma control/config*/
+#define EDMA_DMAR_CTRL_REG		(0x48)
+#define DMAR_CTRL_REQ_PRI_SHIFT		0
+#define DMAR_CTRL_REQ_PRI_MASK		(0x7 << DMAR_CTRL_REQ_PRI_SHIFT)
+#define DMAR_CTRL_REQ_PRI_RR		0x0
+#define DMAR_CTRL_REQ_PRI_TXTPRF	0x1
+#define DMAR_CTRL_REQ_PRI_TPTXRF	0x2
+#define DMAR_CTRL_REQ_PRI_RFTXTP	0x3
+#define DMAR_CTRL_REQ_PRI_RFTPTX	0x4
+#define DMAR_CTRL_BURST_LEN_SHIFT	3
+#define DMAR_CTRL_BURST_LEN_MASK	(1 << DMAR_CTRL_BURST_LEN_SHIFT)
+#define DMAR_CTRL_BURST_LEN_256		0
+#define DMAR_CTRL_BURST_LEN_128		1
+/* number of simultaneous outstanding request for desc/packet data,
+ * actual value is <regval> + 1 */
+#define DMAR_CTRL_TXDATA_OST_NUM_SHIFT	4
+#define DMAR_CTRL_TXDATA_OST_NUM_MASK	(0x1f << DMAR_CTRL_TXDATA_OST_NUM_SHIFT)
+#define DMAR_CTRL_TXDESC_OST_NUM_SHIFT	9
+#define DMAR_CTRL_TXDESC_OST_NUM_MASK	(0x7 << DMAR_CTRL_TXDESC_OST_NUM_SHIFT)
+#define DMAR_CTRL_RXFILL_OST_NUM_SHIFT	12
+#define DMAR_CTRL_RXFILL_OST_NUM_MASK	(0x7 << DMAR_CTRL_RXFILL_OST_NUM_SHIFT)
+
+/* axi read config */
+#define EDMA_AXIR_CTRL_REG		(0x4c)
+
+/* axi write config */
+#define EDMA_AXIW_CTRL_REG		(0x50)
+
+/* unused */
+#define EDMA_MIN_MSS_REG		(0x54)
+
+/* edma loopback control */
+#define EDMA_LOOPBACK_CTRL_REG		(0x58)
+
+/* dma error related interrupts */
+#define EDMA_MISC_INT_STAT_REG		(0x5c)
+#define EDMA_MISC_INT_MASK_REG		(0x60)
+#define MISC_INT_AXI_RD_ERR_MASK	(1 << 0)
+#define MISC_INT_AXI_WR_ERR_MASK	(1 << 1)
+#define MISC_RX_DESC_FIFO_FULL_MASK	(1 << 2)
+#define MISC_RX_ERR_BUF_SIZE_MASK	(1 << 3)
+#define MISC_TX_SRAM_FULL_MASK		(1 << 4)
+#define MISC_TX_CMPL_BUF_FULL_MASK	(1 << 5)
+#define MISC_TX_DATA_LEN_ERR_MASK	(1 << 6)
+#define MISC_TX_TIMEOUT_MASK		(1 << 7)
+
+#define EDMA_REQ0_FIFO_THRESH_REG	(0x80)
+#define EDMA_WP_OS_THRESH_REG		(0x84)
+
+/* after a misc interrupt for any queue error, this register holds the
+ * qid of the faulty queue */
+#define EDMA_MISC_ERR_QID2_REG		(0x88)
+
+/* mapping between tx descriptor ring & tx completion */
+#define EDMA_TXDESC2CMPL_MAPx_REG(x)	(0x8c + (x) * 0x4)
+#define TXDESC2CMP_Q2REG(q)		((q) / 6)
+#define TXDESC2CMPL_MAP_IDx_SHIFT(q)	(((q) % 6) * 5)
+#define TXDESC2CMPL_MAP_IDx_MASK(q)	(0x1f << TXDESC2CMPL_MAP_IDx_SHIFT(q))
+
+
+/*
+ * tx desc queues, 32 available
+ *
+ * hardware reads descriptors of packets to transmit from there
+ */
+
+/* base address (must be 8 bytes aligned) */
+#define EDMA_TXDESC_BAx_REG(x)		(0x1000 + 0x1000 * (x))
+
+/* producer index, moved by software */
+#define EDMA_TXDESC_PRODx_IDX_REG(x)	(0x1004 + 0x1000 * (x))
+
+/* consumer index, moved by hardware, read-only */
+#define EDMA_TXDESC_CONSx_IDX_REG(x)	(0x1008 + 0x1000 * (x))
+
+/* size of ring, 16 bits max */
+#define EDMA_TXDESC_RING_SIZEx_REG(x)	(0x100c + 0x1000 * (x))
+
+#define EDMA_TXDESC_CTRLx_REG(x)	(0x1010 + 0x1000 * (x))
+#define TXDESC_CTRL_EN_MASK		(1 << 0)
+/* configurable flow control group */
+#define TXDESC_CTRL_FC_GRP_ID_SHIFT	1
+#define TXDESC_CTRL_FC_GRP_ID_MASK	(0x7 << TXDESC_CTRL_FC_GRP_ID_SHIFT)
+/* configurable arbiter group */
+#define TXDESC_CTRL_ARB_GRP_ID_SHIFT	4
+#define TXDESC_CTRL_ARB_GRP_ID_MASK	(0x3 << TXDESC_CTRL_ARB_GRP_ID_SHIFT)
+#define TXDESC_CTRL_ARB_GRP_HIGHEST	0
+#define TXDESC_CTRL_ARB_GRP_HIGH	1
+#define TXDESC_CTRL_ARB_GRP_LOW		2
+#define TXDESC_CTRL_ARB_GRP_LOWEST	3
+
+/* base address for preheader desc (must be 8 bytes aligned) */
+#define EDMA_TXDESC_PH_BAx_REG(x)	(0x1014 + 0x1000 * (x))
+
+
+/*
+ * rx fill queues, 8 available
+ *
+ * "free queue" where hardware takes available free buffers from
+ */
+
+/* base address (must be 8 bytes aligned) */
+#define EDMA_RXFILL_BAx_REG(x)		(0x29000 + 0x1000 * (x))
+
+/* producer index, moved by software */
+#define EDMA_RXFILL_PRODx_IDX_REG(x)	(0x29004 + 0x1000 * (x))
+
+/* consumer index, moved by hardware, read-only */
+#define EDMA_RXFILL_CONSx_IDX_REG(x)	(0x29008 + 0x1000 * (x))
+
+/* size of ring, 16 bits max */
+#define EDMA_RXFILL_RING_SIZEx_REG(x)	(0x2900c + 0x1000 * (x))
+
+/* unused */
+#define EDMA_RXFILL_BUF1_SIZEx_REG(x)	(0x29010 + 0x1000 * (x))
+
+/* high/low watermark that trigger flow control (assert if available
+ * descriptors < lo, deassert if > hi) */
+#define EDMA_RXFILL_FC_THRESHx_REG(x)	(0x29014 + 0x1000 * (x))
+#define EDMA_RXFILL_FC_THRESH_LO_SHIFT	0
+#define EDMA_RXFILL_FC_THRESH_LO_MASK	(0x3ff << EDMA_RXFILL_FC_THRESH_LO_SHIFT)
+#define EDMA_RXFILL_FC_THRESH_HI_SHIFT	12
+#define EDMA_RXFILL_FC_THRESH_HI_MASK	(0x3ff << EDMA_RXFILL_FC_THRESH_HI_SHIFT)
+
+/* FIXME: low watermark that trigger the urgent interrupt */
+#define EDMA_RXFILL_UGT_THRESHx_REG(x)	(0x29018 + 0x1000 * (x))
+
+/* single bit to enable hardware to poll the ring  */
+#define EDMA_RXFILL_ENx_REG(x)		(0x2901c + 0x1000 * (x))
+
+/* single bit to disable the ring */
+#define EDMA_RXFILL_DISABLEx_REG(x)	(0x29020 + 0x1000 * (x))
+
+/* single bit that is set to one when hardware has stopped using the ring */
+#define EDMA_RXFILL_DISABLE_DONEx_REG(x)	(0x29024 + 0x1000 * (x))
+
+/* interrupt status/mask */
+#define EDMA_RXFILL_INT_STATx_REG(x)	(0x31000 + 0x1000 * (x))
+#define EDMA_RXFILL_INT_MASKx_REG(x)	(0x31004 + 0x1000 * (x))
+#define RXFILL_INT_URG_MASK		(1 << 0)
+
+
+/*
+ * rx desc queues, 24 available
+ *
+ * "done queue" where hardware writes descriptors of received packet
+ */
+
+/* base address (must be 8 bytes aligned) */
+#define EDMA_RXDESC_BAx_REG(x)		(0x39000 + 0x1000 * (x))
+
+/* producer index, moved by hardware, read-only */
+#define EDMA_RXDESC_PRODx_IDX_REG(x)	(0x39004 + 0x1000 * (x))
+
+/* consumer index, moved by software */
+#define EDMA_RXDESC_CONSx_IDX_REG(x)	(0x39008 + 0x1000 * (x))
+
+/* size of ring, 16 bits max */
+#define EDMA_RXDESC_RING_SIZEx_REG(x)	(0x3900c + 0x1000 * (x))
+#define EDMA_RXDESC_RING_SIZE_RS_SHIFT	0
+#define EDMA_RXDESC_RING_SIZE_RS_MASK	(0xffff << EDMA_RXDESC_RING_SIZE_RS_SHIFT)
+#define EDMA_RXDESC_RING_SIZE_POFF_SHIFT	16
+#define EDMA_RXDESC_RING_SIZE_POFF_MASK	(0xff << EDMA_RXDESC_RING_SIZE_POFF_SHIFT)
+
+/* high/low watermark that trigger flow control (assert if available
+ * descriptors < lo, deassert if > hi) */
+#define EDMA_RXDESC_THRESHx_REG(x)	(0x39010 + 0x1000 * (x))
+#define EDMA_RXDESC_THRESH_LO_SHIFT	0
+#define EDMA_RXDESC_THRESH_LO_MASK	(0x3ff << EDMA_RXDESC_THRESH_LO_SHIFT)
+#define EDMA_RXDESC_THRESH_HI_SHIFT	12
+#define EDMA_RXDESC_THRESH_HI_MASK	(0x3ff << EDMA_RXDESC_THRESH_HI_SHIFT)
+
+/* threshold that trigger urgent interrupt, when number of done desc
+ * is higher than this value */
+#define EDMA_RXDESC_UGT_THRESHx_REG(x)	(0x39014 + 0x1000 * (x))
+#define RXDESC_UGT_THRESHx_LOW_SHIFT	0
+#define RXDESC_UGT_THRESHx_LOW_MASK	(0xffff << RXDESC_UGT_THRESHx_LOW_SHIFT)
+
+/* control register */
+#define EDMA_RXDESC_CTRLx_REG(x)	(0x39018 + 0x1000 * (x))
+/* DS says unused */
+#define RXDESC_CTRL_EN_MASK		(1 << 0)
+#define RXDESC_CTRL_QDISC_EN_MASK	(1 << 1)
+#define RXDESC_CTRL_CTAG_REMOVE_MASK	(1 << 2)
+#define RXDESC_CTRL_STAG_REMOVE_MASK	(1 << 3)
+/* what the hardware writes in tdesc2/tdesc3  */
+#define RXDESC_CTRL_RET_MODE_SHIFT	4
+#define RXDESC_CTRL_RET_MODE_MASK	(1 << RXDESC_CTRL_RET_MODE_SHIFT)
+#define RXDESC_CTRL_RET_MODE_OPAQUE	0
+#define RXDESC_CTRL_RET_MODE_PA		1
+#define RXDESC_CTRL_VLD_BIT_MASK	(1 << 5)
+/* where to put the secondary header content, mixed with data or in
+ * dedicated ring */
+#define RXDESC_CTRL_PH_ADDR_SEL_SHIFT	6
+#define RXDESC_CTRL_PH_ADDR_SEL_MASK	(1 << RXDESC_CTRL_PH_ADDR_SEL_SHIFT)
+#define RXDESC_CTRL_PH_ADDR_SEL_INDATA	0
+#define RXDESC_CTRL_PH_ADDR_SEL_INRING	1
+#define RXDESC_CTRL_PH_LEN_SEL_SHIFT	7
+#define RXDESC_CTRL_PH_LEN_SEL_MASK	(1 << RXDESC_CTRL_PH_LEN_SEL_SHIFT)
+#define RXDESC_CTRL_PH_LEN_SEL_28B	0
+#define RXDESC_CTRL_PH_LEN_SEL_24B	1
+#define RXDESC_CTRL_DESC_SWAP_MASK	(1 << 8)
+
+/* ring backpressure counter */
+#define EDMA_RXDESC_BPCx_REG(x)		(0x3901c + 0x1000 * (x))
+
+/* single bit to disable the ring */
+#define EDMA_RXDESC_DISABLEx_REG(x)	(0x39020 + 0x1000 * (x))
+
+/* single bit that is set to one when hardware has stopped using the ring */
+#define EDMA_RXDESC_DISABLE_DONEx_REG(x)	(0x39024 + 0x1000 * (x))
+
+/* base address for preheader desc (must be 8 bytes aligned) */
+#define EDMA_RXDESC_PH_BAx_REG(x)	(0x39028 + 0x1000 * (x))
+
+/* rx desc queue related interrupt */
+#define EDMA_RXDESC_INT_STATx_REG(x)	(0x59000 + 0x1000 * (x))
+#define EDMA_RXDESC_INT_MASKx_REG(x)	(0x59004 + 0x1000 * (x))
+#define RXDESC_INT_PKT_DONE_MASK	(1 << 0)
+#define RXDESC_INT_URG_MASK		(1 << 1)
+
+/*
+ *  rx coalescing timer value (16 bits)
+ *
+ * unit in timer tick, 1 tick = (128 / ppe_freq_mhz) microseconds
+ * ppe_freq_mhz = 352, so max value is about ~24ms
+ */
+#define EDMA_US_TO_TIMER_TICKS(us)	(((us) * 352) / 128)
+
+#define EDMA_RX_MOD_TIMERx_REG(x)	(0x59008 + 0x1000 * (x))
+#define RX_MOD_TIMER_VAL_SHIFT		0
+#define RX_MOD_TIMER_VAL_MASK		(0xffff << RX_MOD_TIMER_VAL_SHIFT)
+
+/* rx interrupt configuration */
+#define EDMA_RX_INT_CTRLx_REG(x)	(0x5900c + 0x1000 * (x))
+/* ? */
+#define RX_INT_CTRL_TIMER_INT_EN_MASK	(1 << 0)
+/* ? */
+#define RX_INT_CTRL_NE_EN_MASK		(1 << 1)
+
+
+/*
+ * tx completion queues, 32 available
+ *
+ * "done queue" where hardware writes descriptors of transferred packet
+ */
+
+/* base address (must be 8 bytes aligned) */
+#define EDMA_TXCMPL_BAx_REG(x)		(0x79000 + 0x1000 * (x))
+
+/* producer index, moved by hardware, read-only */
+#define EDMA_TXCMPL_PRODx_IDX_REG(x)	(0x79004 + 0x1000 * (x))
+
+/* consumer index, moved by software */
+#define EDMA_TXCMPL_CONSx_IDX_REG(x)	(0x79008 + 0x1000 * (x))
+
+/* size of ring, 16 bits max */
+#define EDMA_TXCMPL_RING_SIZEx_REG(x)	(0x7900c + 0x1000 * (x))
+
+/*
+ * low watermark that trigger the urgent interrupt
+ *
+ * IRQ triggers when number of packets in completion queue goes above
+ * UGT_THRESH_LOW value, or sooner if IRQ mitigation timer is reached.
+ */
+#define EDMA_TXCMPL_UGT_THRESHx_REG(x)	(0x79010 + 0x1000 * (x))
+#define TXCMPL_UGT_THRESH_LOW_SHIFT	0
+#define TXCMPL_UGT_THRESH_LOW_MASK	(0xffff << TXCMPL_UGT_THRESH_LOW_SHIFT)
+#define TXCMPL_UGT_THRESH_FC_SHIFT	16
+#define TXCMPL_UGT_THRESH_FC_MASK	(0x3f << TXCMPL_UGT_THRESH_FC_SHIFT)
+
+/* ring control register */
+#define EDMA_TXCMPL_CTRLx_REG(x)	(0x79014 + 0x1000 * (x))
+#define TXCMPL_CTRL_RET_MODE_SHIFT	0
+#define TXCMPL_CTRL_RET_MODE_MASK	(1 << TXCMPL_CTRL_RET_MODE_SHIFT)
+#define TXCMPL_CTRL_RET_MODE_OPAQUE	0
+#define TXCMPL_CTRL_RET_MODE_PA		1
+#define TXCMPL_CTRL_VLT_BIT_MASK	(1 << 1)
+
+/* tx ring backpressure counter */
+#define EDMA_TXCMPL_BPCx_REG(x)		(0x79018 + 0x1000 * (x))
+
+/* interrupt status/mask */
+#define EDMA_TX_INT_STATx_REG(x)	(0x99000 + 0x1000 * (x))
+#define EDMA_TX_INT_MASKx_REG(x)	(0x99004 + 0x1000 * (x))
+#define TX_INT_PKT_DONE_MASK		(1 << 0)
+#define TX_INT_UGT_MASK			(1 << 1)
+
+/*
+ * tx moderation/coalescing timer, 16 bits
+ *
+ * timer unit (clock ticks at 1/352us, 1 timer unit = 128 clock ticks)
+ */
+#define EDMA_TX_MOD_TIMERx_REG(x)	(0x99008 + 0x1000 * (x))
+#define TX_MOD_TIMER_VAL_SHIFT		0
+#define TX_MOD_TIMER_VAL_MASK		(0xffff << TX_MOD_TIMER_VAL_SHIFT)
+
+
+/* tx interrupt config */
+#define EDMA_TX_INT_CTRLx_REG(x)	(0x9900c + 0x1000 * (x))
+#define TX_INT_CTRL_TIMER_INT_EN_MASK	(1 << 0)
+/* ? */
+#define TX_INT_CTRL_NE_EN_MASK		(1 << 1)
+/* ? */
+
+/*
+ * QID to RID table (not in HRD), PPE queue to edma rx ring mapping
+ * 4 mapping entry per reg (8 bits each)
+ */
+#define EDMA_QID2RIDx_TABLE_MEM(q)	(0xb9000 + (0x4 * (q)))
+#define EDMA_QID2RID_TABLE_SIZE		((256 + 44) / 4)
+#define QID2RID_Q2REG(q)		((q) / 4)
+#define QID2REG_RIDx_SHIFT(q)		(((q) % 4) * 8)
+#define QID2REG_RIDx_MASK(q)		(0xff << QID2REG_RIDx_SHIFT(q))
+
+#endif /* EDMA_REGS_H_ */
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./regs/ppe_regs.h linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/regs/ppe_regs.h
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./regs/ppe_regs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/regs/ppe_regs.h	2024-01-04 16:55:44.212658158 +0100
@@ -0,0 +1,1233 @@
+#ifndef PPE_REGS_H_
+#define PPE_REGS_H_
+
+#include <linux/types.h>
+
+/***************************************************************
+ *
+ * global registers
+ *
+ ***************************************************************/
+
+/* port mux control */
+#define PORT_MUX_CTRL_REG		(0x10)
+#define PORT_MUX_CTRL_PORTx_PCS_SEL_MASK(x)	(1 << (x))
+#define PORT_MUX_CTRL_PORTx_MAC_SEL_MASK(x)	(1 << ((x) + 8))
+
+/* port phy status */
+#define PORT_PHY_STS0_REG		(0x40)
+#define PORT_PHY_STS1_REG		(0x44)
+
+/* port0-3 in sts0, port4_pcs0 port4_pcs1 port5 in sts1  */
+#define PORT_PHY_STS_Px_SHIFT(x)	((x) * 8)
+#define PORT_PHY_STS_Px_MASK(x)		(0xf << PORT_PHY_STS_Px_SHIFT(x))
+
+#define PORT_PHY_STS_SPEED_SHIFT	0
+#define PORT_PHY_STS_SPEED_MASK		(0x7)
+#define PORT_PHY_STS_SPEED_10		0
+#define PORT_PHY_STS_SPEED_100		1
+#define PORT_PHY_STS_SPEED_1000		2
+#define PORT_PHY_STS_SPEED_10000	3
+#define PORT_PHY_STS_SPEED_2500		4
+#define PORT_PHY_STS_SPEED_5000		5
+#define PORT_PHY_STS_TX_FC_MASK		(1 << 3)
+#define PORT_PHY_STS_RX_FC_MASK		(1 << 4)
+#define PORT_PHY_STS_FD_MASK		(1 << 5)
+#define PORT_PHY_STS_AZ_EN_MASK		(1 << 6)
+#define PORT_PHY_STS_LINK_MASK		(1 << 7)
+
+
+/***************************************************************
+ *
+ * GMAC registers
+ *
+ ***************************************************************/
+#define GMAC_REGS_OFFSET(port_id)	(0x1000 + (port_id) * 0x200)
+
+/* enable control register */
+#define GMAC_ENABLE_REG			0x0
+#define GMAC_ENABLE_RX_MASK		(1 << 0)
+#define GMAC_ENABLE_TX_MASK		(1 << 1)
+#define GMAC_ENABLE_FULL_DPX_MASK	(1 << 4)
+#define GMAC_ENABLE_RX_FC_MASK		(1 << 5)
+#define GMAC_ENABLE_TX_FC_MASK		(1 << 6)
+
+/* speed control register */
+#define GMAC_SPEED_REG			0x4
+#define GMAC_SPEED_SHIFT		0
+#define GMAC_SPEED_MASK			(0x3 << GMAC_SPEED_SHIFT)
+#define GMAC_SPEED_10			0
+#define GMAC_SPEED_100			1
+#define GMAC_SPEED_1000			2
+
+/* pause mac address config register */
+#define GMAC_PAUSE_MAC_ADDR0_REG	0x8	/* ____b5b4 */
+#define GMAC_PAUSE_MAC_ADDR1_REG	0xc	/* b3b2b1b0 */
+
+/* mac control 0 */
+#define GMAC_CTRL0_REG			0x10
+
+/* mac control 1 */
+#define GMAC_CTRL1_REG			0x14
+
+/* mac control 2 */
+#define GMAC_CTRL2_REG			0x18
+/* actual MRU filter, anything bigger is dropped, value must include
+ * FCS */
+#define GMAC_CTRL2_MAXLEN_SHIFT		8
+#define GMAC_CTRL2_MAXLEN_MASK		(0x3fff << GMAC_CTRL2_MAXLEN_SHIFT)
+
+/* MIB related */
+#define GMAC_MISC_REG			0x30
+/* only effect of this register is on MIB "too_big" packet counter,
+ * value must include FCS */
+#define GMAC_MISC_JUMBO_SIZE_SHIFT	0
+#define GMAC_MISC_JUMBO_SIZE_MASK	(0x3fff << GMAC_MISC_JUMBO_SIZE_SHIFT)
+#define GMAC_MISC_PAUSE_ITVL_SHIFT	16
+#define GMAC_MISC_PAUSE_ITLV_MASK	(0xff << GMAC_MISC_PAUSE_ITVL_SHIFT)
+#define GMAC_MISC_FORCE_TX_LPI_TMASK	(1 << 24)
+#define GMAC_MISC_MAC_GATE_MASK		(1 << 25)
+
+/* MIB control */
+#define GMAC_MIB_CTRL_REG		0x34
+#define GMAC_MIB_CTRL_EN_MASK		(1 << 0)
+#define GMAC_MIB_CTRL_RESET_MASK	(1 << 1)
+#define GMAC_MIB_CTRL_CLR_ON_RD_MASK	(1 << 2)
+
+/* MIB values from 0x40 to 0xf0 */
+
+
+/***************************************************************
+ *
+ * XGMAC registers
+ *
+ ***************************************************************/
+#define XGMAC_REGS_OFFSET(port_id)	(0x500000 + (port_id) * 0x4000)
+
+/* TX control */
+#define XGMAC_TX_CFG_REG		0x0
+
+#define XGMAC_TX_CFG_TX_EN_MASK		(1 << 0)
+#define XGMAC_TX_CFG_JABBER_DIS_MASK	(1 << 16)
+#define XGMAC_TX_CFG_SPEED_SHIFT	29
+#define XGMAC_TX_CFG_SPEED_MASK		(0x3 << XGMAC_TX_CFG_SPEED_SHIFT)
+#define XGMAC_TX_CFG_SPEED_10000	0
+#define XGMAC_TX_CFG_SPEED_5000		1
+#define XGMAC_TX_CFG_SPEED_2500		2
+#define XGMAC_TX_CFG_SPEED_1000		3
+#define XGMAC_TX_CFG_USS_MASK		(1 << 31)
+
+/* RX control */
+#define XGMAC_RX_CFG_REG		0x4
+
+#define XGMAC_RX_CFG_RX_EN_MASK		(1 << 0)
+#define XGMAC_RX_CFG_ACS_MASK		(1 << 1) /* automatic pad/CRC strip */
+#define XGMAC_RX_CFG_CST_MASK		(1 << 2) /* CRC strip if ethertype > 0x600 */
+#define XGMAC_RX_CFG_DCRCC_MASK		(1 << 3) /* Disable CRC Checking for Received Packets */
+#define XGMAC_RX_CFG_SPEN_MASK		(1 << 4) /* Slow Protocol Detection Enable */
+#define XGMAC_RX_CFG_USP_MASK		(1 << 5) /* Unicast Slow Protocol Packet Detect */
+#define XGMAC_RX_CFG_GPSLCE_MASK	(1 << 6) /* Giant Packet Size Limit Control Enable  */
+#define XGMAC_RX_CFG_WD_MASK		(1 << 7) /* watchdog enable */
+#define XGMAC_RX_CFG_JE_MASK		(1 << 8) /* jumbo enable */
+#define XGMAC_RX_CFG_IPC_MASK		(1 << 9) /* Checksum Offload */
+#define XGMAC_RX_CFG_LM_MASK		(1 << 10) /* loopback mode */
+#define XGMAC_RX_CFG_HDSMS_MASK		(0x7 << 12) /* Maximum Size for Splitting the Header Data */
+#define XGMAC_RX_CFG_GPSL_SHIFT		16
+#define XGMAC_RX_CFG_GPSL_MASK		(0x3fff << 16) /* Giant Packet Size Limit */
+#define XGMAC_RX_CFG_ARPEN_MASK		(1UL << 31) /* ARP offload */
+
+/* packet filter */
+#define XGMAC_PKT_FLT_REG		0x8
+#define XGMAC_PKT_FLT_PR_MASK		(1 << 0) /* Promiscuous Mode */
+#define XGMAC_PKT_FLT_HUC_MASK		(1 << 1) /* Hash Unicast */
+#define XGMAC_PKT_FLT_HMC_MASK		(1 << 2) /* Hash Multicast */
+#define XGMAC_PKT_FLT_DAIF_MASK		(1 << 3) /* DA Inverse Filtering */
+#define XGMAC_PKT_FLT_PM_MASK		(1 << 4) /* Pass all multicast */
+#define XGMAC_PKT_FLT_DBF_MASK		(1 << 5) /* Disable Broadcast frames */
+#define XGMAC_PKT_FLT_PCF_SHIFT		6
+#define XGMAC_PKT_FLT_PCF_MASK		(3 << 6) /* control packet filter */
+#define XGMAC_PKT_FLT_SAIF_MASK		(1 << 8) /* Inverse Filtering */
+#define XGMAC_PKT_FLT_SAF_MASK		(1 << 9) /* Source Address Filter */
+#define XGMAC_PKT_FLT_HPF_MASK		(1 << 10) /* Hash or perfect Filter */
+#define XGMAC_PKT_FLT_VTFE_MASK		(1 << 16) /* vlan filter */
+#define XGMAC_PKT_FLT_RA_MASK		(1UL << 31) /* Receive all mode */
+
+/* tx flow control */
+#define XGMAC_Q0_TX_FLOW_CTRL_REG	0x70
+#define XGMAC_Q0_TX_FLOW_CTRL_TFE_MASK	(1 << 1)
+#define XGMAC_Q0_TX_FLOW_CTRL_PT_SHIFT	16
+
+/* rx flow control */
+#define XGMAC_RX_FLOW_CTRL_REG		0x90
+#define XGMAC_RX_FLOW_CTRL_RFE_MASK	(1 << 0) /* handle pause packet */
+#define XGMAC_RX_FLOW_CTRL_UP_MASK	(1 << 1) /* unicast pause detect */
+
+/* LPI control & status */
+#define XGMAC_LPI_CS_REG		0xd0
+#define XGMAC_LPI_CS_TX_LPI_ENT_MASK	(1 << 0)
+#define XGMAC_LPI_CS_TX_LPI_EXIT_MASK	(1 << 1)
+#define XGMAC_LPI_CS_RX_LPI_ENT_MASK	(1 << 2)
+#define XGMAC_LPI_CS_RX_LPI_EXIT_MASK	(1 << 3)
+#define XGMAC_LPI_CS_TX_LPI_ST_MASK	(1 << 8)
+#define XGMAC_LPI_CS_RX_LPI_ST_MASK	(1 << 9)
+#define XGMAC_LPI_CS_TX_LPI_EN_MASK	(1 << 16)
+#define XGMAC_LPI_CS_PLS_MASK		(1 << 17)
+#define XGMAC_LPI_CS_PLSEN_MASK		(1 << 18)
+#define XGMAC_LPI_CS_LPI_TXA_MASK	(1 << 19)
+#define XGMAC_LPI_CS_LPI_TIMER_EN_MASK	(1 << 20)
+
+/* LPI timer control */
+#define XGMAC_LPI_TMR_CTL_REG		0xd4
+
+/* micro second unit */
+#define XGMAC_LPI_TMR_CTL_TW_SHIFT	0
+#define XGMAC_LPI_TMR_CTL_TW_MASK	(0xffff << XGMAC_LPI_TMR_CTL_TW_SHIFT)
+
+/* milli second unit */
+#define XGMAC_LPI_TMR_CTL_LS_SHIFT	16
+#define XGMAC_LPI_TMR_CTL_LS_MASK	(0xffff << XGMAC_LPI_TMR_CTL_LS_SHIFT)
+
+
+/* LPI MAC entry timer, in micro second unit */
+#define XGMAC_LPI_ENTRY_TIMER_REG	0xd8
+
+/* LPI MAC 1us tick counter */
+#define XGMAC_1US_TIC_CTR_REG		0xdc
+
+
+/* MIB control */
+#define XGMAC_MMC_CTRL_REG		0x800
+#define XGMAC_MMC_CTRL_CNT_RST_MASK	(1 << 0)
+#define XGMAC_MMC_CTRL_STOP_ROLLOVER_MASK	(1 << 1)
+#define XGMAC_MMC_CTRL_CLR_ON_RD_MASK	(1 << 2)
+#define XGMAC_MMC_CTRL_CNT_FREEZE_MASK	(1 << 3)
+
+/* MIB values from 0x814 to 0x9b8 */
+
+
+/***************************************************************
+ *
+ * TDM registers
+ *
+ ***************************************************************/
+
+#define TDM_BASE_ADDR			0x00b000
+
+#define TDM_CTRL_REG			(TDM_BASE_ADDR + 0x0)
+#define TDM_CTRL_DEPTH_SHIFT		0
+#define TDM_CTRL_DEPTH_MASK		(0xff << TDM_CTRL_DEPTH_SHIFT)
+#define TDM_CTRL_OFFSET_SHIFT		8
+#define TDM_CTRL_OFFSET_MASK		(0x7f << TDM_CTRL_OFFSET_SHIFT)
+#define TDM_CTRL_EN_MASK		(1 << 31)
+
+#define TDM_CFG_REG(idx)		(TDM_BASE_ADDR + 0x1000 + (idx) * 0x10)
+#define TDM_CFG_PORT_NUM_SHIFT		0
+#define TDM_CFG_PORT_NUM_MASK		(0xf << TDM_CFG_PORT_NUM_SHIFT)
+#define TDM_CFG_PORT_DIR_EGRESS_MASK	(1 << 4)
+#define TDM_CFG_PORT_VALID_MASK		(1 << 5)
+
+#define TDM_DROP_STATS_PKTS_REG(idx)	(TDM_BASE_ADDR + 0x3000 + (idx) * 0x10)
+#define TDM_DROP_STATS_BYTES_LO_REG(idx)	(TDM_BASE_ADDR + 0x3004 + (idx) * 0x10)
+#define TDM_DROP_STATS_BYTES_HI_REG(idx)	(TDM_BASE_ADDR + 0x3008 + (idx) * 0x10)
+
+
+/***************************************************************
+ *
+ * FIXME registers
+ *
+ ***************************************************************/
+
+/* some base addresses */
+#define NSS_MAC_CSR_BASE_ADDR		0x001000
+#define NSS_PRX_CSR_BASE_ADDR		0x00b000
+#define INGRESS_VLAN_BASE_ADDR		0x00f000
+#define NSS_PTX_CSR_BASE_ADDR		0x020000
+#define IPE_L2_BASE_ADDR		0x060000
+#define IPO_CSR_BASE_ADDR		0x0b0000
+#define INGRESS_POLICER_BASE_ADDR	0x100000
+#define PRE_IPO_CSR_BASE_ADDR		0x180000
+#define TUNNEL_PARSER_BASE_ADDR		0x1d0000
+#define IPR_CSR_BASE_ADDR		0x1e0000
+#define IPE_L3_BASE_ADDR		0x200000
+#define TUNNEL_LOOKUP_BASE_ADDR		0x300000
+#define TRAFFIC_MANAGER_BASE_ADDR	0x400000
+#define NSS_BM_CSR_BASE_ADDR		0x600000
+#define QUEUE_MANAGER_BASE_ADDR		0x800000
+
+
+/***************************************************************
+ *
+ * PTX_CSR registers
+ *
+ ***************************************************************/
+
+/* port vlan config */
+#define PORT_EG_VLAN_REG(port)				\
+	(NSS_PTX_CSR_BASE_ADDR + 0x20 + (port) * 0x4)
+
+#define PORT_EG_VLAN_TX_COUNTING_MASK	(1 << 8)
+
+/* bridge control register */
+#define EG_BRIDGE_CFG_REG		(NSS_PTX_CSR_BASE_ADDR + 0x44)
+#define EG_BRIDGE_CFG_CNT_EN_MASK	(1 << 2)
+
+#define DROP_STAT_REG(port, i)				\
+	(NSS_PRX_CSR_BASE_ADDR + 0x3000 + (port) * 0x10 + (i) * 0x4)
+
+#define DROP_STAT0_PKTS_SHIFT		0
+#define DROP_STAT0_PKTS_MASK		(0xffffffff << DROP_STAT0_PKTS_SHIFT)
+#define DROP_STAT1_BYTES_LO_SHIFT	0
+#define DROP_STAT1_BYTES_LO_MASK	(0xffffffff << DROP_STAT1_BYTES_LO_SHIFT)
+#define DROP_STAT2_BYTES_HI_SHIFT	0
+#define DROP_STAT2_BYTES_HI_MASK	(0xff << DROP_STAT2_BYTES_HI_SHIFT)
+
+
+/* port tx counters */
+#define PORT_TX_CNT_TBL_REG(port, off)				\
+	(NSS_PTX_CSR_BASE_ADDR + 0x25000 + (port) * 0x10 + (off) * 0x4)
+
+#define PORT_TX_CNT0_PKTS_SHIFT		0
+#define PORT_TX_CNT0_PKTS_MASK		(0xffffffff << PORT_TX_CNT0_PKTS_SHIFT)
+#define PORT_TX_CNT1_BYTES_LO_SHIFT	0
+#define PORT_TX_CNT1_BYTES_LO_MASK	(0xffffffff << PORT_TX_CNT1_BYTES_LO_SHIFT)
+#define PORT_TX_CNT2_BYTES_HI_SHIFT	0
+#define PORT_TX_CNT2_BYTES_HI_MASK	(0xff << PORT_TX_CNT2_BYTES_HI_SHIFT)
+
+/* queue tx counters */
+#define QUEUE_TX_COUNTER_TBL_REG(q, i)					\
+	(NSS_PTX_CSR_BASE_ADDR + 0x2a000 + (q) * 0x10 + (i) * 0x4)
+
+
+/***************************************************************
+ *
+ * IPE_L2 registers
+ *
+ ***************************************************************/
+
+/*
+ * spanning tree per port state
+ */
+#define PPE_STP_STATE_REG(port)		\
+	(IPE_L2_BASE_ADDR + 0x100 + (port) * 0x4)
+#define PPE_STP_STATE_FORWARDING	3
+
+/*
+ * fdb table operation
+ */
+#define FDB_TBL_OP_REG			(IPE_L2_BASE_ADDR + 0x8)
+#define FDB_TBL_RD_OP_REG		(IPE_L2_BASE_ADDR + 0x10)
+
+#define FDB_TBL_OP_CMD_ID_SHIFT		0
+#define FDB_TBL_OP_CMD_ID_MASK		(0xf << FDB_TBL_OP_CMD_ID_SHIFT)
+#define FDB_TBL_OP_BYP_RSLT_EN_MASK	(1 << 4)
+#define FDB_TBL_OP_TYPE_SHIFT		5
+#define FDB_TBL_OP_TYPE_MASK		(0x7 << FDB_TBL_OP_TYPE_SHIFT)
+#define FDB_TBL_OP_TYPE_ID_ADD		0
+#define FDB_TBL_OP_TYPE_ID_DEL		1
+#define FDB_TBL_OP_TYPE_ID_GET		2
+#define FDB_TBL_OP_TYPE_ID_FLUSH	4
+#define FDB_TBL_OP_TYPE_ID_AGE		5
+
+#define FDB_TBL_OP_HASH_BLOCK_BMP_SHIFT	8
+#define FDB_TBL_OP_HASH_BLOCK_BMP_MASK	(0x3 << FDB_TBL_OP_HASH_BLOCK_BMP_SHIFT)
+#define FDB_TBL_OP_MODE_HASH_MASK	(0 << 10)
+#define FDB_TBL_OP_MODE_INDEX_MASK	(1 << 10)
+#define FDB_TBL_OP_ENTRY_INDEX_SHIFT	11
+#define FDB_TBL_OP_ENTRY_INDEX_MASK	(0x7ff << FDB_TBL_OP_ENTRY_INDEX_SHIFT)
+
+/* read only, VALID_CNT bits are clear on read */
+#define FDB_TBL_OP_RESULT_REG		(IPE_L2_BASE_ADDR + 0x20)
+#define FDB_TBL_RD_OP_RESULT_REG	(IPE_L2_BASE_ADDR + 0x30)
+
+#define FDB_TBL_OPR_CMD_ID_SHIFT	0
+#define FDB_TBL_OPR_CMD_ID_MASK		(0xf << FDB_TBL_OPR_CMD_ID_SHIFT)
+#define FDB_TBL_OPR_OP_RES_MASK		(1 << 4)
+#define FDB_TBL_OPR_VALID_CNT_SHIFT	5
+#define FDB_TBL_OPR_VALID_CNT_MASK	(0xf << FDB_TBL_OPR_VALID_CNT_SHIFT)
+#define FDB_TBL_OPR_ENTRY_INDEX_SHIFT	9
+#define FDB_TBL_OPR_ENTRY_INDEX_MASK	(0x7ff << FDB_TBL_OPR_ENTRY_INDEX_SHIFT)
+
+/* l2 global config */
+#define L2_GLOB_CFG_REG			(IPE_L2_BASE_ADDR + 0x38)
+#define L2_GLOB_CFG_LEARN_EN_MASK	(1 << 6)
+#define L2_GLOB_CFG_AGING_EN_MASK	(1 << 7)
+
+/*
+ * fdb table operation
+ */
+
+/* read only */
+#define FDB_TBL_RD_OP_RESULTx_REG(x)		\
+	(IPE_L2_BASE_ADDR + 0x200 + (x) * 4)
+
+#define FDB_ENT0_MAC_LO_SHIFT		0
+#define FDB_ENT0_MAC_LO_MASK		(0xffffffff << FDB_ENT0_MAC_LO_SHIFT)
+
+#define FDB_ENT1_MAC_HI_SHIFT		0
+#define FDB_ENT1_MAC_HI_MASK		(0xffff << FDB_ENT1_MAC_HI_SHIFT)
+#define FDB_ENT1_ENTRY_VALID_MASK	(1 << 16)
+#define FDB_ENT1_LOOKUP_VALID_MASK	(1 << 17)
+#define FDB_ENT1_VSI_SHIFT		18
+#define FDB_ENT1_VSI_MASK		(0x3f << FDB_ENT1_VSI_SHIFT)
+#define FDB_ENT1_DST_INFO_LO_SHIFT	24
+#define FDB_ENT1_DST_INFO_LO_MASK	(0xff << FDB_ENT1_DST_INFO_LO_SHIFT)
+
+#define FDB_ENT2_DST_INFO_HI_SHIFT	0
+#define FDB_ENT2_DST_INFO_HI_MASK	(0x3f << FDB_ENT2_DST_INFO_HI_SHIFT)
+#define FDB_ENT2_SA_CMD_SHIFT		6
+#define FDB_ENT2_SA_CMD_MASK		(0x3 << FDB_ENT2_SA_CMD_SHIFT)
+#define FDB_ENT2_DA_CMD_SHIFT		8
+#define FDB_ENT2_DA_CMD_MASK		(0x3 << FDB_ENT2_DA_CMD_SHIFT)
+#define FDB_ENT2_HIT_AGE_SHIFT		10
+#define FDB_ENT2_HIT_AGE_MASK		(0x3 << FDB_ENT2_HIT_AGE_SHIFT)
+
+#define DST_INFO_ENC_TYPE_SHIFT		12
+#define DST_INFO_ENC_TYPE_MASK		(0x3 << DST_INFO_ENC_TYPE_SHIFT)
+#define DST_INFO_ENC_TYPE_PORT_ID	0x2
+#define DST_INFO_ENC_TYPE_PORT_BITMAP	0x3
+
+
+#define FDB_TBL_OP_DATAx_REG(x)			\
+	(IPE_L2_BASE_ADDR + 0x230 + (x) * 4)
+
+#define FDB_TBL_RD_OP_DATAx_REG(x)		\
+	(IPE_L2_BASE_ADDR + 0x260 + (x) * 4)
+
+/*
+ * bridge port control
+ */
+#define PPE_PORT_BRCTL_REG(port)	\
+	(IPE_L2_BASE_ADDR + 0x300 + (port) * 0x4)
+#define PORT_BRCTL_NEW_ADDR_LRN_EN_MASK	(1 << 0)
+#define PORT_BRCTL_STA_MOVE_LRN_EN_MASK	(1 << 3)
+#define PORT_BRCTL_PORT_ISOL_SHIFT	8
+#define PORT_BRCTL_PORT_ISOL_MASK	(0xff << PORT_BRCTL_PORT_ISOL_SHIFT)
+#define PORT_BRCTL_TXMAC_EN_MASK	(1 << 16)
+#define PORT_BRCTL_PROMISC_EN_MASK	(1 << 17)
+
+/*
+ * port membership for a given VSI, spans two registers
+ */
+#define VSI_TBLx_REG(vsi, offset)	\
+	(IPE_L2_BASE_ADDR + 0x3800 + (vsi) * 16 + (offset) * 0x4)
+
+#define VSI_TBL0_MEMB_MAP_SHIFT		0
+#define VSI_TBL0_MEMB_MAP_MASK		(0xff << VSI_TBL0_MEMB_MAP_SHIFT)
+#define VSI_TBL0_UNK_UNI_MAP_SHIFT	8
+#define VSI_TBL0_UNK_UNI_MAP_MASK	(0xff << VSI_TBL0_UNK_UNI_MAP_SHIFT)
+#define VSI_TBL0_UNK_MULTI_MAP_SHIFT	16
+#define VSI_TBL0_UNK_MULTI_MAP_MASK	(0xff << VSI_TBL0_UNK_MULTI_MAP_SHIFT)
+#define VSI_TBL0_BCAST_MAP_SHIFT	24
+#define VSI_TBL0_BCAST_MAP_MASK		(0xff << VSI_TBL0_BCAST_MAP_SHIFT)
+
+#define VSI_TBL1_NEW_ADDR_LEARN_EN_MASK	(1 << 0)
+#define VSI_TBL1_ADDR_MOVE_EN_MASK	(1 << 3)
+
+/*
+ * port param control (mru/mtu/profile/...)
+ *
+ * whole table content (3 items) needs to be written for change to
+ * take effect
+ */
+#define MRU_MTU_CTRL_TBL_REG(port, off)			\
+	(IPE_L2_BASE_ADDR + 0x5000 + (port) * 0x10 + (off) * 0x4)
+#define MRU_MTU_CTRL0_MRU_SHIFT		0
+#define MRU_MTU_CTRL0_MRU_MASK		(0x3fff << MRU_MTU_CTRL0_MRU_SHIFT)
+#define MRU_MTU_CTRL0_MRU_CMD_SHIFT	14
+#define MRU_MTU_CTRL0_MRU_CMD_MASK	(0x3 << MRU_MTU_CTRL0_MRU_CMD_SHIFT)
+#define MRU_MTU_CTRL0_MTU_SHIFT		16
+#define MRU_MTU_CTRL0_MTU_MASK		(0x3fff << MRU_MTU_CTRL0_MTU_SHIFT)
+#define MRU_MTU_CTRL0_MTU_CMD_SHIFT	30
+#define MRU_MTU_CTRL0_MTU_CMD_MASK	(0x3 << MRU_MTU_CTRL0_MTU_CMD_SHIFT)
+
+#define MRU_MTU_CTRL1_CNT_RX_EN_MASK	(1 << 0)
+#define MRU_MTU_CTRL1_CNT_TX_EN_MASK	(1 << 1)
+
+/*
+ * in-L2 service code table (per service-code config — name suggests; verify) */
+#define IN_L2_SERVICE_TBL_REG(code, off)		\
+	(IPE_L2_BASE_ADDR + 0x6000 + (code) * 0x10 + (off) * 0x4)
+
+/*
+ * per-port multicast control
+ */
+#define MC_MTU_CTRL_TBL_REG(port)			\
+	(IPE_L2_BASE_ADDR + 0xa00 + (port) * 0x4)
+#define MC_MTU_CTRL_CNT_EN_MASK		(1 << 17)
+
+/*
+ * virtual port membership for a given VSI, spans 9 registers
+ */
+#define VSI_REMAP_TBL_REG(vsi, x)			\
+	(IPE_L2_BASE_ADDR + 0x33000 + (vsi) * 0x40 + (x) * 4)
+
+/*
+ * virtual port configuration
+ */
+#define L2_VP_PORT_TBL_REG(vp, x)		\
+	(IPE_L2_BASE_ADDR + 0x38000 + (vp) * 0x10 + (x) * 4)
+
+#define L2_VP_PORT_TBL0_INVAL_VSI_FWD_EN_MASK	(1 << 0)
+#define L2_VP_PORT_TBL0_PROMISC_EN_MASK		(1 << 1)
+#define L2_VP_PORT_TBL0_DST_INFO_SHIFT		2
+#define L2_VP_PORT_TBL0_PHYS_PORT_SHIFT		10
+#define L2_VP_PORT_TBL0_NEW_LRN_EN_MASK		(1 << 13)
+
+#define L2_VP_PORT_TBL1_P_ISOL_BMP_SHIFT	3
+#define L2_VP_PORT_TBL1_ISOL_PROFILE_SHIFT	11
+#define L2_VP_PORT_TBL1_ISOL_EN_MASK		(1 << 17)
+#define L2_VP_PORT_TBL1_POLICER_EN_MASK		(1 << 18)
+#define L2_VP_PORT_TBL1_POLICER_IDX_SHIFT	19
+#define L2_VP_PORT_TBL1_VP_STATE_CHECK_MASK	(1 << 28)
+#define L2_VP_PORT_TBL1_TYPE_NORMAL_MASK	(1 << 29)
+#define L2_VP_PORT_TBL1_ACTIVE_MASK		(1 << 30)
+#define L2_VP_PORT_TBL1_EG_DATA_VALID_MASK	(1 << 31)
+
+/*
+ * virtual port isolation table (profile 0 to 63)
+ */
+#define VP_ISOL_TBL_REG(profile, x)					\
+	(IPE_L2_BASE_ADDR + 0x3c000 + (profile) * 0x10 + (x) * 4)
+
+/*
+ * vsi enqueue configuration (544 entries)
+ */
+#define PORT_VSI_ENQUEUE_MAP_REG(idx)					\
+	(IPE_L2_BASE_ADDR + 0x3d000 + (idx) * 4)
+
+/* idx generation */
+#define PORT_VSI_ENQUEUE_IDX_VSI_SHIFT	0
+#define PORT_VSI_ENQUEUE_IDX_VSI_MASK	(0x3f << PORT_VSI_ENQUEUE_IDX_VSI_SHIFT)
+#define PORT_VSI_ENQUEUE_IDX_PORT_SHIFT	6
+#define PORT_VSI_ENQUEUE_IDX_PORT_MASK	(0x7 << PORT_VSI_ENQUEUE_IDX_PORT_SHIFT)
+/* value generation */
+#define PORT_VSI_ENQUEUE_EVP_SHIFT	0
+#define PORT_VSI_ENQUEUE_EVP_MASK	(0xff << PORT_VSI_ENQUEUE_EVP_SHIFT)
+#define PORT_VSI_ENQUEUE_VALID_MASK	(1 << 8)
+
+/***************************************************************
+ *
+ * IPE_L3 registers
+ *
+ ***************************************************************/
+#define L3_VP_PORT_TBL0_REG(port)	(IPE_L3_BASE_ADDR + 0x4000 + \
+					 (port * 0x10))
+#define L3_VP_PORT_TBL1_REG(port)	(L3_VP_PORT_TBL0_REG(port) + 0x4)
+#define L3_VP_PORT_TBL1_VSI_VALID_SHIFT	9
+#define L3_VP_PORT_TBL1_VSI_VALID_MASK	(1 << L3_VP_PORT_TBL1_VSI_VALID_SHIFT)
+#define L3_VP_PORT_TBL1_VSI_SHIFT	10
+#define L3_VP_PORT_TBL1_VSI_MASK	(0x3f << L3_VP_PORT_TBL1_VSI_SHIFT)
+#define L3_VP_PORT_TBL2_REG(port)	(L3_VP_PORT_TBL0_REG(port) + 0x8)
+#define L3_VP_PORT_TBL3_REG(port)	(L3_VP_PORT_TBL0_REG(port) + 0xc)
+
+
+/***************************************************************
+ *
+ * INGRESS_POLICER registers
+ *
+ ***************************************************************/
+
+#define PORT_RX_CNT_TBL_REG(port, i)				\
+	(INGRESS_POLICER_BASE_ADDR + 0x50000 + (port) * 0x20 + (i) * 0x4)
+
+#define PHYS_PORT_RX_CNT_TBL_REG(port, i)				\
+	(INGRESS_POLICER_BASE_ADDR + 0x56000 + (port) * 0x20 + (i) * 0x4)
+
+#define PORT_RX_CNT0_PKTS_SHIFT		0
+#define PORT_RX_CNT0_PKTS_MASK		(0xffffffff << PORT_RX_CNT0_PKTS_SHIFT)
+#define PORT_RX_CNT1_BYTES_LO_SHIFT	0
+#define PORT_RX_CNT1_BYTES_LO_MASK	(0xffffffff << PORT_RX_CNT1_BYTES_LO_SHIFT)
+#define PORT_RX_CNT2_BYTES_HI_SHIFT	0
+#define PORT_RX_CNT2_BYTES_HI_MASK	(0xff << PORT_RX_CNT2_BYTES_HI_SHIFT)
+#define PORT_RX_CNT2_DROP_PKT_LO_SHIFT	8
+#define PORT_RX_CNT2_DROP_PKT_LO_MASK	(0xffffff << PORT_RX_CNT2_DROP_PKT_LO_SHIFT)
+#define PORT_RX_CNT3_DROP_PKT_HI_SHIFT	0
+#define PORT_RX_CNT3_DROP_PKT_HI_MASK	(0xff << PORT_RX_CNT3_DROP_PKT_HI_SHIFT)
+#define PORT_RX_CNT3_DROP_BYTES_LO_SHIFT	8
+#define PORT_RX_CNT3_DROP_BYTES_LO_MASK	(0xffffff << PORT_RX_CNT3_DROP_BYTES_LO_SHIFT)
+#define PORT_RX_CNT4_DROP_BYTES_HI_SHIFT	0
+#define PORT_RX_CNT4_DROP_BYTES_HI_MASK	(0xffff << PORT_RX_CNT4_DROP_BYTES_HI_SHIFT)
+
+#define DROP_CPU_CNT_TBL_REG(port, code, off)		    \
+	(INGRESS_POLICER_BASE_ADDR + 0x60000 + 256 * 0x10 + \
+	 (code * 8 * 0x10) + (port * 0x10) + (off) * 0x4)
+
+
+/***************************************************************
+ *
+ * TRAFFIC_MANAGER registers
+ *
+ ***************************************************************/
+
+/*
+ * used to configure the depth of the PSCH_TDM_CFG_TBL table
+ */
+#define PSCH_TDM_DEPTH_CFG_REG		(TRAFFIC_MANAGER_BASE_ADDR + 0x0)
+#define PSCH_TDM_DEPTH_CFG_SHIFT	0
+#define PSCH_TDM_DEPTH_CFG_MASK		(0xff << PSCH_TDM_DEPTH_CFG_SHIFT)
+
+/*
+ * tx traffic scheduler configuration, hardware has two stage
+ * scheduling and is fully reconfigurable
+ *
+ * two levels:
+ *   => L0 from queue_id to a "flow_id"
+ *   => L1 from "flow_id" to port_id
+ *   => L2 is port scheduler
+ *
+ * each stage can do strict prio and/or DRR scheduling
+ */
+
+/*
+ * table indexed by PPE qid (table has 300 entries)
+ *
+ * SP_ID, also called "flow_id", it must be set to one of the 64 L1 SP
+ * available
+ */
+#define L0_FLOW_MAP_TBL_REG(queue_id)				\
+	(TRAFFIC_MANAGER_BASE_ADDR + 0x2000 + 0x10 * (queue_id))
+#define L0_FLOW_MAP_SP_ID_SHIFT		0
+#define L0_FLOW_MAP_C_PRI_SHIFT		6
+#define L0_FLOW_MAP_E_PRI_SHIFT		9
+#define L0_FLOW_MAP_C_DRR_WT_SHIFT	12
+#define L0_FLOW_MAP_E_DRR_WT_SHIFT	22
+
+/*
+ * each SP has 8 fixed prios for C path and 8 for E path, table index
+ * is SP_ID/prio (table has 64 * 8 entries)
+ *
+ * DRR_ID must be allocated from one of the 320 DRRs available in L0
+ */
+#define L0_C_SP_CFG_TBL_REG(sp, prio)				\
+	(TRAFFIC_MANAGER_BASE_ADDR + 0x4000 + 0x80 * (sp) + 0x10 * (prio))
+#define L0_C_SP_CFG_DRR_ID_SHIFT	0
+#define L0_C_SP_CFG_DRR_CRED_UNIT_MASK	(1 << 8)
+#define L0_C_SP_CFG_DRR_CRED_UNIT_BYTE		0
+#define L0_C_SP_CFG_DRR_CRED_UNIT_PACKET	1
+
+/* same table as L0_C_SP_CFG_TBL_REG, but for "E path" */
+#define L0_E_SP_CFG_TBL_REG(sp, prio)				\
+	(TRAFFIC_MANAGER_BASE_ADDR + 0x6000 + 0x80 * (sp) + 0x10 * (prio))
+#define L0_E_SP_CFG_DRR_ID_SHIFT	0
+#define L0_E_SP_CFG_DRR_CRED_UNIT_MASK	(1 << 8)
+#define L0_E_SP_CFG_DRR_CRED_UNIT_BYTE		0
+#define L0_E_SP_CFG_DRR_CRED_UNIT_PACKET	1
+
+/* each L0/L1 resource must be associated to the final port_id, this
+ * is setup here */
+#define L0_FLOW_PORT_TBL_REG(queue_id)				\
+	(TRAFFIC_MANAGER_BASE_ADDR + 0x8000 + 0x10 * (queue_id))
+#define L0_FLOW_PORT_PNUM_SHIFT		0
+#define L0_FLOW_PORT_PNUM_MASK		(0xf << L0_FLOW_PORT_PNUM_SHIFT)
+
+/*
+ * mapping from EDMA rx queue id to PPE queue id bitmask used for flow
+ * control, so the PPE knowns which queues to stop scheduling when an
+ * EDMA rx queue is full.
+ */
+#define RING_Q_MAP_TBL_REG(dma_queue, bitmap_id)			\
+	(TRAFFIC_MANAGER_BASE_ADDR + 0x2a000 + 0x40 * (dma_queue) + 4 * (bitmap_id))
+
+/*
+ * table index by "flow_id", ie the SP_ID choosen in L0. (table has 64
+ * entries)
+ *
+ * SP_ID must be set to one of the 8 SP available
+ */
+#define L1_FLOW_MAP_TBL_REG(idx)				\
+	(TRAFFIC_MANAGER_BASE_ADDR + 0x40000 + 0x10 * (idx))
+#define L1_FLOW_MAP_SP_ID_SHIFT		0
+#define L1_FLOW_MAP_C_PRI_SHIFT		6
+#define L1_FLOW_MAP_E_PRI_SHIFT		9
+#define L1_FLOW_MAP_C_DRR_WT_SHIFT	12
+#define L1_FLOW_MAP_E_DRR_WT_SHIFT	22
+
+/*
+ * same concept as L0_C_SP_CFG_TBL_REG, but in L1
+ *
+ * DRR_ID must be allocated from one of the 72 DRRs available in L1
+ */
+#define L1_C_SP_CFG_TBL_REG(sp, prio)				\
+	(TRAFFIC_MANAGER_BASE_ADDR + 0x42000 + 0x80 * (sp) + 0x10 * (prio))
+#define L1_C_SP_CFG_DRR_ID_SHIFT	0
+#define L1_C_SP_CFG_DRR_CRED_UNIT_MASK	(1 << 8)
+#define L1_C_SP_CFG_DRR_CRED_UNIT_BYTE		0
+#define L1_C_SP_CFG_DRR_CRED_UNIT_PACKET	1
+
+/* same table as L1_C_SP_CFG_TBL_REG, but for "E path" */
+#define L1_E_SP_CFG_TBL_REG(sp, prio)				\
+	(TRAFFIC_MANAGER_BASE_ADDR + 0x44000 + 0x80 * (sp) + 0x10 * (prio))
+#define L1_E_SP_CFG_DRR_ID_SHIFT	0
+#define L1_E_SP_CFG_DRR_CRED_UNIT_MASK	(1 << 8)
+#define L1_E_SP_CFG_DRR_CRED_UNIT_BYTE		0
+#define L1_E_SP_CFG_DRR_CRED_UNIT_PACKET	1
+
+/* each L0/L1 resource must be associated to the final port_id, this
+ * is setup here */
+#define L1_FLOW_PORT_TBL_REG(idx)				\
+	(TRAFFIC_MANAGER_BASE_ADDR + 0x46000 + 0x10 * (idx))
+#define L1_FLOW_PORT_PNUM_SHIFT		0
+#define L1_FLOW_PORT_PNUM_MASK		(0xf << L1_FLOW_PORT_PNUM_SHIFT)
+
+
+#define PSCH_TDM_CFG_TBL_REG(idx)			\
+	(TRAFFIC_MANAGER_BASE_ADDR + 0x7a000 + 0x10 * (idx))
+#define PSCH_TDM_CFG_DES_PORT_SHIFT	0
+#define PSCH_TDM_CFG_DES_PORT_MASK	(0xf << PSCH_TDM_CFG_DES_PORT_SHIFT)
+#define PSCH_TDM_CFG_ENS_PORT_SHIFT	4
+#define PSCH_TDM_CFG_ENS_PORT_MASK	(0xf << PSCH_TDM_CFG_ENS_PORT_SHIFT)
+#define PSCH_TDM_CFG_ENS_PORT_BM_SHIFT	8
+#define PSCH_TDM_CFG_ENS_PORT_BM_MASK	(0xff << PSCH_TDM_CFG_ENS_PORT_BM_SHIFT)
+#define PSCH_TDM_CFG_DES_SEC_PORT_EN_MASK	(1 << 16)
+#define PSCH_TDM_CFG_DES_SEC_PORT_SHIFT	17
+#define PSCH_TDM_CFG_DES_SEC_PORT_MASK	(0xf << PSCH_TDM_CFG_DES_SEC_PORT_SHIFT)
+
+
+/***************************************************************
+ *
+ * BUFFER_MANAGER registers
+ *
+ ***************************************************************/
+
+/*
+ * buffers allocation is tracked per port for flow control purpose.
+ *
+ * When too many buffers are allocated and flow-control is enabled,
+ * then the switch will stop dequeuing packets from TX DMA until in-flight
+ * packets are sent.
+ *
+ * Packets coming from EDMA TX are assigned id 0 to 7, depending on
+ * EDMA tx ring configuration (default is 0). This allows creating
+ * per-tx-dma-queue buffers pools, which is useful when that dma queue
+ * is used solely to send packets to a single physical port.
+ *
+ *
+ * BM allocation goes like this:
+ *
+ *  incoming packet from either TXDMA (virt-port) or physical port
+ *  assign corresponding port id
+ *
+ *  allocate from following:
+ *    - "guaranteed space" (pre-allocated static buffers)
+ *
+ *    - "shared space", port belong to one of four groups, each group has
+ *       a number of allocated buffers
+ *
+ *    - "reacted space", leftover buffers, with a per-port configurable limit
+ */
+#define BM_PORT_COUNT			15
+#define BM_PORT_IS_CPU(x)		((x) == 0)
+#define BM_PORT_IS_VIRTUAL(x)		((x) >= 1 && (x) <= 8)
+#define BM_PORT_IS_PHYSICAL(x)		((x) >= 8 && (x) <= 13)
+#define BM_PORT_IS_EIP(x)		((x) == 14)
+
+/*
+ * control flow-control enable/disable for this port
+ *
+ * when disabled, those limits are not enforced and port is free to
+ * use as many buffers as available
+ */
+#define PORT_FC_MODE_REG(port)					\
+	(NSS_BM_CSR_BASE_ADDR + 0x100 + (port) * 0x4)
+#define PORT_FC_MODE_EN_MASK	(1 << 0)
+
+#define PORT_FC_STAT_REG(port)					\
+	(NSS_BM_CSR_BASE_ADDR + 0x140 + (port) * 0x4)
+
+/* xon value (threshold that will cause port to release xoff) */
+#define PORT_FC_STAT_XON_TH_SHIFT	0
+#define PORT_FC_STAT_XON_TH_MASK	(0x3ff << PORT_FC_STAT_XON_TH_SHIFT)
+/* 0: not in FC, 1: FC active, 2 drop state */
+#define PORT_FC_STAT_ST_SHIFT		16
+#define PORT_FC_STAT_ST_MASK		(0x3 << PORT_FC_STAT_ST_SHIFT)
+
+/* assign that port to given shared group id (0 to 3) */
+#define PORT_FC_GRP_ID_REG(port)			\
+	(NSS_BM_CSR_BASE_ADDR + 0x180 + (port) * 0x4)
+
+/* read-only, current number of buffers used by port */
+#define PORT_FC_CNT_REG(port)					\
+	(NSS_BM_CSR_BASE_ADDR + 0x1c0 + (port) * 0x4)
+
+/* read-only, current number of buffers in reacted space used by
+ * port (NOTE(review): offset 0x1c0 duplicates PORT_FC_CNT_REG — verify) */
+#define PORT_FC_REACTED_CNT_REG(port)				\
+	(NSS_BM_CSR_BASE_ADDR + 0x1c0 + (port) * 0x4)
+
+/* read-only, current number of buffer used in given shared group */
+#define PORT_FC_SHARED_GRP_CNT_REG(grp)		\
+	(NSS_BM_CSR_BASE_ADDR + 0x280 + (grp) * 0x4)
+
+/* configure number of buffers available in given shared group (0 to 3) */
+#define PORT_FC_SHARED_GRP_CFG_REG(grp)		\
+	(NSS_BM_CSR_BASE_ADDR + 0x290 + (grp) * 0x4)
+
+/*
+ * if "shared dynamic" is disabled, port Xoff is asserted when number of
+ * allocated buffers is higher than:
+ *   PRE_ALLOC + SHARED_CEILING
+ *
+ * if "shared dynamic" is enabled, port Xoff is asserted when number of
+ * allocated buffers is higher than:
+ *   PRE_ALLOC + min(PORT_WEIGHT * SHARED_COUNT, SHARED_CEILING)
+ *
+ * port Xon is asserted when number of allocated buffers goes lower than:
+ *   max(XOFF_VALUE - RESUME_OFFSET, RESUME_FLOOR)
+ *
+ *  PORT_WEIGHT:
+ *  - 0: 1/64
+ *  - 1: 1/32
+ *  - 2: 1/16
+ *  - 3: 1/8
+ *  - 4: 1/4
+ *  - 5: 1/2
+ *  - 6: 1
+ *  - 7: 2
+ *
+ * react_limit must be higher than shared_ceiling
+ * shared_ceiling must be lower than shared group total value
+ */
+#define PORT_FC_CFG1_REG(port)		\
+	(NSS_BM_CSR_BASE_ADDR + 0x1000 + (port) * 0x10)
+
+#define PORT_FC_CFG_REACT_LIM_SHIFT	0
+#define PORT_FC_CFG_REACT_LIM_MASK	(0x1ff << PORT_FC_CFG_REACT_LIM_SHIFT)
+#define PORT_FC_CFG_RESM_FLOOR_SHIFT	9
+#define PORT_FC_CFG_RESM_FLOOR_MASK	(0x1ff << PORT_FC_CFG_RESM_FLOOR_SHIFT)
+#define PORT_FC_CFG_RESM_OFF_SHIFT	18
+#define PORT_FC_CFG_RESM_OFF_MASK	(0x7ff << PORT_FC_CFG_RESM_OFF_SHIFT)
+#define PORT_FC_CFG_SH_CEIL0_SHIFT	29
+#define PORT_FC_CFG_SH_CEIL0_MASK	(0x7 << PORT_FC_CFG_SH_CEIL0_SHIFT)
+
+#define PORT_FC_CFG2_REG(port)		\
+	(NSS_BM_CSR_BASE_ADDR + 0x1004 + (port) * 0x10)
+
+#define PORT_FC_CFG2_SH_CEIL1_SHIFT	0
+#define PORT_FC_CFG2_SH_CEIL1_MASK	(0xff << PORT_FC_CFG2_SH_CEIL1_SHIFT)
+#define PORT_FC_CFG2_SH_WEIGHT_SHIFT	8
+#define PORT_FC_CFG2_SH_WEIGHT_MASK	(0x7 << PORT_FC_CFG2_SH_WEIGHT_SHIFT)
+#define PORT_FC_CFG2_SH_DYN_MASK	(1 << 11)
+#define PORT_FC_CFG2_PREALLOC_SHIFT	12
+#define PORT_FC_CFG2_PREALLOC_MASK	(0x7ff << PORT_FC_CFG2_PREALLOC_SHIFT)
+
+/***************************************************************
+ *
+ * QUEUE_MANAGER registers
+ *
+ ***************************************************************/
+
+/*
+ * default hash value to use for RSS where there is no hash in packet
+ */
+#define UCAST_DEFAULT_HASH_REG	\
+	(QUEUE_MANAGER_BASE_ADDR + 0x60)
+
+/*
+ * Table used to decide on which queue a unicast packet should go.
+ *
+ * Indexed by:
+ *   - 0 + (<source_profile:2> | <destination_port:8>)
+ *   - 1024 + (<source_profile:2> | <cpu_code:8>)
+ *   - 2048 + (<source_profile:2> | <service_code:8>)
+ *
+ * Each table entry holds:
+ *   - <queue_id:8> | <profile_id:4>
+ *
+ * the resulting <queue_id> is further altered (simple addition) by
+ * the output of other lookup tables (UCAST_PRIO_MAP_TBL,
+ * UCAST_HASH_MAP_TBL) to generate the final queue id.
+ *
+ * the final queue id will be used only if no flow overrides the value
+ */
+#define UCAST_QUEUE_MAP_TBL_REG(idx)	\
+	(QUEUE_MANAGER_BASE_ADDR + 0x10000 + 0x10 * (idx))
+#define UCAST_QUEUE_MAP_PROF_SHIFT	0
+#define UCAST_QUEUE_MAP_QID_SHIFT	4
+
+/*
+ * Table used to alter queue_id decision of UCAST_QUEUE_MAP_TBL_REG to
+ * handle RSS.
+ *
+ * Indexed by:
+ *   - (<profile_id:4> | <hash_value:8>)
+ *
+ * Each table entry holds: <hash:8>
+ *
+ * the resulting <hash:8> is added to <queue_id> output of
+ * UCAST_QUEUE_MAP_TBL_REG
+ */
+#define UCAST_HASH_MAP_TBL_REG(idx)	\
+	(QUEUE_MANAGER_BASE_ADDR + 0x30000 + 0x10 * (idx))
+#define UCAST_HASH_MAP_HASH_SHIFT	0
+#define UCAST_HASH_MAP_PROF_SHIFT	8
+
+/*
+ * Table used to alter queue_id decision of UCAST_QUEUE_MAP_TBL_REG to
+ * handle priority.
+ *
+ * Indexed by:
+ *   - (<profile_id:4> | <input_priority:4>)
+ *
+ * Each table entry holds:
+ *   - <class:4>
+ *
+ * the resulting <class:4> is added to <queue_id> output of
+ * UCAST_QUEUE_MAP_TBL_REG
+ */
+#define UCAST_PRIO_MAP_TBL_REG(idx)	\
+	(QUEUE_MANAGER_BASE_ADDR + 0x42000 + 0x10 * (idx))
+#define UCAST_PRIO_MAP_INPRIO_SHIFT	0
+#define UCAST_PRIO_MAP_PROF_SHIFT	4
+
+/*
+ * table with pending packets for given queue
+ */
+#define AC_UNI_QUEUE_CNT_TBL_REG(idx)	\
+	(QUEUE_MANAGER_BASE_ADDR + 0x4e000 + 0x10 * (idx))
+
+/*
+ * table with dropped packets for given queue
+ */
+#define UNI_DROP_CNT_TBL_MAX_ITEMS			6
+#define UNI_DROP_CNT_TBL_REG(queue, item, off)		\
+	(QUEUE_MANAGER_BASE_ADDR + 0x1e0000 + 0x10 *		\
+	 (queue * UNI_DROP_CNT_TBL_MAX_ITEMS) + (item) * 0x10 + (off) * 0x4)
+
+/*
+ * unicast queue ac configuration (256 entries)
+ */
+#define AC_UNI_QUEUE_CFG_TBL_REG(idx, off)			\
+	(QUEUE_MANAGER_BASE_ADDR + 0x48000 + (idx) * 0x10 + (off) * 4)
+
+#define AC_UNI_CFG0_AC_EN_MASK		(1 << 0)
+#define AC_UNI_CFG0_WRED_EN_MASK	(1 << 1)
+#define AC_UNI_CFG0_FORCE_AC_EN_MASK	(1 << 2)
+#define AC_UNI_CFG0_COLOR_AWARE_MASK	(1 << 3)
+#define AC_UNI_CFG0_GRP_ID_SHIFT	4
+#define AC_UNI_CFG0_GRP_ID_MASK		(0x3 << AC_UNI_CFG0_GRP_ID_SHIFT)
+#define AC_UNI_CFG0_PREALLOC_LIM_SHIFT	6
+#define AC_UNI_CFG0_PREALLOC_LIM_MASK	(0x7ff << AC_UNI_CFG0_PREALLOC_LIM_SHIFT)
+#define AC_UNI_CFG0_SHRD_DYN_EN_MASK	(1 << 17)
+#define AC_UNI_CFG0_SHRD_WEIGHT_SHIFT	18
+#define AC_UNI_CFG0_SHRD_WEIGHT_MASK	(0x7 << AC_UNI_CFG0_SHRD_WEIGHT_SHIFT)
+#define AC_UNI_CFG0_SHRD_CEIL_SHIFT	21
+#define AC_UNI_CFG0_SHRD_CEIL_MASK	(0x7ff << AC_UNI_CFG0_SHRD_CEIL_SHIFT)
+
+#define AC_UNI_CFG1_GAP_GRN_GRN_MIN_SHIFT	0
+#define AC_UNI_CFG1_GAP_GRN_YEL_MAX_SHIFT	11
+#define AC_UNI_CFG1_GAP_GRN_YEL_MIN_LO_SHIFT	22
+
+#define AC_UNI_CFG2_GAP_GRN_YEL_MIN_HI_SHIFT	0
+#define AC_UNI_CFG2_GAP_RED_MAX_SHIFT		1
+#define AC_UNI_CFG2_GAP_RED_MIN_SHIFT		12
+#define AC_UNI_CFG2_RED_RESUME_OFF_LO_SHIFT	23
+
+#define AC_UNI_CFG3_RED_RESUME_OFF_HI_SHIFT	0
+#define AC_UNI_CFG3_YEL_RESUME_OFF_HI_SHIFT	2
+#define AC_UNI_CFG3_GRN_RESUME_OFF_HI_SHIFT	13
+
+/*
+ * multicast queue ac configuration (44 entries)
+ */
+#define AC_MUL_QUEUE_CFG_TBL_REG(idx, off)			\
+	(QUEUE_MANAGER_BASE_ADDR + 0x4a000 + (idx) * 0x10 + (off) * 4)
+
+#define AC_MUL_CFG0_AC_EN_MASK		(1 << 0)
+#define AC_MUL_CFG0_FORCE_AC_EN_MASK	(1 << 1)
+#define AC_MUL_CFG0_COLOR_AWARE_MASK	(1 << 2)
+#define AC_MUL_CFG0_GRP_ID_SHIFT	3
+#define AC_MUL_CFG0_GRP_ID_MASK		(0x3 << AC_MUL_CFG0_GRP_ID_SHIFT)
+#define AC_MUL_CFG0_PREALLOC_LIM_SHIFT	5
+#define AC_MUL_CFG0_PREALLOC_LIM_MASK	(0x7ff << AC_MUL_CFG0_PREALLOC_LIM_SHIFT)
+#define AC_MUL_CFG0_SHR_CEIL_SHIFT		16
+#define AC_MUL_CFG0_GAP_GRN_YEL_LO_SHIFT	27
+
+#define AC_MUL_CFG1_GAP_GRN_YEL_HI_SHIFT	0
+#define AC_MUL_CFG1_GAP_GRN_RED_SHIFT		6
+#define AC_MUL_CFG1_RED_RESUME_OFF_SHIFT	17
+#define AC_MUL_CFG1_YEL_RESUME_OFF_LO_SHIFT	28
+
+#define AC_MUL_CFG2_YEL_RESUME_OFF_HI_SHIFT	0
+#define AC_MUL_CFG2_GRN_RESUME_OFF_HI_SHIFT	7
+
+
+/*
+ * ac group configuration (4 entries)
+ */
+#define AC_GRP_CFG_TBL_REG(idx, off) \
+	(QUEUE_MANAGER_BASE_ADDR + 0x4c000 + (idx) * 0x10 + (off) * 4)
+
+#define AC_GRP_CFG0_AC_EN_MASK		(1 << 0)
+#define AC_GRP_CFG0_FORCE_AC_EN_MASK	(1 << 1)
+#define AC_GRP_CFG0_COLOR_AWARE_MASK	(1 << 2)
+#define AC_GRP_CFG0_GAP_GRN_RED_SHIFT	3
+#define AC_GRP_CFG0_GAP_GRN_YEL_SHIFT	14
+#define AC_GRP_CFG0_DROP_TRESH_LO_SHIFT	25
+
+#define AC_GRP_CFG1_DROP_TRESH_HI_SHIFT		0
+#define AC_GRP_CFG1_GRP_LIMIT_SHIFT		4
+#define AC_GRP_CFG1_RED_RESUME_OFF_SHIFT	15
+#define AC_GRP_CFG1_YEL_RESUME_OFF_LO_SHIFT	26
+
+#define AC_GRP_CFG2_YEL_RESUME_OFF_HI_SHIFT	0
+#define AC_GRP_CFG2_GRN_RESUME_OFF_SHIFT	5
+#define AC_GRP_CFG2_PALLOC_LIMIT_SHIFT		16
+
+
+/***************************************************************
+ *
+ * IPO registers
+ *
+ ***************************************************************/
+
+#define RSS_HASH_MASK_REG	(IPO_CSR_BASE_ADDR + 0x4318)
+
+#define RSS_HASH_SEED_REG	(IPO_CSR_BASE_ADDR + 0x431c)
+
+#define RSS_HASH_MIX_REG(x)	(IPO_CSR_BASE_ADDR + 0x4320 + (x) * 0x4)
+
+#define RSS_HASH_FIN_REG(x)	(IPO_CSR_BASE_ADDR + 0x4350 + (x) * 0x4)
+
+#define RSS_HASH_MASK_IPV4_REG	(IPO_CSR_BASE_ADDR + 0x4380)
+
+#define RSS_HASH_SEED_IPV4_REG	(IPO_CSR_BASE_ADDR + 0x4384)
+
+#define RSS_HASH_MIX_IPV4_REG(x) \
+	(IPO_CSR_BASE_ADDR + 0x4390 + (x) * 0x4)
+
+#define RSS_HASH_FIN_IPV4_REG(x) \
+	(IPO_CSR_BASE_ADDR + 0x43b0 + (x) * 0x4)
+
+
+/***************************************************************
+ *
+ * IPO rules definitions, FIXME: switch this to shift/mask instead of
+ * bitmask
+ *
+ ***************************************************************/
+
+#define IPO_RULE_TBL_REG(rule_id)	\
+	(IPO_CSR_BASE_ADDR + 0x0 + 0x10 * (rule_id))
+
+#define IPO_MASK_TBL_REG(rule_id)	\
+	(IPO_CSR_BASE_ADDR + 0x2000 + 0x10 * (rule_id))
+
+#define IPO_ACTION_TBL_REG(rule_id)	\
+	(IPE_L2_BASE_ADDR + 0x8000 + 0x20 * (rule_id))
+
+/* 64 entries */
+#define IPO_RULE_EXT1_TBL_REG(rule_id)	\
+	(IPO_CSR_BASE_ADDR + 0x4000 + 0x4 * (rule_id))
+#define IPO_RULE_EXT2_TBL_REG(rule_id)	\
+	(IPO_CSR_BASE_ADDR + 0x4100 + 0x4 * (rule_id))
+#define IPO_RULE_EXT4_TBL_REG(rule_id)	\
+	(IPO_CSR_BASE_ADDR + 0x4200 + 0x4 * (rule_id))
+
+struct ipo_rule_ext_1 {
+	u32  ext2_0:1;
+	u32  ext2_1:1;
+	u32  ext2_2:1;
+	u32  ext2_3:1;
+	u32  _reserved0:28;
+};
+
+union ipo_rule_ext_1_u {
+	u32 val;
+	struct ipo_rule_ext_1 bf;
+};
+
+struct ipo_rule_ext_2 {
+	u32  ext4_0:1;
+	u32  ext4_1:1;
+	u32  _reserved0:30;
+};
+
+union ipo_rule_ext_2_u {
+	u32 val;
+	struct ipo_rule_ext_2 bf;
+};
+
+struct ipo_rule_ext_4 {
+	u32  ext8:1;
+	u32  _reserved0:31;
+};
+
+union ipo_rule_ext_4_u {
+	u32 val;
+	struct ipo_rule_ext_4 bf;
+};
+
+
+#define IPO_RULE_TYPE_MAC_DA		0
+#define IPO_RULE_TYPE_MAC_SA		1
+#define IPO_RULE_TYPE_VLAN		2
+#define IPO_RULE_TYPE_L2_MISC		3
+#define IPO_RULE_TYPE_IPV4_DIP		4
+#define IPO_RULE_TYPE_IPV4_SIP		5
+#define IPO_RULE_TYPE_IPV6_DIP0		6
+#define IPO_RULE_TYPE_IPV6_DIP1		7
+#define IPO_RULE_TYPE_IPV6_DIP2		8
+#define IPO_RULE_TYPE_IPV6_SIP0		9
+#define IPO_RULE_TYPE_IPV6_SIP1		10
+#define IPO_RULE_TYPE_IPV6_SIP2		11
+#define IPO_RULE_TYPE_IPMISC		12
+#define IPO_RULE_TYPE_WINDOW0		13
+
+#define IPO_SRC_TYPE_PORTBITMAP		0
+#define IPO_SRC_TYPE_PORT		1
+#define IPO_SRC_TYPE_SERVICE_CODE	2
+#define IPO_SRC_TYPE_TUNNEL_L3_IF	3
+#define IPO_SRC_TYPE_VP_GROUP		4
+#define IPO_SRC_TYPE_TUNNEL_PORT	5
+#define IPO_SRC_TYPE_TUNNEL_VP_GROUP	6
+#define IPO_SRC_TYPE_SERVICE_PORTBITMAP	7
+
+struct ipo_rule_reg {
+	u32  rule_field_0:32;
+        u32  rule_field_1:21;
+        u32  range_en:1;
+        u32  inverse_en:1;
+        u32  rule_type:5;
+        u32  src_type:3;
+        u32  src_0:1;
+        u32  src_1:7;
+        u32  pri:9;
+        u32  res_chain:1;
+        u32  post_routing_en:1;
+        u32  _reserved0:14;
+};
+
+union ipo_rule_reg_u {
+	u32 val[3];
+	struct ipo_rule_reg bf;
+};
+
+struct ipo_mask_reg {
+	u32  maskfield_0:32;
+	u32  maskfield_1:21;
+	u32  _reserved0:11;
+};
+
+union ipo_mask_reg_u {
+	u32 val[2];
+	struct ipo_mask_reg bf;
+};
+
+#define DEST_INFO_TYPE_SHIFT	12
+#define DEST_INFO_TYPE_PORT_ID	2
+#define DEST_INFO_TYPE_PORT_BMP	3
+
+struct ipo_action {
+        u32  dest_info_change_en:1;
+        u32  fwd_cmd:2;
+        u32  dest_info:14;
+        u32  mirror_en:1;
+        u32  bypass_bitmap_0:14;
+        u32  bypass_bitmap_1:18;
+        u32  svid_change_en:1;
+        u32  stag_fmt:1;
+        u32  svid:12;
+        u32  cvid_change_en:1;
+        u32  ctag_fmt:1;
+        u32  cvid:12;
+        u32  dscp_tc_change_en:1;
+        u32  dscp_tc:8;
+        u32  stag_pcp_change_en:1;
+        u32  stag_pcp:3;
+        u32  stag_dei_change_en:1;
+        u32  stag_dei:1;
+        u32  ctag_pcp_change_en:1;
+        u32  ctag_pcp_0:2;
+        u32  ctag_pcp_1:1;
+        u32  ctag_dei_change_en:1;
+        u32  ctag_dei:1;
+        u32  enqueue_pri_change_en:1;
+        u32  enqueue_pri:4;
+        u32  int_dp_change_en:1;
+        u32  int_dp:2;
+        u32  policer_en:1;
+        u32  policer_index:9;
+        u32  qid_en:1;
+        u32  qid:8;
+        u32  service_code_en:1;
+        u32  service_code_0:1;
+        u32  service_code_1:7;
+        u32  syn_toggle:1;
+        u32  cpu_code_en:1;
+        u32  cpu_code:8;
+        u32  metadata_en:1;
+        u32  dscp_tc_mask:8;
+        u32  qos_res_prec:3;
+        u32  _reserved0:3;
+};
+
+union ipo_action_u {
+	u32 val[5];
+	struct ipo_action bf;
+};
+
+/***************************************************************
+ *
+ * IPR registers (input parser)
+ *
+ ***************************************************************/
+
+#define IPR_UDF_CTRL_REG(idx)	\
+	(IPR_CSR_BASE_ADDR + 0x400 + (idx) * 0x4)
+
+struct ipr_udf_ctrl {
+        u32  l3_type:2;
+        u32  l3_type_incl:1;
+        u32  _reserved0:1;
+        u32  l4_type:3;
+        u32  l4_type_incl:1;
+        u32  udf_profile:3;
+        u32  _reserved1:20;
+        u32  valid:1;
+};
+
+union ipr_udf_ctrl_u {
+        u32 val;
+	struct ipr_udf_ctrl bf;
+};
+
+#define IPR_UDF_PROFILE_BASE_REG(prof)	\
+	(IPR_CSR_BASE_ADDR + 0x440 + (prof) * 0x4)
+
+struct ipr_udf_profile_base {
+        u32  udf0_base:2;
+        u32  _reserved0:6;
+        u32  udf1_base:3;
+        u32  _reserved1:5;
+        u32  udf2_base:3;
+        u32  _reserved2:5;
+        u32  udf3_base:3;
+        u32  _reserved3:5;
+};
+
+union ipr_udf_profile_base_u {
+        u32 val;
+        struct ipr_udf_profile_base bf;
+};
+
+#define IPR_UDF_PROFILE_OFFSET_REG(prof)	\
+	(IPR_CSR_BASE_ADDR + 0x460 + (prof) * 0x4)
+
+struct ipr_udf_profile_offset {
+        u32  udf0_offset:6;
+        u32  _reserved0:2;
+        u32  udf1_offset:6;
+        u32  _reserved1:2;
+        u32  udf2_offset:6;
+        u32  _reserved2:2;
+        u32  udf3_offset:6;
+        u32  _reserved3:2;
+};
+
+union ipr_udf_profile_offset_u {
+        u32 val;
+        struct ipr_udf_profile_offset bf;
+};
+
+#endif /* PPE_REGS_H_ */
+
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./regs/uniphy_regs.h linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/regs/uniphy_regs.h
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./regs/uniphy_regs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/regs/uniphy_regs.h	2023-09-26 16:07:16.231543515 +0200
@@ -0,0 +1,459 @@
+#ifndef UNIPHY_REGS_H_
+#define UNIPHY_REGS_H_
+
+#define UNIPHY_REGS_OFFSET(id)		((id) * 0x10000)
+
+/***************************************************************
+ *
+ * CSR0 area (direct AHB access)
+ *
+ ***************************************************************/
+
+/*
+ * calibration register (FIXME: not present in HRD)
+ */
+#define UPHY_CALIB_4_REG		0x1e0
+#define CALIB_4_IS_DONE_MASK		(1 << 7)
+
+/*
+ * misc2 register (FIXME: not present in HRD)
+ */
+#define UPHY_MISC2_REG			0x218
+#define MISC2_PHY_RATE_SHIFT		0
+#define MISC2_PHY_RATE_MASK		(0x3 << MISC2_PHY_RATE_SHIFT)
+#define MISC2_PHY_MODE_SHIFT		4
+#define MISC2_PHY_MODE_MASK		(0x7 << MISC2_PHY_MODE_SHIFT)
+#define MISC2_PHY_MODE_SGMII		3
+#define MISC2_PHY_MODE_SGMII_PLUS	5
+#define MISC2_PHY_MODE_USXGMII		7
+
+/*
+ * mode control register
+ */
+#define UPHY_MODE_CTRL_REG		0x46c
+#define MODE_CTRL_ANEG_MODE_SHIFT	0
+#define MODE_CTRL_ANEG_MODE_MASK	(1 << MODE_CTRL_ANEG_MODE_SHIFT)
+#define MODE_CTRL_ANEG_MODE_ATH		(0 << MODE_CTRL_ANEG_MODE_SHIFT)
+#define MODE_CTRL_ANEG_MODE_CISCO	(1 << MODE_CTRL_ANEG_MODE_SHIFT)
+
+/* SGMII channel selection, only valid for uniphy0 */
+#define MODE_CTRL_CHAN_SEL_SHIFT	1
+#define MODE_CTRL_CHAN_SEL_MASK		(3 << MODE_CTRL_CHAN_SEL_SHIFT)
+#define MODE_CTRL_CHAN_SEL_CH0		0
+#define MODE_CTRL_CHAN_SEL_CH1		1
+#define MODE_CTRL_CHAN_SEL_CH4		2
+
+/* working mode for [QP]SGMII channels */
+#define MODE_CTRL_CHAN_MODE_SHIFT	4
+#define MODE_CTRL_CHAN_MODE_MASK	(7 << MODE_CTRL_CHAN_MODE_SHIFT)
+#define MODE_CTRL_CHAN_MODE_1000BX	0
+#define MODE_CTRL_CHAN_MODE_SGMII_PHY	1
+#define MODE_CTRL_CHAN_MODE_SGMII_MAC	2
+
+/* only valid for uniphy0 */
+#define MODE_CTRL_MUX_MODE_SHIFT	8
+#define MODE_CTRL_MUX_MODE_MASK		(3 << MODE_CTRL_MUX_MODE_SHIFT)
+#define MODE_CTRL_MUX_MODE_SGMII	0x0
+#define MODE_CTRL_MUX_MODE_QSGMII	0x1
+#define MODE_CTRL_MUX_MODE_PSGMII	0x2
+
+/* only valid for uniphy[12] */
+#define MODE_CTRL_SG_MODE_MASK		(1 << 10)
+#define MODE_CTRL_SGPLUS_MODE_MASK	(1 << 11)
+
+#define MODE_CTRL_XPCS_MODE_MASK	(1 << 12)
+#define MODE_CTRL_USXG_EN_MASK		(1 << 13)
+
+#define MODE_CTRL_SGMII_VER_SHIFT	15
+#define MODE_CTRL_SGMII_VER_MASK	(1 << MODE_CTRL_SGMII_VER_SHIFT)
+#define MODE_CTRL_SGMII_VER_18		(0 << MODE_CTRL_SGMII_VER_SHIFT)
+#define MODE_CTRL_SGMII_VER_17		(1 << MODE_CTRL_SGMII_VER_SHIFT)
+
+/*
+ * channel status (was INPUT_OUTPUT_1) (FIXME: not present in HRD)
+ */
+#define UPHY_SGMII_ST_CHx_REG(x)	(0x474 + (x) * 0x18)
+#define SGMII_ST_CHx_SYNC_ST_MASK	(1 << 3)
+#define SGMII_ST_CHx_ANEG_COMPLETE_MASK	(1 << 7)
+
+/*
+ * LPA (was INPUT_OUTPUT_2) (FIXME: not present in HRD)
+ * MII_LPA format
+ */
+#define UPHY_SGMII_LPA_CHx_REG(x)	(0x478 + (x) * 0x18)
+
+/*
+ * SGMII control register (was INPUT_OUTPUT_4) (FIXME: not present in HRD)
+ */
+#define UPHY_SGMII_CTRL_CHx_REG(x)	(0x480 + (x) * 0x18)
+#define SGMII_CTRL_CHx_SPEED_SHIFT	1
+#define SGMII_CTRL_CHx_SPEED_MASK	(0x3 << SGMII_CTRL_CHx_SPEED_SHIFT)
+#define SGMII_CTRL_CHx_SPEED_10M	0
+#define SGMII_CTRL_CHx_SPEED_100M	1
+#define SGMII_CTRL_CHx_SPEED_1G		2
+#define SGMII_CTRL_CHx_FORCE_SPEED_MASK	(1 << 3)
+#define SGMII_CTRL_CHx_AN_ENABLE_MASK	(1 << 6)
+#define SGMII_CTRL_CHx_AN_RESTART_MASK	(1 << 7)
+
+/*
+ * SGMII autoneg control (was INPUT_OUTPUT_5) (FIXME: not present in HRD)
+ */
+#define UPHY_SGMII_ANEGC_CHx_REG(x)	(0x484 + (x) * 0x18)
+#define SGMII_ANEGC_CHx_AS_PAUSE_MASK	(1 << 0)
+#define SGMII_ANEGC_CHx_PAUSE_MASK	(1 << 1)
+#define SGMII_ANEGC_CHx_NEXT_PAGE_MASK	(1 << 2)
+#define SGMII_ANEGC_CHx_SG_PAUSE_TX_MASK	(1 << 3)
+#define SGMII_ANEGC_CHx_SPEED_SHIFT	4
+#define SGMII_ANEGC_CHx_SPEED_MASK	(0x3 << SGMII_ANEGC_CHx_SPEED_SHIFT)
+#define SGMII_ANEGC_CHx_SPEED_10M	0
+#define SGMII_ANEGC_CHx_SPEED_100M	1
+#define SGMII_ANEGC_CHx_SPEED_1G	2
+#define SGMMI_ANEGC_CHx_SG_FD_MASK	(1 << 6)
+#define SGMMI_ANEGC_CHx_LINK_MASK	(1 << 7)
+#define SGMMI_ANEGC_CHx_RFAULT_SHIFT	8
+#define SGMMI_ANEGC_CHx_RFAULT_MASK	(0x3 << SGMMI_ANEGC_CHx_RFAULT_SHIFT)
+#define SGMMI_ANEGC_CHx_1000X_HD_MASK	(1 << 10)
+#define SGMMI_ANEGC_CHx_1000X_FD_MASK	(1 << 11)
+
+/*
+ * SGMII autoneg remote info (was INPUT_OUTPUT_6)
+ */
+#define UPHY_SGMII_ANEG_CHx_REG(x)	(0x488 + (x) * 0x18)
+#define SGMII_ANEG_CHx_RX_PAUSE_MASK	(1 << 0)
+#define SGMII_ANEG_CHx_TX_PAUSE_MASK	(1 << 1)
+#define SGMII_ANEG_CHx_ASYM_PAUSE_MASK	(1 << 2)
+#define SGMII_ANEG_CHx_PAUSE_MASK	(1 << 3)
+#define SGMII_ANEG_CHx_SPEED_SHIFT	4
+#define SGMII_ANEG_CHx_SPEED_MASK	(0x3 << SGMII_ANEG_CHx_SPEED_SHIFT)
+#define SGMII_ANEG_CHx_SPEED_10M	0
+#define SGMII_ANEG_CHx_SPEED_100M	1
+#define SGMII_ANEG_CHx_SPEED_1G		2
+#define SGMII_ANEG_CHx_FD_MASK		(1 << 6)
+#define SGMII_ANEG_CHx_LINK_MASK	(1 << 7)
+
+/*
+ * Link detect (FIXME: not present in HRD)
+ */
+#define UPHY_LINK_DET_REG		0x570
+#define UPHY_LINK_DET_EN_MASK		(0x7 << 6)
+
+/*
+ * USXG option (FIXME: not present in HRD)
+ */
+#define UPHY_USXG_OPT_REG		0x584
+#define USXG_OPT_GMII_SRC_SEL_MASK	(1 << 0)
+
+/*
+ * PLL register (FIXME: not present in HRD)
+ */
+#define UPHY_PLL_REG			0x780
+#define UPHY_PLL_RESET_MASK		(1 << 6)
+
+/*
+ * CSR1 indirect access registers
+ */
+#define UPHY_INDIR_ADDR_REG		0x83fc
+
+/***************************************************************
+ *
+ * CSR1 area (indirect access through CSR0 special registers)
+ *
+ ***************************************************************/
+
+/*
+ * SR_XS_PCS_CTRL1
+ */
+#define UPHY_SR_XS_PCS_CTRL1_REG	0x30000
+
+/*
+ * SR_XS_PCS_STS1
+ */
+#define UPHY_SR_XS_PCS_STS1_REG		0x30001
+
+/*
+ * SR_XS_PCS_SPD_ABL
+ */
+#define UPHY_SR_XS_PCS_SPD_ABL_REG	0x30004
+
+/*
+ * SR_XS_PCS_DEV_PKG1
+ */
+#define UPHY_SR_XS_PCS_DEV_PKG1_REG	0x30005
+
+/*
+ * SR_XS_PCS_DEV_PKG2
+ */
+#define UPHY_SR_XS_PCS_DEV_PKG2_REG	0x30006
+
+/*
+ * SR_XS_PCS_CTRL2
+ */
+#define UPHY_SR_XS_PCS_CTRL2_REG	0x30007
+
+/*
+ * SR_XS_PCS_STS2
+ */
+#define UPHY_SR_XS_PCS_STS2_REG		0x30008
+
+/*
+ * SR_XS_PCS_STS3
+ */
+#define UPHY_SR_XS_PCS_STS3_REG		0x30009
+
+/*
+ * SR_XS_PCS_EEE_ABL
+ */
+#define UPHY_SR_XS_PCS_EEE_ABL_REG	0x30014
+
+/*
+ * SR_XS_PCS_EEE_ABL2
+ */
+#define UPHY_SR_XS_PCS_EEE_ABL2_REG	0x30015
+
+/*
+ * SR_XS_PCS_EEE_WKERR
+ */
+#define UPHY_SR_XS_PCS_EEE_WKERR_REG	0x30016
+
+/*
+ * SR_XS_PCS_LSTS
+ */
+#define UPHY_SR_XS_PCS_LSTS_REG		0x30018
+
+/*
+ * SR_XS_PCS_KR_STS1
+ */
+#define UPHY_SR_XS_PCS_KR_STS1_REG	0x30020
+#define SR_XS_PCS_KR_STS1_PCS_LINKUP_MASK	(1 << 12)
+
+/*
+ * SR_XS_PCS_KR_STS2
+ */
+#define UPHY_SR_XS_PCS_KR_STS2_REG	0x30021
+#define SR_XS_PCS_KR_STS2_LL_HBER_MASK	(1 << 14)
+#define SR_XS_PCS_KR_STS2_LL_BLOCK_LOCK_MASK	(1 << 15)
+
+/*
+ * SR_XS_PCS_TP_Ax
+ */
+#define UPHY_SR_XS_PCS_TP_Ax_REG(x)	(0x30022 + (x))
+
+/*
+ * SR_XS_PCS_TP_Bx
+ */
+#define UPHY_SR_XS_PCS_TP_Bx_REG(x)	(0x30026 + (x))
+
+/*
+ * SR_XS_PCS_TP_CTRL
+ */
+#define UPHY_SR_XS_PCS_TP_CTRL_REG	0x3002a
+
+/*
+ * SR_XS_PCS_TP_ERRCTR
+ */
+#define UPHY_SR_XS_PCS_TP_ERRCTR_REG	0x3002b
+
+/*
+ * VR_XS_PCS_DIG_CTRL1
+ */
+#define UPHY_VR_XS_PCS_DIG_CTRL1_REG	0x38000
+#define VR_XS_PCS_DIG_CTRL1_USXG_EN_MASK	(1 << 9)
+#define VR_XS_PCS_DIG_CTRL1_USRA_RST_MASK	(1 << 10)
+#define VR_XS_PCS_DIG_CTRL1_VR_RST_MASK		(1 << 15)
+
+/*
+ * VR_XS_PCS_DIG_CTRL2
+ */
+#define UPHY_VR_XS_PCS_DIG_CTRL2_REG	0x38001
+
+/*
+ * VR_XS_PCS_DEBUG_CTRL
+ */
+#define UPHY_VR_XS_PCS_DEBUG_CTRL_REG	0x38005
+
+/*
+ * VR_XS_PCS_EEE_MCTRL0
+ */
+#define UPHY_VR_XS_PCS_EEE_MCTRL0_REG	0x38006
+#define VR_XS_PCS_EEE_MCTRL0_LTX_EN_MASK	(1 << 0)
+#define VR_XS_PCS_EEE_MCTRL0_LRX_EN_MASK	(1 << 1)
+#define VR_XS_PCS_EEE_MCTRL0_RX_QUIET_EN_MASK	(1 << 2)
+#define VR_XS_PCS_EEE_MCTRL0_TX_QUIET_EN_MASK	(1 << 3)
+#define VR_XS_PCS_EEE_MCTRL0_TX_EN_CTRL_MASK	(1 << 4)
+#define VR_XS_PCS_EEE_MCTRL0_EEE_SLR_BYP_MASK	(1 << 5)
+#define VR_XS_PCS_EEE_MCTRL0_SIGN_BIT_MASK	(1 << 6)
+#define VR_XS_PCS_EEE_MCTRL0_RX_EN_CTRL_MASK	(1 << 7)
+#define VR_XS_PCS_EEE_MCTRL0_MULT_FACT_100NS_SHIFT	8
+#define VR_XS_PCS_EEE_MCTRL0_MULT_FACT_100NS_MASK	(0xf << VR_XS_PCS_EEE_MCTRL0_MULT_FACT_100NS_SHIFT)
+#define VR_XS_PCS_EEE_MCTRL0_CLK_STOP_SHIFT	12
+#define VR_XS_PCS_EEE_MCTRL0_CLK_STOP_MASK	(0xf << VR_XS_PCS_EEE_MCTRL0_CLK_STOP_SHIFT)
+
+/*
+ * VR_XS_PCS_KR_CTRL
+ */
+#define UPHY_VR_XS_PCS_KR_CTRL_REG	0x38007
+#define VR_XS_PCS_KR_CTRL_MODE_SHIFT	10
+#define VR_XS_PCS_KR_CTRL_MODE_MASK	(0x7 << VR_XS_PCS_KR_CTRL_MODE_SHIFT)
+#define VR_XS_PCS_KR_CTRL_MODE_10G_SXGMII	0
+#define VR_XS_PCS_KR_CTRL_MODE_5G_SXGMII	1
+#define VR_XS_PCS_KR_CTRL_MODE_2_5G_SXGMII	2
+#define VR_XS_PCS_KR_CTRL_MODE_10G_DXGMII	3
+#define VR_XS_PCS_KR_CTRL_MODE_5G_DXGMII	4
+#define VR_XS_PCS_KR_CTRL_MODE_10G_QXGMII	5
+
+/*
+ * VR_XS_PCS_EEE_TXTIMER
+ */
+#define UPHY_VR_XS_PCS_EEE_TXTIMER_REG	0x38008
+#define VR_XS_PCS_EEE_TXTIMER_TSL_RES_SHIFT	0
+#define VR_XS_PCS_EEE_TXTIMER_TSL_RES_MASK	(0x3f << VR_XS_PCS_EEE_TXTIMER_TSL_RES_SHIFT)
+#define VR_XS_PCS_EEE_TXTIMER_T1U_RES_SHIFT	6
+#define VR_XS_PCS_EEE_TXTIMER_T1U_RES_MASK	(0x3 << VR_XS_PCS_EEE_TXTIMER_T1U_RES_SHIFT)
+#define VR_XS_PCS_EEE_TXTIMER_TWL_RES_SHIFT	8
+#define VR_XS_PCS_EEE_TXTIMER_TWL_RES_MASK	(0xf << VR_XS_PCS_EEE_TXTIMER_TWL_RES_SHIFT)
+
+/*
+ * VR_XS_PCS_EEE_RXTIMER
+ */
+#define UPHY_VR_XS_PCS_EEE_RXTIMER_REG	0x38009
+#define VR_XS_PCS_EEE_RXTIMER_RES_100U_SHIFT	0
+#define VR_XS_PCS_EEE_RXTIMER_RES_100U_MASK	(0xff << VR_XS_PCS_EEE_RXTIMER_RES_100U_SHIFT)
+#define VR_XS_PCS_EEE_RXTIMER_TWR_RES_SHIFT	8
+#define VR_XS_PCS_EEE_RXTIMER_TWR_RES_MASK	(0x3f << VR_XS_PCS_EEE_RXTIMER_TWR_RES_SHIFT)
+
+/*
+ * VR_XS_PCS_DIG_STS
+ */
+#define UPHY_VR_XS_PCS_DIG_STS_REG	0x3800a
+
+/*
+ * VR_XS_PCS_EEE_MCTRL1
+ */
+#define UPHY_VR_XS_PCS_EEE_MCTRL1_REG	0x3800b
+#define VR_XS_PCS_EEE_MCTRL1_TRANSP_TX_LPI_EN_MASK	(1 << 0)
+#define VR_XS_PCS_EEE_MCTRL1_TXEN_EA_TMR_SHIFT	1
+#define VR_XS_PCS_EEE_MCTRL1_TXEN_EA_TMR_MASK	(0x3f << VR_XS_PCS_EEE_MCTRL1_TXEN_EA_TMR_SHIFT)
+#define VR_XS_PCS_EEE_MCTRL1_TRANSP_RX_LPI_EN_MASK	(1 << 8)
+
+
+/*
+ * VR_XS_PCS_NSNPS_EEE_CTRL
+ */
+#define UPHY_VR_XS_PCS_NSNPS_EEE_CTRL_REG	0x3800c
+
+/*
+ * VR_XS_PCS_DIG_STS_ALT (duplicated name with 0x3800a)
+ */
+#define UPHY_VR_XS_PCS_DIG_STS_ALT_REG	0x38010
+
+/*
+ * VR_XS_PCS_CDT_STS
+ */
+#define UPHY_VR_XS_PCS_CDT_STS_REG	0x38018
+
+/*
+ * VR_XS_PCS_MISC_STS
+ */
+#define UPHY_VR_XS_PCS_MISC_STS_REG	0x38019
+
+
+
+/*
+ * per "virtual channel" mii register access, note that channel 0 is
+ * the main register
+ */
+#define CHANx_OFF(chan_id)	\
+	((chan_id) == 0 ? 0x1f0000 : (0x1a0000 + ((chan_id) - 1) * 0x10000))
+
+/*
+ * SR_MII_CTRL
+ */
+#define UPHY_SR_MII_CTRL_CHANx_REG(chan_id)	\
+	(CHANx_OFF(chan_id) + 0x0)
+#define UPHY_SR_MII_CTRL_REG		UPHY_SR_MII_CTRL_CHANx_REG(0)
+#define SR_MII_CTRL_FDX_MASK		(1 << 8)
+#define SR_MII_CTRL_AN_RESTART_MASK	(1 << 9)
+#define SR_MII_CTRL_AN_ENABLE_MASK	(1 << 12)
+#define SR_MII_CTRL_SPEED_MASK		((1 << 13) | (1 << 6) | (1 << 5))
+#define SR_MII_CTRL_SPEED_5G_MASK	((1 << 13) | (0 << 6) | (1 << 5))
+#define SR_MII_CTRL_SPEED_2D5G_MASK	((0 << 13) | (0 << 6) | (1 << 5))
+#define SR_MII_CTRL_SPEED_10G_MASK	((1 << 13) | (1 << 6) | (0 << 5))
+#define SR_MII_CTRL_SPEED_1G_MASK	((0 << 13) | (1 << 6) | (0 << 5))
+#define SR_MII_CTRL_SPEED_100M_MASK	((1 << 13) | (0 << 6) | (0 << 5))
+#define SR_MII_CTRL_SPEED_10M_MASK	((0 << 13) | (0 << 6) | (0 << 5))
+
+/*
+ * SR_MII_EXPN
+ */
+#define UPHY_SR_MII_EXPN_CHANx_REG(chan_id)	\
+	(CHANx_OFF(chan_id) + 0x6)
+#define UPHY_SR_MII_EXPN_REG		UPHY_SR_MII_EXPN_CHANx_REG(0)
+
+/*
+ * VR_MII_DIG_CTRL1
+ */
+#define UPHY_VR_MII_DIG_CTRL1_CHANx_REG(chan_id)	\
+	(CHANx_OFF(chan_id) + 0x8000)
+#define UPHY_VR_MII_DIG_CTRL1_REG	UPHY_VR_MII_DIG_CTRL1_CHANx_REG(0)
+#define VR_MII_DIG_CTRL1_USRA_RST_MASK	(1 << 5) /* only for chan[123] */
+
+/*
+ * VR_MII_AN_CTRL
+ */
+#define UPHY_VR_MII_AN_CTRL_CHANx_REG(chan_id)	\
+	(CHANx_OFF(chan_id) + 0x8001)
+#define UPHY_VR_MII_AN_CTRL_REG		UPHY_VR_MII_AN_CTRL_CHANx_REG(0)
+
+#define VR_MIIx_AN_CTRL_AN_INTR_EN_MASK	(1 << 0)
+#define VR_MIIx_AN_CTRL_SGMII_LINK_STS_MASK	(1 << 4)
+#define VR_MIIx_AN_CTRL_MII_CTRL_SHIFT	8
+#define VR_MIIx_AN_CTRL_MII_CTRL_MASK 	(1 << 8)
+#define VR_MIIx_AN_CTRL_MII_CTRL_WIDTH_4BIT 	0
+#define VR_MIIx_AN_CTRL_MII_CTRL_WIDTH_8BIT 	1
+
+/*
+ * VR_MII_AN_INTR_STS
+ */
+#define UPHY_VR_MII_AN_INTR_STS_CHANx_REG(chan_id)	\
+	(CHANx_OFF(chan_id) + 0x8002)
+#define UPHY_VR_MII_AN_INTR_STS_REG	UPHY_VR_MII_AN_INTR_STS_CHANx_REG(0)
+
+#define VR_MII_AN_INTR_STS_AN_DONE_MASK	(1 << 0)
+
+/* USXSGMII only aneg results */
+#define VR_MII_AN_INTR_STS_EEE_CLK_STOP_SUPP_MASK	(1 << 8)
+#define VR_MII_AN_INTR_STS_EEE_SUPP_MASK		(1 << 9)
+#define VR_MII_AN_INTR_STS_SPEED_SHIFT	10
+#define VR_MII_AN_INTR_STS_SPEED_MASK	(0x7 << VR_MII_AN_INTR_STS_SPEED_SHIFT)
+#define VR_MII_AN_INTR_STS_SPEED_10M	0
+#define VR_MII_AN_INTR_STS_SPEED_100M	1
+#define VR_MII_AN_INTR_STS_SPEED_1G	2
+#define VR_MII_AN_INTR_STS_SPEED_10G	3
+#define VR_MII_AN_INTR_STS_SPEED_2D5G	4
+#define VR_MII_AN_INTR_STS_SPEED_5G	5
+#define VR_MII_AN_INTR_STS_FDX_MASK	(1 << 13)
+#define VR_MII_AN_INTR_STS_LINK_UP_MASK	(1 << 14)
+
+/*
+ * VR_XAUI_MODE_CTRL
+ */
+#define UPHY_VR_XAUI_MODE_CTRL_CHANx_REG(chan_id)	\
+	(CHANx_OFF(chan_id) + 0x8004)
+#define UPHY_VR_XAUI_MODE_CTRL_REG	UPHY_VR_XAUI_MODE_CTRL_CHANx_REG(0)
+#define VR_XAUI_MODE_CTRL_DIS_TX_IPG_CHK_MASK	(1 << 0)
+
+/*
+ * VR_MII_LINK_TIMER_CTRL
+ */
+#define UPHY_VR_MII_LINK_TIMER_CTRL_CHANx_REG(chan_id)	\
+	(CHANx_OFF(chan_id) + 0x800a)
+#define UPHY_VR_MII_LINK_TIMER_CTRL_REG	UPHY_VR_MII_LINK_TIMER_CTRL_CHANx_REG(0)
+
+/*
+ * VR_MII_DIG_STS
+ */
+#define UPHY_VR_MII_DIG_STS_CHANx_REG(chan_id)	\
+	(CHANx_OFF(chan_id) + 0x8010)
+#define UPHY_VR_MII_DIG_STS_REG		UPHY_VR_MII_DIG_STS_CHANx_REG(0)
+
+#endif /* UNIPHY_REGS_H_ */
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./uniphy.c linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/uniphy.c
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./uniphy.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/uniphy.c	2023-12-05 17:14:42.299715016 +0100
@@ -0,0 +1,1524 @@
+#include "uniphy_priv.h"
+
+#define DEBOUNCE_INTV_MSECS	50
+#define DEBOUNCE_TOTAL_MSECS	300
+
+/*
+ *
+ */
+static void build_csr1_addr(u32 addr, u32 *r1, u32 *r2)
+{
+	*r1 = (addr & 0xffffff) >> 8;
+	*r2 = (0x20 << 10) | ((addr & 0xff) << 2);
+}
+
+/*
+ *
+ */
+u32 uphy_readl(struct ess_uniphy *uniphy, u32 reg)
+{
+	void *base = uniphy->priv->regs[2] + UNIPHY_REGS_OFFSET(uniphy->id);
+	u32 r1, r2;
+
+	if (WARN_ON(reg > 0x1fffff))
+		return 0;
+
+	if (reg < 0x7fff)
+		return readl(base + reg);
+
+	build_csr1_addr(reg, &r1, &r2);
+	writel(r1, base + UPHY_INDIR_ADDR_REG);
+	return readl(base + r2);
+}
+
+/*
+ *
+ */
+void uphy_writel(struct ess_uniphy *uniphy, u32 reg, u32 val)
+{
+	void *base = uniphy->priv->regs[2] + UNIPHY_REGS_OFFSET(uniphy->id);
+	u32 r1, r2;
+
+	if (WARN_ON(reg > 0x1fffff))
+		return;
+
+	if (reg < 0x7fff) {
+		writel(val, base + reg);
+		return;
+	}
+
+	build_csr1_addr(reg, &r1, &r2);
+	writel(r1, base + UPHY_INDIR_ADDR_REG);
+	writel(val, base + r2);
+}
+
+/*
+ *
+ */
+static void trigger_reset(struct ess_uniphy *uniphy)
+{
+	reset_control_assert(uniphy->soft_rst);
+	reset_control_assert(uniphy->sys_rst);
+	msleep(100);
+	reset_control_deassert(uniphy->soft_rst);
+	reset_control_deassert(uniphy->sys_rst);
+	msleep(10);
+}
+
+/*
+ *
+ */
+static int uniphy_powerup(struct ess_uniphy *uniphy)
+{
+	int ret;
+
+	ret = clk_prepare_enable(uniphy->clocks->ahb);
+	if (ret) {
+		uniphy_err(uniphy, "failed to enable ahb clock: %d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(uniphy->clocks->sys);
+	if (ret) {
+		uniphy_err(uniphy, "failed to enable sys clock: %d\n", ret);
+		clk_disable_unprepare(uniphy->clocks->ahb);
+		return ret;
+	}
+
+	trigger_reset(uniphy);
+	uniphy->powered_up = true;
+	return 0;
+}
+
+/*
+ *
+ */
+static void uniphy_powerdown(struct ess_uniphy *uniphy)
+{
+	if (!uniphy->powered_up)
+		return;
+
+	clk_disable_unprepare(uniphy->clocks->sys);
+	clk_disable_unprepare(uniphy->clocks->ahb);
+	trigger_reset(uniphy);
+	uniphy->powered_up = false;
+}
+
+/*
+ * return pcs link status
+ */
+static bool xpcs_has_10gr_link(struct ess_uniphy *uniphy)
+{
+	u32 val;
+	bool link;
+
+	/* link is up if 64/66b lock is achieved, and no high bit
+	 * error condition is achieved */
+	val = uphy_readl(uniphy, UPHY_SR_XS_PCS_KR_STS1_REG);
+	link = !!(val & SR_XS_PCS_KR_STS1_PCS_LINKUP_MASK);
+
+	/* to make sure we detect any link drop, we also use latched variant of
+	 * 64/66b lock */
+	val = uphy_readl(uniphy, UPHY_SR_XS_PCS_KR_STS2_REG);
+	link &= !!(val & SR_XS_PCS_KR_STS2_LL_BLOCK_LOCK_MASK);
+
+	/* and latched version of "high bit error count condition" */
+	link &= !(val & SR_XS_PCS_KR_STS2_LL_HBER_MASK);
+
+	return link;
+}
+
+/*
+ *
+ */
+static bool uniphy_has_8b10b_sync_get(struct ess_uniphy_channel *uc)
+{
+	u32 val;
+
+	val = uphy_readl(uc->uniphy, UPHY_SGMII_ST_CHx_REG(uc->id));
+	return !!(val & SGMII_ST_CHx_SYNC_ST_MASK);
+}
+
+/*
+ *
+ */
+static int uniphy_calibrate(struct ess_uniphy *uniphy)
+{
+	size_t i;
+
+	for (i = 0; i < 100; i++) {
+		u32 val;
+
+		val = uphy_readl(uniphy, UPHY_CALIB_4_REG);
+		if (val & CALIB_4_IS_DONE_MASK)
+			return 0;
+
+		usleep_range(1000, 2000);
+	}
+	uniphy_err(uniphy, "uniphy calibration timeout\n");
+	return -ETIMEDOUT;
+}
+
+/*
+ *
+ */
+static void uniphy_pll_reset(struct ess_uniphy *uniphy)
+{
+	u32 val;
+
+	/* reset PLL */
+	val = uphy_readl(uniphy, UPHY_PLL_REG);
+	val &= ~UPHY_PLL_RESET_MASK;
+	uphy_writel(uniphy, UPHY_PLL_REG, val);
+	msleep(10);
+	val |= UPHY_PLL_RESET_MASK;
+	uphy_writel(uniphy, UPHY_PLL_REG, val);
+	msleep(10);
+}
+
+/*
+ *
+ */
+static int uniphy_1000basex_mode_set(struct ess_uniphy *uniphy,
+				     struct ess_uniphy_channel *uc,
+				     phy_interface_t interface,
+				     int mode)
+{
+	int ret;
+	u32 val;
+
+	/* setup misc2 register */
+	val = uphy_readl(uniphy, UPHY_MISC2_REG);
+	val &= ~MISC2_PHY_MODE_MASK;
+	val |= MISC2_PHY_MODE_SGMII << MISC2_PHY_MODE_SHIFT;
+	uphy_writel(uniphy, UPHY_MISC2_REG, val);
+
+	uniphy_pll_reset(uniphy);
+
+	reset_control_assert(uniphy->xpcs_rst);
+
+	val = (MODE_CTRL_CHAN_MODE_1000BX << MODE_CTRL_CHAN_MODE_SHIFT);
+	if (uniphy->id == 0) {
+		u32 ch_mask;
+
+		/* FIXME: no hardware to test this */
+		switch (uc->id) {
+		case 0:
+			ch_mask = MODE_CTRL_CHAN_SEL_CH0;
+			break;
+		case 1:
+			ch_mask = MODE_CTRL_CHAN_SEL_CH1;
+			break;
+		case 4:
+			ch_mask = MODE_CTRL_CHAN_SEL_CH4;
+			break;
+		default:
+			WARN(1, "channel cannot do 1000base-x");
+			ch_mask = 0;
+			break;
+		}
+
+		val |= (ch_mask << MODE_CTRL_CHAN_SEL_SHIFT);
+		val |= (MODE_CTRL_MUX_MODE_SGMII <<
+			MODE_CTRL_MUX_MODE_SHIFT);
+		val |= (MODE_CTRL_CHAN_SEL_CH0 <<
+			MODE_CTRL_CHAN_SEL_SHIFT);
+	} else
+		val |= MODE_CTRL_SG_MODE_MASK;
+
+	uphy_writel(uniphy, UPHY_MODE_CTRL_REG, val);
+
+	trigger_reset(uniphy);
+
+	ret = uniphy_calibrate(uniphy);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/*
+ *
+ */
+static void uniphy_1000basex_channel_init(struct ess_uniphy_channel *uc,
+					  int mode,
+					  const unsigned long *advertising)
+{
+	struct ess_uniphy *uniphy = uc->uniphy;
+	u32 val;
+
+	if (phylink_test(advertising, Autoneg)) {
+                val = SGMMI_ANEGC_CHx_1000X_FD_MASK;
+                if (phylink_test(advertising, Pause))
+			val |= SGMII_ANEGC_CHx_PAUSE_MASK;
+                if (phylink_test(advertising, Asym_Pause))
+			val |= SGMII_ANEGC_CHx_AS_PAUSE_MASK;
+		uphy_writel(uniphy, UPHY_SGMII_ANEGC_CHx_REG(uc->id), val);
+	}
+
+	val = uphy_readl(uniphy, UPHY_SGMII_CTRL_CHx_REG(uc->id));
+	if (phylink_test(advertising, Autoneg)) {
+		val &= ~SGMII_CTRL_CHx_FORCE_SPEED_MASK;
+		val &= ~SGMII_CTRL_CHx_SPEED_MASK;
+		val |= SGMII_CTRL_CHx_AN_ENABLE_MASK;
+		val |= SGMII_CTRL_CHx_AN_RESTART_MASK;
+	} else {
+		val &= ~SGMII_CTRL_CHx_SPEED_MASK;
+		val &= ~SGMII_CTRL_CHx_AN_ENABLE_MASK;
+		val |= SGMII_CTRL_CHx_FORCE_SPEED_MASK;
+		val |= SGMII_CTRL_CHx_SPEED_1G << SGMII_CTRL_CHx_SPEED_SHIFT;
+	}
+	uphy_writel(uniphy, UPHY_SGMII_CTRL_CHx_REG(uc->id), val);
+
+	if (phylink_test(advertising, Autoneg)) {
+		udelay(10);
+		val &= ~SGMII_CTRL_CHx_AN_RESTART_MASK;
+		uphy_writel(uniphy, UPHY_SGMII_CTRL_CHx_REG(uc->id), val);
+		uc->aneg_next_restart = jiffies + msecs_to_jiffies(10000);
+	}
+}
+
+/*
+ *
+ */
+static void
+uniphy_1000basex_channel_status_get(struct ess_uniphy_channel *uc,
+				    struct phylink_link_state *state)
+{
+	struct ess_uniphy *uniphy = uc->uniphy;
+	bool link, tx_pause, rx_pause, aneg_failed;
+	int fd_bit;
+        u32 lpa, val;
+
+	state->link = 0;
+	link = uniphy_has_8b10b_sync_get(uc);
+	if (!phylink_test(state->advertising, Autoneg)) {
+                state->link = link;
+                if (link) {
+                        state->speed = SPEED_1000;
+                        state->duplex = DUPLEX_FULL;
+                }
+                return;
+        }
+
+	val = uphy_readl(uniphy, UPHY_SGMII_ST_CHx_REG(uc->id));
+	state->an_complete = !!(val & SGMII_ST_CHx_ANEG_COMPLETE_MASK);
+
+	if (!link || state->an_complete)
+		uc->aneg_next_restart = jiffies + msecs_to_jiffies(5000);
+	else if (time_after(jiffies, uc->aneg_next_restart)) {
+		/* work around frozen autoneg after a link down in
+		 * front of cisco switch (aneg restart had no
+		 * effect) */
+		val = uphy_readl(uniphy, UPHY_SGMII_CTRL_CHx_REG(uc->id));
+		val &= ~SGMII_CTRL_CHx_AN_ENABLE_MASK;
+		uphy_writel(uniphy, UPHY_SGMII_CTRL_CHx_REG(uc->id), val);
+		udelay(10);
+		val |= SGMII_CTRL_CHx_AN_ENABLE_MASK;
+		uphy_writel(uniphy, UPHY_SGMII_CTRL_CHx_REG(uc->id), val);
+		uc->aneg_next_restart = jiffies + msecs_to_jiffies(10000);
+	}
+
+	if (!state->an_complete)
+		return;
+
+	/* resolve autoneg result, we only support full duplex */
+	lpa = uphy_readl(uniphy, UPHY_SGMII_LPA_CHx_REG(uc->id));
+
+	fd_bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT;
+	mii_lpa_mod_linkmode_x(state->lp_advertising, lpa, fd_bit);
+
+        /* check for autoneg failure on duplex */
+        aneg_failed = false;
+
+	if (!linkmode_test_bit(fd_bit, state->advertising) ||
+	    !linkmode_test_bit(fd_bit, state->lp_advertising))
+		aneg_failed = true;
+
+	linkmode_resolve_pause(state->advertising,
+			       state->lp_advertising,
+			       &tx_pause, &rx_pause);
+
+	if (tx_pause)
+		state->pause |= MLO_PAUSE_TX;
+	if (rx_pause)
+		state->pause |= MLO_PAUSE_RX;
+
+	if (aneg_failed) {
+		if (!uc->aneg_error_reported) {
+			uniphy_err(uniphy,
+				   "chan:%d autoneg error (lpa 0x%x)",
+				   uc->id, lpa);
+			uc->aneg_error_reported = true;
+                }
+                return;
+        }
+
+        state->link = link;
+        if (link) {
+                state->speed = SPEED_1000;
+                state->duplex = DUPLEX_FULL;
+        }
+}
+
+/*
+ * restart 802.3z in-band autoneg by pulsing AN_RESTART (set, 10us settle, clear)
+ */
+static void
+uniphy_1000basex_an_restart(struct ess_uniphy_channel *uc)
+{
+	struct ess_uniphy *uniphy = uc->uniphy;
+	u32 val;
+
+	val = uphy_readl(uniphy, UPHY_SGMII_CTRL_CHx_REG(uc->id));
+	val |= SGMII_CTRL_CHx_AN_RESTART_MASK;
+	uphy_writel(uniphy, UPHY_SGMII_CTRL_CHx_REG(uc->id), val);
+	udelay(10);
+	val &= ~SGMII_CTRL_CHx_AN_RESTART_MASK;
+	uphy_writel(uniphy, UPHY_SGMII_CTRL_CHx_REG(uc->id), val);
+}
+
+/*
+ * put the uniphy in SGMII/QSGMII/PSGMII MAC mode, then reset and calibrate it
+ */
+static int uniphy_pqsgmii_mode_set(struct ess_uniphy *uniphy,
+				   struct ess_uniphy_channel *uc,
+				   phy_interface_t interface,
+				   int mode)
+{
+	int i, ret;
+	u32 val;
+
+	/* setup misc2 register */
+	val = uphy_readl(uniphy, UPHY_MISC2_REG);
+	val &= ~MISC2_PHY_MODE_MASK;
+	val |= MISC2_PHY_MODE_SGMII << MISC2_PHY_MODE_SHIFT;
+	uphy_writel(uniphy, UPHY_MISC2_REG, val);
+
+	uniphy_pll_reset(uniphy);
+
+	reset_control_assert(uniphy->xpcs_rst);
+
+	val = MODE_CTRL_CHAN_MODE_SGMII_MAC << MODE_CTRL_CHAN_MODE_SHIFT;
+	switch (interface) {
+	case PHY_INTERFACE_MODE_SGMII:
+		if (uniphy->id == 0) {
+			u32 ch_mask;
+
+			val |= (MODE_CTRL_MUX_MODE_SGMII <<
+				MODE_CTRL_MUX_MODE_SHIFT);
+
+			/* uniphy0 muxes one of channels 0/1/4 onto the lane */
+			switch (uc->id) {
+			case 0:
+				ch_mask = MODE_CTRL_CHAN_SEL_CH0;
+				break;
+			case 1:
+				ch_mask = MODE_CTRL_CHAN_SEL_CH1;
+				break;
+			case 4:
+				ch_mask = MODE_CTRL_CHAN_SEL_CH4;
+				break;
+			default:
+				WARN(1, "channel cannot do SGMII");
+				ch_mask = 0;
+				break;
+			}
+
+			val |= (ch_mask << MODE_CTRL_CHAN_SEL_SHIFT);
+		} else
+			val |= MODE_CTRL_SG_MODE_MASK;
+		break;
+	case PHY_INTERFACE_MODE_QSGMII:
+		val |= MODE_CTRL_MUX_MODE_QSGMII << MODE_CTRL_MUX_MODE_SHIFT;
+		break;
+	case PHY_INTERFACE_MODE_PSGMII:
+		val |= MODE_CTRL_MUX_MODE_PSGMII << MODE_CTRL_MUX_MODE_SHIFT;
+		break;
+	default:
+		break;
+	}
+	uphy_writel(uniphy, UPHY_MODE_CTRL_REG, val);
+
+	/* setup all channels without autoneg so it's not done behind
+	 * our back before we setup the channel */
+	for (i = 0; i < 5; i++) {
+		val = uphy_readl(uniphy, UPHY_SGMII_CTRL_CHx_REG(i));
+		val &= ~SGMII_CTRL_CHx_AN_ENABLE_MASK;
+		uphy_writel(uniphy, UPHY_SGMII_CTRL_CHx_REG(i), val);
+	}
+
+	trigger_reset(uniphy);
+
+	ret = uniphy_calibrate(uniphy);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * per-channel setup: select in-band autoneg or forced-speed operation
+ */
+static void uniphy_pqsgmii_channel_init(struct ess_uniphy_channel *uc,
+					int mode)
+{
+	struct ess_uniphy *uniphy = uc->uniphy;
+	u32 val;
+
+	uc->aneg_error_reported = false;
+	val = uphy_readl(uniphy, UPHY_SGMII_CTRL_CHx_REG(uc->id));
+	val &= ~SGMII_CTRL_CHx_SPEED_MASK;
+
+	if (phylink_autoneg_inband(mode)) {
+		val &= ~SGMII_CTRL_CHx_FORCE_SPEED_MASK;
+		val |= SGMII_CTRL_CHx_AN_ENABLE_MASK;
+	} else {
+		val |= SGMII_CTRL_CHx_FORCE_SPEED_MASK;
+		val &= ~SGMII_CTRL_CHx_AN_ENABLE_MASK;
+	}
+	uphy_writel(uniphy, UPHY_SGMII_CTRL_CHx_REG(uc->id), val);
+}
+
+/*
+ * program a forced speed (10/100/1000) on the channel; duplex is unused here
+ */
+static void uniphy_pqsgmii_channel_force_speed(struct ess_uniphy_channel *uc,
+					       int speed, int duplex)
+{
+	struct ess_uniphy *uniphy = uc->uniphy;
+	u32 val;
+
+	val = uphy_readl(uniphy, UPHY_SGMII_CTRL_CHx_REG(uc->id));
+	val &= ~SGMII_CTRL_CHx_SPEED_MASK;
+	switch (speed) {
+	case SPEED_10:
+		val |= SGMII_CTRL_CHx_SPEED_10M << SGMII_CTRL_CHx_SPEED_SHIFT;
+		break;
+	case SPEED_100:
+		val |= SGMII_CTRL_CHx_SPEED_100M << SGMII_CTRL_CHx_SPEED_SHIFT;
+		break;
+	case SPEED_1000:
+		val |= SGMII_CTRL_CHx_SPEED_1G << SGMII_CTRL_CHx_SPEED_SHIFT;
+		break;
+	}
+	uphy_writel(uniphy, UPHY_SGMII_CTRL_CHx_REG(uc->id), val);
+}
+
+/*
+ * read back the SGMII-autoneg resolved link/duplex/speed for this channel
+ */
+static void uniphy_pqsgmii_channel_status_get(struct ess_uniphy_channel *uc,
+					      struct phylink_link_state *state)
+{
+	struct ess_uniphy *uniphy = uc->uniphy;
+	u32 val;
+
+	val = uphy_readl(uniphy, UPHY_SGMII_ANEG_CHx_REG(uc->id));
+
+	state->link = !!(val & SGMII_ANEG_CHx_LINK_MASK);
+	state->duplex = (val & SGMII_ANEG_CHx_FD_MASK) ?
+		DUPLEX_FULL : DUPLEX_HALF;
+
+	val &= SGMII_ANEG_CHx_SPEED_MASK;
+	val >>= SGMII_ANEG_CHx_SPEED_SHIFT;
+	switch (val) {
+	case SGMII_ANEG_CHx_SPEED_10M:
+		state->speed = SPEED_10;
+		break;
+	case SGMII_ANEG_CHx_SPEED_100M:
+		state->speed = SPEED_100;
+		break;
+	case SGMII_ANEG_CHx_SPEED_1G:
+		state->speed = SPEED_1000;
+		break;
+	}
+}
+
+/*
+ * put the uniphy in 10GBase-R mode, calibrate, and kick off link debouncing
+ */
+static int uniphy_10gr_mode_set(struct ess_uniphy *uniphy)
+{
+	u32 val;
+	int ret;
+
+	/* setup misc2 register */
+	val = uphy_readl(uniphy, UPHY_MISC2_REG);
+	val &= ~MISC2_PHY_MODE_MASK;
+	val |= MISC2_PHY_MODE_USXGMII << MISC2_PHY_MODE_SHIFT;
+	uphy_writel(uniphy, UPHY_MISC2_REG, val);
+
+	uniphy_pll_reset(uniphy);
+
+	/* make sure xpcs reset is held */
+	reset_control_assert(uniphy->xpcs_rst);
+
+	/* setup uniphy in 10g-baser mode */
+	val = MODE_CTRL_XPCS_MODE_MASK;
+	uphy_writel(uniphy, UPHY_MODE_CTRL_REG, val);
+
+	/* ignore rx los signal (FIXME: seems to have no effect) */
+	val = uphy_readl(uniphy, UPHY_LINK_DET_REG);
+	val |= UPHY_LINK_DET_EN_MASK;
+	uphy_writel(uniphy, UPHY_LINK_DET_REG, val);
+
+	trigger_reset(uniphy);
+
+	ret = uniphy_calibrate(uniphy);
+	if (ret)
+		return ret;
+
+	reset_control_deassert(uniphy->xpcs_rst);
+	msleep(10);
+
+	/* debounce link for this mode */
+	uniphy->debounced_link = false;
+	uniphy->debounced_link_down_latch = false;
+	uniphy->debounced_consec_up = 0;
+	queue_delayed_work(uniphy->debounce_wq,
+			   &uniphy->debounce_work,
+			   msecs_to_jiffies(DEBOUNCE_INTV_MSECS));
+
+	return 0;
+}
+
+/*
+ * periodic work: debounce raw 10GBase-R link into debounced_link + down latch
+ */
+static void debounce_wq_func(struct work_struct *work)
+{
+	struct delayed_work *del_work = to_delayed_work(work);
+	struct ess_uniphy *uniphy = container_of(del_work, struct ess_uniphy,
+						 debounce_work);
+	struct ess_uniphy_channel *uc;
+	bool link;
+
+	mutex_lock(&uniphy->lock);
+	uc = &uniphy->channels[0];
+	link = xpcs_has_10gr_link(uc->uniphy);
+	if (link != uniphy->debounced_link) {
+		if (!link) {
+			/* immediate down: latch it so status_get sees it once */
+			uniphy->debounced_link_down_latch = true;
+			uniphy->debounced_link = false;
+			uniphy->debounced_consec_up = 0;
+		} else {
+			/* up only after DEBOUNCE_TOTAL_MSECS of consecutive up */
+			++uniphy->debounced_consec_up;
+			if (uniphy->debounced_consec_up *
+			    DEBOUNCE_INTV_MSECS >= DEBOUNCE_TOTAL_MSECS)
+				uniphy->debounced_link = true;
+		}
+	}
+	mutex_unlock(&uniphy->lock);
+
+	queue_delayed_work(uniphy->debounce_wq,
+			   &uniphy->debounce_work,
+			   msecs_to_jiffies(DEBOUNCE_INTV_MSECS));
+}
+
+/*
+ * report debounced 10GBase-R link; the latch makes short drops visible once
+ */
+static void uniphy_10gr_channel_status_get(struct ess_uniphy_channel *uc,
+					   struct phylink_link_state *state)
+{
+	struct ess_uniphy *uniphy = uc->uniphy;
+
+	if (uniphy->debounced_link_down_latch) {
+		uniphy->debounced_link_down_latch = false;
+		state->link = false;
+	} else
+		state->link = uniphy->debounced_link;
+
+	if (state->link) {
+		state->duplex = DUPLEX_FULL;
+		state->speed = SPEED_10000;
+	}
+}
+
+/*
+ * put uniphy/XPCS in USXGMII (or multi-port 10G-QXGMII) mode with EEE tuning
+ */
+static int uniphy_usxgmii_mode_set(struct ess_uniphy *uniphy,
+				   phy_interface_t interface,
+				   int mode)
+{
+	u32 val;
+	int ret;
+
+	/* setup misc2 register */
+	val = uphy_readl(uniphy, UPHY_MISC2_REG);
+	val &= ~MISC2_PHY_MODE_MASK;
+	val |= MISC2_PHY_MODE_USXGMII << MISC2_PHY_MODE_SHIFT;
+	uphy_writel(uniphy, UPHY_MISC2_REG, val);
+
+	uniphy_pll_reset(uniphy);
+
+	/* make sure xpcs reset is held */
+	reset_control_assert(uniphy->xpcs_rst);
+
+	/* setup uniphy in usxgmii mode */
+	val = MODE_CTRL_ANEG_MODE_CISCO | MODE_CTRL_XPCS_MODE_MASK;
+	uphy_writel(uniphy, UPHY_MODE_CTRL_REG, val);
+
+	if (interface == PHY_INTERFACE_MODE_10G_QXGMII) {
+		val = uphy_readl(uniphy, UPHY_USXG_OPT_REG);
+		val |= USXG_OPT_GMII_SRC_SEL_MASK;
+		uphy_writel(uniphy, UPHY_USXG_OPT_REG, val);
+	}
+
+	trigger_reset(uniphy);
+
+	ret = uniphy_calibrate(uniphy);
+	if (ret)
+		return ret;
+
+	reset_control_deassert(uniphy->xpcs_rst);
+	msleep(10);
+
+	/* enable usxgmii mode */
+	val = uphy_readl(uniphy, UPHY_VR_XS_PCS_DIG_CTRL1_REG);
+	val |= VR_XS_PCS_DIG_CTRL1_USXG_EN_MASK;
+	uphy_writel(uniphy, UPHY_VR_XS_PCS_DIG_CTRL1_REG, val);
+
+	val = uphy_readl(uniphy, UPHY_VR_XS_PCS_KR_CTRL_REG);
+	val &= ~VR_XS_PCS_KR_CTRL_MODE_MASK;
+	switch (interface) {
+	case PHY_INTERFACE_MODE_USXGMII:
+		val |= VR_XS_PCS_KR_CTRL_MODE_10G_SXGMII <<
+			VR_XS_PCS_KR_CTRL_MODE_SHIFT;
+		break;
+	case PHY_INTERFACE_MODE_10G_QXGMII:
+		val |= VR_XS_PCS_KR_CTRL_MODE_10G_QXGMII <<
+			VR_XS_PCS_KR_CTRL_MODE_SHIFT;
+		break;
+	default:
+		return -ENOTSUPP;
+	}
+	uphy_writel(uniphy, UPHY_VR_XS_PCS_KR_CTRL_REG, val);
+
+	/* tune alignment marker count for multi port mode */
+	if (interface == PHY_INTERFACE_MODE_10G_QXGMII)
+		uphy_writel(uniphy, UPHY_VR_XS_PCS_DIG_STS_REG, 0x6018);
+
+	/* issue xpcs vendor reset */
+	val = uphy_readl(uniphy, UPHY_VR_XS_PCS_DIG_CTRL1_REG);
+	val |= VR_XS_PCS_DIG_CTRL1_VR_RST_MASK;
+	uphy_writel(uniphy, UPHY_VR_XS_PCS_DIG_CTRL1_REG, val);
+
+	/* setup EEE transparent mode (FIXME: setup because of
+	 * qca8084) */
+	if (interface == PHY_INTERFACE_MODE_10G_QXGMII) {
+		/*
+		 * configure eee related timer value
+		 */
+		val = uphy_readl(uniphy, UPHY_VR_XS_PCS_EEE_MCTRL0_REG);
+		val |= VR_XS_PCS_EEE_MCTRL0_SIGN_BIT_MASK |
+			(1 << VR_XS_PCS_EEE_MCTRL0_MULT_FACT_100NS_SHIFT);
+		uphy_writel(uniphy, UPHY_VR_XS_PCS_EEE_MCTRL0_REG, val);
+
+		val = uphy_readl(uniphy, UPHY_VR_XS_PCS_EEE_TXTIMER_REG);
+		val |= (0xa << VR_XS_PCS_EEE_TXTIMER_TSL_RES_SHIFT) |
+			(0x3 << VR_XS_PCS_EEE_TXTIMER_T1U_RES_SHIFT) |
+			(0x16 << VR_XS_PCS_EEE_TXTIMER_TWL_RES_SHIFT);
+		uphy_writel(uniphy, UPHY_VR_XS_PCS_EEE_TXTIMER_REG, val);
+
+		val = uphy_readl(uniphy, UPHY_VR_XS_PCS_EEE_RXTIMER_REG);
+		val |= (0xc8 << VR_XS_PCS_EEE_RXTIMER_RES_100U_SHIFT) |
+			(0x1c << VR_XS_PCS_EEE_TXTIMER_TWL_RES_SHIFT);
+		uphy_writel(uniphy, UPHY_VR_XS_PCS_EEE_RXTIMER_REG, val);
+
+		/*
+		 * Transparent LPI mode and LPI pattern enable
+		 */
+		val = uphy_readl(uniphy, UPHY_VR_XS_PCS_EEE_MCTRL1_REG);
+		val |= VR_XS_PCS_EEE_MCTRL1_TRANSP_TX_LPI_EN_MASK |
+			VR_XS_PCS_EEE_MCTRL1_TRANSP_RX_LPI_EN_MASK;
+		uphy_writel(uniphy, UPHY_VR_XS_PCS_EEE_MCTRL1_REG, val);
+
+		val = uphy_readl(uniphy, UPHY_VR_XS_PCS_EEE_MCTRL0_REG);
+		val |= VR_XS_PCS_EEE_MCTRL0_LTX_EN_MASK |
+			VR_XS_PCS_EEE_MCTRL0_LRX_EN_MASK;
+		uphy_writel(uniphy, UPHY_VR_XS_PCS_EEE_MCTRL0_REG, val);
+	}
+
+	return 0;
+}
+
+/*
+ * per-channel USXGMII setup: MII width, autoneg interrupt, IPG check disable
+ */
+static void uniphy_usxgmii_channel_init(struct ess_uniphy_channel *uc,
+					int mode)
+{
+	struct ess_uniphy *uniphy = uc->uniphy;
+	u32 val;
+
+	/* select 8-bit wide MII width for 10/100M mode,
+	 * this is what the GMAC expects */
+	val = uphy_readl(uniphy, UPHY_VR_MII_AN_CTRL_CHANx_REG(uc->id));
+	val &= ~VR_MIIx_AN_CTRL_MII_CTRL_MASK;
+	val |= (VR_MIIx_AN_CTRL_MII_CTRL_WIDTH_8BIT <<
+		VR_MIIx_AN_CTRL_MII_CTRL_SHIFT);
+
+	if (phylink_autoneg_inband(mode)) {
+		/* we use the interrupt "autoneg done" bit to know
+		 * that autoneg has finished, but it won't be set
+		 * unless we enable this */
+		val |= VR_MIIx_AN_CTRL_AN_INTR_EN_MASK;
+	}
+	uphy_writel(uniphy, UPHY_VR_MII_AN_CTRL_CHANx_REG(uc->id), val);
+
+
+	if (phylink_autoneg_inband(mode)) {
+		/* start with autoneg disabled until RPCS link is
+		 * up */
+		val = uphy_readl(uniphy, UPHY_SR_MII_CTRL_CHANx_REG(uc->id));
+		val &= ~SR_MII_CTRL_AN_ENABLE_MASK;
+		uphy_writel(uniphy, UPHY_SR_MII_CTRL_CHANx_REG(uc->id), val);
+
+		/* remaining of usxgmii channel autoneg will be done
+		 * when RPCS link is up */
+		uc->usxgmii_aneg_state = USX_ANEG_INIT;
+	}
+
+	/* disable IPG check */
+	val = uphy_readl(uniphy, UPHY_VR_XAUI_MODE_CTRL_CHANx_REG(uc->id));
+	val |= VR_XAUI_MODE_CTRL_DIS_TX_IPG_CHK_MASK;
+	uphy_writel(uniphy, UPHY_VR_XAUI_MODE_CTRL_CHANx_REG(uc->id), val);
+}
+
+/*
+ * USXGMII autoneg state machine: wait for RPCS link, run aneg, mirror into PCS
+ */
+static void uniphy_usxgmii_channel_status_get(struct ess_uniphy_channel *uc,
+					      struct phylink_link_state *state)
+{
+	struct ess_uniphy *uniphy = uc->uniphy;
+	bool rpcs_link = xpcs_has_10gr_link(uniphy);
+	bool aneg_done, link;
+	u32 val;
+
+again:
+	switch (uc->usxgmii_aneg_state) {
+	case USX_ANEG_INIT:
+		/* make sure autoneg is disabled while we wait for
+		 * rpcs link */
+		val = uphy_readl(uniphy, UPHY_SR_MII_CTRL_CHANx_REG(uc->id));
+		val &= ~SR_MII_CTRL_AN_ENABLE_MASK;
+		uphy_writel(uniphy, UPHY_SR_MII_CTRL_CHANx_REG(uc->id), val);
+		uc->usxgmii_aneg_state = USX_ANEG_RPCS_DOWN;
+		fallthrough;
+
+	case USX_ANEG_RPCS_DOWN:
+		if (!rpcs_link)
+			return;
+
+		uniphy_dbg(uniphy, "usgxmii raw link is now UP\n");
+		fallthrough;
+
+	case USX_ANEG_SETUP:
+		/* disable auto-neg */
+		val = uphy_readl(uniphy, UPHY_SR_MII_CTRL_CHANx_REG(uc->id));
+		val &= ~SR_MII_CTRL_AN_ENABLE_MASK;
+		uphy_writel(uniphy, UPHY_SR_MII_CTRL_CHANx_REG(uc->id), val);
+
+		/* clear autoneg done */
+		uphy_writel(uniphy, UPHY_VR_MII_AN_INTR_STS_CHANx_REG(uc->id),
+			    0);
+
+		/* setup auto-neg capabilities */
+		val = uphy_readl(uniphy, UPHY_SR_MII_CTRL_CHANx_REG(uc->id));
+		val &= ~SR_MII_CTRL_SPEED_MASK;
+		val |= SR_MII_CTRL_FDX_MASK;
+		uphy_writel(uniphy, UPHY_SR_MII_CTRL_CHANx_REG(uc->id), val);
+
+		/* enable autoneg */
+		val |= SR_MII_CTRL_AN_ENABLE_MASK;
+		uphy_writel(uniphy, UPHY_SR_MII_CTRL_CHANx_REG(uc->id), val);
+		uc->usxgmii_aneg_state = USX_ANEG_WAIT;
+		break;
+
+	case USX_ANEG_WAIT:
+	case USX_ANEG_DONE_LINK_DOWN:
+	case USX_ANEG_DONE_LINK_UP:
+		if (!rpcs_link) {
+			uniphy_dbg(uniphy, "usgxmii raw link is now DOWN\n");
+			/* rpcs link went down, we will need to redo
+			 * the autoneg */
+			uc->usxgmii_aneg_state = USX_ANEG_INIT;
+			return;
+		}
+
+		/* lookup autoneg result for the right usxgmii channel */
+		val = uphy_readl(uniphy,
+				 UPHY_VR_MII_AN_INTR_STS_CHANx_REG(uc->id));
+
+		aneg_done = (val & VR_MII_AN_INTR_STS_AN_DONE_MASK);
+
+		if (!aneg_done) {
+			/* not finished */
+			uniphy_dbg(uniphy, "usgxmii autoneg not done\n");
+			if (uc->usxgmii_aneg_state == USX_ANEG_DONE_LINK_UP) {
+				/* no change in previous link state */
+				state->link = true;
+				state->speed = uc->usxgmii_aneg_speed;
+				state->duplex = uc->usxgmii_aneg_duplex;
+			}
+			return;
+		}
+
+		uniphy_dbg(uniphy, "usgxmii autoneg complete: %04x\n", val);
+
+		/* clear autoneg done bit */
+		uphy_writel(uniphy, UPHY_VR_MII_AN_INTR_STS_CHANx_REG(uc->id),
+			    0);
+
+		/* check link status */
+		link = !!(val & VR_MII_AN_INTR_STS_LINK_UP_MASK);
+		if (!link) {
+			/* actual link is down */
+			uniphy_dbg(uniphy, "usgxmii aneg link is down\n");
+			state->link = false;
+
+			if (uc->usxgmii_aneg_state == USX_ANEG_DONE_LINK_UP) {
+				/* we need to resetup aneg again
+				 * because the speed configured
+				 * previously is the one we would
+				 * advertise now */
+				uc->usxgmii_aneg_state = USX_ANEG_SETUP;
+				goto again;
+			}
+
+			uc->usxgmii_aneg_state = USX_ANEG_DONE_LINK_DOWN;
+			return;
+		}
+
+		/* capture negotiated speed */
+		switch ((val & VR_MII_AN_INTR_STS_SPEED_MASK) >>
+			VR_MII_AN_INTR_STS_SPEED_SHIFT) {
+		case VR_MII_AN_INTR_STS_SPEED_10M:
+			uc->usxgmii_aneg_speed = SPEED_10;
+			break;
+		case VR_MII_AN_INTR_STS_SPEED_100M:
+			uc->usxgmii_aneg_speed = SPEED_100;
+			break;
+		case VR_MII_AN_INTR_STS_SPEED_1G:
+			uc->usxgmii_aneg_speed = SPEED_1000;
+			break;
+		case VR_MII_AN_INTR_STS_SPEED_10G:
+			uc->usxgmii_aneg_speed = SPEED_10000;
+			break;
+		case VR_MII_AN_INTR_STS_SPEED_2D5G:
+			uc->usxgmii_aneg_speed = SPEED_2500;
+			break;
+		case VR_MII_AN_INTR_STS_SPEED_5G:
+			uc->usxgmii_aneg_speed = SPEED_5000;
+			break;
+		default:
+			uniphy_dbg(uniphy, "usgxmii INVALID SPEED: %x\n",
+				   val);
+			return;
+		}
+
+		uc->usxgmii_aneg_duplex = (val & VR_MII_AN_INTR_STS_FDX_MASK) ?
+			DUPLEX_FULL : DUPLEX_HALF;
+
+		uniphy_dbg(uniphy, "usgxmii aneg link is up, "
+			   "speed is %d, duplex %d\n",
+			   uc->usxgmii_aneg_speed,
+			   uc->usxgmii_aneg_duplex);
+
+		/* reflect negotiated speed in PCS so that it does the
+		 * correct replication of MII data from MAC */
+		val = uphy_readl(uniphy, UPHY_SR_MII_CTRL_CHANx_REG(uc->id));
+		val &= ~SR_MII_CTRL_SPEED_MASK;
+		switch (uc->usxgmii_aneg_speed) {
+		case SPEED_10:
+			val |= SR_MII_CTRL_SPEED_10M_MASK;
+			break;
+		case SPEED_100:
+			val |= SR_MII_CTRL_SPEED_100M_MASK;
+			break;
+		case SPEED_1000:
+			val |= SR_MII_CTRL_SPEED_1G_MASK;
+			break;
+		case SPEED_2500:
+			val |= SR_MII_CTRL_SPEED_2D5G_MASK;
+			break;
+		case SPEED_5000:
+			val |= SR_MII_CTRL_SPEED_5G_MASK;
+			break;
+		case SPEED_10000:
+			val |= SR_MII_CTRL_SPEED_10G_MASK;
+			break;
+		}
+
+		if (uc->usxgmii_aneg_duplex == DUPLEX_FULL)
+			val |= SR_MII_CTRL_FDX_MASK;
+		else
+			val &= ~SR_MII_CTRL_FDX_MASK;
+
+		uphy_writel(uniphy, UPHY_SR_MII_CTRL_CHANx_REG(uc->id), val);
+
+		/* issue usra vendor reset */
+		if (uc->id == 0) {
+			val = uphy_readl(uniphy, UPHY_VR_XS_PCS_DIG_CTRL1_REG);
+			val |= VR_XS_PCS_DIG_CTRL1_USRA_RST_MASK;
+			uphy_writel(uniphy, UPHY_VR_XS_PCS_DIG_CTRL1_REG, val);
+		} else {
+			val = uphy_readl(uniphy,
+					 UPHY_VR_MII_DIG_CTRL1_CHANx_REG(uc->id));
+			val |= VR_MII_DIG_CTRL1_USRA_RST_MASK;
+			uphy_writel(uniphy,
+				    UPHY_VR_MII_DIG_CTRL1_CHANx_REG(uc->id),
+				    val);
+		}
+
+		state->speed = uc->usxgmii_aneg_speed;
+		state->duplex = uc->usxgmii_aneg_duplex;
+		uc->usxgmii_aneg_state = USX_ANEG_DONE_LINK_UP;
+		break;
+	}
+}
+
+/*
+ * program forced PCS speed/duplex for USXGMII when in-band autoneg is off
+ */
+static void uniphy_usxgmii_channel_force_speed(struct ess_uniphy_channel *uc,
+					       int speed, int duplex)
+{
+	struct ess_uniphy *uniphy = uc->uniphy;
+	u32 val;
+
+	val = uphy_readl(uniphy, UPHY_SR_MII_CTRL_CHANx_REG(uc->id));
+
+	val &= ~SR_MII_CTRL_SPEED_MASK;
+	switch (speed) {
+	case SPEED_10:
+		val |= SR_MII_CTRL_SPEED_10M_MASK;
+		break;
+	case SPEED_100:
+		val |= SR_MII_CTRL_SPEED_100M_MASK;
+		break;
+	case SPEED_1000:
+		val |= SR_MII_CTRL_SPEED_1G_MASK;
+		break;
+	case SPEED_2500:
+		val |= SR_MII_CTRL_SPEED_2D5G_MASK;
+		break;
+	case SPEED_5000:
+		val |= SR_MII_CTRL_SPEED_5G_MASK;
+		break;
+	case SPEED_10000:
+		val |= SR_MII_CTRL_SPEED_10G_MASK;
+		break;
+	}
+
+	if (duplex == DUPLEX_FULL)
+		val |= SR_MII_CTRL_FDX_MASK;
+	else
+		val &= ~SR_MII_CTRL_FDX_MASK;
+
+	uphy_writel(uniphy, UPHY_SR_MII_CTRL_CHANx_REG(uc->id), val);
+}
+
+/*
+ * return uniphy in-band link status, only called when mode is
+ * MLO_AN_INBAND
+ */
+void ess_uniphy_channel_status_get(struct ess_uniphy_channel *uc,
+				   struct phylink_link_state *state)
+{
+	struct ess_uniphy *uniphy = uc->uniphy;
+
+	/* set default to no-link */
+	state->speed = SPEED_UNKNOWN;
+	state->duplex = DUPLEX_UNKNOWN;
+	state->pause = MLO_PAUSE_NONE;
+	state->an_complete = 0;
+	state->link = 0;
+
+	mutex_lock(&uniphy->lock);
+
+	if (WARN_ON(!(uniphy->channels_used & (1 << uc->id)))) {
+		mutex_unlock(&uniphy->lock);
+		return;
+	}
+
+	if (WARN_ON(!phylink_autoneg_inband(uniphy->cur_mode))) {
+		mutex_unlock(&uniphy->lock);
+		return;
+	}
+
+	/* dispatch to the per-interface-family status reader */
+	switch (state->interface) {
+	case PHY_INTERFACE_MODE_1000BASEX:
+		uniphy_1000basex_channel_status_get(uc, state);
+		break;
+	case PHY_INTERFACE_MODE_2500BASEX:
+		/* FIXME: implement */
+		break;
+	case PHY_INTERFACE_MODE_10GBASER:
+		uniphy_10gr_channel_status_get(uc, state);
+		break;
+	case PHY_INTERFACE_MODE_SGMII:
+	case PHY_INTERFACE_MODE_QSGMII:
+	case PHY_INTERFACE_MODE_PSGMII:
+		uniphy_pqsgmii_channel_status_get(uc, state);
+		break;
+	case PHY_INTERFACE_MODE_USXGMII:
+	case PHY_INTERFACE_MODE_10G_QXGMII:
+		uniphy_usxgmii_channel_status_get(uc, state);
+		break;
+	default:
+		break;
+	}
+
+	uniphy_dbg(uniphy, "uniphy_channel_status_get chan:%d "
+		   "link:%d speed:%d\n",
+		   uc->id, state->link, state->speed);
+}
+
+/*
+ * callback when phylink wants us to restart autoneg on the
+ * inband-link, only makes sense on 8023z (1000base-x/2500base-x)
+ */
+void ess_uniphy_channel_an_restart(struct ess_uniphy_channel *uc)
+{
+	struct ess_uniphy *uniphy = uc->uniphy;
+
+	mutex_lock(&uniphy->lock);
+
+	if (WARN_ON(!(uniphy->channels_used & (1 << uc->id)))) {
+		mutex_unlock(&uniphy->lock);
+		return;
+	}
+
+	if (WARN_ON(!phylink_autoneg_inband(uniphy->cur_mode))) {
+		mutex_unlock(&uniphy->lock);
+		return;
+	}
+
+	switch (uniphy->cur_interface) {
+	case PHY_INTERFACE_MODE_1000BASEX:
+	case PHY_INTERFACE_MODE_2500BASEX:
+		uniphy_1000basex_an_restart(uc);
+		break;
+	default: /* restart is only meaningful for 802.3z modes */
+		break;
+	}
+
+	mutex_unlock(&uniphy->lock);
+}
+
+/*
+ * callback when link goes up, always called, but only makes sense for
+ * MLO_AN_PHY. Used to setup PCS when copper side speed needs to be
+ * known at the PCS level.
+ */
+void ess_uniphy_channel_link_up(struct ess_uniphy_channel *uc,
+				int speed, int duplex)
+{
+	struct ess_uniphy *uniphy = uc->uniphy;
+
+	mutex_lock(&uniphy->lock);
+
+	uniphy_dbg(uniphy,
+		   "ess_uniphy_channel_link_up chan:%d speed:%d duplex:%d\n",
+		   uc->id, speed, duplex);
+
+	if (WARN_ON(!(uniphy->channels_used & (1 << uc->id)))) {
+		mutex_unlock(&uniphy->lock);
+		return;
+	}
+
+	if (phylink_autoneg_inband(uniphy->cur_mode)) {
+		/* nothing to do */
+		mutex_unlock(&uniphy->lock);
+		return;
+	}
+
+	switch (uniphy->cur_interface) { /* forced-speed: mirror PHY speed into PCS */
+	case PHY_INTERFACE_MODE_USXGMII:
+	case PHY_INTERFACE_MODE_10G_QXGMII:
+		uniphy_usxgmii_channel_force_speed(uc, speed, duplex);
+		break;
+	case PHY_INTERFACE_MODE_SGMII:
+	case PHY_INTERFACE_MODE_QSGMII:
+	case PHY_INTERFACE_MODE_PSGMII:
+		uniphy_pqsgmii_channel_force_speed(uc, speed, duplex);
+		break;
+	case PHY_INTERFACE_MODE_1000BASEX:
+	case PHY_INTERFACE_MODE_2500BASEX:
+	case PHY_INTERFACE_MODE_10GBASER:
+		/* nothing to do */
+		break;
+	default:
+		break;
+	}
+
+	mutex_unlock(&uniphy->lock);
+}
+
+/*
+ * setup uniphy in the correct interface/mode, and raise refcount
+ */
+struct ess_uniphy_channel *
+ess_uniphy_channel_get(struct ipq95xx_ess_priv *priv,
+		       unsigned int uniphy_id,
+		       unsigned int channel_id,
+		       phy_interface_t interface, int mode,
+		       const unsigned long *advertising)
+{
+	struct ess_uniphy *uniphy;
+	struct ess_uniphy_channel *uc;
+	unsigned int rate;
+	int ret = 0; /* must stay 0 on the already-initialized uniphy path */
+
+	if (WARN_ON(uniphy_id >= priv->hw_uniphy_count))
+		return ERR_PTR(-EINVAL);
+
+	uniphy = &priv->uniphys[uniphy_id];
+
+	if (WARN_ON(channel_id >= ARRAY_SIZE(uniphy->channels)))
+		return ERR_PTR(-EINVAL);
+
+	uc = &uniphy->channels[channel_id];
+
+	mutex_lock(&uniphy->lock);
+
+	if (WARN_ON(uniphy->channels_used & (1 << uc->id))) {
+		mutex_unlock(&uniphy->lock);
+		return ERR_PTR(-EBUSY);
+	}
+
+	/* check uniphy sharing is possible */
+	if (uniphy->channels_used) {
+
+		/* only some interface modes support multiple
+		 * channels */
+		switch (interface) {
+		case PHY_INTERFACE_MODE_QSGMII:
+		case PHY_INTERFACE_MODE_PSGMII:
+		case PHY_INTERFACE_MODE_10G_QXGMII:
+			break;
+		default:
+			uniphy_err(uniphy,
+				   "interface mode %s does not support "
+				   "multiple channel\n",
+				   phy_modes(interface));
+			mutex_unlock(&uniphy->lock);
+			return ERR_PTR(-ENOTSUPP);
+		}
+
+		/* cannot set different channels of same uniphy to
+		 * different interface mode */
+		if (uniphy->cur_interface != interface) {
+			uniphy_err(uniphy, "interface conflict\n");
+			mutex_unlock(&uniphy->lock);
+			return ERR_PTR(-EBUSY);
+		}
+
+		/* only some interface types support having different
+		 * inband autoneg mode per-channel */
+		switch (interface) {
+		case PHY_INTERFACE_MODE_QSGMII:
+		case PHY_INTERFACE_MODE_PSGMII:
+			break;
+		default:
+			if (uniphy->cur_mode != mode) {
+				uniphy_err(uniphy,
+				   "inband-aneg mode conflict\n");
+				mutex_unlock(&uniphy->lock);
+				return ERR_PTR(-EBUSY);
+			}
+			break;
+		}
+	}
+
+	if (!uniphy->channels_used) {
+		uniphy_dbg(uniphy,
+			   "initializing in mode %s (in-band-autoneg:%d)\n",
+			   phy_modes(interface),
+			   (mode == MLO_AN_INBAND) ? 1 : 0);
+
+		ret = uniphy_powerup(uniphy);
+		if (ret) {
+			mutex_unlock(&uniphy->lock);
+			return ERR_PTR(ret);
+		}
+
+		switch (interface) {
+		case PHY_INTERFACE_MODE_1000BASEX:
+			ret = uniphy_1000basex_mode_set(uniphy, uc,
+							interface, mode);
+			break;
+		case PHY_INTERFACE_MODE_2500BASEX:
+			/* FIXME: implement */
+			ret = -ENOTSUPP;
+			break;
+		case PHY_INTERFACE_MODE_SGMII:
+		case PHY_INTERFACE_MODE_QSGMII:
+		case PHY_INTERFACE_MODE_PSGMII:
+			ret = uniphy_pqsgmii_mode_set(uniphy, uc,
+						      interface, mode);
+			break;
+		case PHY_INTERFACE_MODE_USXGMII:
+		case PHY_INTERFACE_MODE_10G_QXGMII:
+			ret = uniphy_usxgmii_mode_set(uniphy, interface, mode);
+			break;
+		case PHY_INTERFACE_MODE_10GBASER:
+			ret = uniphy_10gr_mode_set(uniphy);
+			break;
+		default:
+			uniphy_err(uniphy,
+				   "unsupported interface requested: %s\n",
+				   phy_modes(interface));
+			ret = -ENOTSUPP;
+			break;
+		}
+
+		if (ret) {
+			uniphy_powerdown(uniphy);
+			mutex_unlock(&uniphy->lock);
+			return ERR_PTR(ret);
+		}
+
+		/*
+		 * setup clock speed
+		 *
+		 * those are not internal clocks, but just references
+		 * to external clocks, which can't be fixed-clock
+		 * since they change rate depending on which speed the
+		 * serdes is running (eg, sgmii: 125Mhz, 2500base-x:
+		 * 156.25Mhz, 10g: 312.5Mhz).
+		 *
+		 * setup actual clock rate for this link mode so that
+		 * linux clk core can compute the correct dividers for
+		 * internal clocks
+		 */
+		switch (interface) {
+		case PHY_INTERFACE_MODE_1000BASEX:
+		case PHY_INTERFACE_MODE_SGMII:
+		case PHY_INTERFACE_MODE_QSGMII:
+		case PHY_INTERFACE_MODE_PSGMII:
+			rate = RATE_125MHZ;
+			break;
+		case PHY_INTERFACE_MODE_2500BASEX:
+		case PHY_INTERFACE_MODE_10GBASER:
+		case PHY_INTERFACE_MODE_USXGMII:
+		case PHY_INTERFACE_MODE_10G_QXGMII:
+			rate = RATE_312MHZ;
+			break;
+		default:
+			rate = 0;
+			break;
+		}
+
+		ret = clk_set_rate(uniphy->clocks->rx->clk, rate);
+		ret |= clk_set_rate(uniphy->clocks->tx->clk, rate);
+		if (ret) {
+			uniphy_err(uniphy,
+				   "failed to set uniphy clock rate: %d\n",
+				   ret);
+			mutex_unlock(&uniphy->lock);
+			return ERR_PTR(ret);
+		}
+	}
+
+	uniphy_dbg(uniphy,
+		   "configuring channel %d for %s mode\n",
+		   uc->id, phylink_autoneg_inband(mode) ?
+		   "in-band-autoneg" : "forced-speed");
+
+	/* now setup channel */
+	switch (interface) {
+	case PHY_INTERFACE_MODE_1000BASEX:
+		uniphy_1000basex_channel_init(uc, mode, advertising);
+		break;
+	case PHY_INTERFACE_MODE_SGMII:
+	case PHY_INTERFACE_MODE_QSGMII:
+	case PHY_INTERFACE_MODE_PSGMII:
+		uniphy_pqsgmii_channel_init(uc, mode);
+		break;
+	case PHY_INTERFACE_MODE_USXGMII:
+	case PHY_INTERFACE_MODE_10G_QXGMII:
+		uniphy_usxgmii_channel_init(uc, mode);
+		break;
+	default:
+		break;
+	}
+
+	if (!ret) {
+		uniphy->channels_used |= (1 << uc->id);
+		uniphy->cur_interface = interface;
+		uniphy->cur_mode = mode;
+		uniphy_dbg(uniphy, "per-channel usage now 0x%x\n",
+			   uniphy->channels_used);
+	}
+	mutex_unlock(&uniphy->lock);
+	return uc;
+}
+
+/*
+ * release reference count on uniphy channel
+ */
+void ess_uniphy_channel_put(struct ess_uniphy_channel *uc)
+{
+	struct ess_uniphy *uniphy = uc->uniphy;
+
+	mutex_lock(&uniphy->lock);
+
+	if (WARN_ON(!(uniphy->channels_used & (1 << uc->id)))) {
+		mutex_unlock(&uniphy->lock);
+		return;
+	}
+
+	uniphy->channels_used &= ~(1 << uc->id);
+
+	if (!uniphy->channels_used)
+		uniphy_dbg(uniphy, "uniphy now unused\n");
+	else
+		uniphy_dbg(uniphy, "per-channel usage now 0x%x\n",
+			   uniphy->channels_used);
+
+	if (!uniphy->channels_used) {
+		cancel_delayed_work(&uniphy->debounce_work); /* stop link debounce polling */
+		uniphy_powerdown(uniphy);
+	}
+
+	mutex_unlock(&uniphy->lock);
+}
+
+/*
+ * report the interface/mode an in-use channel is configured for (NA if unused)
+ */
+void
+ess_uniphy_channel_config_get(struct ess_uniphy_channel *uc,
+			      phy_interface_t *interface, int *mode)
+{
+	struct ess_uniphy *uniphy = uc->uniphy;
+
+	mutex_lock(&uniphy->lock);
+
+	if (WARN_ON(!(uniphy->channels_used & (1 << uc->id)))) {
+		*interface = PHY_INTERFACE_MODE_NA;
+		mutex_unlock(&uniphy->lock);
+		return;
+	}
+
+	*interface = uniphy->cur_interface;
+	*mode = uniphy->cur_mode;
+	mutex_unlock(&uniphy->lock);
+}
+
+/*
+ * report whether the serdes currently has sync/link, per interface family
+ */
+bool ess_uniphy_sync_ok_get(struct ess_uniphy_channel *uc)
+{
+	struct ess_uniphy *uniphy = uc->uniphy;
+	bool sync_ok = false;
+
+	mutex_lock(&uniphy->lock);
+	if (uniphy->channels_used) {
+		switch (uniphy->cur_interface) {
+		case PHY_INTERFACE_MODE_USXGMII:
+		case PHY_INTERFACE_MODE_10G_QXGMII:
+		case PHY_INTERFACE_MODE_10GBASER:
+			sync_ok = xpcs_has_10gr_link(uniphy);
+			break;
+		case PHY_INTERFACE_MODE_SGMII:
+		case PHY_INTERFACE_MODE_QSGMII:
+		case PHY_INTERFACE_MODE_PSGMII:
+		case PHY_INTERFACE_MODE_1000BASEX:
+		case PHY_INTERFACE_MODE_2500BASEX:
+			sync_ok = uniphy_has_8b10b_sync_get(uc);
+			break;
+		default:
+			break;
+		}
+	}
+	mutex_unlock(&uniphy->lock);
+	return sync_ok;
+}
+
+/*
+ * one-time init: channel state, reset lines, debounce workqueue, debug hooks
+ */
+int ess_uniphy_init(struct ipq95xx_ess_priv *priv,
+		    unsigned int uniphy_id)
+{
+	struct device *dev = &priv->pdev->dev;
+	struct ess_uniphy *uniphy = &priv->uniphys[uniphy_id];
+	char name[32];
+	/* errors propagate directly via PTR_ERR()/-ENOMEM below */
+	size_t i;
+
+	uniphy->id = uniphy_id;
+	uniphy->priv = priv;
+	mutex_init(&uniphy->lock);
+	uniphy->cur_interface = PHY_INTERFACE_MODE_NA;
+	uniphy->channels_used = 0;
+	uniphy->clocks = &priv->uniphy_clocks[uniphy_id];
+
+	for (i = 0; i < ARRAY_SIZE(uniphy->channels); i++) {
+		struct ess_uniphy_channel *uc = &uniphy->channels[i];
+		uc->id = i;
+		uc->uniphy = uniphy;
+	}
+
+	/* get a reference to all needed resets */
+	scnprintf(name, sizeof (name), "uniphy%u_soft_rst", uniphy_id);
+	uniphy->soft_rst = devm_reset_control_get(dev, name);
+	if (IS_ERR(uniphy->soft_rst)) {
+		uniphy_err(uniphy, "failed to get reset %s: %ld\n",
+			   name, PTR_ERR(uniphy->soft_rst));
+		return PTR_ERR(uniphy->soft_rst);
+	}
+
+	scnprintf(name, sizeof (name), "uniphy%u_xpcs_rst", uniphy_id);
+	uniphy->xpcs_rst = devm_reset_control_get(dev, name);
+	if (IS_ERR(uniphy->xpcs_rst)) {
+		uniphy_err(uniphy, "failed to get reset %s: %ld\n",
+			   name, PTR_ERR(uniphy->xpcs_rst));
+		return PTR_ERR(uniphy->xpcs_rst);
+	}
+
+	scnprintf(name, sizeof (name), "uniphy%u_sys_rst", uniphy_id);
+	uniphy->sys_rst = devm_reset_control_get(dev, name);
+	if (IS_ERR(uniphy->sys_rst)) {
+		uniphy_err(uniphy, "failed to get reset %s: %ld\n",
+			   name, PTR_ERR(uniphy->sys_rst));
+		return PTR_ERR(uniphy->sys_rst);
+	}
+
+	/* create debounce workqueue */
+	scnprintf(uniphy->debounce_wq_name,
+		  sizeof (uniphy->debounce_wq_name),
+		  "phylink-wq-%s%d",  dev_name(dev), uniphy_id);
+
+	uniphy->debounce_wq =
+		create_singlethread_workqueue(uniphy->debounce_wq_name);
+	if (!uniphy->debounce_wq)
+		return -ENOMEM;
+
+	INIT_DELAYED_WORK(&uniphy->debounce_work, debounce_wq_func);
+
+	uniphy_dbg_init(uniphy);
+
+	return 0;
+}
+
+/*
+ * teardown: stop the debounce work/workqueue and release debug hooks
+ */
+void ess_uniphy_release(struct ess_uniphy *uniphy)
+{
+	if (uniphy->debounce_wq) {
+		cancel_delayed_work(&uniphy->debounce_work);
+		destroy_workqueue(uniphy->debounce_wq);
+	}
+	uniphy_dbg_release(uniphy);
+}
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./uniphy_priv.h linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/uniphy_priv.h
--- linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx./uniphy_priv.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/qualcomm/ipq95xx/uniphy_priv.h	2023-05-22 20:30:14.541854042 +0200
@@ -0,0 +1,42 @@
+#ifndef UNIPHY_PRIV_H_
+#define UNIPHY_PRIV_H_
+
+#include "regs/uniphy_regs.h"
+#include "ipq95xx_ess.h"
+
+/*
+ * per-uniphy printk wrappers
+ */
+#define uniphy_printk(level, uni, fmt, ...)			  \
+	do {							  \
+		dev_printk(level, &uni->priv->pdev->dev,	  \
+			   "uniphy%d: " fmt, uni->id,		  \
+			   ##__VA_ARGS__);			  \
+	} while (0)
+
+#define uniphy_err(uni, fmt, ...)				\
+	uniphy_printk(KERN_ERR, uni, fmt, ##__VA_ARGS__)
+
+#define uniphy_warn(uni, fmt, ...)				\
+	uniphy_printk(KERN_WARNING, uni, fmt, ##__VA_ARGS__)
+
+#if defined(DEBUG)
+#define uniphy_dbg(uni, fmt, ...)					\
+	uniphy_printk(KERN_DEBUG, uni, fmt, ##__VA_ARGS__)
+#else
+#define uniphy_dbg(uni, fmt, ...)					\
+({									\
+	if (0)								\
+		uniphy_printk(KERN_DEBUG, uni, fmt, ##__VA_ARGS__);	\
+})
+#endif
+
+u32 uphy_readl(struct ess_uniphy *uniphy, u32 reg);
+
+void uphy_writel(struct ess_uniphy *uniphy, u32 reg, u32 val);
+
+void uniphy_dbg_init(struct ess_uniphy *uniphy);
+
+void uniphy_dbg_release(struct ess_uniphy *uniphy);
+
+#endif /* UNIPHY_PRIV_H_ */
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/wintegra./Kconfig linux-6.4-fbx/drivers/net/ethernet/wintegra/Kconfig
--- linux-6.4-fbx/drivers/net/ethernet/wintegra./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/wintegra/Kconfig	2023-03-09 15:06:11.368234332 +0100
@@ -0,0 +1,10 @@
+config NET_VENDOR_WINTEGRA
+	bool
+
+config WINTEGRA_WINPATH3_ETH
+	tristate "Wintegra Winpath3 internal mac support"
+	depends on WINTEGRA_WINPATH3
+	select NET_VENDOR_WINTEGRA
+	select NET_CORE
+	select MII
+	select PHYLIB
diff -Nruw linux-6.4-fbx/drivers/net/ethernet/wintegra./Makefile linux-6.4-fbx/drivers/net/ethernet/wintegra/Makefile
--- linux-6.4-fbx/drivers/net/ethernet/wintegra./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/ethernet/wintegra/Makefile	2023-03-09 15:06:11.368234332 +0100
@@ -0,0 +1 @@
+obj-$(CONFIG_WINTEGRA_WINPATH3_ETH) += wp3_eth.o
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/phy/qca8084.c	2023-12-05 17:14:42.303715125 +0100
@@ -0,0 +1,2565 @@
+/*
+ * light driver for QCA8084/QCA8085 (aka "Manhattan")
+ *
+ * QCA8084/QCA8085: quad ports 2.5Gbit/s (only 8085 has macsec)
+ * QCA8082: dual ports 2.5Gbit/s (has macsec)
+ */
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include "qca8084.h"
+
+enum stat_access_type {
+	PHY,
+	MMD
+};
+
+struct qca8084_hw_stat {
+	const char *string;
+	enum stat_access_type access_type;
+	u8 mmd_id;
+	u16 reg;
+	u8 count;
+	u32 mask;
+};
+
+static struct qca8084_hw_stat qca8084_hw_stats[] = {
+	{ "phy_idle_errors", PHY, 0x00, 0xa, 1, GENMASK(7, 0) },
+	{ "eee_wake_errors", MMD, 0x03, 0x16, 1, GENMASK(15, 0) },
+	{ "ingress_frames",  MMD, 0x07, 0x802a, 2, GENMASK(31, 0) },
+	{ "ingress_bad_crc",  MMD, 0x07, 0x802c, 1, GENMASK(15, 0) },
+	{ "egress_frames",  MMD, 0x07, 0x802d, 2, GENMASK(31, 0) },
+	{ "egress_bad_crc",  MMD, 0x07, 0x802f, 1, GENMASK(15, 0) },
+};
+
+struct qca8084_package_priv {
+	struct mii_bus		*bus;
+	struct mutex		lock;
+	unsigned int		ephy_count;
+	unsigned int		serdes_count;
+	u16			soc_base;
+	phy_interface_t		uniphy1_mode;
+	struct phy_device	*uniphy1_phydev;
+	struct phy_device	*xpcs_phydev;
+
+	/* used for logging */
+	struct phy_device	*phydev;
+};
+
+struct qca8084_priv {
+	unsigned int		ptype;
+	unsigned int		physid;
+	int			led_pin;
+	bool			led_act_blink;
+	bool			led_link_speed_any;
+	u64			stats[ARRAY_SIZE(qca8084_hw_stats)];
+};
+
+struct qca8084_clk_parent_data {
+	u64 freq;
+	u32 id;
+	u32 cfg;
+};
+
+struct qca8084_clk {
+	const char *name;
+	u32 rcgr;
+	u32 cdiv;
+	u32 cbcr;
+	u32 reset_bit;
+	const struct qca8084_clk_parent_data *parents;
+	size_t nr_parents;
+};
+
+/*
+ * clocks definition switch core
+ */
+static const struct qca8084_clk_parent_data gcc_switch_core_parent_data[] = {
+	{
+		.freq = QCA8084_XO_CLK_RATE_50M,
+		.id = QCA8084_P_XO,
+		.cfg = 0
+	},
+	{
+		.freq = UQXGMII_SPEED_2500M_CLK,
+		.id = QCA8084_P_UNIPHY1_TX312P5M,
+		.cfg = 1,
+	},
+};
+
+const struct qca8084_clk gcc_switch_core_clk = {
+	.name		= "gcc_switch_core_clk",
+	.rcgr		= 0x04,
+	.cdiv		= 0x00,
+	.cbcr		= 0x08,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_switch_core_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_switch_core_parent_data),
+};
+
+const struct qca8084_clk gcc_apb_bridge_clk = {
+	.name		= "gcc_apb_bridge_clk",
+	.rcgr		= 0x04,
+	.cdiv		= 0x00,
+	.cbcr		= 0x10,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+/*
+ * same parent data for mac 1, 2, and 3 RX/TX clk
+ */
+static const struct qca8084_clk_parent_data gcc_mac123_tx_clk_parent_data[] = {
+	{
+		.freq	= QCA8084_XO_CLK_RATE_50M,
+		.id	= QCA8084_P_XO,
+		.cfg	= 0,
+	} ,
+	{
+		.freq	= UQXGMII_SPEED_2500M_CLK,
+		.id	= QCA8084_P_UNIPHY1_TX312P5M,
+		.cfg	= 6,
+	},
+	{
+		.freq	= UQXGMII_SPEED_2500M_CLK,
+		.id	= QCA8084_P_UNIPHY1_RX312P5M,
+		.cfg	= 7,
+	},
+};
+
+static const struct qca8084_clk_parent_data gcc_mac123_rx_clk_parent_data[] = {
+	{
+		.freq	= QCA8084_XO_CLK_RATE_50M,
+		.id	= QCA8084_P_XO,
+		.cfg	= 0,
+	},
+	{
+		.freq	= UQXGMII_SPEED_2500M_CLK,
+		.id	= QCA8084_P_UNIPHY1_TX312P5M,
+		.cfg	= 6,
+	},
+};
+
+/*
+ * dedicated parent data for mac 4 RX/TX clk
+ */
+static const struct qca8084_clk_parent_data gcc_mac4_tx_clk_parent_data[] = {
+	{
+		.freq	= QCA8084_XO_CLK_RATE_50M,
+		.id	= QCA8084_P_XO,
+		.cfg	= 0,
+	},
+	{
+		.freq	= UQXGMII_SPEED_1000M_CLK,
+		.id	= QCA8084_P_UNIPHY0_RX,
+		.cfg	= 1,
+	},
+	{
+		.freq	= UQXGMII_SPEED_2500M_CLK,
+		.id	= QCA8084_P_UNIPHY0_RX,
+		.cfg	= 1,
+	},
+	{
+		.freq	= UQXGMII_SPEED_2500M_CLK,
+		.id	= QCA8084_P_UNIPHY1_TX312P5M,
+		.cfg	= 3,
+	},
+	{
+		.freq	= UQXGMII_SPEED_2500M_CLK,
+		.id	= QCA8084_P_UNIPHY1_RX312P5M,
+		.cfg	= 7
+	},
+};
+
+static const struct qca8084_clk_parent_data gcc_mac4_rx_clk_parent_data[] = {
+	{
+		.freq	= QCA8084_XO_CLK_RATE_50M,
+		.id	= QCA8084_P_XO,
+		.cfg	= 0,
+	},
+	{
+		.freq	= UQXGMII_SPEED_1000M_CLK,
+		.id	= QCA8084_P_UNIPHY0_TX,
+		.cfg	= 2
+	},
+	{
+		.freq	= UQXGMII_SPEED_2500M_CLK,
+		.id	= QCA8084_P_UNIPHY0_TX,
+		.cfg	= 2
+	},
+	{
+		.freq	= UQXGMII_SPEED_2500M_CLK,
+		.id	= QCA8084_P_UNIPHY1_TX312P5M,
+		.cfg	= 3,
+	},
+};
+
+/*
+ * port1
+ */
+const struct qca8084_clk gcc_mac1_srds1_ch0_rx_clk = {
+	.name		= "gcc_mac1_srds1_ch0_rx_clk",
+	.rcgr		= 0x44,
+	.cdiv		= 0x48,
+	.cbcr		= 0x50,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_mac1_srds1_ch0_tx_clk = {
+	.name		= "gcc_mac1_srds1_ch0_tx_clk",
+	.rcgr		= 0x64,
+	.cdiv		= 0x68,
+	.cbcr		= 0x70,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_mac1_srds1_ch0_xgmii_rx_clk = {
+	.name		= "gcc_mac1_srds1_ch0_xgmii_rx_clk",
+	.rcgr		= 0x44,
+	.cdiv		= 0x4c,
+	.cbcr		= 0x5c,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_mac123_tx_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_mac123_tx_clk_parent_data),
+};
+
+const struct qca8084_clk gcc_mac1_srds1_ch0_xgmii_tx_clk = {
+	.name		= "gcc_mac1_srds1_ch0_xgmii_tx_clk",
+	.rcgr		= 0x64,
+	.cdiv		= 0x6c,
+	.cbcr		= 0x7c,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_mac123_rx_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_mac123_rx_clk_parent_data),
+};
+
+const struct qca8084_clk gcc_mac1_gephy0_tx_clk = {
+	.name		= "gcc_mac1_gephy0_tx_clk",
+	.rcgr		= 0x44,
+	.cdiv		= 0x48,
+	.cbcr		= 0x58,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_mac1_gephy0_rx_clk = {
+	.name		= "gcc_mac1_gephy0_rx_clk",
+	.rcgr		= 0x64,
+	.cdiv		= 0x68,
+	.cbcr		= 0x78,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+/*
+ * port2
+ */
+const struct qca8084_clk gcc_mac2_srds1_ch1_rx_clk = {
+	.name		= "gcc_mac2_srds1_ch1_rx_clk",
+	.rcgr		= 0x84,
+	.cdiv		= 0x88,
+	.cbcr		= 0x90,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_mac2_srds1_ch1_tx_clk = {
+	.name		= "gcc_mac2_srds1_ch1_tx_clk",
+	.rcgr		= 0xa4,
+	.cdiv		= 0xa8,
+	.cbcr		= 0xb0,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_mac2_srds1_ch1_xgmii_rx_clk = {
+	.name		= "gcc_mac2_srds1_ch1_xgmii_rx_clk",
+	.rcgr		= 0x84,
+	.cdiv		= 0x8c,
+	.cbcr		= 0x9c,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_mac123_tx_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_mac123_tx_clk_parent_data),
+};
+
+const struct qca8084_clk gcc_mac2_srds1_ch1_xgmii_tx_clk = {
+	.name		= "gcc_mac2_srds1_ch1_xgmii_tx_clk",
+	.rcgr		= 0xa4,
+	.cdiv		= 0xac,
+	.cbcr		= 0xbc,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_mac123_rx_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_mac123_rx_clk_parent_data),
+};
+
+const struct qca8084_clk gcc_mac2_gephy1_tx_clk = {
+	.name		= "gcc_mac2_gephy1_tx_clk",
+	.rcgr		= 0x84,
+	.cdiv		= 0x88,
+	.cbcr		= 0x98,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_mac2_gephy1_rx_clk = {
+	.name		= "gcc_mac2_gephy1_rx_clk",
+	.rcgr		= 0xa4,
+	.cdiv		= 0xa8,
+	.cbcr		= 0xb8,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+/*
+ * port3
+ */
+const struct qca8084_clk gcc_mac3_srds1_ch2_rx_clk = {
+	.name		= "gcc_mac3_srds1_ch2_rx_clk",
+	.rcgr		= 0xc4,
+	.cdiv		= 0xc8,
+	.cbcr		= 0xd0,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_mac3_srds1_ch2_tx_clk = {
+	.name		= "gcc_mac3_srds1_ch2_tx_clk",
+	.rcgr		= 0xe4,
+	.cdiv		= 0xe8,
+	.cbcr		= 0xf0,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_mac3_srds1_ch2_xgmii_rx_clk = {
+	.name		= "gcc_mac3_srds1_ch2_xgmii_rx_clk",
+	.rcgr		= 0xc4,
+	.cdiv		= 0xcc,
+	.cbcr		= 0xdc,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_mac123_tx_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_mac123_tx_clk_parent_data),
+};
+
+const struct qca8084_clk gcc_mac3_srds1_ch2_xgmii_tx_clk = {
+	.name		= "gcc_mac3_srds1_ch2_xgmii_tx_clk",
+	.rcgr		= 0xe4,
+	.cdiv		= 0xec,
+	.cbcr		= 0xfc,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_mac123_rx_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_mac123_rx_clk_parent_data),
+};
+
+const struct qca8084_clk gcc_mac3_gephy2_tx_clk = {
+	.name		= "gcc_mac3_gephy2_tx_clk",
+	.rcgr		= 0xc4,
+	.cdiv		= 0xc8,
+	.cbcr		= 0xd8,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_mac3_gephy2_rx_clk = {
+	.name		= "gcc_mac3_gephy2_rx_clk",
+	.rcgr		= 0xe4,
+	.cdiv		= 0xe8,
+	.cbcr		= 0xf8,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+/*
+ * port4
+ */
+const struct qca8084_clk gcc_mac4_srds1_ch3_rx_clk = {
+	.name		= "gcc_mac4_srds1_ch3_rx_clk",
+	.rcgr		= 0x104,
+	.cdiv		= 0x108,
+	.cbcr		= 0x110,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_mac4_srds1_ch3_tx_clk = {
+	.name		= "gcc_mac4_srds1_ch3_tx_clk",
+	.rcgr		= 0x124,
+	.cdiv		= 0x128,
+	.cbcr		= 0x130,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_mac4_srds1_ch3_xgmii_rx_clk = {
+	.name		= "gcc_mac4_srds1_ch3_xgmii_rx_clk",
+	.rcgr		= 0x104,
+	.cdiv		= 0x10c,
+	.cbcr		= 0x11c,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_mac4_tx_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_mac4_tx_clk_parent_data),
+};
+
+const struct qca8084_clk gcc_mac4_srds1_ch3_xgmii_tx_clk = {
+	.name		= "gcc_mac4_srds1_ch3_xgmii_tx_clk",
+	.rcgr		= 0x124,
+	.cdiv		= 0x12c,
+	.cbcr		= 0x13c,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_mac4_rx_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_mac4_rx_clk_parent_data),
+};
+
+const struct qca8084_clk gcc_mac4_gephy3_tx_clk = {
+	.name		= "gcc_mac4_gephy3_tx_clk",
+	.rcgr		= 0x104,
+	.cdiv		= 0x108,
+	.cbcr		= 0x118,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_mac4_gephy3_rx_clk = {
+	.name		= "gcc_mac4_gephy3_rx_clk",
+	.rcgr		= 0x124,
+	.cdiv		= 0x128,
+	.cbcr		= 0x138,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+/*
+ * port1 MAC RX/TX clocks
+ */
+const struct qca8084_clk gcc_mac1_tx_clk = {
+	.name		= "gcc_mac1_tx_clk",
+	.rcgr		= 0x44,
+	.cdiv		= 0x48,
+	.cbcr		= 0x54,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_mac123_tx_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_mac123_tx_clk_parent_data),
+};
+
+const struct qca8084_clk gcc_mac1_rx_clk = {
+	.name		= "gcc_mac1_rx_clk",
+	.rcgr		= 0x64,
+	.cdiv		= 0x68,
+	.cbcr		= 0x74,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_mac123_rx_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_mac123_rx_clk_parent_data),
+};
+
+/*
+ * port2 MAC RX/TX clocks
+ */
+const struct qca8084_clk gcc_mac2_tx_clk = {
+	.name		= "gcc_mac2_tx_clk",
+	.rcgr		= 0x84,
+	.cdiv		= 0x88,
+	.cbcr		= 0x94,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_mac123_tx_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_mac123_tx_clk_parent_data),
+};
+
+const struct qca8084_clk gcc_mac2_rx_clk = {
+	.name		= "gcc_mac2_rx_clk",
+	.rcgr		= 0xa4,
+	.cdiv		= 0xa8,
+	.cbcr		= 0xb4,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_mac123_rx_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_mac123_rx_clk_parent_data),
+};
+
+/*
+ * port3 MAC RX/TX clocks
+ */
+const struct qca8084_clk gcc_mac3_tx_clk = {
+	.name		= "gcc_mac3_tx_clk",
+	.rcgr		= 0xc4,
+	.cdiv		= 0xc8,
+	.cbcr		= 0xd4,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_mac123_tx_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_mac123_tx_clk_parent_data),
+};
+
+const struct qca8084_clk gcc_mac3_rx_clk = {
+	.name		= "gcc_mac3_rx_clk",
+	.rcgr		= 0xe4,
+	.cdiv		= 0xe8,
+	.cbcr		= 0xf4,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_mac123_rx_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_mac123_rx_clk_parent_data),
+};
+
+/*
+ * port4 MAC RX/TX clocks
+ */
+const struct qca8084_clk gcc_mac4_tx_clk = {
+	.name		= "gcc_mac4_tx_clk",
+	.rcgr		= 0x104,
+	.cdiv		= 0x108,
+	.cbcr		= 0x114,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_mac4_tx_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_mac4_tx_clk_parent_data),
+};
+
+const struct qca8084_clk gcc_mac4_rx_clk = {
+	.name		= "gcc_mac4_rx_clk",
+	.rcgr		= 0x124,
+	.cdiv		= 0x128,
+	.cbcr		= 0x134,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_mac4_rx_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_mac4_rx_clk_parent_data),
+};
+
+/*
+ * AHB bridge clock
+ */
+static const struct qca8084_clk_parent_data gcc_ahb_clk_parent_data[] = {
+	{
+		.freq	= QCA8084_XO_CLK_RATE_50M,
+		.id	= QCA8084_P_XO,
+		.cfg	= 0
+	},
+	{
+		.freq	= UQXGMII_SPEED_2500M_CLK,
+		.id	= QCA8084_P_UNIPHY1_TX312P5M,
+		.cfg	= 2
+	},
+};
+
+const struct qca8084_clk gcc_ahb_clk = {
+	.name		= "gcc_ahb_clk",
+	.rcgr		= 0x16c,
+	.cdiv		= 0x000,
+	.cbcr		= 0x170,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_ahb_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_ahb_clk_parent_data),
+};
+
+const struct qca8084_clk gcc_sec_ctrl_ahb_clk = {
+	.name		= "gcc_sec_ctrl_ahb_clk",
+	.rcgr		= 0x16c,
+	.cdiv		= 0x000,
+	.cbcr		= 0x174,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_tlmm_clk = {
+	.name		= "gcc_tlmm_clk",
+	.rcgr		= 0x16c,
+	.cdiv		= 0x000,
+	.cbcr		= 0x178,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+
+const struct qca8084_clk gcc_tlmm_ahb_clk = {
+	.name		= "gcc_tlmm_ahb_clk",
+	.rcgr		= 0x16c,
+	.cdiv		= 0x000,
+	.cbcr		= 0x190,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_cnoc_ahb_clk = {
+	.name		= "gcc_cnoc_ahb_clk",
+	.rcgr		= 0x16c,
+	.cdiv		= 0x000,
+	.cbcr		= 0x194,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+/* XXX: same clock in gcc-ipq9574 ... */
+static const struct qca8084_clk gcc_mdio_ahb_clk = {
+	.name		= "gcc_mdio_ahb_clk",
+	.rcgr		= 0x16c,
+	.cdiv		= 0x000,
+	.cbcr		= 0x198,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_mdio_master_ahb_clk = {
+	.name		= "gcc_mdio_master_ahb_clk",
+	.rcgr		= 0x16c,
+	.cdiv		= 0x000,
+	.cbcr		= 0x19c,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+/*
+ * system
+ */
+static const struct qca8084_clk_parent_data gcc_sys_clk_parent_data[] = {
+	{
+		.freq	= QCA8084_XO_CLK_RATE_50M,
+		.id	= QCA8084_P_XO,
+		.cfg	= 0
+	},
+};
+
+const struct qca8084_clk gcc_srds0_sys_clk = {
+	.name		= "gcc_srds0_sys_clk",
+	.rcgr		= 0x1a4,
+	.cdiv		= 0x000,
+	.cbcr		= 0x1a8,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_sys_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_sys_clk_parent_data),
+};
+
+const struct qca8084_clk gcc_srds1_sys_clk = {
+	.name		= "gcc_srds1_sys_clk",
+	.rcgr		= 0x1a4,
+	.cdiv		= 0x000,
+	.cbcr		= 0x1ac,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_sys_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_sys_clk_parent_data),
+};
+
+const struct qca8084_clk gcc_gephy0_sys_clk = {
+	.name		= "gcc_gephy0_sys_clk",
+	.rcgr		= 0x1a4,
+	.cdiv		= 0x000,
+	.cbcr		= 0x1b0,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_gephy1_sys_clk = {
+	.name		= "gcc_gephy1_sys_clk",
+	.rcgr		= 0x1a4,
+	.cdiv		= 0x000,
+	.cbcr		= 0x1b4,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_gephy2_sys_clk = {
+	.name		= "gcc_gephy2_sys_clk",
+	.rcgr		= 0x1a4,
+	.cdiv		= 0x000,
+	.cbcr		= 0x1b8,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+const struct qca8084_clk gcc_gephy3_sys_clk = {
+	.name		= "gcc_gephy3_sys_clk",
+	.rcgr		= 0x1a4,
+	.cdiv		= 0x000,
+	.cbcr		= 0x1bc,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+/*
+ * SEC control (whatever that is)
+ */
+const struct qca8084_clk gcc_sec_ctrl_clk = {
+	.name		= "gcc_sec_ctrl_clk",
+	.rcgr		= 0x1c4,
+	.cdiv		= 0x000,
+	.cbcr		= 0x1c8,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+	.parents	= gcc_sys_clk_parent_data,
+	.nr_parents	= ARRAY_SIZE(gcc_sys_clk_parent_data),
+};
+
+const struct qca8084_clk gcc_sec_ctrl_sense_clk = {
+	.name		= "gcc_sec_ctrl_sense_clk",
+	.rcgr		= 0x1c4,
+	.cdiv		= 0x000,
+	.cbcr		= 0x1d0,
+	.reset_bit	= CBCR_CLK_RESET_BIT,
+};
+
+/*
+ * global
+ */
+const struct qca8084_clk gcc_global_rst = {
+	.name = "gcc_global_rst",
+	.rcgr		= 0x000,
+	.cdiv		= 0x000,
+	.cbcr		= 0x308,
+	.reset_bit	= 0,
+};
+
+const struct qca8084_clk gcc_uniphy_xpcs_rst = {
+	.name = "gcc_uniphy_xpcs_rst",
+	.rcgr		= 0x000,
+	.cdiv		= 0x000,
+	.cbcr		= 0x30c,
+	.reset_bit	= 0,
+};
+
+struct port_rxtx_clk {
+	const struct qca8084_clk *mac_rx_clk;
+	const struct qca8084_clk *mac_tx_clk;
+	const struct qca8084_clk *xgmii_rx_clk;
+	const struct qca8084_clk *xgmii_tx_clk;
+	const struct qca8084_clk *srds_rx_clk;
+	const struct qca8084_clk *srds_tx_clk;
+	const struct qca8084_clk *gephy_rx_clk;
+	const struct qca8084_clk *gephy_tx_clk;
+};
+
+static const struct qca8084_clk *ports_clocks[] = {
+	&gcc_mac1_srds1_ch0_rx_clk,
+	&gcc_mac1_srds1_ch0_tx_clk,
+	&gcc_mac1_srds1_ch0_xgmii_rx_clk,
+	&gcc_mac1_srds1_ch0_xgmii_tx_clk,
+	&gcc_mac1_gephy0_tx_clk,
+	&gcc_mac1_gephy0_rx_clk,
+	&gcc_mac2_srds1_ch1_rx_clk,
+	&gcc_mac2_srds1_ch1_tx_clk,
+	&gcc_mac2_srds1_ch1_xgmii_rx_clk,
+	&gcc_mac2_srds1_ch1_xgmii_tx_clk,
+	&gcc_mac2_gephy1_tx_clk,
+	&gcc_mac2_gephy1_rx_clk,
+	&gcc_mac3_srds1_ch2_rx_clk,
+	&gcc_mac3_srds1_ch2_tx_clk,
+	&gcc_mac3_srds1_ch2_xgmii_rx_clk,
+	&gcc_mac3_srds1_ch2_xgmii_tx_clk,
+	&gcc_mac3_gephy2_tx_clk,
+	&gcc_mac3_gephy2_rx_clk,
+	&gcc_mac4_srds1_ch3_rx_clk,
+	&gcc_mac4_srds1_ch3_tx_clk,
+	&gcc_mac4_srds1_ch3_xgmii_rx_clk,
+	&gcc_mac4_srds1_ch3_xgmii_tx_clk,
+	&gcc_mac4_gephy3_tx_clk,
+	&gcc_mac4_gephy3_rx_clk,
+};
+
+static const struct port_rxtx_clk port_rxtx_clks[] = {
+	[0] = {
+		.mac_rx_clk = &gcc_mac1_rx_clk,
+		.mac_tx_clk = &gcc_mac1_tx_clk,
+		.xgmii_rx_clk = &gcc_mac1_srds1_ch0_xgmii_rx_clk,
+		.xgmii_tx_clk = &gcc_mac1_srds1_ch0_xgmii_tx_clk,
+		.srds_rx_clk = &gcc_mac1_srds1_ch0_rx_clk,
+		.srds_tx_clk = &gcc_mac1_srds1_ch0_tx_clk,
+		.gephy_rx_clk = &gcc_mac1_gephy0_rx_clk,
+		.gephy_tx_clk = &gcc_mac1_gephy0_tx_clk
+	},
+	[1] = {
+		.mac_rx_clk = &gcc_mac2_rx_clk,
+		.mac_tx_clk = &gcc_mac2_tx_clk,
+		.xgmii_rx_clk = &gcc_mac2_srds1_ch1_xgmii_rx_clk,
+		.xgmii_tx_clk = &gcc_mac2_srds1_ch1_xgmii_tx_clk,
+		.srds_rx_clk = &gcc_mac2_srds1_ch1_rx_clk,
+		.srds_tx_clk = &gcc_mac2_srds1_ch1_tx_clk,
+		.gephy_rx_clk = &gcc_mac2_gephy1_rx_clk,
+		.gephy_tx_clk = &gcc_mac2_gephy1_tx_clk
+	},
+	[2] = {
+		.mac_rx_clk = &gcc_mac3_rx_clk,
+		.mac_tx_clk = &gcc_mac3_tx_clk,
+		.xgmii_rx_clk = &gcc_mac3_srds1_ch2_xgmii_rx_clk,
+		.xgmii_tx_clk = &gcc_mac3_srds1_ch2_xgmii_tx_clk,
+		.srds_rx_clk = &gcc_mac3_srds1_ch2_rx_clk,
+		.srds_tx_clk = &gcc_mac3_srds1_ch2_tx_clk,
+		.gephy_rx_clk = &gcc_mac3_gephy2_rx_clk,
+		.gephy_tx_clk = &gcc_mac3_gephy2_tx_clk
+	},
+	[3] = {
+		.mac_rx_clk = &gcc_mac4_rx_clk,
+		.mac_tx_clk = &gcc_mac4_tx_clk,
+		.xgmii_rx_clk = &gcc_mac4_srds1_ch3_xgmii_rx_clk,
+		.xgmii_tx_clk = &gcc_mac4_srds1_ch3_xgmii_tx_clk,
+		.srds_rx_clk = &gcc_mac4_srds1_ch3_rx_clk,
+		.srds_tx_clk = &gcc_mac4_srds1_ch3_tx_clk,
+		.gephy_rx_clk = &gcc_mac4_gephy3_rx_clk,
+		.gephy_tx_clk = &gcc_mac4_gephy3_tx_clk
+	},
+};
+
+/*
+ *
+ */
+static inline struct qca8084_package_priv *phy_ppriv(struct phy_device *phydev)
+{
+	return (struct qca8084_package_priv *)phydev->shared->priv;
+}
+
+/*
+ *
+ */
+static void mdio_split_addr(u32 regaddr, u16 *r1, u16 *r2,
+			    u16 *page, u16 *switch_phy_id)
+{
+	*r1 = regaddr & 0x1c;
+
+	regaddr >>= 5;
+	*r2 = regaddr & 0x7;
+
+	regaddr >>= 3;
+	*page = regaddr & 0xffff;
+
+	regaddr >>= 16;
+	*switch_phy_id = regaddr & 0xff;
+}
+
+/*
+ *
+ */
+static u32 qca8084_soc_read(struct qca8084_package_priv *ppriv, u32 reg)
+{
+	u16 r1, r2, page, switch_phy_id;
+	u16 lo, hi;
+	u32 val;
+
+	mdio_split_addr(reg, &r1, &r2, &page, &switch_phy_id);
+
+	/* NOTE: even if it seems the addresses used on MDIO bus by
+	 * the indirect access are distinct from the ones used by
+	 * copper phydevs, we must *NOT* mix that indirect access with
+	 * them, or we will read corrupted data, hence the global mdio
+	 * bus lock here */
+	phy_lock_mdio_bus(ppriv->phydev);
+	__mdiobus_write(ppriv->bus,
+			ppriv->soc_base | 0x8 | (switch_phy_id >> 5),
+			switch_phy_id & 0x1f, page);
+	udelay(100);
+	lo = __mdiobus_read(ppriv->bus, ppriv->soc_base | r2, r1);
+	hi = __mdiobus_read(ppriv->bus, ppriv->soc_base | r2, r1 + 2);
+	val = (hi << 16) | lo;
+	phy_unlock_mdio_bus(ppriv->phydev);
+
+	return val;
+}
+
+/*
+ *
+ */
+static void qca8084_soc_write(struct qca8084_package_priv *ppriv,
+			      u32 reg, u32 val)
+{
+	u16 r1, r2, page, switch_phy_id;
+
+	mdio_split_addr(reg, &r1, &r2, &page, &switch_phy_id);
+	phy_lock_mdio_bus(ppriv->phydev);
+
+	__mdiobus_write(ppriv->bus,
+			ppriv->soc_base | 0x8 | (switch_phy_id >> 5),
+			switch_phy_id & 0x1f, page);
+	udelay(100);
+	__mdiobus_write(ppriv->bus, ppriv->soc_base | r2, r1, val & 0xffff);
+	__mdiobus_write(ppriv->bus, ppriv->soc_base | r2, r1 + 2, val >> 16);
+	phy_unlock_mdio_bus(ppriv->phydev);
+}
+
+/*
+ *
+ */
+static void qca8084_soc_modify(struct qca8084_package_priv *ppriv, u32 reg,
+			       u32 mask, u32 value)
+{
+	u32 new_val, val;
+
+	val = qca8084_soc_read(ppriv, reg);
+	new_val = (val & ~mask) | value;
+	if (new_val == val)
+		return;
+	qca8084_soc_write(ppriv, reg, new_val);
+}
+
+/*
+ * clocks helpers
+ */
+static int __qca8084_clk_ctl(struct qca8084_package_priv *ppriv,
+			     const struct qca8084_clk *clk,
+			     bool enable)
+{
+
+	qca8084_soc_modify(ppriv, QCA8084_CLK_BASE_REG + clk->cbcr,
+			   CBCR_CLK_ENABLE,
+			   enable ? CBCR_CLK_ENABLE : 0);
+
+	if (enable) {
+		u32 reg;
+
+		udelay(1);
+		reg = qca8084_soc_read(ppriv, QCA8084_CLK_BASE_REG +
+					 clk->cbcr);
+		if (reg & CBCR_CLK_OFF) {
+			phydev_err(ppriv->phydev,
+				   "failed to enable clock %s\n",
+				   clk->name);
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+static inline int
+qca8084_clk_enable(struct qca8084_package_priv *ppriv,
+		   const struct qca8084_clk *clk)
+{
+	return __qca8084_clk_ctl(ppriv, clk, true);
+}
+
+static inline  int
+qca8084_clk_disable(struct qca8084_package_priv *ppriv,
+		    const struct qca8084_clk *clk)
+{
+	return __qca8084_clk_ctl(ppriv, clk, false);
+}
+
+static inline int
+__qca8084_reset_ctl(struct qca8084_package_priv *ppriv,
+		    const struct qca8084_clk *clk,
+		    bool assert)
+{
+	qca8084_soc_modify(ppriv, QCA8084_CLK_BASE_REG + clk->cbcr,
+			   (1 << clk->reset_bit),
+			   assert ? (1 << clk->reset_bit) : 0);
+	return 0;
+}
+
+static int
+qca8084_reset_assert(struct qca8084_package_priv *ppriv,
+		     const struct qca8084_clk *clk)
+{
+	return __qca8084_reset_ctl(ppriv, clk, true);
+}
+
+static
+int qca8084_reset_deassert(struct qca8084_package_priv *ppriv,
+			   const struct qca8084_clk *clk)
+{
+	return __qca8084_reset_ctl(ppriv, clk, false);
+}
+
+static void
+qca8084_reset_pulse(struct qca8084_package_priv *ppriv,
+		    const struct qca8084_clk *clk)
+{
+	qca8084_reset_assert(ppriv, clk);
+	udelay(10);
+	qca8084_reset_deassert(ppriv, clk);
+}
+
+/*
+ *
+ */
+static int
+clk_rcg_update(struct qca8084_package_priv *ppriv,
+	       const struct qca8084_clk *clk)
+{
+	u32 reg;
+	int i;
+
+	reg = qca8084_soc_read(ppriv, QCA8084_CLK_BASE_REG + clk->rcgr - 4);
+	reg |= RCGR_CMD_UPDATE;
+	qca8084_soc_write(ppriv, QCA8084_CLK_BASE_REG + clk->rcgr - 4, reg);
+
+	for (i = 0; i < 1000; i++) {
+		reg = qca8084_soc_read(ppriv, QCA8084_CLK_BASE_REG +
+				       clk->rcgr - 4);
+		if ((reg & RCGR_CMD_UPDATE) == 0)
+			return 0;
+		msleep(1);
+	}
+
+	phydev_err(ppriv->phydev,
+		   "%s: timed out waiting for CMD update clear.\n", clk->name);
+	return -ETIMEDOUT;
+}
+
+/*
+ *
+ */
+static const struct qca8084_clk_parent_data *
+clk_get_parent_data(const struct qca8084_clk *clk, u32 parent_id)
+{
+	size_t i;
+
+	for (i = 0; i < clk->nr_parents; ++i) {
+		const struct qca8084_clk_parent_data *cur = &clk->parents[i];
+
+		if (cur->id == parent_id)
+			return cur;
+	}
+	return NULL;
+}
+
+/*
+ *
+ */
+static const struct qca8084_clk_parent_data *
+clk_get_current_parent_data(struct qca8084_package_priv *ppriv,
+			    const struct qca8084_clk *clk)
+{
+	u32 reg;
+	u32 cur_cfg;
+	size_t i;
+
+	reg = qca8084_soc_read(ppriv, QCA8084_CLK_BASE_REG + clk->rcgr);
+	cur_cfg = (reg & RCGR_SRC_SEL_MASK) >> RCGR_SRC_SEL_SHIFT;
+
+	for (i = 0; i < clk->nr_parents; ++i) {
+		const struct qca8084_clk_parent_data *cur = &clk->parents[i];
+
+		if (cur->cfg == cur_cfg)
+			return cur;
+	}
+
+	phydev_err(ppriv->phydev,
+		   "%s: parent data for config %d not found\n",
+		   clk->name, cur_cfg);
+	return NULL;
+}
+
+/*
+ *
+ */
+static int
+clk_set_parent(struct qca8084_package_priv *ppriv,
+	       const struct qca8084_clk *clk,
+	       u32 parent_id)
+{
+	const struct qca8084_clk_parent_data *the_data = NULL;
+	u32 reg;
+	u32 cur_config;
+
+	the_data = clk_get_parent_data(clk, parent_id);
+	if (!the_data) {
+		phydev_err(ppriv->phydev,
+			   "%s: parent ID %d not found\n",
+			   clk->name, parent_id);
+		return -ENOENT;
+	}
+
+	reg = qca8084_soc_read(ppriv, QCA8084_CLK_BASE_REG + clk->rcgr);
+	cur_config = (reg & RCGR_SRC_SEL_MASK) >> RCGR_SRC_SEL_SHIFT;
+
+	if (cur_config == the_data->cfg) {
+		phydev_dbg(ppriv->phydev,
+			   "%s: already configured to use source %02x\n",
+			   clk->name,  cur_config);
+		return 0;
+	}
+
+	reg &= ~RCGR_SRC_SEL_MASK;
+	reg |= (the_data->cfg << RCGR_SRC_SEL_SHIFT);
+	qca8084_soc_write(ppriv, QCA8084_CLK_BASE_REG + clk->rcgr, reg);
+
+	return clk_rcg_update(ppriv, clk);
+}
+
+/*
+ *
+ */
+static int qca8084_clk_set_rate(struct qca8084_package_priv *ppriv,
+				const struct qca8084_clk *clk,
+				u32 freq)
+{
+	const struct qca8084_clk_parent_data *clk_pdata;
+	u32 div, cdiv, reg;
+
+	clk_pdata = clk_get_current_parent_data(ppriv, clk);
+	if (!clk_pdata) {
+		phydev_err(ppriv->phydev,
+			   "%s: invalid parent clock configuration.\n",
+			   clk->name);
+		return -EINVAL;
+	}
+
+	switch (clk_pdata->id) {
+	case QCA8084_P_UNIPHY0_RX:
+	case QCA8084_P_UNIPHY0_TX:
+	case QCA8084_P_UNIPHY1_RX:
+	case QCA8084_P_UNIPHY1_TX:
+		return -EINVAL;
+	}
+
+	switch (freq) {
+	case UQXGMII_XPCS_SPEED_2500M_CLK:
+		/*
+		 * when configuring the XPCS clock to
+		 * UQXGMII_XPCS_SPEED_2500M_CLK, the RCGR divider needs
+		 * to be bypassed, since there are two dividers from
+		 * the same RCGR: one is for the XPCS clock, the other
+		 * is for the EPHY port clock.
+		 */
+		if (clk_pdata->freq != UQXGMII_SPEED_2500M_CLK) {
+			phydev_err(ppriv->phydev,
+				   "%s: invalid parent rate.\n", clk->name);
+			return -EINVAL;
+		}
+
+		div = 0;
+		cdiv = UQXGMII_SPEED_2500M_CLK / UQXGMII_XPCS_SPEED_2500M_CLK
+			- 1;
+		break;
+	default:
+		/*
+		 * calculate the RCGR divider prate/rate =
+		 * (rcg_divider + 1) / 2
+		 */
+		cdiv = 0;
+		div = clk_pdata->freq * 2 / freq - 1;
+
+		if (div > RCGR_DIV_MAX) {
+			/*
+			 * if the RCG divider can't meet the
+			 * requirement, the CDIV reg can be simply
+			 * divided by 10 to satisfy the required clock
+			 * rate.
+			 */
+			u64 parent_freq;
+
+			cdiv = 9;
+			parent_freq = clk_pdata->freq /
+				(cdiv + 1) * 2;
+			div = parent_freq / freq - 1;
+		}
+		break;
+	}
+
+	if (cdiv && !clk->cdiv) {
+		phydev_err(ppriv->phydev,
+			   "%s: a cdiv value is needed but clock has no cdiv "
+			   "register\n", clk->name);
+		return -EINVAL;
+	}
+
+	/*
+	 * update CDIV and RCGR
+	 */
+	if (clk->cdiv)
+		qca8084_soc_modify(ppriv, QCA8084_CLK_BASE_REG + clk->cdiv,
+				   CDIVR_DIVIDER_MASK,
+				   cdiv << CDIVR_DIVIDER_SHIFT);
+
+	reg = qca8084_soc_read(ppriv, QCA8084_CLK_BASE_REG + clk->rcgr);
+	reg &= ~RCGR_HDIV_MASK;
+	reg |= RCGR_HDIV(div);
+	qca8084_soc_write(ppriv, QCA8084_CLK_BASE_REG + clk->rcgr, reg);
+
+	return clk_rcg_update(ppriv, clk);
+}
+
+/*
+ *
+ */
+static int gcc_common_clk_parent_enable(struct qca8084_package_priv *ppriv)
+{
+	/*
+	 * switch core
+	 */
+	clk_set_parent(ppriv, &gcc_switch_core_clk,
+		       QCA8084_P_UNIPHY1_TX312P5M);
+	qca8084_clk_set_rate(ppriv, &gcc_switch_core_clk,
+			     UQXGMII_SPEED_2500M_CLK);
+
+	/*
+	 * Disable switch core clock to save power in phy mode
+	 */
+	qca8084_clk_disable(ppriv, &gcc_switch_core_clk);
+
+	qca8084_clk_enable(ppriv, &gcc_apb_bridge_clk);
+
+	/*
+	 * AHB bridge
+	 */
+	clk_set_parent(ppriv, &gcc_ahb_clk, QCA8084_P_UNIPHY1_TX312P5M);
+	qca8084_clk_set_rate(ppriv, &gcc_ahb_clk,
+			     QCA8084_AHB_CLK_RATE_104P17M);
+	qca8084_clk_enable(ppriv, &gcc_ahb_clk);
+	qca8084_clk_enable(ppriv, &gcc_sec_ctrl_ahb_clk);
+	qca8084_clk_enable(ppriv, &gcc_tlmm_clk);
+	qca8084_clk_enable(ppriv, &gcc_tlmm_ahb_clk);
+	qca8084_clk_enable(ppriv, &gcc_cnoc_ahb_clk);
+	qca8084_clk_enable(ppriv, &gcc_mdio_ahb_clk);
+	qca8084_clk_enable(ppriv, &gcc_mdio_master_ahb_clk);
+
+	/*
+	 * System
+	 */
+	clk_set_parent(ppriv, &gcc_srds0_sys_clk, QCA8084_P_XO);
+	qca8084_clk_set_rate(ppriv, &gcc_srds0_sys_clk,
+		     QCA8084_SYS_CLK_RATE_25M);
+
+	/*
+	 * Disable SerDes0 clock to save power in phy mode (presumably
+	 * this is used in switch mode?)
+	 */
+	qca8084_clk_disable(ppriv, &gcc_srds0_sys_clk);
+
+	qca8084_clk_enable(ppriv, &gcc_srds1_sys_clk);
+	qca8084_clk_enable(ppriv, &gcc_gephy0_sys_clk);
+	qca8084_clk_enable(ppriv, &gcc_gephy1_sys_clk);
+	qca8084_clk_enable(ppriv, &gcc_gephy2_sys_clk);
+	qca8084_clk_enable(ppriv, &gcc_gephy3_sys_clk);
+
+	clk_set_parent(ppriv, &gcc_sec_ctrl_clk, QCA8084_P_XO);
+	qca8084_clk_set_rate(ppriv, &gcc_sec_ctrl_clk,
+			     QCA8084_SYS_CLK_RATE_25M);
+	qca8084_clk_enable(ppriv, &gcc_sec_ctrl_clk);
+	qca8084_clk_enable(ppriv, &gcc_sec_ctrl_sense_clk);
+
+	return 0;
+}
+
+/*
+ *
+ */
+static int qca8084_gcc_clock_init(struct qca8084_package_priv *ppriv)
+{
+	gcc_common_clk_parent_enable(ppriv);
+
+	clk_set_parent(ppriv, &gcc_mac1_tx_clk,
+		       QCA8084_P_UNIPHY1_RX312P5M);
+	clk_set_parent(ppriv, &gcc_mac1_rx_clk,
+		       QCA8084_P_UNIPHY1_TX312P5M);
+	clk_set_parent(ppriv, &gcc_mac2_tx_clk,
+		       QCA8084_P_UNIPHY1_RX312P5M);
+	clk_set_parent(ppriv, &gcc_mac2_rx_clk,
+		       QCA8084_P_UNIPHY1_TX312P5M);
+	clk_set_parent(ppriv, &gcc_mac3_tx_clk,
+		       QCA8084_P_UNIPHY1_RX312P5M);
+	clk_set_parent(ppriv, &gcc_mac3_rx_clk,
+		       QCA8084_P_UNIPHY1_TX312P5M);
+	clk_set_parent(ppriv, &gcc_mac4_tx_clk,
+		       QCA8084_P_UNIPHY1_RX312P5M);
+	clk_set_parent(ppriv, &gcc_mac4_rx_clk,
+		       QCA8084_P_UNIPHY1_TX312P5M);
+
+	return 0;
+}
+
+
+static int qca8084_uniphy_calibrate(struct qca8084_package_priv *ppriv,
+				    u32 uniphy_addr)
+{
+	int remaining_tries = 1000;
+
+	do {
+		u16 reg;
+
+		reg = mdiobus_c45_read(ppriv->bus, uniphy_addr, 1,
+				       QCA8084_UNIPHY_MMD1_CALIBRATION4);
+		--remaining_tries;
+		if (reg & QCA8084_UNIPHY_MMD1_CALIBRATION_DONE)
+			return 0;
+		msleep(1);
+	} while (remaining_tries);
+
+	phydev_err(ppriv->phydev,
+		   "timeout waiting for calibration of qca8084 uniphy %d\n",
+		   uniphy_addr);
+	return -ETIMEDOUT;
+}
+
+/*
+ *
+ */
+static int qca8084_uniphy_xpcs_soft_reset(struct qca8084_package_priv *ppriv,
+					  u32 xpcs_addr)
+{
+	int i;
+
+	mdiobus_c45_modify(ppriv->bus, xpcs_addr, 3,
+			   QCA8084_UNIPHY_MMD3_DIG_CTRL1, 0x8000,
+			   QCA8084_UNIPHY_MMD3_XPCS_SOFT_RESET);
+
+	for (i = 0; i < 1000; i++) {
+		u16 reg;
+
+		reg = mdiobus_c45_read(ppriv->bus, xpcs_addr, 3,
+				       QCA8084_UNIPHY_MMD3_DIG_CTRL1);
+
+		if ((reg & QCA8084_UNIPHY_MMD3_XPCS_SOFT_RESET) == 0)
+			return 0;
+	}
+
+	phydev_err(ppriv->phydev, "XPCS soft reset timeout\n");
+	return -ETIMEDOUT;
+}
+
+/*
+ * Program the package serdes into UQXGMII mode: hold the XPCS in
+ * reset, select the XPCS datapath on the uniphy, pulse the per-port
+ * GMII/XGMII resets, redo the analog reset + calibration, then enable
+ * and soft-reset UQXGMII on the XPCS side.  The register write order
+ * below follows the vendor bring-up sequence and should not be
+ * rearranged.
+ */
+static int
+__qca8084_interface_uqxgmii_mode_set(struct qca8084_package_priv *ppriv,
+				     u32 uniphy_addr, u32 xpcs_addr)
+{
+	size_t i;
+
+	/*
+	 * reset xpcs
+	 */
+	qca8084_reset_assert(ppriv, &gcc_uniphy_xpcs_rst);
+
+	/*
+	 * select xpcs mode
+	 */
+	mdiobus_c45_modify(ppriv->bus, uniphy_addr, 1,
+			   QCA8084_UNIPHY_MMD1_MODE_CTRL, 0x1f00,
+			   QCA8084_UNIPHY_MMD1_XPCS_MODE);
+
+	/*
+	 * configure datapath as usxgmii
+	 */
+	mdiobus_c45_modify(ppriv->bus, uniphy_addr, 1,
+			   QCA8084_UNIPHY_MMD1_GMII_DATAPASS_SEL,
+			   QCA8084_UNIPHY_MMD1_DATAPASS_MASK,
+			   QCA8084_UNIPHY_MMD1_DATAPASS_USXGMII);
+
+	/*
+	 * reset and release uniphy GMII/XGMII and ethphy GMII
+	 */
+	for (i = 0; i < ARRAY_SIZE(ports_clocks); ++i) {
+		qca8084_reset_pulse(ppriv, ports_clocks[i]);
+	}
+
+
+	/*
+	 * ana software reset and release
+	 */
+	mdiobus_modify(ppriv->bus, uniphy_addr,
+		       QCA8084_UNIPHY_PLL_POWER_ON_AND_RESET,
+		       0x40, QCA8084_UNIPHY_ANA_SOFT_RESET);
+	msleep(10);
+	mdiobus_modify(ppriv->bus, uniphy_addr,
+		       QCA8084_UNIPHY_PLL_POWER_ON_AND_RESET,
+		       0x40, QCA8084_UNIPHY_ANA_SOFT_RELEASE);
+
+	/*
+	 * wait for uniphy calibration to finish
+	 * (NOTE(review): the return value is ignored here; a
+	 * calibration timeout is only logged by the callee)
+	 */
+	qca8084_uniphy_calibrate(ppriv, uniphy_addr);
+
+	/*
+	 * Enable SSCG (Spread Spectrum Clock Generator)
+	 */
+	mdiobus_c45_modify(ppriv->bus, uniphy_addr, 1,
+			   QCA8084_UNIPHY_MMD1_CDA_CONTROL1, 0x8,
+			   QCA8084_UNIPHY_MMD1_SSCG_ENABLE);
+
+	/*
+	 * release XPCS reset
+	 */
+	qca8084_reset_deassert(ppriv, &gcc_uniphy_xpcs_rst);
+
+	/*
+	 * Set BaseR mode
+	 */
+	mdiobus_c45_modify(ppriv->bus, xpcs_addr, 3,
+			   QCA8084_UNIPHY_MMD3_PCS_CTRL2, 0xf,
+			   QCA8084_UNIPHY_MMD3_PCS_TYPE_10GBASE_R);
+
+	/* /\* */
+	/*  * Wait for 10GBase-R link up event */
+	/*  *\/ */
+	/* qca8084_uniphy_wait_for_10gbaser_linkup(ppriv, xpcs_addr); */
+
+	/*
+	 * enable UQXGMII mode
+	 */
+	mdiobus_c45_modify(ppriv->bus, xpcs_addr, 3,
+			   QCA8084_UNIPHY_MMD3_DIG_CTRL1, 0x200,
+			   QCA8084_UNIPHY_MMD3_USXGMII_EN);
+
+	/*
+	 * set UQXGMII mode
+	 */
+	mdiobus_c45_modify(ppriv->bus, xpcs_addr, 3,
+			   QCA8084_UNIPHY_MMD3_VR_RPCS_TPC, 0x1c00,
+			   QCA8084_UNIPHY_MMD3_QXGMII_EN);
+
+	/*
+	 * set AM interval
+	 */
+	mdiobus_c45_write(ppriv->bus, xpcs_addr, 3,
+			  QCA8084_UNIPHY_MMD3_MII_AM_INTERVAL,
+			  QCA8084_UNIPHY_MMD3_MII_AM_INTERVAL_VAL);
+
+	/*
+	 * xpcs software reset
+	 */
+	qca8084_uniphy_xpcs_soft_reset(ppriv, xpcs_addr);
+
+	return 0;
+}
+
+/*
+ * Map a PHY port index (0-3) to the XPCS MMD device address carrying
+ * that port's per-port MII registers.
+ *
+ * Returns the MMD number, or -EINVAL for an out-of-range port.
+ */
+static int port_to_mmd(int port)
+{
+	static const u32 port_to_mmd_conv[] = {
+		31,
+		26,
+		27,
+		28,
+	};
+
+	/*
+	 * The previous check used '>', which let port == ARRAY_SIZE()
+	 * read one element past the end of the table; negative indices
+	 * were not rejected at all.
+	 */
+	if (port < 0 || port >= (int)ARRAY_SIZE(port_to_mmd_conv))
+		return -EINVAL;
+	return port_to_mmd_conv[port];
+}
+
+/*
+ * Enable 802.3az (EEE) support on the XPCS, if the MMD3 link-partner
+ * base ability register reports EEE capability: program the EEE
+ * timers, transparent-LPI modes and the TX/RX LPI pattern enables.
+ * Silently does nothing when the capability bit is absent.
+ */
+static void
+qca8084_uniphy_xpcs_8023az_enable(struct qca8084_package_priv *ppriv,
+				  u32 xpcs_addr)
+{
+	u16 reg;
+
+	reg = mdiobus_c45_read(ppriv->bus, xpcs_addr, 3,
+			       QCA8084_UNIPHY_MMD3_AN_LP_BASE_ABL2);
+	if ((reg & QCA8084_UNIPHY_MMD3_XPCS_EEE_CAP) == 0)
+		return;
+
+
+	/*
+	 * Configure the EEE related timer
+	 */
+	mdiobus_c45_modify(ppriv->bus, xpcs_addr, 3,
+			QCA8084_UNIPHY_MMD3_EEE_MODE_CTRL, 0x0f40,
+			QCA8084_UNIPHY_MMD3_EEE_RES_REGS |
+			QCA8084_UNIPHY_MMD3_EEE_SIGN_BIT_REGS);
+
+	mdiobus_c45_modify(ppriv->bus, xpcs_addr, 3,
+			QCA8084_UNIPHY_MMD3_EEE_TX_TIMER, 0x1fff,
+			QCA8084_UNIPHY_MMD3_EEE_TSL_REGS |
+			QCA8084_UNIPHY_MMD3_EEE_TLU_REGS |
+			QCA8084_UNIPHY_MMD3_EEE_TWL_REGS);
+
+	mdiobus_c45_modify(ppriv->bus, xpcs_addr, 3,
+			QCA8084_UNIPHY_MMD3_EEE_RX_TIMER, 0x1fff,
+			QCA8084_UNIPHY_MMD3_EEE_100US_REG_REGS |
+			QCA8084_UNIPHY_MMD3_EEE_RWR_REG_REGS);
+
+	/*
+	 * enable TRN_LPI
+	 */
+	mdiobus_c45_modify(ppriv->bus, xpcs_addr, 3,
+			QCA8084_UNIPHY_MMD3_EEE_MODE_CTRL1, 0x101,
+			QCA8084_UNIPHY_MMD3_EEE_TRANS_LPI_MODE |
+			QCA8084_UNIPHY_MMD3_EEE_TRANS_RX_LPI_MODE);
+
+	/*
+	 * enable TX/RX LPI pattern
+	 */
+	mdiobus_c45_modify(ppriv->bus, xpcs_addr, 3,
+			QCA8084_UNIPHY_MMD3_EEE_MODE_CTRL, 0x3,
+			QCA8084_UNIPHY_MMD3_EEE_EN);
+}
+
+/*
+ * Put the whole quad-phy package into UQXGMII mode: gate the port
+ * clocks, run the shared uniphy/XPCS mode-set sequence, then
+ * configure the per-port XPCS MII registers (interrupt mask, fixed
+ * 10M start speed, TICD off) and finally enable EEE on the XPCS.
+ *
+ * Returns 0 on success, or a negative errno if a port has no valid
+ * MMD mapping.
+ */
+static int
+qca8084_quadphy_phy_set_mode_uqxgmii(struct qca8084_package_priv *ppriv)
+{
+	u32 uniphy_addr;
+	int xpcs_addr;
+	size_t i;
+
+	uniphy_addr = ppriv->uniphy1_phydev->mdio.addr;
+	xpcs_addr = ppriv->xpcs_phydev->mdio.addr;
+
+	/*
+	 * disable IPG_tuning bypass
+	 */
+	mdiobus_c45_modify(ppriv->bus, uniphy_addr, 1,
+			   QCA8084_UNIPHY_MMD1_BYPASS_TUNING_IPG,
+			   QCA8084_UNIPHY_MMD1_BYPASS_TUNING_IPG_EN, 0);
+
+	/*
+	 * disable uniphy GMII/XGMII clock and disable ethphy GMII
+	 * clock
+	 */
+	for (i = 0; i < ARRAY_SIZE(ports_clocks); ++i)
+		qca8084_clk_disable(ppriv, ports_clocks[i]);
+
+	/*
+	 * configure uqxgmii mode
+	 */
+	__qca8084_interface_uqxgmii_mode_set(ppriv, uniphy_addr, xpcs_addr);
+
+	for (i = 0; i < ppriv->ephy_count; ++i) {
+		int mmd = port_to_mmd(i);
+
+		if (mmd < 0)
+			return mmd;
+
+		/*
+		 * enable auto-neg complete interrupt, mii using
+		 * mii-4bits, configure as PHY mode
+		 */
+		mdiobus_c45_modify(ppriv->bus, xpcs_addr, mmd,
+				   QCA8084_UNIPHY_MMD_MII_AN_INT_MSK, 0x109,
+				   QCA8084_UNIPHY_MMD_AN_COMPLETE_INT |
+				   QCA8084_UNIPHY_MMD_MII_4BITS_CTRL |
+				   QCA8084_UNIPHY_MMD_TX_CONFIG_CTRL);
+
+		/*
+		 * disable autoneg ability, reset speed to 10
+		 */
+		mdiobus_c45_modify(ppriv->bus, xpcs_addr, mmd,
+				   QCA8084_UNIPHY_MMD_MII_CTRL,
+				   QCA8084_UNIPHY_MMD_XPCS_SPEED_MASK |
+				   QCA8084_UNIPHY_MMD_MII_AN_ENABLE,
+				   QCA8084_UNIPHY_MMD_XPCS_SPEED_10);
+
+		/*
+		 * disable TICD
+		 */
+		mdiobus_c45_modify(ppriv->bus, xpcs_addr, mmd,
+				   QCA8084_UNIPHY_MMD_MII_XAUI_MODE_CTRL, 0x1,
+				   QCA8084_UNIPHY_MMD_TX_IPG_CHECK_DISABLE);
+	}
+
+	/*
+	 * enable EEE for xpcs
+	 */
+	qca8084_uniphy_xpcs_8023az_enable(ppriv, xpcs_addr);
+
+	return 0;
+}
+
+/*
+ * Set this port's MAC and XGMII clock rates for @speed (in Mb/s).
+ * The XGMII clocks track the MAC rate except at 2.5G, where they use
+ * a dedicated rate.
+ *
+ * Returns 0 on success, -ENOENT for an unsupported speed.
+ */
+static int
+qca8084_port_speed_clock_set(struct phy_device *phydev,
+			     u32 speed)
+{
+	struct qca8084_package_priv *ppriv = phy_ppriv(phydev);
+	struct qca8084_priv *priv = phydev->priv;
+	const struct port_rxtx_clk *pclk;
+	u32 freq;
+
+	pclk = &port_rxtx_clks[priv->physid];
+
+	switch(speed) {
+	case 2500:
+		freq = UQXGMII_SPEED_2500M_CLK;
+		break;
+	case 1000:
+		freq = UQXGMII_SPEED_1000M_CLK;
+		break;
+	case 100:
+		freq = UQXGMII_SPEED_100M_CLK;
+		break;
+	case 10:
+		freq = UQXGMII_SPEED_10M_CLK;
+		break;
+	default:
+		return -ENOENT;
+	}
+
+	qca8084_clk_set_rate(ppriv, pclk->mac_rx_clk, freq);
+	qca8084_clk_set_rate(ppriv, pclk->mac_tx_clk, freq);
+
+	if (freq == UQXGMII_SPEED_2500M_CLK)
+		/*
+		 * XGMII take the different clock rate from MAC clock
+		 * when the link speed is 2.5G.
+		 */
+		freq = UQXGMII_XPCS_SPEED_2500M_CLK;
+
+	qca8084_clk_set_rate(ppriv, pclk->xgmii_rx_clk, freq);
+	qca8084_clk_set_rate(ppriv, pclk->xgmii_tx_clk, freq);
+
+
+	return 0;
+}
+
+/*
+ * Program this port's XPCS MII control register with the hardware
+ * speed encoding matching @speed (in Mb/s).
+ *
+ * Returns 0 on success, -ENOENT for an unsupported speed, or a
+ * negative errno if the port has no valid MMD mapping.
+ */
+static int qca8084_uniphy_xpcs_speed_set(struct phy_device *phydev, u32 speed)
+{
+	struct qca8084_package_priv *ppriv = phy_ppriv(phydev);
+	struct qca8084_priv *priv = phydev->priv;
+	int mmd = port_to_mmd(priv->physid);
+	u32 xpcs_addr = ppriv->xpcs_phydev->mdio.addr;
+	u32 hw_speed;
+
+	/*
+	 * port_to_mmd() returns -EINVAL for an out-of-range port; the
+	 * previous u32 assignment silently turned that error into a
+	 * huge MMD number handed to the MDIO layer.
+	 */
+	if (mmd < 0)
+		return mmd;
+
+	switch (speed) {
+	case 2500:
+		hw_speed = QCA8084_UNIPHY_MMD_XPCS_SPEED_2500;
+		break;
+	case 1000:
+		hw_speed = QCA8084_UNIPHY_MMD_XPCS_SPEED_1000;
+		break;
+	case 100:
+		hw_speed = QCA8084_UNIPHY_MMD_XPCS_SPEED_100;
+		break;
+	case 10:
+		hw_speed = QCA8084_UNIPHY_MMD_XPCS_SPEED_10;
+		break;
+	default:
+		return -ENOENT;
+	}
+
+	mdiobus_c45_modify(ppriv->bus, xpcs_addr, mmd,
+			   QCA8084_UNIPHY_MMD_MII_CTRL,
+			   QCA8084_UNIPHY_MMD_XPCS_SPEED_MASK,
+			   hw_speed);
+	return 0;
+}
+
+/*
+ * Per-port USXGMII function reset: pulse this port's USXGMII reset
+ * bit on the uniphy, then reset the matching XPCS FIFO.  Port 0 uses
+ * the MMD3 FIFO reset bit; the other ports use their own per-port MMD.
+ *
+ * Returns 0 on success or a negative errno if the port has no valid
+ * MMD mapping.
+ */
+static int qca8084_uniphy_uqxgmii_function_reset(struct phy_device *phydev)
+{
+	struct qca8084_package_priv *ppriv = phy_ppriv(phydev);
+	struct qca8084_priv *priv = phydev->priv;
+	int port_mmd = port_to_mmd(priv->physid);
+	u32 xpcs_addr = ppriv->xpcs_phydev->mdio.addr;
+	u32 uniphy_addr = ppriv->uniphy1_phydev->mdio.addr;
+
+	/*
+	 * bail out before touching hardware: the previous u32
+	 * assignment would silently pass a truncated -EINVAL to the
+	 * MDIO layer as an MMD number.
+	 */
+	if (port_mmd < 0)
+		return port_mmd;
+
+	/* assert, hold 1 ms, then release this port's USXGMII reset */
+	mdiobus_c45_modify(ppriv->bus, uniphy_addr, 1,
+			   QCA8084_UNIPHY_MMD1_USXGMII_RESET,
+			   1 << priv->physid, 0);
+	msleep(1);
+	mdiobus_c45_modify(ppriv->bus, uniphy_addr, 1,
+			   QCA8084_UNIPHY_MMD1_USXGMII_RESET,
+			   1 << priv->physid,
+			   1 << priv->physid);
+
+	if (priv->physid == 0)
+		mdiobus_c45_modify(ppriv->bus, xpcs_addr, 3,
+				   QCA8084_UNIPHY_MMD_MII_DIG_CTRL,
+				   0x400, QCA8084_UNIPHY_MMD3_USXG_FIFO_RESET);
+	else
+		mdiobus_c45_modify(ppriv->bus, xpcs_addr, port_mmd,
+				   QCA8084_UNIPHY_MMD_MII_DIG_CTRL,
+				   0x20, QCA8084_UNIPHY_MMD_USXG_FIFO_RESET);
+
+	return 0;
+}
+
+/*
+ * Assert (clear the reset bits) or release (set them) the ethphy
+ * FIFO reset via the clause-22 FIFO control register.
+ */
+static void qca8084_phy_fifo_reset(struct phy_device *phydev, bool do_assert)
+{
+	int reg;
+
+	reg = phy_read(phydev, QCA8084_FIFO_CONTROL);
+	/*
+	 * phy_read() returns a negative errno on MDIO failure; don't
+	 * write a truncated error value back into the register.
+	 */
+	if (reg < 0)
+		return;
+	if (do_assert)
+		reg &= ~QCA8084_FIFO_RESET_MASK;
+	else
+		reg |= QCA8084_FIFO_RESET_MASK;
+	phy_write(phydev, QCA8084_FIFO_CONTROL, reg);
+}
+
+/*
+ * Pulse the ethphy FIFO reset: assert, hold for 50 ms, then release.
+ */
+static void qca8084_phy_function_reset(struct phy_device *phydev)
+{
+	qca8084_phy_fifo_reset(phydev, true);
+	msleep(50);
+	qca8084_phy_fifo_reset(phydev, false);
+}
+
+/*
+ * Select the inter-packet-gap mode in MMD7: use the 11-byte IPG at
+ * 1000 Mb/s, the 10-byte IPG otherwise.
+ */
+static void qca8084_phy_ipg_config(struct phy_device *phydev, u32 speed)
+{
+	u16 reg;
+
+	reg = mdiobus_c45_read(phydev->mdio.bus, phydev->mdio.addr, 7,
+			       QCA8084_PHY_MMD7_IPG_10_11_ENABLE);
+	if (speed == 1000)
+		reg |= QCA8084_PHY_MMD7_IPG_11_EN;
+	else
+		reg &= ~QCA8084_PHY_MMD7_IPG_11_EN;
+	mdiobus_c45_write(phydev->mdio.bus, phydev->mdio.addr, 7,
+			  QCA8084_PHY_MMD7_IPG_10_11_ENABLE, reg);
+}
+
+/*
+ * phylib link_change_notify callback: re-tune this port's clocks,
+ * XPCS speed and resets whenever the link state changes.  With the
+ * link down the clocks are reprogrammed for 10M and gated off, and
+ * the PHY FIFO is held in reset.
+ */
+static void qca8084_link_change_notify(struct phy_device *phydev)
+{
+	struct qca8084_package_priv *ppriv = phy_ppriv(phydev);
+	struct qca8084_priv *priv = phydev->priv;
+	const struct port_rxtx_clk *pclk;
+	int (*clkfun)(struct qca8084_package_priv *,
+		      const struct qca8084_clk *clk);
+	int speed;
+
+	if (!phydev->link)
+		speed = SPEED_10;
+	else
+		speed = phydev->speed;
+
+	/*
+	 * set gmii+ clock to uniphy1 and ethphy
+	 */
+	qca8084_port_speed_clock_set(phydev, speed);
+
+	/*
+	 * set xpcs speed
+	 */
+	qca8084_uniphy_xpcs_speed_set(phydev, speed);
+
+	/*
+	 * GMII/XGMII clock and ETHPHY GMII clock enable/disable,
+	 * depending on link state
+	 */
+	if (phydev->link)
+		clkfun = qca8084_clk_enable;
+	else
+		clkfun = qca8084_clk_disable;
+
+	pclk = &port_rxtx_clks[priv->physid];
+
+	clkfun(ppriv, pclk->srds_rx_clk);
+	clkfun(ppriv, pclk->srds_tx_clk);
+	clkfun(ppriv, pclk->xgmii_rx_clk);
+	clkfun(ppriv, pclk->xgmii_tx_clk);
+	clkfun(ppriv, pclk->gephy_tx_clk);
+	clkfun(ppriv, pclk->gephy_rx_clk);
+
+	/* delay from SSDK is 100ms, reduced to 1ms because it does
+	 * not seem useful */
+	msleep(1);
+
+	/*
+	 * UNIPHY GMII/XGMII interface and ETHPHY GMII interface reset
+	 * and release
+	 */
+	qca8084_reset_pulse(ppriv, pclk->srds_rx_clk);
+	qca8084_reset_pulse(ppriv, pclk->srds_tx_clk);
+	qca8084_reset_pulse(ppriv, pclk->xgmii_rx_clk);
+	qca8084_reset_pulse(ppriv, pclk->xgmii_tx_clk);
+	qca8084_reset_pulse(ppriv, pclk->gephy_tx_clk);
+	qca8084_reset_pulse(ppriv, pclk->gephy_rx_clk);
+
+	/*
+	 * ipg_tune and xgmii2gmii reset for uniphy and ETHPHY,
+	 * function reset.
+	 */
+	qca8084_uniphy_uqxgmii_function_reset(phydev);
+
+	/*
+	 * ethphy function reset: PHY_FIFO_RESET
+	 */
+	if (phydev->link)
+		qca8084_phy_function_reset(phydev);
+	else
+		qca8084_phy_fifo_reset(phydev, true);
+
+	/*
+	 * change IPG from 10 to 11 for 1G speed
+	 *
+	 * NOTE(review): this passes phydev->speed rather than the
+	 * local 'speed' fallback used above — confirm the intent when
+	 * the link is down and phydev->speed may be stale/unknown.
+	 */
+	qca8084_phy_ipg_config(phydev, phydev->speed);
+}
+
+/*
+ * Remap EPHY @ephy_id onto MDIO address @addr: each EPHY owns a
+ * 5-bit field in the EPHY address map register.
+ */
+static void qca8084_set_ephy_mdio_addr(struct qca8084_package_priv *ppriv,
+				       int ephy_id,
+				       int addr)
+{
+	u32 val;
+
+	if (WARN_ON(ephy_id >= ppriv->ephy_count))
+		return;
+
+	val = qca8084_soc_read(ppriv, QCA8084_EPHY_MAP_REG);
+	val &= ~(0x1f << (ephy_id * 5));
+	val |= (addr << (ephy_id * 5));
+	qca8084_soc_write(ppriv, QCA8084_EPHY_MAP_REG, val);
+}
+
+/*
+ * Remap serdes @serdes_id onto MDIO address @addr: each serdes owns a
+ * 5-bit field in the UPHY address map register.
+ */
+static void qca8084_set_serdes_mdio_addr(struct qca8084_package_priv *ppriv,
+					 int serdes_id,
+					 int addr)
+{
+	u32 val;
+
+	if (WARN_ON(serdes_id >= ppriv->serdes_count))
+		return;
+
+	val = qca8084_soc_read(ppriv, QCA8084_UPHY_MAP_REG);
+	val &= ~(0x1f << (serdes_id * 5));
+	val |= (addr << (serdes_id * 5));
+	qca8084_soc_write(ppriv, QCA8084_UPHY_MAP_REG, val);
+}
+
+/*
+ * efuse related: per-EPHY location of the factory LDO/ICC calibration
+ * values inside the QFPROM fuse rows.
+ */
+struct phy_efuse_cfg {
+	u32 row;		/* QFPROM row register holding the values */
+	u32 ldo_shift;		/* bit position of the LDO field in 'row' */
+	u32 ldo_mask;		/* width mask of the LDO field */
+	u32 icc_shift;		/* bit position of the ICC field in 'row' */
+	u32 icc_mask;		/* width mask of the ICC field */
+};
+
+/*
+ * Table is only read (qca8084_load_efuse takes a const pointer into
+ * it), so make it static const instead of a writable global.
+ */
+static const struct phy_efuse_cfg phy_efuse_configs[] = {
+	[0] = {
+		.row = QCA8084_QFPROM_RAW_CALIBRATION_ROW4_LSB_REG,
+		.ldo_shift = 18,
+		.ldo_mask = 0xf,
+		.icc_shift = 22,
+		.icc_mask = 0x1f,
+	},
+	[1] = {
+		.row = QCA8084_QFPROM_RAW_CALIBRATION_ROW7_LSB_REG,
+		.ldo_shift = 23,
+		.ldo_mask = 0xf,
+		.icc_shift = 27,
+		.icc_mask = 0x1f,
+	},
+	[2] = {
+		.row = QCA8084_QFPROM_RAW_CALIBRATION_ROW8_LSB_REG,
+		.ldo_shift = 23,
+		.ldo_mask = 0xf,
+		.icc_shift = 27,
+		.icc_mask = 0x1f,
+	},
+	[3] = {
+		.row = QCA8084_QFPROM_RAW_CALIBRATION_ROW6_MSB_REG,
+		.ldo_shift = 14,
+		.ldo_mask = 0xf,
+		.icc_shift = 18,
+		.icc_mask = 0x1f,
+	},
+};
+
+/*
+ * read/write helpers for indirect access to phy debug port
+ */
+static int qca8084_phy_debug_read(struct mii_bus *bus, int addr, u32 reg)
+{
+	/* latch the debug register number, then read its data port */
+	mdiobus_write(bus, addr, QCA8084_DEBUG_PORT_ADDR, reg);
+	return mdiobus_read(bus, addr, QCA8084_DEBUG_PORT_DATA);
+}
+
+/* latch the debug register number, then write @val to its data port */
+static void qca8084_phy_debug_write(struct mii_bus *bus, int addr, u32 reg,
+				    u16 val)
+{
+	mdiobus_write(bus, addr, QCA8084_DEBUG_PORT_ADDR, reg);
+	mdiobus_write(bus, addr, QCA8084_DEBUG_PORT_DATA, val);
+}
+
+/*
+ * Copy the factory LDO/ICC calibration values of EPHY @ephy_id from
+ * the QFPROM fuse rows into the PHY's analog registers, temporarily
+ * mapping the PHY onto @free_mdio_addr for the debug-port accesses.
+ *
+ * Always returns 0.
+ */
+static int qca8084_load_efuse(struct qca8084_package_priv *ppriv,
+			      int ephy_id, int free_mdio_addr)
+{
+	const struct phy_efuse_cfg *cfg = &phy_efuse_configs[ephy_id];
+	u32 reg, ldo_efuse, icc_efuse;
+
+	/* temporary map it */
+	qca8084_set_ephy_mdio_addr(ppriv, ephy_id, free_mdio_addr);
+
+	reg = qca8084_soc_read(ppriv, cfg->row);
+	ldo_efuse = (reg & (cfg->ldo_mask << cfg->ldo_shift)) >> cfg->ldo_shift;
+	icc_efuse = (reg & (cfg->icc_mask << cfg->icc_shift)) >> cfg->icc_shift;
+
+	/* LDO calibration lives in bits 7:4 of the LDO efuse register */
+	reg = qca8084_phy_debug_read(ppriv->bus, free_mdio_addr,
+				     QCA8084_PHY_LDO_EFUSE_REG);
+	reg = (reg & ~0xf0) | (ldo_efuse << 4);
+	qca8084_phy_debug_write(ppriv->bus, free_mdio_addr,
+				QCA8084_PHY_LDO_EFUSE_REG, reg);
+
+	/* ICC calibration lives in bits 4:0 of the ICC efuse register */
+	reg = qca8084_phy_debug_read(ppriv->bus, free_mdio_addr,
+				     QCA8084_PHY_ICC_EFUSE_REG);
+	reg = (reg & ~0x1f) | (icc_efuse << 0);
+	qca8084_phy_debug_write(ppriv->bus, free_mdio_addr,
+				QCA8084_PHY_ICC_EFUSE_REG, reg);
+
+	/* unmap it */
+	qca8084_set_ephy_mdio_addr(ppriv, ephy_id, 0);
+
+	return 0;
+}
+
+/*
+ * clock and reset helpers.
+ */
+/* set the clock-enable bit (bit 0) of a CBCR register */
+static void qca8084_clk_early_enable(struct qca8084_package_priv *ppriv,
+				     uint32_t reg)
+{
+	u32 val;
+
+	val = qca8084_soc_read(ppriv, reg);
+	val |= (1 << 0);
+	qca8084_soc_write(ppriv, reg, val);
+}
+
+/* clear the clock-enable bit (bit 0) of a CBCR register */
+static void qca8084_clk_early_disable(struct qca8084_package_priv *ppriv,
+				      uint32_t reg)
+{
+	u32 val;
+
+	val = qca8084_soc_read(ppriv, reg);
+	val &= ~(1 << 0);
+	qca8084_soc_write(ppriv, reg, val);
+}
+
+/* assert the block reset bit (bit 2) of a CBCR register */
+static void qca8084_clk_reset_pre(struct qca8084_package_priv *ppriv,
+				  uint32_t reg)
+{
+	u32 val;
+
+	val = qca8084_soc_read(ppriv, reg);
+	val |= (1 << 2);
+	qca8084_soc_write(ppriv, reg, val);
+}
+
+/* de-assert the block reset bit (bit 2) of a CBCR register */
+static void qca8084_clk_reset_post(struct qca8084_package_priv *ppriv,
+				   uint32_t reg)
+{
+	u32 val;
+
+	val = qca8084_soc_read(ppriv, reg);
+	val &= ~(1 << 2);
+	qca8084_soc_write(ppriv, reg, val);
+}
+
+/*
+ * after a hardware reset, the chip comes up with all the clocks
+ * disabled, and various subsystems reset.
+ *
+ * in that state, only the configuration space registers are
+ * available.
+ *
+ * this function enables the various internal chip clocks, and also
+ * performs efuse loading if required.
+ *
+ * after that step, the ethernet and serdes PHYs should be enumerable
+ * on the MDIO bus.
+ */
+static void qca8084_clk_init(struct qca8084_package_priv *ppriv,
+			     int free_mdio_addr)
+{
+	u32 reg, rev_id;
+	size_t i;
+
+	/*
+	 * Enable serdes
+	 */
+	qca8084_clk_early_enable(ppriv, QCA8084_SRDS0_SYS_CBCR_REG);
+	qca8084_clk_early_enable(ppriv, QCA8084_SRDS1_SYS_CBCR_REG);
+
+	/*
+	 * Reset serdes
+	 */
+	qca8084_clk_reset_pre(ppriv, QCA8084_SRDS0_SYS_CBCR_REG);
+	qca8084_clk_reset_pre(ppriv, QCA8084_SRDS1_SYS_CBCR_REG);
+
+	/*
+	 * Disable EPHY GMII clock, presumably for both TX and RX.
+	 */
+	for (i = 0; i < 2 * ppriv->ephy_count; ++i)
+		qca8084_clk_early_disable(ppriv, QCA8084_GEPHY0_TX_CBCR_REG(i));
+
+	/*
+	 * Enable ephy
+	 */
+	qca8084_clk_early_enable(ppriv, QCA8084_EPHY0_SYS_CBCR_REG);
+	qca8084_clk_early_enable(ppriv, QCA8084_EPHY1_SYS_CBCR_REG);
+	qca8084_clk_early_enable(ppriv, QCA8084_EPHY2_SYS_CBCR_REG);
+	qca8084_clk_early_enable(ppriv, QCA8084_EPHY3_SYS_CBCR_REG);
+
+	/*
+	 * Reset ephy
+	 */
+	qca8084_clk_reset_pre(ppriv, QCA8084_EPHY0_SYS_CBCR_REG);
+	qca8084_clk_reset_pre(ppriv, QCA8084_EPHY1_SYS_CBCR_REG);
+	qca8084_clk_reset_pre(ppriv, QCA8084_EPHY2_SYS_CBCR_REG);
+	qca8084_clk_reset_pre(ppriv, QCA8084_EPHY3_SYS_CBCR_REG);
+
+	/*
+	 * release resets
+	 */
+	msleep(20);
+	qca8084_clk_reset_post(ppriv, QCA8084_SRDS0_SYS_CBCR_REG);
+	qca8084_clk_reset_post(ppriv, QCA8084_SRDS1_SYS_CBCR_REG);
+	qca8084_clk_reset_post(ppriv, QCA8084_EPHY0_SYS_CBCR_REG);
+	qca8084_clk_reset_post(ppriv, QCA8084_EPHY1_SYS_CBCR_REG);
+	qca8084_clk_reset_post(ppriv, QCA8084_EPHY2_SYS_CBCR_REG);
+	qca8084_clk_reset_post(ppriv, QCA8084_EPHY3_SYS_CBCR_REG);
+	msleep(1);
+
+	/*
+	 * Deassert EPHY DSP (clear the 5 low reset bits)
+	 */
+	reg = qca8084_soc_read(ppriv, QCA8084_GCC_GEPHY_MISC_REG);
+	reg &= ~0x1f;
+	qca8084_soc_write(ppriv, QCA8084_GCC_GEPHY_MISC_REG, reg);
+
+	/*
+	 * for ES chips, we need to load efuse manually
+	 * (revision id is taken from bits 23:16 of PTE row2 MSB)
+	 */
+	reg = qca8084_soc_read(ppriv, QCA8084_QFPROM_RAW_PTE_ROW2_MSB_REG);
+	rev_id = (reg >> 16) & 0xff;
+	switch (rev_id) {
+	case 1:
+	case 2:
+		/* ES samples */
+		phydev_warn(ppriv->phydev,
+			    "device is an ES sample (rev %d)\n", rev_id);
+		for (i = 0; i < ppriv->ephy_count; ++i)
+			qca8084_load_efuse(ppriv, i, free_mdio_addr);
+		break;
+	}
+
+	/*
+	 * Enable efuse loading into analog circuit
+	 */
+	reg = qca8084_soc_read(ppriv, QCA8084_EPHY_MAP_REG);
+	reg &= ~EPHY_MAP_EFUSE_LOAD_PHY01n;
+	reg &= ~EPHY_MAP_EFUSE_LOAD_PHY23n;
+	qca8084_soc_write(ppriv, QCA8084_EPHY_MAP_REG, reg);
+
+	mdelay(10);
+}
+
+/*
+ * One-time package-level probe, run by the first PHY of the package:
+ * locate the SOC register window on the MDIO bus, reset all PHY MDIO
+ * address mappings, and bring up the internal clocks (including efuse
+ * loading) so the individual PHYs become addressable.
+ *
+ * Returns 0 on success, -ENODEV if the SOC ID cannot be read at
+ * either candidate base address.
+ */
+static int qca8084_package_probe(struct phy_device *phydev, int free_mdio_addr)
+{
+	struct qca8084_package_priv *ppriv = phy_ppriv(phydev);
+	u32 id;
+	int i;
+
+	ppriv->phydev = phydev;
+	ppriv->bus = phydev->mdio.bus;
+	ppriv->uniphy1_mode = PHY_INTERFACE_MODE_NA;
+
+	/*
+	 * PHY uses half of the MDIO address space for "SOC" access
+	 * either top of bottom depending on bootstrap configuration,
+	 * the other half is used to address individual PHY whose
+	 * address can be software remapped
+	 *
+	 * try to probe the device on both possible offsets
+	 */
+	ppriv->soc_base = 0x00;
+	id = qca8084_soc_read(ppriv, QCA8084_SOC_ID_REG) >> 8;
+	if (id != QCA8084_SOC_ID) {
+		ppriv->soc_base = 0x10;
+		id = qca8084_soc_read(ppriv, QCA8084_SOC_ID_REG) >> 8;
+		if (id != QCA8084_SOC_ID) {
+			phydev_err(phydev, "failed to find soc base address\n");
+			return -ENODEV;
+		}
+	}
+
+	switch (id) {
+	case QCA8084_SOC_ID:
+		ppriv->ephy_count = QCA8084_EPHY_COUNT;
+		ppriv->serdes_count = QCA8084_SERDES_COUNT;
+		break;
+	}
+
+	/* map all PHY mdio address to zero for now */
+	for (i = 0; i < ppriv->ephy_count; i++)
+		qca8084_set_ephy_mdio_addr(ppriv, i, 0);
+
+	for (i = 0; i < ppriv->serdes_count; i++)
+		qca8084_set_serdes_mdio_addr(ppriv, i, 0);
+
+	/* early clock setup so that ephy respond to registers access */
+	qca8084_clk_init(ppriv, free_mdio_addr);
+
+	return 0;
+}
+
+/*
+ * ADC clock inversion, set as falling to fix link issues.
+ */
+enum {
+	ADC_MODE_RISING,
+	ADC_MODE_FALLING,
+};
+
+/*
+ * Program the ADC sampling edge through the PHY debug port, then soft
+ * reset the PHY to latch the new setting.  Unknown @adc_mode values
+ * are silently ignored.
+ */
+static void qca8084_phy_adc_edge_set(struct phy_device *phydev,
+				     int adc_mode)
+{
+	u16 mode_val;
+	u16 reg;
+
+	switch (adc_mode) {
+	case ADC_MODE_RISING:
+		mode_val = ANA_INTERFACE_CLK_SEL_ADC_EDGE_RISING;
+		break;
+	case ADC_MODE_FALLING:
+		mode_val = ANA_INTERFACE_CLK_SEL_ADC_EDGE_FALLING;
+		break;
+	default:
+		return;
+	}
+
+	reg = qca8084_phy_debug_read(phydev->mdio.bus, phydev->mdio.addr,
+				     QCA8084_PHY_DEBUG_ANA_INTERFACE_CLK_SEL);
+	reg &= ~ANA_INTERFACE_CLK_SEL_ADC_EDGE_MASK;
+	reg |= mode_val;
+	qca8084_phy_debug_write(phydev->mdio.bus, phydev->mdio.addr,
+				QCA8084_PHY_DEBUG_ANA_INTERFACE_CLK_SEL, reg);
+
+	/*
+	 * apparently, a phy soft reset is needed after changing the
+	 * ADC edge mode.
+	 */
+	genphy_soft_reset(phydev);
+}
+
+/*
+ * phylib probe callback for every PHY in the package.  The first PHY
+ * to probe runs the package-level bring-up; each PHY then claims an
+ * MDIO address for its role ("qca,phy-type" 0 = copper EPHY,
+ * 1 = serdes), reads its LED DT properties, and records the uniphy1
+ * and XPCS phydevs for later package configuration.
+ */
+static int qca8084_probe(struct phy_device *phydev)
+{
+	struct device *dev = &phydev->mdio.dev;
+	struct device_node *of_node = dev->of_node;
+	struct qca8084_package_priv *ppriv;
+	struct qca8084_priv *priv;
+	u32 physid, ptype;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof (*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	phydev->priv = priv;
+
+	/* device will span the whole MDIO address space, there cannot
+	 * be more than one of these on a given bus so we use 0 as
+	 * cookie for package
+	 *
+	 * we will do actual probing once in package probe
+	 */
+	if (devm_phy_package_join(dev, phydev, 0, sizeof (*ppriv)))
+		return -ENOMEM;
+
+	ppriv = (struct qca8084_package_priv *)phydev->shared->priv;
+	if (phy_package_probe_once(phydev)) {
+		int ret;
+
+		ret = qca8084_package_probe(phydev, phydev->mdio.addr);
+		if (ret)
+			return ret;
+	}
+
+	/* setup PHY mdio address, fully dynamic */
+	ret = of_property_read_u32(of_node, "qca,phy-physid", &physid);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32(of_node, "qca,phy-type", &ptype);
+	if (ret)
+		return ret;
+
+	/*
+	 * NOTE(review): qca8084_config_leds() tests 'led_pin < 0' —
+	 * confirm the led_pin field is a signed type so the -1
+	 * sentinel below behaves as intended.
+	 */
+	if (of_property_read_u32(of_node, "qca,led-tlmm-pin", &priv->led_pin))
+		priv->led_pin = -1;
+	priv->led_act_blink = of_property_read_bool(of_node,
+						    "qca,led-act-blink");
+	priv->led_link_speed_any = of_property_read_bool(of_node,
+						 "qca,led-link-speed-any");
+
+	switch (ptype) {
+	case 0:
+		if (physid >= QCA8084_EPHY_COUNT)
+			return -EINVAL;
+		qca8084_set_ephy_mdio_addr(ppriv, physid, phydev->mdio.addr);
+		qca8084_phy_adc_edge_set(phydev, ADC_MODE_FALLING);
+
+		/* copper PHY are enabled by default after
+		 * package_probe, power them down */
+		genphy_suspend(phydev);
+		break;
+	case 1:
+		if (physid >= QCA8084_SERDES_COUNT)
+			return -EINVAL;
+		qca8084_set_serdes_mdio_addr(ppriv, physid, phydev->mdio.addr);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	priv->ptype = ptype;
+	priv->physid = physid;
+
+	/* capture phydev of uniphy1 & xpcs for later */
+	if (ptype == 1 && physid == 1)
+		ppriv->uniphy1_phydev = phydev;
+
+	if (ptype == 1 && physid == 2)
+		ppriv->xpcs_phydev = phydev;
+
+	return 0;
+}
+
+/*
+ * One-time package configuration, run by the first PHY reaching
+ * config_init: only PHY_INTERFACE_MODE_10G_QXGMII is supported; put
+ * the quad-phy into UQXGMII mode, initialize the GCC clocks and
+ * record the negotiated serdes interface mode for later consistency
+ * checks.
+ */
+static int qca8084_package_init(struct phy_device *phydev)
+{
+	struct phy_package_shared *shared = phydev->shared;
+	struct qca8084_package_priv *ppriv;
+
+	ppriv = (struct qca8084_package_priv *)shared->priv;
+
+	if (phydev->interface != PHY_INTERFACE_MODE_10G_QXGMII)
+		return -ENOTSUPP;
+
+	qca8084_quadphy_phy_set_mode_uqxgmii(ppriv);
+	qca8084_gcc_clock_init(ppriv);
+	ppriv->uniphy1_mode = phydev->interface;
+
+	return 0;
+}
+
+/*
+ * Configure the optional port LED: route the DT-selected TLMM pin to
+ * the LED1 function, program the blink/any-speed behaviour taken from
+ * DT, and invert the LED polarity.  A pin value < 0 means no LED.
+ *
+ * Always returns 0.
+ */
+static int qca8084_config_leds(struct phy_device *phydev)
+{
+	struct qca8084_priv *priv = phydev->priv;
+	struct qca8084_package_priv *ppriv = phydev->shared->priv;
+	u16 led_ctrl_val = 0;
+
+	if (priv->led_pin < 0) {
+		phydev_info(phydev, "no led configured.\n");
+		return 0;
+	}
+
+	phydev_dbg(phydev, "configure TLMM pin %i as led ...\n",
+		    priv->led_pin);
+	qca8084_soc_modify(ppriv, QCA8084_TLMM_CFG_REG(priv->led_pin),
+			   TLMM_CFG_FNSEL_MASK,
+			   TLMM_CFG_FNSEL(TLMM_CFG_FNSEL_Px_LED_1));
+
+	if (priv->led_act_blink)
+		led_ctrl_val |= LEDx_CTRL_BLINK_ANY_DIR;
+	if (priv->led_link_speed_any)
+		led_ctrl_val |= LEDx_CTRL_LINK_ANY_SPEED;
+
+	mdiobus_c45_write(ppriv->bus, phydev->mdio.addr, 7,
+			  QCA808X_PHY_MMD7_LED1_CTRL,
+			  led_ctrl_val);
+
+	mdiobus_c45_modify(ppriv->bus, phydev->mdio.addr, 7,
+			   QCA808X_PHY_MMD7_LED_POLARITY_CTRL,
+			   LED_POLARITY_MASK, LED_POLARITY_MASK);
+
+	return 0;
+}
+
+/*
+ * phylib config_init callback: verify that probe located the uniphy1
+ * and XPCS phydevs, enable MMD7 packet counters (clear-on-read, as
+ * qca8084_get_stat relies on), run the one-time package init on the
+ * first PHY, enforce a consistent serdes interface mode on the
+ * others, and finally configure the LED.
+ */
+static int qca8084_config_init(struct phy_device *phydev)
+{
+	struct qca8084_package_priv *ppriv;
+	int ret, val;
+
+	ppriv = (struct qca8084_package_priv *)phydev->shared->priv;
+	if (!ppriv->uniphy1_phydev) {
+		phydev_err(phydev, "cannot configure, did not find the "
+			   "uniphy1 phydev during probe");
+		return -ENODEV;
+	}
+
+	if (!ppriv->xpcs_phydev) {
+		phydev_err(phydev, "cannot configure, did not find the "
+			   "xpcs phydev during probe");
+		return -ENODEV;
+	}
+
+	/* enable packet counters so that stats are workable */
+	val = phy_read_mmd(phydev, 7, 0x8029);
+	if (val < 0)
+		return val;
+	/* enable packet counting */
+	val |= (1 << 0);
+	/* set to clear on read */
+	val |= (1 << 1);
+	phy_write_mmd(phydev, 7, 0x8029, val);
+
+	if (phy_package_init_once(phydev)) {
+		ret = qca8084_package_init(phydev);
+		if (ret)
+			return ret;
+	} else {
+		/* make sure everyone agrees on the serdes phy interface */
+		if (ppriv->uniphy1_mode != phydev->interface) {
+			phydev_err(phydev, "interface shall be the same "
+				   "for all qca808x phys\n");
+			return -EINVAL;
+		}
+	}
+
+	return qca8084_config_leds(phydev);
+}
+
+/*
+ * Translate an ethtool MDI-X control value into the PHY's crossover
+ * mode field and update the register only if it differs.
+ *
+ * Returns phy_modify_changed()'s result: 1 when the register changed,
+ * 0 when unchanged or @ctrl is unknown, a negative errno on MDIO
+ * failure.
+ */
+static int qca8084_config_mdix(struct phy_device *phydev, u8 ctrl)
+{
+	u16 crossover;
+
+	if (ctrl == ETH_TP_MDI)
+		crossover = QCA8084_SFC_MANUAL_MDI;
+	else if (ctrl == ETH_TP_MDI_X)
+		crossover = QCA8084_SFC_MANUAL_MDIX;
+	else if (ctrl == ETH_TP_MDI_AUTO)
+		crossover = QCA8084_SFC_AUTOMATIC_CROSSOVER;
+	else
+		return 0;
+
+	return phy_modify_changed(phydev, QCA8084_SPECIFIC_FUNCTION_CONTROL,
+				  QCA8084_SFC_MDI_CROSSOVER_MODE_M,
+				  FIELD_PREP(QCA8084_SFC_MDI_CROSSOVER_MODE_M,
+					     crossover));
+}
+
+/*
+ * phylib config_aneg callback: apply the MDI-X setting (soft
+ * resetting the PHY if the register actually changed, as the
+ * crossover bits only take effect after a reset), advertise 2.5G
+ * through MMD7 AN when requested, then run the generic clause-22
+ * autoneg setup.
+ */
+static int qca8084_config_aneg(struct phy_device *phydev)
+{
+	int ret = 0;
+
+	ret = qca8084_config_mdix(phydev, phydev->mdix_ctrl);
+	if (ret < 0)
+		return ret;
+
+	/* Changes of the midx bits are disruptive to the normal operation;
+	 * therefore any changes to these registers must be followed by a
+	 * software reset to take effect.
+	 */
+	if (ret == 1) {
+		ret = genphy_soft_reset(phydev);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (phydev->autoneg == AUTONEG_ENABLE) {
+		int phy_ctrl = 0;
+
+		if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+				      phydev->advertising))
+			phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
+
+		ret = phy_modify_mmd_changed(phydev,
+					     MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+					     MDIO_AN_10GBT_CTRL_ADV2_5G,
+					     phy_ctrl);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* 'ret' > 0 forces __genphy_config_aneg to restart autoneg */
+	return __genphy_config_aneg(phydev, ret);
+}
+
+/*
+ * ethtool: number of hardware statistics exposed by this driver.
+ */
+static int qca8084_get_sset_count(struct phy_device *phydev)
+{
+	return ARRAY_SIZE(qca8084_hw_stats);
+}
+
+/*
+ * ethtool: copy the name of every hardware statistic into @data,
+ * one ETH_GSTRING_LEN-sized slot per entry.
+ */
+static void qca8084_get_strings(struct phy_device *phydev, u8 *data)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(qca8084_hw_stats); idx++)
+		strscpy(data + idx * ETH_GSTRING_LEN,
+			qca8084_hw_stats[idx].string, ETH_GSTRING_LEN);
+}
+
+/*
+ * Read one hardware statistic and fold it into the driver's 64-bit
+ * accumulator (the counters are configured clear-on-read in
+ * qca8084_config_init).  Multi-word MMD counters span 'count'
+ * consecutive 16-bit registers, most significant word first.
+ *
+ * Returns the accumulated value, or U64_MAX on MDIO read failure.
+ */
+static u64 qca8084_get_stat(struct phy_device *phydev, int idx)
+{
+	const struct qca8084_hw_stat *stat = &qca8084_hw_stats[idx];
+	struct qca8084_priv *priv = phydev->priv;
+	u64 ret;
+	int i;
+
+	ret = 0;
+	for (i = 0; i < stat->count; i++) {
+		int val;
+
+		ret <<= 16;
+		if (stat->access_type == MMD)
+			val = phy_read_mmd(phydev, stat->mmd_id,
+					   stat->reg + i);
+		else
+			/*
+			 * NOTE(review): the non-MMD path re-reads the
+			 * same register on every iteration (no '+ i')
+			 * — presumably such stats have count == 1;
+			 * confirm against qca8084_hw_stats.
+			 */
+			val = phy_read(phydev, stat->reg);
+
+		if (val < 0)
+			return U64_MAX;
+
+		ret |= (u16)val;
+	}
+
+	ret = ret & stat->mask;
+	priv->stats[idx] += ret;
+	return priv->stats[idx];
+}
+
+/*
+ * ethtool: fill @data with the current value of every hardware
+ * statistic, in table order.
+ */
+static void qca8084_get_stats(struct phy_device *phydev,
+			     struct ethtool_stats *stats, u64 *data)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(qca8084_hw_stats); idx++)
+		data[idx] = qca8084_get_stat(phydev, idx);
+}
+
+/*
+ * phylib get_features callback: advertise 2.5GBase-T (including for
+ * EEE), strip the half-duplex modes that USXGMII cannot carry, then
+ * let genphy fill in the remaining abilities from the standard
+ * registers.
+ */
+static int qca8084_get_features(struct phy_device *phydev)
+{
+	linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+			 phydev->supported);
+
+	/* usxgmii does not support half duplex, we need to mask these
+	 * modes */
+	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+			   phydev->supported);
+	linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+			   phydev->supported);
+	linkmode_clear_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+			   phydev->supported);
+
+	/* this PHY supports EEE 2.5Gbit/s */
+	linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+			 phydev->supported_eee);
+
+	return genphy_read_abilities(phydev);
+}
+
+/*
+ * phylib read_status callback: refresh link state, link-partner
+ * abilities (including the MMD7 2.5G status), then decode speed,
+ * duplex and MDI-X state from the vendor specific-status register
+ * once the hardware reports them resolved.
+ */
+static int qca8084_read_status(struct phy_device *phydev)
+{
+	int ss, err, val;
+
+	err = genphy_update_link(phydev);
+	if (err)
+		return err;
+
+	phydev->speed = SPEED_UNKNOWN;
+	phydev->duplex = DUPLEX_UNKNOWN;
+	phydev->pause = 0;
+	phydev->asym_pause = 0;
+
+	err = genphy_read_lpa(phydev);
+	if (err < 0)
+		return err;
+
+	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
+	if (val < 0)
+		return val;
+
+	mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, val);
+
+	ss = phy_read(phydev, QCA8084_SPECIFIC_STATUS);
+	if (ss < 0)
+		return ss;
+
+	if (ss & QCA8084_SS_SPEED_DUPLEX_RESOLVED) {
+		int sfc;
+
+		sfc = phy_read(phydev, QCA8084_SPECIFIC_FUNCTION_CONTROL);
+		if (sfc < 0)
+			return sfc;
+
+		switch (ss & QCA8084_SS_SPEED_MASK) {
+		case QCA8084_SS_SPEED_10:
+			phydev->speed = SPEED_10;
+			break;
+		case QCA8084_SS_SPEED_100:
+			phydev->speed = SPEED_100;
+			break;
+		case QCA8084_SS_SPEED_1000:
+			phydev->speed = SPEED_1000;
+			break;
+		case QCA8084_SS_SPEED_2500:
+			phydev->speed = SPEED_2500;
+			break;
+		}
+		if (ss & QCA8084_SS_DUPLEX)
+			phydev->duplex = DUPLEX_FULL;
+		else
+			phydev->duplex = DUPLEX_HALF;
+
+		if (ss & QCA8084_SS_MDIX)
+			phydev->mdix = ETH_TP_MDI_X;
+		else
+			phydev->mdix = ETH_TP_MDI;
+
+		switch (FIELD_GET(QCA8084_SFC_MDI_CROSSOVER_MODE_M, sfc)) {
+		case QCA8084_SFC_MANUAL_MDI:
+			phydev->mdix_ctrl = ETH_TP_MDI;
+			break;
+		case QCA8084_SFC_MANUAL_MDIX:
+			phydev->mdix_ctrl = ETH_TP_MDI_X;
+			break;
+		case QCA8084_SFC_AUTOMATIC_CROSSOVER:
+			phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+			break;
+		}
+	}
+
+	if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
+		phy_resolve_aneg_pause(phydev);
+
+	return 0;
+}
+
+/* phylib driver entry for every PHY (copper and serdes) of the package */
+static struct phy_driver qca8084_driver[] = {
+{
+	PHY_ID_MATCH_EXACT(QCA8084_PHY_ID),
+	.name			= "Qualcomm QCA8084",
+	.flags			= 0,
+	.probe			= qca8084_probe,
+	.soft_reset		= genphy_soft_reset,
+	.link_change_notify	= qca8084_link_change_notify,
+	.get_features		= qca8084_get_features,
+	.config_init		= qca8084_config_init,
+	.config_aneg		= qca8084_config_aneg,
+	.get_sset_count		= qca8084_get_sset_count,
+	.get_strings		= qca8084_get_strings,
+	.get_stats		= qca8084_get_stats,
+	.read_status		= qca8084_read_status,
+	.suspend		= genphy_suspend,
+	.resume			= genphy_resume,
+}};
+
+module_phy_driver(qca8084_driver);
+
+/* device-id table used for module autoloading via the MDIO bus */
+static struct mdio_device_id __maybe_unused qca_tbl[] = {
+	{ PHY_ID_MATCH_EXACT(QCA8084_PHY_ID) },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(mdio, qca_tbl);
+
+MODULE_DESCRIPTION("Qualcomm Atheros QCA8084/QCA8085 PHY driver")
+MODULE_AUTHOR("Maxime Bizon");
+MODULE_LICENSE("GPL");
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/phy/qca8084.h	2023-10-26 15:00:01.218735023 +0200
@@ -0,0 +1,244 @@
+#ifndef QCA8084_H_
+#define QCA8084_H_
+
+#define QCA8084_PHY_ID			0x004dd180
+#define QCA8084_SOC_ID			0x17
+
+#define QCA8084_EPHY_COUNT		4
+#define QCA8084_SERDES_COUNT		3
+
+/*
+ * SOC address space
+ */
+#define QCA8084_SOC_ID_REG			0x0
+#define QCA8084_UPHY_MAP_REG			0xc90f014
+#define QCA8084_EPHY_MAP_REG			0xc90f018
+#define  EPHY_MAP_EFUSE_LOAD_PHY01n		(1 << 20)
+#define  EPHY_MAP_EFUSE_LOAD_PHY23n		(1 << 21)
+#define QCA8084_GEPHY0_TX_CBCR_REG(x)		(0xc800058 + x * 0x20)
+#define QCA8084_SRDS0_SYS_CBCR_REG		0xc8001a8
+#define QCA8084_SRDS1_SYS_CBCR_REG		0xc8001ac
+#define QCA8084_EPHY0_SYS_CBCR_REG		0xc8001b0
+#define QCA8084_EPHY1_SYS_CBCR_REG		0xc8001b4
+#define QCA8084_EPHY2_SYS_CBCR_REG		0xc8001b8
+#define QCA8084_EPHY3_SYS_CBCR_REG		0xc8001bc
+#define QCA8084_GCC_GEPHY_MISC_REG			0xc800304
+#define QCA8084_QFPROM_RAW_PTE_ROW2_MSB_REG		0xc900014
+#define QCA8084_QFPROM_RAW_CALIBRATION_ROW4_LSB_REG	0xc900048
+#define QCA8084_QFPROM_RAW_CALIBRATION_ROW6_MSB_REG	0xc90005c
+#define QCA8084_QFPROM_RAW_CALIBRATION_ROW7_LSB_REG	0xc900060
+#define QCA8084_QFPROM_RAW_CALIBRATION_ROW8_LSB_REG	0xc900068
+
+#define QCA8084_TLMM_CFG_REG(pin)		(0xc400000 + (pin) * 0x1000)
+
+
+/*
+ * EPHY address space
+ */
+
+/* vendor specific clause 22 registers */
+#define QCA8084_SPECIFIC_FUNCTION_CONTROL	0x10
+#define QCA8084_SFC_MDI_CROSSOVER_MODE_M	GENMASK(6, 5)
+#define QCA8084_SFC_AUTOMATIC_CROSSOVER		0x3
+#define QCA8084_SFC_MANUAL_MDIX			0x1
+#define QCA8084_SFC_MANUAL_MDI			0x0
+
+#define QCA8084_SPECIFIC_STATUS			0x11
+#define QCA8084_SS_MDIX				BIT(6)
+#define QCA8084_SS_SPEED_MASK			GENMASK(9, 7)
+#define QCA8084_SS_SPEED_2500			(4 << 7)
+#define QCA8084_SS_SPEED_1000			(2 << 7)
+#define QCA8084_SS_SPEED_100			(1 << 7)
+#define QCA8084_SS_SPEED_10			(0 << 7)
+#define QCA8084_SS_SPEED_DUPLEX_RESOLVED	BIT(11)
+#define QCA8084_SS_DUPLEX			BIT(13)
+
+#define QCA8084_FIFO_CONTROL			0x19
+#define QCA8084_FIFO_RESET_MASK			0x3
+
+#define QCA8084_DEBUG_PORT_ADDR			0x1d
+#define QCA8084_DEBUG_PORT_DATA			0x1e
+
+/* internal/debug phy registers (access through clause 22 debug regs) */
+#define QCA8084_PHY_LDO_EFUSE_REG	0x180
+#define QCA8084_PHY_ICC_EFUSE_REG	0x280
+
+#define QCA8084_PHY_DEBUG_ANA_INTERFACE_CLK_SEL			0x8b80
+# define ANA_INTERFACE_CLK_SEL_ADC_EDGE_MASK			0x00f0
+# define ANA_INTERFACE_CLK_SEL_ADC_EDGE_FALLING			0x00f0
+# define ANA_INTERFACE_CLK_SEL_ADC_EDGE_RISING			0x0000
+
+/* Clause 45 MMD3 phy registers */
+#define QCA8084_PHY_MMD3_ADDR_8023AZ_EEE_2500M_CAPABILITY	0x0015
+# define QCA8084_PHY_EEE_CAPABILITY_2500M			0x1
+
+/* MMD7 phy registers */
+#define QCA8084_PHY_MMD7_IPG_10_11_ENABLE			0x901d
+# define QCA8084_PHY_MMD7_IPG_11_EN				0x1
+
+#define QCA808X_PHY_MMD7_LED_POLARITY_CTRL	0x901a
+# define LED_POLARITY_MASK			0x0040
+
+#define QCA808X_PHY_MMD7_LED0_CTRL		0x8078
+#define QCA808X_PHY_MMD7_LED1_CTRL		0x8074
+#define QCA808X_PHY_MMD7_LED2_CTRL		0x8076
+# define LEDx_CTRL_LINK_2500M_LIGHT_EN		0x8000
+# define LEDx_CTRL_LINK_1000M_LIGHT_EN		0x0040
+# define LEDx_CTRL_LINK_100M_LIGHT_EN		0x0020
+# define LEDx_CTRL_LINK_10M_LIGHT_EN		0x0010
+# define LEDx_CTRL_RX_TRAFFIC_BLINK_EN		0x0200
+# define LEDx_CTRL_TX_TRAFFIC_BLINK_EN		0x0400
+
+#define LEDx_CTRL_LINK_ANY_SPEED	(LEDx_CTRL_LINK_10M_LIGHT_EN |	 \
+					 LEDx_CTRL_LINK_100M_LIGHT_EN |	 \
+					 LEDx_CTRL_LINK_1000M_LIGHT_EN | \
+					 LEDx_CTRL_LINK_2500M_LIGHT_EN)
+#define LEDx_CTRL_BLINK_ANY_DIR		(LEDx_CTRL_RX_TRAFFIC_BLINK_EN | \
+					 LEDx_CTRL_TX_TRAFFIC_BLINK_EN)
+
+
+
+/*
+ * UNIPHY address space
+ */
+
+/* clause 22 space registers */
+#define QCA8084_UNIPHY_PLL_POWER_ON_AND_RESET		0
+# define QCA8084_UNIPHY_ANA_SOFT_RESET			0
+# define QCA8084_UNIPHY_ANA_SOFT_RELEASE		0x40
+
+/*
+ * XPCS address space
+ */
+
+/* MMD1 registers */
+#define QCA8084_UNIPHY_MMD1_CDA_CONTROL1		0x0020
+# define QCA8084_UNIPHY_MMD1_SSCG_ENABLE		0x8
+#define QCA8084_UNIPHY_MMD1_CALIBRATION4		0x0078
+# define QCA8084_UNIPHY_MMD1_CALIBRATION_DONE		0x80
+#define QCA8084_UNIPHY_MMD1_BYPASS_TUNING_IPG		0x0189
+# define QCA8084_UNIPHY_MMD1_BYPASS_TUNING_IPG_EN	0x0fff
+#define QCA8084_UNIPHY_MMD1_MODE_CTRL			0x011b
+# define QCA8084_UNIPHY_MMD1_XPCS_MODE			0x1000
+#define QCA8084_UNIPHY_MMD1_CHANNEL0_CFG		0x0120
+#define QCA8084_UNIPHY_MMD1_GMII_DATAPASS_SEL		0x0180
+# define QCA8084_UNIPHY_MMD1_DATAPASS_MASK		0x1
+# define QCA8084_UNIPHY_MMD1_DATAPASS_USXGMII		0x1
+# define QCA8084_UNIPHY_MMD1_DATAPASS_SGMII		0x0
+#define QCA8084_UNIPHY_MMD1_USXGMII_RESET		0x018c
+
+/* MMD3 registers */
+#define QCA8084_UNIPHY_MMD3_PCS_CTRL2			0x7
+# define QCA8084_UNIPHY_MMD3_PCS_TYPE_10GBASE_R		0
+#define QCA8084_UNIPHY_MMD3_AN_LP_BASE_ABL2		0x14
+# define QCA8084_UNIPHY_MMD3_XPCS_EEE_CAP		0x40
+#define QCA8084_UNIPHY_MMD3_10GBASE_R_PCS_STATUS1	0x20
+# define QCA8084_UNIPHY_MMD3_10GBASE_R_UP		0x1000
+#define QCA8084_UNIPHY_MMD3_DIG_CTRL1			0x8000
+# define QCA8084_UNIPHY_MMD3_XPCS_SOFT_RESET		0x8000
+# define QCA8084_UNIPHY_MMD3_USXGMII_EN			0x200
+#define QCA8084_UNIPHY_MMD3_EEE_MODE_CTRL		0x8006
+# define QCA8084_UNIPHY_MMD3_EEE_EN			0x3
+# define QCA8084_UNIPHY_MMD3_EEE_RES_REGS		0x100
+# define QCA8084_UNIPHY_MMD3_EEE_SIGN_BIT_REGS		0x40
+#define QCA8084_UNIPHY_MMD3_VR_RPCS_TPC			0x8007
+# define QCA8084_UNIPHY_MMD3_QXGMII_EN			0x1400
+#define QCA8084_UNIPHY_MMD3_EEE_TX_TIMER		0x8008
+# define QCA8084_UNIPHY_MMD3_EEE_TSL_REGS		0xa
+# define QCA8084_UNIPHY_MMD3_EEE_TLU_REGS		0xc0
+# define QCA8084_UNIPHY_MMD3_EEE_TWL_REGS		0x1600
+#define QCA8084_UNIPHY_MMD3_EEE_RX_TIMER		0x8009
+# define QCA8084_UNIPHY_MMD3_EEE_100US_REG_REGS		0xc8
+# define QCA8084_UNIPHY_MMD3_EEE_RWR_REG_REGS		0x1c00
+#define QCA8084_UNIPHY_MMD3_MII_AM_INTERVAL		0x800a
+# define QCA8084_UNIPHY_MMD3_MII_AM_INTERVAL_VAL	0x6018
+#define QCA8084_UNIPHY_MMD3_EEE_MODE_CTRL1		0x800b
+# define QCA8084_UNIPHY_MMD3_EEE_TRANS_LPI_MODE		0x1
+# define QCA8084_UNIPHY_MMD3_EEE_TRANS_RX_LPI_MODE	0x100
+
+/* MMD26 27 28 31 registers (per-port MMDs) */
+#define QCA8084_UNIPHY_MMD_MII_CTRL			0
+# define QCA8084_UNIPHY_MMD_XPCS_SPEED_MASK		0x2060
+# define QCA8084_UNIPHY_MMD_XPCS_SPEED_2500		0x20
+# define QCA8084_UNIPHY_MMD_XPCS_SPEED_1000		0x40
+# define QCA8084_UNIPHY_MMD_XPCS_SPEED_100		0x2000
+# define QCA8084_UNIPHY_MMD_XPCS_SPEED_10		0
+# define QCA8084_UNIPHY_MMD_MII_AN_ENABLE		0x1000
+# define QCA8084_UNIPHY_MMD_MII_AN_RESTART		0x200
+#define QCA8084_UNIPHY_MMD_MII_DIG_CTRL			0x8000
+# define QCA8084_UNIPHY_MMD3_USXG_FIFO_RESET		0x400
+# define QCA8084_UNIPHY_MMD_USXG_FIFO_RESET		0x20
+#define QCA8084_UNIPHY_MMD_MII_AN_INT_MSK		0x8001
+# define QCA8084_UNIPHY_MMD_AN_COMPLETE_INT		0x1
+# define QCA8084_UNIPHY_MMD_TX_CONFIG_CTRL		0x8
+# define QCA8084_UNIPHY_MMD_MII_4BITS_CTRL		0x0
+#define QCA8084_UNIPHY_MMD_MII_ERR_SEL			0x8002
+# define QCA8084_UNIPHY_MMD_MII_AN_COMPLETE_INT		0x1
+#define QCA8084_UNIPHY_MMD_MII_XAUI_MODE_CTRL		0x8004
+# define QCA8084_UNIPHY_MMD_TX_IPG_CHECK_DISABLE	0x1
+
+
+/*
+ * clocks definitions
+ */
+enum {
+	QCA8084_P_XO,
+	QCA8084_P_UNIPHY0_RX,
+	QCA8084_P_UNIPHY0_TX,
+	QCA8084_P_UNIPHY1_RX,
+	QCA8084_P_UNIPHY1_TX,
+	QCA8084_P_UNIPHY1_RX312P5M,
+	QCA8084_P_UNIPHY1_TX312P5M,
+	QCA8084_P_MAX,
+};
+
+#define UQXGMII_SPEED_2500M_CLK			312500000
+#define UQXGMII_SPEED_1000M_CLK			125000000
+#define UQXGMII_SPEED_100M_CLK			25000000
+#define UQXGMII_SPEED_10M_CLK			2500000
+#define UQXGMII_XPCS_SPEED_2500M_CLK		78125000
+#define QCA8084_AHB_CLK_RATE_104P17M		104160000
+#define QCA8084_SYS_CLK_RATE_25M		25000000
+#define QCA8084_XO_CLK_RATE_50M			50000000
+
+#define QCA8084_CLK_BASE_REG                   0x0c800000
+
+/*
+ * CBCR register fields
+ */
+#define CBCR_CLK_ENABLE		(1 << 0)
+#define CBCR_CLK_RESET_BIT	2
+#define CBCR_CLK_OFF		(1u << 31)
+
+/*
+ * RGC register fields
+ */
+#define RCGR_HDIV_MASK		(0x3f << 0)
+#define RCGR_HDIV_SHIFT		0
+#define RCGR_HDIV(x)		((x) << 0)
+
+#define RCGR_SRC_SEL_MASK	(0x7 << 8)
+#define RCGR_SRC_SEL(x)		((x) << 8)
+#define RCGR_SRC_SEL_SHIFT	8
+#define RCGR_DIV_MAX		0x1f
+
+/*
+ * CMD register fields
+ */
+#define RCGR_CMD_ROOT_OFF	(1u << 31)
+#define RCGR_CMD_UPDATE		(1 << 0)
+
+/*
+ * CDIV register fields
+ */
+#define CDIVR_DIVIDER_MASK	0x1f
+#define CDIVR_DIVIDER_SHIFT	0
+
+/*
+ * TLMM configuration registers fields.
+ */
+#define TLMM_CFG_FNSEL(x)		(((x) & 0xf) << 2)
+#define TLMM_CFG_FNSEL_MASK		TLMM_CFG_FNSEL(0xf)
+#define TLMM_CFG_FNSEL_Px_LED_1		0x1
+
+#endif
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/phy/realtek-hwmon.c	2023-03-13 18:45:13.507496124 +0100
@@ -0,0 +1,138 @@
+/*
+ * realtek-hwmon.c for realtek-hwmon
+ * Created by <nschichan@freebox.fr> on Mon Mar  8 17:05:09 2021
+ */
+
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/ctype.h>
+#include <linux/hwmon.h>
+
+#include "realtek.h"
+
+#define RTL8221B_TSRR_REG		0xbd84
+#define RTL8221B_TSRR_TSOUT_SYNC_MASK	(0x3ffL)
+#define RTL8221B_TSRR_TSOUT_SYNC_SIGN	(0x200)
+
+#if IS_REACHABLE(CONFIG_HWMON)
+
+static umode_t realtek_hwmon_is_visible(const void *data,
+					enum hwmon_sensor_types type,
+					u32 attr, int channel)
+{
+	if (type != hwmon_temp)
+		return 0;
+
+	switch (attr) {
+	case hwmon_temp_input:
+		return 0444;
+	default:
+		return 0;
+	}
+}
+
+static int realtek_hwmon_get(struct phy_device *phydev, long *value)
+{
+	int raw;
+
+	raw = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_TSRR_REG);
+	if (raw < 0)
+		return raw;
+
+	raw &= RTL8221B_TSRR_TSOUT_SYNC_MASK;
+
+	if (raw & RTL8221B_TSRR_TSOUT_SYNC_SIGN) {
+		/*
+		 * negative value: sign extend it.
+		 */
+		*value = raw | ~RTL8221B_TSRR_TSOUT_SYNC_MASK;
+	} else {
+		*value = raw;
+	}
+
+	*value *= 500;
+
+	return 0;
+}
+
+static int realtek_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+			      u32 attr, int channel, long *value)
+{
+	if (type != hwmon_temp)
+		return -ENOTSUPP;
+
+	switch (attr) {
+	case hwmon_temp_input:
+		return realtek_hwmon_get(dev_get_drvdata(dev), value);
+	default:
+		return -ENOTSUPP;
+	}
+}
+
+static const struct hwmon_ops realtek_hwmon_ops = {
+	.is_visible = realtek_hwmon_is_visible,
+	.read = realtek_hwmon_read,
+};
+
+static u32 realtek_hwmon_temp_config[] = {
+	HWMON_T_INPUT,
+	0,
+};
+
+static const struct hwmon_channel_info realtek_hwmon_temp = {
+	.type = hwmon_temp,
+	.config = realtek_hwmon_temp_config,
+};
+
+static u32 realtek_hwmon_chip_config[] = {
+	HWMON_C_REGISTER_TZ,
+	0,
+};
+
+static const struct hwmon_channel_info realtek_hwmon_chip = {
+	.type = hwmon_chip,
+	.config = realtek_hwmon_chip_config,
+};
+
+
+static const struct hwmon_channel_info *realtek_hwmon_info[] = {
+	&realtek_hwmon_chip,
+	&realtek_hwmon_temp,
+	NULL,
+};
+
+static const struct hwmon_chip_info realtek_hwmon_chip_info = {
+	.ops = &realtek_hwmon_ops,
+	.info = realtek_hwmon_info,
+};
+
+int realtek_hwmon_probe(struct phy_device *phydev)
+{
+	struct device *dev = &phydev->mdio.dev;
+	char *name;
+	int i, j;
+	struct device *hdev;
+
+	name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+	if (!name)
+		return -ENOMEM;
+
+	for (i = j = 0; name[i]; i++) {
+		if (isalnum(name[i]) ||
+		    name[i] == '.' || name[i] == ':') {
+			if (i != j)
+				name[j] = name[i];
+			j++;
+		}
+	}
+	name[j] = '\0';
+
+	hdev = devm_hwmon_device_register_with_info(dev,
+						    name,
+						    phydev,
+						    &realtek_hwmon_chip_info,
+						    NULL);
+
+	return PTR_ERR_OR_ZERO(hdev);
+}
+#endif
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/phy/realtek.h	2023-03-13 18:45:13.507496124 +0100
@@ -0,0 +1,19 @@
+/*
+ * realtek.h for realtek-hwmon
+ * Created by <nschichan@freebox.fr> on Mon Mar  8 17:05:57 2021
+ */
+
+#pragma once
+
+#if IS_REACHABLE(CONFIG_HWMON)
+
+int realtek_hwmon_probe(struct phy_device *phydev);
+
+#else
+
+static inline int realtek_hwmon_probe(struct phy_device *phydev)
+{
+	return 0;
+}
+
+#endif
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/ahb.h	2024-01-19 17:01:19.853846702 +0100
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH12K_AHB_H
+#define ATH12K_AHB_H
+
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/qcom_rproc.h>
+#include "core.h"
+#include "debug.h"
+
+#define DT_NODE_RPROC_TEXTPD_NAME "d100000.remoteproc:remoteproc_pd4"
+#define DT_NODE_RPROC_ROOTPD_NAME "d100000.remoteproc"
+
+#define ATH12K_AHB_RECOVERY_TIMEOUT (3 * HZ)
+
+#define ATH12K_AHB_SMP2P_SMEM_MSG		GENMASK(15, 0)
+#define ATH12K_AHB_SMP2P_SMEM_SEQ_NO		GENMASK(31, 16)
+#define ATH12K_AHB_SMP2P_SMEM_VALUE_MASK	0xFFFFFFFF
+#define ATH12K_PCI_CE_WAKE_IRQ	2
+#define ATH12K_PCI_IRQ_CE0_OFFSET	3
+
+enum ath12k_ahb_smp2p_msg_id {
+	ATH12K_AHB_POWER_SAVE_ENTER = 1,
+	ATH12K_AHB_POWER_SAVE_EXIT,
+};
+
+struct ath12k_base;
+
+struct ath12k_ahb {
+	struct rproc *tgt_rproc, *tgt_text_rproc, *tgt_rrproc;
+	struct {
+		struct device *dev;
+		struct iommu_domain *iommu_domain;
+		dma_addr_t msa_paddr;
+		u32 msa_size;
+		dma_addr_t ce_paddr;
+		u32 ce_size;
+		bool use_tz;
+	} fw;
+	struct {
+		unsigned short seq_no;
+		unsigned int smem_bit;
+		struct qcom_smem_state *smem_state;
+	} smp2p_info;
+};
+
+static inline struct ath12k_ahb *ath12k_ahb_priv(struct ath12k_base *ab)
+{
+	return (struct ath12k_ahb *)ab->drv_priv;
+}
+static inline int ath12k_rproc_register_subsys_notifier(struct ath12k_base *ab)
+{
+#ifdef CONFIG_REMOTEPROC
+/*
+	struct ath12k_ahb *ab_ahb = ath12k_ahb_priv(ab);
+	ab->ssr_atomic_upd_handle = qcom_register_ssr_atomic_notifier(ab_ahb->tgt_rproc->name,
+								      &ab->atomic_ssr_nb);
+	if (!ab->ssr_atomic_upd_handle) {
+		ath12k_err(ab, "failed to register user_pd atomic handle\n");
+		return -EINVAL;
+	}
+
+	ab->ssr_upd_handle = qcom_register_ssr_notifier(ab_ahb->tgt_rproc->name, &ab->ssr_nb);
+	if (!ab->ssr_upd_handle)
+		return -EINVAL;
+
+	if (ab->userpd_id == USERPD_0) {
+		ab->ssr_atomic_rpd_handle = qcom_register_ssr_atomic_notifier(ab_ahb->tgt_rrproc->name,
+									      &ab->rpd_atomic_ssr_nb);
+		if (!ab->ssr_atomic_rpd_handle) {
+			ath12k_err(ab, "failed to register root_pd atomic handle\n");
+			return -EINVAL;
+		}
+		ab->ssr_rpd_handle = qcom_register_ssr_notifier(ab_ahb->tgt_rrproc->name,
+								&ab->rpd_ssr_nb);
+		if (!ab->ssr_rpd_handle)
+			return -EINVAL;
+	}
+*/
+	return 0;
+#else
+	return -ENODEV;
+#endif
+}
+static inline int ath12k_rproc_unregister_subsys_notifier(struct ath12k_base *ab)
+{
+#ifdef CONFIG_REMOTEPROC
+/*
+	if (ab->ssr_upd_handle)
+		qcom_unregister_ssr_notifier(ab->ssr_upd_handle, &ab->ssr_nb);
+	if (ab->ssr_atomic_upd_handle)
+		qcom_unregister_ssr_atomic_notifier(ab->ssr_atomic_upd_handle,
+						    &ab->atomic_ssr_nb);
+	if (ab->ssr_atomic_rpd_handle)
+		qcom_unregister_ssr_atomic_notifier(ab->ssr_atomic_rpd_handle,
+						    &ab->rpd_atomic_ssr_nb);
+	if (ab->ssr_rpd_handle)
+		qcom_unregister_ssr_notifier(ab->ssr_rpd_handle, &ab->rpd_ssr_nb);
+*/
+	return 0;
+#else
+	return -ENODEV;
+#endif
+}
+
+static const struct ath12k_bus_params ath12k_ahb_bus_params = {
+	.fixed_bdf_addr = false,
+	.fixed_mem_region = true,
+};
+
+static const struct ath12k_bus_params ath12k_internal_pci_bus_params = {
+	.fixed_bdf_addr = false,
+	.fixed_mem_region = true,
+};
+
+#endif
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/bondif.h	2024-03-18 14:40:14.843741115 +0100
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __BONDIF_H
+#define __BONDIF_H
+
+#define MAX_MLO_CHIPS 3
+#define MAX_PDEV_PER_SOC 3
+
+#define ATH12K_PPE_DS_2G_CORE_MASK		0x1
+#define ATH12K_PPE_DS_5G_CORE_MASK		0x2
+#define ATH12K_PPE_DS_6G_CORE_MASK		0x4
+#define ATH12K_PPE_DS_DEFAULT_CORE_MASK		0x7
+
+void ath12k_disable_ppe_for_link_netdev(struct ath12k_base *ab,
+				       struct ath12k_link_vif *arvif,
+				       struct net_device *link_dev);
+void ath12k_enable_ppe_for_link_netdev(struct ath12k_base *ab,
+				      struct ath12k_link_vif *arvif,
+				      struct net_device *link_dev);
+int ath12k_free_bonddev_for_sfe(struct wireless_dev *wdev,
+				struct ieee80211_vif *vif,
+				int link_num);
+int ath12k_mac_op_set_multicast_to_unicast(struct ieee80211_hw *hw,
+					   struct ieee80211_vif *vif,
+					   const bool enabled);
+int ath12k_bond_link_enslave(struct ath12k_link_vif *arvif, struct net_device *link_dev);
+void ath12k_bond_link_release(struct ath12k_link_vif *arvif);
+void ath12k_bond_enable_ppe_ds(void);
+void ath12k_bond_disable_ppe_ds(void);
+extern int g_bonded_interface_model;
+extern unsigned int ath12k_ppe_ds_enabled;
+extern unsigned int ath12k_mlo_capable;
+int ath12k_bond_dev_cb(struct wireless_dev *wdev, struct net_device *dev, bool is_register);
+extern int (*driver_bond_dev_cb)(struct wireless_dev *wdev, struct net_device *dev, bool is_register);
+enum ath12k_bond_state {
+	ATH12K_BOND_SETUP_INPROGRESS,
+	ATH12K_BOND_REGISTERED,
+	ATH12K_BOND_LINK0_REGISTERED,
+	ATH12K_BOND_LINK1_REGISTERED,
+	ATH12K_BOND_LINK2_REGISTERED,
+
+};
+struct ath12k_mld_dev {
+	struct net_device *netdev;
+	struct ieee80211_vif *vif;
+	struct ath12k_vif *ahvif;
+	struct wireless_dev *wdev; //dummy one for bond registration. Optimize Bond MLO API.
+	struct net_device *link_dev[MAX_MLO_CHIPS][MAX_PDEV_PER_SOC];
+	u8 primary_chipid, primary_pdevid;
+	bool mcast_dev_set;
+	bool mcast_to_ucast_en;
+	unsigned long bond_state;
+};
+
+#endif //__BONDIF_H
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/coredump.c	2024-03-18 14:40:14.843741115 +0100
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/devcoredump.h>
+#include <linux/platform_device.h>
+#include <linux/dma-direction.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <linux/uuid.h>
+#include <linux/time.h>
+#include "core.h"
+#include "coredump.h"
+#include "pci.h"
+#include "mhi.h"
+#include "debug.h"
+
+struct ath12k_coredump_segment_info ath12k_coredump_seg_info;
+EXPORT_SYMBOL(ath12k_coredump_seg_info);
+struct ath12k_coredump_info ath12k_coredump_ram_info;
+EXPORT_SYMBOL(ath12k_coredump_ram_info);
+
+static void *ath12k_coredump_find_segment(loff_t user_offset,
+					  struct ath12k_dump_segment *segment,
+					  int num_seg, size_t *data_left)
+{
+	int i;
+
+	for (i = 0; i < num_seg; i++, segment++) {
+		if (user_offset < segment->len) {
+			*data_left = user_offset;
+			return segment;
+		}
+		user_offset -= segment->len;
+	}
+
+	*data_left = 0;
+	return NULL;
+}
+
+static ssize_t ath12k_coredump_read_q6dump(char *buffer, loff_t offset, size_t count,
+					   void *data, size_t header_size)
+{
+	struct ath12k_coredump_state *dump_state = data;
+	struct ath12k_dump_segment *segments = dump_state->segments;
+	struct ath12k_dump_segment *seg;
+	void *elfcore = dump_state->header;
+	size_t data_left, copy_size, bytes_left = count;
+	void __iomem *addr;
+
+	/* Copy the header first */
+	if (offset < header_size) {
+		copy_size = header_size - offset;
+		copy_size = min(copy_size, bytes_left);
+
+		memcpy(buffer, elfcore + offset, copy_size);
+		offset += copy_size;
+		bytes_left -= copy_size;
+		buffer += copy_size;
+
+		return copy_size;
+	}
+
+	while (bytes_left) {
+		seg = ath12k_coredump_find_segment(offset - header_size, segments,
+						   dump_state->num_seg, &data_left);
+		/* End of segments check */
+		if (!seg) {
+			pr_info("Ramdump complete %lld bytes read\n", offset);
+			return 0;
+		}
+
+		if (data_left)
+			copy_size = min_t(size_t, bytes_left, data_left);
+		else
+			copy_size = bytes_left;
+
+		addr = (void __iomem *)seg->vaddr;
+		addr += data_left;
+		memcpy_fromio(buffer, addr, copy_size);
+
+		offset += copy_size;
+		buffer += copy_size;
+		bytes_left -= copy_size;
+	}
+
+	return count - bytes_left;
+}
+
+static void ath12k_coredump_free_q6dump(void *data)
+{
+	struct ath12k_coredump_state *dump_state = data;
+
+	complete(&dump_state->dump_done);
+}
+
+void ath12k_coredump_build_inline(struct ath12k_base *ab,
+				  struct ath12k_dump_segment *segments, int num_seg)
+{
+	struct ath12k_coredump_state dump_state;
+	struct timespec64 timestamp;
+	struct ath12k_dump_file_data *file_data;
+	size_t header_size;
+	struct ath12k_pci *ar_pci = (struct ath12k_pci *)ab->drv_priv;
+	struct device *dev;
+	u8 *buf;
+
+	header_size = sizeof(*file_data);
+	header_size += num_seg * sizeof(*segments);
+	header_size = PAGE_ALIGN(header_size);
+	buf = kzalloc(header_size, GFP_ATOMIC);
+	if (!buf) {
+		ath12k_warn(ab, "Failed to allocate memory for coredump\n");
+		return;
+	}
+
+	file_data = (struct ath12k_dump_file_data *)buf;
+	strscpy(file_data->df_magic, "ATH12K-FW-DUMP",
+		sizeof(file_data->df_magic));
+	file_data->len = cpu_to_le32(header_size);
+	file_data->version = cpu_to_le32(ATH12K_FW_CRASH_DUMP_V2);
+	if (ab->hif.bus == ATH12K_BUS_AHB || ab->hif.bus == ATH12K_BUS_HYBRID) {
+		file_data->chip_id = ab->qmi.target.chip_id;
+		file_data->qrtr_id = ab->qmi.service_ins_id;
+		file_data->bus_id = ab->userpd_id;
+	} else {
+		file_data->chip_id = cpu_to_le32(ar_pci->dev_id);
+		file_data->qrtr_id = cpu_to_le32(ar_pci->ab->qmi.service_ins_id);
+		file_data->bus_id = pci_domain_nr(ar_pci->pdev->bus);
+	}
+	dev = ab->dev;
+	guid_gen(&file_data->guid);
+	ktime_get_real_ts64(&timestamp);
+	file_data->tv_sec = cpu_to_le64(timestamp.tv_sec);
+	file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec);
+	file_data->num_seg = cpu_to_le32(num_seg);
+	file_data->seg_size = cpu_to_le32(sizeof(*segments));
+
+	/* copy segment details to file */
+	buf += offsetof(struct ath12k_dump_file_data, seg);
+	file_data->seg = (struct ath12k_dump_segment *)buf;
+	memcpy(file_data->seg, segments, num_seg * sizeof(*segments));
+
+	dump_state.header = file_data;
+	dump_state.num_seg = num_seg;
+	dump_state.segments = segments;
+	init_completion(&dump_state.dump_done);
+
+	dev_coredumpm(dev, THIS_MODULE, &dump_state, header_size, GFP_KERNEL,
+		      ath12k_coredump_read_q6dump, ath12k_coredump_free_q6dump);
+
+	/* Wait until the dump is read and free is called */
+	wait_for_completion(&dump_state.dump_done);
+	kfree(file_data);
+}
+
+static enum ath12k_fw_crash_dump_type
+ath12k_coredump_get_dump_type(u32 mem_region_type)
+{
+	enum ath12k_fw_crash_dump_type dump_type;
+
+	/* note: only add the supported memory type,
+		which is used to calculate the rem_seg_cnt */
+	switch(mem_region_type) {
+	case HOST_DDR_REGION_TYPE:
+		dump_type = ATH12K_FW_REMOTE_MEM_DATA;
+		break;
+	case CALDB_MEM_REGION_TYPE:
+		dump_type = ATH12K_FW_CALDB;
+		break;
+	case M3_DUMP_REGION_TYPE:
+		dump_type = ATH12K_FW_M3_DUMP;
+		break;
+	case PAGEABLE_MEM_REGION_TYPE:
+		dump_type = ATH12K_FW_PAGEABLE_DATA;
+		break;
+	case MLO_GLOBAL_MEM_REGION_TYPE:
+		dump_type = ATH12K_FW_MLO_GLOBAL;
+		break;
+	default:
+		dump_type = ATH12K_FW_DUMP_TYPE_MAX;
+		break;
+	}
+
+	return dump_type;
+}
+
+/* Extract and print the firmware crash reason (Q6 SFR/coredump string)
+ * from the RDDM image exposed by the MHI controller.
+ */
+static void ath12k_coredump_q6crash_reason(struct ath12k_base *ab)
+{
+	int i = 0;
+	uint64_t coredump_offset = 0;
+	struct ath12k_pci *ar_pci = (struct ath12k_pci *)ab->drv_priv;
+	struct mhi_controller *mhi_ctrl = ar_pci->mhi_ctrl;
+	struct mhi_buf *mhi_buf;
+	struct image_info *rddm_image;
+	struct ath12k_coredump_q6ramdump_header *ramdump_header;
+	struct ath12k_coredump_q6ramdump_entry *ramdump_table;
+	char *msg = NULL;
+	struct pci_dev *pci_dev = ar_pci->pdev;
+
+	rddm_image = mhi_ctrl->rddm_image;
+	mhi_buf = rddm_image->mhi_buf;
+
+	ath12k_info(ab, "CRASHED - [DID:DOMAIN:BUS:SLOT] - %x:%04u:%02u:%02u\n",
+		    pci_dev->device, pci_dev->bus->domain_nr,
+		    pci_dev->bus->number, PCI_SLOT(pci_dev->devfn));
+
+	/* Get RDDM header size */
+	ramdump_header = (struct ath12k_coredump_q6ramdump_header *)mhi_buf[0].buf;
+	ramdump_table = ramdump_header->ramdump_table;
+	coredump_offset = le32_to_cpu(ramdump_header->header_size);
+
+	/* Traverse ramdump table to get coredump offset */
+	while (i < MAX_RAMDUMP_TABLE_SIZE) {
+		if (!strncmp(ramdump_table->description, COREDUMP_DESC,
+			     sizeof(COREDUMP_DESC)) ||
+		    !strncmp(ramdump_table->description, Q6_SFR_DESC,
+			     sizeof(Q6_SFR_DESC))) {
+			break;
+		}
+		coredump_offset += le64_to_cpu(ramdump_table->size);
+		ramdump_table++;
+		i++;
+	}
+
+	if (i == MAX_RAMDUMP_TABLE_SIZE) {
+		ath12k_warn(ab, "Cannot find '%s' entry in ramdump\n",
+			    COREDUMP_DESC);
+		return;
+	}
+
+	/* Locate coredump data from the ramdump segments */
+	for (i = 0; i < rddm_image->entries; i++) {
+		if (coredump_offset < mhi_buf[i].len) {
+			msg = mhi_buf[i].buf + coredump_offset;
+			break;
+		}
+
+		coredump_offset -= mhi_buf[i].len;
+	}
+
+	if (msg && msg[0])
+		ath12k_err(ab, "Fatal error received from wcss!\n%s\n",
+			    msg);
+}
+
+/* Collect FW paging, RDDM, QMI target-mem and (optionally) QDSS segments
+ * into one segment array, then either stash it for the panic path or emit
+ * an inline devcoredump when FW recovery is enabled.
+ */
+void ath12k_coredump_download_rddm(struct ath12k_base *ab)
+{
+	struct ath12k_pci *ar_pci = (struct ath12k_pci *)ab->drv_priv;
+	struct mhi_controller *mhi_ctrl = ar_pci->mhi_ctrl;
+	struct image_info *rddm_img, *fw_img;
+	struct ath12k_dump_segment *segment, *seg_info;
+	int i, rem_seg_cnt = 0, len, num_seg, seg_sz, qdss_seg_cnt = 1;
+	int skip_count = 0;
+	enum ath12k_fw_crash_dump_type mem_type;
+	struct ath12k_coredump_segment_info *chip_seg;
+	int dump_count;
+	struct ath12k_hw_group *ag = ab->ag;
+	bool state = false;
+
+	if (ab->in_panic)
+		state = true;
+
+	ath12k_mhi_coredump(mhi_ctrl, state);
+	ath12k_coredump_q6crash_reason(ab);
+
+	rddm_img = mhi_ctrl->rddm_image;
+	fw_img = mhi_ctrl->fbc_image;
+
+	/* count target-mem regions of a type we know how to dump */
+	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+		if (ab->qmi.target_mem[i].type == HOST_DDR_REGION_TYPE ||
+		    (ab->qmi.target_mem[i].type == CALDB_MEM_REGION_TYPE && ath12k_cold_boot_cal && ab->hw_params->cold_boot_calib) ||
+		    ab->qmi.target_mem[i].type == M3_DUMP_REGION_TYPE ||
+		    ab->qmi.target_mem[i].type == PAGEABLE_MEM_REGION_TYPE ||
+		    ab->qmi.target_mem[i].type == MLO_GLOBAL_MEM_REGION_TYPE)
+			rem_seg_cnt++;
+	}
+
+	num_seg = fw_img->entries + rddm_img->entries + rem_seg_cnt;
+	if (ab->is_qdss_tracing)
+		num_seg += qdss_seg_cnt;
+
+	len = num_seg * sizeof(*segment);
+
+	segment = kzalloc(len, GFP_NOWAIT);
+	if (!segment) {
+		ath12k_err(ab, " Failed to allocate memory for segment for rddm download\n");
+		return;
+	}
+
+	seg_info = segment;
+	for (i = 0; i < fw_img->entries ; i++) {
+		if (!fw_img->mhi_buf[i].buf) {
+			skip_count++;
+			continue;
+		}
+		seg_sz = fw_img->mhi_buf[i].len;
+		seg_info->len = PAGE_ALIGN(seg_sz);
+		seg_info->addr = fw_img->mhi_buf[i].dma_addr;
+		seg_info->vaddr = fw_img->mhi_buf[i].buf;
+		seg_info->type = ATH12K_FW_CRASH_PAGING_DATA;
+		seg_info++;
+	}
+
+	for (i = 0; i < rddm_img->entries; i++) {
+		if (!rddm_img->mhi_buf[i].buf) {
+			skip_count++;
+			continue;
+		}
+		seg_sz = rddm_img->mhi_buf[i].len;
+		seg_info->len = PAGE_ALIGN(seg_sz);
+		seg_info->addr = rddm_img->mhi_buf[i].dma_addr;
+		seg_info->vaddr = rddm_img->mhi_buf[i].buf;
+		seg_info->type = ATH12K_FW_CRASH_RDDM_DATA;
+		seg_info++;
+	}
+
+	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+		mem_type = ath12k_coredump_get_dump_type(ab->qmi.target_mem[i].type);
+		if (mem_type == ATH12K_FW_DUMP_TYPE_MAX) {
+			ath12k_info(ab, "target mem region type %d not supported", ab->qmi.target_mem[i].type);
+			continue;
+		}
+
+		if (mem_type == ATH12K_FW_CALDB &&
+		    !(ath12k_cold_boot_cal && ab->hw_params->cold_boot_calib))
+			continue;
+
+		if (!ab->qmi.target_mem[i].paddr) {
+			skip_count++;
+			ath12k_info(ab, "Skipping mem region type %d", ab->qmi.target_mem[i].type);
+			continue;
+		}
+
+		seg_info->len = ab->qmi.target_mem[i].size;
+		seg_info->addr = ab->qmi.target_mem[i].paddr;
+		seg_info->vaddr = ab->qmi.target_mem[i].v.ioaddr;
+		seg_info->type = mem_type;
+		ath12k_info(ab,
+			    "seg vaddr is %px len is 0x%x type %d\n",
+			    seg_info->vaddr,
+			    seg_info->len,
+			    seg_info->type);
+		seg_info++;
+	}
+
+	if (ab->is_qdss_tracing) {
+		seg_info->len = ab->qmi.qdss_mem[0].size;
+		seg_info->addr = ab->qmi.qdss_mem[0].paddr;
+		seg_info->vaddr = ab->qmi.qdss_mem[0].v.ioaddr;
+		seg_info->type = ATH12K_FW_QDSS_DATA;
+		seg_info++;
+	}
+
+	num_seg = num_seg - skip_count;
+
+	if (!ab->fw_recovery_support || ab->in_panic) {
+		if (ag->mlo_capable) {
+			dump_count = atomic_read(&ath12k_coredump_ram_info.num_chip);
+			if (dump_count >= ATH12K_MAX_SOCS) {
+				ath12k_err(ab, "invalid chip number %d\n",
+					   dump_count);
+				return; /* FIXME: leaks 'segment' */
+			} else {
+				chip_seg = &ath12k_coredump_ram_info.chip_seg_info[dump_count];
+				chip_seg->chip_id = ar_pci->dev_id;
+				chip_seg->qrtr_id = ar_pci->ab->qmi.service_ins_id;
+				chip_seg->bus_id = pci_domain_nr(ar_pci->pdev->bus);
+				chip_seg->num_seg = num_seg;
+				chip_seg->seg = segment;
+				atomic_inc(&ath12k_coredump_ram_info.num_chip);
+			}
+		} else {
+			/* This part of code for 12.2 without mlo_capable=1 */
+			dump_count = atomic_read(&ath12k_coredump_ram_info.num_chip);
+			chip_seg = &ath12k_coredump_ram_info.chip_seg_info[dump_count];
+			chip_seg->chip_id = ar_pci->dev_id;
+			chip_seg->qrtr_id = ar_pci->ab->qmi.service_ins_id;
+			chip_seg->bus_id = pci_domain_nr(ar_pci->pdev->bus);
+			chip_seg->num_seg = num_seg;
+			chip_seg->seg = segment;
+			atomic_inc(&ath12k_coredump_ram_info.num_chip);
+		}
+
+		chip_seg = &ath12k_coredump_seg_info;
+		chip_seg->chip_id = ar_pci->dev_id;
+		chip_seg->qrtr_id = ar_pci->ab->qmi.service_ins_id;
+		chip_seg->bus_id = pci_domain_nr(ar_pci->pdev->bus);
+		chip_seg->num_seg = num_seg;
+		chip_seg->seg = segment;
+
+		ath12k_core_issue_bug_on(ab);
+
+	} else if (!ab->in_panic) {
+		ath12k_info(ab, "WLAN target is restarting");
+		ath12k_coredump_build_inline(ab, segment, num_seg);
+		kfree(segment);
+	}
+
+}
+
+/* Build and emit a devcoredump for the QDSS trace buffer described by
+ * @event_data. Fix: 'dump' must start NULL — it was read uninitialized
+ * by the '!dump' check below when the total_size guard was not taken.
+ */
+void ath12k_coredump_qdss_dump(struct ath12k_base *ab,
+			       struct ath12k_qmi_event_qdss_trace_save_data *event_data)
+{
+	struct ath12k_dump_segment *segment;
+	int len, num_seg;
+	void *dump = NULL;
+
+	num_seg = event_data->mem_seg_len;
+	len = sizeof(*segment);
+	segment = vzalloc(len);
+	if (!segment) {
+		ath12k_warn(ab, "fail to alloc memory for qdss\n");
+		return;
+	}
+
+	if (event_data->total_size &&
+	    event_data->total_size <= ab->qmi.qdss_mem[0].size)
+		dump = vzalloc(event_data->total_size);
+	if (!dump) {
+		vfree(segment);
+		return;
+	}
+
+	if (num_seg == 1) {
+		segment->len = event_data->mem_seg[0].size;
+		segment->vaddr = ab->qmi.qdss_mem[0].v.ioaddr;
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "seg vaddr is 0x%p len is 0x%x\n",
+			   segment->vaddr, segment->len);
+		segment->type = ATH12K_FW_QDSS_DATA;
+	} else if (num_seg == 2) {
+		/*FW sends 2 segments with segment 0 and segment 1 */
+
+		if (event_data->mem_seg[1].addr != ab->qmi.qdss_mem[0].paddr) {
+			ath12k_warn(ab, "Invalid seg 0 addr 0x%llx\n",
+			    event_data->mem_seg[1].addr);
+			goto out;
+		}
+		if (event_data->mem_seg[0].size + event_data->mem_seg[1].size !=
+		    ab->qmi.qdss_mem[0].size) {
+			ath12k_warn(ab, "Invalid total size 0x%x 0x%x\n",
+				    event_data->mem_seg[0].size,
+				    event_data->mem_seg[1].size);
+			goto out;
+		}
+
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "qdss mem seg0 addr 0x%llx size 0x%x\n",
+			   event_data->mem_seg[0].addr, event_data->mem_seg[0].size);
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "qdss mem seg1 addr 0x%llx size 0x%x\n",
+			   event_data->mem_seg[1].addr, event_data->mem_seg[1].size);
+
+		memcpy(dump,
+		       ab->qmi.qdss_mem[0].v.ioaddr + event_data->mem_seg[1].size,
+		       event_data->mem_seg[0].size);
+		memcpy(dump + event_data->mem_seg[0].size,
+		       ab->qmi.qdss_mem[0].v.ioaddr, event_data->mem_seg[1].size);
+
+		segment->len = event_data->mem_seg[0].size + event_data->mem_seg[1].size;
+		segment->vaddr = dump;
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "seg vaddr is 0x%p and len is 0x%x\n",
+			   segment->vaddr, segment->len);
+		segment->type = ATH12K_FW_QDSS_DATA;
+	}
+	ath12k_coredump_build_inline(ab, segment, 1);
+out:
+	vfree(segment);
+	vfree(dump);
+}
+
+/* Copy the M3 firmware dump region identified by @event_data out of the
+ * matching QMI target-mem chunk, hand it to devcoredump, then ack the
+ * upload to firmware (with -EINVAL if no matching region was found).
+ */
+void ath12k_coredump_m3_dump(struct ath12k_base *ab,
+			     struct ath12k_qmi_m3_dump_upload_req_data *event_data)
+{
+	struct target_mem_chunk *target_mem = ab->qmi.target_mem;
+	struct ath12k_qmi_m3_dump_data m3_dump_data;
+	void *dump;
+	int i, ret = 0;
+
+	dump = vzalloc(event_data->size);
+	if (!dump) {
+		return;
+	}
+
+	/* find the target-mem chunk that fully contains the requested dump */
+	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+		if (target_mem[i].paddr == event_data->addr &&
+		    event_data->size <= target_mem[i].size)
+			break;
+	}
+
+	if (i == ab->qmi.mem_seg_count) {
+		ath12k_warn(ab, "qmi invalid paddr from firmware for M3 dump\n");
+		ret = -EINVAL;
+		vfree(dump);
+		goto send_resp;
+	}
+
+	m3_dump_data.addr = target_mem[i].v.ioaddr;
+	m3_dump_data.size = event_data->size;
+	m3_dump_data.pdev_id = event_data->pdev_id;
+	m3_dump_data.timestamp = ktime_to_ms(ktime_get());
+
+	memcpy(dump, m3_dump_data.addr, m3_dump_data.size);
+
+	/* dev_coredumpv takes ownership of 'dump' and frees it */
+	dev_coredumpv(ab->dev, dump, m3_dump_data.size,
+		      GFP_KERNEL);
+
+send_resp:
+	ret = ath12k_qmi_m3_dump_upload_done_ind_send(ab, event_data->pdev_id, ret);
+	if (ret < 0)
+		ath12k_warn(ab, "qmi M3 dump upload done failed\n");
+}
\ No newline at end of file
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/coredump.h	2024-01-19 17:01:19.853846702 +0100
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _COREDUMP_H_
+#define _COREDUMP_H_
+
+#define ATH12K_FW_CRASH_DUMP_VERSION 1
+#define ATH12K_FW_CRASH_DUMP_V2      2
+
+#define MAX_RAMDUMP_TABLE_SIZE  6
+#define COREDUMP_DESC           "Q6-COREDUMP"
+#define Q6_SFR_DESC             "Q6-SFR"
+
+#define DESC_STRING_SIZE 20
+#define FILE_NAME_STRING_SIZE 20
+
+/* Segment type tags stored in ath12k_dump_segment.type */
+enum ath12k_fw_crash_dump_type {
+	ATH12K_FW_CRASH_PAGING_DATA,
+	ATH12K_FW_CRASH_RDDM_DATA,
+	ATH12K_FW_REMOTE_MEM_DATA,
+	ATH12K_FW_PAGEABLE_DATA,
+	ATH12K_FW_M3_DUMP,
+	ATH12K_FW_QDSS_DATA,
+	ATH12K_FW_CALDB,
+	ATH12K_FW_AFC,
+	ATH12K_FW_MLO_GLOBAL,
+
+	/* keep last */
+	ATH12K_FW_DUMP_TYPE_MAX,
+};
+
+/* One contiguous memory region included in a dump */
+struct ath12k_dump_segment {
+	unsigned long addr;
+	void *vaddr;
+	unsigned int len;
+	unsigned int type;
+};
+
+struct ath12k_dump_file_data {
+	/* "ATH12K-FW-DUMP" */
+	char df_magic[16];
+	__le32 len;
+	/* file dump version */
+	__le32 version;
+	/* pci device id */
+	__le32 chip_id;
+	/* qrtr instance id */
+	__le32 qrtr_id;
+	/* pci domain id */
+	u8 bus_id;
+	guid_t guid;
+	/* time-of-day stamp */
+	__le64 tv_sec;
+	/* time-of-day stamp, nano-seconds */
+	__le64 tv_nsec;
+	/* room for growth w/out changing binary format */
+	u8 unused[8];
+	/* number of segments */
+	__le32 num_seg;
+	/* ath12k_dump_segment struct size */
+	__le32 seg_size;
+
+	/* NOTE(review): a kernel pointer inside a __packed dump-file
+	 * header makes the layout pointer-size dependent — confirm
+	 * consumers expect this field here.
+	 */
+	struct ath12k_dump_segment *seg;
+	/* struct ath12k_dump_segment + more */
+
+	/* flexible array member; zero-length arrays are deprecated */
+	u8 data[];
+} __packed;
+
+struct ath12k_coredump_state {
+	struct ath12k_dump_file_data *header;
+	struct ath12k_dump_segment *segments;
+	struct completion dump_done;
+	u32 num_seg;
+};
+
+struct ath12k_coredump_segment_info {
+	u32 chip_id;
+	u32 qrtr_id;
+	u32 num_seg;
+	struct ath12k_dump_segment *seg;
+	u8 bus_id;
+};
+
+struct ath12k_coredump_info {
+	atomic_t num_chip;
+	struct ath12k_coredump_segment_info chip_seg_info[ATH12K_MAX_SOCS];
+};
+
+struct ath12k_coredump_q6ramdump_entry {
+	__le64 base_address;
+	__le64 actual_phys_address;
+	__le64 size;
+	char description[DESC_STRING_SIZE];
+	char file_name[FILE_NAME_STRING_SIZE];
+};
+
+struct ath12k_coredump_q6ramdump_header {
+	__le32 version;
+	__le32 header_size;
+	struct ath12k_coredump_q6ramdump_entry ramdump_table[MAX_RAMDUMP_TABLE_SIZE];
+};
+
+#ifdef CONFIG_WANT_DEV_COREDUMP
+void ath12k_coredump_download_rddm(struct ath12k_base *ab);
+void ath12k_coredump_build_inline(struct ath12k_base *ab,
+				  struct ath12k_dump_segment *segments, int num_seg);
+void ath12k_coredump_qdss_dump(struct ath12k_base *ab,
+			       struct ath12k_qmi_event_qdss_trace_save_data *event_data);
+void ath12k_coredump_m3_dump(struct ath12k_base *ab,
+			     struct ath12k_qmi_m3_dump_upload_req_data *event_data);
+#else
+static inline void ath12k_coredump_download_rddm(struct ath12k_base *ab)
+{
+}
+
+static inline void ath12k_coredump_build_inline(struct ath12k_base *ab,
+						struct ath12k_dump_segment *segments,
+						int num_seg)
+{
+}
+
+static inline void
+ath12k_coredump_qdss_dump(struct ath12k_base *ab,
+			  struct ath12k_qmi_event_qdss_trace_save_data *event_data)
+{
+}
+
+static inline void
+ath12k_coredump_m3_dump(struct ath12k_base *ab,
+			struct ath12k_qmi_m3_dump_upload_req_data *event_data)
+{
+}
+#endif
+
+#endif /* _COREDUMP_H_ */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/debugfs.c	2024-03-18 14:40:14.843741115 +0100
@@ -0,0 +1,5326 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/of.h>
+
+#include "debugfs.h"
+
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "hal_rx.h"
+#include "dp_tx.h"
+#include "debugfs_htt_stats.h"
+#include "peer.h"
+#include "qmi.h"
+
+static struct dentry *debugfs_ath12k;
+
+/* Human-readable names for UMAC SW ring IDs, indexed by
+ * HTT_SW_UMAC_RING_IDX_* — presumably used when reporting HTT ring
+ * backpressure events (consumer not visible in this chunk).
+ */
+static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = {
+	"REO2SW1_RING",
+	"REO2SW2_RING",
+	"REO2SW3_RING",
+	"REO2SW4_RING",
+	"WBM2REO_LINK_RING",
+	"REO2TCL_RING",
+	"REO2FW_RING",
+	"RELEASE_RING",
+	"PPE_RELEASE_RING",
+	"TCL2TQM_RING",
+	"TQM_RELEASE_RING",
+	"REO_RELEASE_RING",
+	"WBM2SW0_RELEASE_RING",
+	"WBM2SW1_RELEASE_RING",
+	"WBM2SW2_RELEASE_RING",
+	"WBM2SW3_RELEASE_RING",
+	"REO_CMD_RING",
+	"REO_STATUS_RING",
+};
+
+/* Human-readable names for LMAC SW ring IDs, indexed by
+ * HTT_SW_LMAC_RING_IDX_* (companion table to htt_bp_umac_ring above).
+ */
+static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = {
+	"FW2RXDMA_BUF_RING",
+	"FW2RXDMA_STATUS_RING",
+	"FW2RXDMA_LINK_RING",
+	"SW2RXDMA_BUF_RING",
+	"WBM2RXDMA_LINK_RING",
+	"RXDMA2FW_RING",
+	"RXDMA2SW_RING",
+	"RXDMA2RELEASE_RING",
+	"RXDMA2REO_RING",
+	"MONITOR_STATUS_RING",
+	"MONITOR_BUF_RING",
+	"MONITOR_DESC_RING",
+	"MONITOR_DEST_RING",
+};
+
+/* Discard any in-flight FW stats and clear the "all chunks received"
+ * flag, under data_lock, before a new stats request is issued.
+ */
+static void ath12k_debugfs_fw_stats_reset(struct ath12k *ar)
+{
+	spin_lock_bh(&ar->data_lock);
+	ar->fw_stats_done = false;
+	ath12k_fw_stats_reset(ar);
+	spin_unlock_bh(&ar->data_lock);
+}
+
+/* Accumulate split 'update stats' WMI event chunks for VDEV/BCN stats
+ * onto ar->fw_stats and set fw_stats_done once the chunk for every
+ * started VDEV has arrived.
+ */
+void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
+				     struct ath12k_fw_stats *stats)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_pdev *pdev;
+	bool is_end;
+	/* chunk counters persist across calls until the last chunk */
+	static unsigned int num_vdev, num_bcn;
+	size_t total_vdevs_started = 0;
+	int i;
+
+	if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
+		if (list_empty(&stats->vdevs)) {
+			ath12k_warn(ab, "empty vdev stats");
+			return;
+		}
+		/* FW sends all the active VDEV stats irrespective of PDEV,
+		 * hence limit until the count of all VDEVs started
+		 */
+		for (i = 0; i < ab->num_radios; i++) {
+			pdev = rcu_dereference(ab->pdevs_active[i]);
+			/* sum each radio's own count (pdev->ar), not the
+			 * caller's 'ar' repeated once per radio
+			 */
+			if (pdev && pdev->ar)
+				total_vdevs_started += pdev->ar->num_started_vdevs;
+		}
+
+		is_end = ((++num_vdev) == total_vdevs_started);
+
+		list_splice_tail_init(&stats->vdevs,
+				      &ar->fw_stats.vdevs);
+
+		if (is_end) {
+			ar->fw_stats_done = true;
+			num_vdev = 0;
+		}
+		return;
+	}
+
+	if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
+		if (list_empty(&stats->bcn)) {
+			ath12k_warn(ab, "empty bcn stats");
+			return;
+		}
+		/* Mark end until we reached the count of all started VDEVs
+		 * within the PDEV
+		 */
+		is_end = ((++num_bcn) == ar->num_started_vdevs);
+
+		list_splice_tail_init(&stats->bcn,
+				      &ar->fw_stats.bcn);
+
+		if (is_end) {
+			ar->fw_stats_done = true;
+			num_bcn = 0;
+		}
+	}
+}
+
+/* debugfs write handler for "wmi_ctrl_stats": parses
+ * "<stats_id> <action>" and sends a WMI control-path stats request.
+ */
+static ssize_t ath12k_write_wmi_ctrl_path_stats(struct file *file,
+		const char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	struct wmi_ctrl_path_stats_cmd_param param = {0};
+	u8 buf[128] = {0};
+	int ret;
+
+	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+	if (ret < 0)
+		return ret;
+
+	buf[ret] = '\0';
+
+	ret = sscanf(buf, "%u %u", &param.stats_id, &param.action);
+	if (ret != 2)
+		return -EINVAL;
+
+	/* action 0 is invalid; anything past RESET is unknown */
+	if (!param.action || param.action > WMI_REQUEST_CTRL_PATH_STAT_RESET)
+		return -EINVAL;
+
+	ret = ath12k_wmi_send_wmi_ctrl_stats_cmd(ar, &param);
+	return ret ? ret : count;
+}
+
+/* Render the WMI_CTRL_PATH_PDEV_STATS entries queued on
+ * ar->debug.wmi_list into text and copy them to user space.
+ * Frees and empties the list when done.
+ */
+int wmi_ctrl_path_pdev_stat(struct ath12k *ar, char __user *ubuf,
+			    size_t count, loff_t *ppos)
+{
+	const int size = 2048;
+	char *buf;
+	u8 i;
+	int len = 0, ret_val;
+	u16 index_tx = 0;
+	u16 index_rx = 0;
+	char fw_tx_mgmt_subtype[WMI_MAX_STRING_LEN] = {0};
+	char fw_rx_mgmt_subtype[WMI_MAX_STRING_LEN] = {0};
+	struct wmi_ctrl_path_stats_list *stats;
+	struct wmi_ctrl_path_pdev_stats *pdev_stats;
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&ar->wmi_ctrl_path_stats_lock);
+	list_for_each_entry(stats, &ar->debug.wmi_list, list) {
+		if (!stats)
+			break;
+
+		pdev_stats = stats->stats_ptr;
+
+		if (!pdev_stats)
+			break;
+
+		/* scnprintf returns the bytes actually written, so the
+		 * running index can never step past WMI_MAX_STRING_LEN;
+		 * snprintf's would-be length could push index_tx/index_rx
+		 * out of bounds after truncation
+		 */
+		for (i = 0; i < WMI_MGMT_FRAME_SUBTYPE_MAX; i++) {
+			index_tx += scnprintf(&fw_tx_mgmt_subtype[index_tx],
+					WMI_MAX_STRING_LEN - index_tx,
+					" %u:%u,", i,
+					pdev_stats->tx_mgmt_subtype[i]);
+			index_rx += scnprintf(&fw_rx_mgmt_subtype[index_rx],
+					WMI_MAX_STRING_LEN - index_rx,
+					" %u:%u,", i,
+					pdev_stats->rx_mgmt_subtype[i]);
+		}
+
+		len += scnprintf(buf + len, size - len,
+				"WMI_CTRL_PATH_PDEV_TX_STATS:\n");
+		len += scnprintf(buf + len, size - len,
+				"fw_tx_mgmt_subtype = %s\n",
+				fw_tx_mgmt_subtype);
+		len += scnprintf(buf + len, size - len,
+				"fw_rx_mgmt_subtype = %s\n",
+				fw_rx_mgmt_subtype);
+		len += scnprintf(buf + len, size - len,
+				"scan_fail_dfs_violation_time_ms = %u\n",
+				pdev_stats->scan_fail_dfs_violation_time_ms);
+		len += scnprintf(buf + len, size - len,
+				"nol_chk_fail_last_chan_freq = %u\n",
+				pdev_stats->nol_chk_fail_last_chan_freq);
+		len += scnprintf(buf + len, size - len,
+				"nol_chk_fail_time_stamp_ms = %u\n",
+				pdev_stats->nol_chk_fail_time_stamp_ms);
+		len += scnprintf(buf + len, size - len,
+				"tot_peer_create_cnt = %u\n",
+				pdev_stats->tot_peer_create_cnt);
+		len += scnprintf(buf + len, size - len,
+				"tot_peer_del_cnt = %u\n",
+				pdev_stats->tot_peer_del_cnt);
+		len += scnprintf(buf + len, size - len,
+				"tot_peer_del_resp_cnt = %u\n",
+				pdev_stats->tot_peer_del_resp_cnt);
+		len += scnprintf(buf + len, size - len,
+				"vdev_pause_fail_rt_to_sched_algo_fifo_full_cnt = %u\n",
+				pdev_stats->vdev_pause_fail_rt_to_sched_algo_fifo_full_cnt);
+	}
+
+	ath12k_wmi_crl_path_stats_list_free(ar, &ar->debug.wmi_list);
+	mutex_unlock(&ar->wmi_ctrl_path_stats_lock);
+	ret_val =  simple_read_from_buffer(ubuf, count, ppos, buf, len);
+	kfree(buf);
+	return ret_val;
+}
+
+/* Render the WMI_CTRL_PATH_CAL_STATS entries queued on
+ * ar->debug.wmi_list as a table and copy them to user space.
+ * Frees and empties the list when done.
+ */
+int wmi_ctrl_path_cal_stat(struct ath12k *ar, char __user *ubuf,
+			   size_t count, loff_t *ppos)
+{
+	const int size = 4096;
+	char *buf;
+	u8 cal_type_mask, cal_prof_mask, is_periodic_cal;
+	int len = 0, ret_val;
+	struct wmi_ctrl_path_stats_list *stats;
+	struct wmi_ctrl_path_cal_stats *cal_stats;
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len += scnprintf(buf + len, size - len,
+			"WMI_CTRL_PATH_CAL_STATS\n");
+	len += scnprintf(buf + len, size - len,
+			"%-25s %-25s %-17s %-16s %-16s %-16s\n",
+			"cal_profile", "cal_type",
+			"cal_triggered_cnt", "cal_fail_cnt",
+			"cal_fcs_cnt", "cal_fcs_fail_cnt");
+
+	mutex_lock(&ar->wmi_ctrl_path_stats_lock);
+	list_for_each_entry(stats, &ar->debug.wmi_list, list) {
+		if (!stats)
+			break;
+
+		cal_stats = stats->stats_ptr;
+
+		if (!cal_stats)
+			break;
+
+		/* profile/type/periodic flags are packed into cal_info */
+		cal_prof_mask = FIELD_GET(WMI_CTRL_PATH_CAL_PROF_MASK,
+				cal_stats->cal_info);
+		if (cal_prof_mask == WMI_CTRL_PATH_STATS_CAL_PROFILE_INVALID)
+			continue;
+
+		cal_type_mask = FIELD_GET(WMI_CTRL_PATH_CAL_TYPE_MASK,
+				cal_stats->cal_info);
+		is_periodic_cal = FIELD_GET(WMI_CTRL_PATH_IS_PERIODIC_CAL,
+				cal_stats->cal_info);
+
+
+		/* periodic calibrations use a distinct type-name table */
+		if (!is_periodic_cal) {
+			len += scnprintf(buf + len, size - len,
+			   "%-25s %-25s %-17d %-16d %-16d %-16d\n",
+			   wmi_ctrl_path_cal_prof_id_to_name(cal_prof_mask),
+			   wmi_ctrl_path_cal_type_id_to_name(cal_type_mask),
+			   cal_stats->cal_triggered_cnt,
+			   cal_stats->cal_fail_cnt,
+			   cal_stats->cal_fcs_cnt,
+			   cal_stats->cal_fcs_fail_cnt);
+		} else {
+			len += scnprintf(buf + len, size - len,
+			   "%-25s %-25s %-17d %-16d %-16d %-16d\n",
+			   "PERIODIC_CAL",
+			   wmi_ctrl_path_periodic_cal_type_id_to_name(cal_type_mask),
+			   cal_stats->cal_triggered_cnt,
+			   cal_stats->cal_fail_cnt,
+			   cal_stats->cal_fcs_cnt,
+			   cal_stats->cal_fcs_fail_cnt);
+		}
+
+	}
+
+	ath12k_wmi_crl_path_stats_list_free(ar, &ar->debug.wmi_list);
+	mutex_unlock(&ar->wmi_ctrl_path_stats_lock);
+	ret_val =  simple_read_from_buffer(ubuf, count, ppos, buf, len);
+	kfree(buf);
+	return ret_val;
+}
+
+/* Render the WMI_CTRL_PATH_AWGN_STATS entries queued on
+ * ar->debug.wmi_list into text and copy them to user space.
+ * Frees and empties the list when done.
+ */
+int wmi_ctrl_path_awgn_stat(struct ath12k *ar, char __user *ubuf,
+			    size_t count, loff_t *ppos)
+{
+	struct wmi_ctrl_path_stats_list *stats;
+	struct wmi_ctrl_path_awgn_stats *awgn_stats;
+	const int size = 2048;
+	int len = 0, ret_val;
+	char *buf;
+
+	buf = kzalloc(size, GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&ar->wmi_ctrl_path_stats_lock);
+
+	list_for_each_entry(stats, &ar->debug.wmi_list, list) {
+
+		if (!stats)
+			break;
+		awgn_stats = stats->stats_ptr;
+
+		if (!awgn_stats)
+			break;
+
+		len += scnprintf(buf + len, size - len,
+				 "WMI_CTRL_PATH_AWGN_STATS_TLV:\n");
+		len += scnprintf(buf + len, size - len,
+				 "awgn_send_evt_cnt = %u\n",
+				 awgn_stats->awgn_send_evt_cnt);
+		len += scnprintf(buf + len, size - len,
+				 "awgn_pri_int_cnt = %u\n",
+				 awgn_stats->awgn_pri_int_cnt);
+		len += scnprintf(buf + len, size - len,
+				 "awgn_sec_int_cnt = %u\n",
+				 awgn_stats->awgn_sec_int_cnt);
+		len += scnprintf(buf + len, size - len,
+				 "awgn_pkt_drop_trigger_cnt = %u\n",
+				 awgn_stats->awgn_pkt_drop_trigger_cnt);
+		len += scnprintf(buf + len, size - len,
+				 "awgn_pkt_drop_trigger_reset_cnt = %u\n",
+				 awgn_stats->awgn_pkt_drop_trigger_reset_cnt);
+		len += scnprintf(buf + len, size - len,
+				 "awgn_bw_drop_cnt = %u\n",
+				 awgn_stats->awgn_bw_drop_cnt);
+		len += scnprintf(buf + len, size - len,
+				 "awgn_bw_drop_reset_cnt = %u\n",
+				 awgn_stats->awgn_bw_drop_reset_cnt);
+		len += scnprintf(buf + len, size - len,
+				 "awgn_cca_int_cnt = %u\n",
+				 awgn_stats->awgn_cca_int_cnt);
+		len += scnprintf(buf + len, size - len,
+				 "awgn_cca_int_reset_cnt = %u\n",
+				 awgn_stats->awgn_cca_int_reset_cnt);
+		len += scnprintf(buf + len, size - len,
+				 "awgn_cca_ack_blk_cnt = %u\n",
+				 awgn_stats->awgn_cca_ack_blk_cnt);
+		len += scnprintf(buf + len, size - len,
+				 "awgn_cca_ack_reset_cnt = %u\n",
+				 awgn_stats->awgn_cca_ack_reset_cnt);
+		/* NOTE(review): awgn_int_bw_cnt[4] is never printed (the
+		 * output jumps from AWGN_160[3] to AWGN_320[5]) — confirm
+		 * index 4 is intentionally unused by firmware.
+		 */
+		len += scnprintf(buf + len, size - len,
+				 "awgn_int_bw_cnt-AWGN_20[0]: %u\n",
+				 awgn_stats->awgn_int_bw_cnt[0]);
+		len += scnprintf(buf + len, size - len,
+				 "AWGN_40[1]: %u\n",
+				 awgn_stats->awgn_int_bw_cnt[1]);
+		len += scnprintf(buf + len, size - len,
+				 "AWGN_80[2]: %u\n",
+				 awgn_stats->awgn_int_bw_cnt[2]);
+		len += scnprintf(buf + len, size - len,
+				 "AWGN_160[3]: %u\n",
+				 awgn_stats->awgn_int_bw_cnt[3]);
+		len += scnprintf(buf + len, size - len,
+				 "AWGN_320[5]: %u\n",
+				 awgn_stats->awgn_int_bw_cnt[5]);
+	}
+	ath12k_wmi_crl_path_stats_list_free(ar, &ar->debug.wmi_list);
+	mutex_unlock(&ar->wmi_ctrl_path_stats_lock);
+	ret_val =  simple_read_from_buffer(ubuf, count, ppos, buf, len);
+	kfree(buf);
+
+	return ret_val;
+}
+
+/* Render the WMI_CTRL_PATH_BTCOEX_STATS entries queued on
+ * ar->debug.wmi_list into text and copy them to user space.
+ * Frees and empties the list when done.
+ */
+int wmi_ctrl_path_btcoex_stat(struct ath12k *ar, char __user *ubuf,
+			size_t count, loff_t *ppos)
+{
+	struct wmi_ctrl_path_stats_list *stats;
+	struct wmi_ctrl_path_btcoex_stats *btcoex_stats;
+	const int size = 2048;
+	int len = 0, ret_val;
+	char *buf;
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&ar->wmi_ctrl_path_stats_lock);
+	list_for_each_entry(stats, &ar->debug.wmi_list, list) {
+		if (!stats)
+			break;
+
+		btcoex_stats = stats->stats_ptr;
+
+		if (!btcoex_stats)
+			break;
+
+		len += scnprintf(buf + len, size - len,
+				"WMI_CTRL_PATH_BTCOEX_STATS:\n");
+		len += scnprintf(buf + len, size - len,
+				"pdev_id = %u\n",
+				btcoex_stats->pdev_id);
+		len += scnprintf(buf + len, size - len,
+				"bt_tx_req_cntr = %u\n",
+				btcoex_stats->bt_tx_req_cntr);
+		len += scnprintf(buf + len, size - len,
+				"bt_rx_req_cntr = %u\n",
+				btcoex_stats->bt_rx_req_cntr);
+		len += scnprintf(buf + len, size - len,
+				"bt_req_nack_cntr = %u\n",
+				btcoex_stats->bt_req_nack_cntr);
+		len += scnprintf(buf + len, size - len,
+				"wl_tx_req_nack_schd_bt_reason_cntr = %u\n",
+				btcoex_stats->wl_tx_req_nack_schd_bt_reason_cntr);
+		len += scnprintf(buf + len, size - len,
+				"wl_tx_req_nack_current_bt_reason_cntr = %u\n",
+				btcoex_stats->wl_tx_req_nack_current_bt_reason_cntr);
+		len += scnprintf(buf + len, size - len,
+				"wl_tx_req_nack_other_wlan_tx_reason_cntr = %u\n",
+				btcoex_stats->wl_tx_req_nack_other_wlan_tx_reason_cntr);
+		len += scnprintf(buf + len, size - len,
+				"wl_in_tx_abort_cntr = %u\n",
+				btcoex_stats->wl_in_tx_abort_cntr);
+		len += scnprintf(buf + len, size - len,
+				"wl_tx_auto_resp_req_cntr = %u\n",
+				btcoex_stats->wl_tx_auto_resp_req_cntr);
+		len += scnprintf(buf + len, size - len,
+				"wl_tx_req_ack_cntr = %u\n",
+				btcoex_stats->wl_tx_req_ack_cntr);
+		len += scnprintf(buf + len, size - len,
+				"wl_tx_req_cntr = %u\n",
+				btcoex_stats->wl_tx_req_cntr);
+	}
+
+	ath12k_wmi_crl_path_stats_list_free(ar, &ar->debug.wmi_list);
+	mutex_unlock(&ar->wmi_ctrl_path_stats_lock);
+	ret_val =  simple_read_from_buffer(ubuf, count, ppos, buf, len);
+	kfree(buf);
+	return ret_val;
+}
+
+/* debugfs read handler for "wmi_ctrl_stats": dispatch on the tag of
+ * the last stats event received and render the matching stats type.
+ */
+static ssize_t ath12k_read_wmi_ctrl_path_stats(struct file *file,
+		char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	u32 tagid = ar->debug.wmi_ctrl_path_stats_tagid;
+
+	switch (tagid) {
+	case WMI_CTRL_PATH_PDEV_STATS:
+		return wmi_ctrl_path_pdev_stat(ar, ubuf, count, ppos);
+	case WMI_CTRL_PATH_CAL_STATS:
+		return wmi_ctrl_path_cal_stat(ar, ubuf, count, ppos);
+	case WMI_CTRL_PATH_BTCOEX_STATS:
+		return wmi_ctrl_path_btcoex_stat(ar, ubuf, count, ppos);
+	case WMI_CTRL_PATH_AWGN_STATS:
+		return wmi_ctrl_path_awgn_stat(ar, ubuf, count, ppos);
+	/* Add case for newly wmi ctrl path added stats here */
+	default:
+		/* unsupported tag */
+		return -EINVAL;
+	}
+}
+
+/* File operations for the per-pdev "wmi_ctrl_stats" debugfs node */
+static const struct file_operations ath12k_fops_wmi_ctrl_stats = {
+	.write = ath12k_write_wmi_ctrl_path_stats,
+	.open = simple_open,
+	.read = ath12k_read_wmi_ctrl_path_stats,
+};
+
+/* Create the per-pdev "wmi_ctrl_stats" debugfs node and initialize the
+ * state it uses.
+ */
+void ath12k_debugfs_wmi_ctrl_stats(struct ath12k *ar)
+{
+	/* Initialize the list/lock/completion consumed by the read and
+	 * write handlers BEFORE the file becomes visible, so a racing
+	 * open()+read() cannot observe uninitialized state.
+	 */
+	INIT_LIST_HEAD(&ar->debug.wmi_list);
+	mutex_init(&ar->wmi_ctrl_path_stats_lock);
+	init_completion(&ar->debug.wmi_ctrl_path_stats_rcvd);
+
+	ar->wmi_ctrl_stat = debugfs_create_file("wmi_ctrl_stats", 0644,
+			ar->debug.debugfs_pdev,
+			ar,
+			&ath12k_fops_wmi_ctrl_stats);
+}
+
+/* Issue a WMI stats request and wait for the (possibly split) response.
+ *
+ * Must be called with conf_mutex held. Returns 0 on success or a
+ * negative errno (-ETIMEDOUT if no event arrived within 1 s).
+ */
+static int ath12k_debugfs_fw_stats_request(struct ath12k *ar,
+					   struct stats_request_params *req_param)
+{
+	struct ath12k_base *ab = ar->ab;
+	unsigned long timeout, time_left;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	/* FW stats can get split when exceeding the stats data buffer limit.
+	 * In that case, since there is no end marking for the back-to-back
+	 * received 'update stats' event, we keep a 3 seconds timeout in case,
+	 * fw_stats_done is not marked yet.
+	 *
+	 * msecs_to_jiffies() takes milliseconds, so the bound is 3 * 1000;
+	 * the previous 3 * HZ was only 3 seconds when HZ happened to be 1000.
+	 */
+	timeout = jiffies + msecs_to_jiffies(3 * 1000);
+
+	ath12k_debugfs_fw_stats_reset(ar);
+
+	reinit_completion(&ar->fw_stats_complete);
+
+	ret = ath12k_wmi_send_stats_request_cmd(ar, req_param->stats_id,
+						req_param->vdev_id, req_param->pdev_id);
+
+	if (ret) {
+		ath12k_warn(ab, "could not request fw stats (%d)\n",
+			    ret);
+		return ret;
+	}
+
+	time_left =
+	wait_for_completion_timeout(&ar->fw_stats_complete,
+				    1 * HZ);
+	if (!time_left)
+		return -ETIMEDOUT;
+
+	/* Poll for the final chunk; bounded by 'timeout' above.
+	 * NOTE(review): this is a busy-wait — consider signalling
+	 * fw_stats_done via a completion instead.
+	 */
+	for (;;) {
+		if (time_after(jiffies, timeout))
+			break;
+
+		spin_lock_bh(&ar->data_lock);
+		if (ar->fw_stats_done) {
+			spin_unlock_bh(&ar->data_lock);
+			break;
+		}
+		spin_unlock_bh(&ar->data_lock);
+	}
+	return 0;
+}
+
+/* Open handler for "pdev_stats": request pdev stats from firmware,
+ * render them into a vmalloc'd text buffer and stash it in
+ * file->private_data for the read/release handlers.
+ */
+static int ath12k_open_pdev_stats(struct inode *inode, struct file *file)
+{
+	struct ath12k *ar = inode->i_private;
+	struct ath12k_base *ab = ar->ab;
+	struct stats_request_params req_param;
+	void *buf = NULL;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH12K_STATE_ON) {
+		ret = -ENETDOWN;
+		goto err_unlock;
+	}
+
+	buf = vmalloc(ATH12K_FW_STATS_BUF_SIZE);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
+
+	req_param.pdev_id = ar->pdev->pdev_id;
+	req_param.vdev_id = 0;
+	req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+	ret = ath12k_debugfs_fw_stats_request(ar, &req_param);
+	if (ret) {
+		ath12k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+		goto err_free;
+	}
+
+	ath12k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id,
+				 buf);
+
+	/* buffer ownership moves to the file; freed in release */
+	file->private_data = buf;
+
+	mutex_unlock(&ar->conf_mutex);
+	return 0;
+
+err_free:
+	vfree(buf);
+
+err_unlock:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* Free the stats text snapshot allocated at open time. */
+static int ath12k_release_pdev_stats(struct inode *inode, struct file *file)
+{
+	void *snapshot = file->private_data;
+
+	vfree(snapshot);
+	return 0;
+}
+
+/* Copy the pre-rendered pdev stats text out to user space. */
+static ssize_t ath12k_read_pdev_stats(struct file *file,
+				      char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	const char *stats_text = file->private_data;
+
+	return simple_read_from_buffer(user_buf, count, ppos, stats_text,
+				       strlen(stats_text));
+}
+
+/* File operations for the per-pdev "pdev_stats" debugfs node */
+static const struct file_operations fops_pdev_stats = {
+	.open = ath12k_open_pdev_stats,
+	.release = ath12k_release_pdev_stats,
+	.read = ath12k_read_pdev_stats,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* Open handler for "vdev_stats": request stats for all active VDEVs
+ * from firmware, render them into a vmalloc'd text buffer and stash it
+ * in file->private_data for the read/release handlers.
+ */
+static int ath12k_open_vdev_stats(struct inode *inode, struct file *file)
+{
+	struct ath12k *ar = inode->i_private;
+	struct stats_request_params req_param;
+	void *buf = NULL;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH12K_STATE_ON) {
+		ret = -ENETDOWN;
+		goto err_unlock;
+	}
+
+	buf = vmalloc(ATH12K_FW_STATS_BUF_SIZE);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
+
+	req_param.pdev_id = ar->pdev->pdev_id;
+	/* VDEV stats is always sent for all active VDEVs from FW */
+	req_param.vdev_id = 0;
+	req_param.stats_id = WMI_REQUEST_VDEV_STAT;
+
+	ret = ath12k_debugfs_fw_stats_request(ar, &req_param);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
+		goto err_free;
+	}
+
+	ath12k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id,
+				 buf);
+
+	/* buffer ownership moves to the file; freed in release */
+	file->private_data = buf;
+
+	mutex_unlock(&ar->conf_mutex);
+	return 0;
+
+err_free:
+	vfree(buf);
+
+err_unlock:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* Free the stats text snapshot allocated at open time. */
+static int ath12k_release_vdev_stats(struct inode *inode, struct file *file)
+{
+	void *snapshot = file->private_data;
+
+	vfree(snapshot);
+	return 0;
+}
+
+/* Copy the pre-rendered vdev stats text out to user space. */
+static ssize_t ath12k_read_vdev_stats(struct file *file,
+				      char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	const char *stats_text = file->private_data;
+
+	return simple_read_from_buffer(user_buf, count, ppos, stats_text,
+				       strlen(stats_text));
+}
+
+/* File operations for the per-pdev "vdev_stats" debugfs node */
+static const struct file_operations fops_vdev_stats = {
+	.open = ath12k_open_vdev_stats,
+	.release = ath12k_release_vdev_stats,
+	.read = ath12k_read_vdev_stats,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* Open handler for "bcn_stats": request beacon stats per active VDEV
+ * from firmware, render them into a vmalloc'd text buffer and stash it
+ * in file->private_data for the read/release handlers.
+ */
+static int ath12k_open_bcn_stats(struct inode *inode, struct file *file)
+{
+	struct ath12k *ar = inode->i_private;
+	struct ath12k_link_vif *arvif;
+	struct stats_request_params req_param;
+	void *buf = NULL;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH12K_STATE_ON) {
+		ret = -ENETDOWN;
+		goto err_unlock;
+	}
+
+	buf = vmalloc(ATH12K_FW_STATS_BUF_SIZE);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
+
+	req_param.stats_id = WMI_REQUEST_BCN_STAT;
+	req_param.pdev_id = ar->pdev->pdev_id;
+
+	/* loop all active VDEVs for bcn stats */
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		if (!arvif->is_up)
+			continue;
+
+		req_param.vdev_id = arvif->vdev_id;
+		ret = ath12k_debugfs_fw_stats_request(ar, &req_param);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
+			goto err_free;
+		}
+	}
+
+	ath12k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id,
+				 buf);
+
+	/* since beacon stats request is looped for all active VDEVs, saved fw
+	 * stats is not freed for each request until done for all active VDEVs
+	 */
+	spin_lock_bh(&ar->data_lock);
+	ath12k_fw_stats_bcn_free(&ar->fw_stats.bcn);
+	spin_unlock_bh(&ar->data_lock);
+
+	/* buffer ownership moves to the file; freed in release */
+	file->private_data = buf;
+
+	mutex_unlock(&ar->conf_mutex);
+	return 0;
+
+err_free:
+	vfree(buf);
+
+err_unlock:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* Free the stats text snapshot allocated at open time. */
+static int ath12k_release_bcn_stats(struct inode *inode, struct file *file)
+{
+	void *snapshot = file->private_data;
+
+	vfree(snapshot);
+	return 0;
+}
+
+/* Copy the pre-rendered beacon stats text out to user space. */
+static ssize_t ath12k_read_bcn_stats(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	const char *stats_text = file->private_data;
+
+	return simple_read_from_buffer(user_buf, count, ppos, stats_text,
+				       strlen(stats_text));
+}
+
+/* File operations for the per-pdev "bcn_stats" debugfs node */
+static const struct file_operations fops_bcn_stats = {
+	.open = ath12k_open_bcn_stats,
+	.release = ath12k_release_bcn_stats,
+	.read = ath12k_read_bcn_stats,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* debugfs write handler: toggle the vdev stats offload flag consumed
+ * by later stats requests. No-op if the value is unchanged.
+ */
+static ssize_t ath12k_write_enable_vdev_stats_offload(struct file *file,
+						      const char __user *ubuf,
+						      size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	bool enable;
+	int ret;
+
+	if (kstrtobool_from_user(ubuf, count, &enable))
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (enable == ar->fw_stats.en_vdev_stats_ol) {
+		ret = count;
+		goto out;
+	}
+
+	ar->fw_stats.en_vdev_stats_ol = enable;
+	ret = count;
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* debugfs read handler: report the vdev stats offload flag ("0"/"1"). */
+static ssize_t ath12k_read_enable_vdev_stats_offload(struct file *file,
+						     char __user *ubuf,
+						     size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	char out[32] = {0};
+	int n;
+
+	mutex_lock(&ar->conf_mutex);
+	n = scnprintf(out, sizeof(out), "%u\n",
+		      ar->fw_stats.en_vdev_stats_ol);
+	mutex_unlock(&ar->conf_mutex);
+
+	return simple_read_from_buffer(ubuf, count, ppos, out, n);
+}
+
+/* File operations for the "enable_vdev_stats_offload" debugfs node */
+static const struct file_operations fops_vdev_stats_offload = {
+	.read = ath12k_read_enable_vdev_stats_offload,
+	.write = ath12k_write_enable_vdev_stats_offload,
+	.open = simple_open
+};
+
+/* debugfs read handler: report firmware crash/recovery counters kept
+ * in ab->stats, sampled under base_lock.
+ */
+static ssize_t ath12k_debug_fw_reset_stats_read(struct file *file,
+						char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ath12k_base *ab = file->private_data;
+	int ret;
+	size_t len = 0, buf_len = 500;
+	char *buf;
+
+	buf = kmalloc(buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	spin_lock_bh(&ab->base_lock);
+	len += scnprintf(buf + len, buf_len - len,
+			 "fw_crash_counter\t\t%d\n", ab->stats.fw_crash_counter);
+	len += scnprintf(buf + len, buf_len - len,
+			 "last_recovery_time\t\t%d\n", ab->stats.last_recovery_time);
+	spin_unlock_bh(&ab->base_lock);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+	kfree(buf);
+
+	return ret;
+}
+
+/* File operations for the "fw_reset_stats" debugfs node */
+static const struct file_operations fops_fw_reset_stats = {
+	.open = simple_open,
+	.read = ath12k_debug_fw_reset_stats_read,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* debugfs read handler: print usage help for "simulate_fw_crash". */
+static ssize_t ath12k_read_simulate_fw_crash(struct file *file,
+					     char __user *user_buf,
+					     size_t count, loff_t *ppos)
+{
+	/* NOTE(review): the help text advertises `hw-restart`, but the
+	 * write handler below only accepts `assert` — confirm whether
+	 * hw-restart support is pending or the text is stale.
+	 */
+	const char buf[] =
+		"To simulate firmware crash write one of the keywords to this file:\n"
+		"`assert` - this will send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n"
+		"`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+/* Simulate firmware crash:
+ * 'soft': Call wmi command causing firmware hang. This firmware hang is
+ * recoverable by warm firmware reset.
+ * 'hard': Force firmware crash by setting any vdev parameter for not allowed
+ * vdev id. This is hard firmware crash because it is recoverable only by cold
+ * firmware reset.
+ */
+static ssize_t ath12k_write_simulate_fw_crash(struct file *file,
+					      const char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ath12k_base *ab = file->private_data;
+	struct ath12k_pdev *pdev;
+	struct ath12k *ar = ab->pdevs[0].ar;
+	char buf[32] = {0};
+	ssize_t rc;
+	int i, ret, radioup = 0;
+
+	/* the command is sent on the first radio found in ON state */
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		ar = pdev->ar;
+		if (ar && ar->state == ATH12K_STATE_ON) {
+			radioup = 1;
+			break;
+		}
+	}
+	/* filter partial writes and invalid commands */
+	if (*ppos != 0 || count >= sizeof(buf) || count == 0)
+		return -EINVAL;
+
+	rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+	if (rc < 0)
+		return rc;
+
+	/* drop the possible '\n' from the end */
+	/* *ppos was 0 and count > 0 (checked above), so *ppos > 0 here */
+	if (buf[*ppos - 1] == '\n')
+		buf[*ppos - 1] = '\0';
+
+	if (radioup == 0) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	if (!strcmp(buf, "assert")) {
+		ath12k_info(ab, "simulating firmware assert crash\n");
+		ret = ath12k_wmi_force_fw_hang_cmd(ar,
+						   ATH12K_WMI_FW_HANG_ASSERT_TYPE,
+						   ATH12K_WMI_FW_HANG_DELAY, false);
+	} else {
+		/* only `assert` is implemented (see read handler's help) */
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (ret) {
+		ath12k_warn(ab, "failed to simulate firmware crash: %d\n", ret);
+		goto exit;
+	}
+
+	ret = count;
+
+exit:
+	return ret;
+}
+
+/* File operations for the "simulate_fw_crash" debugfs node */
+static const struct file_operations fops_simulate_fw_crash = {
+	.read = ath12k_read_simulate_fw_crash,
+	.write = ath12k_write_simulate_fw_crash,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* debugfs write handler: update the extended TX stats filter mask.
+ * Requires the radio to be up; writing the current value is a no-op.
+ */
+static ssize_t ath12k_write_enable_extd_tx_stats(struct file *file,
+						 const char __user *ubuf,
+						 size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	u32 mask;
+	ssize_t ret = count;
+
+	if (kstrtouint_from_user(ubuf, count, 0, &mask))
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH12K_STATE_ON)
+		ret = -ENETDOWN;
+	else if (mask != ar->debug.extd_tx_stats)
+		ar->debug.extd_tx_stats = mask;
+
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* debugfs read handler: report the extended TX stats mask in hex. */
+static ssize_t ath12k_read_enable_extd_tx_stats(struct file *file,
+						char __user *ubuf,
+						size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	char out[32] = {0};
+	int n;
+
+	mutex_lock(&ar->conf_mutex);
+	n = scnprintf(out, sizeof(out), "%08x\n",
+		      ar->debug.extd_tx_stats);
+	mutex_unlock(&ar->conf_mutex);
+
+	return simple_read_from_buffer(ubuf, count, ppos, out, n);
+}
+
+/* File operations for the "ext_tx_stats" debugfs node */
+static const struct file_operations fops_extd_tx_stats = {
+	.read = ath12k_read_enable_extd_tx_stats,
+	.write = ath12k_write_enable_extd_tx_stats,
+	.open = simple_open
+};
+
+/* debugfs write handler: enable (1) or disable (0) extended RX stats.
+ * Programs the monitor-destination ring TLV filter on every rxdma of
+ * the pdev; disabling restores the default monitor status filter.
+ */
+static ssize_t ath12k_write_extd_rx_stats(struct file *file,
+					  const char __user *ubuf,
+					  size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	struct htt_rx_ring_tlv_filter tlv_filter = {0};
+	u32 enable, ring_id, rx_filter = 0;
+	int ret, i;
+
+	if (kstrtouint_from_user(ubuf, count, 0, &enable))
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH12K_STATE_ON) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	/* only 0/1 are valid */
+	if (enable > 1) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (enable == ar->debug.extd_rx_stats) {
+		ret = count;
+		goto exit;
+	}
+
+	if (enable) {
+		/* subscribe to all PPDU-level TLVs needed for extended stats */
+		rx_filter =  HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
+		rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
+		rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END;
+		rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS;
+		rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT;
+		rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE;
+		rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO;
+
+		tlv_filter.rx_filter = rx_filter;
+		tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+		tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+		tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+		tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+			HTT_RX_FP_DATA_FILTER_FLASG3;
+	} else {
+		tlv_filter = ath12k_mac_mon_status_filter_default;
+	}
+
+	ar->debug.rx_filter = tlv_filter.rx_filter;
+	tlv_filter.offset_valid = false;
+
+	for (i = 0; i < ar->ab->hw_params->num_rxmda_per_pdev; i++) {
+		ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+		ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id + i,
+						       HAL_RXDMA_MONITOR_DST,
+						       DP_RXDMA_REFILL_RING_SIZE,
+						       &tlv_filter);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+			goto exit;
+		}
+	}
+
+	ar->debug.extd_rx_stats = enable;
+	ret = count;
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static ssize_t ath12k_read_extd_rx_stats(struct file *file,
+					 char __user *ubuf,
+					 size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	char buf[32];
+	int len = 0;
+
+	mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+			ar->debug.extd_rx_stats);
+	mutex_unlock(&ar->conf_mutex);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_rx_stats = {
+	.read = ath12k_read_extd_rx_stats,
+	.write = ath12k_write_extd_rx_stats,
+	.open = simple_open,
+};
+
+/* Program the rxdma monitor rings for neighbor-peer RSSI capture.
+ *
+ * @reset == false: widen the currently active rx filter (ar->debug.rx_filter)
+ *   with MPDU/PPDU start+end TLVs and monitor-other (MO) packet filters so
+ *   frames from non-associated neighbor peers are delivered.
+ * @reset == true: restore the previously saved filter unchanged.
+ *
+ * Returns 0 on success or the first ath12k_dp_tx_htt_rx_filter_setup() error.
+ */
+static int ath12k_reset_nrp_filter(struct ath12k *ar,
+				   bool reset)
+{
+	int i = 0;
+	int ret = 0;
+	u32 ring_id = 0;
+	u32 rx_filter = 0;
+	struct htt_rx_ring_tlv_filter tlv_filter = {0};
+
+	if (!reset) {
+		rx_filter = ar->debug.rx_filter;
+		rx_filter |= HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
+		rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
+		rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END;
+		rx_filter |= HTT_RX_FILTER_TLV_FLAGS_MPDU_END;
+
+		tlv_filter.rx_filter = rx_filter;
+		tlv_filter.pkt_filter_flags0 = HTT_RX_MO_MGMT_FILTER_FLAGS0;
+		tlv_filter.pkt_filter_flags1 = HTT_RX_MO_MGMT_FILTER_FLAGS1;
+		tlv_filter.pkt_filter_flags2 = HTT_RX_MO_CTRL_FILTER_FLASG2;
+		tlv_filter.pkt_filter_flags3 = HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
+			HTT_RX_MON_MO_DATA_FILTER_FLASG3;
+	} else {
+		tlv_filter.rx_filter = ar->debug.rx_filter;
+	}
+	tlv_filter.offset_valid = false;
+
+	/* apply the filter on every monitor destination ring of this pdev */
+	for (i = 0; i < ar->ab->hw_params->num_rxmda_per_pdev; i++) {
+		ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+		ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id + i,
+						       HAL_RXDMA_MONITOR_DST,
+						       DP_RXDMA_REFILL_RING_SIZE,
+						       &tlv_filter);
+		if (ret) {
+			ath12k_err(ar->ab,
+				   "failed to setup filter for monitor buf %d\n", ret);
+			return ret;
+		}
+	}
+	return ret;
+}
+
+/* Tear down all neighbor-peer (NRP) state for @ar: wake any reader
+ * blocked on a filter completion, free every list entry, zero the
+ * count, and remove the whole "nrp_rssi" debugfs directory.
+ */
+void ath12k_debugfs_nrp_cleanup_all(struct ath12k *ar)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_neighbor_peer *nrp, *tmp;
+
+	spin_lock_bh(&ab->base_lock);
+	list_for_each_entry_safe(nrp, tmp, &ab->neighbor_peers, list) {
+		/* unblock ath12k_read_nrp_rssi() waiters before freeing */
+		if (nrp->is_filter_on)
+			complete(&nrp->filter_done);
+		list_del(&nrp->list);
+		kfree(nrp);
+	}
+
+	ab->num_nrps = 0;
+	spin_unlock_bh(&ab->base_lock);
+
+	debugfs_remove_recursive(ar->debug.debugfs_nrp);
+}
+
+/* Remove the per-peer debugfs file named "aa:bb:cc:dd:ee:ff" for @addr
+ * and drop the NRP count.  When the last entry goes away, the directory
+ * is removed and the monitor rx filter is restored to its saved value.
+ * (MAC_UNIT_LEN is assumed to be 3, one "xx:" unit — TODO confirm.)
+ */
+void ath12k_debugfs_nrp_clean(struct ath12k *ar, const u8 *addr)
+{
+	int i, j;
+	char fname[MAC_UNIT_LEN * ETH_ALEN] = {0};
+
+	/* rebuild the file name exactly as it was created */
+	for (i = 0, j = 0; i < (MAC_UNIT_LEN * ETH_ALEN); i += MAC_UNIT_LEN, j++) {
+		if (j == ETH_ALEN - 1) {
+			snprintf(fname + i, sizeof(fname) - i, "%02x", *(addr + j));
+			break;
+		}
+		snprintf(fname + i, sizeof(fname) - i, "%02x:", *(addr + j));
+	}
+
+	spin_lock_bh(&ar->ab->base_lock);
+	ar->ab->num_nrps--;
+	spin_unlock_bh(&ar->ab->base_lock);
+
+	debugfs_lookup_and_remove(fname, ar->debug.debugfs_nrp);
+	if (!ar->ab->num_nrps) {
+		debugfs_remove_recursive(ar->debug.debugfs_nrp);
+		ath12k_reset_nrp_filter(ar, true);
+	}
+}
+
+/* debugfs per-NRP RSSI read handler.  The file name (the peer MAC) is
+ * parsed back out of the dentry, a temporary default peer is created on
+ * the first AP vdev, and the handler waits up to 5s for the rx path to
+ * signal nrp->filter_done with a fresh RSSI sample before reporting it
+ * and deleting the temporary peer again.
+ */
+static ssize_t ath12k_read_nrp_rssi(struct file *file,
+				    char __user *ubuf,
+				    size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_link_vif *arvif = NULL;
+	struct ath12k_neighbor_peer *nrp = NULL, *tmp;
+	struct ath12k_wmi_peer_create_arg peer_param = {0};
+	u8 macaddr[ETH_ALEN] = {0};
+	loff_t file_pos = *ppos;
+	struct path *fpath = &file->f_path;
+	char *fname = fpath->dentry->d_iname;
+	char buf[128] = {0};
+	int i = 0;
+	int j = 0;
+	int len = 0;
+	int vdev_id = -1;
+	bool nrp_found = false;
+	bool entry_found = false;
+
+	mutex_lock(&ar->conf_mutex);
+	if (ar->state != ATH12K_STATE_ON) {
+		mutex_unlock(&ar->conf_mutex);
+		return -ENETDOWN;
+	}
+	mutex_unlock(&ar->conf_mutex);
+
+	/* single-shot read: a non-zero offset means EOF */
+	if (file_pos > 0)
+		return 0;
+
+	/* the temporary peer must live on an AP vdev */
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
+			vdev_id = arvif->vdev_id;
+			break;
+		}
+	}
+	if (vdev_id < 0) {
+		ath12k_warn(ab, "unable to get vdev for AP interface\n");
+		return 0;
+	}
+
+	/* recover the peer MAC from the debugfs file name "aa:bb:..." */
+	for (i = 0, j = 0;  i < MAC_UNIT_LEN * ETH_ALEN; i += MAC_UNIT_LEN, j++) {
+		if (sscanf(fname + i, "%hhX", &macaddr[j]) <= 0)
+			return -EINVAL;
+	}
+
+	spin_lock_bh(&ab->base_lock);
+	list_for_each_entry(nrp, &ab->neighbor_peers, list) {
+		if (ether_addr_equal(macaddr, nrp->addr)) {
+			reinit_completion(&nrp->filter_done);
+			nrp->vdev_id = vdev_id;
+			nrp->is_filter_on = false;
+			entry_found = true;
+			break;
+		}
+	}
+	spin_unlock_bh(&ab->base_lock);
+
+	/* On a miss, list_for_each_entry() leaves the cursor pointing at
+	 * the list head sentinel (never NULL) — bail out before the
+	 * nrp->vdev_id / nrp->addr dereferences below become garbage.
+	 */
+	if (!entry_found) {
+		ath12k_warn(ab, "%pM not found in nrp list\n", macaddr);
+		return -EINVAL;
+	}
+
+	peer_param.vdev_id = nrp->vdev_id;
+	peer_param.peer_addr = nrp->addr;
+	peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+
+	if (!ath12k_peer_create(ar, arvif, NULL, &peer_param)) {
+		/* re-validate the entry: it may have been removed while
+		 * the peer was being created
+		 */
+		spin_lock_bh(&ab->base_lock);
+		list_for_each_entry_safe(nrp, tmp, &ab->neighbor_peers, list) {
+			if (ether_addr_equal(nrp->addr, peer_param.peer_addr)) {
+				nrp_found = true;
+				break;
+			}
+		}
+		spin_unlock_bh(&ab->base_lock);
+
+		if (nrp_found) {
+			spin_lock_bh(&ab->base_lock);
+			nrp->is_filter_on = true;
+			spin_unlock_bh(&ab->base_lock);
+
+			/* rx path completes this once an RSSI sample lands */
+			wait_for_completion_interruptible_timeout(&nrp->filter_done, 5 * HZ);
+
+			spin_lock_bh(&ab->base_lock);
+			nrp->is_filter_on = false;
+			spin_unlock_bh(&ab->base_lock);
+
+			len = scnprintf(buf, sizeof(buf),
+					"Neighbor Peer MAC\t\tRSSI\t\tTime\n");
+			len += scnprintf(buf + len, sizeof(buf) - len, "%pM\t\t%u\t\t%lld\n",
+					 nrp->addr, nrp->rssi, nrp->timestamp);
+		} else {
+			ath12k_peer_delete(ar, vdev_id, macaddr);
+			ath12k_warn(ab, "%pM not found in nrp list\n", macaddr);
+			return -EINVAL;
+		}
+		/* drop the temporary peer created above */
+		ath12k_peer_delete(ar, vdev_id, macaddr);
+	} else {
+		ath12k_warn(ab, "unable to create peer for nrp[%pM]\n", macaddr);
+		return -EINVAL;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_read_nrp_rssi = {
+	.read = ath12k_read_nrp_rssi,
+	.open = simple_open,
+};
+
+/* debugfs "nrp_mac" write handler.  Input format: "add,aa:bb:cc:dd:ee:ff"
+ * or "del,aa:bb:cc:dd:ee:ff".  "add" creates a neighbor-peer list entry
+ * plus a per-peer debugfs RSSI file (and, for the first entry, the
+ * directory and the widened monitor filter); "del" reverses that.
+ */
+static ssize_t ath12k_write_nrp_mac(struct file *file,
+				    const char __user *ubuf,
+				    size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_peer *peer = NULL;
+	struct ath12k_neighbor_peer *nrp = NULL, *tmp = NULL;
+	u8 mac[ETH_ALEN] = {0};
+	char fname[MAC_UNIT_LEN * ETH_ALEN] = {0};
+	char *str = NULL;
+	char *buf = vmalloc(count);
+	/* keep the original pointer: strsep() below advances buf */
+	char *ptr = buf;
+	int i = 0;
+	int j = 0;
+	int ret = count;
+	int action = 0;
+	ssize_t rc = 0;
+	bool del_nrp = false;
+
+	/* vmalloc() can fail (and returns NULL for count == 0); without
+	 * this check buf[count - 1] below dereferences a NULL pointer
+	 */
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&ar->conf_mutex);
+
+	rc = simple_write_to_buffer(buf, count, ppos, ubuf, count);
+	if (rc <= 0)
+		goto exit;
+
+	/* To remove '\n' at end of buffer */
+	buf[count - 1] = '\0';
+
+	if (ar->state != ATH12K_STATE_ON) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	/* first token selects the action */
+	str = strsep(&buf, ",");
+	if (!strcmp(str, "add"))
+		action = NRP_ACTION_ADD;
+	else if (!strcmp(str, "del"))
+		action = NRP_ACTION_DEL;
+	else {
+		ath12k_err(ab, "error: invalid argument\n");
+		goto exit;
+	}
+
+	/* parse the colon-separated MAC address */
+	memset(mac, 0, sizeof(mac));
+	while ((str = strsep(&buf, ":")) != NULL) {
+		if (i >= ETH_ALEN || kstrtou8(str, 16, mac + i)) {
+			ath12k_warn(ab, "error: invalid mac address\n");
+			goto exit;
+		}
+		i++;
+	}
+
+	if (i != ETH_ALEN) {
+		ath12k_warn(ab, "error: invalid mac address\n");
+		goto exit;
+	}
+
+	if (!is_valid_ether_addr(mac)) {
+		ath12k_err(ab, "error: invalid mac address\n");
+		goto exit;
+	}
+
+	/* build the per-peer debugfs file name "aa:bb:cc:dd:ee:ff" */
+	for (i = 0, j = 0; i < (MAC_UNIT_LEN * ETH_ALEN); i += MAC_UNIT_LEN, j++) {
+		if (j == ETH_ALEN - 1) {
+			snprintf(fname + i, sizeof(fname) - i, "%02x", mac[j]);
+			break;
+		}
+		snprintf(fname + i, sizeof(fname) - i, "%02x:", mac[j]);
+	}
+
+	switch (action) {
+	case NRP_ACTION_ADD:
+		/* NOTE(review): caps the list at ATH12K_MAX_NRPS - 1
+		 * entries — confirm whether the intended limit is
+		 * ATH12K_MAX_NRPS
+		 */
+		if (ab->num_nrps == (ATH12K_MAX_NRPS - 1)) {
+			ath12k_warn(ab, "max nrp reached, cannot create more\n");
+			goto exit;
+		}
+
+		list_for_each_entry(nrp, &ab->neighbor_peers, list) {
+			if (ether_addr_equal(nrp->addr, mac)) {
+				ath12k_warn(ab, "cannot add existing neighbor peer\n");
+				goto exit;
+			}
+		}
+
+		/* an already-associated peer cannot also be an NRP */
+		spin_lock_bh(&ab->base_lock);
+		peer = ath12k_peer_find_by_addr(ab, mac);
+		if (peer) {
+			ath12k_warn(ab, "cannot add exisitng peer [%pM] as nrp\n", mac);
+			spin_unlock_bh(&ab->base_lock);
+			goto exit;
+		}
+		spin_unlock_bh(&ab->base_lock);
+
+		nrp = kzalloc(sizeof(*nrp), GFP_KERNEL);
+		if (!nrp)
+			goto exit;
+
+		init_completion(&nrp->filter_done);
+		ether_addr_copy(nrp->addr, mac);
+
+		spin_lock_bh(&ab->base_lock);
+		list_add_tail(&nrp->list, &ab->neighbor_peers);
+		spin_unlock_bh(&ab->base_lock);
+
+		/* first entry: create the directory and widen the filter */
+		if (!ab->num_nrps) {
+			ar->debug.debugfs_nrp = debugfs_create_dir("nrp_rssi",
+								   ar->debug.debugfs_pdev);
+			ath12k_reset_nrp_filter(ar, false);
+		}
+		spin_lock_bh(&ab->base_lock);
+		ab->num_nrps++;
+		spin_unlock_bh(&ab->base_lock);
+
+		debugfs_create_file(fname, 0644,
+				    ar->debug.debugfs_nrp, ar,
+				    &fops_read_nrp_rssi);
+		break;
+	case NRP_ACTION_DEL:
+		if (!ar->ab->num_nrps) {
+			ath12k_err(ab, "error: no nac added\n");
+			goto exit;
+		}
+
+		spin_lock_bh(&ab->base_lock);
+		list_for_each_entry_safe(nrp, tmp, &ab->neighbor_peers, list) {
+			if (ether_addr_equal(nrp->addr, mac)) {
+				list_del(&nrp->list);
+				kfree(nrp);
+				del_nrp = true;
+				break;
+			}
+		}
+		spin_unlock_bh(&ab->base_lock);
+
+		if (!del_nrp)
+			ath12k_warn(ab, "cannot delete %pM not added to list\n", mac);
+		else
+			ath12k_debugfs_nrp_clean(ar, mac);
+		break;
+	default:
+		break;
+	}
+exit:
+	mutex_unlock(&ar->conf_mutex);
+
+	vfree(ptr);
+	return ret;
+}
+
+static const struct file_operations fops_write_nrp_mac = {
+	.write = ath12k_write_nrp_mac,
+	.open = simple_open,
+};
+
+/* debugfs "wmm_stats" read handler: dumps per-AC (BE/BK/VI/VO) tx/rx
+ * packet and drop counters plus their totals.
+ *
+ * Fix: the original reused the @count parameter as the AC loop index,
+ * so the final simple_read_from_buffer() was called with
+ * count == WME_NUM_AC and every read was truncated to 4 bytes.  A
+ * dedicated loop variable keeps the caller's buffer size intact.
+ */
+static ssize_t ath12k_read_wmm_stats(struct file *file,
+				     char __user *ubuf,
+				     size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	int len = 0;
+	int size = 2048;
+	int ac;
+	char *buf;
+	ssize_t retval;
+	u64 total_wmm_sent_pkts = 0;
+	u64 total_wmm_received_pkts = 0;
+	u64 total_wmm_fail_sent = 0;
+	u64 total_wmm_fail_received = 0;
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf) {
+		ath12k_warn(ar->ab,"failed to allocate the buffer%s\n", __func__);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&ar->conf_mutex);
+	/* accumulate totals across all access categories */
+	for (ac = 0; ac < WME_NUM_AC; ac++) {
+		total_wmm_sent_pkts += ar->wmm_stats.total_wmm_tx_pkts[ac];
+		total_wmm_received_pkts += ar->wmm_stats.total_wmm_rx_pkts[ac];
+		total_wmm_fail_sent += ar->wmm_stats.total_wmm_tx_drop[ac];
+		total_wmm_fail_received += ar->wmm_stats.total_wmm_rx_drop[ac];
+	}
+
+	len += scnprintf(buf + len, size - len, "Total number of wmm_sent: %llu\n",
+			 total_wmm_sent_pkts);
+	len += scnprintf(buf + len, size - len, "total number of wmm_received: %llu\n",
+			 total_wmm_received_pkts);
+	len += scnprintf(buf + len, size - len, "total number of wmm_fail_sent: %llu\n",
+			 total_wmm_fail_sent);
+	len += scnprintf(buf + len, size - len, "total number of wmm_fail_received: %llu\n",
+			 total_wmm_fail_received);
+	len += scnprintf(buf + len, size - len, "Num of BE wmm_sent: %llu\n",
+			 ar->wmm_stats.total_wmm_tx_pkts[WME_AC_BE]);
+	len += scnprintf(buf + len, size - len, "Num of BK wmm_sent: %llu\n",
+			 ar->wmm_stats.total_wmm_tx_pkts[WME_AC_BK]);
+	len += scnprintf(buf + len, size - len, "Num of VI wmm_sent: %llu\n",
+			 ar->wmm_stats.total_wmm_tx_pkts[WME_AC_VI]);
+	len += scnprintf(buf + len, size - len, "Num of VO wmm_sent: %llu\n",
+			 ar->wmm_stats.total_wmm_tx_pkts[WME_AC_VO]);
+	len += scnprintf(buf + len, size - len, "num of be wmm_received: %llu\n",
+			 ar->wmm_stats.total_wmm_rx_pkts[WME_AC_BE]);
+	len += scnprintf(buf + len, size - len, "num of bk wmm_received: %llu\n",
+			 ar->wmm_stats.total_wmm_rx_pkts[WME_AC_BK]);
+	len += scnprintf(buf + len, size - len, "num of vi wmm_received: %llu\n",
+			 ar->wmm_stats.total_wmm_rx_pkts[WME_AC_VI]);
+	len += scnprintf(buf + len, size - len, "num of vo wmm_received: %llu\n",
+			 ar->wmm_stats.total_wmm_rx_pkts[WME_AC_VO]);
+	len += scnprintf(buf + len, size - len, "num of be wmm_tx_dropped: %llu\n",
+			 ar->wmm_stats.total_wmm_tx_drop[WME_AC_BE]);
+	len += scnprintf(buf + len, size - len, "num of bk wmm_tx_dropped: %llu\n",
+			 ar->wmm_stats.total_wmm_tx_drop[WME_AC_BK]);
+	len += scnprintf(buf + len, size - len, "num of vi wmm_tx_dropped: %llu\n",
+			 ar->wmm_stats.total_wmm_tx_drop[WME_AC_VI]);
+	len += scnprintf(buf + len, size - len, "num of vo wmm_tx_dropped: %llu\n",
+			 ar->wmm_stats.total_wmm_tx_drop[WME_AC_VO]);
+	len += scnprintf(buf + len, size - len, "num of be wmm_rx_dropped: %llu\n",
+			 ar->wmm_stats.total_wmm_rx_drop[WME_AC_BE]);
+	len += scnprintf(buf + len, size - len, "num of bk wmm_rx_dropped: %llu\n",
+			 ar->wmm_stats.total_wmm_rx_drop[WME_AC_BK]);
+	len += scnprintf(buf + len, size - len, "num of vi wmm_rx_dropped: %llu\n",
+			 ar->wmm_stats.total_wmm_rx_drop[WME_AC_VI]);
+	len += scnprintf(buf + len, size - len, "num of vo wmm_rx_dropped: %llu\n",
+			 ar->wmm_stats.total_wmm_rx_drop[WME_AC_VO]);
+
+	mutex_unlock(&ar->conf_mutex);
+
+	if (len > size)
+		len = size;
+	retval = simple_read_from_buffer(ubuf, count, ppos, buf, len);
+	kfree(buf);
+
+	return retval;
+}
+
+static const struct file_operations fops_wmm_stats = {
+	.read = ath12k_read_wmm_stats,
+	.open = simple_open,
+};
+
+/* Append one backpressure-stats record (count/hp/tp/age) to @buf at
+ * offset @len; returns the new length.  Caller must hold ab->base_lock.
+ */
+static int ath12k_fill_bp_stats(struct ath12k_base *ab,
+				struct ath12k_bp_stats *bp_stats,
+				char *buf, int len, int size)
+{
+	lockdep_assert_held(&ab->base_lock);
+
+	len += scnprintf(buf + len, size - len, "count: %u\n",
+			 bp_stats->count);
+	len += scnprintf(buf + len, size - len, "hp: %u\n",
+			 bp_stats->hp);
+	len += scnprintf(buf + len, size - len, "tp: %u\n",
+			 bp_stats->tp);
+	len += scnprintf(buf + len, size - len, "seen before: %ums\n\n",
+			 jiffies_to_msecs(jiffies - bp_stats->jiffies));
+	return len;
+}
+
+/* Format all UMAC and per-pdev LMAC ring backpressure stats into @buf.
+ * Rings with a zero event count are skipped; if nothing was ever
+ * reported a placeholder line is emitted.  Returns bytes written.
+ */
+static ssize_t ath12k_debugfs_dump_soc_ring_bp_stats(struct ath12k_base *ab,
+						     char *buf, int size)
+{
+	struct ath12k_bp_stats *bp_stats;
+	bool stats_rxd = false;
+	u8 i, pdev_idx;
+	int len = 0;
+
+	len += scnprintf(buf + len, size - len, "\nBackpressure Stats\n");
+	len += scnprintf(buf + len, size - len, "==================\n");
+
+	spin_lock_bh(&ab->base_lock);
+	for (i = 0; i < HTT_SW_UMAC_RING_IDX_MAX; i++) {
+		bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[i];
+
+		if (!bp_stats->count)
+			continue;
+
+		len += scnprintf(buf + len, size - len, "Ring: %s\n",
+				 htt_bp_umac_ring[i]);
+		len = ath12k_fill_bp_stats(ab, bp_stats, buf, len, size);
+		stats_rxd = true;
+	}
+
+	for (i = 0; i < HTT_SW_LMAC_RING_IDX_MAX; i++) {
+		for (pdev_idx = 0; pdev_idx < MAX_RADIOS; pdev_idx++) {
+			bp_stats =
+				&ab->soc_stats.bp_stats.lmac_ring_bp_stats[i][pdev_idx];
+
+			if (!bp_stats->count)
+				continue;
+
+			len += scnprintf(buf + len, size - len, "Ring: %s\n",
+					 htt_bp_lmac_ring[i]);
+			len += scnprintf(buf + len, size - len, "pdev: %d\n",
+					 pdev_idx);
+			len = ath12k_fill_bp_stats(ab, bp_stats, buf, len, size);
+			stats_rxd = true;
+		}
+	}
+	spin_unlock_bh(&ab->base_lock);
+
+	if (!stats_rxd)
+		len += scnprintf(buf + len, size - len,
+				 "No Ring Backpressure stats received\n\n");
+
+	return len;
+}
+
+/* debugfs "soc_dp_stats" read handler: formats the whole SoC datapath
+ * statistics block (rx errors, tx ring failures, WBM/TQM counters,
+ * per-ring REO rx counts, backpressure stats) into one 4 KiB snapshot.
+ * NOTE(review): counters are read without locking; the snapshot may be
+ * internally inconsistent, which is typical for debugfs stat dumps.
+ */
+static ssize_t ath12k_debugfs_dump_soc_dp_stats(struct file *file,
+						char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ath12k_base *ab = file->private_data;
+	struct ath12k_soc_dp_stats *soc_stats = &ab->soc_stats;
+	struct ath12k *ar;
+	int len = 0, i, retval;
+	const int size = 4096;
+	/* human-readable names indexed by the rxdma error code */
+	static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
+			"Overflow", "MPDU len", "FCS", "Decrypt", "TKIP MIC",
+			"Unencrypt", "MSDU len", "MSDU limit", "WiFi parse",
+			"AMSDU parse", "SA timeout", "DA timeout",
+			"Flow timeout", "Flush req"};
+	static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = {
+			"Desc addr zero", "Desc inval", "AMPDU in non BA",
+			"Non BA dup", "BA dup", "Frame 2k jump", "BAR 2k jump",
+			"Frame OOR", "BAR OOR", "No BA session",
+			"Frame SN equal SSN", "PN check fail", "2k err",
+			"PN err", "Desc blocked"};
+
+	static const char *wbm_rel_src[HAL_WBM_REL_SRC_MODULE_MAX] = {
+				"TQM", "Rxdma", "Reo", "FW", "SW" };
+	char *buf;
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len += scnprintf(buf + len, size - len, "SOC RX STATS:\n\n");
+	len += scnprintf(buf + len, size - len, "err ring pkts: %u\n",
+			 soc_stats->err_ring_pkts);
+	len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n",
+			 soc_stats->invalid_rbm);
+	len += scnprintf(buf + len, size - len, "RXDMA errors:\n");
+	for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++)
+		len += scnprintf(buf + len, size - len, "%s: handled %u dropped %u\n",
+				 rxdma_err[i], soc_stats->rxdma_error[i],
+				 soc_stats->rxdma_error_drop[i]);
+
+	len += scnprintf(buf + len, size - len, "\nREO errors:\n");
+	for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++)
+		len += scnprintf(buf + len, size - len, "%s: handled %u dropped %u\n",
+				 reo_err[i], soc_stats->reo_error[i],
+				 soc_stats->reo_error_drop[i]);
+	len += scnprintf(buf + len, size - len, "REO excep MSDU buf type:%u\n",
+			soc_stats->reo_excep_msdu_buf_type);
+
+	len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n");
+	len += scnprintf(buf + len, size - len,
+			 "ring0: %u\nring1: %u\nring2: %u\nring3: %u\n",
+			 soc_stats->hal_reo_error[0],
+			 soc_stats->hal_reo_error[1],
+			 soc_stats->hal_reo_error[2],
+			 soc_stats->hal_reo_error[3]);
+
+	len += scnprintf(buf + len, size - len, "\nSOC TX STATS:\n");
+	len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n");
+
+	for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+		len += scnprintf(buf + len, size - len, "ring%d: %u\n",
+				 i, soc_stats->tx_err.desc_na[i]);
+
+	len += scnprintf(buf + len, size - len, "\nTCL Ring Buffer Alloc Failures:\n");
+	for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+		len += scnprintf(buf + len, size - len, "ring%d: %u\n",
+				 i, soc_stats->tx_err.txbuf_na[i]);
+
+	len += scnprintf(buf + len, size - len,
+			 "\nTx Threshold limit: %d\n",
+			 soc_stats->tx_err.pdev_threshold_limit);
+
+	len += scnprintf(buf + len, size - len,
+			 "\nGroup Tx Threshold limit: %u\n",
+			 soc_stats->tx_err.group_threshold_limit);
+
+	len += scnprintf(buf + len, size - len,
+			 "\nMisc Transmit Failures: %d\n",
+			 atomic_read(&soc_stats->tx_err.misc_fail));
+
+	len += scnprintf(buf + len, size - len,
+			 "\nMon drop descriptor: %u\n",
+			 soc_stats->mon_drop_desc);
+
+	len += scnprintf(buf + len, size - len,
+			"\nHAL_REO_CMD_DRAIN Counter: %u\n",
+			soc_stats->hal_reo_cmd_drain);
+
+	len += scnprintf(buf + len, size - len,
+			"\nREO_CMD_CACHE_FLUSH Failure: %u\n",
+			soc_stats->reo_cmd_cache_error);
+
+	len += scnprintf(buf + len, size - len,
+			"\nREO_CMD_UPDATE_RX_QUEUE Failure: %u\n",
+			soc_stats->reo_cmd_update_rx_queue_error);
+
+	len += scnprintf(buf + len, size - len,
+			"\nmcast reinject: %u\n",
+			soc_stats->mcast_reinject);
+
+	len += scnprintf(buf + len, size - len,
+			"\ntx_wbm_rel_source: 0:%u 1:%u 2:%u 3:%u 4:%u\n",
+			soc_stats->tx_wbm_rel_source[0],
+			soc_stats->tx_wbm_rel_source[1],
+			soc_stats->tx_wbm_rel_source[2],
+			soc_stats->tx_wbm_rel_source[3],
+			soc_stats->tx_wbm_rel_source[4]);
+
+	len += scnprintf(buf + len, size - len,
+			"\ntqm_rel_reason: 0:%u 1:%u 2:%u 3:%u 4:%u 5:%u 6:%u 7:%u 8:%u 9:%u 10:%u 11:%u 12:%u 13:%u 14:%u\n",
+			soc_stats->tqm_rel_reason[0],
+			soc_stats->tqm_rel_reason[1],
+			soc_stats->tqm_rel_reason[2],
+			soc_stats->tqm_rel_reason[3],
+			soc_stats->tqm_rel_reason[4],
+			soc_stats->tqm_rel_reason[5],
+			soc_stats->tqm_rel_reason[6],
+			soc_stats->tqm_rel_reason[7],
+			soc_stats->tqm_rel_reason[8],
+			soc_stats->tqm_rel_reason[9],
+			soc_stats->tqm_rel_reason[10],
+			soc_stats->tqm_rel_reason[11],
+			soc_stats->tqm_rel_reason[12],
+			soc_stats->tqm_rel_reason[13],
+			soc_stats->tqm_rel_reason[14]);
+
+	len += scnprintf(buf + len, size - len,
+			"\nfw_tx_status: 0:%u 1:%u 2:%u 3:%u 4:%u 5:%u 6:%u\n",
+			soc_stats->fw_tx_status[0],
+			soc_stats->fw_tx_status[1],
+			soc_stats->fw_tx_status[2],
+			soc_stats->fw_tx_status[3],
+			soc_stats->fw_tx_status[4],
+			soc_stats->fw_tx_status[5],
+			soc_stats->fw_tx_status[6]);
+
+	len += scnprintf(buf + len, size - len,
+			"\ntx_enqueued: 0:%u 1:%u 2:%u 3:%u\n",
+			soc_stats->tx_enqueued[0],
+			soc_stats->tx_enqueued[1],
+			soc_stats->tx_enqueued[2],
+			soc_stats->tx_enqueued[3]);
+
+	len += scnprintf(buf + len, size - len,
+			 "\nTx Peers Not Present: %d\n",
+			 soc_stats->tx_err.peers_not_present);
+
+	len += scnprintf(buf + len, size - len,
+			"\nnull tx complete: 0:%u 1:%u 2:%u 3:%u\n",
+			soc_stats->null_tx_complete[0],
+			soc_stats->null_tx_complete[1],
+			soc_stats->null_tx_complete[2],
+			soc_stats->null_tx_complete[3]);
+
+	len += scnprintf(buf + len, size - len,
+			"\ntx_completed: 0:%u 1:%u 2:%u 3:%u\n",
+			soc_stats->tx_completed[0],
+			soc_stats->tx_completed[1],
+			soc_stats->tx_completed[2],
+			soc_stats->tx_completed[3]);
+
+
+	len += scnprintf(buf + len, size - len, "\nbond_tx_ucast_enqueued: ");
+	for (i = 0; i < MAX_TCL_RING; i++)
+		len += scnprintf(buf + len, size - len,"%u:%u ",
+				 i, soc_stats->bond_tx_ucast_enqueued[i]);
+
+	len += scnprintf(buf + len, size - len,	"\nbond_tx_mcast_enqueued: ");
+	for (i = 0; i < MAX_TCL_RING; i++)
+		len += scnprintf(buf + len, size - len,	"%u:%u ",
+				i, soc_stats->bond_tx_mcast_enqueued[i]);
+
+	len += scnprintf(buf + len, size - len, "\nbond_tx_ucast_dropped: ");
+	for (i = 0; i < MAX_TCL_RING; i++)
+		len += scnprintf(buf + len, size - len,	"%u:%u ",
+				i, soc_stats->bond_tx_ucast_dropped[i]);
+
+	len += scnprintf(buf + len, size - len, "\nbond_tx_mcast_dropped: ");
+	for (i = 0; i < MAX_TCL_RING; i++)
+		len += scnprintf(buf + len, size - len,	"%u:%u ",
+				i, soc_stats->bond_tx_mcast_dropped[i]);
+
+	len += scnprintf(buf + len, size - len, "\nbond_mcast2ucast_tried: ");
+	for (i = 0; i < MAX_TCL_RING; i++)
+		len += scnprintf(buf + len, size - len,	"%u:%u ",
+				i, soc_stats->bond_mcast2ucast_tried[i]);
+
+	len += scnprintf(buf + len, size - len, "\nbond_mcast2ucast_converted: ");
+	for (i = 0; i < MAX_TCL_RING; i++)
+		len += scnprintf(buf + len, size - len,	"%u:%u ",
+				i, soc_stats->bond_mcast2ucast_converted[i]);
+
+	len += scnprintf(buf + len, size - len,	"\nbond_mcast2ucast_drop: ");
+	for (i = 0; i < MAX_TCL_RING; i++)
+		len += scnprintf(buf + len, size - len,
+				"%u:%u ", i, soc_stats->bond_mcast2ucast_drop[i]);
+
+	len += scnprintf(buf + len, size - len,
+			 "\nag tx_pending: %u\n",
+			 atomic_read(&ab->ag->num_dp_tx_pending));
+
+	/* pdev ids are 1-based here; skip radios with no ar attached */
+	for (i = 1; i <= ab->num_radios; i++) {
+ 		ar = ath12k_mac_get_ar_by_pdev_id(ab, i);
+ 		if (ar) {
+ 			len += scnprintf(buf + len, size - len,
+					 "\nar tx_pending [%d]: %u\n", i,
+					 atomic_read(&ar->dp.num_tx_pending));
+ 		}
+ 	}
+
+	len += scnprintf(buf + len, size - len, "\nREO Rx Received:\n");
+	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
+		len += scnprintf(buf + len, size - len,
+				 "Ring%d: 0:%u\t1:%u\t2:%u\n",
+				 i + 1,
+				 soc_stats->reo_rx[i][0],
+				 soc_stats->reo_rx[i][1],
+				 soc_stats->reo_rx[i][2]);
+
+	len += scnprintf(buf + len, size - len, "\nREO Fast Rx:\n");
+	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
+		len += scnprintf(buf + len, size - len,
+				 "Ring%d: 0:%u\t1:%u\t2:%u\n",
+				 i + 1,
+				 soc_stats->fast_rx[i][0],
+				 soc_stats->fast_rx[i][1],
+				 soc_stats->fast_rx[i][2]);
+
+	len += scnprintf(buf + len, size - len, "\nREO Non-Fast Rx:\n");
+	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
+		len += scnprintf(buf + len, size - len,
+				 "Ring%d: 0:%u\t1:%u\t2:%u\n",
+				 i + 1,
+				 soc_stats->non_fast_rx[i][0],
+				 soc_stats->non_fast_rx[i][1],
+				 soc_stats->non_fast_rx[i][2]);
+
+	len += scnprintf(buf + len, size - len, "\nRx WBM REL SRC Errors:\n");
+	for (i = 0; i < HAL_WBM_REL_SRC_MODULE_MAX; i++)
+		len += scnprintf(buf + len, size - len,
+				"%s\t:0:%u\t1:%u\t2:%u\n",
+				wbm_rel_src[i],
+				soc_stats->rx_wbm_rel_source[i][0],
+				soc_stats->rx_wbm_rel_source[i][1],
+				soc_stats->rx_wbm_rel_source[i][2]);
+
+	len += scnprintf(buf + len, size - len,
+			"\nFIRST/LAST MSDU BIT MISSING COUNT: %u\n",
+			soc_stats->first_and_last_msdu_bit_miss);
+
+	len += ath12k_debugfs_dump_soc_ring_bp_stats(ab, buf + len, size - len);
+
+	/* scnprintf never writes past size, so this is a defensive clamp */
+	if (len > size)
+		len = size;
+	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+
+	return retval;
+}
+
+/* debugfs "soc_dp_stats" write handler: any input containing "reset"
+ * zeroes the whole SoC datapath statistics block.
+ */
+static ssize_t
+ath12k_debugfs_write_soc_dp_stats(struct file *file,
+				  const char __user *user_buf,
+				  size_t count, loff_t *ppos)
+{
+	struct ath12k_base *ab = file->private_data;
+	struct ath12k_soc_dp_stats *soc_stats = &ab->soc_stats;
+	char buf[20] = {0};
+	int ret;
+
+	/* Reserve one byte for NUL termination: the old "count > 20"
+	 * check admitted a full 20-byte write, leaving buf unterminated
+	 * and letting strstr() scan past the end of the array.
+	 */
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	ret = copy_from_user(buf, user_buf, count);
+	if (ret)
+		return -EFAULT;
+
+	if (strstr(buf, "reset"))
+		memset(soc_stats, 0, sizeof(struct ath12k_soc_dp_stats));
+
+	return count;
+}
+
+static const struct file_operations fops_soc_dp_stats = {
+	.read = ath12k_debugfs_dump_soc_dp_stats,
+	.write = ath12k_debugfs_write_soc_dp_stats,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* debugfs read handler: dumps HAL SRNG ring statistics into a 24 KiB
+ * buffer via ath12k_debugfs_hal_dump_srng_stats() and copies the
+ * result to userspace.
+ */
+static ssize_t ath12k_debugfs_hal_dump_srng_stats_read(struct file *file,
+						char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ath12k_base *ab = file->private_data;
+	int len = 0, retval;
+	const int size = 4096 * 6;
+	char *buf;
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len = ath12k_debugfs_hal_dump_srng_stats(ab, buf + len, size - len);
+	/* defensive clamp in case the dumper reports more than fits */
+	if (len > size)
+		len = size;
+	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+
+	return retval;
+}
+
+static const struct file_operations fops_dump_hal_stats = {
+	.read = ath12k_debugfs_hal_dump_srng_stats_read,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* debugfs "trace_qdss" read handler: prints usage help only. */
+static ssize_t ath12k_read_trace_qdss(struct file *file,
+				      char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	const char buf[] =
+	"1 - this will start qdss trace collection\n"
+	"0 - this will stop and save the qdss trace collection\n";
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+/* debugfs "trace_qdss" write handler: "1" starts QDSS trace collection,
+ * "0" stops it via a QMI trace-off request.  Requires at least one
+ * radio to be up.  Repeated starts/stops while already in the requested
+ * state are no-ops.
+ */
+static ssize_t
+ath12k_write_trace_qdss(struct file *file,
+			const char __user *user_buf,
+			size_t count, loff_t *ppos)
+{
+	struct ath12k_base *ab = file->private_data;
+	struct ath12k_pdev *pdev;
+	struct ath12k *ar;
+	int i, ret;
+	bool radioup = false;
+	bool qdss_enable;
+
+	if (kstrtobool_from_user(user_buf, count, &qdss_enable))
+		return -EINVAL;
+
+	/* need at least one active radio to talk to firmware */
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		ar = pdev->ar;
+		if (ar && ar->state == ATH12K_STATE_ON) {
+			radioup = true;
+			break;
+		}
+	}
+
+	if (!radioup) {
+		ath12k_err(ab, "radio is not up\n");
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	if (qdss_enable) {
+		if (ab->is_qdss_tracing) {
+			ret = count;
+			goto exit;
+		}
+		/* NOTE(review): ath12k_config_qdss() result is ignored —
+		 * confirm whether a failure here should be reported
+		 */
+		ath12k_config_qdss(ab);
+	} else {
+		if (!ab->is_qdss_tracing) {
+			ret = count;
+			goto exit;
+		}
+		ret = ath12k_send_qdss_trace_mode_req(ab,
+						      QMI_WLANFW_QDSS_TRACE_OFF_V01);
+		if (ret < 0)
+			ath12k_warn(ab,
+				    "Failed to stop QDSS: %d\n", ret);
+	}
+
+	ret = count;
+
+exit:
+	return ret;
+}
+
+static const struct file_operations fops_trace_qdss = {
+	.read = ath12k_read_trace_qdss,
+	.write = ath12k_write_trace_qdss,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* On multi-pd (qcom,multipd_arch) platforms, tell firmware via
+ * WMI_PDEV_PARAM_MPD_USERPD_SSR which protection domain should raise
+ * the fatal IRQ for each radio, based on the fw-recovery mode @value.
+ * No-op when the device tree does not declare a multi-pd architecture.
+ */
+static void ath12k_debug_multipd_wmi_pdev_set_param(struct ath12k_base *ab,
+						    const unsigned int value)
+{
+	struct ath12k_pdev *pdev;
+	struct ath12k *ar;
+	struct device *dev = ab->dev;
+	int radio_idx;
+	bool multi_pd_arch;
+	bool assert_userpd;
+
+	multi_pd_arch = of_property_read_bool(dev->of_node, "qcom,multipd_arch");
+
+	if (!multi_pd_arch)
+		return;
+
+	for (radio_idx = 0; radio_idx < ab->num_radios; radio_idx++) {
+		pdev = &ab->pdevs[radio_idx];
+		ar = pdev->ar;
+
+		/* Set pdev param to let firmware know which pd to use for
+		 * sending fatal IRQ.
+		 * Non-MLO, fatal error comes from asserted radios's user pd
+		 * MLO, fatal error comes from asserted radio's root pd
+		 */
+		if (!ab->ag->mlo_capable) {
+			assert_userpd = true;
+		} else {
+			if (value == ATH12K_FW_RECOVERY_DISABLE ||
+			    value == ATH12K_FW_RECOVERY_ENABLE_SSR_ONLY ||
+			    value == ATH12K_FW_RECOVERY_ENABLE_AUTO)
+				assert_userpd = false;
+			else
+				assert_userpd = true;
+		}
+
+		ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_MPD_USERPD_SSR,
+					  assert_userpd, ar->pdev->pdev_id);
+	}
+}
+
+/* debugfs "fw_recovery" write handler: accepts 0..3 selecting the
+ * firmware recovery mode, propagates the multi-pd SSR pdev param, and
+ * on MLO-capable groups pushes the chosen hang-recovery mode to every
+ * chip's firmware (when the TLV advertised a recovery mode address).
+ */
+static ssize_t ath12k_debug_write_fw_recovery(struct file *file,
+					      const char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ath12k_base *ab = file->private_data;
+	struct ath12k *ar;
+	struct ath12k_pdev *pdev;
+	struct ath12k_hw_group *ag;
+	unsigned int value;
+	enum wmi_fw_hang_recovery_mode_type recovery_mode;
+	int ret, radio_idx, radioup = 0;
+	int i;
+
+	if (kstrtouint_from_user(user_buf, count, 0, &value))
+	         return -EINVAL;
+
+	/* NOTE(review): if ATH12K_FW_RECOVERY_DISABLE is 0, the first
+	 * half of this unsigned comparison is always false — confirm
+	 * the constant's value
+	 */
+	if (value < ATH12K_FW_RECOVERY_DISABLE ||
+	    value > ATH12K_FW_RECOVERY_ENABLE_SSR_ONLY) {
+	         ath12k_warn(ab, "Please enter: 0 = Disable, 1 = Enable (auto recover), "
+			     "2 = Enable SSR Mode1, 3 = Enable SSR only");
+	         ret = -EINVAL;
+	         goto exit;
+	}
+
+	ag = ab->ag;
+
+	if (!value)
+		recovery_mode = ATH12K_WMI_DISABLE_FW_RECOVERY;
+	else
+		recovery_mode = (value == ATH12K_FW_RECOVERY_ENABLE_MODE1_AUTO) ?
+			ATH12K_WMI_FW_HANG_RECOVERY_MODE1 :
+			ATH12K_WMI_FW_HANG_RECOVERY_MODE0;
+
+	ath12k_debug_multipd_wmi_pdev_set_param(ab, value);
+
+	if (ag->mlo_capable) {
+		/* apply the setting to every chip in the hw group */
+		for (i = 0; i < ag->num_chip; i++) {
+			ab = ag->ab[i];
+			mutex_lock(&ab->core_lock);
+			ab->fw_recovery_support = value;
+			mutex_unlock(&ab->core_lock);
+
+			/*
+			 * Set MODE0 or MODE 1, if recovery mode addr is valid.
+			 * TODO: Instead of checking recovery mode addr from
+			 * TLV, need to check WMI caps once the support is
+			 * added from FW.
+			 */
+			if (ab->recovery_mode_address) {
+				for (radio_idx = 0; radio_idx < ab->num_radios; radio_idx++) {
+
+					pdev = &ab->pdevs[radio_idx];
+					ar = pdev->ar;
+					if (ar && ar->state == ATH12K_STATE_ON) {
+						radioup = 1;
+						break;
+					}
+				}
+
+				if (radioup) {
+					ret = ath12k_wmi_force_fw_hang_cmd(ar,
+									   recovery_mode,
+									   ATH12K_WMI_FW_HANG_DELAY, false);
+					ath12k_info(ab, "setting FW assert mode [%d] ret [%d]\n", recovery_mode, ret);
+				} else
+					continue;
+			}
+		}
+	} else
+		ab->fw_recovery_support = value ? true : false;
+
+	ret = count;
+
+exit:
+	return ret;
+}
+
+/* debugfs "fw_recovery" read handler: reports the current setting. */
+static ssize_t ath12k_debug_read_fw_recovery(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct ath12k_base *ab = file->private_data;
+	char buf[32];
+	size_t len;
+
+	len = scnprintf(buf, sizeof(buf), "%u\n", ab->fw_recovery_support);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_fw_recovery = {
+	.read = ath12k_debug_read_fw_recovery,
+	.write = ath12k_debug_write_fw_recovery,
+	.open = simple_open,
+};
+
+static ssize_t ath12k_read_fw_dbglog(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct ath12k_base *ab = file->private_data;
+	size_t len;
+	char buf[128];
+
+	len = scnprintf(buf, sizeof(buf), "%u 0x%016llx\n",
+			ab->fw_dbglog_param, ab->fw_dbglog_val);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+/* debugfs write handler for "fw_dbglog_config".
+ *
+ * Expected input: "<param> <value-in-hex>", e.g. "1 ffff".
+ * Caches the setting in the ath12k_base and pushes it to firmware via
+ * WMI on the first radio (pdevs[0]).
+ */
+static ssize_t ath12k_write_fw_dbglog(struct file *file,
+				      const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ath12k_base *ab = file->private_data;
+	struct ath12k *ar = ab->pdevs[0].ar;
+	char buf[128] = {0};
+	unsigned int param;
+	u64 value;
+	int ret;
+
+	/* No radio registered yet: nothing to configure. */
+	if (!ar)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+				     user_buf, count);
+	if (ret <= 0)
+		goto out;
+
+	/* Both fields are mandatory; the value is parsed as hex. */
+	ret = sscanf(buf, "%u %llx", &param, &value);
+
+	if (ret != 2) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Cache first so the read handler reports the last request even
+	 * if the WMI command below fails.
+	 */
+	ab->fw_dbglog_param = param;
+	ab->fw_dbglog_val = value;
+	ret = ath12k_wmi_dbglog_cfg(ar, param, value);
+	if (ret) {
+		ath12k_warn(ar->ab, "dbglog cfg failed from debugfs: %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = count;
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* file_operations for the "fw_dbglog_config" debugfs node */
+static const struct file_operations fops_fw_dbglog = {
+	.open = simple_open,
+	.read = ath12k_read_fw_dbglog,
+	.write = ath12k_write_fw_dbglog,
+	.llseek = default_llseek,
+	.owner = THIS_MODULE,
+};
+
+/* Enable or disable monitor-status RX filtering on every rxdma monitor
+ * destination ring of this radio. When enabling, the default monitor
+ * status filter is programmed; when disabling, an empty filter with
+ * rxmon_disable set is used.
+ */
+static void ath12k_debug_config_mon_status(struct ath12k *ar, bool enable)
+{
+	struct htt_rx_ring_tlv_filter tlv_filter = {0};
+	struct ath12k_base *ab = ar->ab;
+	int i;
+	u32 ring_id;
+
+	if (enable) {
+	        tlv_filter = ath12k_mac_mon_status_filter_default;
+		tlv_filter.rxmon_disable = false;
+	} else {
+		tlv_filter.rxmon_disable = true;
+	}
+
+	/* NOTE(review): return values of the filter setup calls are
+	 * ignored here; failures are silently dropped.
+	 */
+	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+	        ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+	        ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+	                                         ar->dp.mac_id + i,
+	                                         HAL_RXDMA_MONITOR_DST,
+	                                         DP_RXDMA_REFILL_RING_SIZE,
+	                                         &tlv_filter);
+	}
+}
+
+/* debugfs write handler for "stats_disable": "1" stops monitor-status
+ * and PPDU stats collection on every active radio, "0" re-enables it.
+ */
+static ssize_t ath12k_write_stats_disable(struct file *file,
+					  const char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct ath12k_base *ab = file->private_data;
+	u32 ppdu_mask = 0;
+	bool disable;
+	int i;
+
+	if (kstrtobool_from_user(user_buf, count, &disable))
+		return -EINVAL;
+
+	/* Only act on an actual state change. */
+	if (disable == ab->stats_disable)
+		return count;
+
+	ab->stats_disable = disable;
+	if (!disable)
+		ppdu_mask = HTT_PPDU_STATS_TAG_DEFAULT;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		struct ath12k_pdev *pdev = &ab->pdevs[i];
+
+		if (!pdev || !pdev->ar)
+			continue;
+
+		ath12k_debug_config_mon_status(pdev->ar, !disable);
+		ath12k_dp_tx_htt_h2t_ppdu_stats_req(pdev->ar, ppdu_mask);
+	}
+
+	return count;
+}
+
+/* debugfs read handler for "stats_disable": report the current state
+ * as "0\n" or "1\n".
+ *
+ * Fixes: stray double semicolon on the return statement and
+ * inconsistent space indentation in the original.
+ */
+static ssize_t ath12k_read_stats_disable(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct ath12k_base *ab = file->private_data;
+	char buf[3];
+	size_t len;
+
+	len = scnprintf(buf, sizeof(buf), "%d\n", ab->stats_disable ? 1 : 0);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+/* file_operations for the "stats_disable" debugfs node */
+static const struct file_operations fops_soc_stats_disable = {
+	.read = ath12k_read_stats_disable,
+	.write = ath12k_write_stats_disable,
+	.open = simple_open,
+};
+
+/* debugfs write handler for "rx_hash_ix3".
+ *
+ * Accepts a hex hash value and programs it into the REO ring control
+ * IX3 hash register, provided at least one radio is up.
+ */
+static ssize_t ath12k_write_rx_hash_ix3(struct file *file,
+				        const char __user *ubuf,
+				        size_t count, loff_t *ppos)
+{
+	struct ath12k_base *ab = file->private_data;
+	struct ath12k_pdev *pdev;
+	u32 rx_hash;
+	u8 buf[128] = {0};
+	int ret, i, radioup = 0;
+
+	/* The hash is only applied while at least one radio is active. */
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+	        if (pdev && pdev->ar) {
+			radioup = 1;
+			break;
+	        }
+	}
+
+	if (radioup == 0) {
+		ath12k_err(ab, "radio is not up\n");
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+	if (ret < 0)
+		goto exit;
+
+	buf[ret] = '\0';
+	ret = sscanf(buf, "%x", &rx_hash);
+	if (!ret) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* NOTE(review): a zero hash is cached but not programmed to HW. */
+	if (rx_hash != ab->rx_hash_ix3) {
+		ab->rx_hash_ix3 = rx_hash;
+	        if (rx_hash)
+			ath12k_hal_reo_ring_ctrl_hash_ix3_setup(ab, rx_hash);
+	}
+	ret = count;
+exit:
+	return ret;
+}
+
+/* debugfs write handler for "rx_hash_ix2".
+ *
+ * Same flow as ath12k_write_rx_hash_ix3() but targets the REO ring
+ * control IX2 hash register.
+ */
+static ssize_t ath12k_write_rx_hash_ix2(struct file *file,
+				        const char __user *ubuf,
+				        size_t count, loff_t *ppos)
+{
+	struct ath12k_base *ab = file->private_data;
+	struct ath12k_pdev *pdev;
+	u32 rx_hash;
+	u8 buf[128] = {0};
+	int ret, i, radioup = 0;
+
+	/* The hash is only applied while at least one radio is active. */
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+	        if (pdev && pdev->ar) {
+			radioup = 1;
+			break;
+	        }
+	}
+
+	if (radioup == 0) {
+		ath12k_err(ab, "radio is not up\n");
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+	if (ret < 0)
+		goto exit;
+
+	buf[ret] = '\0';
+	ret = sscanf(buf, "%x", &rx_hash);
+	if (!ret) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* NOTE(review): a zero hash is cached but not programmed to HW. */
+	if (rx_hash != ab->rx_hash_ix2) {
+		ab->rx_hash_ix2 = rx_hash;
+	        if (rx_hash)
+			ath12k_hal_reo_ring_ctrl_hash_ix2_setup(ab, rx_hash);
+	}
+	ret = count;
+exit:
+	return ret;
+}
+
+/* write-only debugfs node programming the REO IX2 hash */
+static const struct file_operations fops_soc_rx_hash_ix2 = {
+	.write = ath12k_write_rx_hash_ix2,
+	.open = simple_open,
+};
+
+/* write-only debugfs node programming the REO IX3 hash */
+static const struct file_operations fops_soc_rx_hash_ix3 = {
+	.write = ath12k_write_rx_hash_ix3,
+	.open = simple_open,
+};
+
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+/* debugfs read handler for "ppeds_stats": dump all PPE-DS counters.
+ *
+ * Fix: the user buffer of a file_operations::read handler must be
+ * "char __user *", not "const char __user *"; the original signature
+ * mismatched the .read prototype in fops_ppeds_stats.
+ */
+static ssize_t ath12k_debugfs_dump_ppeds_stats(struct file *file,
+						char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ath12k_base *ab = file->private_data;
+	struct ath12k_ppeds_stats *ppeds_stats = &ab->ppeds_stats;
+	int len = 0,  retval;
+	const int size = 4096;
+	char *buf;
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len += scnprintf(buf + len, size - len, "PPEDS STATS\n");
+	len += scnprintf(buf + len, size - len, "-----------\n");
+	len += scnprintf(buf + len, size - len, "tcl_prod_cnt %u\n",
+			 ppeds_stats->tcl_prod_cnt);
+	len += scnprintf(buf + len, size - len, "tcl_cons_cnt %u\n",
+			 ppeds_stats->tcl_cons_cnt);
+	len += scnprintf(buf + len, size - len, "reo_prod_cnt %u\n",
+			 ppeds_stats->reo_prod_cnt);
+	len += scnprintf(buf + len, size - len, "reo_cons_cnt %u\n",
+			 ppeds_stats->reo_cons_cnt);
+	len += scnprintf(buf + len, size - len, "get_tx_desc_cnt %u\n",
+			 ppeds_stats->get_tx_desc_cnt);
+	len += scnprintf(buf + len, size - len, "enable_intr_cnt %u\n",
+			 ppeds_stats->enable_intr_cnt);
+	len += scnprintf(buf + len, size - len, "disable_intr_cnt %u\n",
+			 ppeds_stats->disable_intr_cnt);
+	len += scnprintf(buf + len, size - len, "release_tx_single_cnt %u\n",
+			 ppeds_stats->release_tx_single_cnt);
+	len += scnprintf(buf + len, size - len, "release_rx_desc_cnt %u\n",
+			 ppeds_stats->release_rx_desc_cnt);
+	len += scnprintf(buf + len, size - len, "tx_desc_allocated %u\n",
+			 ppeds_stats->tx_desc_allocated);
+	len += scnprintf(buf + len, size - len, "tx_desc_freed %u\n",
+			 ppeds_stats->tx_desc_freed);
+	len += scnprintf(buf + len, size - len, "fw2wbm_pkt_drops %u\n",
+			 ppeds_stats->fw2wbm_pkt_drops);
+	len += scnprintf(buf + len, size - len, "num_rx_desc_freed %u\n",
+			 ppeds_stats->num_rx_desc_freed);
+	len += scnprintf(buf + len, size - len, "num_rx_desc_realloc %u\n",
+			 ppeds_stats->num_rx_desc_realloc);
+
+	/* scnprintf never overruns, but clamp defensively. */
+	if (len > size)
+		len = size;
+
+	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+
+	return retval;
+}
+
+/* debugfs write handler for "ppeds_stats": writing a string containing
+ * "reset" clears all PPE-DS counters.
+ */
+static ssize_t
+ath12k_debugfs_write_ppeds_stats(struct file *file,
+				  const char __user *user_buf,
+				  size_t count, loff_t *ppos)
+{
+	struct ath12k_base *ab = file->private_data;
+	struct ath12k_ppeds_stats *ppeds_stats = &ab->ppeds_stats;
+	char buf[20] = {0};
+	int ret;
+
+	/* Reserve one byte for the NUL terminator: with the original
+	 * "count > sizeof(buf)" check a 20-byte write left buf without
+	 * a terminator and strstr() below could read past the array.
+	 */
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	ret = copy_from_user(buf, user_buf, count);
+	if (ret)
+		return -EFAULT;
+
+	if (strstr(buf, "reset"))
+		memset(ppeds_stats, 0, sizeof(struct ath12k_ppeds_stats));
+
+	return count;
+}
+
+/* file_operations for the "ppeds_stats" debugfs node */
+static const struct file_operations fops_ppeds_stats = {
+	.open = simple_open,
+	.read = ath12k_debugfs_dump_ppeds_stats,
+	.write = ath12k_debugfs_write_ppeds_stats,
+	.llseek = default_llseek,
+	.owner = THIS_MODULE,
+};
+#endif
+
+/* Create the SoC-level debugfs entries under ab->debugfs_soc.
+ *
+ * Skipped once the device is registered (entries already exist).
+ * Always returns 0; debugfs_create_file() failures are intentionally
+ * ignored, as is customary for debugfs.
+ */
+int ath12k_debugfs_pdev_create(struct ath12k_base *ab)
+{
+	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
+		return 0;
+
+	debugfs_create_file("fw_dbglog_config", 0600, ab->debugfs_soc, ab,
+			    &fops_fw_dbglog);
+
+	debugfs_create_file("set_fw_recovery", 0600, ab->debugfs_soc, ab,
+			    &fops_fw_recovery);
+
+	debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
+			    &fops_simulate_fw_crash);
+
+	debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab,
+			    &fops_soc_dp_stats);
+
+	debugfs_create_file("dump_srng_stats", 0600, ab->debugfs_soc, ab,
+			    &fops_dump_hal_stats);
+
+	debugfs_create_file("trace_qdss", 0600, ab->debugfs_soc, ab,
+			    &fops_trace_qdss);
+
+	debugfs_create_file("stats_disable", 0600, ab->debugfs_soc, ab,
+			    &fops_soc_stats_disable);
+
+	debugfs_create_file("rx_hash_ix2", 0600, ab->debugfs_soc, ab,
+			    &fops_soc_rx_hash_ix2);
+
+	debugfs_create_file("rx_hash_ix3", 0600, ab->debugfs_soc, ab,
+			    &fops_soc_rx_hash_ix3);
+
+	debugfs_create_file("fw_reset_stats", 0400, ab->debugfs_soc, ab,
+			    &fops_fw_reset_stats);
+
+	/* PPE-DS stats only exist when direct switching is active. */
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	if (test_bit(ATH12K_FLAG_PPE_DS_ENABLED, &ab->dev_flags))
+		debugfs_create_file("ppeds_stats", 0600, ab->debugfs_soc, ab,
+				    &fops_ppeds_stats);
+#endif
+	return 0;
+}
+
+/* Counterpart of ath12k_debugfs_pdev_create(). Intentionally empty:
+ * the whole per-SoC directory is removed recursively by
+ * ath12k_debugfs_soc_destroy(), so no per-entry cleanup is needed.
+ */
+void ath12k_debugfs_pdev_destroy(struct ath12k_base *ab)
+{
+}
+
+/* Create the per-SoC debugfs directory under the ath12k root.
+ * Returns 0 on success (or if it already exists), else a PTR errno.
+ */
+int ath12k_debugfs_soc_create(struct ath12k_base *ab)
+{
+	char name[64] = { 0 };
+
+	/* Already created: nothing to do. */
+	if (!IS_ERR_OR_NULL(ab->debugfs_soc))
+		return 0;
+
+	/* User PDs get a numeric suffix; the root PD uses the device name. */
+	if (ab->userpd_id)
+		snprintf(name, sizeof(name), "%s_%d", ab->hw_params->name,
+			 ab->userpd_id);
+	else
+		snprintf(name, sizeof(name), "%s_%s", ab->hw_params->name,
+			 dev_name(ab->dev));
+
+	ab->debugfs_soc = debugfs_create_dir(name, debugfs_ath12k);
+
+	return PTR_ERR_OR_ZERO(ab->debugfs_soc);
+}
+
+/* Remove the per-SoC debugfs directory and forget the dentry. */
+void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
+{
+	/* When the ath12k root is already gone, ath12k_debugfs_destroy()
+	 * removed this directory too; only the stale pointer remains.
+	 */
+	if (debugfs_ath12k)
+		debugfs_remove_recursive(ab->debugfs_soc);
+
+	ab->debugfs_soc = NULL;
+}
+
+/* Create the per-pdev "fw_stats" debugfs directory and its entries. */
+void ath12k_debugfs_fw_stats_init(struct ath12k *ar)
+{
+	struct dentry *dir;
+
+	dir = debugfs_create_dir("fw_stats", ar->debug.debugfs_pdev);
+	ar->fw_stats.debugfs_fwstats = dir;
+
+	/* all stats debugfs files created are under "fw_stats" directory
+	 * created per PDEV
+	 */
+	debugfs_create_file("pdev_stats", 0600, dir, ar, &fops_pdev_stats);
+	debugfs_create_file("vdev_stats", 0600, dir, ar, &fops_vdev_stats);
+	debugfs_create_file("beacon_stats", 0600, dir, ar, &fops_bcn_stats);
+	debugfs_create_file("en_vdev_stats_ol", 0600, dir, ar,
+			    &fops_vdev_stats_offload);
+}
+
+/* Apply @tlv_filter to every rxdma monitor destination ring of this
+ * pdev, stopping at the first failure.
+ *
+ * Fix: @ret was returned uninitialized when num_rxmda_per_pdev is 0;
+ * it is now initialized to 0 so the no-ring case reports success.
+ */
+int ath12k_pktlog_rx_filter_setting(struct ath12k *ar,
+                                    struct htt_rx_ring_tlv_filter
+                                    *tlv_filter)
+{
+	int ret = 0, i;
+	u32 ring_id;
+
+	for (i = 0; i < ar->ab->hw_params->num_rxmda_per_pdev; i++) {
+		ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+		ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+						       ar->dp.mac_id + i,
+						       HAL_RXDMA_MONITOR_DST,
+						       DP_RXDMA_REFILL_RING_SIZE,
+						       tlv_filter);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+/* Configure the TX monitor filter for hybrid pktlog mode.
+ *
+ * @filter: pktlog filter bits requested by the user (stored for readback)
+ * @mode: ATH12K_PKTLOG_MODE_FULL / ATH12K_PKTLOG_MODE_LITE; any other
+ *        value programs an empty (default) TX filter.
+ *
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix: @tx_filter was read uninitialized on the default (neither FULL
+ * nor LITE) path; it is now zero-initialized.
+ */
+static int ath12k_pktlog_enable_hybrid_mode(struct ath12k *ar, u32 filter,
+					    u32 mode)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct htt_tx_ring_tlv_filter tlv_filter = {0};
+	u32 tx_filter = 0, tx_buf_size, ring_id;
+	int ret;
+
+	if (mode == ATH12K_PKTLOG_MODE_FULL) {
+		tx_filter = HTT_TX_MON_FILTER_HYBRID_MODE;
+		tx_buf_size = DP_TX_MONITOR_BUF_SIZE;
+	} else if (mode == ATH12K_PKTLOG_MODE_LITE) {
+		ret = ath12k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+							  HTT_PPDU_STATS_TAG_PKTLOG);
+		if (ret) {
+			ath12k_err(ar->ab, "failed to enable pktlog lite: %d\n", ret);
+			goto out;
+		}
+		tx_filter = HTT_TX_MON_FILTER_HYBRID_MODE;
+		tx_buf_size = DP_RX_BUFFER_SIZE_LITE;
+	} else {
+		/* TODO: Default tx filter configuration.
+		 * tx_filter stays 0 on this path.
+		 */
+		tx_buf_size = DP_TX_MONITOR_BUF_SIZE;
+	}
+
+	tlv_filter.tx_mon_upstream_tlv_flags0 = tx_filter;
+	tlv_filter.tx_mon_upstream_tlv_flags2 =
+		HTT_TX_FILTER_TLV_FLAGS2_TXPCU_PHYTX_OTHER_TRANSMIT_INFO32;
+
+	ring_id = ab->dp.tx_mon_buf_ring.refill_buf_ring.ring_id;
+	ret = ath12k_dp_tx_htt_tx_filter_setup(ab, ring_id, 0,
+					       HAL_TX_MONITOR_BUF,
+					       tx_buf_size,
+					       &tlv_filter);
+	if (ret) {
+		ath12k_warn(ab, "failed to set tx filter for monitor buf %d\n", ret);
+		goto out;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI, "pktlog filter %d hybrid mode %s\n",
+		   filter, ((mode == ATH12K_PKTLOG_MODE_FULL) ? "full" : "lite"));
+
+	/* Remember the active configuration for the read handler. */
+	ar->debug.pktlog_filter = filter;
+	ar->debug.pktlog_mode = mode;
+out:
+	return ret;
+}
+
+/* debugfs write handler for "pktlog_filter".
+ *
+ * Input: "0x<filter> <mode> [<hybrid>]" — filter bits (hex), mode
+ * (full/lite), and an optional hybrid-enable flag. Programs the WMI
+ * pktlog filter, then reconfigures the RX monitor rings (or the TX
+ * monitor ring in hybrid mode) to match.
+ */
+static ssize_t ath12k_write_pktlog_filter(struct file *file,
+					  const char __user *ubuf,
+					  size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	struct ath12k_base *ab = ar->ab;
+	struct htt_rx_ring_tlv_filter tlv_filter = {0};
+	u32 rx_filter = 0, filter, mode;
+	u8 buf[128] = {0};
+	int ret, hybrid_enable, rx_buf_sz;
+	ssize_t rc;
+
+	mutex_lock(&ar->conf_mutex);
+	if (ar->state != ATH12K_STATE_ON) {
+		ret = -ENETDOWN;
+		goto out;
+	}
+
+	rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+	if (rc < 0) {
+		ret = rc;
+		goto out;
+	}
+	buf[rc] = '\0';
+
+	/* filter and mode are mandatory; hybrid_enable is optional. */
+	ret = sscanf(buf, "0x%x %u %d", &filter, &mode, &hybrid_enable);
+	if (ret < 2) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (ret == 2)
+		hybrid_enable = 0;
+
+	if ((filter & ATH12K_PKTLOG_RX) && hybrid_enable) {
+		ret = -EINVAL;
+		ath12k_warn(ab,
+			    "Invalid configuration. Hybrid mode is only applicable when tx or lite pktlog is used");
+		goto out;
+	}
+
+	/* TODO : Hybrid mode is not supported yet.
+	 * Bring up will happen once the FW design is finalised.
+	 */
+	if (hybrid_enable)
+		filter |= ATH12K_PKTLOG_HYBRID;
+	ret = ath12k_wmi_pdev_pktlog_enable(ar, filter);
+	if (ret) {
+		ath12k_warn(ab,
+			    "failed to enable pktlog filter %x: %d\n",
+			     ar->debug.pktlog_filter, ret);
+		goto out;
+	}
+
+	/*
+	 * TODO : Filter setting needs to be revisited after enabling TX monitor
+	 */
+
+	/* Clear rx filter set for monitor mode and rx status */
+	tlv_filter.offset_valid = false;
+
+	ret = ath12k_pktlog_rx_filter_setting(ar, &tlv_filter);
+	if (ret) {
+		ath12k_warn(ab, "failed to clear rx filter for monitor dest ring %d\n",
+		            ret);
+		goto out;
+	}
+
+	/* Hybrid mode configures the TX monitor ring instead of RX. */
+	if (filter & ATH12K_PKTLOG_HYBRID) {
+		ret = ath12k_pktlog_enable_hybrid_mode(ar, filter, mode);
+		if (ret) {
+			ath12k_warn(ab, "failed to enable pktlog in hybrid mode\n");
+		} else {
+			ret = count;
+		}
+		goto out;
+	}
+
+#define HTT_RX_FILTER_TLV_LITE_MODE \
+			(HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+			HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+			HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+			HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+			HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
+			HTT_RX_FILTER_TLV_FLAGS_MPDU_START)
+
+	if (mode == ATH12K_PKTLOG_MODE_FULL) {
+		rx_filter = HTT_RX_FILTER_TLV_LITE_MODE |
+			    HTT_RX_FILTER_TLV_FLAGS_MSDU_START |
+			    HTT_RX_FILTER_TLV_FLAGS_MSDU_END |
+			    HTT_RX_FILTER_TLV_FLAGS_MPDU_END |
+			    HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER |
+			    HTT_RX_FILTER_TLV_FLAGS_ATTENTION;
+		rx_buf_sz = DP_RXDMA_REFILL_RING_SIZE;
+	} else if (mode == ATH12K_PKTLOG_MODE_LITE) {
+		ret = ath12k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+							  HTT_PPDU_STATS_TAG_PKTLOG);
+		if (ret) {
+			ath12k_err(ab, "failed to enable pktlog lite: %d\n", ret);
+			goto out;
+		}
+
+		rx_filter = HTT_RX_FILTER_TLV_LITE_MODE;
+		rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
+	} else {
+		/* Neither full nor lite: fall back to the default monitor
+		 * status filter and PPDU stats mask.
+		 */
+		rx_buf_sz = DP_RX_BUFFER_SIZE;
+		tlv_filter = ath12k_mac_mon_status_filter_default;
+		rx_filter = tlv_filter.rx_filter;
+
+		ret = ath12k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+							  HTT_PPDU_STATS_TAG_DEFAULT);
+		if (ret) {
+			ath12k_err(ab, "failed to send htt ppdu stats req: %d\n",
+				   ret);
+			goto out;
+		}
+	}
+
+	tlv_filter.rx_filter = rx_filter;
+	if (rx_filter) {
+		tlv_filter.pkt_filter_flags0 =
+					HTT_RX_FP_MGMT_FILTER_FLAGS0 |
+					HTT_RX_MO_MGMT_FILTER_FLAGS0;
+		tlv_filter.pkt_filter_flags1 =
+					HTT_RX_FP_MGMT_FILTER_FLAGS1 |
+					HTT_RX_MO_MGMT_FILTER_FLAGS1;
+		tlv_filter.pkt_filter_flags2 =
+					HTT_RX_FP_CTRL_FILTER_FLASG2 |
+					HTT_RX_MO_CTRL_FILTER_FLASG2;
+		tlv_filter.pkt_filter_flags3 =
+					HTT_RX_FP_CTRL_FILTER_FLASG3 |
+					HTT_RX_MO_CTRL_FILTER_FLASG3 |
+					HTT_RX_FP_DATA_FILTER_FLASG3 |
+					HTT_RX_MO_DATA_FILTER_FLASG3;
+	}
+
+	ret = ath12k_pktlog_rx_filter_setting(ar, &tlv_filter);
+	if (ret) {
+		ath12k_warn(ab, "failed to set rx filter for monitor dest ring %d\n",
+		            ret);
+		goto out;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI, "pktlog filter %d mode %s\n",
+		   filter, ((mode == ATH12K_PKTLOG_MODE_FULL) ? "full" : "lite"));
+
+	/* Remember the active configuration for the read handler. */
+	ar->debug.pktlog_filter = filter;
+	ar->debug.pktlog_mode = mode;
+	ret = count;
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* debugfs read handler for "pktlog_filter": report the active filter
+ * and mode as two zero-padded hex words.
+ */
+static ssize_t ath12k_read_pktlog_filter(struct file *file,
+					 char __user *ubuf,
+					 size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	char buf[32] = {0};
+	int len;
+
+	/* Serialize against concurrent filter updates. */
+	mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf), "%08x %08x\n",
+			ar->debug.pktlog_filter,
+			ar->debug.pktlog_mode);
+	mutex_unlock(&ar->conf_mutex);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+/* file_operations for the "pktlog_filter" debugfs node */
+static const struct file_operations fops_pktlog_filter = {
+	.open = simple_open,
+	.read = ath12k_read_pktlog_filter,
+	.write = ath12k_write_pktlog_filter
+};
+
+/* Bit layout of the radar_params word sent to firmware for radar
+ * simulation.
+ * NOTE(review): "CHRIP_ID" is presumably a typo for CHIRP_ID; kept
+ * as-is in case the macro is referenced later in the file.
+ */
+#define SEGMENT_ID  GENMASK(1,0)
+#define CHRIP_ID    BIT(2)
+#define OFFSET      GENMASK(10,3)
+#define DETECTOR_ID GENMASK(12,11)
+#define FHSS	    BIT(14)
+
+/* debugfs write handler for "dfs_simulate_radar".
+ *
+ * Input: "<segment> <radar_type> <offset> <agile>" (segment/type/agile
+ * in hex, offset decimal). A write of 1-2 bytes keeps all parameters
+ * at zero for backward compatibility with the old no-argument form.
+ */
+static ssize_t ath12k_write_simulate_radar(struct file *file,
+					   const char __user *user_buf,
+					   size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	int ret;
+	u32 radar_params;
+	u8 agile = 0, segment = 0, radar_type = 0, chirp = 0, fhss = 0;
+	int offset = 0;
+	int len;
+	char buf[64], *token, *sptr;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len)) {
+		return -EFAULT;
+	}
+
+	/* For backward compatibility */
+	if (len <= 2)
+		goto send_cmd;
+
+	buf[len] = '\0';
+	sptr = buf;
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+
+	if (kstrtou8(token, 16, &segment))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+
+	if (kstrtou8(token, 16, &radar_type))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+
+	if (kstrtoint(token, 10, &offset))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+
+	if (kstrtou8(token, 16, &agile))
+		return -EINVAL;
+
+	if ((segment > 1) || (radar_type > 2) || (agile > 2))
+		return -EINVAL;
+
+	/* Agile detection needs an agile channel to be configured. */
+	if (agile && ar->agile_chandef.chan == NULL)
+		return -EINVAL;
+
+send_cmd:
+	/* radar_type 1 is for chirp, radar_type 2 is for FHSS */
+	if (radar_type == 1)
+		chirp = 1;
+	if (radar_type == 2)
+		fhss = 1;
+
+	radar_params = u32_encode_bits(segment, SEGMENT_ID) |
+		       u32_encode_bits(chirp, CHRIP_ID) |
+		       u32_encode_bits(offset, OFFSET) |
+		       u32_encode_bits(agile, DETECTOR_ID) |
+		       u32_encode_bits(fhss, FHSS);
+
+	ret = ath12k_wmi_simulate_radar(ar, radar_params);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+/* write-only debugfs node triggering a simulated radar event */
+static const struct file_operations fops_simulate_radar = {
+	.open = simple_open,
+	.write = ath12k_write_simulate_radar
+};
+
+/* Map a preamble/bandwidth enum to the CTL power-table mode index for
+ * the band the TPC stats were captured on.
+ *
+ * The band is derived from the channel frequency: above 5920 MHz is
+ * treated as 6 GHz, above 4800 MHz as 5 GHz, otherwise 2 GHz.
+ *
+ * Always returns 0; *mode_idx carries the result.
+ */
+static int ath12k_get_tpc_ctl_mode_idx(struct wmi_tpc_stats_event *tpc_stats,
+					enum wmi_tpc_pream_bw pream_bw, int *mode_idx)
+{
+	u8 band;
+
+	band = (((tpc_stats->tpc_config.chan_freq) > 5920) ? NL80211_BAND_6GHZ :
+					(((tpc_stats->tpc_config.chan_freq) > 4800) ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ));
+
+	if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) {
+		switch (pream_bw) {
+		case WMI_TPC_PREAM_HT20:
+		case WMI_TPC_PREAM_VHT20:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT_VHT20_5G_6G;
+			break;
+		case WMI_TPC_PREAM_HE20:
+		case WMI_TPC_PREAM_EHT20:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT20_5G_6G;
+			break;
+		case WMI_TPC_PREAM_HT40:
+		case WMI_TPC_PREAM_VHT40:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT_VHT40_5G_6G;
+			break;
+		case WMI_TPC_PREAM_HE40:
+		case WMI_TPC_PREAM_EHT40:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT40_5G_6G;
+			break;
+		case WMI_TPC_PREAM_VHT80:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_VHT80_5G_6G;
+			break;
+		/* Partial-bandwidth EHT entries map onto the punctured
+		 * variants of the next-larger full bandwidth.
+		 */
+		case WMI_TPC_PREAM_EHT60:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT80_SU_PUNC20;
+			break;
+		case WMI_TPC_PREAM_HE80:
+		case WMI_TPC_PREAM_EHT80:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT80_5G_6G;
+			break;
+		case WMI_TPC_PREAM_VHT160:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_VHT160_5G_6G;
+			break;
+		case WMI_TPC_PREAM_EHT120:
+		case WMI_TPC_PREAM_EHT140:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT160_SU_PUNC20;
+			break;
+		case WMI_TPC_PREAM_HE160:
+		case WMI_TPC_PREAM_EHT160:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT160_5G_6G;
+			break;
+		case WMI_TPC_PREAM_EHT200:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC120;
+			break;
+		case WMI_TPC_PREAM_EHT240:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC80;
+			break;
+		case WMI_TPC_PREAM_EHT280:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC40;
+			break;
+		case WMI_TPC_PREAM_EHT320:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT320_5G_6G;
+			break;
+		default:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_LEGACY_5G_6G; /* for 5G and 6G, default case will be for OFDM */
+			break;
+		}
+	} else {
+		switch (pream_bw) {
+		case WMI_TPC_PREAM_OFDM:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_LEGACY_2G;
+			break;
+		case WMI_TPC_PREAM_HT20:
+		case WMI_TPC_PREAM_VHT20:
+		case WMI_TPC_PREAM_HE20:
+		case WMI_TPC_PREAM_EHT20:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT20_2G;
+			break;
+		case WMI_TPC_PREAM_HT40:
+		case WMI_TPC_PREAM_VHT40:
+		case WMI_TPC_PREAM_HE40:
+		case WMI_TPC_PREAM_EHT40:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT40_2G;
+			break;
+		default:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_CCK_2G; //for 2G, default case will be CCK
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/* Compute the transmit power (TPC) for one rate entry.
+ *
+ * The result is the minimum of the FW rates array, the CTL power
+ * array and the regulatory power table, capped at MODULATION_LIMIT.
+ * Returns TPC_INVAL when the inputs are inconsistent (more spatial
+ * streams than chains, a single TX chain, or a bad mode index).
+ *
+ * @rate_idx: index into rates_array1 (non-EHT rates)
+ * @eht_rate_idx: index into rates_array2 (EHT rates)
+ */
+static s16 ath12k_tpc_get_rate(struct ath12k *ar,
+			       struct wmi_tpc_stats_event *tpc_stats,
+			       u32 rate_idx, u32 num_chains, u32 rate_code,
+			       enum wmi_tpc_pream_bw pream_bw,
+			       enum ath12k_dbg_tpc_stats_type type,
+			       u32 eht_rate_idx)
+{
+	s8 rates_ctl_min, tpc_ctl;
+	u8 chain_idx, stm_idx, num_streams;
+	s16 rates, tpc, reg_pwr;
+	u32 tot_nss, tot_modes, txbf_on_off;
+	u32 index_offset1, index_offset2, index_offset3;
+	int mode, ret, txbf_enabled;
+	bool is_mu;
+
+	num_streams = 1 + ATH12K_HW_NSS(rate_code);
+	chain_idx = num_chains - 1;
+	stm_idx = num_streams - 1;
+	mode = -1;
+
+	ret = ath12k_get_tpc_ctl_mode_idx(tpc_stats, pream_bw, &mode);
+	if (ret) {
+		ath12k_warn(ar->ab, "Invalid mode index received\n");
+		tpc = TPC_INVAL;
+		goto out;
+	}
+
+	if (num_chains < num_streams) {
+		tpc = TPC_INVAL;
+		goto out;
+	}
+
+	/* NOTE(review): single-chain configs are rejected here — confirm
+	 * this is intentional rather than an off-by-one (<= 1 vs < 1).
+	 */
+	if (__le32_to_cpu(tpc_stats->tpc_config.num_tx_chain) <= 1) {
+		tpc = TPC_INVAL;
+		goto out;
+	}
+
+	if (type == ATH12K_DBG_TPC_STATS_MU_WITH_TXBF ||
+	    type == ATH12K_DBG_TPC_STATS_SU_WITH_TXBF)
+		txbf_enabled = 1;
+	else
+		txbf_enabled = 0;
+
+	if (type == ATH12K_DBG_TPC_STATS_MU_WITH_TXBF ||
+	    type == ATH12K_DBG_TPC_STATS_MU) {
+		is_mu = true;
+	} else {
+		is_mu = false;
+	}
+
+	/* Below is the min calculation of ctl array, rates array and
+	 * regulator power table. tpc is minimum of all 3
+	 */
+	if (pream_bw >= WMI_TPC_PREAM_EHT20 && pream_bw <= WMI_TPC_PREAM_EHT320) {
+		/* EHT rate powers live in the second rates array. */
+		if (is_mu) {
+			rates = FIELD_GET(ATH12K_TPC_RATE_ARRAY_MU,
+					le16_to_cpu (tpc_stats->rates_array2.rate_array[eht_rate_idx]));
+		} else {
+			rates = FIELD_GET(ATH12K_TPC_RATE_ARRAY_SU,
+					le16_to_cpu (tpc_stats->rates_array2.rate_array[eht_rate_idx]));
+		}
+	} else {
+		if (is_mu) {
+			rates = FIELD_GET(ATH12K_TPC_RATE_ARRAY_MU,
+					le16_to_cpu (tpc_stats->rates_array1.rate_array[rate_idx]));
+		} else {
+			rates = FIELD_GET(ATH12K_TPC_RATE_ARRAY_SU,
+					le16_to_cpu (tpc_stats->rates_array1.rate_array[rate_idx]));
+		}
+	}
+
+	if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
+		/* The CTL table is a 4-D array indexed by
+		 * [chain][txbf][mode][stream]; flatten the index.
+		 */
+		tot_nss = tpc_stats->ctl_array.tpc_ctl_pwr.d1;
+		tot_modes = tpc_stats->ctl_array.tpc_ctl_pwr.d2;
+		txbf_on_off = tpc_stats->ctl_array.tpc_ctl_pwr.d3;
+		index_offset1 = txbf_on_off * tot_modes * tot_nss;
+		index_offset2 = tot_modes * tot_nss;
+		index_offset3 = tot_nss;
+
+		tpc_ctl = *(tpc_stats->ctl_array.ctl_pwr_table +
+			    chain_idx * index_offset1 + txbf_enabled * index_offset2
+			    + mode * index_offset3 + stm_idx);
+	} else {
+		tpc_ctl = TPC_MAX;
+		ath12k_info(ar->ab,
+			    "ctl array for tpc stats not received from fw\n");
+	}
+
+	rates_ctl_min = min_t(s16, rates, tpc_ctl);
+
+	reg_pwr = le16_to_cpu(tpc_stats->max_reg_allowed_power.reg_pwr_array[chain_idx]);
+
+	if (reg_pwr < 0)
+		reg_pwr = TPC_INVAL;
+
+	tpc = min_t(s16, rates_ctl_min, reg_pwr);
+
+	/* MODULATION_LIMIT is the maximum power limit,tpc should not exceed
+	 * modulation limt even if min tpc of all three array is greater
+	 * modulation limit
+	 */
+	tpc = min_t(s16, tpc, MODULATION_LIMIT);
+
+out:
+	return tpc;
+}
+
+/* Build a WMI rate code (preamble << 8 | nss << 5 | mcs) for display
+ * in the TPC stats table. Returns (u16)~0 for an unknown preamble.
+ */
+u16 ath12k_get_ratecode(u16 pream_idx, u16 nss, u16 mcs_rate)
+{
+	u16 preamble;
+
+	/* Below assignments are just for printing purpose only */
+	switch (pream_idx) {
+	case WMI_TPC_PREAM_CCK:
+		preamble = WMI_RATE_PREAMBLE_CCK;
+		break;
+	case WMI_TPC_PREAM_OFDM:
+		preamble = WMI_RATE_PREAMBLE_OFDM;
+		break;
+	case WMI_TPC_PREAM_HT20:
+	case WMI_TPC_PREAM_HT40:
+		preamble = WMI_RATE_PREAMBLE_HT;
+		break;
+	case WMI_TPC_PREAM_VHT20:
+	case WMI_TPC_PREAM_VHT40:
+	case WMI_TPC_PREAM_VHT80:
+	case WMI_TPC_PREAM_VHT160:
+		preamble = WMI_RATE_PREAMBLE_VHT;
+		break;
+	case WMI_TPC_PREAM_HE20:
+	case WMI_TPC_PREAM_HE40:
+	case WMI_TPC_PREAM_HE80:
+	case WMI_TPC_PREAM_HE160:
+		preamble = WMI_RATE_PREAMBLE_HE;
+		break;
+	case WMI_TPC_PREAM_EHT20:
+	case WMI_TPC_PREAM_EHT40:
+	case WMI_TPC_PREAM_EHT60:
+	case WMI_TPC_PREAM_EHT80:
+	case WMI_TPC_PREAM_EHT120:
+	case WMI_TPC_PREAM_EHT140:
+	case WMI_TPC_PREAM_EHT160:
+	case WMI_TPC_PREAM_EHT200:
+	case WMI_TPC_PREAM_EHT240:
+	case WMI_TPC_PREAM_EHT280:
+	case WMI_TPC_PREAM_EHT320:
+		preamble = WMI_RATE_PREAMBLE_EHT;
+		/* EHT MCS 0/1 are encoded as 14/15, all others shift by 2. */
+		mcs_rate = (mcs_rate <= 1) ? mcs_rate + 14 : mcs_rate - 2;
+		break;
+	default:
+		/* Unknown preamble: report the all-ones marker. */
+		return (u16)~0;
+	}
+
+	return ((preamble << 8) | ((nss & 0x7) << 5) | (mcs_rate & 0x1F));
+}
+
+/* Report whether the pdev advertises the extra HE MCS (10/11) support
+ * bit for the band containing @freq.
+ */
+static bool ath12k_he_supports_extra_mcs(struct ath12k *ar, int freq)
+{
+	struct ath12k_pdev_cap *cap = &ar->pdev->cap;
+	struct ath12k_band_cap *band_cap;
+
+	/* Pick the band capability entry matching the channel frequency. */
+	if (freq <= ATH12K_2G_MAX_FREQUENCY)
+		band_cap = &cap->band[NL80211_BAND_2GHZ];
+	else if (freq <= ATH12K_5G_MAX_FREQUENCY)
+		band_cap = &cap->band[NL80211_BAND_5GHZ];
+	else
+		band_cap = &cap->band[NL80211_BAND_6GHZ];
+
+	return FIELD_GET(HE_EXTRA_MCS_SUPPORT, band_cap->he_cap_info[1]);
+}
+
+/* Append one preamble section (all NSS x MCS rows) of the TPC stats
+ * table to @buf and return the updated length.
+ *
+ * @max_rix: running rate index printed in the first column
+ * @rate_idx: index into rates_array1 for non-EHT powers
+ * @eht_rate_idx: index into rates_array2 for EHT powers
+ */
+static int ath12k_tpc_fill_pream(struct ath12k *ar, char *buf, int buf_len, int len,
+				 enum wmi_tpc_pream_bw pream_bw, u32 max_rix, int max_nss, int max_rates,
+				 int pream_type, enum ath12k_dbg_tpc_stats_type tpc_type,
+				 int rate_idx, int eht_rate_idx)
+{
+	int nss, rates, chains;
+	u8 active_tx_chains;
+	u16 rate_code;
+	s16 tpc;
+	struct wmi_tpc_stats_event *tpc_stats = ar->tpc_stats;
+
+	static const char *const pream_str[] = {
+		[WMI_TPC_PREAM_CCK]	= "CCK",
+		[WMI_TPC_PREAM_OFDM]	= "OFDM",
+		[WMI_TPC_PREAM_HT20]	= "HT20",
+		[WMI_TPC_PREAM_HT40]	= "HT40",
+		[WMI_TPC_PREAM_VHT20]	= "VHT20",
+		[WMI_TPC_PREAM_VHT40]	= "VHT40",
+		[WMI_TPC_PREAM_VHT80]	= "VHT80",
+		[WMI_TPC_PREAM_VHT160]	= "VHT160",
+		[WMI_TPC_PREAM_HE20]	= "HE20",
+		[WMI_TPC_PREAM_HE40]	= "HE40",
+		[WMI_TPC_PREAM_HE80]	= "HE80",
+		[WMI_TPC_PREAM_HE160]	= "HE160",
+		[WMI_TPC_PREAM_EHT20]   = "EHT20",
+		[WMI_TPC_PREAM_EHT40]   = "EHT40",
+		[WMI_TPC_PREAM_EHT60]   = "EHT60",
+		[WMI_TPC_PREAM_EHT80]   = "EHT80",
+		[WMI_TPC_PREAM_EHT120]   = "EHT120",
+		[WMI_TPC_PREAM_EHT140]   = "EHT140",
+		[WMI_TPC_PREAM_EHT160]   = "EHT160",
+		[WMI_TPC_PREAM_EHT200]   = "EHT200",
+		[WMI_TPC_PREAM_EHT240]   = "EHT240",
+		[WMI_TPC_PREAM_EHT280]   = "EHT280",
+		[WMI_TPC_PREAM_EHT320]   = "EHT320"};
+
+	active_tx_chains = ar->num_tx_chains;
+
+	for (nss = 0; nss < max_nss; nss++) {
+		for (rates = 0; rates < max_rates; rates++, rate_idx++, max_rix++) {
+			/* FW send extra MCS(10&11) for VHT and HE rates,
+			 *  this is not used. Hence skipping it here
+			 */
+			if (pream_type == WMI_RATE_PREAMBLE_VHT &&
+			    rates > ATH12K_VHT_MCS_MAX)
+				continue;
+
+			if (pream_type == WMI_RATE_PREAMBLE_HE &&
+				rates > ATH12K_HE_MCS_MAX)
+				continue;
+
+			if (pream_type == WMI_RATE_PREAMBLE_EHT &&
+				rates > ATH12K_EHT_MCS_MAX)
+				continue;
+
+			rate_code = ath12k_get_ratecode(pream_bw, nss, rates);
+			len += scnprintf(buf + len, buf_len - len,
+				 "%d\t %s\t 0x%03x\t", max_rix,
+				 pream_str[pream_bw], rate_code);
+
+			/* One power column per active chain; entries where
+			 * NSS exceeds the chain count are not applicable.
+			 */
+			for (chains = 0; chains < active_tx_chains; chains++) {
+				if (nss > chains) {
+					len += scnprintf(buf + len,
+							 buf_len - len,
+							 "\t%s", "NA");
+				} else {
+					tpc = ath12k_tpc_get_rate(ar, tpc_stats, rate_idx,
+								  chains + 1, rate_code,
+								  pream_bw, tpc_type, eht_rate_idx);
+
+					if (tpc == TPC_INVAL) {
+						len += scnprintf(buf + len,
+						       buf_len - len, "\tNA");
+					} else {
+						len += scnprintf(buf + len,
+						       buf_len - len, "\t%d",
+						       tpc);
+					}
+				}
+			}
+			len += scnprintf(buf + len, buf_len - len, "\n");
+
+			if (pream_type == WMI_RATE_PREAMBLE_EHT)
+				/*For fetching the next eht rates pwr from rates array2*/
+				++eht_rate_idx;
+		}
+	}
+
+	return len;
+}
+
+/*
+ * ath12k_tpc_stats_print - format one TPC (transmit power control) table
+ * into @buf, starting at offset @len, for the requested stats @type
+ * (SU / SU+TXBF / MU / MU+TXBF).
+ *
+ * One row is emitted per rate index, with one power column per active TX
+ * chain; the per-row formatting is delegated to ath12k_tpc_fill_pream().
+ * Returns the updated number of bytes written into @buf.
+ */
+static int ath12k_tpc_stats_print(struct ath12k *ar,
+				  struct wmi_tpc_stats_event *tpc_stats,
+				  char *buf, size_t len, enum ath12k_dbg_tpc_stats_type type)
+{
+	u32 i, j = 1, eht_idx = 0, pream_idx = 0, rate_pream_idx = 0, total_rates = 0, max_rix = 0;
+	u8 nss, active_tx_chains;
+	size_t buf_len = ATH12K_TPC_STATS_BUF_SIZE;
+	bool he_ext_mcs;
+	static const char *const type_str[ATH12K_DBG_TPC_MAX_STATS] = {
+		[ATH12K_DBG_TPC_STATS_SU]		= "SU",
+		[ATH12K_DBG_TPC_STATS_SU_WITH_TXBF]	= "SU WITH TXBF",
+		[ATH12K_DBG_TPC_STATS_MU]		= "MU",
+		[ATH12K_DBG_TPC_STATS_MU_WITH_TXBF]	= "MU WITH TXBF"};
+
+	/* Number of rate entries per preamble/bandwidth combination. */
+	u8 max_rates[WMI_TPC_PREAM_MAX] = {
+		[WMI_TPC_PREAM_CCK]	= ATH12K_CCK_RATES,
+		[WMI_TPC_PREAM_OFDM]	= ATH12K_OFDM_RATES,
+		[WMI_TPC_PREAM_HT20]	= ATH12K_HT_RATES,
+		[WMI_TPC_PREAM_HT40]	= ATH12K_HT_RATES,
+		[WMI_TPC_PREAM_VHT20]	= ATH12K_VHT_RATES,
+		[WMI_TPC_PREAM_VHT40]	= ATH12K_VHT_RATES,
+		[WMI_TPC_PREAM_VHT80]	= ATH12K_VHT_RATES,
+		[WMI_TPC_PREAM_VHT160]	= ATH12K_VHT_RATES,
+		[WMI_TPC_PREAM_HE20]	= ATH12K_HE_RATES,
+		[WMI_TPC_PREAM_HE40]	= ATH12K_HE_RATES,
+		[WMI_TPC_PREAM_HE80]	= ATH12K_HE_RATES,
+		[WMI_TPC_PREAM_HE160]	= ATH12K_HE_RATES,
+		[WMI_TPC_PREAM_EHT20]   = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT40]   = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT60]   = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT80]   = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT120]  = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT140]  = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT160]  = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT200]  = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT240]  = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT280]  = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT320]  = ATH12K_EHT_RATES};
+
+	/* Maximum spatial-stream count per preamble/bandwidth combination. */
+	u8 max_nss[WMI_TPC_PREAM_MAX] = {
+		[WMI_TPC_PREAM_CCK]	= ATH12K_NSS_1,
+		[WMI_TPC_PREAM_OFDM]	= ATH12K_NSS_1,
+		[WMI_TPC_PREAM_HT20]	= ATH12K_NSS_4,
+		[WMI_TPC_PREAM_HT40]	= ATH12K_NSS_4,
+		[WMI_TPC_PREAM_VHT20]	= ATH12K_NSS_8,
+		[WMI_TPC_PREAM_VHT40]	= ATH12K_NSS_8,
+		[WMI_TPC_PREAM_VHT80]	= ATH12K_NSS_8,
+		[WMI_TPC_PREAM_VHT160]	= ATH12K_NSS_4,
+		[WMI_TPC_PREAM_HE20]	= ATH12K_NSS_8,
+		[WMI_TPC_PREAM_HE40]	= ATH12K_NSS_8,
+		[WMI_TPC_PREAM_HE80]	= ATH12K_NSS_8,
+		[WMI_TPC_PREAM_HE160]	= ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT20]   = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT40]   = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT60]   = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT80]   = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT120]  = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT140]  = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT160]  = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT200]  = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT240]  = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT280]  = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT320]  = ATH12K_NSS_4};
+
+	/* Per-preamble starting offsets into the rates arrays, filled below. */
+	u16 rate_idx[WMI_TPC_PREAM_MAX] = {0},
+	    eht_rate_idx[WMI_TPC_PREAM_MAX] = {0};
+
+	static const u8 pream_type[WMI_TPC_PREAM_MAX] = {
+		[WMI_TPC_PREAM_CCK]	= WMI_RATE_PREAMBLE_CCK,
+		[WMI_TPC_PREAM_OFDM]	= WMI_RATE_PREAMBLE_OFDM,
+		[WMI_TPC_PREAM_HT20]	= WMI_RATE_PREAMBLE_HT,
+		[WMI_TPC_PREAM_HT40]	= WMI_RATE_PREAMBLE_HT,
+		[WMI_TPC_PREAM_VHT20]	= WMI_RATE_PREAMBLE_VHT,
+		[WMI_TPC_PREAM_VHT40]	= WMI_RATE_PREAMBLE_VHT,
+		[WMI_TPC_PREAM_VHT80]	= WMI_RATE_PREAMBLE_VHT,
+		[WMI_TPC_PREAM_VHT160]	= WMI_RATE_PREAMBLE_VHT,
+		[WMI_TPC_PREAM_HE20]	= WMI_RATE_PREAMBLE_HE,
+		[WMI_TPC_PREAM_HE40]	= WMI_RATE_PREAMBLE_HE,
+		[WMI_TPC_PREAM_HE80]	= WMI_RATE_PREAMBLE_HE,
+		[WMI_TPC_PREAM_HE160]	= WMI_RATE_PREAMBLE_HE,
+		[WMI_TPC_PREAM_EHT20]   = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT40]   = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT60]   = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT80]   = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT120]  = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT140]  = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT160]  = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT200]  = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT240]  = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT280]  = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT320]  = WMI_RATE_PREAMBLE_EHT};
+
+	active_tx_chains = ar->num_tx_chains;
+	he_ext_mcs = ath12k_he_supports_extra_mcs(ar, tpc_stats->tpc_config.chan_freq);
+
+	/* mcs 12&13 is sent by FW for certain HWs in rate array, skipping it as
+	 * it is not supported
+	 */
+	if (he_ext_mcs) {
+		for (i = WMI_TPC_PREAM_HE20; i <= WMI_TPC_PREAM_HE160;  ++i)
+			max_rates[i] = ATH12K_HE_RATES;
+	}
+
+	/* MU tables start at VHT20; advance past the CCK..HT40 rate indices
+	 * so max_rix stays aligned with the firmware rate array.
+	 */
+	if (type == ATH12K_DBG_TPC_STATS_MU ||
+	    type == ATH12K_DBG_TPC_STATS_MU_WITH_TXBF) {
+		pream_idx = WMI_TPC_PREAM_VHT20;
+
+		for (i = WMI_TPC_PREAM_CCK; i <= WMI_TPC_PREAM_HT40; ++i) {
+			max_rix += max_nss[i] * max_rates[i];
+		}
+	}
+
+	/* Enumerate all the rate indices */
+	for (i = rate_pream_idx + 1 ; i < WMI_TPC_PREAM_MAX; i++) {
+		/* NSS is capped by the number of TX chains reported by FW. */
+		nss = (max_nss[i - 1] < tpc_stats->tpc_config.num_tx_chain ?
+		       max_nss[i - 1] : tpc_stats->tpc_config.num_tx_chain);
+
+		rate_idx[i] = rate_idx[i - 1] + max_rates[i - 1] * nss;
+
+		if (pream_type[i] == WMI_RATE_PREAMBLE_EHT) {
+			eht_rate_idx[j] = eht_rate_idx[j - 1] + max_rates[i] * nss;
+			/*For Filling the next eht_rate_idx for fetch rates pwr info rates array2*/
+			++j;
+		}
+	}
+
+	for (i = 0 ; i < WMI_TPC_PREAM_MAX; i++) {
+		nss = (max_nss[i] < tpc_stats->tpc_config.num_tx_chain ?
+		       max_nss[i] : tpc_stats->tpc_config.num_tx_chain);
+		total_rates += max_rates[i] * nss;
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "No.of rates-%d\n", total_rates);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "**************** %s ****************\n",
+			 type_str[type]);
+	len += scnprintf(buf + len, buf_len - len,
+			 "\t\t\t\tTPC values for Active chains\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "Rate idx Preamble Rate code");
+
+	for (i = 1; i <= active_tx_chains; ++i) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "\t%d-Chain", i);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	for (i = pream_idx; i < WMI_TPC_PREAM_MAX; i++) {
+		/* 2.4 GHz channel (<= 2483 MHz): skip the wide-bandwidth
+		 * entries that do not apply, but still advance max_rix so
+		 * later rows index the firmware arrays correctly.
+		 */
+		if (tpc_stats->tpc_config.chan_freq <= 2483) {
+			if (i == WMI_TPC_PREAM_VHT80 ||
+			    i == WMI_TPC_PREAM_VHT160 ||
+			    i == WMI_TPC_PREAM_HE80 ||
+			    i == WMI_TPC_PREAM_HE160 ||
+			    (i >= WMI_TPC_PREAM_EHT60 &&
+			     i <= WMI_TPC_PREAM_EHT320)) {
+				max_rix += max_nss[i] * max_rates[i];
+				continue;
+			}
+		} else {
+			/* CCK only exists on 2.4 GHz. */
+			if (i == WMI_TPC_PREAM_CCK) {
+				max_rix += max_rates[i];
+				continue;
+			}
+		}
+
+		nss = (max_nss[i] < ar->num_tx_chains ? max_nss[i] : ar->num_tx_chains);
+
+		/* Skip punctured-EHT tables when FW does not advertise
+		 * puncturing support in the TPC config caps.
+		 */
+		if (!(tpc_stats->tpc_config.caps & (1 << ATH12K_TPC_STATS_SUPPORT_BE_PUNC))) {
+			if (i == WMI_TPC_PREAM_EHT60 || i == WMI_TPC_PREAM_EHT120 ||
+			    i == WMI_TPC_PREAM_EHT140 || i == WMI_TPC_PREAM_EHT200 ||
+			    i == WMI_TPC_PREAM_EHT240 || i == WMI_TPC_PREAM_EHT280) {
+				max_rix += max_nss[i] * max_rates[i];
+				continue;
+			}
+		}
+
+		len = ath12k_tpc_fill_pream(ar, buf, buf_len, len, i, max_rix, nss,
+					    max_rates[i], pream_type[i],
+					    type, rate_idx[i], eht_rate_idx[eht_idx]);
+
+		if (pream_type[i] == WMI_RATE_PREAMBLE_EHT)
+			/*For fetch the next index eht rates from rates array2*/
+                        ++eht_idx;
+
+		max_rix += max_nss[i] * max_rates[i];
+	}
+	return len;
+}
+
+/*
+ * ath12k_tpc_stats_fill - format the TPC config header followed by the
+ * TPC table for the currently selected stats type into @buf.
+ *
+ * Runs under ar->data_lock so @tpc_stats cannot be freed concurrently;
+ * warns and bails out if the stats were never received.
+ */
+static void ath12k_tpc_stats_fill(struct ath12k *ar,
+				  struct wmi_tpc_stats_event *tpc_stats,
+				  char *buf)
+{
+	struct wmi_tpc_configs *tpc;
+	size_t len = 0;
+	size_t buf_len = ATH12K_TPC_STATS_BUF_SIZE;
+
+	spin_lock_bh(&ar->data_lock);
+	if (!tpc_stats) {
+		ath12k_warn(ar->ab, "failed to find tpc stats\n");
+		goto unlock;
+	}
+
+	tpc = &tpc_stats->tpc_config;
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "*************** TPC config **************\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "* powers are in 0.25 dBm steps\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "reg domain-%d\t\tchan freq-%d\n",
+			 tpc->reg_domain, tpc->chan_freq);
+	len += scnprintf(buf + len, buf_len - len,
+			 "power limit-%d\t\tmax reg-domain Power-%d\n",
+			 (tpc->twice_max_reg_power) / 2, tpc->power_limit);
+	len += scnprintf(buf + len, buf_len - len,
+			 "No.of tx chain-%d\t",
+			 ar->num_tx_chains);
+
+	/* Append the actual rate table after the header written above. */
+	ath12k_tpc_stats_print(ar, tpc_stats, buf, len,
+			       ar->tpc_stats_type);
+
+unlock:
+	spin_unlock_bh(&ar->data_lock);
+}
+
+/*
+ * ath12k_debug_tpc_stats_request - ask firmware for the TPC table and wait
+ * for the WMI completion.
+ *
+ * Caller must hold ar->conf_mutex.  Returns 0 on success, -ETIMEDOUT if
+ * the completion does not arrive within TPC_STATS_WAIT_TIME, or the WMI
+ * send error.  ar->tpc_request is set while the request is outstanding so
+ * the event handler knows the data is expected.
+ */
+static int ath12k_debug_tpc_stats_request(struct ath12k *ar)
+{
+	int ret;
+	unsigned long time_left;
+	struct ath12k_base *ab = ar->ab;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	reinit_completion(&ar->tpc_complete);
+
+	spin_lock_bh(&ar->data_lock);
+	ar->tpc_request = true;
+	spin_unlock_bh(&ar->data_lock);
+
+	ret = ath12k_wmi_pdev_get_tpc_table_cmdid(ar);
+	if (ret) {
+		ath12k_warn(ab, "failed to request tpc table cmdid: %d\n", ret);
+		goto out;
+	}
+
+	time_left = wait_for_completion_timeout(&ar->tpc_complete,
+						TPC_STATS_WAIT_TIME);
+
+	if (time_left == 0)
+		ret = -ETIMEDOUT;
+
+out:
+	/* Always clear the request flag, even on error/timeout. */
+	spin_lock_bh(&ar->data_lock);
+	ar->tpc_request = false;
+	spin_unlock_bh(&ar->data_lock);
+
+	return ret;
+}
+
+/*
+ * ath12k_tpc_stats_open - debugfs open handler for the tpc_stats file.
+ *
+ * Fetches fresh TPC stats from firmware, formats them into a buffer that
+ * is stashed in file->private_data for subsequent reads, then frees the
+ * firmware-event copy.  The buffer is released in the release handler.
+ */
+static int ath12k_tpc_stats_open(struct inode *inode, struct file *file)
+{
+	struct ath12k *ar = inode->i_private;
+	void *buf;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH12K_STATE_ON) {
+		ath12k_warn(ar->ab, "Interface not up\n");
+		ret = -ENETDOWN;
+		goto err_unlock;
+	}
+
+	buf = kmalloc(ATH12K_TPC_STATS_BUF_SIZE, GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
+
+	ret = ath12k_debug_tpc_stats_request(ar);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to request tpc stats: %d\n",
+			    ret);
+		/* Drop any partially received stats before bailing out. */
+		spin_lock_bh(&ar->data_lock);
+		ath12k_wmi_free_tpc_stats_mem(ar);
+		spin_unlock_bh(&ar->data_lock);
+		goto err_free;
+	}
+
+	ath12k_tpc_stats_fill(ar, ar->tpc_stats, buf);
+	file->private_data = buf;
+
+	/* Formatted copy is now in buf; the raw event memory can go. */
+	spin_lock_bh(&ar->data_lock);
+	ath12k_wmi_free_tpc_stats_mem(ar);
+	spin_unlock_bh(&ar->data_lock);
+	mutex_unlock(&ar->conf_mutex);
+
+	return 0;
+
+err_free:
+	kfree(buf);
+
+err_unlock:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* Release handler: free the formatted buffer allocated in open(). */
+static int ath12k_tpc_stats_release(struct inode *inode,
+				    struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
+/*
+ * Read handler: serve the NUL-terminated buffer prepared at open() time.
+ * Relies on the buffer being fully formatted by scnprintf (always
+ * terminated), so strlen() is safe here.
+ */
+static ssize_t ath12k_tpc_stats_read(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	const char *buf = file->private_data;
+	unsigned int len = strlen(buf);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+/* Report the currently selected TPC stats type (SU/MU, +/- TXBF). */
+static ssize_t ath12k_read_tpc_stats_type(struct file *file,
+					  char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	char buf[32];
+	size_t len;
+
+	len = scnprintf(buf, sizeof(buf), "%u\n", ar->tpc_stats_type);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+/*
+ * Select which TPC table the tpc_stats file shows.  Accepts values
+ * 0..ATH12K_DBG_TPC_MAX_STATS-1; anything larger is rejected with -E2BIG.
+ */
+static ssize_t ath12k_write_tpc_stats_type(struct file *file,
+					   const char __user *user_buf,
+					   size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	u8 type;
+	int ret;
+
+	ret = kstrtou8_from_user(user_buf, count, 0, &type);
+	if (ret)
+		return ret;
+
+	if (type >= ATH12K_DBG_TPC_MAX_STATS)
+		return -E2BIG;
+
+	/* NOTE(review): no locking here; presumably a racy-but-benign u8
+	 * store — confirm against readers of tpc_stats_type.
+	 */
+	ar->tpc_stats_type = type;
+
+	ret = count;
+
+	return ret;
+}
+
+/* debugfs ops for the tpc_stats_type selector file. */
+static const struct file_operations fops_tpc_stats_type = {
+	.read = ath12k_read_tpc_stats_type,
+	.write = ath12k_write_tpc_stats_type,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* debugfs ops for the tpc_stats dump file (open formats, read serves). */
+static const struct file_operations fops_tpc_stats = {
+	.open = ath12k_tpc_stats_open,
+	.release = ath12k_tpc_stats_release,
+	.read = ath12k_tpc_stats_read,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/*
+ * Simulate AWGN interference: the written value is a channel-bandwidth
+ * interference bitmap forwarded to firmware via WMI.
+ *
+ * NOTE(review): the kstrtou32 failure path unlocks and returns directly
+ * instead of using the common "goto exit" unlock path — functionally
+ * correct but inconsistent with the rest of the function.
+ */
+static ssize_t ath12k_write_simulate_awgn(struct file *file,
+                                          const char __user *user_buf,
+                                          size_t count, loff_t *ppos)
+{
+        struct ath12k *ar = file->private_data;
+        int ret;
+        u32 chan_bw_interference_bitmap;
+
+        mutex_lock(&ar->conf_mutex);
+        if (ar->state != ATH12K_STATE_ON) {
+                ret = -ENETDOWN;
+                goto exit;
+        }
+
+        if (kstrtou32_from_user(user_buf, count, 0, &chan_bw_interference_bitmap)) {
+		mutex_unlock(&ar->conf_mutex);
+                return -EINVAL;
+	}
+
+        ret = ath12k_wmi_simulate_awgn(ar, chan_bw_interference_bitmap);
+        if (ret)
+                goto exit;
+
+        ret = count;
+
+exit:
+        mutex_unlock(&ar->conf_mutex);
+        return ret;
+}
+
+/* debugfs ops for simulate_awgn (write-only). */
+static const struct file_operations fops_simulate_awgn = {
+        .write = ath12k_write_simulate_awgn,
+        .open = simple_open
+};
+
+/*
+ * ath12k_dump_mgmt_stats - dump per-vdev management-frame TX/RX counters
+ * (per frame subtype) for every non-monitor vdev on this radio.
+ */
+static ssize_t ath12k_dump_mgmt_stats(struct file *file,
+					char __user *ubuf,
+					size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	struct ath12k_link_vif *arvif = NULL;
+	struct ath12k_mgmt_frame_stats *mgmt_stats;
+	int len = 0, ret, i;
+	/* ~1500 bytes of formatted output budgeted per vdev. */
+	int size = (TARGET_NUM_VDEVS - 1) * 1500;
+	char *buf;
+	const char *mgmt_frm_type[ATH12K_STATS_MGMT_FRM_TYPE_MAX-1] = {
+		"assoc_req", "assoc_resp",
+		"reassoc_req", "reassoc_resp",
+		"probe_req", "probe_resp",
+		"timing_advertisement", "reserved",
+		"beacon", "atim", "disassoc",
+		"auth", "deauth", "action", "action_no_ack"};
+
+	if (ar->state != ATH12K_STATE_ON)
+		return -ENETDOWN;
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&ar->conf_mutex);
+	spin_lock_bh(&ar->data_lock);
+
+	list_for_each_entry (arvif, &ar->arvifs, list) {
+		/* NOTE(review): the list cursor is never NULL inside
+		 * list_for_each_entry; this check looks redundant — confirm
+		 * it isn't papering over something else.
+		 */
+		if (!arvif)
+			break;
+
+		if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+			continue;
+		mgmt_stats = &arvif->ahvif->mgmt_stats;
+		len += scnprintf(buf + len, size - len, "MGMT frame stats for vdev %u :\n", arvif->vdev_id);
+		len += scnprintf(buf + len, size - len, "  TX stats :\n ");
+		len += scnprintf(buf + len, size - len, "  Success frames:\n");
+		for (i = 0; i < ATH12K_STATS_MGMT_FRM_TYPE_MAX-1; i++)
+			len += scnprintf(buf + len, size - len, "       %s: %d\n",
+					mgmt_frm_type[i], mgmt_stats->tx_succ_cnt[i]);
+
+		len += scnprintf(buf + len, size - len, "  Failed frames:\n");
+
+		for (i = 0; i < ATH12K_STATS_MGMT_FRM_TYPE_MAX-1; i++)
+			len += scnprintf(buf + len, size - len, "       %s: %d\n",
+					mgmt_frm_type[i], mgmt_stats->tx_fail_cnt[i]);
+
+		len += scnprintf(buf + len, size - len, "  RX stats :\n");
+		len += scnprintf(buf + len, size - len, "  Success frames:\n");
+		for (i = 0; i < ATH12K_STATS_MGMT_FRM_TYPE_MAX-1; i++)
+			len += scnprintf(buf + len, size - len, "       %s: %d\n",
+					mgmt_frm_type[i], mgmt_stats->rx_cnt[i]);
+
+		len += scnprintf(buf + len, size - len, " Tx completion stats :\n");
+		len += scnprintf(buf + len, size - len, " success completions:\n");
+
+		for (i = 0; i < ATH12K_STATS_MGMT_FRM_TYPE_MAX-1; i++)
+			len += scnprintf(buf + len, size - len, "       %s: %d\n",
+					mgmt_frm_type[i], mgmt_stats->tx_compl_succ[i]);
+
+		len += scnprintf(buf + len, size - len, " failure completions:\n");
+
+		for (i = 0; i < ATH12K_STATS_MGMT_FRM_TYPE_MAX-1; i++)
+			len += scnprintf(buf + len, size - len, "       %s: %d\n", mgmt_frm_type[i], mgmt_stats->tx_compl_fail[i]);
+	}
+
+	spin_unlock_bh(&ar->data_lock);
+
+	/* Defensive clamp; scnprintf already bounds len to < size. */
+	if (len > size)
+		len = size;
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, len);
+	mutex_unlock(&ar->conf_mutex);
+	kfree(buf);
+	return ret;
+}
+
+/* debugfs ops for the mgmt-frame stats dump (read-only). */
+static const struct file_operations fops_dump_mgmt_stats = {
+	.read = ath12k_dump_mgmt_stats,
+	.open = simple_open
+};
+
+/*
+ * athdiag read: read @count bytes of target memory at address *ppos via
+ * the QMI diagnostic window and copy them to userspace.
+ *
+ * NOTE(review): *ppos <= 0 is rejected, so target address 0 cannot be
+ * read — confirm that is intentional.  A partial copy_to_user is treated
+ * as a full -EFAULT; the "count -= ret" below is a no-op since ret is 0
+ * on that path.
+ */
+static ssize_t ath12k_athdiag_read(struct file *file,
+                                   char __user *user_buf,
+                                   size_t count, loff_t *ppos)
+{
+        struct ath12k *ar = file->private_data;
+        u8 *buf;
+        int ret;
+
+        if (*ppos <= 0)
+                return -EINVAL;
+
+        if (!count)
+                return 0;
+
+        /* vmalloc: count is user-controlled and may be large. */
+        buf = vmalloc(count);
+        if (!buf) {
+                return -ENOMEM;
+        }
+
+        mutex_lock(&ar->conf_mutex);
+
+        ret = ath12k_qmi_mem_read(ar->ab, *ppos, buf, count);
+        if (ret < 0) {
+                ath12k_warn(ar->ab, "failed to read address 0x%08x via diagnose window from debugfs: %d\n",
+                            (u32)(*ppos), ret);
+                goto exit;
+        }
+
+        ret = copy_to_user(user_buf, buf, count);
+        if (ret) {
+                ret = -EFAULT;
+                goto exit;
+        }
+
+        count -= ret;
+        *ppos += count;
+        ret = count;
+
+exit:
+        vfree(buf);
+        mutex_unlock(&ar->conf_mutex);
+        return ret;
+}
+
+/*
+ * athdiag write: copy @count bytes from userspace and write them to
+ * target memory at address *ppos via the QMI diagnostic window.
+ * Mirrors ath12k_athdiag_read(); address 0 is likewise rejected.
+ */
+static ssize_t ath12k_athdiag_write(struct file *file,
+                                    const char __user *user_buf,
+                                    size_t count, loff_t *ppos)
+{
+        struct ath12k *ar = file->private_data;
+        u8 *buf;
+        int ret;
+
+        if (*ppos <= 0)
+                return -EINVAL;
+
+        if (!count)
+                return 0;
+
+        mutex_lock(&ar->conf_mutex);
+
+        buf = vmalloc(count);
+        if (!buf) {
+                ret = -ENOMEM;
+                goto error_unlock;
+        }
+
+        ret = copy_from_user(buf, user_buf, count);
+        if (ret) {
+                /* Any partial copy is treated as a full fault. */
+                ret = -EFAULT;
+                goto exit;
+        }
+
+        ret = ath12k_qmi_mem_write(ar->ab, *ppos, buf, count);
+        if (ret < 0) {
+                ath12k_warn(ar->ab, "failed to write address 0x%08x via diagnose window from debugfs: %d\n",
+                            (u32)(*ppos), ret);
+                goto exit;
+        }
+
+        *ppos += count;
+        ret = count;
+
+exit:
+        vfree(buf);
+
+error_unlock:
+        mutex_unlock(&ar->conf_mutex);
+        return ret;
+}
+
+/* debugfs ops for athdiag target-memory access (seekable: pos = address). */
+static const struct file_operations fops_athdiag = {
+        .read = ath12k_athdiag_read,
+        .write = ath12k_athdiag_write,
+        .open = simple_open,
+        .owner = THIS_MODULE,
+        .llseek = default_llseek,
+};
+
+/*
+ * ath12k_write_btcoex - enable/disable/configure BT coexistence.
+ *
+ * Input: "<coex> <wlan_prio_mask> <wlan_weight>" where <coex> is
+ * BTCOEX_ENABLE / BTCOEX_DISABLE / BTCOEX_CONFIGURE_DEFAULT.  A value of
+ * BTCOEX_CONFIGURE_DEFAULT for mask/weight keeps the current setting.
+ * The config is applied on the first vdev of the radio.
+ */
+static ssize_t ath12k_write_btcoex(struct file *file,
+                                   const char __user *ubuf,
+                                   size_t count, loff_t *ppos)
+{
+        struct ath12k_link_vif *arvif = NULL;
+        struct ath12k *ar = file->private_data;
+        char buf[256] = {0};
+        size_t buf_size;
+        int ret = 0,coex = BTCOEX_CONFIGURE_DEFAULT, wlan_weight = 0,
+            wlan_prio_mask_value = 0;
+        enum qca_wlan_priority_type wlan_prio_mask = QCA_WLAN_PRIORITY_BE;
+
+        if (!ar) {
+                ret = -EINVAL;
+                goto exit;
+        }
+
+        buf_size = min(count, (sizeof(buf) - 1));
+
+        if (copy_from_user(buf, ubuf, buf_size)) {
+                ret = -EFAULT;
+                goto exit;
+        }
+
+        buf[buf_size] = '\0';
+        ret = sscanf(buf, "%d %d %d" , &coex, &wlan_prio_mask_value, &wlan_weight);
+
+        /* At least the first field must parse. */
+        if (!ret) {
+                ret = -EINVAL;
+                goto exit;
+        }
+
+        /* Reject anything other than enable/disable/default.  NOTE(review):
+         * the trailing "&& coex" term presumably lets BTCOEX_DISABLE (0)
+         * through — confirm against the enum values.
+         */
+        if (coex != BTCOEX_ENABLE &&  coex != BTCOEX_CONFIGURE_DEFAULT && coex) {
+                ret = -EINVAL;
+                goto exit;
+        }
+
+        mutex_lock(&ar->conf_mutex);
+
+        switch (coex) {
+        case BTCOEX_ENABLE:
+                if (!test_bit(ATH12K_FLAG_BTCOEX, &ar->dev_flags))
+                                set_bit(ATH12K_FLAG_BTCOEX, &ar->dev_flags);
+                break;
+        case BTCOEX_CONFIGURE_DEFAULT:
+                /* Re-configuring requires coex to already be enabled. */
+                if (!test_bit(ATH12K_FLAG_BTCOEX, &ar->dev_flags)) {
+                        ret = -EINVAL;
+                        goto error_unlock;
+                }
+                break;
+        case BTCOEX_DISABLE:
+                clear_bit(ATH12K_FLAG_BTCOEX, &ar->dev_flags);
+                break;
+        default:
+                ret = -EINVAL;
+                goto error_unlock;
+        }
+
+        if ((wlan_weight < BTCOEX_CONFIGURE_DEFAULT) ||
+            (wlan_prio_mask_value < BTCOEX_CONFIGURE_DEFAULT) ||
+            (wlan_weight > BTCOEX_MAX_PKT_WEIGHT) ||
+            (wlan_prio_mask_value > QCA_WLAN_PRIORITY_MGMT)) {
+                ret = -EINVAL;
+                goto error_unlock;
+        }
+
+        /* "default" sentinel keeps the previously configured values. */
+        if (wlan_weight == BTCOEX_CONFIGURE_DEFAULT)
+                wlan_weight = ar->coex.wlan_weight;
+
+        wlan_prio_mask = ((wlan_prio_mask_value == BTCOEX_CONFIGURE_DEFAULT)?
+                           ar->coex.wlan_prio_mask : wlan_prio_mask_value);
+
+        if (ar->state != ATH12K_STATE_ON &&
+            ar->state != ATH12K_STATE_RESTARTED) {
+                ath12k_warn(ar->ab, "pdev %d not in ON state\n", ar->pdev->pdev_id);
+                ret = -ENETDOWN;
+                goto error_unlock;
+        }
+
+        /* NOTE(review): assumes ar->arvifs is non-empty — confirm a vdev
+         * always exists once the radio is ON.
+         */
+        arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list);
+
+        if (!arvif->is_started) {
+                ret = -EINVAL;
+                goto error_unlock;
+        }
+
+        ret = ath12k_mac_btcoex_config(ar, arvif, coex, wlan_prio_mask, wlan_weight);
+
+        if (ret) {
+                ath12k_warn(ar->ab,
+                            "failed to enable coex vdev_id %d ret %d\n",
+                            arvif->vdev_id, ret);
+                goto error_unlock;
+        }
+
+        /* Cache the applied settings for the read handler. */
+        ar->coex.wlan_prio_mask = wlan_prio_mask;
+        ar->coex.wlan_weight = wlan_weight;
+        ret = count;
+
+error_unlock:
+        mutex_unlock(&ar->conf_mutex);
+
+exit:
+        return ret;
+}
+
+/* Report "<enabled> <wlan_prio_mask> <wlan_weight>" for BT coex. */
+static ssize_t ath12k_read_btcoex(struct file *file, char __user *ubuf,
+                                  size_t count, loff_t *ppos)
+{
+        struct ath12k *ar = file->private_data;
+        char buf[256] = {0};
+        int len = 0;
+
+        if (!ar)
+                return -EINVAL;
+
+        mutex_lock(&ar->conf_mutex);
+        len = scnprintf(buf, sizeof(buf) - len, "%u %u %u\n",
+                        test_bit(ATH12K_FLAG_BTCOEX, &ar->dev_flags),
+                        ar->coex.wlan_prio_mask,
+                        ar->coex.wlan_weight);
+        mutex_unlock(&ar->conf_mutex);
+
+        return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+/* debugfs ops for the btcoex enable/config file. */
+static const struct file_operations fops_btcoex = {
+        .read = ath12k_read_btcoex,
+        .write = ath12k_write_btcoex,
+        .open = simple_open
+};
+
+/*
+ * ath12k_write_btcoex_duty_cycle - set the TDM (time-division) duty cycle
+ * for the OCS coex algorithm.
+ *
+ * Input: "<duty_cycle> <wlan_duration>"; wlan_duration must not exceed
+ * duty_cycle, both must be non-zero, and duty_cycle is capped at 100000
+ * (per the comment below, 100 ms — so the unit is presumably
+ * microseconds; confirm against the WMI definition).
+ */
+static ssize_t ath12k_write_btcoex_duty_cycle(struct file *file,
+                                              const char __user *ubuf,
+                                              size_t count, loff_t *ppos)
+{
+        struct ath12k_link_vif *arvif = NULL;
+        struct ath12k *ar = file->private_data;
+        struct coex_config_arg coex_config;
+        char buf[256] = {0};
+        size_t buf_size;
+        u32 duty_cycle = 0, wlan_duration = 0;
+        int ret = 0;
+
+        if (!ar) {
+                ret = -EINVAL;
+                goto exit;
+        }
+
+        buf_size = min(count, (sizeof(buf) - 1));
+
+        if (copy_from_user(buf, ubuf, buf_size)) {
+                ret = -EFAULT;
+                goto exit;
+        }
+
+        buf[buf_size] = '\0';
+        ret = sscanf(buf, "%u %u", &duty_cycle, &wlan_duration);
+
+        if (!ret) {
+                ret = -EINVAL;
+                goto exit;
+        }
+
+        /*Maximum duty_cycle period allowed is 100 Miliseconds*/
+        if (duty_cycle < wlan_duration || !duty_cycle || !wlan_duration || duty_cycle > 100000) {
+                ret = -EINVAL;
+                goto exit;
+        }
+
+        mutex_lock(&ar->conf_mutex);
+
+        if (ar->state != ATH12K_STATE_ON &&
+            ar->state != ATH12K_STATE_RESTARTED) {
+                ath12k_warn(ar->ab, "pdev %d not in ON state\n", ar->pdev->pdev_id);
+                ret = -ENETDOWN;
+                goto error_unlock;
+        }
+
+        /* Duty cycling only makes sense when coex is enabled and the OCS
+         * algorithm is active.
+         */
+        if (!test_bit(ATH12K_FLAG_BTCOEX, &ar->dev_flags)) {
+                ret = -EINVAL;
+                goto error_unlock;
+        }
+
+        if (ar->coex.coex_algo_type != COEX_ALGO_OCS) {
+                ath12k_err(ar->ab,"duty cycle algo is not enabled");
+                ret = -EINVAL;
+                goto error_unlock;
+        }
+
+        arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list);
+
+        if (!arvif->is_started) {
+                ret = -EINVAL;
+                goto error_unlock;
+        }
+
+        coex_config.vdev_id = arvif->vdev_id;
+        coex_config.config_type = WMI_COEX_CONFIG_AP_TDM;
+        coex_config.duty_cycle = duty_cycle;
+        coex_config.wlan_duration = wlan_duration;
+        /* Mutex is dropped around the (sleeping) WMI send and re-taken
+         * before caching the applied values.
+         */
+        mutex_unlock(&ar->conf_mutex);
+
+        ret = ath12k_send_coex_config_cmd(ar, &coex_config);
+
+        if (ret) {
+                ath12k_warn(ar->ab,
+                            "failed to set duty cycle vdev_id %d ret %d\n",
+                            coex_config.vdev_id, ret);
+                goto exit;
+        }
+
+        mutex_lock(&ar->conf_mutex);
+        ar->coex.duty_cycle = duty_cycle;
+        ar->coex.wlan_duration = wlan_duration;
+        ret = count;
+
+error_unlock:
+        mutex_unlock(&ar->conf_mutex);
+
+exit:
+        return ret;
+}
+
+/* Report "<duty_cycle> <wlan_duration>" for the coex TDM configuration. */
+static ssize_t ath12k_read_btcoex_duty_cycle(struct file *file, char __user *ubuf,
+                                             size_t count, loff_t *ppos)
+{
+        struct ath12k *ar = file->private_data;
+        char buf[256] = {0};
+        int len = 0;
+
+        if (!ar)
+                return -EINVAL;
+
+        mutex_lock(&ar->conf_mutex);
+        len = scnprintf(buf, sizeof(buf) - len, "%d %d\n",
+                        ar->coex.duty_cycle,ar->coex.wlan_duration);
+        mutex_unlock(&ar->conf_mutex);
+
+        return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+/* debugfs ops for the btcoex duty-cycle file. */
+static const struct file_operations fops_btcoex_duty_cycle = {
+        .read = ath12k_read_btcoex_duty_cycle,
+        .write = ath12k_write_btcoex_duty_cycle,
+        .open = simple_open
+};
+
+/*
+ * ath12k_write_btcoex_algo - configure the PTA (packet traffic
+ * arbitration) interface for BT coex.
+ *
+ * Input: "<pta_num> 0x<coex_mode> 0x<bt_txrx_time> 0x<bt_priority_time>
+ * 0x<pta_algorithm> 0x<pta_priority>" — note all but the first field are
+ * parsed as hex.
+ */
+static ssize_t ath12k_write_btcoex_algo(struct file *file,
+                                        const char __user *ubuf,
+                                        size_t count, loff_t *ppos)
+{
+        struct ath12k_link_vif *arvif = NULL;
+        struct ath12k *ar = file->private_data;
+        struct coex_config_arg coex_config;
+        char buf[256] = {0};
+        size_t buf_size;
+        u32 pta_num = 0, coex_mode = 0, bt_txrx_time  = 0,
+        bt_priority_time = 0, pta_algorithm = 0,
+        pta_priority = 0;
+        int ret = 0;
+
+        if (!ar) {
+                ret = -EINVAL;
+                goto exit;
+        }
+
+        buf_size = min(count, (sizeof(buf) - 1));
+
+        if (copy_from_user(buf, ubuf, buf_size)) {
+                ret = -EFAULT;
+                goto exit;
+        }
+
+        buf[buf_size] = '\0';
+        ret = sscanf(buf, "%u 0x%x 0x%x 0x%x 0x%x 0x%x" , &pta_num, &coex_mode,
+                     &bt_txrx_time, &bt_priority_time,
+                     &pta_algorithm, &pta_priority);
+
+        if (!ret) {
+                ret = -EINVAL;
+                goto exit;
+        }
+
+        if (coex_mode > BTCOEX_PTA_MODE ||
+            coex_mode < BTCOEX_THREE_WIRE_MODE ||
+            pta_algorithm >= COEX_ALGO_MAX_SUPPORTED) {
+                ret = -EINVAL;
+                goto exit;
+        }
+
+        mutex_lock(&ar->conf_mutex);
+
+        if (ar->state != ATH12K_STATE_ON &&
+            ar->state != ATH12K_STATE_RESTARTED) {
+                ath12k_warn(ar->ab, "pdev %d not in ON state\n", ar->pdev->pdev_id);
+                ret = -ENETDOWN;
+                goto error_unlock;
+        }
+
+        if (!test_bit(ATH12K_FLAG_BTCOEX, &ar->dev_flags)) {
+                ret = -EINVAL;
+                goto error_unlock;
+        }
+
+        arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list);
+
+        if (!arvif->is_started) {
+                ret = -EINVAL;
+                goto error_unlock;
+        }
+
+        coex_config.vdev_id            = arvif->vdev_id;
+        coex_config.config_type        = WMI_COEX_CONFIG_PTA_INTERFACE;
+        coex_config.pta_num            = pta_num;
+        coex_config.coex_mode          = coex_mode;
+        coex_config.bt_txrx_time       = bt_txrx_time;
+        coex_config.bt_priority_time   = bt_priority_time;
+        coex_config.pta_algorithm      = pta_algorithm;
+        coex_config.pta_priority       = pta_priority;
+        /* Drop the mutex around the WMI send; re-taken below to cache. */
+        mutex_unlock(&ar->conf_mutex);
+
+        ret = ath12k_send_coex_config_cmd(ar, &coex_config);
+
+        if (ret) {
+                ath12k_warn(ar->ab,
+                            "failed to set coex algorithm vdev_id %d ret %d\n",
+                            coex_config.vdev_id, ret);
+                goto exit;
+        }
+
+        mutex_lock(&ar->conf_mutex);
+        ar->coex.pta_num                =   pta_num;
+        ar->coex.coex_mode              =   coex_mode;
+        ar->coex.bt_active_time_slot    =   bt_txrx_time;
+        ar->coex.bt_priority_time_slot  =   bt_priority_time;
+        ar->coex.pta_algorithm          =   pta_algorithm;
+        ar->coex.pta_priority           =   pta_priority;
+        ret = count;
+
+error_unlock:
+        mutex_unlock(&ar->conf_mutex);
+
+exit:
+        return ret;
+}
+
+/* Report the cached PTA configuration (six space-separated fields). */
+static ssize_t ath12k_read_btcoex_algo(struct file *file, char __user *ubuf,
+                                       size_t count, loff_t *ppos)
+{
+        struct ath12k *ar = file->private_data;
+        char buf[256] = {0};
+        int len = 0;
+
+        if (!ar)
+                return -EINVAL;
+
+        mutex_lock(&ar->conf_mutex);
+        len = scnprintf(buf, sizeof(buf) - len, "%u %u %u %u %u %u\n",
+                        ar->coex.pta_num, ar->coex.coex_mode,
+                        ar->coex.bt_active_time_slot,
+                        ar->coex.bt_priority_time_slot,
+                        ar->coex.pta_algorithm, ar->coex.pta_priority);
+        mutex_unlock(&ar->conf_mutex);
+
+        return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+/* debugfs ops for the btcoex PTA-algorithm file. */
+static const struct file_operations fops_btcoex_algo = {
+        .read = ath12k_read_btcoex_algo,
+        .write = ath12k_write_btcoex_algo,
+        .open = simple_open
+};
+
+/*
+ * ath12k_btcoex_pkt_priority_write - configure per-packet-type WLAN/BT
+ * arbitration weights.
+ *
+ * Input: "<wlan_pkt_type> <continued> <wlan_pkt_weight> <bt_pkt_weight>".
+ * NOTE(review): &wlan_pkt_type is an enum passed to sscanf("%u") —
+ * relies on the enum having unsigned-int representation; confirm.
+ */
+static ssize_t ath12k_btcoex_pkt_priority_write(struct file *file,
+                                          const char __user *ubuf,
+                                          size_t count, loff_t *ppos)
+{
+        struct ath12k *ar = file->private_data;
+        struct ath12k_link_vif *arvif = NULL;
+        struct coex_config_arg coex_config;
+        char buf[128] = {0};
+        size_t buf_size;
+        enum qca_wlan_priority_type wlan_pkt_type = 0;
+        u32 wlan_pkt_type_continued = 0, wlan_pkt_weight = 0,
+        bt_pkt_weight = 0;
+        int ret;
+
+        if (!ar) {
+                ret = -EINVAL;
+                goto exit;
+        }
+
+        buf_size = min(count, (sizeof(buf) - 1));
+
+        if (copy_from_user(buf, ubuf, buf_size)) {
+                ret = -EFAULT;
+                goto exit;
+        }
+
+        buf[buf_size] = '\0';
+        ret = sscanf(buf, "%u %u %u %u" , &wlan_pkt_type,
+                     &wlan_pkt_type_continued, &wlan_pkt_weight,
+                     &bt_pkt_weight);
+
+        if (!ret) {
+                ret = -EINVAL;
+                goto exit;
+        }
+
+        if (wlan_pkt_type > QCA_WLAN_PRIORITY_MGMT ||
+            wlan_pkt_weight > BTCOEX_MAX_PKT_WEIGHT  ||
+            bt_pkt_weight > BTCOEX_MAX_PKT_WEIGHT) {
+                ret = -EINVAL;
+                goto exit;
+        }
+
+        mutex_lock(&ar->conf_mutex);
+
+        if (ar->state != ATH12K_STATE_ON &&
+            ar->state != ATH12K_STATE_RESTARTED) {
+                ath12k_warn(ar->ab, "pdev %d not in ON state\n", ar->pdev->pdev_id);
+                ret = -ENETDOWN;
+                goto error_unlock;
+        }
+
+        if (!test_bit(ATH12K_FLAG_BTCOEX, &ar->dev_flags)) {
+                ret = -EINVAL;
+                goto error_unlock;
+        }
+
+        arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list);
+
+        if (!arvif->is_started) {
+                ret = -EINVAL;
+                goto error_unlock;
+        }
+
+        coex_config.vdev_id                  = arvif->vdev_id;
+        coex_config.config_type              = WMI_COEX_CONFIG_WLAN_PKT_PRIORITY;
+        coex_config.wlan_pkt_type            = wlan_pkt_type;
+        coex_config.wlan_pkt_type_continued  = wlan_pkt_type_continued;
+        coex_config.wlan_pkt_weight          = wlan_pkt_weight;
+        coex_config.bt_pkt_weight            = bt_pkt_weight;
+        /* Drop the mutex around the WMI send; re-taken below to cache. */
+        mutex_unlock(&ar->conf_mutex);
+
+        ret = ath12k_send_coex_config_cmd(ar, &coex_config);
+
+        if (ret) {
+                ath12k_warn(ar->ab,
+                            "failed to set coex pkt priority vdev_id %d ret %d\n",
+                            coex_config.vdev_id, ret);
+                goto exit;
+        }
+
+        mutex_lock(&ar->conf_mutex);
+        ar->coex.wlan_pkt_type              = wlan_pkt_type;
+        ar->coex.wlan_pkt_type_continued    = wlan_pkt_type_continued;
+        ar->coex.wlan_weight                = wlan_pkt_weight;
+        ar->coex.bt_weight                  = bt_pkt_weight;
+
+        ret = count;
+
+error_unlock:
+        mutex_unlock(&ar->conf_mutex);
+
+exit:
+        return ret;
+}
+
+/* Report the cached per-packet-type coex weights (four fields). */
+static ssize_t ath12k_btcoex_pkt_priority_read(struct file *file,
+                                         char __user *ubuf,
+                                         size_t count, loff_t *ppos)
+{
+        struct ath12k *ar = file->private_data;
+        u8 buf[128] = {0};
+        size_t len = 0;
+
+        if (!ar)
+                return -EINVAL;
+
+        mutex_lock(&ar->conf_mutex);
+        len = scnprintf(buf, sizeof(buf) - len,
+                        "%u %u %u %u\n",ar->coex.wlan_pkt_type,
+                        ar->coex.wlan_pkt_type_continued, ar->coex.wlan_weight,
+                        ar->coex.bt_weight);
+        mutex_unlock(&ar->conf_mutex);
+
+        return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+/* debugfs hooks for the "btcoex_priority" file (read/write) */
+static const struct file_operations fops_btcoex_priority = {
+        .read = ath12k_btcoex_pkt_priority_read,
+        .write = ath12k_btcoex_pkt_priority_write,
+        .open = simple_open,
+        .owner = THIS_MODULE,
+        .llseek = default_llseek,
+};
+
+#ifdef CONFIG_ATH12K_SAWF
+/* debugfs write handler for "sawf_stats": update the SAWF stats
+ * bitmap used by the driver. Rejects the write when the radio is not
+ * up, when SAWF support is compiled in but disabled, or when the
+ * value exceeds ATH12K_SAWF_STATS_MAX.
+ */
+static ssize_t ath12k_write_sawf_stats(struct file *file,
+				       const char __user *ubuf,
+				       size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	u32 sawf_stats;
+	int ret;
+
+	if (kstrtouint_from_user(ubuf, count, 0, &sawf_stats))
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH12K_STATE_ON) {
+		ath12k_err(ar->ab, "Netdev is down\n");
+		ret = -ENETDOWN;
+		goto out;
+	}
+
+	if (!ath12k_sawf_enable) {
+		ath12k_err(ar->ab, "SAWF support is not enabled\n");
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (sawf_stats > ATH12K_SAWF_STATS_MAX) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = count;
+
+	/* Writing the current value is a successful no-op */
+	if (sawf_stats == ar->debug.sawf_stats)
+		goto out;
+
+	ar->debug.sawf_stats = sawf_stats;
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* debugfs read handler for "sawf_stats": print the current SAWF
+ * stats bitmap in hex.
+ */
+static ssize_t ath12k_read_sawf_stats(struct file *file,
+				      char __user *ubuf,
+				      size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	char buf[32];
+	size_t len;
+
+	/* Snapshot the bitmap under conf_mutex */
+	mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf), "%08x\n", ar->debug.sawf_stats);
+	mutex_unlock(&ar->conf_mutex);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+/* debugfs hooks for the "sawf_stats" file (read/write) */
+static const struct file_operations fops_sawf_stats = {
+	.read = ath12k_read_sawf_stats,
+	.write = ath12k_write_sawf_stats,
+	.open = simple_open
+};
+#endif /* CONFIG_ATH12K_SAWF */
+
+/* debugfs read handler for "ani_enable": report whether ANI is
+ * currently enabled (0/1).
+ */
+static ssize_t ath12k_read_ani_enable(struct file *file,
+				      char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	int len;
+	char buf[32];
+
+	/* ani_enabled is updated under conf_mutex in
+	 * ath12k_write_ani_enable(); take it here as well so this
+	 * reader is consistent with the other ANI debugfs handlers.
+	 */
+	mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf), "%d\n", ar->ani_enabled);
+	mutex_unlock(&ar->conf_mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+/* debugfs write handler for "ani_enable": enable or disable ANI by
+ * sending WMI_PDEV_PARAM_ANI_ENABLE. Returns count on success or a
+ * negative errno.
+ */
+static ssize_t ath12k_write_ani_enable(struct file *file,
+				       const char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	int ret;
+	bool enable;
+
+	if (kstrtobool_from_user(user_buf, count, &enable))
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH12K_STATE_ON) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	/* Already in the requested state: succeed without a WMI call */
+	if (ar->ani_enabled == enable) {
+		ret = count;
+		goto exit;
+	}
+
+	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ANI_ENABLE,
+					enable, ar->pdev->pdev_id);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to %s ANI: %d\n", enable ? "enable" : "disable",
+			    ret);
+		goto exit;
+	}
+
+	/* Cache the new state only after firmware accepted it */
+	ar->ani_enabled = enable;
+	ret = count;
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* debugfs hooks for the "ani_enable" file (read/write) */
+static const struct file_operations fops_ani_enable = {
+	.read = ath12k_read_ani_enable,
+	.write = ath12k_write_ani_enable,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* debugfs read handler for "ani_poll_period": show the configured
+ * poll period, or a notice when ANI is disabled.
+ */
+static ssize_t ath12k_read_ani_poll_period(struct file *file,
+					   char __user *user_buf,
+					   size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	char buf[32];
+	int len;
+
+	if (ar->ani_enabled)
+		len = scnprintf(buf, sizeof(buf), "%u\n", ar->ani_poll_period);
+	else
+		len = scnprintf(buf, sizeof(buf), "ANI is disabled\n");
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+/* debugfs write handler for "ani_poll_period": forward a new ANI
+ * poll period (bounded by ATH12K_ANI_POLL_PERIOD_MAX) to firmware
+ * via WMI_PDEV_PARAM_ANI_POLL_PERIOD. Requires the radio to be ON
+ * and ANI enabled.
+ */
+static ssize_t ath12k_write_ani_poll_period(struct file *file,
+					    const char __user *user_buf,
+					    size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	int ret;
+	u32 ani_poll_period;
+
+	if (kstrtou32_from_user(user_buf, count, 0, &ani_poll_period))
+		return -EINVAL;
+
+	/* Range-check before taking the lock */
+	if (ani_poll_period > ATH12K_ANI_POLL_PERIOD_MAX)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH12K_STATE_ON) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	if (!ar->ani_enabled) {
+		ath12k_warn(ar->ab, "ANI is disabled\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+					ani_poll_period,
+					ar->pdev->pdev_id);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to send ANI poll period wmi cmd: %d\n", ret);
+		goto exit;
+	}
+
+	/* Cache only after firmware accepted the new period */
+	ar->ani_poll_period = ani_poll_period;
+	ret = count;
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* debugfs hooks for the "ani_poll_period" file (read/write) */
+static const struct file_operations fops_ani_poll_period = {
+	.read = ath12k_read_ani_poll_period,
+	.write = ath12k_write_ani_poll_period,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* debugfs read handler for "ani_listen_period": show the configured
+ * listen period, or a notice when ANI is disabled.
+ */
+static ssize_t ath12k_read_ani_listen_period(struct file *file,
+					     char __user *user_buf,
+					     size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	char buf[32];
+	int len;
+
+	if (ar->ani_enabled)
+		len = scnprintf(buf, sizeof(buf), "%u\n", ar->ani_listen_period);
+	else
+		len = scnprintf(buf, sizeof(buf), "ANI is disabled\n");
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+/* debugfs write handler for "ani_listen_period": forward a new ANI
+ * listen period (bounded by ATH12K_ANI_LISTEN_PERIOD_MAX) to firmware
+ * via WMI_PDEV_PARAM_ANI_LISTEN_PERIOD. Requires the radio to be ON
+ * and ANI enabled.
+ */
+static ssize_t ath12k_write_ani_listen_period(struct file *file,
+					      const char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	u32 ani_listen_period = 0;
+	int ret;
+
+	if (kstrtou32_from_user(user_buf, count, 0, &ani_listen_period))
+		return -EINVAL;
+
+	/* Range-check before taking the lock */
+	if (ani_listen_period > ATH12K_ANI_LISTEN_PERIOD_MAX)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH12K_STATE_ON) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	if (!ar->ani_enabled) {
+		ath12k_warn(ar->ab, "ANI is disabled\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+					ani_listen_period, ar->pdev->pdev_id);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to send ANI listen period wmi cmd: %d\n", ret);
+		goto exit;
+	}
+
+	/* Cache only after firmware accepted the new period */
+	ar->ani_listen_period = ani_listen_period;
+	ret = count;
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* debugfs hooks for the "ani_listen_period" file (read/write) */
+static const struct file_operations fops_ani_listen_period = {
+	.read = ath12k_read_ani_listen_period,
+	.write = ath12k_write_ani_listen_period,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* Request the current OFDM ANI level from firmware and wait up to one
+ * second for the corresponding WMI event to complete ani_ofdm_event.
+ * Caller must hold conf_mutex. Returns 0 on success, -ETIMEDOUT if no
+ * event arrived, or the WMI send error.
+ */
+static int ath12k_debug_get_ani_level(struct ath12k *ar)
+{
+	unsigned long time_left;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	/* Arm the completion before sending so the event cannot be lost */
+	reinit_completion(&ar->ani_ofdm_event);
+
+	ret = ath12k_wmi_pdev_get_ani_level(ar, WMI_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+					    ar->pdev->pdev_id);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to request ofdm ani level: %d\n", ret);
+		return ret;
+	}
+
+	time_left = wait_for_completion_timeout(&ar->ani_ofdm_event, 1 * HZ);
+	if (time_left == 0)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+/* debugfs read handler for "ani_level": query firmware for the
+ * current OFDM ANI level and report it, or report that ANI is
+ * disabled. Note there are two unlock paths: the success path
+ * unlocks before copying to user space, the error path unlocks at
+ * the "unlock" label and returns the errno.
+ */
+static ssize_t ath12k_read_ani_level(struct file *file, char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	char buf[128];
+	int ret, len = 0;
+
+	mutex_lock(&ar->conf_mutex);
+	if (ar->state != ATH12K_STATE_ON) {
+		ret = -ENETDOWN;
+		goto unlock;
+	}
+
+	if (!ar->ani_enabled) {
+		len += scnprintf(buf, sizeof(buf), "ANI is disabled\n");
+	} else {
+		/* Synchronously fetches ar->ani_ofdm_level via WMI */
+		ret = ath12k_debug_get_ani_level(ar);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to request ani ofdm level: %d\n", ret);
+			goto unlock;
+		}
+		len += scnprintf(buf, sizeof(buf), "ofdm level %d\n",
+				 ar->ani_ofdm_level);
+	}
+	mutex_unlock(&ar->conf_mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+unlock:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* debugfs write handler for "ani_level": set a fixed OFDM ANI level
+ * (ATH12K_ANI_LEVEL_MIN..MAX) or ATH12K_ANI_LEVEL_AUTO via
+ * WMI_PDEV_PARAM_ANI_OFDM_LEVEL. Requires the radio ON (or
+ * restarted) and ANI enabled.
+ */
+static ssize_t ath12k_write_ani_level(struct file *file,
+				      const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	s32 ofdm_level;
+	int ret;
+
+	if (kstrtos32_from_user(user_buf, count, 0, &ofdm_level))
+		return -EINVAL;
+
+	/* Accept either a value inside [MIN, MAX] or the AUTO sentinel */
+	if ((ofdm_level < ATH12K_ANI_LEVEL_MIN || ofdm_level > ATH12K_ANI_LEVEL_MAX) &&
+	    ofdm_level != ATH12K_ANI_LEVEL_AUTO)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH12K_STATE_ON && ar->state != ATH12K_STATE_RESTARTED) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	if (!ar->ani_enabled) {
+		ath12k_warn(ar->ab, "ANI is disabled\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+					ofdm_level, ar->pdev->pdev_id);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set ANI ofdm level :%d\n", ret);
+		goto exit;
+	}
+
+	ret = count;
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* debugfs hooks for the "ani_level" file (read/write) */
+static const struct file_operations fops_ani_level = {
+	.write = ath12k_write_ani_level,
+	.read = ath12k_read_ani_level,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* Create the per-radio ("mac<N>") debugfs directory under the SoC
+ * directory, symlink it from the ieee80211/phy* tree, and populate it
+ * with all per-radio debug files. Returns 0 on success or the
+ * debugfs_create_dir() error.
+ */
+int ath12k_debugfs_register(struct ath12k *ar)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct ieee80211_hw *hw = ar->ah->hw;
+	/* "mac" plus a multi-digit pdev index; the previous 5-byte
+	 * buffer truncated the name for pdev_idx >= 10.
+	 */
+	char pdev_name[10];
+	char buf[100] = {0};
+
+	snprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx);
+
+	ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc);
+	if (IS_ERR(ar->debug.debugfs_pdev))
+		return PTR_ERR(ar->debug.debugfs_pdev);
+
+	/* Create a symlink under ieee80211/phy* */
+	snprintf(buf, sizeof(buf), "../../ath12k/%pd2", ar->debug.debugfs_pdev);
+	debugfs_create_symlink("ath12k", hw->wiphy->debugfsdir, buf);
+
+	ath12k_debugfs_htt_stats_init(ar);
+
+	ath12k_debugfs_wmi_ctrl_stats(ar);
+	ath12k_debugfs_fw_stats_init(ar);
+	ath12k_init_pktlog(ar);
+
+	init_completion(&ar->tpc_complete);
+	init_completion(&ar->ani_ofdm_event);
+
+	memset(&ar->wmm_stats, 0, sizeof(struct ath12k_wmm_stats));
+
+	debugfs_create_file("wmm_stats", 0644,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_wmm_stats);
+	debugfs_create_file("neighbor_peer", 0644,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_write_nrp_mac);
+	debugfs_create_file("ext_tx_stats", 0644,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_extd_tx_stats);
+	debugfs_create_file("ext_rx_stats", 0644,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_extd_rx_stats);
+	debugfs_create_file("pktlog_filter", 0644,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_pktlog_filter);
+
+	/* DFS knobs only exist on radios with a 5 GHz band */
+	if (ar->mac.sbands[NL80211_BAND_5GHZ].channels) {
+		debugfs_create_file("dfs_simulate_radar", 0200,
+				    ar->debug.debugfs_pdev, ar,
+				    &fops_simulate_radar);
+		debugfs_create_bool("dfs_block_radar_events", 0200,
+				    ar->debug.debugfs_pdev,
+				    &ar->dfs_block_radar_events);
+	}
+
+	/* AWGN simulation only applies to 6 GHz capable radios */
+	if (ar->mac.sbands[NL80211_BAND_6GHZ].channels) {
+		debugfs_create_file("simulate_awgn", 0200,
+				    ar->debug.debugfs_pdev, ar,
+				    &fops_simulate_awgn);
+	}
+
+	debugfs_create_file("tpc_stats", 0400,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_tpc_stats);
+
+	debugfs_create_file("tpc_stats_type", 0600,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_tpc_stats_type);
+
+	debugfs_create_file("dump_mgmt_stats", 0644,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_dump_mgmt_stats);
+
+	debugfs_create_file("athdiag", 0600,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_athdiag);
+
+	debugfs_create_file("btcoex", 0644,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_btcoex);
+
+	debugfs_create_file("btcoex_duty_cycle", 0644,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_btcoex_duty_cycle);
+
+	debugfs_create_file("btcoex_algorithm", 0644,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_btcoex_algo);
+
+	debugfs_create_file("btcoex_priority", 0600,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_btcoex_priority);
+#ifdef CONFIG_ATH12K_SAWF
+	debugfs_create_file("sawf_stats", 0644,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_sawf_stats);
+#endif
+	debugfs_create_file("ani_enable", 0600, ar->debug.debugfs_pdev,
+			    ar, &fops_ani_enable);
+	debugfs_create_file("ani_level", 0600, ar->debug.debugfs_pdev,
+			    ar, &fops_ani_level);
+	debugfs_create_file("ani_poll_period", 0600,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_ani_poll_period);
+	debugfs_create_file("ani_listen_period", 0600,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_ani_listen_period);
+	return 0;
+}
+
+/* Tear down the per-radio debug state created by
+ * ath12k_debugfs_register() that needs explicit cleanup.
+ */
+void ath12k_debugfs_unregister(struct ath12k *ar)
+{
+	ath12k_deinit_pktlog(ar);
+
+	/* Remove wmi ctrl stats file */
+	debugfs_remove(ar->wmi_ctrl_stat);
+	ar->wmi_ctrl_stat = NULL;
+}
+
+/* Create the driver-global "ath12k" debugfs directory. Returns 0 on
+ * success, the debugfs error code, or -ENOMEM when debugfs handed
+ * back NULL.
+ */
+int ath12k_debugfs_create(void)
+{
+	debugfs_ath12k = debugfs_create_dir("ath12k", NULL);
+	if (IS_ERR(debugfs_ath12k))
+		return PTR_ERR(debugfs_ath12k);
+	if (!debugfs_ath12k)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/* Remove the driver-global "ath12k" debugfs tree created by
+ * ath12k_debugfs_create() and clear the cached dentry.
+ */
+void ath12k_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(debugfs_ath12k);
+	debugfs_ath12k = NULL;
+}
+
+/* debugfs write handler for twt/add_dialog: parse
+ * "<peer mac> <dialog_id> <wake_intvl_us> <wake_intvl_mantis>
+ *  <wake_dura_us> <sp_offset_us> <twt_cmd> <bcast> <trigger>
+ *  <flow_type> <protection>" and send a WMI TWT add-dialog command
+ * for this vif. Requires TWT to be enabled on the radio.
+ */
+static ssize_t ath12k_write_twt_add_dialog(struct file *file,
+					   const char __user *ubuf,
+					   size_t count, loff_t *ppos)
+{
+	struct ath12k_link_vif *arvif = file->private_data;
+	struct wmi_twt_add_dialog_params params = { 0 };
+	u8 buf[128] = {0};
+	int ret;
+
+	if (arvif->ar->twt_enabled == 0) {
+		ath12k_err(arvif->ar->ab, "twt support is not enabled\n");
+		return -EOPNOTSUPP;
+	}
+
+	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+	if (ret < 0)
+		return ret;
+
+	/* NUL-terminate before parsing */
+	buf[ret] = '\0';
+	ret = sscanf(buf,
+		     "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u %u %u %hhu %hhu %hhu %hhu %hhu",
+		     &params.peer_macaddr[0],
+		     &params.peer_macaddr[1],
+		     &params.peer_macaddr[2],
+		     &params.peer_macaddr[3],
+		     &params.peer_macaddr[4],
+		     &params.peer_macaddr[5],
+		     &params.dialog_id,
+		     &params.wake_intvl_us,
+		     &params.wake_intvl_mantis,
+		     &params.wake_dura_us,
+		     &params.sp_offset_us,
+		     &params.twt_cmd,
+		     &params.flag_bcast,
+		     &params.flag_trigger,
+		     &params.flag_flow_type,
+		     &params.flag_protection);
+	/* All 16 fields are mandatory */
+	if (ret != 16)
+		return -EINVAL;
+
+	params.vdev_id = arvif->vdev_id;
+
+	ret = ath12k_wmi_send_twt_add_dialog_cmd(arvif->ar, &params);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+/* debugfs write handler for twt/del_dialog: parse
+ * "<peer mac> <dialog_id>" and send a WMI TWT delete-dialog command
+ * for this vif. Requires TWT to be enabled on the radio.
+ */
+static ssize_t ath12k_write_twt_del_dialog(struct file *file,
+					   const char __user *ubuf,
+					   size_t count, loff_t *ppos)
+{
+	struct ath12k_link_vif *arvif = file->private_data;
+	struct wmi_twt_del_dialog_params params = { 0 };
+	u8 buf[64] = {0};
+	int ret;
+
+	if (arvif->ar->twt_enabled == 0) {
+		ath12k_err(arvif->ar->ab, "twt support is not enabled\n");
+		return -EOPNOTSUPP;
+	}
+
+	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+	if (ret < 0)
+		return ret;
+
+	/* NUL-terminate before parsing */
+	buf[ret] = '\0';
+	ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
+		     &params.peer_macaddr[0],
+		     &params.peer_macaddr[1],
+		     &params.peer_macaddr[2],
+		     &params.peer_macaddr[3],
+		     &params.peer_macaddr[4],
+		     &params.peer_macaddr[5],
+		     &params.dialog_id);
+	/* All 7 fields are mandatory */
+	if (ret != 7)
+		return -EINVAL;
+
+	params.vdev_id = arvif->vdev_id;
+
+	ret = ath12k_wmi_send_twt_del_dialog_cmd(arvif->ar, &params);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+/* debugfs write handler for twt/pause_dialog: parse
+ * "<peer mac> <dialog_id>" and send a WMI TWT pause-dialog command
+ * for this vif. Requires TWT to be enabled on the radio.
+ */
+static ssize_t ath12k_write_twt_pause_dialog(struct file *file,
+					     const char __user *ubuf,
+					     size_t count, loff_t *ppos)
+{
+	struct ath12k_link_vif *arvif = file->private_data;
+	struct wmi_twt_pause_dialog_params params = { 0 };
+	u8 buf[64] = {0};
+	int ret;
+
+	if (arvif->ar->twt_enabled == 0) {
+		ath12k_err(arvif->ar->ab, "twt support is not enabled\n");
+		return -EOPNOTSUPP;
+	}
+
+	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+	if (ret < 0)
+		return ret;
+
+	/* NUL-terminate before parsing */
+	buf[ret] = '\0';
+	ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
+		     &params.peer_macaddr[0],
+		     &params.peer_macaddr[1],
+		     &params.peer_macaddr[2],
+		     &params.peer_macaddr[3],
+		     &params.peer_macaddr[4],
+		     &params.peer_macaddr[5],
+		     &params.dialog_id);
+	/* All 7 fields are mandatory */
+	if (ret != 7)
+		return -EINVAL;
+
+	params.vdev_id = arvif->vdev_id;
+
+	ret = ath12k_wmi_send_twt_pause_dialog_cmd(arvif->ar, &params);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+/* debugfs write handler for twt/resume_dialog: parse
+ * "<peer mac> <dialog_id> <sp_offset_us> <next_twt_size>" and send a
+ * WMI TWT resume-dialog command for this vif. Requires TWT to be
+ * enabled on the radio.
+ */
+static ssize_t ath12k_write_twt_resume_dialog(struct file *file,
+					      const char __user *ubuf,
+					      size_t count, loff_t *ppos)
+{
+	struct ath12k_link_vif *arvif = file->private_data;
+	struct wmi_twt_resume_dialog_params params = { 0 };
+	u8 buf[64] = {0};
+	int ret;
+
+	if (arvif->ar->twt_enabled == 0) {
+		ath12k_err(arvif->ar->ab, "twt support is not enabled\n");
+		return -EOPNOTSUPP;
+	}
+
+	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+	if (ret < 0)
+		return ret;
+
+	/* NUL-terminate before parsing */
+	buf[ret] = '\0';
+	ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u",
+		     &params.peer_macaddr[0],
+		     &params.peer_macaddr[1],
+		     &params.peer_macaddr[2],
+		     &params.peer_macaddr[3],
+		     &params.peer_macaddr[4],
+		     &params.peer_macaddr[5],
+		     &params.dialog_id,
+		     &params.sp_offset_us,
+		     &params.next_twt_size);
+	/* All 9 fields are mandatory */
+	if (ret != 9)
+		return -EINVAL;
+
+	params.vdev_id = arvif->vdev_id;
+
+	ret = ath12k_wmi_send_twt_resume_dialog_cmd(arvif->ar, &params);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+/* debugfs read handler for the per-link RTS threshold. The value
+ * reported is currently a placeholder (-1); link_conf is only checked
+ * for existence. A small stack buffer replaces the previous pointless
+ * GFP_ATOMIC heap allocation, and formatting happens outside the RCU
+ * read section.
+ */
+static ssize_t ath12k_read_rts_threshold(struct file *file,
+					       char __user *ubuf,
+					       size_t count, loff_t *ppos)
+{
+	struct ath12k_link_vif *arvif = file->private_data;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_vif *vif = ahvif->vif;
+	u8 link_id = arvif->link_id;
+	struct ieee80211_bss_conf *link_conf;
+	char buf[20];
+	int len;
+
+	rcu_read_lock();
+	link_conf = rcu_dereference(vif->link_conf[link_id]);
+	if (!link_conf) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	rcu_read_unlock();
+
+	len = scnprintf(buf, sizeof(buf), "%d\n", -1);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+/* Per-vif debugfs hooks: rts_threshold is read-only, the four TWT
+ * dialog files are write-only.
+ */
+static const struct file_operations ath12k_fops_rts_threshold = {
+	.read = ath12k_read_rts_threshold,
+	.open = simple_open
+};
+
+static const struct file_operations ath12k_fops_twt_add_dialog = {
+	.write = ath12k_write_twt_add_dialog,
+	.open = simple_open
+};
+
+static const struct file_operations ath12k_fops_twt_del_dialog = {
+	.write = ath12k_write_twt_del_dialog,
+	.open = simple_open
+};
+
+static const struct file_operations ath12k_fops_twt_pause_dialog = {
+	.write = ath12k_write_twt_pause_dialog,
+	.open = simple_open
+};
+
+static const struct file_operations ath12k_fops_twt_resume_dialog = {
+	.write = ath12k_write_twt_resume_dialog,
+	.open = simple_open
+};
+
+/* open() for the per-vif "link_stats" file: format a report of every
+ * active link's host TX stats into a buffer stored in private_data
+ * (freed by ath12k_release_link_stats()).
+ */
+static int ath12k_open_link_stats(struct inode *inode, struct file *file)
+{
+	struct ath12k_vif *ahvif = inode->i_private;
+	struct ath12k_hw *ah;
+	struct host_link_stats *linkstat = NULL;
+	size_t len = 0, buf_len = PAGE_SIZE * 2;
+	char *buf;
+	int link_id;
+
+	/* Validate before dereferencing: the old code read ahvif->ah
+	 * ahead of this check, making it useless.
+	 */
+	if (!ahvif)
+		return -EINVAL;
+
+	ah = ahvif->ah;
+
+	buf = kzalloc(buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	file->private_data = buf;
+
+	mutex_lock(&ah->conf_mutex);
+	for_each_set_bit(link_id, &ahvif->links_map,
+			 IEEE80211_MLD_MAX_NUM_LINKS) {
+		if (ahvif->link[link_id])
+			linkstat = &ahvif->link[link_id]->link_stats;
+		else
+			break;
+
+		len += scnprintf(buf + len, buf_len - len,
+				 "link[%d] tx_enqueued  = %d\n",
+				 link_id, linkstat->tx_enqueued);
+		len += scnprintf(buf + len, buf_len - len,
+				 "link[%d] tx_completed = %d\n",
+				 link_id, linkstat->tx_completed);
+		len += scnprintf(buf + len, buf_len - len,
+				 "link[%d] tx_bcast_mcast = %d\n",
+				 link_id, linkstat->tx_bcast_mcast);
+		len += scnprintf(buf + len, buf_len - len,
+				 "link[%d] tx_dropped = %d\n",
+				 link_id, linkstat->tx_dropped);
+		len += scnprintf(buf + len, buf_len - len,
+				"link[%d] tx_encap_type = 0:%d 1:%d 2:%d 3:%d\n",
+				link_id, linkstat->tx_encap_type[0],
+				linkstat->tx_encap_type[1],
+				linkstat->tx_encap_type[2],
+				linkstat->tx_encap_type[3]);
+		len += scnprintf(buf + len, buf_len - len,
+				"link[%d] tx_encrypt_type = 0:%d 1:%d 2:%d " \
+				"3:%d 4:%d 5:%d 6:%d 7:%d 8:%d 9:%d 10:%d 11:%d\n",
+				link_id, linkstat->tx_encrypt_type[0],
+				linkstat->tx_encrypt_type[1],
+				linkstat->tx_encrypt_type[2],
+				linkstat->tx_encrypt_type[3],
+				linkstat->tx_encrypt_type[4],
+				linkstat->tx_encrypt_type[5],
+				linkstat->tx_encrypt_type[6],
+				linkstat->tx_encrypt_type[7],
+				linkstat->tx_encrypt_type[8],
+				linkstat->tx_encrypt_type[9],
+				linkstat->tx_encrypt_type[10],
+				linkstat->tx_encrypt_type[11]);
+		len += scnprintf(buf + len, buf_len - len,
+				"link[%d] tx_desc_type = 0:%d 1:%d\n",
+				link_id, linkstat->tx_desc_type[0],
+				linkstat->tx_desc_type[1]);
+
+		len += scnprintf(buf + len, buf_len - len,
+				"------------------------------------------------------\n");
+	}
+	mutex_unlock(&ah->conf_mutex);
+	return 0;
+}
+
+/* release() for "link_stats": free the report buffer allocated in
+ * ath12k_open_link_stats().
+ */
+static int ath12k_release_link_stats(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
+/* read() for "link_stats": the report was fully formatted at open
+ * time, so just copy it out of private_data.
+ */
+static ssize_t ath12k_read_link_stats(struct file *file,
+				      char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	const char *report = file->private_data;
+
+	return simple_read_from_buffer(user_buf, count, ppos, report,
+				       strlen(report));
+}
+
+/* debugfs hooks for the per-vif "link_stats" file (read-only) */
+static const struct file_operations ath12k_fops_link_stats = {
+	.open = ath12k_open_link_stats,
+	.release = ath12k_release_link_stats,
+	.read = ath12k_read_link_stats,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* debugfs write handler for "primary_link": select which active link
+ * is the primary one for this (ML) vif. The link id must be set in
+ * links_map.
+ */
+static ssize_t ath12k_write_primary_link(struct file *file,
+					 const char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct ath12k_vif *ahvif = file->private_data;
+	struct ath12k_hw *ah = ahvif->ah;
+	struct ath12k *ar = ah->radio;
+	u8 primary_link;
+
+	if (kstrtou8_from_user(user_buf, count, 0, &primary_link))
+		return -EINVAL;
+
+	/* Validate and update under conf_mutex so links_map cannot
+	 * change between the check and the assignment (the old code
+	 * checked it unlocked).
+	 */
+	mutex_lock(&ah->conf_mutex);
+	if (!(ahvif->links_map & BIT(primary_link))) {
+		mutex_unlock(&ah->conf_mutex);
+		ath12k_warn(ar->ab, "Invalid link id : %u\n", primary_link);
+		return -EINVAL;
+	}
+
+	ahvif->primary_link_id = primary_link;
+	mutex_unlock(&ah->conf_mutex);
+
+	return count;
+}
+
+/* debugfs read handler for "primary_link": report the current
+ * primary link id for this vif.
+ */
+static ssize_t ath12k_read_primary_link(struct file *file,
+					char __user *ubuf,
+					size_t count, loff_t *ppos)
+{
+	struct ath12k_vif *ahvif = file->private_data;
+	struct ath12k_hw *ah = ahvif->ah;
+	char buf[32];
+	int len;
+
+	/* Snapshot the id under conf_mutex */
+	mutex_lock(&ah->conf_mutex);
+	len = scnprintf(buf, sizeof(buf), "Primary link_id: %u\n",
+			ahvif->primary_link_id);
+	mutex_unlock(&ah->conf_mutex);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+/* debugfs hooks for the per-vif "primary_link" file (read/write) */
+static const struct file_operations ath12k_fops_primary_link = {
+	.open = simple_open,
+	.write = ath12k_write_primary_link,
+	.read = ath12k_read_primary_link,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* Populate per-vif debugfs: a "twt" directory with the four dialog
+ * control files (AP interfaces only), plus "link_stats" and
+ * "primary_link" at the vif level. Dead `if (1)`/`if (0)` scaffolding
+ * from the original has been removed; behavior is unchanged.
+ */
+void ath12k_debugfs_add_interface(struct ath12k_link_vif *arvif)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_vif *vif = ahvif->vif;
+	u8 link_id = arvif->link_id;
+
+	if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+		return;
+
+	/* TWT dialog knobs only make sense on AP interfaces */
+	if (ahvif->vif->type != NL80211_IFTYPE_AP)
+		return;
+
+	/* Already created for this vif */
+	if (arvif->debugfs_twt)
+		return;
+
+	arvif->debugfs_twt = debugfs_create_dir("twt", vif->debugfs_dir);
+	if (!arvif->debugfs_twt || IS_ERR(arvif->debugfs_twt)) {
+		ath12k_warn(arvif->ar->ab,
+			    "failed to create directory %p\n",
+			    arvif->debugfs_twt);
+		arvif->debugfs_twt = NULL;
+		return;
+	}
+
+	debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
+			    arvif, &ath12k_fops_twt_add_dialog);
+
+	debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
+			    arvif, &ath12k_fops_twt_del_dialog);
+
+	debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
+			    arvif, &ath12k_fops_twt_pause_dialog);
+
+	debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
+			    arvif, &ath12k_fops_twt_resume_dialog);
+
+	if (ahvif->debugfs_linkstats)
+		return;
+
+	ahvif->debugfs_linkstats = debugfs_create_file("link_stats",
+						       0200,
+						       vif->debugfs_dir,
+						       ahvif,
+						       &ath12k_fops_link_stats);
+	if (!ahvif->debugfs_linkstats ||
+	    IS_ERR(ahvif->debugfs_linkstats)) {
+		ath12k_warn(arvif->ar->ab,
+			    "failed to create link_stats file");
+		/* Roll back the twt directory on failure */
+		debugfs_remove_recursive(arvif->debugfs_twt);
+		arvif->debugfs_twt = NULL;
+		ahvif->debugfs_linkstats = NULL;
+	}
+
+	ahvif->debugfs_primary_link = debugfs_create_file("primary_link",
+							  0644,
+							  vif->debugfs_dir,
+							  ahvif,
+							  &ath12k_fops_primary_link);
+}
+
+/* Remove the per-vif debugfs entries created by
+ * ath12k_debugfs_add_interface() and clear the cached dentries. When
+ * the vif is already gone its debugfs tree went with it, so only the
+ * pointers are cleared.
+ */
+void ath12k_debugfs_remove_interface(struct ath12k_link_vif *arvif)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_vif *vif = ahvif->vif;
+
+	if (!vif) {
+		arvif->debugfs_twt = NULL;
+		arvif->debugfs_rtsthreshold = NULL;
+		ahvif->debugfs_linkstats = NULL;
+		ahvif->debugfs_primary_link = NULL;
+		return;
+	}
+
+	debugfs_remove(ahvif->debugfs_primary_link);
+	ahvif->debugfs_primary_link = NULL;
+
+	if (ahvif->debugfs_linkstats) {
+		debugfs_remove(ahvif->debugfs_linkstats);
+		ahvif->debugfs_linkstats = NULL;
+	}
+
+	if (!arvif->debugfs_twt)
+		return;
+
+	debugfs_remove_recursive(arvif->debugfs_twt);
+	arvif->debugfs_twt = NULL;
+
+	if (!arvif->debugfs_rtsthreshold)
+		return;
+
+	debugfs_remove(arvif->debugfs_rtsthreshold);
+	arvif->debugfs_rtsthreshold = NULL;
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/debugfs.h	2024-03-18 14:40:14.843741115 +0100
@@ -0,0 +1,457 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH12K_DEBUGFS_H_
+#define _ATH12K_DEBUGFS_H_
+
+#include "hal_tx.h"
+
+#define ATH12K_TX_POWER_MAX_VAL	70
+#define ATH12K_TX_POWER_MIN_VAL	0
+
+#define ATH12K_DRV_TX_STATS_SIZE 1024
+
+#define ATH12K_MAX_NRPS 7
+#define MAC_UNIT_LEN 3
+
+/* htt_dbg_ext_stats_type */
+enum ath12k_dbg_htt_ext_stats_type {
+	ATH12K_DBG_HTT_EXT_STATS_RESET                      =  0,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_TX                    =  1,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_RX                    =  2,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ                =  3,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED              =  4,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR                 =  5,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM                   =  6,
+	ATH12K_DBG_HTT_EXT_STATS_TQM_CMDQ                   =  7,
+	ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO                 =  8,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE               =  9,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_RX_RATE               =  10,
+	ATH12K_DBG_HTT_EXT_STATS_PEER_INFO                  =  11,
+	ATH12K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO            =  12,
+	ATH12K_DBG_HTT_EXT_STATS_TX_MU_HWQ                  =  13,
+	ATH12K_DBG_HTT_EXT_STATS_RING_IF_INFO               =  14,
+	ATH12K_DBG_HTT_EXT_STATS_SRNG_INFO                  =  15,
+	ATH12K_DBG_HTT_EXT_STATS_SFM_INFO                   =  16,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU                 =  17,
+	ATH12K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST          =  18,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS             =  19,
+	ATH12K_DBG_HTT_EXT_STATS_TWT_SESSIONS               =  20,
+	ATH12K_DBG_HTT_EXT_STATS_REO_RESOURCE_STATS         =  21,
+	ATH12K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO           =  22,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS	    =  23,
+	ATH12K_DBG_HTT_EXT_STATS_RING_BACKPRESSURE_STATS    =  24,
+	ATH12K_DBG_HTT_EXT_STATS_LATENCY_PROF_STATS	    =  25,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_UL_TRIG_STATS	    =  26,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_UL_MUMIMO_TRIG_STATS  =  27,
+	ATH12K_DBG_HTT_EXT_STATS_FSE_RX			    =  28,
+	ATH12K_DBG_HTT_EXT_PEER_CTRL_PATH_TXRX_STATS	    =  29,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_RX_RATE_EXT	    =  30,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF	    =  31,
+	ATH12K_DBG_HTT_EXT_STATS_TXBF_OFDMA		    =  32,
+	ATH12K_DBG_HTT_EXT_STA_11AX_UL_STATS		    =  33,
+	ATH12K_DBG_HTT_EXT_VDEV_RTT_RESP_STATS		    =  34,
+	ATH12K_DBG_HTT_EXT_PKTLOG_AND_HTT_RING_STATS	    =  35,
+	ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS		    =  36,
+	ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS	    =  37,
+	ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS		    =  38,
+	ATH12K_DBG_HTT_EXT_VDEV_RTT_INITIATOR_STATS	    =  39,
+	ATH12K_DBG_HTT_EXT_PDEV_PER_STATS		    =  40,
+	ATH12K_DBG_HTT_EXT_AST_ENTRIES			    =  41,
+	ATH12K_DBG_HTT_EXT_RX_RING_STATS		    =  42,
+	ATH12K_DBG_HTT_STRM_GEN_MPDUS_STATS		    =  43,
+	ATH12K_DBG_HTT_STRM_GEN_MPDUS_DETAILS_STATS	    =  44,
+	ATH12K_DBG_HTT_DBG_SOC_ERROR_STATS		    =  45,
+	ATH12K_DBG_HTT_DBG_PDEV_PUNCTURE_STATS		    =  46,
+	ATH12K_DBG_HTT_DBG_EXT_STATS_ML_PEERS_INFO	    =  47,
+	ATH12K_DBG_HTT_DBG_ODD_MANDATORY_STATS		    =  48,
+	ATH12K_DBG_HTT_DBG_PDEV_SCHED_ALGO_STATS	    =  49,
+	ATH12K_DBG_HTT_DBG_ODD_MANDATORY_MUMIMO_STATS	    =  50,
+	ATH12K_DBG_HTT_DBG_ODD_MANDATORY_MUOFDMA_STATS	    =  51,
+	ATH12K_DBG_HTT_DBG_EXT_PHY_PROF_CAL_STATS	    =  52,
+	ATH12K_DGB_HTT_DBG_EXT_STATS_PDEV_BW_MGR	    =  53,
+	ATH12K_DGB_HTT_DBG_PDEV_MBSSID_CTRL_FRAME_STATS	    =  54,
+	ATH12K_DBG_HTT_UMAC_RESET_SSR_STATS		    =  55,
+	/* keep this last */
+	ATH12K_DBG_HTT_NUM_EXT_STATS,
+};
+
+#define ATH12K_CCK_RATES			4
+#define ATH12K_OFDM_RATES			8
+#define ATH12K_HT_RATES				8
+/* VHT rates include the extra MCS values sent by FW */
+#define ATH12K_VHT_RATES			12
+#define ATH12K_HE_RATES				12
+#define ATH12K_HE_RATES_WITH_EXTRA_MCS		14
+#define ATH12K_EHT_RATES                        16
+#define ATH12K_NSS_1				1
+#define ATH12K_NSS_4				4
+#define ATH12K_NSS_8				8
+#define TPC_STATS_WAIT_TIME			(1 * HZ)
+#define MAX_TPC_PREAM_STR_LEN			7
+/* Max negative power value to indicate error */
+#define TPC_INVAL				-128
+#define TPC_MAX					127
+#define TPC_STATS_TOT_ROW			700
+#define TPC_STATS_TOT_COLUMN			100
+#define ATH12K_TPC_STATS_BUF_SIZE   (TPC_STATS_TOT_ROW * TPC_STATS_TOT_COLUMN)
+
+enum ath12k_dbg_tpc_stats_type {
+	ATH12K_DBG_TPC_STATS_SU,
+	ATH12K_DBG_TPC_STATS_SU_WITH_TXBF,
+	ATH12K_DBG_TPC_STATS_MU,
+	ATH12K_DBG_TPC_STATS_MU_WITH_TXBF,
+	/* keep this last */
+	ATH12K_DBG_TPC_MAX_STATS,
+};
+
+enum ath12k_debug_tpc_stats_ctl_mode {
+	ATH12K_TPC_STATS_CTL_MODE_LEGACY_5G_6G,
+	ATH12K_TPC_STATS_CTL_MODE_HT_VHT20_5G_6G,
+	ATH12K_TPC_STATS_CTL_MODE_HE_EHT20_5G_6G,
+	ATH12K_TPC_STATS_CTL_MODE_HT_VHT40_5G_6G,
+	ATH12K_TPC_STATS_CTL_MODE_HE_EHT40_5G_6G,
+	ATH12K_TPC_STATS_CTL_MODE_VHT80_5G_6G,
+	ATH12K_TPC_STATS_CTL_MODE_HE_EHT80_5G_6G,
+	ATH12K_TPC_STATS_CTL_MODE_VHT160_5G_6G,
+	ATH12K_TPC_STATS_CTL_MODE_HE_EHT160_5G_6G,
+	ATH12K_TPC_STATS_CTL_MODE_HE_EHT320_5G_6G,
+	ATH12K_TPC_STATS_CTL_MODE_CCK_2G,
+	ATH12K_TPC_STATS_CTL_MODE_LEGACY_2G,
+	ATH12K_TPC_STATS_CTL_MODE_HT20_2G,
+	ATH12K_TPC_STATS_CTL_MODE_HT40_2G,
+
+	ATH12K_TPC_STATS_CTL_MODE_EHT80_SU_PUNC20 = 23,
+	ATH12K_TPC_STATS_CTL_MODE_EHT160_SU_PUNC20,
+	ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC40,
+	ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC80,
+	ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC120
+};
+
+enum ath12k_debug_tpc_stats_support_modes {
+	ATH12K_TPC_STATS_SUPPORT_160 = 0,
+	ATH12K_TPC_STATS_SUPPORT_320,
+	ATH12K_TPC_STATS_SUPPORT_AX,
+	ATH12K_TPC_STATS_SUPPORT_AX_EXTRA_MCS,
+	ATH12K_TPC_STATS_SUPPORT_BE,
+	ATH12K_TPC_STATS_SUPPORT_BE_PUNC,
+};
+
+struct debug_htt_stats_req {
+	bool done;
+	bool override_cfg_param;
+	u8 pdev_id;
+	u32 type;
+	u32 cfg_param[4];
+	u8 peer_addr[ETH_ALEN];
+	struct completion cmpln;
+	u32 buf_len;
+	u8 buf[];
+};
+
+struct ath12k_pktlog_hdr {
+	u16 flags;
+	u16 missed_cnt;
+	u16 log_type;
+	u16 size;
+	u32 timestamp;
+	u32 type_specific_data;
+	struct mlo_timestamp m_timestamp;
+	u8 payload[];
+} __packed;
+
+#define ATH12K_HTT_PEER_STATS_RESET BIT(16)
+
+#define ATH12K_HTT_STATS_BUF_SIZE (1024 * 512)
+#define ATH12K_FW_STATS_BUF_SIZE (1024 * 1024)
+
+enum ath12k_pktlog_filter {
+	ATH12K_PKTLOG_RX		= 0x000000001,
+	ATH12K_PKTLOG_TX		= 0x000000002,
+	ATH12K_PKTLOG_RCFIND		= 0x000000004,
+	ATH12K_PKTLOG_RCUPDATE		= 0x000000008,
+	ATH12K_PKTLOG_EVENT_SMART_ANT	= 0x000000020,
+	ATH12K_PKTLOG_EVENT_SW		= 0x000000040,
+	ATH12K_PKTLOG_HYBRID		= 0x000020000,
+	ATH12K_PKTLOG_ANY		= 0x00000006f,
+};
+
+enum ath12k_pktlog_mode {
+	ATH12K_PKTLOG_MODE_LITE = 1,
+	ATH12K_PKTLOG_MODE_FULL = 2,
+};
+
+enum ath12k_pktlog_enum {
+	ATH12K_PKTLOG_TYPE_TX_CTRL      = 1,
+	ATH12K_PKTLOG_TYPE_TX_STAT      = 2,
+	ATH12K_PKTLOG_TYPE_TX_MSDU_ID   = 3,
+	ATH12K_PKTLOG_TYPE_TX_FRM_HDR	= 4,
+	ATH12K_PKTLOG_TYPE_RX_STAT      = 5,
+	ATH12K_PKTLOG_TYPE_RC_FIND      = 6,
+	ATH12K_PKTLOG_TYPE_RC_UPDATE    = 7,
+	ATH12K_PKTLOG_TYPE_TX_VIRT_ADDR = 8,
+	ATH12K_PKTLOG_TYPE_DBG_PRINT	= 9,
+	ATH12K_PKTLOG_TYPE_RX_CBF       = 10,
+	ATH12K_PKTLOG_TYPE_ANI          = 11,
+	ATH12K_PKTLOG_TYPE_GRPID	= 12,
+	ATH12K_PKTLOG_TYPE_TX_MU	= 13,
+	ATH12K_PKTLOG_TYPE_SMART_ANTENNA = 14,
+	ATH12K_PKTLOG_TYPE_TX_PFSCHED_CMD = 15,
+	ATH12K_PKTLOG_TYPE_TX_FW_GENERATED1 = 19,
+	ATH12K_PKTLOG_TYPE_TX_FW_GENERATED2 = 20,
+	ATH12K_PKTLOG_TYPE_MAX = 21,
+	ATH12K_PKTLOG_TYPE_RX_STATBUF   = 22,
+	ATH12K_PKTLOG_TYPE_PPDU_STATS   = 23,
+	ATH12K_PKTLOG_TYPE_LITE_RX      = 24,
+	ATH12K_PKTLOG_TYPE_HOST_SW_EVENT = 30,
+};
+
+enum ath12k_dbg_aggr_mode {
+	ATH12K_DBG_AGGR_MODE_AUTO,
+	ATH12K_DBG_AGGR_MODE_MANUAL,
+	ATH12K_DBG_AGGR_MODE_MAX,
+};
+
+enum {
+	NRP_ACTION_ADD,
+	NRP_ACTION_DEL,
+};
+
+struct ath12k_neighbor_peer {
+	struct list_head list;
+	struct completion filter_done;
+	bool is_filter_on;
+	int vdev_id;
+	u8 addr[ETH_ALEN];
+	u8 rssi;
+	s64 timestamp;
+	bool rssi_valid;
+};
+
+/*
+ * enum qca_wlan_priority_type - priority mask
+ * This enum defines the priority mask that the user can configure
+ * over a BT traffic type, which can be passed through the
+ * QCA_WLAN_VENDOR_ATTR_BTCOEX_CONFIG_WLAN_PRIORITY attribute.
+ *
+ * @QCA_WLAN_PRIORITY_BE: Bit mask for WLAN Best effort traffic
+ * @QCA_WLAN_PRIORITY_BK: Bit mask for WLAN Background traffic
+ * @QCA_WLAN_PRIORITY_VI: Bit mask for WLAN Video traffic
+ * @QCA_WLAN_PRIORITY_VO: Bit mask for WLAN Voice traffic
+ * @QCA_WLAN_PRIORITY_BEACON: Bit mask for WLAN BEACON frame
+ * @QCA_WLAN_PRIORITY_MGMT: Bit mask for WLAN Management frame
+ */
+enum qca_wlan_priority_type {
+	QCA_WLAN_PRIORITY_BE = BIT(0),
+	QCA_WLAN_PRIORITY_BK = BIT(1),
+	QCA_WLAN_PRIORITY_VI = BIT(2),
+	QCA_WLAN_PRIORITY_VO = BIT(3),
+	QCA_WLAN_PRIORITY_BEACON = BIT(4),
+	QCA_WLAN_PRIORITY_MGMT = BIT(5),
+};
+
+#define BTCOEX_ENABLE                    1
+#define BTCOEX_DISABLE                   0
+#define BTCOEX_CONFIGURE_DEFAULT        -1
+#define BTCOEX_THREE_WIRE_MODE           1
+#define BTCOEX_PTA_MODE                  2
+#define BTCOEX_MAX_PKT_WEIGHT            255
+
+#define ATH12K_ANI_LEVEL_MAX         30
+#define ATH12K_ANI_LEVEL_MIN         -5
+#define ATH12K_ANI_LEVEL_AUTO        128
+#define ATH12K_ANI_POLL_PERIOD_MAX   3000
+#define ATH12K_ANI_LISTEN_PERIOD_MAX 3000
+
+void ath12k_wmi_crl_path_stats_list_free(struct ath12k *ar, struct list_head *head);
+
+#ifdef CONFIG_ATH12K_DEBUGFS
+int ath12k_debugfs_soc_create(struct ath12k_base *ab);
+void ath12k_debugfs_soc_destroy(struct ath12k_base *ab);
+int ath12k_debugfs_pdev_create(struct ath12k_base *ab);
+void ath12k_debugfs_pdev_destroy(struct ath12k_base *ab);
+int ath12k_debugfs_register(struct ath12k *ar);
+void ath12k_debugfs_unregister(struct ath12k *ar);
+int ath12k_debugfs_create(void);
+void ath12k_debugfs_destroy(void);
+void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
+				     struct ath12k_fw_stats *stats);
+
+void ath12k_debugfs_fw_stats_init(struct ath12k *ar);
+
+void ath12k_debugfs_nrp_clean(struct ath12k *ar, const u8 *addr);
+void ath12k_debugfs_nrp_cleanup_all(struct ath12k *ar);
+
+static inline bool ath12k_debugfs_is_pktlog_lite_mode_enabled(struct ath12k *ar)
+{
+	return (ar->debug.pktlog_mode == ATH12K_PKTLOG_MODE_LITE);	/* true iff packet log is in lite mode */
+}
+
+static inline bool ath12k_debugfs_is_pktlog_rx_stats_enabled(struct ath12k *ar)
+{
+	return (!ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode);	/* pktlog enabled and not restricted to a single peer */
+}
+
+static inline bool ath12k_debugfs_is_pktlog_peer_valid(struct ath12k *ar, u8 *addr)
+{
+	return (ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode &&	/* peer filter armed and pktlog enabled */
+		ether_addr_equal(addr, ar->debug.pktlog_peer_addr));	/* addr matches the filtered peer MAC */
+}
+
+static inline int ath12k_debugfs_is_extd_tx_stats_enabled(struct ath12k *ar)
+{
+	return ar->debug.extd_tx_stats;	/* non-zero when extended TX stats collection is enabled via debugfs */
+}
+
+static inline int ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
+{
+	return ar->debug.extd_rx_stats;	/* non-zero when extended RX stats collection is enabled via debugfs */
+}
+
+static inline int ath12k_debugfs_rx_filter(struct ath12k *ar)
+{
+	return ar->debug.rx_filter;	/* debugfs-configured RX filter value */
+}
+
+void ath12k_debugfs_add_interface(struct ath12k_link_vif *arvif);
+void ath12k_debugfs_remove_interface(struct ath12k_link_vif *arvif);
+
+#else
+static inline int ath12k_debugfs_create(void)
+{
+	return 0;
+}
+
+static inline void ath12k_debugfs_destroy(void)
+{
+}
+
+static inline int ath12k_debugfs_soc_create(struct ath12k_base *ab)
+{
+	return 0;
+}
+
+static inline void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
+{
+}
+
+static inline int ath12k_debugfs_pdev_create(struct ath12k_base *ab)
+{
+	return 0;
+}
+
+static inline void ath12k_debugfs_pdev_destroy(struct ath12k_base *ab)
+{
+}
+
+static inline int ath12k_debugfs_register(struct ath12k *ar)
+{
+	return 0;
+}
+
+static inline void ath12k_debugfs_unregister(struct ath12k *ar)
+{
+}
+
+static inline void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
+						   struct ath12k_fw_stats *stats)
+{
+}
+
+static inline void ath12k_debugfs_fw_stats_init(struct ath12k *ar)
+{
+}
+
+static inline int ath12k_debugfs_is_extd_tx_stats_enabled(struct ath12k *ar)
+{
+	return 0;
+}
+
+static inline int ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
+{
+	return 0;
+}
+
+static inline bool ath12k_debugfs_is_pktlog_lite_mode_enabled(struct ath12k *ar)
+{
+	return false;
+}
+
+static inline bool ath12k_debugfs_is_pktlog_rx_stats_enabled(struct ath12k *ar)
+{
+	return false;
+}
+
+static inline bool ath12k_debugfs_is_pktlog_peer_valid(struct ath12k *ar, u8 *addr)
+{
+	return false;
+}
+
+static inline int ath12k_debugfs_rx_filter(struct ath12k *ar)
+{
+	return 0;
+}
+
+static inline void ath12k_debugfs_add_interface(struct ath12k_link_vif *arvif)
+{
+}
+
+static inline void ath12k_debugfs_remove_interface(struct ath12k_link_vif *arvif)
+{
+}
+
+static inline void ath12k_debugfs_nrp_clean(struct ath12k *ar, const u8 *addr)
+{
+}
+
+static inline void ath12k_debugfs_nrp_cleanup_all(struct ath12k *ar)
+{
+}
+#endif /* CONFIG_ATH12K_DEBUGFS */
+
+#ifdef CONFIG_ATH12K_PKTLOG
+void ath12k_init_pktlog(struct ath12k *ar);
+void ath12k_deinit_pktlog(struct ath12k *ar);
+void ath12k_htt_pktlog_process(struct ath12k *ar, u8 *data);
+void ath12k_htt_ppdu_pktlog_process(struct ath12k *ar, u8 *data, u32 len);
+void ath12k_rx_stats_buf_pktlog_process(struct ath12k *ar, u8 *data,
+					u16 log_type, u32 len);
+#else
+static inline void ath12k_init_pktlog(struct ath12k *ar) { }
+
+static inline void ath12k_deinit_pktlog(struct ath12k *ar) { }
+
+static inline void ath12k_htt_pktlog_process(struct ath12k *ar,
+					     u8 *data)
+{
+}
+
+static inline void ath12k_htt_ppdu_pktlog_process(struct ath12k *ar,
+						  u8 *data, u32 len)
+{
+}
+
+static inline void ath12k_rx_stats_buf_pktlog_process(struct ath12k *ar,
+						      u8 *data, u16 log_type, u32 len) { }
+#endif /* CONFIG_ATH12K_PKTLOG */
+
+
+#ifdef CONFIG_ATH12K_SAWF
+static inline unsigned int ath12k_debugfs_is_sawf_stats_enabled(struct ath12k *ar)
+{
+	return ar->debug.sawf_stats;
+}
+
+#else
+
+static inline unsigned int ath12k_debugfs_is_sawf_stats_enabled(struct ath12k *ar)
+{
+	return 0;
+}
+
+#endif
+
+#endif /* _ATH12K_DEBUGFS_H_ */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c	2024-03-18 14:40:14.847741224 +0100
@@ -0,0 +1,9258 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "debug.h"
+#include "debugfs_htt_stats.h"
+
+#define HTT_MAX_STRING_LEN 256
+#define HTT_MAX_PRINT_CHAR_PER_ELEM 15
+#define HTT_HISTOGRAM_STATS_LEN 512
+
+
+#define PRINT_ARRAY_TO_BUF(out, buflen, arr, str, len, newline) /* append "str = 0:v0, 1:v1, ..." + newline at out+buflen; advances buflen */				\
+	do {										\
+		int index = 0; u8 i; const char *str_val = str; /* NOTE(review): u8 counter assumes len <= 255 — confirm all callers */				\
+		const char *new_line = newline;						\
+		if (str_val) {								\
+			index += scnprintf((out + buflen),				\
+				 (ATH12K_HTT_STATS_BUF_SIZE - buflen),			\
+				 "%s = ", str_val);					\
+		}									\
+		for (i = 0; i < len; i++) {						\
+			index += scnprintf((out + buflen) + index,			\
+				 (ATH12K_HTT_STATS_BUF_SIZE - buflen) - index,		\
+				 " %u:%u,", i, arr[i]);					\
+		}									\
+		index += scnprintf((out + buflen) + index,				\
+			 (ATH12K_HTT_STATS_BUF_SIZE - buflen) - index,			\
+			  "%s", new_line);						\
+		buflen += index;							\
+	} while (0)
+
+static inline void htt_print_stats_string_tlv(const void *tag_buf,
+					      u16 tag_len,
+					      struct debug_htt_stats_req *stats_req)	/* render the firmware string TLV into the stats buffer */
+{
+	const struct htt_stats_string_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8  i;
+	u16 index = 0;
+	char data[HTT_MAX_STRING_LEN] = {0};
+
+	tag_len = tag_len >> 2;	/* byte length -> number of 32-bit words */
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:\n");
+
+	for (i = 0; i < tag_len; i++) {	/* NOTE(review): u8 counter wraps for tag_len > 255 — confirm TLV size bound */
+		index += scnprintf(&data[index],
+				HTT_MAX_STRING_LEN - index,
+				"%.*s", 4, (char *)&(htt_stats_buf->data[i]));	/* 4 chars per word */
+		if (index >= HTT_MAX_STRING_LEN)	/* NOTE(review): scnprintf caps index at HTT_MAX_STRING_LEN - 1, so this break looks unreachable — confirm */
+			break;
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "data = %s\n", data);
+
+	stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf,
+						   struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_stats_cmn_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "comp_delivered = %u\n",
+			 htt_stats_buf->comp_delivered);
+	len += scnprintf(buf + len, buf_len - len, "self_triggers = %u\n",
+			 htt_stats_buf->self_triggers);
+	len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
+			 htt_stats_buf->hw_queued);
+	len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
+			 htt_stats_buf->hw_reaped);
+	len += scnprintf(buf + len, buf_len - len, "underrun = %u\n",
+			 htt_stats_buf->underrun);
+	len += scnprintf(buf + len, buf_len - len, "hw_paused = %u\n",
+			 htt_stats_buf->hw_paused);
+	len += scnprintf(buf + len, buf_len - len, "hw_flush = %u\n",
+			 htt_stats_buf->hw_flush);
+	len += scnprintf(buf + len, buf_len - len, "hw_filt = %u\n",
+			 htt_stats_buf->hw_filt);
+	len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+			 htt_stats_buf->tx_abort);
+	len += scnprintf(buf + len, buf_len - len, "ppdu_ok = %u\n",
+			 htt_stats_buf->ppdu_ok);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_requeued = %u\n",
+			 htt_stats_buf->mpdu_requed);
+	len += scnprintf(buf + len, buf_len - len, "tx_xretry = %u\n",
+			 htt_stats_buf->tx_xretry);
+	len += scnprintf(buf + len, buf_len - len, "data_rc = %u\n",
+			 htt_stats_buf->data_rc);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_dropped_xretry = %u\n",
+			 htt_stats_buf->mpdu_dropped_xretry);
+	len += scnprintf(buf + len, buf_len - len, "illegal_rate_phy_err = %u\n",
+			 htt_stats_buf->illgl_rate_phy_err);
+	len += scnprintf(buf + len, buf_len - len, "cont_xretry = %u\n",
+			 htt_stats_buf->cont_xretry);
+	len += scnprintf(buf + len, buf_len - len, "tx_timeout = %u\n",
+			 htt_stats_buf->tx_timeout);
+	len += scnprintf(buf + len, buf_len - len, "tx_time_dur_data = %u\n",
+			 htt_stats_buf->tx_time_dur_data);
+	len += scnprintf(buf + len, buf_len - len, "pdev_resets = %u\n",
+			 htt_stats_buf->pdev_resets);
+	len += scnprintf(buf + len, buf_len - len, "phy_underrun = %u\n",
+			 htt_stats_buf->phy_underrun);
+	len += scnprintf(buf + len, buf_len - len, "txop_ovf = %u\n",
+			 htt_stats_buf->txop_ovf);
+	len += scnprintf(buf + len, buf_len - len, "seq_posted = %u\n",
+			 htt_stats_buf->seq_posted);
+	len += scnprintf(buf + len, buf_len - len, "seq_failed_queueing = %u\n",
+			 htt_stats_buf->seq_failed_queueing);
+	len += scnprintf(buf + len, buf_len - len, "seq_completed = %u\n",
+			 htt_stats_buf->seq_completed);
+	len += scnprintf(buf + len, buf_len - len, "seq_restarted = %u\n",
+			 htt_stats_buf->seq_restarted);
+	len += scnprintf(buf + len, buf_len - len, "seq_txop_repost_stop = %u\n",
+			 htt_stats_buf->seq_txop_repost_stop);
+	len += scnprintf(buf + len, buf_len - len, "next_seq_cancel = %u\n",
+			 htt_stats_buf->next_seq_cancel);
+	len += scnprintf(buf + len, buf_len - len, "dl_mu_mimo_seq_posted = %u\n",
+			 htt_stats_buf->mu_seq_posted);
+	len += scnprintf(buf + len, buf_len - len, "dl_mu_ofdma_seq_posted = %u\n",
+			 htt_stats_buf->mu_ofdma_seq_posted);
+	len += scnprintf(buf + len, buf_len - len, "ul_mu_mimo_seq_posted = %u\n",
+			 htt_stats_buf->ul_mumimo_seq_posted);
+	len += scnprintf(buf + len, buf_len - len, "ul_mu_ofdma_seq_posted = %u\n",
+			 htt_stats_buf->ul_ofdma_seq_posted);
+	len += scnprintf(buf + len, buf_len - len, "mu_mimo_peer_blacklisted = %u\n",
+			 htt_stats_buf->num_mu_peer_blacklisted);
+	len += scnprintf(buf + len, buf_len - len, "seq_qdepth_repost_stop = %u\n",
+			 htt_stats_buf->seq_qdepth_repost_stop);
+	len += scnprintf(buf + len, buf_len - len, "seq_min_msdu_repost_stop = %u\n",
+			 htt_stats_buf->seq_min_msdu_repost_stop);
+	len += scnprintf(buf + len, buf_len - len, "mu_seq_min_msdu_repost_stop = %u\n",
+			 htt_stats_buf->mu_seq_min_msdu_repost_stop);
+	len += scnprintf(buf + len, buf_len - len, "seq_switch_hw_paused = %u\n",
+			 htt_stats_buf->seq_switch_hw_paused);
+	len += scnprintf(buf + len, buf_len - len, "next_seq_posted_dsr = %u\n",
+			 htt_stats_buf->next_seq_posted_dsr);
+	len += scnprintf(buf + len, buf_len - len, "seq_posted_isr = %u\n",
+			 htt_stats_buf->seq_posted_isr);
+	len += scnprintf(buf + len, buf_len - len, "seq_ctrl_cached = %u\n",
+			 htt_stats_buf->seq_ctrl_cached);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_count_tqm = %u\n",
+			 htt_stats_buf->mpdu_count_tqm);
+	len += scnprintf(buf + len, buf_len - len, "msdu_count_tqm = %u\n",
+			 htt_stats_buf->msdu_count_tqm);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_removed_tqm = %u\n",
+			 htt_stats_buf->mpdu_removed_tqm);
+	len += scnprintf(buf + len, buf_len - len, "msdu_removed_tqm = %u\n",
+			 htt_stats_buf->msdu_removed_tqm);
+	len += scnprintf(buf + len, buf_len - len, "remove_mpdus_max_retries = %u\n",
+			 htt_stats_buf->remove_mpdus_max_retries);
+	len += scnprintf(buf + len, buf_len - len, "mpdus_sw_flush = %u\n",
+			 htt_stats_buf->mpdus_sw_flush);
+	len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
+			 htt_stats_buf->mpdus_hw_filter);
+	len += scnprintf(buf + len, buf_len - len, "mpdus_truncated = %u\n",
+			 htt_stats_buf->mpdus_truncated);
+	len += scnprintf(buf + len, buf_len - len, "mpdus_ack_failed = %u\n",
+			 htt_stats_buf->mpdus_ack_failed);
+	len += scnprintf(buf + len, buf_len - len, "mpdus_expired = %u\n",
+			 htt_stats_buf->mpdus_expired);
+	len += scnprintf(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u\n",
+			 htt_stats_buf->mpdus_seq_hw_retry);
+	len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+			 htt_stats_buf->ack_tlv_proc);
+	len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u\n",
+			 htt_stats_buf->coex_abort_mpdu_cnt_valid);
+	len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u\n",
+			 htt_stats_buf->coex_abort_mpdu_cnt);
+	len += scnprintf(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u\n",
+			 htt_stats_buf->num_total_ppdus_tried_ota);
+	len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u\n",
+			 htt_stats_buf->num_data_ppdus_tried_ota);
+	len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u\n",
+			 htt_stats_buf->local_ctrl_mgmt_enqued);
+	len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u\n",
+			 htt_stats_buf->local_ctrl_mgmt_freed);
+	len += scnprintf(buf + len, buf_len - len, "local_data_enqued = %u\n",
+			 htt_stats_buf->local_data_enqued);
+	len += scnprintf(buf + len, buf_len - len, "local_data_freed = %u\n",
+			 htt_stats_buf->local_data_freed);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_tried = %u\n",
+			 htt_stats_buf->mpdu_tried);
+	len += scnprintf(buf + len, buf_len - len, "isr_wait_seq_posted = %u\n",
+			 htt_stats_buf->isr_wait_seq_posted);
+	len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_low = %u\n",
+			 htt_stats_buf->tx_active_dur_us_low);
+	len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n",
+			 htt_stats_buf->tx_active_dur_us_high);
+	len += scnprintf(buf + len, buf_len - len, "fes_offsets_err_cnt = %u\n",
+			 htt_stats_buf->fes_offsets_err_cnt);
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_urrn_tlv_v(const void *tag_buf,
+				   u16 tag_len,
+				   struct debug_htt_stats_req *stats_req)	/* dump the pdev underrun (urrn) counter array */
+{
+	const struct htt_tx_pdev_stats_urrn_tlv_v *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_URRN_STATS);	/* clamp TLV word count to the array size */
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_URRN_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->urrn_stats, "urrn_stats",
+			   num_elems, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_flush_tlv_v(const void *tag_buf,
+				    u16 tag_len,
+				    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_stats_flush_tlv_v *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_FLUSH_REASON_STATS);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_FLUSH_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->flush_errs, "flush_errs",
+			   num_elems, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_sifs_tlv_v(const void *tag_buf,
+				   u16 tag_len,
+				   struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_stats_sifs_tlv_v *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_STATS);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SIFS_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_status, "sifs_status",
+			   num_elems, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_phy_err_tlv_v(const void *tag_buf,
+				      u16 tag_len,
+				      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_stats_phy_err_tlv_v *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_PHY_ERR_STATS);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_errs, "phy_errs",
+			   num_elems, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_sifs_hist_tlv_v(const void *tag_buf,
+					u16 tag_len,
+					struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_stats_sifs_hist_tlv_v *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_PDEV_STATS_SIFS_HIST_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_hist_status,
+			   "sifs_hist_status", num_elems, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(const void *tag_buf,
+					    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_PDEV_STATS_TX_PPDU_STATS_TLV_V:\n");
+
+	len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_legacy_su = %u\n",
+			 htt_stats_buf->num_data_ppdus_legacy_su);
+
+	len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su = %u\n",
+			 htt_stats_buf->num_data_ppdus_ac_su);
+
+	len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su = %u\n",
+			 htt_stats_buf->num_data_ppdus_ax_su);
+
+	len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su_txbf = %u\n",
+			 htt_stats_buf->num_data_ppdus_ac_su_txbf);
+
+	len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su_txbf = %u\n",
+			 htt_stats_buf->num_data_ppdus_ax_su_txbf);
+
+	stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_tx_pdev_mu_ppdu_dist_stats_tlv_v(const void *tag_buf,
+						struct debug_htt_stats_req *stats_req)	/* render MU-MIMO PPDU distribution counters, "ac"/"ax" prefixed */
+{
+	const struct htt_tx_pdev_mu_ppdu_dist_stats_tlv_v *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8 max = HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST;
+	char hw_mode_prefix[][3] = {"ac", "ax"};
+	u8 j = 0, k = 0;	/* k indexes hw_mode_prefix: 0 = "ac", 1 = "ax" */
+
+	if (htt_stats_buf->hw_mode == HTT_STATS_HWMODE_AC) {	/* section banner only emitted for the AC TLV — presumably it arrives first; confirm */
+		len += scnprintf(buf + len, buf_len - len,
+				 "\nHTT_TX_PDEV_MU_PPDU_DISTRIBUTION_STATS:\n");
+	} else if (htt_stats_buf->hw_mode == HTT_STATS_HWMODE_AX) {
+		k = 1;
+	} else {
+		/* hw_mode not recognized */
+		return;
+	}
+
+	for (j = 0; j < HTT_STATS_NUM_NR_BINS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "%s_mu_mimo_num_seq_posted_nr4 %u:%u\n",
+				 hw_mode_prefix[k], j,
+				 htt_stats_buf->mu_mimo_num_seq_posted[j]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "%s_mu_mimo_num_seq_posted_nr8 %u:%u\n",
+				 hw_mode_prefix[k], j,
+				 htt_stats_buf->mu_mimo_num_seq_posted[j]);	/* NOTE(review): nr8 reuses [j]; the loops below use [max + j] for nr8 — looks like a copy-paste, confirm struct layout */
+	}
+
+	for (j = 0; j < HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "%s_mu_mimo_num_ppdu_posted_per_burst_nr4 %u:%u,\n",
+				 hw_mode_prefix[k], j,
+				 htt_stats_buf->mu_mimo_num_ppdu_posted_per_burst[j]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "%s_mu_mimo_num_ppdu_posted_per_burst_nr8 %u:%u,\n",
+				 hw_mode_prefix[k], j,
+				 htt_stats_buf->mu_mimo_num_ppdu_posted_per_burst
+						[max + j]);	/* nr8 counters live in the second half of the array */
+	}
+
+	for (j = 0; j < HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "%s_mu_mimo_num_ppdu_completed_per_burst_nr4 %u:%u,\n",
+				 hw_mode_prefix[k], j,
+				 htt_stats_buf->mu_mimo_num_ppdu_completed_per_burst[j]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "%s_mu_mimo_num_ppdu_completed_per_burst_nr8 %u:%u,\n",
+				 hw_mode_prefix[k], j,
+				 htt_stats_buf->mu_mimo_num_ppdu_completed_per_burst
+						 [max + j]);
+	}
+
+	for (j = 0; j < HTT_STATS_MAX_NUM_SCHED_STATUS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "%s_mu_mimo_num_seq_term_status_nr4 %u:%u,\n",
+				 hw_mode_prefix[k], j,
+				 htt_stats_buf->mu_mimo_num_seq_term_status[j]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "%s_mu_mimo_num_seq_term_status_nr8 %u:%u,\n",
+				 hw_mode_prefix[k], j,
+				 htt_stats_buf->mu_mimo_num_seq_term_status
+						[HTT_STATS_MAX_NUM_SCHED_STATUS + j]);
+	}
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
+						  u16 tag_len,
+						  struct debug_htt_stats_req *stats_req)	/* dump the tried-MPDU count histogram */
+{
+	const struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32  num_elements = ((tag_len - sizeof(htt_stats_buf->hist_bin_size)) >> 2);	/* words after the bin-size field */
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_PDEV_STATS_TRIED_MPDU_CNT_HIST_TLV_V:\n");
+	len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n",
+			 htt_stats_buf->hist_bin_size);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tried_mpdu_cnt_hist,
+			   "tried_mpdu_cnt_hist", num_elements, "\n\n");	/* bare label: the macro itself appends " = " (old "tried_mpdu_cnt_hist = %s\n" printed a literal %s) */
+
+	stats_req->buf_len = len;
+}
+
+static inline void htt_print_hw_stats_intr_misc_tlv(const void *tag_buf,
+						    struct debug_htt_stats_req *stats_req)	/* dump misc HW interrupt name/mask/count */
+{
+	const struct htt_hw_stats_intr_misc_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	char hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN + 1] = {0};	/* +1 and zero-init guarantee NUL termination */
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n");
+	memcpy(hw_intr_name, &(htt_stats_buf->hw_intr_name[0]),
+	       HTT_STATS_MAX_HW_INTR_NAME_LEN);	/* firmware name field may not be NUL-terminated */
+	len += scnprintf(buf + len, buf_len - len, "hw_intr_name = %s\n", hw_intr_name);
+	len += scnprintf(buf + len, buf_len - len, "mask = %u\n",
+			 htt_stats_buf->mask);
+	len += scnprintf(buf + len, buf_len - len, "count = %u\n",
+			 htt_stats_buf->count);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_HW_STATS_WD_TIMEOUT_TLV: watchdog-timeout count for one HW
+ * module.  As with the interrupt-misc TLV, the module name is copied into a
+ * zero-initialized local buffer to guarantee NUL termination before %s.
+ */
+static inline void
+htt_print_hw_stats_wd_timeout_tlv(const void *tag_buf,
+				  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_hw_stats_wd_timeout_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	char hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN + 1] = {0};
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:\n");
+	memcpy(hw_module_name, &(htt_stats_buf->hw_module_name[0]),
+	       HTT_STATS_MAX_HW_MODULE_NAME_LEN);
+	len += scnprintf(buf + len, buf_len - len, "hw_module_name = %s\n",
+			 hw_module_name);
+	len += scnprintf(buf + len, buf_len - len, "count = %u\n",
+			 htt_stats_buf->count);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_HW_STATS_PDEV_ERRS_TLV: pdev-level abort/flush/reset counters,
+ * followed by the PHY warm-reset reason counters and the WAL RX recovery
+ * statistics.  mac_id is a bit-field packed into mac_id__word.
+ */
+static inline void htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf,
+						    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_hw_stats_pdev_errs_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+			 htt_stats_buf->tx_abort);
+	len += scnprintf(buf + len, buf_len - len, "tx_abort_fail_count = %u\n",
+			 htt_stats_buf->tx_abort_fail_count);
+	len += scnprintf(buf + len, buf_len - len, "rx_abort = %u\n",
+			 htt_stats_buf->rx_abort);
+	len += scnprintf(buf + len, buf_len - len, "rx_abort_fail_count = %u\n",
+			 htt_stats_buf->rx_abort_fail_count);
+	len += scnprintf(buf + len, buf_len - len, "rx_flush_cnt = %u\n",
+			 htt_stats_buf->rx_flush_cnt);
+	len += scnprintf(buf + len, buf_len - len, "warm_reset = %u\n",
+			 htt_stats_buf->warm_reset);
+	len += scnprintf(buf + len, buf_len - len, "cold_reset = %u\n",
+			 htt_stats_buf->cold_reset);
+	len += scnprintf(buf + len, buf_len - len, "mac_cold_reset_restore_cal = %u\n",
+			 htt_stats_buf->mac_cold_reset_restore_cal);
+	len += scnprintf(buf + len, buf_len - len, "mac_cold_reset = %u\n",
+			 htt_stats_buf->mac_cold_reset);
+	len += scnprintf(buf + len, buf_len - len, "mac_warm_reset = %u\n",
+			 htt_stats_buf->mac_warm_reset);
+	len += scnprintf(buf + len, buf_len - len, "mac_only_reset = %u\n",
+			 htt_stats_buf->mac_only_reset);
+	len += scnprintf(buf + len, buf_len - len, "phy_warm_reset = %u\n",
+			 htt_stats_buf->phy_warm_reset);
+	len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_ucode_trig = %u\n",
+			 htt_stats_buf->phy_warm_reset_ucode_trig);
+	len += scnprintf(buf + len, buf_len - len, "mac_warm_reset_restore_cal = %u\n",
+			 htt_stats_buf->mac_warm_reset_restore_cal);
+	len += scnprintf(buf + len, buf_len - len, "mac_sfm_reset = %u\n",
+			 htt_stats_buf->mac_sfm_reset);
+	len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_m3_ssr = %u\n",
+			 htt_stats_buf->phy_warm_reset_m3_ssr);
+	len += scnprintf(buf + len, buf_len - len, "fw_rx_rings_reset = %u\n",
+			 htt_stats_buf->fw_rx_rings_reset);
+	len += scnprintf(buf + len, buf_len - len, "tx_flush = %u\n",
+			 htt_stats_buf->tx_flush);
+	len += scnprintf(buf + len, buf_len - len, "tx_glb_reset = %u\n",
+			 htt_stats_buf->tx_glb_reset);
+	len += scnprintf(buf + len, buf_len - len, "tx_txq_reset = %u\n",
+			 htt_stats_buf->tx_txq_reset);
+	len += scnprintf(buf + len, buf_len - len, "rx_timeout_reset = %u\n",
+			 htt_stats_buf->rx_timeout_reset);
+	/* Section: breakdown of why PHY warm resets were triggered. */
+	len += scnprintf(buf + len, buf_len - len, "PDEV_PHY_WARM_RESET_REASONS:\n");
+	len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_reason_phy_m3 = %u\n",
+			 htt_stats_buf->phy_warm_reset_reason_phy_m3);
+	len += scnprintf(buf + len, buf_len - len,
+			 "phy_warm_reset_reason_tx_hw_stuck = %u\n",
+			 htt_stats_buf->phy_warm_reset_reason_tx_hw_stuck);
+	len += scnprintf(buf + len, buf_len - len,
+			 "phy_warm_reset_reason_num_cca_rx_frame_stuck = %u\n",
+			 htt_stats_buf->phy_warm_reset_reason_num_cca_rx_frame_stuck);
+	len += scnprintf(buf + len, buf_len - len,
+			 "phy_warm_reset_reason_wal_rx_recovery_rst_rx_busy = %u\n",
+			 htt_stats_buf->phy_warm_reset_reason_wal_rx_recovery_rst_rx_busy);
+	len += scnprintf(buf + len, buf_len - len,
+			 "phy_warm_reset_reason_wal_rx_recovery_rst_mac_hang = %u\n",
+			 htt_stats_buf->phy_warm_reset_reason_wal_rx_recovery_rst_mac_hang);
+	len += scnprintf(buf + len, buf_len - len,
+			 "phy_warm_reset_reason_mac_reset_converted_phy_reset = %u\n",
+			 htt_stats_buf->phy_warm_reset_reason_mac_reset_converted_phy_reset);
+	len += scnprintf(buf + len, buf_len - len,
+			 "phy_warm_reset_reason_tx_lifetime_expiry_cca_stuck = %u\n",
+			 htt_stats_buf->phy_warm_reset_reason_tx_lifetime_expiry_cca_stuck);
+	len += scnprintf(buf + len, buf_len - len,
+			 "phy_warm_reset_reason_tx_consecutive_flush9_war = %u\n",
+			 htt_stats_buf->phy_warm_reset_reason_tx_consecutive_flush9_war);
+	len += scnprintf(buf + len, buf_len - len,
+			 "phy_warm_reset_reason_tx_hwsch_reset_war = %u\n",
+			 htt_stats_buf->phy_warm_reset_reason_tx_hwsch_reset_war);
+	len += scnprintf(buf + len, buf_len - len,
+			 "phy_warm_reset_reason_hwsch_wdog_or_cca_wdog_war = %u\n\n",
+			 htt_stats_buf->phy_warm_reset_reason_hwsch_wdog_or_cca_wdog_war);
+
+	/* Section: WAL RX recovery reset counters. */
+	len += scnprintf(buf + len, buf_len - len, "WAL_RX_RECOVERY_STATS:\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "wal_rx_recovery_rst_mac_hang_count = %u\n",
+			 htt_stats_buf->wal_rx_recovery_rst_mac_hang_count);
+	len += scnprintf(buf + len, buf_len - len,
+			 "wal_rx_recovery_rst_known_sig_count = %u\n",
+			 htt_stats_buf->wal_rx_recovery_rst_known_sig_count);
+	len += scnprintf(buf + len, buf_len - len,
+			 "wal_rx_recovery_rst_no_rx_count = %u\n",
+			 htt_stats_buf->wal_rx_recovery_rst_no_rx_count);
+	len += scnprintf(buf + len, buf_len - len,
+			 "wal_rx_recovery_rst_no_rx_consecutive_count = %u\n",
+			 htt_stats_buf->wal_rx_recovery_rst_no_rx_consecutive_count);
+	len += scnprintf(buf + len, buf_len - len,
+			 "wal_rx_recovery_rst_rx_busy_count = %u\n",
+			 htt_stats_buf->wal_rx_recovery_rst_rx_busy_count);
+	len += scnprintf(buf + len, buf_len - len,
+			 "wal_rx_recovery_rst_phy_mac_hang_count = %u\n",
+			 htt_stats_buf->wal_rx_recovery_rst_phy_mac_hang_count);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_MSDU_FLOW_STATS_TLV: per-flow MSDU queue statistics.
+ * tx_flow_no, tid_num and drop_rule are bit-fields packed into
+ * tx_flow_no__tid_num__drop_rule.
+ *
+ * Fix: the TLV header format string was missing its trailing newline, so
+ * "last_update_timestamp" was glued onto the header line; every sibling
+ * TLV printer terminates the header with '\n'.
+ */
+static inline void htt_print_msdu_flow_stats_tlv(const void *tag_buf,
+						 struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_msdu_flow_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 drop_rule = __le32_to_cpu(htt_stats_buf->tx_flow_no__tid_num__drop_rule);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_MSDU_FLOW_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "last_update_timestamp = %u\n",
+			 htt_stats_buf->last_update_timestamp);
+	len += scnprintf(buf + len, buf_len - len, "last_add_timestamp = %u\n",
+			 htt_stats_buf->last_add_timestamp);
+	len += scnprintf(buf + len, buf_len - len, "last_remove_timestamp = %u\n",
+			 htt_stats_buf->last_remove_timestamp);
+	len += scnprintf(buf + len, buf_len - len, "total_processed_msdu_count = %u\n",
+			 htt_stats_buf->total_processed_msdu_count);
+	len += scnprintf(buf + len, buf_len - len, "cur_msdu_count_in_flowq = %u\n",
+			 htt_stats_buf->cur_msdu_count_in_flowq);
+	len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+			 htt_stats_buf->sw_peer_id);
+	len += scnprintf(buf + len, buf_len - len, "tx_flow_no = %u\n",
+			 u32_get_bits(drop_rule, HTT_MSDU_FLOW_STATS_TX_FLOW_NO));
+	len += scnprintf(buf + len, buf_len - len, "tid_num = %u\n",
+			 u32_get_bits(drop_rule, HTT_MSDU_FLOW_STATS_TID_NUM));
+	len += scnprintf(buf + len, buf_len - len, "drop_rule = %u\n",
+			 u32_get_bits(drop_rule, HTT_MSDU_FLOW_STATS_DROP_RULE));
+	len += scnprintf(buf + len, buf_len - len, "last_cycle_enqueue_count = %u\n",
+			 htt_stats_buf->last_cycle_enqueue_count);
+	len += scnprintf(buf + len, buf_len - len, "last_cycle_dequeue_count = %u\n",
+			 htt_stats_buf->last_cycle_dequeue_count);
+	len += scnprintf(buf + len, buf_len - len, "last_cycle_drop_count = %u\n",
+			 htt_stats_buf->last_cycle_drop_count);
+	len += scnprintf(buf + len, buf_len - len, "current_drop_th = %u\n",
+			 htt_stats_buf->current_drop_th);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_TX_TID_STATS_TLV: per-TID TX queue statistics.  sw_peer_id and
+ * tid_num are packed into sw_peer_id__tid_num; num_sched_pending and
+ * num_ppdu_in_hwq are packed into num_sched_pending__num_ppdu_in_hwq.
+ *
+ * Fix: num_sched_pending was being extracted from the sw_peer_id__tid_num
+ * word instead of num_sched_pending__num_ppdu_in_hwq — the field name (and
+ * the V1 variant of this printer) show it lives in the latter word.
+ */
+static inline void htt_print_tx_tid_stats_tlv(const void *tag_buf,
+					      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_tid_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	/* tid_name in the TLV is not NUL terminated; copy into a zeroed
+	 * local buffer before printing with %s.
+	 */
+	char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+	u32 tid_num = __le32_to_cpu(htt_stats_buf->sw_peer_id__tid_num);
+	u32 num_ppdu = __le32_to_cpu(htt_stats_buf->num_sched_pending__num_ppdu_in_hwq);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_TLV:\n");
+	memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+	len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+	len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+			 u32_get_bits(tid_num, HTT_TX_TID_STATS_SW_PEER_ID));
+	len += scnprintf(buf + len, buf_len - len, "tid_num = %u\n",
+			 u32_get_bits(tid_num, HTT_TX_TID_STATS_TID_NUM));
+	len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %u\n",
+			 u32_get_bits(num_ppdu, HTT_TX_TID_STATS_NUM_SCHED_PENDING));
+	len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %u\n",
+			 u32_get_bits(num_ppdu, HTT_TX_TID_STATS_NUM_PPDU_IN_HWQ));
+	len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n",
+			 htt_stats_buf->tid_flags);
+	len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
+			 htt_stats_buf->hw_queued);
+	len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
+			 htt_stats_buf->hw_reaped);
+	len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
+			 htt_stats_buf->mpdus_hw_filter);
+	len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n",
+			 htt_stats_buf->qdepth_bytes);
+	len += scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n",
+			 htt_stats_buf->qdepth_num_msdu);
+	len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n",
+			 htt_stats_buf->qdepth_num_mpdu);
+	len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n",
+			 htt_stats_buf->last_scheduled_tsmp);
+	len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n",
+			 htt_stats_buf->pause_module_id);
+	len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n",
+			 htt_stats_buf->block_module_id);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_TX_TID_STATS_V1_TLV: extended per-TID TX queue statistics.
+ * Compared to the base TLV this adds max_qdepth_bytes/max_qdepth_n_msdus,
+ * allow_n_flags and sendn_frms_allowed.  sw_peer_id/tid_num and
+ * num_sched_pending/num_ppdu_in_hwq are bit-field pairs in their
+ * respective words.
+ */
+static inline void htt_print_tx_tid_stats_v1_tlv(const void *tag_buf,
+						 struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_tid_stats_v1_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	/* zeroed, one byte larger than the TLV field, to guarantee NUL term */
+	char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+	u32 tid_num = __le32_to_cpu(htt_stats_buf->sw_peer_id__tid_num);
+	u32 num_ppdu = __le32_to_cpu(htt_stats_buf->num_sched_pending__num_ppdu_in_hwq);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:\n");
+	memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+	len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+	len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+			 u32_get_bits(tid_num, HTT_TX_TID_STATS_V1_SW_PEER_ID));
+	len += scnprintf(buf + len, buf_len - len, "tid_num = %u\n",
+			 u32_get_bits(tid_num, HTT_TX_TID_STATS_V1_TID_NUM));
+	len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %u\n",
+			 u32_get_bits(num_ppdu, HTT_TX_TID_STATS_V1_NUM_SCHED_PENDING));
+	len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %u\n",
+			 u32_get_bits(num_ppdu, HTT_TX_TID_STATS_V1_NUM_PPDU_IN_HWQ));
+	len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n",
+			 htt_stats_buf->tid_flags);
+	len += scnprintf(buf + len, buf_len - len, "max_qdepth_bytes = %u\n",
+			 htt_stats_buf->max_qdepth_bytes);
+	len += scnprintf(buf + len, buf_len - len, "max_qdepth_n_msdus = %u\n",
+			 htt_stats_buf->max_qdepth_n_msdus);
+	len += scnprintf(buf + len, buf_len - len, "rsvd = %u\n",
+			 htt_stats_buf->rsvd);
+	len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n",
+			 htt_stats_buf->qdepth_bytes);
+	len += scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n",
+			 htt_stats_buf->qdepth_num_msdu);
+	len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n",
+			 htt_stats_buf->qdepth_num_mpdu);
+	len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n",
+			 htt_stats_buf->last_scheduled_tsmp);
+	len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n",
+			 htt_stats_buf->pause_module_id);
+	len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n",
+			 htt_stats_buf->block_module_id);
+	len += scnprintf(buf + len, buf_len - len, "allow_n_flags = 0x%x\n",
+			 htt_stats_buf->allow_n_flags);
+	len += scnprintf(buf + len, buf_len - len, "sendn_frms_allowed = %u\n",
+			 htt_stats_buf->sendn_frms_allowed);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_RX_TID_STATS_TLV: per-TID RX reorder/duplicate counters.
+ * sw_peer_id and tid_num are bit-fields packed into sw_peer_id__tid_num;
+ * tid_name is copied to a zeroed local buffer to ensure NUL termination.
+ */
+static inline void htt_print_rx_tid_stats_tlv(const void *tag_buf,
+					      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_rx_tid_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+	u32 tid_num = __le32_to_cpu(htt_stats_buf->sw_peer_id__tid_num);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+			 u32_get_bits(tid_num, HTT_RX_TID_STATS_SW_PEER_ID));
+	len += scnprintf(buf + len, buf_len - len, "tid_num = %u\n",
+			 u32_get_bits(tid_num, HTT_RX_TID_STATS_TID_NUM));
+	memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+	len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+	len += scnprintf(buf + len, buf_len - len, "dup_in_reorder = %u\n",
+			 htt_stats_buf->dup_in_reorder);
+	len += scnprintf(buf + len, buf_len - len, "dup_past_outside_window = %u\n",
+			 htt_stats_buf->dup_past_outside_window);
+	len += scnprintf(buf + len, buf_len - len, "dup_past_within_window = %u\n",
+			 htt_stats_buf->dup_past_within_window);
+	len += scnprintf(buf + len, buf_len - len, "rxdesc_err_decrypt = %u\n",
+			 htt_stats_buf->rxdesc_err_decrypt);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_COUNTER_TLV: a generic named counter.  counter_name is stored as
+ * an array of words and emitted via PRINT_ARRAY_TO_BUF rather than as a C
+ * string.
+ */
+static inline void htt_print_counter_tlv(const void *tag_buf,
+					 struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_counter_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_COUNTER_TLV:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->counter_name,
+			   "counter_name = %s\n", HTT_MAX_COUNTER_NAME, "\n\n");
+	len += scnprintf(buf + len, buf_len - len, "count = %u\n",
+			 htt_stats_buf->count);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_PEER_STATS_CMN_TLV: common per-peer TX accounting.  The 64-bit
+ * enqueue/dequeue/drop/byte counters are delivered by firmware as separate
+ * low/high 32-bit halves and reassembled here with a shift-or.
+ */
+static inline void htt_print_peer_stats_cmn_tlv(const void *tag_buf,
+						struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_peer_stats_cmn_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PEER_STATS_CMN_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "ppdu_cnt = %u\n",
+			 htt_stats_buf->ppdu_cnt);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_cnt = %u\n",
+			 htt_stats_buf->mpdu_cnt);
+	len += scnprintf(buf + len, buf_len - len, "msdu_cnt = %u\n",
+			 htt_stats_buf->msdu_cnt);
+	len += scnprintf(buf + len, buf_len - len, "pause_bitmap = %u\n",
+			 htt_stats_buf->pause_bitmap);
+	len += scnprintf(buf + len, buf_len - len, "block_bitmap = %u\n",
+			 htt_stats_buf->block_bitmap);
+	len += scnprintf(buf + len, buf_len - len, "last_rssi = %d\n",
+			 htt_stats_buf->rssi);
+	len += scnprintf(buf + len, buf_len - len, "enqueued_count = %llu\n",
+			 htt_stats_buf->peer_enqueued_count_low |
+			 ((u64)htt_stats_buf->peer_enqueued_count_high << 32));
+	len += scnprintf(buf + len, buf_len - len, "dequeued_count = %llu\n",
+			 htt_stats_buf->peer_dequeued_count_low |
+			 ((u64)htt_stats_buf->peer_dequeued_count_high << 32));
+	len += scnprintf(buf + len, buf_len - len, "dropped_count = %llu\n",
+			 htt_stats_buf->peer_dropped_count_low |
+			 ((u64)htt_stats_buf->peer_dropped_count_high << 32));
+	len += scnprintf(buf + len, buf_len - len, "transmitted_ppdu_bytes = %llu\n",
+			 htt_stats_buf->ppdu_transmitted_bytes_low |
+			 ((u64)htt_stats_buf->ppdu_transmitted_bytes_high << 32));
+	len += scnprintf(buf + len, buf_len - len, "remove_mpdus_max_retries = %u\n",
+			 htt_stats_buf->remove_mpdus_max_retries);
+	len += scnprintf(buf + len, buf_len - len, "ttl_removed_count = %u\n",
+			 htt_stats_buf->peer_ttl_removed_count);
+	len += scnprintf(buf + len, buf_len - len, "inactive_time = %u\n",
+			 htt_stats_buf->inactive_time);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_AST_ENTRY_TLV: one AST (address search table) entry.  The MAC
+ * address arrives as two little-endian words (low 32 bits + high 16 bits);
+ * each octet is extracted with the HTT_MAC_ADDR_* bit-field masks.
+ */
+static inline void htt_print_ast_entry_tlv(const void *tag_buf,
+					   struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_ast_entry_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_addr = __le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_l32);
+	u32 mac_addr_16 = __le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_h16);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "mac_addr : %02x:%02x:%02x:%02x:%02x:%02x | ast_index : %u | sw_peer_id : %u | pdev_id : %u | vdev_id : %u\n",
+			 u32_get_bits(mac_addr, HTT_MAC_ADDR_L32_0),
+			 u32_get_bits(mac_addr, HTT_MAC_ADDR_L32_1),
+			 u32_get_bits(mac_addr, HTT_MAC_ADDR_L32_2),
+			 u32_get_bits(mac_addr, HTT_MAC_ADDR_L32_3),
+			 u32_get_bits(mac_addr_16, HTT_MAC_ADDR_H16_0),
+			 u32_get_bits(mac_addr_16, HTT_MAC_ADDR_H16_1),
+			 htt_stats_buf->ast_index, htt_stats_buf->sw_peer_id,
+			 htt_stats_buf->pdev_id, htt_stats_buf->vdev_id);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "next_hop : %u | mcast : %u | monitor_direct :%u | mesh_sta : %u | mec :%u | intra_bss :%u\n",
+			 htt_stats_buf->next_hop, htt_stats_buf->mcast,
+			 htt_stats_buf->monitor_direct, htt_stats_buf->mesh_sta,
+			 htt_stats_buf->mec, htt_stats_buf->intra_bss);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_PEER_DETAILS_TLV: peer identity information.  vdev_id, pdev_id
+ * and ast_idx are bit-fields packed into vdev_pdev_ast_idx; the MAC address
+ * is split across two little-endian words, octets extracted via the
+ * HTT_MAC_ADDR_* masks.
+ */
+static inline void htt_print_peer_details_tlv(const void *tag_buf,
+					      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_peer_details_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 ast_idx = __le32_to_cpu(htt_stats_buf->vdev_pdev_ast_idx);
+	u32 mac_addr = __le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_l32);
+	u32 mac_addr_16 = __le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_h16);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PEER_DETAILS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "peer_type = %u\n",
+			 htt_stats_buf->peer_type);
+	len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+			 htt_stats_buf->sw_peer_id);
+	len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n",
+			 u32_get_bits(ast_idx, HTT_PEER_DETAILS_VDEV_ID));
+	len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+			 u32_get_bits(ast_idx, HTT_PEER_DETAILS_PDEV_ID));
+	len += scnprintf(buf + len, buf_len - len, "ast_idx = %u\n",
+			 u32_get_bits(ast_idx, HTT_PEER_DETAILS_AST_IDX));
+	len += scnprintf(buf + len, buf_len - len,
+			 "mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
+			 u32_get_bits(mac_addr, HTT_MAC_ADDR_L32_0),
+			 u32_get_bits(mac_addr, HTT_MAC_ADDR_L32_1),
+			 u32_get_bits(mac_addr, HTT_MAC_ADDR_L32_2),
+			 u32_get_bits(mac_addr, HTT_MAC_ADDR_L32_3),
+			 u32_get_bits(mac_addr_16, HTT_MAC_ADDR_H16_0),
+			 u32_get_bits(mac_addr_16, HTT_MAC_ADDR_H16_1));
+	len += scnprintf(buf + len, buf_len - len, "peer_flags = 0x%x\n",
+			 htt_stats_buf->peer_flags);
+	len += scnprintf(buf + len, buf_len - len, "qpeer_flags = 0x%x\n",
+			 htt_stats_buf->qpeer_flags);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_TX_PEER_RATE_STATS_TLV: per-peer TX rate statistics (MCS, NSS,
+ * BW, STBC, preamble, GI and DCM counters).
+ *
+ * Fix: the previous version kmalloc'ed one scratch string per GI counter
+ * and printed it with "%s" while it was still uninitialized (kmalloc does
+ * not zero memory), leaking uninitialized kernel heap into debugfs output.
+ * The per-GI rows are now emitted directly from the firmware counters,
+ * which also removes the needless GFP_ATOMIC allocations.
+ */
+static inline void htt_print_tx_peer_rate_stats_tlv(const void *tag_buf,
+						    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_peer_rate_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8 j;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_PEER_RATE_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
+			 htt_stats_buf->tx_ldpc);
+	len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+			 htt_stats_buf->rts_cnt);
+	len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
+			 htt_stats_buf->ack_rssi);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs,
+			   "tx_mcs = %s\n", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_su_mcs, "tx_su_mcs",
+			   HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mu_mcs, "tx_mu_mcs",
+			   HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss",
+			   HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_bw, "tx_bw",
+			   HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc",
+			   HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream",
+			   HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
+	/* one per-MCS row per guard-interval setting */
+	for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len, "tx_gi[%u] = ", j);
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL,
+				   HTT_TX_PEER_STATS_NUM_MCS_COUNTERS, "\n");
+	}
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm",
+			   HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_RX_PEER_RATE_STATS_TLV: per-peer RX rate statistics (MCS, NSS,
+ * DCM, STBC, BW, per-chain RSSI, GI, preamble and UL OFDMA/MU-MIMO
+ * counters).
+ *
+ * Fix: the previous version kmalloc'ed scratch strings for rssi_chain[] and
+ * rx_gi[] and printed them with "%s" while still uninitialized (kmalloc
+ * does not zero memory), leaking uninitialized kernel heap into debugfs
+ * output.  Those rows are now emitted directly from the firmware counters,
+ * which also removes the GFP_ATOMIC allocations and the cleanup label.
+ */
+static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf,
+						    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_rx_peer_rate_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8 i, j;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_RX_PEER_RATE_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
+			 htt_stats_buf->nsts);
+	len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
+			 htt_stats_buf->rx_ldpc);
+	len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+			 htt_stats_buf->rts_cnt);
+	len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
+			 htt_stats_buf->rssi_mgmt);
+	len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
+			 htt_stats_buf->rssi_data);
+	len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
+			 htt_stats_buf->rssi_comb);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs",
+			   HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss",
+			   HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm",
+			   HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc",
+			   HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw",
+			   HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+	/* per-chain RSSI, one per-BW row per spatial stream */
+	for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
+		len += scnprintf(buf + len, buf_len - len, "rssi_chain[%u] = ", j);
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL,
+				   HTT_RX_PEER_STATS_NUM_BW_COUNTERS, "\n");
+	}
+
+	for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
+		len += scnprintf(buf + len, buf_len - len, "\nrssi_chain_ext[%u] = ", j);
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain_ext[j], NULL,
+				   HTT_RX_PEER_STATS_NUM_BW_EXT_COUNTERS, "\n");
+	}
+
+	/* one per-MCS row per guard-interval setting */
+	for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len, "rx_gi[%u] = ", j);
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL,
+				   HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	}
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream",
+			   HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "\nrx_ulofdma_non_data_ppdu = %u\n",
+			 htt_stats_buf->rx_ulofdma_non_data_ppdu);
+	len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_ppdu = %u\n",
+			 htt_stats_buf->rx_ulofdma_data_ppdu);
+	len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_mpdu_ok = %u\n",
+			 htt_stats_buf->rx_ulofdma_mpdu_ok);
+	len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_mpdu_fail = %u\n",
+			 htt_stats_buf->rx_ulofdma_mpdu_fail);
+	len += scnprintf(buf + len, buf_len - len, "rx_ulmumimo_non_data_ppdu = %u\n",
+			 htt_stats_buf->rx_ulmumimo_non_data_ppdu);
+	len += scnprintf(buf + len, buf_len - len, "rx_ulmumimo_data_ppdu = %u\n",
+			 htt_stats_buf->rx_ulmumimo_data_ppdu);
+	len += scnprintf(buf + len, buf_len - len, "rx_ulmumimo_mpdu_ok = %u\n",
+			 htt_stats_buf->rx_ulmumimo_mpdu_ok);
+	len += scnprintf(buf + len, buf_len - len, "rx_ulmumimo_mpdu_fail = %u\n",
+			 htt_stats_buf->rx_ulmumimo_mpdu_fail);
+
+	len += scnprintf(buf + len, buf_len - len, "rx_ul_fd_rssi = ");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ul_fd_rssi, NULL,
+			   HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "\nper_chain_rssi_pkt_type = %#x",
+			 htt_stats_buf->per_chain_rssi_pkt_type);
+
+	for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "\nrx_per_chain_rssi_in_dbm[%u] = ", j);
+		for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
+			len += scnprintf(buf + len,
+					 buf_len - len,
+					 " %u:%d,",
+					 i,
+					 htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV: MU-MIMO schedule posted/failed and
+ * PPDU posted counters for one HW queue.
+ */
+static inline void
+htt_print_tx_hwq_mu_mimo_sch_stats_tlv(const void *tag_buf,
+				       struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_hwq_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n",
+			 htt_stats_buf->mu_mimo_sch_posted);
+	len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n",
+			 htt_stats_buf->mu_mimo_sch_failed);
+	len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n",
+			 htt_stats_buf->mu_mimo_ppdu_posted);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV: per-user MU-MIMO MPDU
+ * queued/tried/failed/requeued counters plus BA-miss and underrun counts.
+ */
+static inline void
+htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
+					struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_queued_usr = %u\n",
+			 htt_stats_buf->mu_mimo_mpdus_queued_usr);
+	len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_tried_usr = %u\n",
+			 htt_stats_buf->mu_mimo_mpdus_tried_usr);
+	len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_failed_usr = %u\n",
+			 htt_stats_buf->mu_mimo_mpdus_failed_usr);
+	len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_requeued_usr = %u\n",
+			 htt_stats_buf->mu_mimo_mpdus_requeued_usr);
+	len += scnprintf(buf + len, buf_len - len, "mu_mimo_err_no_ba_usr = %u\n",
+			 htt_stats_buf->mu_mimo_err_no_ba_usr);
+	len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdu_underrun_usr = %u\n",
+			 htt_stats_buf->mu_mimo_mpdu_underrun_usr);
+	len += scnprintf(buf + len, buf_len - len, "mu_mimo_ampdu_underrun_usr = %u\n",
+			 htt_stats_buf->mu_mimo_ampdu_underrun_usr);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV: identifies the mac/hwq the
+ * subsequent MU-MIMO HWQ TLVs refer to; both IDs are bit-fields packed
+ * into mac_id__hwq_id__word.
+ *
+ * Cleanup: add the kernel-style blank line between the declarations and
+ * the first statement, matching every sibling TLV printer in this file.
+ */
+static inline void
+htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(const void *tag_buf,
+				       struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_hwq_mu_mimo_cmn_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__hwq_id__word);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_TX_HWQ_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "hwq_id = %u\n\n",
+			 u32_get_bits(mac_id_word, HTT_TX_HWQ_STATS_HWQ_ID));
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_TX_HWQ_STATS_CMN_TLV: common per-HWQ TX counters.  mac_id and
+ * hwq_id are bit-fields packed into mac_id__hwq_id__word.
+ *
+ * Fix: the TLV header format string was missing its trailing newline, so
+ * "mac_id" was glued onto the header line; every sibling TLV printer
+ * terminates the header with '\n'.
+ */
+static inline void
+htt_print_tx_hwq_stats_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_hwq_stats_cmn_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__hwq_id__word);
+
+	/* TODO: HKDBG */
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_STATS_CMN_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_TX_HWQ_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "hwq_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_TX_HWQ_STATS_HWQ_ID));
+	len += scnprintf(buf + len, buf_len - len, "xretry = %u\n",
+			 htt_stats_buf->xretry);
+	len += scnprintf(buf + len, buf_len - len, "underrun_cnt = %u\n",
+			 htt_stats_buf->underrun_cnt);
+	len += scnprintf(buf + len, buf_len - len, "flush_cnt = %u\n",
+			 htt_stats_buf->flush_cnt);
+	len += scnprintf(buf + len, buf_len - len, "filt_cnt = %u\n",
+			 htt_stats_buf->filt_cnt);
+	len += scnprintf(buf + len, buf_len - len, "null_mpdu_bmap = %u\n",
+			 htt_stats_buf->null_mpdu_bmap);
+	len += scnprintf(buf + len, buf_len - len, "user_ack_failure = %u\n",
+			 htt_stats_buf->user_ack_failure);
+	len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+			 htt_stats_buf->ack_tlv_proc);
+	len += scnprintf(buf + len, buf_len - len, "sched_id_proc = %u\n",
+			 htt_stats_buf->sched_id_proc);
+	len += scnprintf(buf + len, buf_len - len, "null_mpdu_tx_count = %u\n",
+			 htt_stats_buf->null_mpdu_tx_count);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u\n",
+			 htt_stats_buf->mpdu_bmap_not_recvd);
+	len += scnprintf(buf + len, buf_len - len, "num_bar = %u\n",
+			 htt_stats_buf->num_bar);
+	len += scnprintf(buf + len, buf_len - len, "rts = %u\n",
+			 htt_stats_buf->rts);
+	len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n",
+			 htt_stats_buf->cts2self);
+	len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n",
+			 htt_stats_buf->qos_null);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_tried_cnt = %u\n",
+			 htt_stats_buf->mpdu_tried_cnt);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_queued_cnt = %u\n",
+			 htt_stats_buf->mpdu_queued_cnt);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u\n",
+			 htt_stats_buf->mpdu_ack_fail_cnt);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_filt_cnt = %u\n",
+			 htt_stats_buf->mpdu_filt_cnt);
+	len += scnprintf(buf + len, buf_len - len, "false_mpdu_ack_count = %u\n",
+			 htt_stats_buf->false_mpdu_ack_count);
+	len += scnprintf(buf + len, buf_len - len, "txq_timeout = %u\n",
+			 htt_stats_buf->txq_timeout);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump the per-HWQ DIFS latency histogram
+ * (HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V).
+ *
+ * tag_len is the TLV payload size in bytes; each bin is one 32-bit
+ * word, clamped to HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS.
+ *
+ * Fixed: the banner and "hist_intvl" format strings were missing their
+ * trailing '\n', so three logical lines ran together in the output
+ * (the sibling TLV printers all terminate each line).
+ */
+static inline void
+htt_print_tx_hwq_difs_latency_stats_tlv_v(const void *tag_buf,
+					  u16 tag_len,
+					  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_hwq_difs_latency_stats_tlv_v *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u16 data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:\n");
+	len += scnprintf(buf + len, buf_len - len, "hist_intvl = %u\n",
+			 htt_stats_buf->hist_intvl);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->difs_latency_hist,
+			   "difs_latency_hist", data_len, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump the HWQ command-result table (HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V). */
+static inline void
+htt_print_tx_hwq_cmd_result_stats_tlv_v(const void *tag_buf,
+					u16 tag_len,
+					struct debug_htt_stats_req *stats_req)
+{
+	/* One 32-bit word per result entry, clamped to the table size. */
+	u16 words = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_RESULT_STATS);
+	const struct htt_tx_hwq_cmd_result_stats_tlv_v *tlv = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:\n");
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->cmd_result, "cmd_result",
+			   words, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump the HWQ command-stall table (HTT_TX_HWQ_CMD_STALL_STATS_TLV_V). */
+static inline void
+htt_print_tx_hwq_cmd_stall_stats_tlv_v(const void *tag_buf,
+				       u16 tag_len,
+				       struct debug_htt_stats_req *stats_req)
+{
+	/* One 32-bit stall-status word per entry, clamped to table size. */
+	u16 words = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_STALL_STATS);
+	const struct htt_tx_hwq_cmd_stall_stats_tlv_v *tlv = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:\n");
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->cmd_stall_status,
+			   "cmd_stall_status", words, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump the HWQ FES result table (HTT_TX_HWQ_FES_RESULT_STATS_TLV_V). */
+static inline void
+htt_print_tx_hwq_fes_result_stats_tlv_v(const void *tag_buf,
+					u16 tag_len,
+					struct debug_htt_stats_req *stats_req)
+{
+	/* One 32-bit word per FES result, clamped to the table size. */
+	u16 words = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_FES_RESULT_STATS);
+	const struct htt_tx_hwq_fes_result_stats_tlv_v *tlv = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:\n");
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->fes_result, "fes_result",
+			   words, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump the tried-MPDU count histogram
+ * (HTT_TX_HWQ_TRIED_MPDU_CNT_HIST_TLV_V).
+ */
+static inline void
+htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
+					   u16 tag_len,
+					   struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v *tlv = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	/* Histogram entries follow the bin-size word; 4 bytes each. */
+	u32 bins = (tag_len - sizeof(tlv->hist_bin_size)) >> 2;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_HWQ_TRIED_MPDU_CNT_HIST_TLV_V:\n");
+	len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n",
+			 tlv->hist_bin_size);
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->tried_mpdu_cnt_hist,
+			   "tried_mpdu_cnt_hist", bins, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump the TXOP-used count histogram
+ * (HTT_TX_HWQ_TXOP_USED_CNT_HIST_TLV_V).
+ */
+static inline void
+htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(const void *tag_buf,
+					  u16 tag_len,
+					  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_hwq_txop_used_cnt_hist_tlv_v *tlv = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	/* The whole TLV payload is the histogram; 4 bytes per bucket. */
+	u32 buckets = tag_len >> 2;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_HWQ_TXOP_USED_CNT_HIST_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->txop_used_cnt_hist,
+			   "txop_used_cnt_hist", buckets, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump TX sounding statistics (HTT_TX_SOUNDING_STATS_TLV).
+ *
+ * One TLV layout carries either 11ac or 11ax sounding counters;
+ * htt_stats_buf->tx_sounding_mode selects which set of labels is
+ * printed (any other mode value prints nothing).  The cbf_* arrays
+ * are indexed by the steering-type constants
+ * (HTT_IMPLICIT_TXBF_STEER_STATS, HTT_EXPLICIT_TXBF_*_STEER_STATS),
+ * one array per channel width.
+ */
+static inline void htt_print_tx_sounding_stats_tlv(const void *tag_buf,
+						   struct debug_htt_stats_req *stats_req)
+{
+	s32 i;
+	const struct htt_tx_sounding_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	const u32 *cbf_20 = htt_stats_buf->cbf_20;
+	const u32 *cbf_40 = htt_stats_buf->cbf_40;
+	const u32 *cbf_80 = htt_stats_buf->cbf_80;
+	const u32 *cbf_160 = htt_stats_buf->cbf_160;
+
+	if (htt_stats_buf->tx_sounding_mode == HTT_TX_AC_SOUNDING_MODE) {
+		/* 11ac sounding: CBF counters per bandwidth, then per-user
+		 * sounding counts.
+		 */
+		len += scnprintf(buf + len, buf_len - len,
+				 "\nHTT_TX_AC_SOUNDING_STATS_TLV:\n");
+		len += scnprintf(buf + len, buf_len - len,
+				 "ac_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+				 cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+				 cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+				 cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+				 cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+				 cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ac_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+				 cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+				 cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+				 cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+				 cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+				 cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ac_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+				 cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+				 cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+				 cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+				 cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+				 cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ac_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+				 cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+				 cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+				 cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+				 cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+				 cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+
+		for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) {
+			/* NOTE(review): sounding[0..3] does not depend on i,
+			 * so every user prints identical numbers - looks like
+			 * the index should be offset per user; confirm against
+			 * the firmware sounding[] layout before changing.
+			 */
+			len += scnprintf(buf + len, buf_len - len,
+					 "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u\n",
+					 i,
+					 htt_stats_buf->sounding[0],
+					 htt_stats_buf->sounding[1],
+					 htt_stats_buf->sounding[2],
+					 htt_stats_buf->sounding[3]);
+		}
+	} else if (htt_stats_buf->tx_sounding_mode == HTT_TX_AX_SOUNDING_MODE) {
+		/* 11ax sounding: same structure, ax_* labels. */
+		len += scnprintf(buf + len, buf_len - len,
+				 "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n");
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+				 cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+				 cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+				 cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+				 cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+				 cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+				 cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+				 cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+				 cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+				 cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+				 cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+				 cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+				 cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+				 cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+				 cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+				 cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+				 cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+				 cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+				 cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+				 cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+				 cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+
+		for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) {
+			/* NOTE(review): same fixed [0..3] indexing as the
+			 * 11ac branch above - verify intent.
+			 */
+			len += scnprintf(buf + len, buf_len - len,
+					 "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u\n",
+					 i,
+					 htt_stats_buf->sounding[0],
+					 htt_stats_buf->sounding[1],
+					 htt_stats_buf->sounding[2],
+					 htt_stats_buf->sounding[3]);
+		}
+	}
+
+	stats_req->buf_len = len;
+}
+
+/* Dump common self-generated frame counters
+ * (HTT_TX_SELFGEN_CMN_STATS_TLV).  mac_id is extracted from the packed
+ * little-endian first word; the remaining fields are printed one per
+ * line via a name/value table, in the firmware field order.
+ */
+static inline void
+htt_print_tx_selfgen_cmn_stats_tlv(const void *tag_buf,
+				   struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_selfgen_cmn_stats_tlv *tlv = tag_buf;
+	u32 mac_id_word = __le32_to_cpu(tlv->mac_id__word);
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	const struct {
+		const char *name;
+		u32 val;
+	} fields[] = {
+		{ "su_bar", tlv->su_bar },
+		{ "rts", tlv->rts },
+		{ "cts2self", tlv->cts2self },
+		{ "qos_null", tlv->qos_null },
+		{ "delayed_bar_1", tlv->delayed_bar_1 },
+		{ "delayed_bar_2", tlv->delayed_bar_2 },
+		{ "delayed_bar_3", tlv->delayed_bar_3 },
+		{ "delayed_bar_4", tlv->delayed_bar_4 },
+		{ "delayed_bar_5", tlv->delayed_bar_5 },
+		{ "delayed_bar_6", tlv->delayed_bar_6 },
+		{ "delayed_bar_7", tlv->delayed_bar_7 },
+		{ "bar_with_tqm_head_seq_num", tlv->bar_with_tqm_head_seq_num },
+		{ "bar_with_tid_seq_num", tlv->bar_with_tid_seq_num },
+		{ "su_sw_rts_queued", tlv->su_sw_rts_queued },
+		{ "su_sw_rts_tried", tlv->su_sw_rts_tried },
+		{ "su_sw_rts_err", tlv->su_sw_rts_err },
+		{ "su_sw_rts_flushed", tlv->su_sw_rts_flushed },
+		{ "su_sw_rts_rcvd_cts_diff_bw", tlv->su_sw_rts_rcvd_cts_diff_bw },
+	};
+	u8 i;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_STATS_MAC_ID));
+	for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++)
+		len += scnprintf(buf + len, buf_len - len, "%s = %u\n",
+				 fields[i].name, fields[i].val);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump 11ac self-generated frame queued/tried counters
+ * (HTT_TX_SELFGEN_AC_STATS_TLV).
+ *
+ * Fixed: every "*_tried = %u" format string was missing its trailing
+ * '\n', so each tried counter ran onto the following queued line.  The
+ * interleaved "*_queued" lines and the 11ax variant of this function
+ * both terminate every counter with '\n'.
+ */
+static inline void
+htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf,
+				  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_selfgen_ac_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_queued = %u\n",
+			 htt_stats_buf->ac_su_ndpa_queued);
+	len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_tried = %u\n",
+			 htt_stats_buf->ac_su_ndpa);
+	len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_queued = %u\n",
+			 htt_stats_buf->ac_su_ndp_queued);
+	len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_tried = %u\n",
+			 htt_stats_buf->ac_su_ndp);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_queued = %u\n",
+			 htt_stats_buf->ac_mu_mimo_ndpa_queued);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_tried = %u\n",
+			 htt_stats_buf->ac_mu_mimo_ndpa);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_queued = %u\n",
+			 htt_stats_buf->ac_mu_mimo_ndp_queued);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_tried = %u\n",
+			 htt_stats_buf->ac_mu_mimo_ndp);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1_queued = %u\n",
+			 htt_stats_buf->ac_mu_mimo_brpoll_1_queued);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1_tried = %u\n",
+			 htt_stats_buf->ac_mu_mimo_brpoll_1);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2_queued = %u\n",
+			 htt_stats_buf->ac_mu_mimo_brpoll_2_queued);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2_tried = %u\n",
+			 htt_stats_buf->ac_mu_mimo_brpoll_2);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3_queued = %u\n",
+			 htt_stats_buf->ac_mu_mimo_brpoll_3_queued);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3_tried = %u\n",
+			 htt_stats_buf->ac_mu_mimo_brpoll_3);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump 11ax self-generated frame counters (HTT_TX_SELFGEN_AX_STATS_TLV).
+ * Scalar counters are table-driven; per-user arrays go through
+ * PRINT_ARRAY_TO_BUF.  Output text and order match the firmware field
+ * order exactly.
+ */
+static inline void
+htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf,
+				  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_selfgen_ax_stats_tlv *tlv = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	const struct {
+		const char *name;
+		u32 val;
+	} pre[] = {
+		{ "ax_su_ndpa_queued", tlv->ax_su_ndpa_queued },
+		{ "ax_su_ndpa_tried", tlv->ax_su_ndpa },
+		{ "ax_su_ndp_queued", tlv->ax_su_ndp_queued },
+		{ "ax_su_ndp_tried", tlv->ax_su_ndp },
+		{ "ax_mu_mimo_ndpa_queued", tlv->ax_mu_mimo_ndpa_queued },
+		{ "ax_mu_mimo_ndpa_tried", tlv->ax_mu_mimo_ndpa },
+		{ "ax_mu_mimo_ndp_queued", tlv->ax_mu_mimo_ndp_queued },
+		{ "ax_mu_mimo_ndp_tried", tlv->ax_mu_mimo_ndp },
+	}, post[] = {
+		{ "ax_basic_trigger", tlv->ax_basic_trigger },
+		{ "ax_bsr_trigger", tlv->ax_bsr_trigger },
+		{ "ax_mu_bar_trigger", tlv->ax_mu_bar_trigger },
+		{ "ax_mu_rts_trigger", tlv->ax_mu_rts_trigger },
+	};
+	u8 i;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:\n");
+	for (i = 0; i < sizeof(pre) / sizeof(pre[0]); i++)
+		len += scnprintf(buf + len, buf_len - len, "%s = %u\n",
+				 pre[i].name, pre[i].val);
+
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->ax_mu_mimo_brpoll_queued,
+			   "ax_mu_mimo_brpollX_queued",
+			   HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS - 1, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->ax_mu_mimo_brpoll,
+			   "ax_mu_mimo_brpollX_tried",
+			   HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS - 1, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->ax_ul_mumimo_trigger,
+			   "ax_ul_mumimo_trigger",
+			   HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS, "\n");
+
+	for (i = 0; i < sizeof(post) / sizeof(post[0]); i++)
+		len += scnprintf(buf + len, buf_len - len, "%s = %u\n",
+				 post[i].name, post[i].val);
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ac_err_stats_tlv(const void *tag_buf,
+				      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_selfgen_ac_err_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_err = %u\n",
+			 htt_stats_buf->ac_su_ndp_err);
+	len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_flushed = %u\n",
+			 htt_stats_buf->ac_su_ndp_flushed);
+	len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_err = %u\n",
+			 htt_stats_buf->ac_su_ndpa_err);
+	len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_flushed = %u\n",
+			 htt_stats_buf->ac_su_ndpa_flushed);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u\n",
+			 htt_stats_buf->ac_mu_mimo_ndpa_err);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_flushed = %u\n",
+			 htt_stats_buf->ac_mu_mimo_ndpa_flushed);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u\n",
+			 htt_stats_buf->ac_mu_mimo_ndp_err);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_flushed = %u\n",
+			 htt_stats_buf->ac_mu_mimo_ndp_flushed);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u\n",
+			 htt_stats_buf->ac_mu_mimo_brp1_err);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u\n",
+			 htt_stats_buf->ac_mu_mimo_brp2_err);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n",
+			 htt_stats_buf->ac_mu_mimo_brp3_err);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp1_flushed = %u\n",
+			 htt_stats_buf->ac_mu_mimo_brp1_flushed);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp2_flushed = %u\n",
+			 htt_stats_buf->ac_mu_mimo_brp2_flushed);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp3_flushed = %u\n",
+			 htt_stats_buf->ac_mu_mimo_brp3_flushed);
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf,
+				      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_selfgen_ax_err_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_err = %u\n",
+			 htt_stats_buf->ax_su_ndp_err);
+	len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_flushed = %u\n",
+			 htt_stats_buf->ax_su_ndp_flushed);
+	len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_err = %u\n",
+			 htt_stats_buf->ax_su_ndpa_err);
+	len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_flushed = %u\n",
+			 htt_stats_buf->ax_su_ndpa_flushed);
+	len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u\n",
+			 htt_stats_buf->ax_mu_mimo_ndpa_err);
+	len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_flushed = %u\n",
+			 htt_stats_buf->ax_mu_mimo_ndpa_flushed);
+	len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u\n",
+			 htt_stats_buf->ax_mu_mimo_ndp_err);
+	len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_flushed = %u\n",
+			 htt_stats_buf->ax_mu_mimo_ndp_flushed);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_brp_err,
+			   "ax_mu_mimo_brpX_err",
+			   HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS - 1, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_brpoll_flushed,
+			   "ax_mu_mimo_brpollX_flushed",
+			   HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS - 1, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_brp_err_num_cbf_received,
+			   "ax_mu_mimo_num_cbf_rcvd_on_brp_err",
+			   HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_ul_mumimo_trigger_err,
+			   "ax_ul_mumimo_trigger_err",
+			   HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger_err = %u\n",
+			 htt_stats_buf->ax_basic_trigger_err);
+	len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger_err = %u\n",
+			 htt_stats_buf->ax_bsr_trigger_err);
+	len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u\n",
+			 htt_stats_buf->ax_mu_bar_trigger_err);
+	len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n",
+			 htt_stats_buf->ax_mu_rts_trigger_err);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump 11ac self-gen scheduler status histograms
+ * (HTT_TX_SELFGEN_AC_SCHED_STATUS_STATS_TLV).
+ *
+ * Fixed: the label argument of each PRINT_ARRAY_TO_BUF call embedded a
+ * literal " = %s " suffix (e.g. "ac_su_ndpa_sch_status = %s "), which
+ * is not a format string there - every other PRINT_ARRAY_TO_BUF call
+ * in this file passes a bare label, so the "%s" text leaked verbatim
+ * into the output.  Also terminate the banner with '\n' like the
+ * sibling TLV printers.
+ */
+static inline
+void htt_print_tx_selfgen_ac_sched_status_stats_tlv(const void *tag_buf,
+						    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_selfgen_ac_sched_status_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_SELFGEN_AC_SCHED_STATUS_STATS_TLV:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_su_ndpa_sch_status,
+			   "ac_su_ndpa_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_su_ndp_sch_status,
+			   "ac_su_ndp_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_ndpa_sch_status,
+			   "ac_mu_mimo_ndpa_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_ndp_sch_status,
+			   "ac_mu_mimo_ndp_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_brp_sch_status,
+			   "ac_mu_mimo_brp_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_su_ndp_sch_flag_err,
+			   "ac_su_ndp_sch_flag_err",
+			   HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_ndp_sch_flag_err,
+			   "ac_mu_mimo_ndp_sch_flag_err",
+			   HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_brp_sch_flag_err,
+			   "ac_mu_mimo_brp_sch_flag_err",
+			   HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS, "\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump 11ax self-gen scheduler status histograms
+ * (HTT_TX_SELFGEN_AX_SCHED_STATUS_STATS_TLV).
+ *
+ * Fixed: the label argument of each PRINT_ARRAY_TO_BUF call embedded a
+ * literal " = %s " suffix; every other PRINT_ARRAY_TO_BUF call in this
+ * file passes a bare label, so the "%s" text leaked verbatim into the
+ * output.  Also terminate the banner with '\n' like the sibling TLV
+ * printers.
+ */
+static inline void
+htt_print_tx_selfgen_ax_sched_status_stats_tlv(const void *tag_buf,
+					       struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_selfgen_ax_sched_status_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_SELFGEN_AX_SCHED_STATUS_STATS_TLV:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_su_ndpa_sch_status,
+			   "ax_su_ndpa_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_su_ndp_sch_status,
+			   "ax_su_ndp_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_ndpa_sch_status,
+			   "ax_mu_mimo_ndpa_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_ndp_sch_status,
+			   "ax_mu_mimo_ndp_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_brp_sch_status,
+			   "ax_mu_brp_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_bar_sch_status,
+			   "ax_mu_bar_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_basic_trig_sch_status,
+			   "ax_basic_trig_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_su_ndp_sch_flag_err,
+			   "ax_su_ndp_sch_flag_err",
+			   HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_ndp_sch_flag_err,
+			   "ax_mu_mimo_ndp_sch_flag_err",
+			   HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_brp_sch_flag_err,
+			   "ax_mu_brp_sch_flag_err",
+			   HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_bar_sch_flag_err,
+			   "ax_mu_bar_sch_flag_err",
+			   HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_basic_trig_sch_flag_err,
+			   "ax_basic_trig_sch_flag_err",
+			   HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS, "\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump the 11ax DL MU-OFDMA per-user-count scheduling histogram. */
+static inline void
+htt_print_tx_pdev_dl_mu_ofdma_sch_stats_tlv(const void *tag_buf,
+					    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_dl_mu_ofdma_sch_stats_tlv *tlv = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "11ax DL MU_OFDMA SCH STATS:\n");
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->ax_mu_ofdma_sch_nusers,
+			   "ax_mu_ofdma_sch_nusers",
+			   HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS, "\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump the 11ax UL MU-OFDMA per-user-count scheduling histograms, one
+ * array per trigger type (basic/BSR/BAR/BRP).
+ */
+static inline void
+htt_print_tx_pdev_ul_mu_ofdma_sch_stats_tlv(const void *tag_buf,
+					    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_ul_mu_ofdma_sch_stats_tlv *tlv = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+
+	len += scnprintf(buf + len, buf_len - len, "11ax UL MU_OFDMA SCH STATS:\n");
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->ax_ul_mu_ofdma_basic_sch_nusers,
+			   "ax_ul_mu_ofdma_basic_sch_nusers",
+			   HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->ax_ul_mu_ofdma_bsr_sch_nusers,
+			   "ax_ul_mu_ofdma_bsr_sch_nusers",
+			   HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->ax_ul_mu_ofdma_bar_sch_nusers,
+			   "ax_ul_mu_ofdma_bar_sch_nusers",
+			   HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->ax_ul_mu_ofdma_brp_sch_nusers,
+			   "ax_ul_mu_ofdma_brp_sch_nusers",
+			   HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS, "\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump the 11ax UL MU-MIMO per-user-count scheduling histograms
+ * (basic and BRP trigger variants).
+ */
+static inline void
+htt_print_tx_pdev_ul_mu_mimo_sch_stats_tlv(const void *tag_buf,
+					   struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_ul_mu_mimo_sch_stats_tlv *tlv = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+
+	len += scnprintf(buf + len, buf_len - len, "11ax UL MU_MIMO SCH STATS:\n");
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->ax_ul_mu_mimo_basic_sch_nusers,
+			   "ax_ul_mu_mimo_basic_sch_nusers",
+			   HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->ax_ul_mu_mimo_brp_sch_nusers,
+			   "ax_ul_mu_mimo_brp_sch_nusers",
+			   HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS, "\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump DL MU-MIMO scheduler statistics: posted/failed PPDU counters,
+ * per-group-size posting histograms and per-user-count histograms for
+ * 11ac and 11ax.
+ */
+static inline void
+htt_print_tx_pdev_dl_mu_mimo_sch_stats_tlv(const void *tag_buf,
+					   struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_dl_mu_mimo_sch_stats_tlv *tlv = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	const struct {
+		const char *name;
+		u32 val;
+	} posted[] = {
+		{ "mu_mimo_sch_posted", tlv->mu_mimo_sch_posted },
+		{ "mu_mimo_sch_failed", tlv->mu_mimo_sch_failed },
+		{ "mu_mimo_ppdu_posted", tlv->mu_mimo_ppdu_posted },
+	};
+	u8 i;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:\n");
+	for (i = 0; i < sizeof(posted) / sizeof(posted[0]); i++)
+		len += scnprintf(buf + len, buf_len - len, "%s = %u\n",
+				 posted[i].name, posted[i].val);
+
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->ac_mu_mimo_sch_posted_per_grp_sz,
+			   "ac_mu_mimo_sch_posted_per_group_index",
+			   HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->ax_mu_mimo_sch_posted_per_grp_sz,
+			   "ax_mu_mimo_sch_posted_per_group_index",
+			   HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "11ac DL MU_MIMO SCH STATS:\n");
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->ac_mu_mimo_sch_nusers,
+			   "ac_mu_mimo_sch_nusers",
+			   HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "\n11ax DL MU_MIMO SCH STATS:\n");
+	PRINT_ARRAY_TO_BUF(buf, len, tlv->ax_mu_mimo_sch_nusers,
+			   "ax_mu_mimo_sch_nusers",
+			   HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS, "\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Print an HTT_TX_PDEV_MU_MIMO_SCH_STATS TLV: MU-MIMO/OFDMA scheduler
+ * posting counters and per-user scheduling histograms, one section per
+ * PHY generation (11ac/11ax/11be).
+ */
+static inline void
+htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf,
+					struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8 i;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n",
+			 htt_stats_buf->mu_mimo_sch_posted);
+	len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n",
+			 htt_stats_buf->mu_mimo_sch_failed);
+	len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n",
+			 htt_stats_buf->mu_mimo_ppdu_posted);
+
+	/* Group index 0 is single-user; index i maps to i + 1 total streams. */
+	len += scnprintf(buf + len, buf_len - len,
+			 "\nac_mu_mimo_sch_posted_per_group_index %u (SU) = %u,\n",
+			 0, htt_stats_buf->ac_mu_mimo_sch_posted_per_grp_sz[0]);
+	for (i = 1; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+		 "ac_mu_mimo_sch_posted_per_group_index %u (TOTAL STREAMS = %u) = %u,\n",
+		 i, i + 1, htt_stats_buf->ac_mu_mimo_sch_posted_per_grp_sz[i]);
+	}
+
+	/* The "ext" array continues the 11ac group sizes where the base
+	 * array ends, hence the index offset in both printed numbers.
+	 */
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+		 "ac_mu_mimo_sch_posted_per_group_index %u (TOTAL STREAMS = %u) = %u,\n",
+		 i + HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS,
+		 i + HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS + 1,
+		 htt_stats_buf->ac_mu_mimo_sch_posted_per_grp_sz_ext[i]);
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+		"\nax_mu_mimo_sch_posted_per_group_index %u (SU) = %u,\n",
+		0, htt_stats_buf->ax_mu_mimo_sch_posted_per_grp_sz[0]);
+	for (i = 1; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+		 "ax_mu_mimo_sch_posted_per_group_index %u (TOTAL STREAMS = %u) = %u,\n",
+		 i, i + 1, htt_stats_buf->ax_mu_mimo_sch_posted_per_grp_sz[i]);
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+		"\nbe_mu_mimo_sch_posted_per_group_index %u (SU) = %u,\n",
+		0, htt_stats_buf->be_mu_mimo_sch_posted_per_grp_sz[0]);
+	for (i = 1; i < HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+		 "be_mu_mimo_sch_posted_per_group_index %u (TOTAL STREAMS = %u) = %u,\n",
+		 i, i + 1, htt_stats_buf->be_mu_mimo_sch_posted_per_grp_sz[i]);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n11ac MU_MIMO SCH STATS:\n");
+
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
+		len += scnprintf(buf + len, buf_len - len,
+				 "ac_mu_mimo_sch_nusers_%u = %u\n",
+				 i, htt_stats_buf->ac_mu_mimo_sch_nusers[i]);
+
+	len += scnprintf(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:\n");
+
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++)
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_mu_mimo_sch_nusers_%u = %u\n",
+				 i, htt_stats_buf->ax_mu_mimo_sch_nusers[i]);
+
+	len += scnprintf(buf + len, buf_len - len, "\n11be MU_MIMO SCH STATS:\n");
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+			"be_mu_mimo_sch_nusers_%u = %u\n",
+			i, htt_stats_buf->be_mu_mimo_sch_nusers[i]);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:\n");
+
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_sch_nusers_%u = %u\n",
+				 i, htt_stats_buf->ax_ofdma_sch_nusers[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ul_ofdma_basic_sch_nusers_%u = %u\n",
+				 i, htt_stats_buf->ax_ul_ofdma_basic_sch_nusers[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ul_ofdma_bsr_sch_nusers_%u = %u\n",
+				 i, htt_stats_buf->ax_ul_ofdma_bsr_sch_nusers[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ul_ofdma_bar_sch_nusers_%u = %u\n",
+				 i, htt_stats_buf->ax_ul_ofdma_bar_sch_nusers[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ul_ofdma_brp_sch_nusers_%u = %u\n",
+				 i, htt_stats_buf->ax_ul_ofdma_brp_sch_nusers[i]);
+	}
+
+	/* Heading was missing its trailing '\n', which glued the first
+	 * counter of this section onto the heading line.
+	 */
+	len += scnprintf(buf + len, buf_len - len,
+			 "\n11ax UL MUMIMO SCH STATS:\n");
+
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ul_mumimo_basic_sch_nusers_%u = %u\n", i,
+				 htt_stats_buf->ax_ul_mumimo_basic_sch_nusers[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ul_mumimo_brp_sch_nusers_%u = %u\n", i,
+				 htt_stats_buf->ax_ul_mumimo_brp_sch_nusers[i]);
+	}
+
+	stats_req->buf_len = len;
+}
+
+/* Print HTT_TX_PDEV_MUMIMO_GRP_STATS: DL/UL MU-MIMO grouping statistics. */
+static inline
+void htt_print_tx_pdev_mumimo_grp_stats_tlv(const void *tag_buf,
+					    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_mumimo_grp_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int j, index;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_PDEV_MUMIMO_GRP_STATS:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->dl_mumimo_grp_best_grp_size,
+			   "dl_mumimo_grp_best_grp_size", HTT_STATS_MAX_MUMIMO_GRP_SZ,
+			   "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->dl_mumimo_grp_best_num_usrs,
+			   "dl_mumimo_grp_best_num_usrs",
+			   HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->dl_mumimo_grp_tputs,
+			   "dl_mumimo_grp_tputs_observed (per bin = 300 mbps)",
+			   HTT_STATS_MUMIMO_TPUT_NUM_BINS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->dl_mumimo_grp_eligible,
+			   "dl_mumimo_grp eligible", HTT_STATS_MAX_MUMIMO_GRP_SZ, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->dl_mumimo_grp_ineligible,
+			   "dl_mumimo_grp_ineligible", HTT_STATS_MAX_MUMIMO_GRP_SZ, "\n");
+	/* dl_mumimo_grp_invalid carries HTT_STATS_MAX_INVALID_REASON_CODE
+	 * counters per group: advance into the array for each group instead
+	 * of re-printing the group-0 slice on every iteration.
+	 */
+	for (j = 0; j < HTT_STATS_MAX_MUMIMO_GRP_SZ; j++) {
+		index = j * HTT_STATS_MAX_INVALID_REASON_CODE;
+		len += scnprintf(buf + len, buf_len - len, "grp_id = %u", j);
+		PRINT_ARRAY_TO_BUF(buf, len,
+				   &htt_stats_buf->dl_mumimo_grp_invalid[index],
+				   NULL, HTT_STATS_MAX_INVALID_REASON_CODE, "\n");
+	}
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_mumimo_grp_best_grp_size,
+			   "ul_mumimo_grp_best_grp_size", HTT_STATS_MAX_MUMIMO_GRP_SZ,
+			   "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_mumimo_grp_best_num_usrs,
+			   "ul_mumimo_grp_best_num_usrs",
+			   HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_mumimo_grp_tputs,
+			   "ul_mumimo_grp_tputs_observed (per bin = 300 mbps)",
+			   HTT_STATS_MUMIMO_TPUT_NUM_BINS, "\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Emit the seven per-user MPDU counters shared by every MU scheduling
+ * mode, prefixed with the mode name (e.g. "ac_mu_mimo").  Returns the
+ * updated buffer length.
+ */
+static inline u32
+htt_print_mu_mpdu_per_usr_stats(const struct htt_tx_pdev_mpdu_stats_tlv *stats,
+				u8 *buf, u32 len, u32 buf_len,
+				const char *prefix)
+{
+	len += scnprintf(buf + len, buf_len - len, "%s_mpdus_queued_usr_%u = %u\n",
+			 prefix, stats->user_index, stats->mpdus_queued_usr);
+	len += scnprintf(buf + len, buf_len - len, "%s_mpdus_tried_usr_%u = %u\n",
+			 prefix, stats->user_index, stats->mpdus_tried_usr);
+	len += scnprintf(buf + len, buf_len - len, "%s_mpdus_failed_usr_%u = %u\n",
+			 prefix, stats->user_index, stats->mpdus_failed_usr);
+	len += scnprintf(buf + len, buf_len - len, "%s_mpdus_requeued_usr_%u = %u\n",
+			 prefix, stats->user_index, stats->mpdus_requeued_usr);
+	len += scnprintf(buf + len, buf_len - len, "%s_err_no_ba_usr_%u = %u\n",
+			 prefix, stats->user_index, stats->err_no_ba_usr);
+	len += scnprintf(buf + len, buf_len - len, "%s_mpdu_underrun_usr_%u = %u\n",
+			 prefix, stats->user_index, stats->mpdu_underrun_usr);
+	len += scnprintf(buf + len, buf_len - len, "%s_ampdu_underrun_usr_%u = %u\n",
+			 prefix, stats->user_index, stats->ampdu_underrun_usr);
+
+	return len;
+}
+
+/* Print an HTT_TX_PDEV MPDU stats TLV.  One TLV covers a single user of
+ * one scheduling mode; the section heading is emitted only for user 0.
+ * Users beyond the per-mode maximum are silently ignored.
+ */
+static inline void
+htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
+					 struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC) {
+		if (!htt_stats_buf->user_index)
+			len += scnprintf(buf + len, buf_len - len,
+					 "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n");
+
+		if (htt_stats_buf->user_index <
+		    HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS)
+			len = htt_print_mu_mpdu_per_usr_stats(htt_stats_buf,
+							      buf, len, buf_len,
+							      "ac_mu_mimo");
+	}
+
+	if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX) {
+		if (!htt_stats_buf->user_index)
+			len += scnprintf(buf + len, buf_len - len,
+					 "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n");
+
+		if (htt_stats_buf->user_index <
+		    HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS)
+			len = htt_print_mu_mpdu_per_usr_stats(htt_stats_buf,
+							      buf, len, buf_len,
+							      "ax_mu_mimo");
+	}
+
+	if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX) {
+		if (!htt_stats_buf->user_index)
+			len += scnprintf(buf + len, buf_len - len,
+					 "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n");
+
+		if (htt_stats_buf->user_index < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS)
+			len = htt_print_mu_mpdu_per_usr_stats(htt_stats_buf,
+							      buf, len, buf_len,
+							      "ax_mu_ofdma");
+	}
+
+	stats_req->buf_len = len;
+}
+
+/* Print an HTT_SCHED_TXQ_CMD_POSTED TLV: posted-command counters per
+ * TX scheduling mode.
+ */
+static inline void
+htt_print_sched_txq_cmd_posted_tlv_v(const void *tag_buf,
+				     u16 tag_len,
+				     struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_sched_txq_cmd_posted_tlv_v *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u16 num_elems;
+
+	/* One u32 counter per mode; clamp to the modes this driver knows. */
+	num_elems = min_t(u16, tag_len >> 2, HTT_TX_PDEV_SCHED_TX_MODE_MAX);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_posted,
+			   "sched_cmd_posted", num_elems, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Print an HTT_SCHED_TXQ_CMD_REAPED TLV: reaped-command counters per
+ * TX scheduling mode.
+ */
+static inline void
+htt_print_sched_txq_cmd_reaped_tlv_v(const void *tag_buf,
+				     u16 tag_len,
+				     struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_sched_txq_cmd_reaped_tlv_v *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u16 num_elems;
+
+	/* One u32 counter per mode; clamp to the modes this driver knows. */
+	num_elems = min_t(u16, tag_len >> 2, HTT_TX_PDEV_SCHED_TX_MODE_MAX);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_reaped,
+			   "sched_cmd_reaped", num_elems, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Print an HTT_SCHED_TXQ_SCHED_ORDER_SU TLV: the recent SU scheduling
+ * order log.
+ */
+static inline void
+htt_print_sched_txq_sched_order_su_tlv_v(const void *tag_buf,
+					 u16 tag_len,
+					 struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_sched_txq_sched_order_su_tlv_v *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u32 num_entries;
+
+	/* Entries are u32 (4 bytes each); cap at the order-log depth. */
+	num_entries = min_t(u32, tag_len >> 2, HTT_TX_PDEV_NUM_SCHED_ORDER_LOG);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_order_su, "sched_order_su",
+			   num_entries, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Print an HTT_SCHED_TXQ_SCHED_INELIGIBILITY TLV: per-reason counters of
+ * why a queue was not scheduled.
+ */
+static inline void
+htt_print_sched_txq_sched_ineligibility_tlv_v(const void *tag_buf,
+					      u16 tag_len,
+					      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_sched_txq_sched_ineligibility_tlv_v *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u32 num_entries;
+
+	/* Variable-length TLV: every 4 bytes is one u32 counter. */
+	num_entries = tag_len >> 2;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_SCHED_TXQ_SCHED_INELIGIBILITY_V:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_ineligibility,
+			   "sched_ineligibility", num_entries,
+			   "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Print an HTT_TX_PDEV_STATS_SCHED_PER_TXQ TLV: per-TX-queue scheduler
+ * state and counters, one "name = value" line per field.
+ */
+static inline void
+htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf,
+					  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_stats_sched_per_txq_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__txq_id__word);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:\n");
+	/* mac_id and txq_id are packed into the same 32-bit word. */
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word,
+				      HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "txq_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID));
+	len += scnprintf(buf + len, buf_len - len, "sched_policy = %u\n",
+			 htt_stats_buf->sched_policy);
+	len += scnprintf(buf + len, buf_len - len,
+			 "last_sched_cmd_posted_timestamp = %u\n",
+			 htt_stats_buf->last_sched_cmd_posted_timestamp);
+	len += scnprintf(buf + len, buf_len - len,
+			 "last_sched_cmd_compl_timestamp = %u\n",
+			 htt_stats_buf->last_sched_cmd_compl_timestamp);
+	len += scnprintf(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u\n",
+			 htt_stats_buf->sched_2_tac_lwm_count);
+	len += scnprintf(buf + len, buf_len - len, "sched_2_tac_ring_full = %u\n",
+			 htt_stats_buf->sched_2_tac_ring_full);
+	len += scnprintf(buf + len, buf_len - len, "sched_cmd_post_failure = %u\n",
+			 htt_stats_buf->sched_cmd_post_failure);
+	len += scnprintf(buf + len, buf_len - len, "num_active_tids = %u\n",
+			 htt_stats_buf->num_active_tids);
+	len += scnprintf(buf + len, buf_len - len, "num_ps_schedules = %u\n",
+			 htt_stats_buf->num_ps_schedules);
+	len += scnprintf(buf + len, buf_len - len, "sched_cmds_pending = %u\n",
+			 htt_stats_buf->sched_cmds_pending);
+	len += scnprintf(buf + len, buf_len - len, "num_tid_register = %u\n",
+			 htt_stats_buf->num_tid_register);
+	len += scnprintf(buf + len, buf_len - len, "num_tid_unregister = %u\n",
+			 htt_stats_buf->num_tid_unregister);
+	len += scnprintf(buf + len, buf_len - len, "num_qstats_queried = %u\n",
+			 htt_stats_buf->num_qstats_queried);
+	len += scnprintf(buf + len, buf_len - len, "qstats_update_pending = %u\n",
+			 htt_stats_buf->qstats_update_pending);
+	len += scnprintf(buf + len, buf_len - len, "last_qstats_query_timestamp = %u\n",
+			 htt_stats_buf->last_qstats_query_timestamp);
+	len += scnprintf(buf + len, buf_len - len, "num_tqm_cmdq_full = %u\n",
+			 htt_stats_buf->num_tqm_cmdq_full);
+	len += scnprintf(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u\n",
+			 htt_stats_buf->num_de_sched_algo_trigger);
+	len += scnprintf(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u\n",
+			 htt_stats_buf->num_rt_sched_algo_trigger);
+	len += scnprintf(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u\n",
+			 htt_stats_buf->num_tqm_sched_algo_trigger);
+	len += scnprintf(buf + len, buf_len - len, "notify_sched = %u\n",
+			 htt_stats_buf->notify_sched);
+	len += scnprintf(buf + len, buf_len - len, "dur_based_sendn_term = %u\n",
+			 htt_stats_buf->dur_based_sendn_term);
+	len += scnprintf(buf + len, buf_len - len, "su_notify2_sched = %u\n",
+			 htt_stats_buf->su_notify2_sched);
+	len += scnprintf(buf + len, buf_len - len, "su_optimal_queued_msdus_sched = %u\n",
+			 htt_stats_buf->su_optimal_queued_msdus_sched);
+	len += scnprintf(buf + len, buf_len - len, "su_delay_timeout_sched = %u\n",
+			 htt_stats_buf->su_delay_timeout_sched);
+	len += scnprintf(buf + len, buf_len - len, "su_min_txtime_sched_delay = %u\n",
+			 htt_stats_buf->su_min_txtime_sched_delay);
+	len += scnprintf(buf + len, buf_len - len, "su_no_delay = %u\n",
+			 htt_stats_buf->su_no_delay);
+	len += scnprintf(buf + len, buf_len - len, "num_supercycles = %u\n",
+			 htt_stats_buf->num_supercycles);
+	len += scnprintf(buf + len, buf_len - len, "num_subcycles_with_sort = %u\n",
+			 htt_stats_buf->num_subcycles_with_sort);
+	len += scnprintf(buf + len, buf_len - len, "num_subcycles_no_sort = %u\n",
+			 htt_stats_buf->num_subcycles_no_sort);
+
+	stats_req->buf_len = len;
+}
+
+/* Print an HTT_STATS_TX_SCHED_CMN TLV: mac id and firmware timestamp. */
+static inline void htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf,
+						    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_stats_tx_sched_cmn_tlv *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u32 mac_id;
+
+	/* Only the mac_id bits of the packed word are of interest here. */
+	mac_id = u32_get_bits(__le32_to_cpu(htt_stats_buf->mac_id__word),
+			      HTT_STATS_MAC_ID);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", mac_id);
+	len += scnprintf(buf + len, buf_len - len, "current_timestamp = %u\n",
+			 htt_stats_buf->current_timestamp);
+
+	stats_req->buf_len = len;
+}
+
+/* Print an HTT_TX_TQM_GEN_MPDU stats TLV: counters per MPDU-generation
+ * end reason.
+ */
+static inline void
+htt_print_tx_tqm_gen_mpdu_stats_tlv_v(const void *tag_buf,
+				      u16 tag_len,
+				      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_tqm_gen_mpdu_stats_tlv_v *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u16 num_elems;
+
+	/* One u32 per end-reason code, clamped to the defined reason set. */
+	num_elems = min_t(u16, tag_len >> 2, HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->gen_mpdu_end_reason,
+			   "gen_mpdu_end_reason", num_elems, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Print an HTT_TX_TQM_LIST_MPDU stats TLV: counters per list-MPDU end
+ * reason.
+ */
+static inline void
+htt_print_tx_tqm_list_mpdu_stats_tlv_v(const void *tag_buf,
+				       u16 tag_len,
+				       struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_tqm_list_mpdu_stats_tlv_v *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u16 n_reasons;
+
+	/* One u32 per end-reason code, clamped to the defined reason set. */
+	n_reasons = min_t(u16, tag_len >> 2, HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_end_reason,
+			   "list_mpdu_end_reason", n_reasons, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Print an HTT_TX_TQM_LIST_MPDU_CNT TLV: histogram of list-MPDU counts. */
+static inline void
+htt_print_tx_tqm_list_mpdu_cnt_tlv_v(const void *tag_buf,
+				     u16 tag_len,
+				     struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_tqm_list_mpdu_cnt_tlv_v *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u16 n_bins;
+
+	/* One u32 per histogram bin, clamped to the histogram size. */
+	n_bins = min_t(u16, tag_len >> 2,
+		       HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_cnt_hist,
+			   "list_mpdu_cnt_hist", n_bins, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Print an HTT_TX_TQM_PDEV_STATS TLV: per-pdev TQM MSDU/MPDU and command
+ * counters, one "name = value" line per field.
+ */
+static inline void
+htt_print_tx_tqm_pdev_stats_tlv_v(const void *tag_buf,
+				  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_tqm_pdev_stats_tlv_v *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:\n");
+	len += scnprintf(buf + len, buf_len - len, "msdu_count = %u\n",
+			 htt_stats_buf->msdu_count);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_count = %u\n",
+			 htt_stats_buf->mpdu_count);
+	len += scnprintf(buf + len, buf_len - len, "remove_msdu = %u\n",
+			 htt_stats_buf->remove_msdu);
+	len += scnprintf(buf + len, buf_len - len, "remove_mpdu = %u\n",
+			 htt_stats_buf->remove_mpdu);
+	len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl = %u\n",
+			 htt_stats_buf->remove_msdu_ttl);
+	len += scnprintf(buf + len, buf_len - len, "send_bar = %u\n",
+			 htt_stats_buf->send_bar);
+	len += scnprintf(buf + len, buf_len - len, "bar_sync = %u\n",
+			 htt_stats_buf->bar_sync);
+	len += scnprintf(buf + len, buf_len - len, "notify_mpdu = %u\n",
+			 htt_stats_buf->notify_mpdu);
+	len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
+			 htt_stats_buf->sync_cmd);
+	len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
+			 htt_stats_buf->write_cmd);
+	len += scnprintf(buf + len, buf_len - len, "hwsch_trigger = %u\n",
+			 htt_stats_buf->hwsch_trigger);
+	len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+			 htt_stats_buf->ack_tlv_proc);
+	len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
+			 htt_stats_buf->gen_mpdu_cmd);
+	len += scnprintf(buf + len, buf_len - len, "gen_list_cmd = %u\n",
+			 htt_stats_buf->gen_list_cmd);
+	len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
+			 htt_stats_buf->remove_mpdu_cmd);
+	len += scnprintf(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u\n",
+			 htt_stats_buf->remove_mpdu_tried_cmd);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
+			 htt_stats_buf->mpdu_queue_stats_cmd);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
+			 htt_stats_buf->mpdu_head_info_cmd);
+	len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
+			 htt_stats_buf->msdu_flow_stats_cmd);
+	len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
+			 htt_stats_buf->remove_msdu_cmd);
+	len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u\n",
+			 htt_stats_buf->remove_msdu_ttl_cmd);
+	len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
+			 htt_stats_buf->flush_cache_cmd);
+	len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
+			 htt_stats_buf->update_mpduq_cmd);
+	len += scnprintf(buf + len, buf_len - len, "enqueue = %u\n",
+			 htt_stats_buf->enqueue);
+	len += scnprintf(buf + len, buf_len - len, "enqueue_notify = %u\n",
+			 htt_stats_buf->enqueue_notify);
+	len += scnprintf(buf + len, buf_len - len, "notify_mpdu_at_head = %u\n",
+			 htt_stats_buf->notify_mpdu_at_head);
+	len += scnprintf(buf + len, buf_len - len, "notify_mpdu_state_valid = %u\n",
+			 htt_stats_buf->notify_mpdu_state_valid);
+	len += scnprintf(buf + len, buf_len - len, "sched_udp_notify1 = %u\n",
+			 htt_stats_buf->sched_udp_notify1);
+	len += scnprintf(buf + len, buf_len - len, "sched_udp_notify2 = %u\n",
+			 htt_stats_buf->sched_udp_notify2);
+	len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify1 = %u\n",
+			 htt_stats_buf->sched_nonudp_notify1);
+	len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n",
+			 htt_stats_buf->sched_nonudp_notify2);
+
+	stats_req->buf_len = len;
+}
+
+/* Print an HTT_TX_TQM_CMN_STATS TLV.  tag_len is consulted because newer
+ * firmware appends extra fields to this TLV (see below).
+ */
+static inline void
+htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf, u16 tag_len,
+			       struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_tqm_cmn_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "max_cmdq_id = %u\n",
+			 htt_stats_buf->max_cmdq_id);
+	len += scnprintf(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u\n",
+			 htt_stats_buf->list_mpdu_cnt_hist_intvl);
+	len += scnprintf(buf + len, buf_len - len, "add_msdu = %u\n",
+			 htt_stats_buf->add_msdu);
+	len += scnprintf(buf + len, buf_len - len, "q_empty = %u\n",
+			 htt_stats_buf->q_empty);
+	len += scnprintf(buf + len, buf_len - len, "q_not_empty = %u\n",
+			 htt_stats_buf->q_not_empty);
+	len += scnprintf(buf + len, buf_len - len, "drop_notification = %u\n",
+			 htt_stats_buf->drop_notification);
+	len += scnprintf(buf + len, buf_len - len, "desc_threshold = %u\n",
+			 htt_stats_buf->desc_threshold);
+	len += scnprintf(buf + len, buf_len - len, "hwsch_tqm_invalid_status = %u\n",
+			 htt_stats_buf->hwsch_tqm_invalid_status);
+	len += scnprintf(buf + len, buf_len - len, "missed_tqm_gen_mpdus = %u\n",
+			 htt_stats_buf->missed_tqm_gen_mpdus);
+
+	/* Optional trailing fields: only printed when the received TLV is
+	 * long enough to reach tqm_active_tids (older firmware omits them).
+	 * NOTE(review): the check only guarantees the TLV extends past the
+	 * *start* of the optional block — presumably firmware sends either
+	 * none or all three fields; confirm against the TLV definition.
+	 */
+	if (tag_len > (offsetof(struct htt_tx_tqm_cmn_stats_tlv, tqm_active_tids) -
+		       offsetof(struct htt_tx_tqm_cmn_stats_tlv, mac_id__word))) {
+		len += scnprintf(buf + len, buf_len - len, "active_tqm_tids = %u\n",
+				 htt_stats_buf->tqm_active_tids);
+		len += scnprintf(buf + len, buf_len - len, "inactive_tqm_tids = %u\n",
+				 htt_stats_buf->tqm_inactive_tids);
+		len += scnprintf(buf + len, buf_len - len,
+				 "tqm_active_msduq_flows = %u\n",
+				 htt_stats_buf->tqm_active_msduq_flows);
+	}
+
+	stats_req->buf_len = len;
+}
+
+/* Printer used when the firmware has no stats for the requested type. */
+static inline void
+htt_print_unavailable_error_stats_tlv(const void *tag_buf,
+				      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_stats_error_tlv_v *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	/* Terminate both lines with '\n' so the next TLV's output is not
+	 * appended to this one (matches
+	 * htt_print_unsupported_error_stats_tlv).
+	 */
+	len += scnprintf(buf + len, buf_len - len, "HTT_ERROR_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "No stats to print for current request: %d\n",
+			 htt_stats_buf->htt_stats_type);
+
+	stats_req->buf_len = len;
+}
+
+/* Printer used when the firmware reports a stats type this driver does
+ * not know how to decode.
+ */
+static inline void
+htt_print_unsupported_error_stats_tlv(const void *tag_buf,
+				      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_stats_error_tlv_v *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_ERROR_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "Unsupported HTT stats type: %d\n",
+			 htt_stats_buf->htt_stats_type);
+
+	stats_req->buf_len = len;
+}
+
+/* Print HTT_TX_TQM_ERROR_STATS: TQM queue-operation error counters,
+ * followed by the TQM reset/recovery statistics block.
+ */
+static inline void htt_print_tx_tqm_error_stats_tlv(const void *tag_buf,
+						    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_tqm_error_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "q_empty_failure = %u\n",
+			 htt_stats_buf->q_empty_failure);
+	len += scnprintf(buf + len, buf_len - len, "q_not_empty_failure = %u\n",
+			 htt_stats_buf->q_not_empty_failure);
+	len += scnprintf(buf + len, buf_len - len, "add_msdu_failure = %u\n",
+			 htt_stats_buf->add_msdu_failure);
+	/* Everything below belongs to the TQM reset/recovery sub-section. */
+	len += scnprintf(buf + len, buf_len - len, "TQM_ERROR_RESET_STATS:\n");
+	len += scnprintf(buf + len, buf_len - len, "tqm_cache_ctl_err = %u\n",
+			   htt_stats_buf->tqm_cache_ctl_err);
+	len += scnprintf(buf + len, buf_len - len, "tqm_soft_reset = %u\n",
+			   htt_stats_buf->tqm_soft_reset);
+	len += scnprintf(buf + len, buf_len - len,
+			   "tqm_reset_total_num_in_use_link_descs = %u\n",
+			   htt_stats_buf->tqm_reset_total_num_in_use_link_descs);
+	len += scnprintf(buf + len, buf_len - len,
+			   "tqm_reset_worst_case_num_lost_link_descs = %u\n",
+			   htt_stats_buf->tqm_reset_worst_case_num_lost_link_descs);
+	len += scnprintf(buf + len, buf_len - len,
+			 "tqm_reset_worst_case_num_lost_host_tx_bufs_count = %u\n",
+			 htt_stats_buf->tqm_reset_worst_case_num_lost_host_tx_bufs_count);
+	len += scnprintf(buf + len, buf_len - len,
+			   "tqm_reset_num_in_use_link_descs_internal_tqm = %u\n",
+			   htt_stats_buf->tqm_reset_num_in_use_link_descs_internal_tqm);
+	len += scnprintf(buf + len, buf_len - len,
+		      "tqm_reset_num_in_use_link_descs_wbm_idle_link_ring = %u\n",
+		      htt_stats_buf->tqm_reset_num_in_use_link_descs_wbm_idle_link_ring);
+	len += scnprintf(buf + len, buf_len - len,
+			   "tqm_reset_time_to_tqm_hang_delta_ms = %u\n",
+			   htt_stats_buf->tqm_reset_time_to_tqm_hang_delta_ms);
+	len += scnprintf(buf + len, buf_len - len, "tqm_reset_recovery_time_ms = %u\n",
+			   htt_stats_buf->tqm_reset_recovery_time_ms);
+	len += scnprintf(buf + len, buf_len - len, "tqm_reset_num_peers_hdl = %u\n",
+			   htt_stats_buf->tqm_reset_num_peers_hdl);
+	len += scnprintf(buf + len, buf_len - len,
+			   "tqm_reset_cumm_dirty_hw_mpduq_proc_cnt = %u\n",
+			   htt_stats_buf->tqm_reset_cumm_dirty_hw_mpduq_proc_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			   "tqm_reset_cumm_dirty_hw_msduq_proc = %u\n",
+			   htt_stats_buf->tqm_reset_cumm_dirty_hw_msduq_proc);
+	len += scnprintf(buf + len, buf_len - len,
+			   "tqm_reset_flush_cache_cmd_su_cnt = %u\n",
+			   htt_stats_buf->tqm_reset_flush_cache_cmd_su_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			   "tqm_reset_flush_cache_cmd_other_cnt = %u\n",
+			   htt_stats_buf->tqm_reset_flush_cache_cmd_other_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			   "tqm_reset_flush_cache_cmd_trig_type = %u\n",
+			   htt_stats_buf->tqm_reset_flush_cache_cmd_trig_type);
+	len += scnprintf(buf + len, buf_len - len,
+			   "tqm_reset_flush_cache_cmd_trig_cfg = %u\n",
+			   htt_stats_buf->tqm_reset_flush_cache_cmd_trig_cfg);
+	len += scnprintf(buf + len, buf_len - len,
+			   "tqm_reset_flush_cache_cmd_skip_cmd_status_null = %u\n",
+			   htt_stats_buf->tqm_reset_flush_cache_cmd_skip_cmd_status_null);
+
+	stats_req->buf_len = len;
+}
+
+/* Print an HTT_TX_TQM_CMDQ_STATUS TLV: per-command-queue TQM counters. */
+static inline void htt_print_tx_tqm_cmdq_status_tlv(const void *tag_buf,
+						    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_tqm_cmdq_status_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	/* mac_id and cmdq_id are packed into the same 32-bit word. */
+	u32 mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__cmdq_id__word);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMDQ_STATUS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_TX_TQM_CMDQ_STATUS_MAC_ID));
+	/* "\n\n" separates the id header from the counter list below. */
+	len += scnprintf(buf + len, buf_len - len, "cmdq_id = %u\n\n",
+			 u32_get_bits(mac_id_word, HTT_TX_TQM_CMDQ_STATUS_CMDQ_ID));
+	len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
+			 htt_stats_buf->sync_cmd);
+	len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
+			 htt_stats_buf->write_cmd);
+	len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
+			 htt_stats_buf->gen_mpdu_cmd);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
+			 htt_stats_buf->mpdu_queue_stats_cmd);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
+			 htt_stats_buf->mpdu_head_info_cmd);
+	len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
+			 htt_stats_buf->msdu_flow_stats_cmd);
+	len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
+			 htt_stats_buf->remove_mpdu_cmd);
+	len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
+			 htt_stats_buf->remove_msdu_cmd);
+	len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
+			 htt_stats_buf->flush_cache_cmd);
+	len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
+			 htt_stats_buf->update_mpduq_cmd);
+	len += scnprintf(buf + len, buf_len - len, "update_msduq_cmd = %u\n",
+			 htt_stats_buf->update_msduq_cmd);
+
+	stats_req->buf_len = len;
+}
+
+/* Print an HTT_TX_DE_EAPOL_PACKETS_STATS TLV: EAPOL/EAP packet
+ * classification counters from the TX data engine.  Continuation lines
+ * are aligned uniformly (the second half of the original used a
+ * different indent than the first half).
+ */
+static inline void
+htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf,
+					struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_de_eapol_packets_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "m1_packets = %u\n",
+			 htt_stats_buf->m1_packets);
+	len += scnprintf(buf + len, buf_len - len, "m2_packets = %u\n",
+			 htt_stats_buf->m2_packets);
+	len += scnprintf(buf + len, buf_len - len, "m3_packets = %u\n",
+			 htt_stats_buf->m3_packets);
+	len += scnprintf(buf + len, buf_len - len, "m4_packets = %u\n",
+			 htt_stats_buf->m4_packets);
+	len += scnprintf(buf + len, buf_len - len, "g1_packets = %u\n",
+			 htt_stats_buf->g1_packets);
+	len += scnprintf(buf + len, buf_len - len, "g2_packets = %u\n",
+			 htt_stats_buf->g2_packets);
+	len += scnprintf(buf + len, buf_len - len, "rc4_packets = %u\n",
+			 htt_stats_buf->rc4_packets);
+	len += scnprintf(buf + len, buf_len - len, "eap_packets = %u\n",
+			 htt_stats_buf->eap_packets);
+	len += scnprintf(buf + len, buf_len - len, "eapol_start_packets = %u\n",
+			 htt_stats_buf->eapol_start_packets);
+	len += scnprintf(buf + len, buf_len - len, "eapol_logoff_packets = %u\n",
+			 htt_stats_buf->eapol_logoff_packets);
+	len += scnprintf(buf + len, buf_len - len, "eapol_encap_asf_packets = %u\n",
+			 htt_stats_buf->eapol_encap_asf_packets);
+
+	stats_req->buf_len = len;
+}
+
+/* Print HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV (reasons the TX data engine
+ * failed to classify a frame) into stats_req->buf and advance
+ * stats_req->buf_len.  Continuation indentation normalized on the last
+ * six prints to match the rest of the file.
+ */
+static inline void
+htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf,
+					  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_de_classify_failed_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "ap_bss_peer_not_found = %u\n",
+			 htt_stats_buf->ap_bss_peer_not_found);
+	len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u\n",
+			 htt_stats_buf->ap_bcast_mcast_no_peer);
+	len += scnprintf(buf + len, buf_len - len, "sta_delete_in_progress = %u\n",
+			 htt_stats_buf->sta_delete_in_progress);
+	len += scnprintf(buf + len, buf_len - len, "ibss_no_bss_peer = %u\n",
+			 htt_stats_buf->ibss_no_bss_peer);
+	len += scnprintf(buf + len, buf_len - len, "invalid_vdev_type = %u\n",
+			 htt_stats_buf->invalid_vdev_type);
+	len += scnprintf(buf + len, buf_len - len, "invalid_ast_peer_entry = %u\n",
+			 htt_stats_buf->invalid_ast_peer_entry);
+	len += scnprintf(buf + len, buf_len - len, "peer_entry_invalid = %u\n",
+			 htt_stats_buf->peer_entry_invalid);
+	len += scnprintf(buf + len, buf_len - len, "ethertype_not_ip = %u\n",
+			 htt_stats_buf->ethertype_not_ip);
+	len += scnprintf(buf + len, buf_len - len, "eapol_lookup_failed = %u\n",
+			 htt_stats_buf->eapol_lookup_failed);
+	len += scnprintf(buf + len, buf_len - len, "qpeer_not_allow_data = %u\n",
+			 htt_stats_buf->qpeer_not_allow_data);
+	len += scnprintf(buf + len, buf_len - len, "fse_tid_override = %u\n",
+			 htt_stats_buf->fse_tid_override);
+	len += scnprintf(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u\n",
+			 htt_stats_buf->ipv6_jumbogram_zero_length);
+	len += scnprintf(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n",
+			 htt_stats_buf->qos_to_non_qos_in_prog);
+	len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_eapol = %u\n",
+			 htt_stats_buf->ap_bcast_mcast_eapol);
+	len += scnprintf(buf + len, buf_len - len, "unicast_on_ap_bss_peer = %u\n",
+			 htt_stats_buf->unicast_on_ap_bss_peer);
+	len += scnprintf(buf + len, buf_len - len, "ap_vdev_invalid = %u\n",
+			 htt_stats_buf->ap_vdev_invalid);
+	len += scnprintf(buf + len, buf_len - len, "incomplete_llc = %u\n",
+			 htt_stats_buf->incomplete_llc);
+	len += scnprintf(buf + len, buf_len - len, "eapol_duplicate_m3 = %u\n",
+			 htt_stats_buf->eapol_duplicate_m3);
+	len += scnprintf(buf + len, buf_len - len, "eapol_duplicate_m4 = %u\n",
+			 htt_stats_buf->eapol_duplicate_m4);
+
+	stats_req->buf_len = len;
+}
+
+/* Print HTT_TX_DE_CLASSIFY_STATS_TLV (TX data-engine frame classification
+ * counters: packet types, HTT-supplied TX params, FSE flow stats) into
+ * stats_req->buf and advance stats_req->buf_len.
+ */
+static inline void
+htt_print_tx_de_classify_stats_tlv(const void *tag_buf,
+				   struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_de_classify_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "arp_packets = %u\n",
+			 htt_stats_buf->arp_packets);
+	len += scnprintf(buf + len, buf_len - len, "igmp_packets = %u\n",
+			 htt_stats_buf->igmp_packets);
+	len += scnprintf(buf + len, buf_len - len, "dhcp_packets = %u\n",
+			 htt_stats_buf->dhcp_packets);
+	len += scnprintf(buf + len, buf_len - len, "host_inspected = %u\n",
+			 htt_stats_buf->host_inspected);
+	len += scnprintf(buf + len, buf_len - len, "htt_included = %u\n",
+			 htt_stats_buf->htt_included);
+	len += scnprintf(buf + len, buf_len - len, "htt_valid_mcs = %u\n",
+			 htt_stats_buf->htt_valid_mcs);
+	len += scnprintf(buf + len, buf_len - len, "htt_valid_nss = %u\n",
+			 htt_stats_buf->htt_valid_nss);
+	len += scnprintf(buf + len, buf_len - len, "htt_valid_preamble_type = %u\n",
+			 htt_stats_buf->htt_valid_preamble_type);
+	len += scnprintf(buf + len, buf_len - len, "htt_valid_chainmask = %u\n",
+			 htt_stats_buf->htt_valid_chainmask);
+	len += scnprintf(buf + len, buf_len - len, "htt_valid_guard_interval = %u\n",
+			 htt_stats_buf->htt_valid_guard_interval);
+	len += scnprintf(buf + len, buf_len - len, "htt_valid_retries = %u\n",
+			 htt_stats_buf->htt_valid_retries);
+	len += scnprintf(buf + len, buf_len - len, "htt_valid_bw_info = %u\n",
+			 htt_stats_buf->htt_valid_bw_info);
+	len += scnprintf(buf + len, buf_len - len, "htt_valid_power = %u\n",
+			 htt_stats_buf->htt_valid_power);
+	len += scnprintf(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x\n",
+			 htt_stats_buf->htt_valid_key_flags);
+	len += scnprintf(buf + len, buf_len - len, "htt_valid_no_encryption = %u\n",
+			 htt_stats_buf->htt_valid_no_encryption);
+	len += scnprintf(buf + len, buf_len - len, "fse_entry_count = %u\n",
+			 htt_stats_buf->fse_entry_count);
+	len += scnprintf(buf + len, buf_len - len, "fse_priority_be = %u\n",
+			 htt_stats_buf->fse_priority_be);
+	len += scnprintf(buf + len, buf_len - len, "fse_priority_high = %u\n",
+			 htt_stats_buf->fse_priority_high);
+	len += scnprintf(buf + len, buf_len - len, "fse_priority_low = %u\n",
+			 htt_stats_buf->fse_priority_low);
+	len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u\n",
+			 htt_stats_buf->fse_traffic_ptrn_be);
+	len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u\n",
+			 htt_stats_buf->fse_traffic_ptrn_over_sub);
+	len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u\n",
+			 htt_stats_buf->fse_traffic_ptrn_bursty);
+	len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u\n",
+			 htt_stats_buf->fse_traffic_ptrn_interactive);
+	len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u\n",
+			 htt_stats_buf->fse_traffic_ptrn_periodic);
+	len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_alloc = %u\n",
+			 htt_stats_buf->fse_hwqueue_alloc);
+	len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_created = %u\n",
+			 htt_stats_buf->fse_hwqueue_created);
+	len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u\n",
+			 htt_stats_buf->fse_hwqueue_send_to_host);
+	len += scnprintf(buf + len, buf_len - len, "mcast_entry = %u\n",
+			 htt_stats_buf->mcast_entry);
+	len += scnprintf(buf + len, buf_len - len, "bcast_entry = %u\n",
+			 htt_stats_buf->bcast_entry);
+	len += scnprintf(buf + len, buf_len - len, "htt_update_peer_cache = %u\n",
+			 htt_stats_buf->htt_update_peer_cache);
+	len += scnprintf(buf + len, buf_len - len, "htt_learning_frame = %u\n",
+			 htt_stats_buf->htt_learning_frame);
+	len += scnprintf(buf + len, buf_len - len, "fse_invalid_peer = %u\n",
+			 htt_stats_buf->fse_invalid_peer);
+	len += scnprintf(buf + len, buf_len - len, "mec_notify = %u\n",
+			 htt_stats_buf->mec_notify);
+
+	stats_req->buf_len = len;
+}
+
+/* Print HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV (classification outcome
+ * counters) into stats_req->buf and advance stats_req->buf_len.
+ */
+static inline void
+htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf,
+					  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_de_classify_status_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "eok = %u\n",
+			 htt_stats_buf->eok);
+	len += scnprintf(buf + len, buf_len - len, "classify_done = %u\n",
+			 htt_stats_buf->classify_done);
+	len += scnprintf(buf + len, buf_len - len, "lookup_failed = %u\n",
+			 htt_stats_buf->lookup_failed);
+	len += scnprintf(buf + len, buf_len - len, "send_host_dhcp = %u\n",
+			 htt_stats_buf->send_host_dhcp);
+	len += scnprintf(buf + len, buf_len - len, "send_host_mcast = %u\n",
+			 htt_stats_buf->send_host_mcast);
+	len += scnprintf(buf + len, buf_len - len, "send_host_unknown_dest = %u\n",
+			 htt_stats_buf->send_host_unknown_dest);
+	len += scnprintf(buf + len, buf_len - len, "send_host = %u\n",
+			 htt_stats_buf->send_host);
+	len += scnprintf(buf + len, buf_len - len, "status_invalid = %u\n",
+			 htt_stats_buf->status_invalid);
+
+	stats_req->buf_len = len;
+}
+
+/* Print HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV (packets enqueued by the TX
+ * data engine toward TQM) into stats_req->buf and advance
+ * stats_req->buf_len.
+ */
+static inline void
+htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf,
+					  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_de_enqueue_packets_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "enqueued_pkts = %u\n",
+			 htt_stats_buf->enqueued_pkts);
+	len += scnprintf(buf + len, buf_len - len, "to_tqm = %u\n",
+			 htt_stats_buf->to_tqm);
+	len += scnprintf(buf + len, buf_len - len, "to_tqm_bypass = %u\n",
+			 htt_stats_buf->to_tqm_bypass);
+
+	stats_req->buf_len = len;
+}
+
+/* Print HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV (packets discarded at
+ * enqueue time) into stats_req->buf and advance stats_req->buf_len.
+ */
+static inline void
+htt_print_tx_de_enqueue_discard_stats_tlv(const void *tag_buf,
+					  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_de_enqueue_discard_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "discarded_pkts = %u\n",
+			 htt_stats_buf->discarded_pkts);
+	len += scnprintf(buf + len, buf_len - len, "local_frames = %u\n",
+			 htt_stats_buf->local_frames);
+	len += scnprintf(buf + len, buf_len - len, "is_ext_msdu = %u\n",
+			 htt_stats_buf->is_ext_msdu);
+
+	stats_req->buf_len = len;
+}
+
+/* Print HTT_TX_DE_COMPL_STATS_TLV (TX data-engine completion counters)
+ * into stats_req->buf and advance stats_req->buf_len.
+ */
+static inline void htt_print_tx_de_compl_stats_tlv(const void *tag_buf,
+						   struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_de_compl_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "tcl_dummy_frame = %u\n",
+			 htt_stats_buf->tcl_dummy_frame);
+	len += scnprintf(buf + len, buf_len - len, "tqm_dummy_frame = %u\n",
+			 htt_stats_buf->tqm_dummy_frame);
+	len += scnprintf(buf + len, buf_len - len, "tqm_notify_frame = %u\n",
+			 htt_stats_buf->tqm_notify_frame);
+	len += scnprintf(buf + len, buf_len - len, "fw2wbm_enq = %u\n",
+			 htt_stats_buf->fw2wbm_enq);
+	len += scnprintf(buf + len, buf_len - len, "tqm_bypass_frame = %u\n",
+			 htt_stats_buf->tqm_bypass_frame);
+
+	stats_req->buf_len = len;
+}
+
+/* Print the variable-length FW2WBM ring-full histogram TLV.  The element
+ * count is derived from the TLV length (tag_len >> 2, i.e. one u32 per
+ * bucket).  Appends to stats_req->buf and advances stats_req->buf_len.
+ */
+static inline void
+htt_print_tx_de_fw2wbm_ring_full_hist_tlv(const void *tag_buf,
+					  u16 tag_len,
+					  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_de_fw2wbm_ring_full_hist_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u16  num_elements = tag_len >> 2;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_DE_FW2WBM_RING_FULL_HIST_TLV\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw2wbm_ring_full_hist,
+			   "fw2wbm_ring_full_hist", num_elements, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Print HTT_TX_DE_CMN_STATS_TLV (common TX data-engine counters; mac_id
+ * is unpacked from the little-endian mac_id__word bitfield) into
+ * stats_req->buf and advance stats_req->buf_len.
+ */
+static inline void
+htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_de_cmn_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "tcl2fw_entry_count = %u\n",
+			 htt_stats_buf->tcl2fw_entry_count);
+	len += scnprintf(buf + len, buf_len - len, "not_to_fw = %u\n",
+			 htt_stats_buf->not_to_fw);
+	len += scnprintf(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u\n",
+			 htt_stats_buf->invalid_pdev_vdev_peer);
+	len += scnprintf(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u\n",
+			 htt_stats_buf->tcl_res_invalid_addrx);
+	len += scnprintf(buf + len, buf_len - len, "wbm2fw_entry_count = %u\n",
+			 htt_stats_buf->wbm2fw_entry_count);
+	len += scnprintf(buf + len, buf_len - len, "invalid_pdev = %u\n",
+			 htt_stats_buf->invalid_pdev);
+	len += scnprintf(buf + len, buf_len - len, "tcl_res_addrx_timeout = %u\n",
+			 htt_stats_buf->tcl_res_addrx_timeout);
+	len += scnprintf(buf + len, buf_len - len, "invalid_vdev = %u\n",
+			 htt_stats_buf->invalid_vdev);
+	len += scnprintf(buf + len, buf_len - len, "invalid_tcl_exp_frame_desc = %u\n",
+			 htt_stats_buf->invalid_tcl_exp_frame_desc);
+
+	stats_req->buf_len = len;
+}
+
+/* Print HTT_RING_IF_STATS_TLV (per-ring interface state: indices,
+ * watermark thresholds and hit-count histograms).  Packed index/threshold
+ * words are converted from little-endian once, then individual fields are
+ * unpacked with u32_get_bits.  Appends to stats_req->buf and advances
+ * stats_req->buf_len.  num_tail_incr continuation indentation normalized.
+ */
+static inline void htt_print_ring_if_stats_tlv(const void *tag_buf,
+					       struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_ring_if_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 tail_idx = __le32_to_cpu(htt_stats_buf->num_elems__prefetch_tail_idx);
+	u32 head_idx = __le32_to_cpu(htt_stats_buf->head_idx__tail_idx);
+	u32 shadow_idx = __le32_to_cpu(htt_stats_buf->shadow_head_idx__shadow_tail_idx);
+	u32 thresh = __le32_to_cpu(htt_stats_buf->lwm_thresh__hwm_thresh);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "base_addr = %u\n",
+			 htt_stats_buf->base_addr);
+	len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n",
+			 htt_stats_buf->elem_size);
+	len += scnprintf(buf + len, buf_len - len, "num_elems = %lu\n",
+			 (unsigned long)u32_get_bits(tail_idx,
+						     HTT_RING_IF_STATS_NUM_ELEMS));
+	len += scnprintf(buf + len, buf_len - len, "prefetch_tail_idx = %lu\n",
+			 (unsigned long)u32_get_bits(tail_idx,
+						HTT_RING_IF_STATS_PREFETCH_TAIL_INDEX));
+	len += scnprintf(buf + len, buf_len - len, "head_idx = %lu\n",
+			 (unsigned long)u32_get_bits(head_idx,
+						     HTT_RING_IF_STATS_HEAD_IDX));
+	len += scnprintf(buf + len, buf_len - len, "tail_idx = %lu\n",
+			 (unsigned long)u32_get_bits(head_idx,
+						     HTT_RING_IF_STATS_TAIL_IDX));
+	len += scnprintf(buf + len, buf_len - len, "shadow_head_idx = %lu\n",
+			 (unsigned long)u32_get_bits(shadow_idx,
+						HTT_RING_IF_STATS_SHADOW_HEAD_IDX));
+	len += scnprintf(buf + len, buf_len - len, "shadow_tail_idx = %lu\n",
+			 (unsigned long)u32_get_bits(shadow_idx,
+						HTT_RING_IF_STATS_SHADOW_TAIL_IDX));
+	len += scnprintf(buf + len, buf_len - len, "num_tail_incr = %u\n",
+			 htt_stats_buf->num_tail_incr);
+	len += scnprintf(buf + len, buf_len - len, "lwm_thresh = %lu\n",
+			 (unsigned long)u32_get_bits(thresh,
+						HTT_RING_IF_STATS_LWM_THRESH));
+	len += scnprintf(buf + len, buf_len - len, "hwm_thresh = %lu\n",
+			 (unsigned long)u32_get_bits(thresh,
+						HTT_RING_IF_STATS_HWM_THRESH));
+	len += scnprintf(buf + len, buf_len - len, "overrun_hit_count = %u\n",
+			 htt_stats_buf->overrun_hit_count);
+	len += scnprintf(buf + len, buf_len - len, "underrun_hit_count = %u\n",
+			 htt_stats_buf->underrun_hit_count);
+	len += scnprintf(buf + len, buf_len - len, "prod_blockwait_count = %u\n",
+			 htt_stats_buf->prod_blockwait_count);
+	len += scnprintf(buf + len, buf_len - len, "cons_blockwait_count = %u\n",
+			 htt_stats_buf->cons_blockwait_count);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->low_wm_hit_count,
+			   "low_wm_hit_count", HTT_STATS_LOW_WM_BINS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->high_wm_hit_count,
+			   "high_wm_hit_count", HTT_STATS_HIGH_WM_BINS, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Print HTT_RING_IF_CMN_TLV (mac_id plus the number of per-ring records
+ * that follow) into stats_req->buf and advance stats_req->buf_len.
+ */
+static inline void htt_print_ring_if_cmn_tlv(const void *tag_buf,
+					     struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_ring_if_cmn_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_CMN_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "num_records = %u\n",
+			 htt_stats_buf->num_records);
+
+	stats_req->buf_len = len;
+}
+
+/* Print the variable-length SFM per-user dword-usage TLV; element count
+ * is derived from the TLV length (one u32 per user).  Appends to
+ * stats_req->buf and advances stats_req->buf_len.
+ */
+static inline void htt_print_sfm_client_user_tlv_v(const void *tag_buf,
+						   u16 tag_len,
+						   struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_sfm_client_user_tlv_v *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u16 num_elems = tag_len >> 2;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->dwords_used_by_user_n,
+			   "dwords_used_by_user_n", num_elems, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Print HTT_SFM_CLIENT_TLV (per-client SFM buffer accounting) into
+ * stats_req->buf and advance stats_req->buf_len.
+ */
+static inline void htt_print_sfm_client_tlv(const void *tag_buf,
+					    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_sfm_client_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "client_id = %u\n",
+			 htt_stats_buf->client_id);
+	len += scnprintf(buf + len, buf_len - len, "buf_min = %u\n",
+			 htt_stats_buf->buf_min);
+	len += scnprintf(buf + len, buf_len - len, "buf_max = %u\n",
+			 htt_stats_buf->buf_max);
+	len += scnprintf(buf + len, buf_len - len, "buf_busy = %u\n",
+			 htt_stats_buf->buf_busy);
+	len += scnprintf(buf + len, buf_len - len, "buf_alloc = %u\n",
+			 htt_stats_buf->buf_alloc);
+	len += scnprintf(buf + len, buf_len - len, "buf_avail = %u\n",
+			 htt_stats_buf->buf_avail);
+	len += scnprintf(buf + len, buf_len - len, "num_users = %u\n",
+			 htt_stats_buf->num_users);
+
+	stats_req->buf_len = len;
+}
+
+/* Print HTT_SFM_CMN_TLV (common SFM buffer-pool counters) into
+ * stats_req->buf and advance stats_req->buf_len.
+ */
+static inline void htt_print_sfm_cmn_tlv(const void *tag_buf,
+					 struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_sfm_cmn_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "buf_total = %u\n",
+			 htt_stats_buf->buf_total);
+	len += scnprintf(buf + len, buf_len - len, "mem_empty = %u\n",
+			 htt_stats_buf->mem_empty);
+	len += scnprintf(buf + len, buf_len - len, "deallocate_bufs = %u\n",
+			 htt_stats_buf->deallocate_bufs);
+	len += scnprintf(buf + len, buf_len - len, "num_records = %u\n",
+			 htt_stats_buf->num_records);
+
+	stats_req->buf_len = len;
+}
+
+/* Print HTT_SRING_STATS_TLV (SRNG ring state).  Each packed word is
+ * converted from little-endian once and fields are unpacked with
+ * u32_get_bits.  Appends to stats_req->buf and advances
+ * stats_req->buf_len.
+ */
+static inline void htt_print_sring_stats_tlv(const void *tag_buf,
+					     struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_sring_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__ring_id__arena__ep);
+	u32 avail_words = __le32_to_cpu(htt_stats_buf->num_avail_words__num_valid_words);
+	u32 head_tail_ptr = __le32_to_cpu(htt_stats_buf->head_ptr__tail_ptr);
+	u32 sring_stat = __le32_to_cpu(htt_stats_buf->consumer_empty__producer_full);
+	u32 tail_ptr = __le32_to_cpu(htt_stats_buf->prefetch_count__internal_tail_ptr);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_SRING_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "ring_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_SRING_STATS_RING_ID));
+	len += scnprintf(buf + len, buf_len - len, "arena = %u\n",
+			 u32_get_bits(mac_id_word, HTT_SRING_STATS_ARENA));
+	len += scnprintf(buf + len, buf_len - len, "ep = %u\n",
+			 u32_get_bits(mac_id_word, HTT_SRING_STATS_EP));
+	len += scnprintf(buf + len, buf_len - len, "base_addr_lsb = 0x%x\n",
+			 htt_stats_buf->base_addr_lsb);
+	len += scnprintf(buf + len, buf_len - len, "base_addr_msb = 0x%x\n",
+			 htt_stats_buf->base_addr_msb);
+	len += scnprintf(buf + len, buf_len - len, "ring_size = %u\n",
+			 htt_stats_buf->ring_size);
+	len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n",
+			 htt_stats_buf->elem_size);
+	len += scnprintf(buf + len, buf_len - len, "num_avail_words = %u\n",
+			 u32_get_bits(avail_words, HTT_SRING_STATS_NUM_AVAIL_WORDS));
+	len += scnprintf(buf + len, buf_len - len, "num_valid_words = %u\n",
+			 u32_get_bits(avail_words, HTT_SRING_STATS_NUM_VALID_WORDS));
+	len += scnprintf(buf + len, buf_len - len, "head_ptr = %u\n",
+			 u32_get_bits(head_tail_ptr, HTT_SRING_STATS_HEAD_PTR));
+	len += scnprintf(buf + len, buf_len - len, "tail_ptr = %u\n",
+			 u32_get_bits(head_tail_ptr, HTT_SRING_STATS_TAIL_PTR));
+	len += scnprintf(buf + len, buf_len - len, "consumer_empty = %u\n",
+			 u32_get_bits(sring_stat, HTT_SRING_STATS_CONSUMER_EMPTY));
+	/* Fix copy-paste error: producer_full lives in the
+	 * consumer_empty__producer_full word (sring_stat), not in
+	 * head_ptr__tail_ptr.
+	 */
+	len += scnprintf(buf + len, buf_len - len, "producer_full = %u\n",
+			 u32_get_bits(sring_stat, HTT_SRING_STATS_PRODUCER_FULL));
+	len += scnprintf(buf + len, buf_len - len, "prefetch_count = %u\n",
+			 u32_get_bits(tail_ptr, HTT_SRING_STATS_PREFETCH_COUNT));
+	len += scnprintf(buf + len, buf_len - len, "internal_tail_ptr = %u\n\n",
+			 u32_get_bits(tail_ptr, HTT_SRING_STATS_INTERNAL_TAIL_PTR));
+
+	stats_req->buf_len = len;
+}
+
+/* Print HTT_SRING_CMN_TLV (number of SRNG records that follow) into
+ * stats_req->buf and advance stats_req->buf_len.
+ */
+static inline void htt_print_sring_cmn_tlv(const void *tag_buf,
+					   struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_sring_cmn_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_SRING_CMN_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "num_records = %u\n",
+			 htt_stats_buf->num_records);
+
+	stats_req->buf_len = len;
+}
+
+/* Print HTT_TX_PDEV_RATE_STATS_TLV: per-pdev TX rate counters (LDPC/RTS,
+ * legacy CCK/OFDM rates, MCS/NSS/BW/GI histograms for SU, AC/AX MU-MIMO
+ * and OFDMA, trigger types) into stats_req->buf.  On allocation failure
+ * stats_req->buf_len is left untouched.
+ *
+ * Fixes vs. the original: tx_gi[] scratch strings are zero-allocated
+ * (they were printed with "%s" while uninitialized), and two prints that
+ * emitted the AX ext-MCS/GI arrays under AC labels - exact duplicates of
+ * the correctly-labelled AX prints - are removed.
+ */
+static inline void htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf,
+						    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8 j;
+	char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS] = {NULL};
+	u32 tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS + 1] = { 0 };
+	u32 mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+	/* NOTE(review): tx_gi[] is printed below but never filled in this
+	 * function; zero-allocate so "%s" yields an empty string instead of
+	 * uninitialized heap data - confirm what these were meant to hold.
+	 */
+	for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+		tx_gi[j] = kzalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+		if (!tx_gi[j])
+			goto fail;
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
+			 htt_stats_buf->tx_ldpc);
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u\n",
+			 htt_stats_buf->ac_mu_mimo_tx_ldpc);
+	len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u\n",
+			 htt_stats_buf->ax_mu_mimo_tx_ldpc);
+	len += scnprintf(buf + len, buf_len - len, "ofdma_tx_ldpc = %u\n",
+			 htt_stats_buf->ofdma_tx_ldpc);
+	len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+			 htt_stats_buf->rts_cnt);
+	len += scnprintf(buf + len, buf_len - len, "rts_success = %u\n",
+			 htt_stats_buf->rts_success);
+	len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
+			 htt_stats_buf->ack_rssi);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 11 Mbps: %u\n",
+			 htt_stats_buf->tx_legacy_cck_rate[0],
+			 htt_stats_buf->tx_legacy_cck_rate[1],
+			 htt_stats_buf->tx_legacy_cck_rate[2],
+			 htt_stats_buf->tx_legacy_cck_rate[3]);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
+			 "                   24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u\n",
+			 htt_stats_buf->tx_legacy_ofdm_rate[0],
+			 htt_stats_buf->tx_legacy_ofdm_rate[1],
+			 htt_stats_buf->tx_legacy_ofdm_rate[2],
+			 htt_stats_buf->tx_legacy_ofdm_rate[3],
+			 htt_stats_buf->tx_legacy_ofdm_rate[4],
+			 htt_stats_buf->tx_legacy_ofdm_rate[5],
+			 htt_stats_buf->tx_legacy_ofdm_rate[6],
+			 htt_stats_buf->tx_legacy_ofdm_rate[7]);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs, "tx_mcs",
+			   HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs_ext, "tx_mcs_ext",
+			   HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_mcs,
+			   "ac_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+
+	/* The original printed ax_mu_mimo_tx_mcs_ext here under an
+	 * "ac_mu_mimo_tx_mcs_ext" label - a mislabelled duplicate of the
+	 * AX print below - so it is dropped.
+	 */
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_mcs,
+			   "ax_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_mcs_ext,
+			   "ax_mu_mimo_tx_mcs_ext",
+			   HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_mcs, "ofdma_tx_mcs",
+			   HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_mcs_ext,
+			   "ofdma_tx_mcs_ext",
+			   HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss",
+			   HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_nss,
+			   "ac_mu_mimo_tx_nss",
+			   HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_nss,
+			   "ax_mu_mimo_tx_nss",
+			   HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_nss, "ofdma_tx_nss",
+			   HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+
+	/* Fold the separate 320 MHz counter into the last tx_bw bucket. */
+	for (j = 0; j < HTT_TX_PDEV_STATS_NUM_BW_COUNTERS; j++)
+		tx_bw[j] = htt_stats_buf->tx_bw[j];
+	tx_bw[j] = htt_stats_buf->tx_bw_320mhz;
+	PRINT_ARRAY_TO_BUF(buf, len, tx_bw, "tx_bw",
+			   HTT_TX_PDEV_STATS_NUM_BW_COUNTERS + 1, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_bw,
+			   "ac_mu_mimo_tx_bw", HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_bw,
+			   "ax_mu_mimo_tx_bw",
+			   HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_bw, "ofdma_tx_bw",
+			   HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc",
+			   HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream",
+			   HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u\n",
+			 htt_stats_buf->tx_he_ltf[1],
+			 htt_stats_buf->tx_he_ltf[2],
+			 htt_stats_buf->tx_he_ltf[3]);
+
+	/* SU GI Stats */
+	for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len, "tx_gi[%u] = %s\n",
+				 j, tx_gi[j]);
+		len += scnprintf(buf + len, buf_len - len, " -2:%u,-1:%u,",
+				 htt_stats_buf->tx_gi_ext_2[j][0],
+				 htt_stats_buf->tx_gi_ext_2[j][1]);
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL,
+				   HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi_ext[j], NULL,
+				   HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS, "\n");
+	}
+
+	/* AC MU-MIMO GI Stats.  The original additionally printed
+	 * ax_mu_mimo_tx_gi_ext here (AX data in the AC section, duplicated
+	 * in the AX loop below); that stray print is dropped.
+	 */
+	for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "ac_mu_mimo_tx_gi[%u] = %s\n",
+				 j, tx_gi[j]);
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_gi[j],
+				   NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	}
+
+	/* AX MU-MIMO GI Stats */
+	for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_mu_mimo_tx_gi[%u] = %s\n",
+				 j, tx_gi[j]);
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_gi[j],
+				   NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_gi_ext[j],
+				   NULL, HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS, "\n");
+	}
+
+	/* DL OFDMA GI Stats */
+	for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len, "ofdma_tx_gi[%u] = %s\n",
+				 j, tx_gi[j]);
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_gi[j], NULL,
+				   HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_gi_ext[j],
+				   NULL, HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS, "\n");
+	}
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_ru_size,
+			   "ofdma_tx_ru_size",
+			   HTT_TX_PDEV_STATS_NUM_AX_RU_SIZE_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm",
+			   HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_su_punctured_mode,
+			   "tx_su_punctured_mode",
+			   HTT_TX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS, "\n");
+
+	/* Index 0 = half-BW channel, index 1 = quarter-BW channel. */
+	for (j = 0; j < HTT_TX_PDEV_STATS_NUM_REDUCED_CHAN_TYPES; j++) {
+		len += scnprintf(buf + len, buf_len - len, j == 0 ?
+				 "half_tx_bw = " : "quarter_tx_bw = ");
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->reduced_tx_bw[j], NULL,
+				   HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+	}
+
+	for (j = 0; j < HTT_TX_PDEV_STATS_NUM_REDUCED_CHAN_TYPES; j++) {
+		len += scnprintf(buf + len, buf_len - len, j == 0 ?
+				 "half_ac_mu_mimo_tx_bw = " : "quarter_ac_mu_mimo_tx_bw = ");
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->reduced_ac_mu_mimo_tx_bw[j], NULL,
+				   HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+	}
+
+	for (j = 0; j < HTT_TX_PDEV_STATS_NUM_REDUCED_CHAN_TYPES; j++) {
+		len += scnprintf(buf + len, buf_len - len, j == 0 ?
+				 "half_ax_mu_mimo_tx_bw = " : "quarter_ax_mu_mimo_tx_bw = ");
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->reduced_ax_mu_mimo_tx_bw[j], NULL,
+				   HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+	}
+
+	for (j = 0; j < HTT_TX_PDEV_STATS_NUM_REDUCED_CHAN_TYPES; j++) {
+		/* Add the " = " that was missing from the half-BW label. */
+		len += scnprintf(buf + len, buf_len - len, j == 0 ?
+				 "half_ofdma_tx_bw = " : "quarter_ofdma_tx_bw = ");
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->reduced_ax_mu_ofdma_tx_bw[j], NULL,
+				   HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+	}
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_he_sig_b_mcs,
+			   "ofdma_he_sig_b_mcs",
+			   HTT_TX_PDEV_STATS_NUM_HE_SIG_B_MCS_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->trigger_type_11ax,
+			   "11ax_trigger_type",
+			   HTT_TX_PDEV_STATS_NUM_11AX_TRIGGER_TYPES, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->trigger_type_11be,
+			   "11be_trigger_type",
+			   HTT_TX_PDEV_STATS_NUM_11BE_TRIGGER_TYPES, "\n");
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "ax_su_embedded_trigger_data_ppdu_cnt = %u\n",
+			 htt_stats_buf->ax_su_embedded_trigger_data_ppdu);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "ax_su_embedded_trigger_data_ppdu_err_cnt = %u\n",
+			 htt_stats_buf->ax_su_embedded_trigger_data_ppdu_err);
+
+	stats_req->buf_len = len;
+fail:
+	/* Common cleanup: also reached on allocation failure, where buf_len
+	 * is deliberately left unchanged.
+	 */
+	for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++)
+		kfree(tx_gi[j]);
+}
+
+/* Dump HTT_RX_PDEV_RATE_STATS_TLV: per-pdev rx rate statistics (MCS, NSS,
+ * bandwidth, guard interval, RSSI and UL OFDMA breakdowns) into the debugfs
+ * stats buffer.
+ */
+static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
+						    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_rx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8 i, j;
+	char *rssi_chain[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] = {NULL};
+	char *rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS] = {NULL};
+	char *rx_pilot_evm_db[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] = {NULL};
+	u32 mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+	/* kzalloc (not kmalloc): rx_gi[] is printed with %s below without
+	 * ever being filled in, so the buffers must be zero-initialized to
+	 * avoid dumping uninitialized kernel memory to debugfs.
+	 * NOTE(review): rssi_chain[] and rx_pilot_evm_db[] are allocated and
+	 * freed but never otherwise used here — candidates for removal.
+	 */
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+		rssi_chain[j] = kzalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+		if (!rssi_chain[j])
+			goto fail;
+	}
+
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		rx_gi[j] = kzalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+		if (!rx_gi[j])
+			goto fail;
+	}
+
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+		rx_pilot_evm_db[j] = kzalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+		if (!rx_pilot_evm_db[j])
+			goto fail;
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
+			 htt_stats_buf->nsts);
+	len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
+			 htt_stats_buf->rx_ldpc);
+	len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+			 htt_stats_buf->rts_cnt);
+	len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
+			 htt_stats_buf->rssi_mgmt);
+	len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
+			 htt_stats_buf->rssi_data);
+	len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
+			 htt_stats_buf->rssi_comb);
+	len += scnprintf(buf + len, buf_len - len, "rssi_in_dbm = %d\n",
+			 htt_stats_buf->rssi_in_dbm);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs",
+			   HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss",
+			   HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm",
+			   HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc",
+			   HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw",
+			   HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "rx_evm_nss_count = %u\n",
+			 htt_stats_buf->nss_count);
+
+	len += scnprintf(buf + len, buf_len - len, "rx_evm_pilot_count = %u\n",
+			 htt_stats_buf->pilot_count);
+
+	/* Per-stream pilot EVM values, printed as "index:value," pairs. */
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "pilot_evm_db[%u] = ", j);
+		for (i = 0; i < HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS; i++)
+			len += scnprintf(buf + len,
+					 buf_len - len,
+					 " %u:%d,",
+					 i,
+					 htt_stats_buf->rx_pilot_evm_db[j][i]);
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "pilot_evm_db_mean = ");
+	for (i = 0; i < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+		len += scnprintf(buf + len,
+				 buf_len - len,
+				 " %u:%d,", i,
+				 htt_stats_buf->rx_pilot_evm_db_mean[i]);
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "rssi_chain[%u] = ", j);
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL,
+				   HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+	}
+
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_gi[%u] = ", j);
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL,
+				   HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	}
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream",
+			   HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "rx_11ax_su_ext = %u\n",
+			 htt_stats_buf->rx_11ax_su_ext);
+	len += scnprintf(buf + len, buf_len - len, "rx_11ac_mumimo = %u\n",
+			 htt_stats_buf->rx_11ac_mumimo);
+	len += scnprintf(buf + len, buf_len - len, "rx_11ax_mumimo = %u\n",
+			 htt_stats_buf->rx_11ax_mumimo);
+	len += scnprintf(buf + len, buf_len - len, "rx_11ax_ofdma = %u\n",
+			 htt_stats_buf->rx_11ax_ofdma);
+	len += scnprintf(buf + len, buf_len - len, "txbf = %u\n",
+			 htt_stats_buf->txbf);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_cck_rate,
+			   "rx_legacy_cck_rate",
+			   HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_ofdm_rate,
+			   "rx_legacy_ofdm_rate",
+			   HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_low = %u\n",
+			 htt_stats_buf->rx_active_dur_us_low);
+	len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_high = %u\n",
+			 htt_stats_buf->rx_active_dur_us_high);
+	len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n",
+			 htt_stats_buf->rx_11ax_ul_ofdma);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_mcs,
+			   "ul_ofdma_rx_mcs",
+			   HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+
+	/* rx_gi[j] is zero-filled scratch (see allocation comment above),
+	 * so %s prints an empty string here.
+	 */
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_gi[%u] = %s\n",
+				 j, rx_gi[j]);
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_gi[j], NULL,
+				   HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	}
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_nss,
+			   "ul_ofdma_rx_nss",
+			   HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_bw, "ul_ofdma_rx_bw",
+			   HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n",
+			 htt_stats_buf->ul_ofdma_rx_stbc);
+	len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n",
+			 htt_stats_buf->ul_ofdma_rx_ldpc);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_non_data_ppdu,
+			   "rx_ulofdma_non_data_ppdu",
+			   HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_data_ppdu,
+			   "rx_ulofdma_data_ppdu", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_ok,
+			   "rx_ulofdma_mpdu_ok", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_fail,
+			   "rx_ulofdma_mpdu_fail", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_ul_fd_rssi: nss[%u] = ", j);
+		for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++)
+			len += scnprintf(buf + len,
+					 buf_len - len,
+					 " %u:%d,",
+					 i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x\n",
+			 htt_stats_buf->per_chain_rssi_pkt_type);
+
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_per_chain_rssi_in_dbm[%u] = ", j);
+		for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
+			len += scnprintf(buf + len,
+					 buf_len - len,
+					 " %u:%d,",
+					 i,
+					 htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_dl_ofdma_mcs,
+			   "rx_11ax_dl_ofdma_mcs",
+			   HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_dl_ofdma_ru,
+			   "rx_11ax_dl_ofdma_ru",
+			   HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_non_data_nusers,
+			   "rx_ulofdma_non_data_nusers",
+			   HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_data_nusers,
+			   "rx_ulofdma_data_nusers",
+			   HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	stats_req->buf_len = len;
+
+fail:
+	/* Free whatever was allocated; kfree(NULL) is a no-op, so partially
+	 * completed allocation loops are handled naturally.
+	 */
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+		kfree(rssi_chain[j]);
+
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+		kfree(rx_pilot_evm_db[j]);
+
+	for (i = 0; i < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; i++)
+		kfree(rx_gi[i]);
+}
+
+/* Append " i:val," pairs from arr[0..len) to 'out' at offset 'buflen',
+ * bounded by ATH12K_HTT_STATS_BUF_SIZE; advances 'buflen' by the number of
+ * bytes written. All macro arguments are parenthesized so expression
+ * arguments (e.g. a[x], n - 1) expand safely.
+ */
+#define CHAIN_ARRAY_TO_BUF(out, buflen, arr, len)			\
+	do {								\
+		int index = 0;						\
+		u8 i;							\
+									\
+		for (i = 0; i < (len); i++) {				\
+			index += scnprintf((out) + (buflen) + index,	\
+				(ATH12K_HTT_STATS_BUF_SIZE - (buflen)) - index, \
+				" %u:%d,", i, (arr)[i]);		\
+		}							\
+		(buflen) += index;					\
+	} while (0)
+
+/* Dump HTT_RX_PDEV_RATE_EXT_STATS_TLV: extended rx rate statistics (extra
+ * MCS/BW/GI buckets and per-chain RSSI) into the debugfs stats buffer.
+ */
+static inline void htt_print_rx_pdev_rate_ext_stats_tlv(const void *tag_buf,
+						       struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_rx_pdev_rate_ext_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8 i, j;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_EXT_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "rssi_mcast_in_dbm = %d\n",
+			 htt_stats_buf->rssi_mcast_in_dbm);
+	len += scnprintf(buf + len, buf_len - len, "rssi_mgmt_in_dbm = %d\n",
+			 htt_stats_buf->rssi_mgmt_in_dbm);
+
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+		/* NOTE(review): these per-BW RSSI values are emitted with no
+		 * leading label, immediately before the rssi_chain_ext label;
+		 * confirm this ordering is what consumers expect.
+		 */
+		for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS; i++)
+			len += scnprintf(buf + len,
+					 buf_len - len,
+					 " %u:%d,",
+					 i,
+					 htt_stats_buf->rx_per_chain_rssi_ext_in_dbm[j][i]);
+		len += scnprintf(buf + len, buf_len - len, "\nrssi_chain_ext[%u] = ", j);
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain_ext[j],
+				   NULL, HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS, "\n");
+	}
+
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "\nrx_per_chain_rssi_ext_in_dbm[%u] = ", j);
+		CHAIN_ARRAY_TO_BUF(buf, len,
+				   htt_stats_buf->rx_per_chain_rssi_ext_in_dbm[j],
+				   HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS);
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs_ext,
+			   "rx_mcs_ext", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc_ext,
+			   "rx_stbc_ext", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len, "rx_gi_ext[%u] = ", j);
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi_ext[j],
+				   NULL, HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+	}
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_mcs_ext,
+			   "ul_ofdma_rx_mcs_ext",
+			   HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "ul_ofdma_rx_gi_ext[%u] = ", j);
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_gi_ext[j],
+				   NULL, HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+	}
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_su_txbf_mcs_ext,
+			   "rx_11ax_su_txbf_mcs_ext",
+			   HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_mu_txbf_mcs_ext,
+			   "rx_11ax_mu_txbf_mcs_ext",
+			   HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_dl_ofdma_mcs_ext,
+			   "rx_11ax_dl_ofdma_mcs_ext",
+			   HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw_ext,
+			   "rx_bw_ext",
+			   HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS, "\n");
+
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_REDUCED_CHAN_TYPES; j++) {
+		len += scnprintf(buf + len, buf_len - len, j == 0 ?
+				 "half_rx_bw = " :
+				 "quarter_rx_bw = ");
+		PRINT_ARRAY_TO_BUF(buf, len,
+				   htt_stats_buf->reduced_rx_bw[j],
+				   NULL, HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+	}
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_su_punctured_mode,
+			   "rx_su_punctured_mode",
+			   HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS, "\n");
+
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "rssi_chain_ext_2[%u] = ", j);
+		PRINT_ARRAY_TO_BUF(buf, len,
+				   htt_stats_buf->rssi_chain_ext_2[j],
+				   NULL, HTT_RX_PDEV_STATS_NUM_BW_EXT_2_COUNTERS, "\n");
+	}
+
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_REDUCED_CHAN_TYPES; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_per_chain_rssi_ext_2_in_dbm[%u] = ", j);
+		CHAIN_ARRAY_TO_BUF(buf, len,
+				   htt_stats_buf->rx_per_chain_rssi_ext_2_in_dbm[j],
+				   HTT_RX_PDEV_STATS_NUM_BW_EXT_2_COUNTERS);
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	stats_req->buf_len = len;
+}
+
+
+static inline void htt_print_rx_soc_fw_stats_tlv(const void *tag_buf,
+						 struct debug_htt_stats_req *stats_req)
+{
+	/* Dump SoC-level firmware rx counters as "name = value" lines. */
+	const struct htt_rx_soc_fw_stats_tlv *htt_stats_buf = tag_buf;
+	const struct {
+		const char *name;
+		u32 val;
+	} counters[] = {
+		{ "fw_reo_ring_data_msdu", htt_stats_buf->fw_reo_ring_data_msdu },
+		{ "fw_to_host_data_msdu_bcmc",
+		  htt_stats_buf->fw_to_host_data_msdu_bcmc },
+		{ "fw_to_host_data_msdu_uc", htt_stats_buf->fw_to_host_data_msdu_uc },
+		{ "ofld_remote_data_buf_recycle_cnt",
+		  htt_stats_buf->ofld_remote_data_buf_recycle_cnt },
+		{ "ofld_remote_free_buf_indication_cnt",
+		  htt_stats_buf->ofld_remote_free_buf_indication_cnt },
+		{ "ofld_buf_to_host_data_msdu_uc",
+		  htt_stats_buf->ofld_buf_to_host_data_msdu_uc },
+		{ "reo_fw_ring_to_host_data_msdu_uc",
+		  htt_stats_buf->reo_fw_ring_to_host_data_msdu_uc },
+		{ "wbm_sw_ring_reap", htt_stats_buf->wbm_sw_ring_reap },
+		{ "wbm_forward_to_host_cnt", htt_stats_buf->wbm_forward_to_host_cnt },
+		{ "wbm_target_recycle_cnt", htt_stats_buf->wbm_target_recycle_cnt },
+		{ "target_refill_ring_recycle_cnt",
+		  htt_stats_buf->target_refill_ring_recycle_cnt },
+	};
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	unsigned int i;
+
+	len += scnprintf(buf + len, ATH12K_HTT_STATS_BUF_SIZE - len,
+			 "HTT_RX_SOC_FW_STATS_TLV:\n");
+	for (i = 0; i < ARRAY_SIZE(counters); i++)
+		len += scnprintf(buf + len, ATH12K_HTT_STATS_BUF_SIZE - len,
+				 "%s = %u\n", counters[i].name, counters[i].val);
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_empty_tlv_v(const void *tag_buf,
+					    u16 tag_len,
+					    struct debug_htt_stats_req *stats_req)
+{
+	/* Dump the firmware refill-ring "empty" event counters, one per ring. */
+	const struct htt_rx_soc_fw_refill_ring_empty_tlv_v *tlv = tag_buf;
+	u8 *out = stats_req->buf;
+	u32 offset = stats_req->buf_len;
+	/* One u32 counter per ring in the TLV; clamp to the known ring count. */
+	u16 rings = min_t(u16, tag_len >> 2, HTT_RX_STATS_REFILL_MAX_RING);
+
+	offset += scnprintf(out + offset, ATH12K_HTT_STATS_BUF_SIZE - offset,
+			    "HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(out, offset, tlv->refill_ring_empty_cnt,
+			   "refill_ring_empty_cnt", rings, "\n\n");
+
+	stats_req->buf_len = offset;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(const void *tag_buf,
+						    u16 tag_len,
+						    struct debug_htt_stats_req *stats_req)
+{
+	/* Dump RXDMA error counters, indexed by error code. */
+	const struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v *tlv = tag_buf;
+	u8 *out = stats_req->buf;
+	u32 offset = stats_req->buf_len;
+	/* One u32 counter per error code; clamp to the known code count. */
+	u16 codes = min_t(u16, tag_len >> 2, HTT_RX_RXDMA_MAX_ERR_CODE);
+
+	offset += scnprintf(out + offset, ATH12K_HTT_STATS_BUF_SIZE - offset,
+			    "HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(out, offset, tlv->rxdma_err, "rxdma_err",
+			   codes, "\n\n");
+
+	stats_req->buf_len = offset;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(const void *tag_buf,
+						  u16 tag_len,
+						  struct debug_htt_stats_req *stats_req)
+{
+	/* Dump REO error counters, indexed by error code. */
+	const struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v *tlv = tag_buf;
+	u8 *out = stats_req->buf;
+	u32 offset = stats_req->buf_len;
+	/* One u32 counter per error code; clamp to the known code count. */
+	u16 codes = min_t(u16, tag_len >> 2, HTT_RX_REO_MAX_ERR_CODE);
+
+	offset += scnprintf(out + offset, ATH12K_HTT_STATS_BUF_SIZE - offset,
+			    "HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(out, offset, tlv->reo_err, "reo_err",
+			   codes, "\n\n");
+
+	stats_req->buf_len = offset;
+}
+
+static inline void
+htt_print_rx_reo_debug_stats_tlv_v(const void *tag_buf,
+				   struct debug_htt_stats_req *stats_req)
+{
+	/* Dump REO ring resource-usage statistics sampled by firmware. */
+	const struct htt_rx_reo_resource_stats_tlv_v *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_RX_REO_RESOURCE_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "sample_id = %u\n",
+			 htt_stats_buf->sample_id);
+	len += scnprintf(buf + len, buf_len - len, "total_max = %u\n",
+			 htt_stats_buf->total_max);
+	len += scnprintf(buf + len, buf_len - len, "total_avg = %u\n",
+			 htt_stats_buf->total_avg);
+	len += scnprintf(buf + len, buf_len - len, "total_sample = %u\n",
+			 htt_stats_buf->total_sample);
+	len += scnprintf(buf + len, buf_len - len, "non_zeros_avg = %u\n",
+			 htt_stats_buf->non_zeros_avg);
+	len += scnprintf(buf + len, buf_len - len, "non_zeros_sample = %u\n",
+			 htt_stats_buf->non_zeros_sample);
+	len += scnprintf(buf + len, buf_len - len, "last_non_zeros_max = %u\n",
+			 htt_stats_buf->last_non_zeros_max);
+	/* NOTE(review): the three labels below lack the " = " separator used
+	 * by every other line; this matches the upstream ath11k formatting,
+	 * so confirm with consumers before "fixing" it.
+	 */
+	len += scnprintf(buf + len, buf_len - len, "last_non_zeros_min %u\n",
+			 htt_stats_buf->last_non_zeros_min);
+	len += scnprintf(buf + len, buf_len - len, "last_non_zeros_avg %u\n",
+			 htt_stats_buf->last_non_zeros_avg);
+	len += scnprintf(buf + len, buf_len - len, "last_non_zeros_sample %u\n",
+			 htt_stats_buf->last_non_zeros_sample);
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(const void *tag_buf,
+						 u16 tag_len,
+						 struct debug_htt_stats_req *stats_req)
+{
+	/* Dump the per-ring firmware refill counts. */
+	const struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v *tlv = tag_buf;
+	u8 *out = stats_req->buf;
+	u32 offset = stats_req->buf_len;
+	/* One u32 counter per ring in the TLV; clamp to the known ring count. */
+	u16 rings = min_t(u16, tag_len >> 2, HTT_RX_STATS_REFILL_MAX_RING);
+
+	offset += scnprintf(out + offset, ATH12K_HTT_STATS_BUF_SIZE - offset,
+			    "HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(out, offset, tlv->refill_ring_num_refill,
+			   "refill_ring_num_refill", rings, "\n\n");
+
+	stats_req->buf_len = offset;
+}
+
+static inline void htt_print_rx_pdev_fw_stats_tlv(const void *tag_buf,
+						  struct debug_htt_stats_req *stats_req)
+{
+	/* Dump per-pdev firmware rx-path counters as "name = value" lines. */
+	const struct htt_rx_pdev_fw_stats_tlv *htt_stats_buf = tag_buf;
+	const struct {
+		const char *name;
+		u32 val;
+	} counters[] = {
+		{ "ppdu_recvd", htt_stats_buf->ppdu_recvd },
+		{ "mpdu_cnt_fcs_ok", htt_stats_buf->mpdu_cnt_fcs_ok },
+		{ "mpdu_cnt_fcs_err", htt_stats_buf->mpdu_cnt_fcs_err },
+		{ "tcp_msdu_cnt", htt_stats_buf->tcp_msdu_cnt },
+		{ "tcp_ack_msdu_cnt", htt_stats_buf->tcp_ack_msdu_cnt },
+		{ "udp_msdu_cnt", htt_stats_buf->udp_msdu_cnt },
+		{ "other_msdu_cnt", htt_stats_buf->other_msdu_cnt },
+		{ "fw_ring_mpdu_ind", htt_stats_buf->fw_ring_mpdu_ind },
+		{ "fw_ring_mcast_data_msdu", htt_stats_buf->fw_ring_mcast_data_msdu },
+		{ "fw_ring_bcast_data_msdu", htt_stats_buf->fw_ring_bcast_data_msdu },
+		{ "fw_ring_ucast_data_msdu", htt_stats_buf->fw_ring_ucast_data_msdu },
+		{ "fw_ring_null_data_msdu", htt_stats_buf->fw_ring_null_data_msdu },
+		{ "fw_ring_mpdu_drop", htt_stats_buf->fw_ring_mpdu_drop },
+		{ "ofld_local_data_ind_cnt", htt_stats_buf->ofld_local_data_ind_cnt },
+		{ "ofld_local_data_buf_recycle_cnt",
+		  htt_stats_buf->ofld_local_data_buf_recycle_cnt },
+		{ "drx_local_data_ind_cnt", htt_stats_buf->drx_local_data_ind_cnt },
+		{ "drx_local_data_buf_recycle_cnt",
+		  htt_stats_buf->drx_local_data_buf_recycle_cnt },
+		{ "local_nondata_ind_cnt", htt_stats_buf->local_nondata_ind_cnt },
+		{ "local_nondata_buf_recycle_cnt",
+		  htt_stats_buf->local_nondata_buf_recycle_cnt },
+		{ "fw_status_buf_ring_refill_cnt",
+		  htt_stats_buf->fw_status_buf_ring_refill_cnt },
+		{ "fw_status_buf_ring_empty_cnt",
+		  htt_stats_buf->fw_status_buf_ring_empty_cnt },
+		{ "fw_pkt_buf_ring_refill_cnt",
+		  htt_stats_buf->fw_pkt_buf_ring_refill_cnt },
+		{ "fw_pkt_buf_ring_empty_cnt",
+		  htt_stats_buf->fw_pkt_buf_ring_empty_cnt },
+		{ "fw_link_buf_ring_refill_cnt",
+		  htt_stats_buf->fw_link_buf_ring_refill_cnt },
+		{ "fw_link_buf_ring_empty_cnt",
+		  htt_stats_buf->fw_link_buf_ring_empty_cnt },
+		{ "host_pkt_buf_ring_refill_cnt",
+		  htt_stats_buf->host_pkt_buf_ring_refill_cnt },
+		{ "host_pkt_buf_ring_empty_cnt",
+		  htt_stats_buf->host_pkt_buf_ring_empty_cnt },
+		{ "mon_pkt_buf_ring_refill_cnt",
+		  htt_stats_buf->mon_pkt_buf_ring_refill_cnt },
+		{ "mon_pkt_buf_ring_empty_cnt",
+		  htt_stats_buf->mon_pkt_buf_ring_empty_cnt },
+		{ "mon_status_buf_ring_refill_cnt",
+		  htt_stats_buf->mon_status_buf_ring_refill_cnt },
+		{ "mon_status_buf_ring_empty_cnt",
+		  htt_stats_buf->mon_status_buf_ring_empty_cnt },
+		{ "mon_desc_buf_ring_refill_cnt",
+		  htt_stats_buf->mon_desc_buf_ring_refill_cnt },
+		{ "mon_desc_buf_ring_empty_cnt",
+		  htt_stats_buf->mon_desc_buf_ring_empty_cnt },
+		{ "mon_dest_ring_update_cnt",
+		  htt_stats_buf->mon_dest_ring_update_cnt },
+		{ "mon_dest_ring_full_cnt", htt_stats_buf->mon_dest_ring_full_cnt },
+		{ "rx_suspend_cnt", htt_stats_buf->rx_suspend_cnt },
+		{ "rx_suspend_fail_cnt", htt_stats_buf->rx_suspend_fail_cnt },
+		{ "rx_resume_cnt", htt_stats_buf->rx_resume_cnt },
+		{ "rx_resume_fail_cnt", htt_stats_buf->rx_resume_fail_cnt },
+		{ "rx_ring_switch_cnt", htt_stats_buf->rx_ring_switch_cnt },
+		{ "rx_ring_restore_cnt", htt_stats_buf->rx_ring_restore_cnt },
+		{ "rx_flush_cnt", htt_stats_buf->rx_flush_cnt },
+		{ "rx_recovery_reset_cnt", htt_stats_buf->rx_recovery_reset_cnt },
+	};
+	u32 mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	unsigned int i;
+
+	len += scnprintf(buf + len, ATH12K_HTT_STATS_BUF_SIZE - len,
+			 "HTT_RX_PDEV_FW_STATS_TLV:\n");
+	len += scnprintf(buf + len, ATH12K_HTT_STATS_BUF_SIZE - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_STATS_MAC_ID));
+	for (i = 0; i < ARRAY_SIZE(counters); i++)
+		len += scnprintf(buf + len, ATH12K_HTT_STATS_BUF_SIZE - len,
+				 "%s = %u\n", counters[i].name, counters[i].val);
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(const void *tag_buf,
+					 struct debug_htt_stats_req *stats_req)
+{
+	/* Dump per-error-code counts of MPDUs errored on the fw ring. */
+	const struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v *tlv = tag_buf;
+	u8 *out = stats_req->buf;
+	u32 offset = stats_req->buf_len;
+
+	offset += scnprintf(out + offset, ATH12K_HTT_STATS_BUF_SIZE - offset,
+			    "HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(out, offset, tlv->fw_ring_mpdu_err,
+			   "fw_ring_mpdu_err", HTT_RX_STATS_RXDMA_MAX_ERR, "\n");
+
+	stats_req->buf_len = offset;
+}
+
+static inline void
+htt_print_rx_pdev_fw_mpdu_drop_tlv_v(const void *tag_buf,
+				     u16 tag_len,
+				     struct debug_htt_stats_req *stats_req)
+{
+	/* Dump per-reason counts of MPDUs dropped by firmware. */
+	const struct htt_rx_pdev_fw_mpdu_drop_tlv_v *tlv = tag_buf;
+	u8 *out = stats_req->buf;
+	u32 offset = stats_req->buf_len;
+	/* One u32 counter per drop reason; clamp to the known reason count. */
+	u16 reasons = min_t(u16, tag_len >> 2, HTT_RX_STATS_FW_DROP_REASON_MAX);
+
+	offset += scnprintf(out + offset, ATH12K_HTT_STATS_BUF_SIZE - offset,
+			    "HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:\n");
+
+	PRINT_ARRAY_TO_BUF(out, offset, tlv->fw_mpdu_drop, "fw_mpdu_drop",
+			   reasons, "\n\n");
+
+	stats_req->buf_len = offset;
+}
+
+static inline void
+htt_print_rx_pdev_fw_stats_phy_err_tlv(const void *tag_buf,
+				       struct debug_htt_stats_req *stats_req)
+{
+	/* Dump firmware PHY-error counters: a running total plus the most
+	 * recent per-slot PHY error codes.
+	 */
+	const struct htt_rx_pdev_fw_stats_phy_err_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id__word = %u\n",
+			 u32_get_bits(mac_id_word, HTT_STATS_MAC_ID));
+	/* Label fixed: was "total_phy_err_nct", a typo for the
+	 * total_phy_err_cnt field actually printed.
+	 */
+	len += scnprintf(buf + len, buf_len - len, "total_phy_err_cnt = %u\n",
+			 htt_stats_buf->total_phy_err_cnt);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_err, "phy_errs",
+			   HTT_STATS_PHY_ERR_MAX, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_cca_stats_hist_tlv(const void *tag_buf,
+				  struct debug_htt_stats_req *stats_req)
+{
+	/* Emit the CCA histogram header; the per-interval counter rows are
+	 * printed by htt_print_pdev_stats_cca_counters_tlv().
+	 */
+	const struct htt_pdev_cca_stats_hist_v1_tlv *tlv = tag_buf;
+	u8 *out = stats_req->buf;
+	u32 offset = stats_req->buf_len;
+	u32 max = ATH12K_HTT_STATS_BUF_SIZE;
+
+	offset += scnprintf(out + offset, max - offset,
+			    "\nHTT_PDEV_CCA_STATS_HIST_TLV:\n");
+	offset += scnprintf(out + offset, max - offset, "chan_num = %u\n",
+			    tlv->chan_num);
+	offset += scnprintf(out + offset, max - offset, "num_records = %u\n",
+			    tlv->num_records);
+	offset += scnprintf(out + offset, max - offset,
+			    "valid_cca_counters_bitmap = 0x%x\n",
+			    tlv->valid_cca_counters_bitmap);
+	offset += scnprintf(out + offset, max - offset, "collection_interval = %u\n",
+			    tlv->collection_interval);
+
+	offset += scnprintf(out + offset, max - offset,
+			    "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)\n");
+	offset += scnprintf(out + offset, max - offset,
+			    "|  tx_frame|   rx_frame|   rx_clear| my_rx_frame|        cnt| med_rx_idle| med_tx_idle_global|   cca_obss|");
+
+	stats_req->buf_len = offset;
+}
+
+static inline void
+htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf,
+				      struct debug_htt_stats_req *stats_req)
+{
+	/* Emit one row of the CCA counter table whose column header is
+	 * printed by htt_print_pdev_cca_stats_hist_tlv(); all values usec.
+	 */
+	const struct htt_pdev_stats_cca_counters_tlv *tlv = tag_buf;
+	u8 *out = stats_req->buf;
+	u32 offset = stats_req->buf_len;
+
+	offset += scnprintf(out + offset, ATH12K_HTT_STATS_BUF_SIZE - offset,
+			    "|%10u| %10u| %10u| %11u| %10u| %11u| %18u| %10u|\n",
+			    tlv->tx_frame_usec,
+			    tlv->rx_frame_usec,
+			    tlv->rx_clear_usec,
+			    tlv->my_rx_frame_usec,
+			    tlv->usec_cnt,
+			    tlv->med_rx_idle_usec,
+			    tlv->med_tx_idle_global_usec,
+			    tlv->cca_obss_usec);
+
+	stats_req->buf_len = offset;
+}
+
+static inline void htt_print_hw_stats_whal_tx_tlv(const void *tag_buf,
+						  struct debug_htt_stats_req *stats_req)
+{
+	/* Dump WHAL tx scheduler counters as "name = value" lines. */
+	const struct htt_hw_stats_whal_tx_tlv *htt_stats_buf = tag_buf;
+	const struct {
+		const char *name;
+		u32 val;
+	} counters[] = {
+		{ "last_unpause_ppdu_id", htt_stats_buf->last_unpause_ppdu_id },
+		{ "hwsch_unpause_wait_tqm_write",
+		  htt_stats_buf->hwsch_unpause_wait_tqm_write },
+		{ "hwsch_dummy_tlv_skipped", htt_stats_buf->hwsch_dummy_tlv_skipped },
+		{ "hwsch_misaligned_offset_received",
+		  htt_stats_buf->hwsch_misaligned_offset_received },
+		{ "hwsch_reset_count", htt_stats_buf->hwsch_reset_count },
+		{ "hwsch_dev_reset_war", htt_stats_buf->hwsch_dev_reset_war },
+		{ "hwsch_delayed_pause", htt_stats_buf->hwsch_delayed_pause },
+		{ "hwsch_long_delayed_pause",
+		  htt_stats_buf->hwsch_long_delayed_pause },
+		{ "sch_rx_ppdu_no_response",
+		  htt_stats_buf->sch_rx_ppdu_no_response },
+		{ "sch_selfgen_response", htt_stats_buf->sch_selfgen_response },
+	};
+	u32 mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	unsigned int i;
+
+	len += scnprintf(buf + len, ATH12K_HTT_STATS_BUF_SIZE - len,
+			 "HTT_HW_STATS_WHAL_TX_TLV:\n");
+	len += scnprintf(buf + len, ATH12K_HTT_STATS_BUF_SIZE - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_STATS_MAC_ID));
+	for (i = 0; i < ARRAY_SIZE(counters); i++)
+		len += scnprintf(buf + len, ATH12K_HTT_STATS_BUF_SIZE - len,
+				 "%s = %u\n", counters[i].name, counters[i].val);
+	/* Kept byte-identical to the original label (no space before '='). */
+	len += scnprintf(buf + len, ATH12K_HTT_STATS_BUF_SIZE - len,
+			 "sch_rx_sifs_resp_trigger= %u\n",
+			 htt_stats_buf->sch_rx_sifs_resp_trigger);
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_stats_twt_sessions_tlv(const void *tag_buf,
+				      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_pdev_stats_twt_sessions_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSIONS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+			 htt_stats_buf->pdev_id);
+	len += scnprintf(buf + len, buf_len - len, "num_sessions = %u\n",
+			 htt_stats_buf->num_sessions);
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_stats_twt_session_tlv(const void *tag_buf,
+				     struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_pdev_stats_twt_session_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_addr = __le32_to_cpu(htt_stats_buf->peer_mac.mac_addr_l32);
+	u32 mac_addr_16 = __le32_to_cpu(htt_stats_buf->peer_mac.mac_addr_h16);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSION_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n",
+			 htt_stats_buf->vdev_id);
+	len += scnprintf(buf + len, buf_len - len,
+			 "peer_mac = %02x:%02x:%02x:%02x:%02x:%02x\n",
+			 u32_get_bits(mac_addr, HTT_MAC_ADDR_L32_0),
+			 u32_get_bits(mac_addr, HTT_MAC_ADDR_L32_1),
+			 u32_get_bits(mac_addr, HTT_MAC_ADDR_L32_2),
+			 u32_get_bits(mac_addr, HTT_MAC_ADDR_L32_3),
+			 u32_get_bits(mac_addr_16, HTT_MAC_ADDR_H16_0),
+			 u32_get_bits(mac_addr_16, HTT_MAC_ADDR_H16_1));
+	len += scnprintf(buf + len, buf_len - len, "flow_id_flags = %u\n",
+			 htt_stats_buf->flow_id_flags);
+	len += scnprintf(buf + len, buf_len - len, "dialog_id = %u\n",
+			 htt_stats_buf->dialog_id);
+	len += scnprintf(buf + len, buf_len - len, "wake_dura_us = %u\n",
+			 htt_stats_buf->wake_dura_us);
+	len += scnprintf(buf + len, buf_len - len, "wake_intvl_us = %u\n",
+			 htt_stats_buf->wake_intvl_us);
+	len += scnprintf(buf + len, buf_len - len, "sp_offset_us = %u\n",
+			 htt_stats_buf->sp_offset_us);
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf,
+				   struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_pdev_obss_pd_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_OBSS_PD_STATS_TLV:\n");
+	/*
+	 * Successful/Failure OBSS Transmission stats are omitted out as they
+	 * are not supported in the current chipsets.
+	 */
+	len += scnprintf(buf + len, buf_len - len, "num_spatial_reuse_tx = %u\n",
+			 htt_stats_buf->num_sr_tx_transmissions);
+	len += scnprintf(buf + len, buf_len - len,
+			 "num_spatial_reuse_opportunities = %u\n",
+			 htt_stats_buf->num_spatial_reuse_opportunities);
+	len += scnprintf(buf + len, buf_len - len, "num_non_srg_opportunities = %u\n",
+			 htt_stats_buf->num_non_srg_opportunities);
+	len += scnprintf(buf + len, buf_len - len, "num_non_srg_ppdu_tried = %u\n",
+			 htt_stats_buf->num_non_srg_ppdu_tried);
+	len += scnprintf(buf + len, buf_len - len, "num_non_srg_ppdu_success = %u\n",
+			 htt_stats_buf->num_non_srg_ppdu_success);
+	len += scnprintf(buf + len, buf_len - len, "num_psr_opportunities = %u\n",
+			 htt_stats_buf->num_psr_opportunities);
+	len += scnprintf(buf + len, buf_len - len, "num_psr_ppdu_tried = %u\n",
+			 htt_stats_buf->num_psr_ppdu_tried);
+	len += scnprintf(buf + len, buf_len - len, "num_psr_ppdu_success = %u\n",
+			 htt_stats_buf->num_psr_ppdu_success);
+
+	stats_req->buf_len = len;
+}
+
+static inline void htt_print_backpressure_stats_tlv_v(const u32 *tag_buf,
+						      u8 *data)
+{
+	struct debug_htt_stats_req *stats_req =
+			(struct debug_htt_stats_req *)data;
+	struct htt_ring_backpressure_stats_tlv *htt_stats_buf =
+			(struct htt_ring_backpressure_stats_tlv *)tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+			 htt_stats_buf->pdev_id);
+	len += scnprintf(buf + len, buf_len - len, "Head_idx = %u\n",
+			 htt_stats_buf->current_head_idx);
+	len += scnprintf(buf + len, buf_len - len, "Tail_idx = %u\n",
+			 htt_stats_buf->current_tail_idx);
+	len += scnprintf(buf + len, buf_len - len, "Num Backpressure Msgs sent = %u\n",
+			 htt_stats_buf->num_htt_msgs_sent);
+	len += scnprintf(buf + len, buf_len - len,
+			 "backpressure_time_ms = %u\n",
+			 htt_stats_buf->backpressure_time_ms);
+
+	len += scnprintf(buf + len, buf_len - len, "Ring Backpressure Histogram\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "100ms to 200ms = %u, 200ms to 300ms = %u, 300ms to 400ms = %u\n",
+			 htt_stats_buf->backpressure_hist[0],
+			 htt_stats_buf->backpressure_hist[1],
+			 htt_stats_buf->backpressure_hist[2]);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "400ms to 500 ms = %u, Above 500ms = %u\n",
+			 htt_stats_buf->backpressure_hist[3],
+			 htt_stats_buf->backpressure_hist[4]);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "============================");
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_be_stats_tlv(const void *tag_buf,
+				  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_selfgen_be_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_SELFGEN_BE_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_queued = %u\n",
+			 htt_stats_buf->be_su_ndpa_queued);
+	len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_tried = %u\n",
+			 htt_stats_buf->be_su_ndpa);
+	len += scnprintf(buf + len, buf_len - len, "be_su_ndp_queued = %u\n",
+			 htt_stats_buf->be_su_ndp_queued);
+	len += scnprintf(buf + len, buf_len - len, "be_su_ndp_tried = %u\n",
+			 htt_stats_buf->be_su_ndp);
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_mu_mimo_ndpa_queued = %u\n",
+			 htt_stats_buf->be_mu_mimo_ndpa_queued);
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_mu_mimo_ndpa_tried = %u\n",
+			 htt_stats_buf->be_mu_mimo_ndpa);
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_mu_mimo_ndp_queued = %u\n",
+			 htt_stats_buf->be_mu_mimo_ndp_queued);
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_mu_mimo_ndp_tried = %u\n",
+			 htt_stats_buf->be_mu_mimo_ndp);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_mu_mimo_brpoll_queued,
+			   "be_mu_mimo_brpollX_queued",
+			   HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS - 1, "\n\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_mu_mimo_brpoll,
+			   "be_mu_mimo_brpollX_tried",
+			   HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS - 1, "\n\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_ul_mumimo_trigger,
+			   "be_ul_mumimo_trigger",
+			   HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS, "\n\n");
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_basic_trigger = %u\n",
+			 htt_stats_buf->be_basic_trigger);
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_ulmumimo_total_trigger = %u\n",
+			 htt_stats_buf->be_ulmumimo_trigger);
+	len += scnprintf(buf + len, buf_len - len, "be_bsr_trigger = %u\n",
+			 htt_stats_buf->be_bsr_trigger);
+	len += scnprintf(buf + len, buf_len - len, "be_mu_bar_trigger = %u\n",
+			 htt_stats_buf->be_mu_bar_trigger);
+	len += scnprintf(buf + len, buf_len - len, "be_mu_rts_trigger = %u\n",
+			 htt_stats_buf->be_mu_rts_trigger);
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_be_sched_status_stats_tlv(const void *tag_buf,
+					       struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_selfgen_be_sched_status_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_SELFGEN_BE_SCHED_STATUS_STATS_TLV:\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_su_ndpa_sch_status,
+			   "be_su_ndpa_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_su_ndp_sch_status,
+			   "be_su_ndp_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_mu_mimo_ndpa_sch_status,
+			   "be_mu_mimo_ndpa_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_mu_mimo_ndp_sch_status,
+			   "be_mu_mimo_ndp_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_mu_brp_sch_status,
+			   "be_mu_brp_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_mu_bar_sch_status,
+			   "be_mu_bar_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_basic_trig_sch_status,
+			   "be_basic_trig_sch_status",
+			   HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_su_ndp_sch_flag_err,
+			   "be_su_ndp_sch_flag_er",
+			   HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS, "\n\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_mu_mimo_ndp_sch_flag_err,
+			   "be_mu_mimo_ndp_sch_flag_err",
+			   HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS, "\n\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_mu_brp_sch_flag_err,
+			   "be_mu_brp_sch_flag_err",
+			   HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS, "\n\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_mu_bar_sch_flag_err,
+			   "be_mu_bar_sch_flag_err",
+			   HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS, "\n\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_basic_trig_sch_flag_err,
+			   "be_basic_trig_sch_flag_err",
+			   HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS, "\n\n");
+
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_be_ul_mu_mimo_sch_stats_tlv(const void *tag_buf,
+					      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_be_ul_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf;
+	u8 i;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "11be UL MU_MIMO SCH STATS:\n");
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				   "be_ul_mu_mimo_basic_sch_nusers_%u = %u", i,
+				   htt_stats_buf->be_ul_mu_mimo_basic_sch_nusers[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				   "be_ul_mu_mimo_brp_sch_nusers_%u = %u", i,
+				   htt_stats_buf->be_ul_mu_mimo_brp_sch_nusers[i]);
+	}
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_be_rate_stats_tlv(const void *tag_buf,
+				    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_rate_stats_be_tlv *htt_stats_buf = tag_buf;
+	u8  i, j;
+	char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS];
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	for (i = 0; i < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; i++) {
+		tx_gi[i] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+		if (!tx_gi[i])
+			goto fail;
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_PDEV_BE_RATE_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_tx_ldpc = %u",
+			 htt_stats_buf->be_mu_mimo_tx_ldpc);
+
+	/* MCS -2 and -1 will be printed first */
+	len += scnprintf(buf + len, buf_len - len, " -2:%u,-1:%u,",
+		htt_stats_buf->be_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_BE_MCS_COUNTERS-2],
+		htt_stats_buf->be_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_BE_MCS_COUNTERS-1]);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_mu_mimo_tx_mcs,
+			   "be_mu_mimo_tx_mcs",
+			   HTT_TX_PDEV_STATS_NUM_BE_MCS_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_mu_mimo_tx_nss,
+			   "be_mu_mimo_tx_nss",
+			   HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_mu_mimo_tx_bw,
+			   "be_mu_mimo_tx_bw",
+			   HTT_TX_PDEV_STATS_NUM_BE_BW_COUNTERS, "\n");
+
+	for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_mu_mimo_tx_gi[%u] = %s ", j, tx_gi[j]);
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_mu_mimo_tx_gi[j],
+				   NULL, HTT_TX_PDEV_STATS_NUM_BE_MCS_COUNTERS, "\n");
+	}
+
+	if (len >= buf_len)
+		buf[buf_len - 1] = 0;
+	else
+		buf[len] = 0;
+
+	stats_req->buf_len = len;
+fail:
+	for (i = 0; i < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; i++)
+		kfree(tx_gi[i]);
+
+}
+
+static inline void
+htt_print_be_ul_mimo_user_stats(const void *tag_buf,
+				struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_rx_pdev_be_ul_mimo_user_stats_tlv *htt_ul_user_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	if (htt_ul_user_stats_buf->user_index < HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER) {
+		if (htt_ul_user_stats_buf->user_index == 0)
+			len += scnprintf(buf + len, buf_len - len,
+					 "HTT_STATS_RX_PDEV_BE_UL_MIMO_USER_STATS_TLV\n");
+
+		len += scnprintf(buf + len, buf_len - len, "be_rx_ulmumimo_non_data_ppdu_%u = %u\n",
+				 htt_ul_user_stats_buf->user_index,
+				 htt_ul_user_stats_buf->be_rx_ulmumimo_non_data_ppdu);
+		len += scnprintf(buf + len, buf_len - len, "be_rx_ulmumimo_data_ppdu_%u = %u\n",
+				 htt_ul_user_stats_buf->user_index,
+				 htt_ul_user_stats_buf->be_rx_ulmumimo_data_ppdu);
+		len += scnprintf(buf + len, buf_len - len, "be_rx_ulmumimo_mpdu_ok_%u = %u\n",
+				 htt_ul_user_stats_buf->user_index,
+				 htt_ul_user_stats_buf->be_rx_ulmumimo_mpdu_ok);
+		len += scnprintf(buf + len, buf_len - len, "be_rx_ulmumimo_mpdu_fail_%u = %u\n",
+				 htt_ul_user_stats_buf->user_index,
+				 htt_ul_user_stats_buf->be_rx_ulmumimo_mpdu_fail);
+	}
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_ul_mumimo_trig_be_stats(const void *tag_buf,
+				  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_rx_pdev_ul_mumimo_trig_be_stats_tlv *htt_ul_mumimo_trig_be_stats_buf = tag_buf;
+	char *rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS];
+	u8 i, j;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_id_word = __le32_to_cpu(htt_ul_mumimo_trig_be_stats_buf->mac_id__word);
+
+	for (i = 0; i < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; i++) {
+		rx_gi[i] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+		if (!rx_gi[i])
+			goto fail;
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_RX_PDEV_UL_MUMIMO_TRIG_BE_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_STATS_MAC_ID));
+
+	len += scnprintf(buf + len, buf_len - len, "rx_11be_ul_mumimo = %u\n",
+			 htt_ul_mumimo_trig_be_stats_buf->rx_11be_ul_mumimo);
+
+	/* TODO: Check if enough space is present before writing BE MCS Counters */
+	/* MCS -2 and -1 will be printed first */
+
+	len += scnprintf(buf + len, buf_len - len, " -2:%u,-1:%u\n",
+		htt_ul_mumimo_trig_be_stats_buf->be_ul_mumimo_rx_mcs[HTT_RX_PDEV_STATS_NUM_BE_MCS_COUNTERS-2],
+		htt_ul_mumimo_trig_be_stats_buf->be_ul_mumimo_rx_mcs[HTT_RX_PDEV_STATS_NUM_BE_MCS_COUNTERS-1]);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_ul_mumimo_trig_be_stats_buf->be_ul_mumimo_rx_mcs,
+			   "be_ul_mumimo_rx_mcs",
+			   HTT_RX_PDEV_STATS_NUM_BE_MCS_COUNTERS - 2, "\n\n");
+
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len, " -2:%u,-1:%u\n",
+			htt_ul_mumimo_trig_be_stats_buf->be_ul_mumimo_rx_gi[j][HTT_RX_PDEV_STATS_NUM_BE_MCS_COUNTERS-2],
+			htt_ul_mumimo_trig_be_stats_buf->be_ul_mumimo_rx_gi[j][HTT_RX_PDEV_STATS_NUM_BE_MCS_COUNTERS-1]);
+
+		PRINT_ARRAY_TO_BUF(buf, len, htt_ul_mumimo_trig_be_stats_buf->be_ul_mumimo_rx_gi[j],
+				   NULL, HTT_RX_PDEV_STATS_NUM_BE_MCS_COUNTERS-2, "\n\n");
+
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ul_mumimo_rx_gi[%u] = %s ", j, rx_gi[j]);
+	}
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_ul_mumimo_trig_be_stats_buf->be_ul_mumimo_rx_nss,
+			   "be_ul_mumimo_rx_nss",
+			   HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS, "\n\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_ul_mumimo_trig_be_stats_buf->be_ul_mumimo_rx_bw,
+			   "be_ul_mumimo_rx_bw",
+			   HTT_RX_PDEV_STATS_NUM_BE_BW_COUNTERS, "\n\n");
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_ul_mumimo_rx_stbc = %u\n",
+			 htt_ul_mumimo_trig_be_stats_buf->be_ul_mumimo_rx_stbc);
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_ul_mumimo_rx_ldpc = %u\n",
+			 htt_ul_mumimo_trig_be_stats_buf->be_ul_mumimo_rx_ldpc);
+
+	for (i = 0; i < HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS; i++) {
+		PRINT_ARRAY_TO_BUF(buf, len,
+				   htt_ul_mumimo_trig_be_stats_buf->be_rx_ul_mumimo_chain_rssi_in_dbm[i],
+				   NULL, HTT_RX_PDEV_STATS_NUM_BE_BW_COUNTERS, "\n\n");
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_rx_ul_mumimo_rssi_in_dbm: chain[%u] = %s ",
+				 i, htt_ul_mumimo_trig_be_stats_buf->be_rx_ul_mumimo_chain_rssi_in_dbm[i]);
+	}
+
+	for (i = 0; i < HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER; i++) {
+		PRINT_ARRAY_TO_BUF(buf, len,
+				   htt_ul_mumimo_trig_be_stats_buf->be_rx_ul_mumimo_target_rssi[i],
+				   NULL, HTT_RX_PDEV_STATS_NUM_BE_BW_COUNTERS, "\n\n");
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_rx_ul_mumimo_target_rssi: user[%u] = %s ",
+				 i, htt_ul_mumimo_trig_be_stats_buf->be_rx_ul_mumimo_target_rssi[i]);
+	}
+
+	for (i = 0; i < HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER; i++) {
+		PRINT_ARRAY_TO_BUF(buf, len,
+				htt_ul_mumimo_trig_be_stats_buf->be_rx_ul_mumimo_fd_rssi[i],
+				NULL, HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS, "\n\n");
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_rx_ul_mumimo_fd_rssi: user[%u] = %s ",
+				 i, htt_ul_mumimo_trig_be_stats_buf->be_rx_ul_mumimo_fd_rssi[i]);
+	}
+
+	for (i = 0; i < HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER; i++) {
+		PRINT_ARRAY_TO_BUF(buf, len,
+				htt_ul_mumimo_trig_be_stats_buf->be_rx_ulmumimo_pilot_evm_dB_mean[i],
+				NULL, HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS, "\n\n");
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_rx_ulmumimo_pilot_evm_dB_mean: user [%u] = %s ",
+				 i, htt_ul_mumimo_trig_be_stats_buf->be_rx_ulmumimo_pilot_evm_dB_mean[i]);
+	}
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	stats_req->buf_len = len;
+fail:
+	for (i = 0; i < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; i++)
+		kfree(rx_gi[i]);
+}
+
+static inline void
+htt_print_be_ul_ofdma_trigger_stats(const void *tag_buf,
+				    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_rx_pdev_be_ul_trigger_stats_tlv *htt_trigger_stats_buf = tag_buf;
+	char   *rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS];
+	u8  i, j;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_id_word = __le32_to_cpu(htt_trigger_stats_buf->mac_id__word);
+
+	for (i = 0; i < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; i++) {
+		rx_gi[i] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+		if (!rx_gi[i])
+			goto fail;
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_RX_PDEV_BE_UL_TRIGGER_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, HTT_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "rx_11be_ul_ofdma = %u\n",
+			 htt_trigger_stats_buf->rx_11be_ul_ofdma);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->be_ul_ofdma_rx_mcs,
+			   "be_ul_ofdma_rx_mcs",
+			   HTT_RX_PDEV_STATS_NUM_BE_MCS_COUNTERS, "\n");
+
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ul_ofdma_rx_gi[%u] = %s ", j, rx_gi[j]);
+		for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BE_MCS_COUNTERS; i++) {
+			len += scnprintf(buf + len, buf_len - len, " %u:%u", i,
+					 htt_trigger_stats_buf->be_ul_ofdma_rx_gi[j][i]);
+		}
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->be_ul_ofdma_rx_nss,
+			   "be_ul_ofdma_rx_nss",
+			   HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->be_ul_ofdma_rx_bw,
+			   "be_ul_ofdma_rx_bw",
+			   HTT_RX_PDEV_STATS_NUM_BE_BW_COUNTERS, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "be_ul_ofdma_rx_stbc = %u\n",
+			htt_trigger_stats_buf->be_ul_ofdma_rx_stbc);
+	len += scnprintf(buf + len, buf_len - len, "be_ul_ofdma_rx_ldpc = %u\n",
+			htt_trigger_stats_buf->be_ul_ofdma_rx_ldpc);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->be_rx_ulofdma_data_ru_size_ppdu,
+			   "be_rx_ulofdma_data_ru_size_ppdu",
+			   HTT_RX_PDEV_STATS_NUM_BE_RU_SIZE_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->be_rx_ulofdma_non_data_ru_size_ppdu,
+			   "be_rx_ulofdma_non_data_ru_size_ppdu",
+			   HTT_RX_PDEV_STATS_NUM_BE_RU_SIZE_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->be_uplink_sta_aid,
+			   "be_rx_rssi_track_sta_aid",
+			   HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->be_uplink_sta_target_rssi,
+			   "be_rx_sta_target_rssi",
+			   HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->be_uplink_sta_fd_rssi,
+			   "be_uplink_sta_fd_rssi",
+			   HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->be_uplink_sta_power_headroom,
+			   "be_rx_sta_power_headroom",
+			   HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	stats_req->buf_len = len;
+fail:
+	for (i = 0; i < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; i++)
+		kfree(rx_gi[i]);
+}
+
+static inline void
+htt_print_tx_pdev_be_dl_mu_ofdma_sch_stats_tlv(const void *tag_buf,
+					       struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_be_dl_mu_ofdma_sch_stats_tlv *htt_stats_buf = tag_buf;
+	u8 i;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "11BE DL MU_OFDMA SCH STATS:\n");
+
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_mu_ofdma_sch_nusers_%u = %u\n", i,
+				 htt_stats_buf->be_mu_ofdma_sch_nusers[i]);
+	}
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_be_ul_mu_ofdma_sch_stats_tlv(const void *tag_buf,
+					       struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_be_ul_mu_ofdma_sch_stats_tlv *htt_stats_buf = tag_buf;
+	u8 i;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "\n11be UL MU_OFDMA SCH STATS:");
+
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ul_mu_ofdma_basic_sch_nusers_%u = %u\n", i,
+				 htt_stats_buf->be_ul_mu_ofdma_basic_sch_nusers[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ul_mu_ofdma_bsr_sch_nusers_%u = %u\n", i,
+				 htt_stats_buf->be_ul_mu_ofdma_bsr_sch_nusers[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ul_mu_ofdma_bar_sch_nusers_%u = %u\n", i,
+				 htt_stats_buf->be_ul_mu_ofdma_bar_sch_nusers[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ul_mu_ofdma_brp_sch_nusers_%u = %u\n", i,
+				 htt_stats_buf->be_ul_mu_ofdma_brp_sch_nusers[i]);
+	}
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_rate_stats_be_ofdma_tlv(const void *tag_buf,
+					  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_rate_stats_be_ofdma_tlv *htt_stats_buf = tag_buf;
+	u8  i, j;
+	char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS];
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	for (i = 0; i < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; i++) {
+		tx_gi[i] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
+		if (!tx_gi[i])
+			goto fail;
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_PDEV_RATE_STATS_BE_OFDMA_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 htt_stats_buf->mac_id__word & 0xFF);
+
+	len += scnprintf(buf + len, buf_len - len, "be_ofdma_tx_ldpc = %u\n",
+			 htt_stats_buf->be_ofdma_tx_ldpc);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_ofdma_tx_mcs,
+			   "be_ofdma_tx_mcs",
+			   HTT_TX_PDEV_STATS_NUM_BE_MCS_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_ofdma_tx_nss,
+			   "be_ofdma_tx_nss",
+			   HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_ofdma_tx_bw,
+			   "be_ofdma_tx_bw",
+			   HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_ofdma_eht_sig_mcs,
+			   "be_ofdma_eht_sig_mcs",
+			   HTT_TX_PDEV_STATS_NUM_EHT_SIG_MCS_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_ofdma_tx_ru_size,
+			   "be_ofdma_tx_ru_size",
+			   HTT_TX_PDEV_STATS_NUM_BE_RU_SIZE_COUNTERS, "\n");
+
+	for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_tx_gi[%u] = %s ", j, tx_gi[j]);
+		for (i = 0; i < HTT_TX_PDEV_STATS_NUM_BE_MCS_COUNTERS; i++) {
+			len += scnprintf(buf + len, buf_len - len,
+					" %u:%u,", i,
+					htt_stats_buf->be_ofdma_tx_gi[j][i]);
+		}
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	stats_req->buf_len = len;
+fail:
+	for (i = 0; i < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; i++)
+		kfree(tx_gi[i]);
+}
+
+static inline void
+htt_print_tx_selfgen_be_err_stats_tlv(const void *tag_buf,
+				      struct debug_htt_stats_req *stats_req)
+{
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	const struct htt_tx_selfgen_be_err_stats_tlv *htt_stats_buf = tag_buf;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_SELFGEN_BE_ERR_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "be_su_ndp_err = %u\n",
+			 htt_stats_buf->be_su_ndp_err);
+	len += scnprintf(buf + len, buf_len - len, "be_su_ndp_flushed = %u\n",
+			 htt_stats_buf->be_su_ndp_flushed);
+	len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_err = %u\n",
+			 htt_stats_buf->be_su_ndpa_err);
+	len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_flushed = %u\n",
+			 htt_stats_buf->be_su_ndpa_flushed);
+	len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndpa_err = %u\n",
+			 htt_stats_buf->be_mu_mimo_ndpa_err);
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_mu_mimo_ndpa_flushed = %u\n",
+			 htt_stats_buf->be_mu_mimo_ndpa_flushed);
+	len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndp_err = %u\n",
+			 htt_stats_buf->be_mu_mimo_ndp_err);
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_mu_mimo_ndp_flushed = %u\n",
+			 htt_stats_buf->be_mu_mimo_ndp_flushed);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_mu_mimo_brp_err,
+			   "be_mu_mimo_brpX_err",
+			   HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS - 1, "\n\n");
+
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_mu_mimo_brpoll_flushed,
+			   "be_mu_mimo_brpollX_flushed",
+			   HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS - 1, "\n\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_mu_mimo_brp_err_num_cbf_received,
+			   "be_mu_mimo_num_cbf_rcvd_on_brp_err",
+			   HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS, "\n\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->be_ul_mumimo_trigger_err,
+			   "be_ul_mumimo_trigger_err",
+			   HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS, "\n\n");
+
+	len += scnprintf(buf + len, buf_len - len, "be_basic_trigger_err = %u\n",
+			 htt_stats_buf->be_basic_trigger_err);
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_ulmumimo_total_trigger_err = %u\n",
+			 htt_stats_buf->be_ulmumimo_trigger_err);
+	len += scnprintf(buf + len, buf_len - len, "be_bsr_trigger_err = %u\n",
+			 htt_stats_buf->be_bsr_trigger_err);
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_mu_bar_trigger_err = %u\n",
+			 htt_stats_buf->be_mu_bar_trigger_err);
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_mu_rts_trigger_err = %u\n",
+			 htt_stats_buf->be_mu_rts_trigger_err);
+}
+
+static inline void htt_print_latency_prof_stats_tlv_v(const u32 *tag_buf, u8 *data)
+{
+	struct debug_htt_stats_req *stats_req =
+			(struct debug_htt_stats_req *)data;
+	struct htt_latency_prof_stats_tlv *htt_stats_buf =
+		(struct htt_latency_prof_stats_tlv *)tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	if (htt_stats_buf->print_header == 1) {
+		len += scnprintf(buf + len, buf_len - len,
+				   "HTT_STATS_LATENCY_PROF_TLV:\n");
+		len += scnprintf(buf + len, buf_len - len,
+				   "|  prof_name\t| cnt\t| min\t| max\t| last\t| tot\t| avg \t| hist_intvl\t| hist\t|\n");
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+			   "|%s| %8u| %8u| %8u| %8u| %8u| %8u| %8u| %4u:%4u:%4u|\n",
+			   htt_stats_buf->latency_prof_name, htt_stats_buf->cnt,
+			   htt_stats_buf->min, htt_stats_buf->max, htt_stats_buf->last,
+			   htt_stats_buf->tot, htt_stats_buf->avg,
+			   htt_stats_buf->hist_intvl, htt_stats_buf->hist[0],
+			   htt_stats_buf->hist[1], htt_stats_buf->hist[2]);
+
+	stats_req->buf_len = len;
+}
+
+static inline void htt_print_latency_prof_ctx_tlv(const u32 *tag_buf,
+						  struct debug_htt_stats_req *stats_req)
+{
+	struct htt_latency_prof_ctx_tlv *htt_stats_buf =
+		(struct htt_latency_prof_ctx_tlv *)tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_STATS_LATENCY_CTX_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "duration= %u\n",
+			 htt_stats_buf->duration);
+	len += scnprintf(buf + len, buf_len - len,
+			 "tx_msdu_cnt = %u\n", htt_stats_buf->tx_msdu_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			 "tx_mpdu_cnt = %u\n", htt_stats_buf->tx_mpdu_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			 "rx_msdu_cnt = %u\n", htt_stats_buf->rx_msdu_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			 "rx_mpdu_cnt = %u\n", htt_stats_buf->rx_mpdu_cnt);
+
+	stats_req->buf_len = len;
+}
+
+static inline void htt_print_latency_prof_cnt(const u32 *tag_buf,
+					      struct debug_htt_stats_req *stats_req)
+{
+	struct htt_latency_prof_cnt_tlv *htt_stats_buf =
+		(struct htt_latency_prof_cnt_tlv *)tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "prof_enable_cnt = %u\n",
+			 htt_stats_buf->prof_enable_cnt);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump the UL MU-MIMO trigger rx stats TLV: MCS/NSS/BW histograms,
+ * per-GI MCS breakdowns, and per-chain / per-user RSSI and pilot-EVM
+ * arrays. Output is appended to stats_req->buf.
+ */
+static inline void htt_print_ul_mumimo_trig_stats(const u32 *tag_buf,
+						  struct debug_htt_stats_req *stats_req)
+{
+	struct htt_rx_pdev_ul_mumimo_trig_stats_tlv *htt_ul_mumimo_trig_stats_buf =
+	(struct htt_rx_pdev_ul_mumimo_trig_stats_tlv *)tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	/* mac id is packed into the low bits of mac_id__word */
+	u32 mac_id = __le32_to_cpu(htt_ul_mumimo_trig_stats_buf->mac_id__word);
+	u8 j;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_RX_PDEV_UL_MUMIMO_TRIG_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id, HTT_STATS_MAC_ID));
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "rx_11ax_ul_mumimo = %u\n",
+			 htt_ul_mumimo_trig_stats_buf->rx_11ax_ul_mumimo);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_ul_mumimo_trig_stats_buf->ul_mumimo_rx_mcs,
+			   "ul_mumimo_rx_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_ul_mumimo_trig_stats_buf->ul_mumimo_rx_mcs_ext,
+			   "ul_mumimo_rx_mcs_ext",
+			   HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS, "\n");
+
+	/* For each guard interval, print the base MCS counters followed by
+	 * the extended-MCS counters on the same logical line.
+	 */
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len, "\nul_mumimo_rx_gi[%u] = ", j);
+		PRINT_ARRAY_TO_BUF(buf, len,
+				   htt_ul_mumimo_trig_stats_buf->ul_mumimo_rx_gi[j],
+				   NULL, HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+		len += scnprintf(buf + len, buf_len - len, ", ");
+		PRINT_ARRAY_TO_BUF(buf, len,
+				   htt_ul_mumimo_trig_stats_buf->ul_mumimo_rx_gi_ext[j],
+				   NULL, HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS, "\n");
+	}
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_ul_mumimo_trig_stats_buf->ul_mumimo_rx_nss,
+			   "ul_mumimo_rx_nss",
+			   HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_ul_mumimo_trig_stats_buf->ul_mumimo_rx_bw,
+			   "ul_mumimo_rx_bw",
+			   HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "\nul_mumimo_rx_stbc = %u",
+			 htt_ul_mumimo_trig_stats_buf->ul_mumimo_rx_stbc);
+	len += scnprintf(buf + len, buf_len - len, "\nul_mumimo_rx_ldpc = %u",
+			 htt_ul_mumimo_trig_stats_buf->ul_mumimo_rx_ldpc);
+
+	/* Per-chain rx RSSI (dBm), one entry per bandwidth bucket */
+	for (j = 0; j < HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "\nrx_ul_mumimo_rssi_in_dbm: chain[%u] = ", j);
+		PRINT_ARRAY_TO_BUF(buf, len,
+				   htt_ul_mumimo_trig_stats_buf->rx_ul_mumimo_chain_rssi_in_dbm[j],
+				   NULL, HTT_RX_PDEV_STATS_TOTAL_BW_COUNTERS, "\n");
+	}
+
+	/* Per-user target RSSI, per bandwidth bucket */
+	for (j = 0; j < HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				   "\nrx_ul_mumimo_target_rssi: user[%u] = ", j);
+		PRINT_ARRAY_TO_BUF(buf, len,
+				   htt_ul_mumimo_trig_stats_buf->rx_ul_mumimo_target_rssi[j],
+				   NULL, HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+	}
+
+	/* Per-user FD RSSI, per spatial stream */
+	for (j = 0; j < HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "\nrx_ul_mumimo_fd_rssi: user[%u] =  ", j);
+		PRINT_ARRAY_TO_BUF(buf, len,
+				   htt_ul_mumimo_trig_stats_buf->rx_ul_mumimo_fd_rssi[j],
+				   NULL, HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS,
+				   "\n");
+	}
+
+	/* Per-user mean pilot EVM (dB), per spatial stream */
+	for (j = 0; j < HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "\nrx_ulmumimo_pilot_evm_db_mean: user [%u] = ", j);
+		PRINT_ARRAY_TO_BUF(buf, len,
+				   htt_ul_mumimo_trig_stats_buf->rx_ulmumimo_pilot_evm_db_mean[j],
+				   NULL, HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS,
+				   "\n");
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump per-user UL MU-MIMO rx counters. The firmware emits this TLV once
+ * per user; the section header is printed only for the first user
+ * (user_index == 0). Output is appended to stats_req->buf.
+ */
+static inline void htt_print_ul_mimo_user_stats(const u32 *tag_buf,
+						struct debug_htt_stats_req *stats_req)
+{
+	struct htt_rx_pdev_ul_mimo_user_stats_tlv *htt_ul_user_stats_buf =
+		(struct htt_rx_pdev_ul_mimo_user_stats_tlv *)tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	/* Only user indices inside the supported range carry valid stats.
+	 * The check previously used '>', which made the user_index == 0
+	 * header branch unreachable and printed only out-of-range users.
+	 */
+	if (htt_ul_user_stats_buf->user_index < HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER) {
+		if (htt_ul_user_stats_buf->user_index == 0) {
+			len += scnprintf(buf + len, buf_len - len,
+					 "HTT_STATS_RX_PDEV_UL_MIMO_USER_STATS_TLV:\n");
+		}
+
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_ulmumimo_non_data_ppdu_%u = %u\n",
+				 htt_ul_user_stats_buf->user_index,
+				 htt_ul_user_stats_buf->rx_ulmumimo_non_data_ppdu);
+
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_ulmumimo_data_ppdu_%u = %u\n",
+				 htt_ul_user_stats_buf->user_index,
+				 htt_ul_user_stats_buf->rx_ulmumimo_data_ppdu);
+
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_ulmumimo_mpdu_ok_%u = %u\n",
+				 htt_ul_user_stats_buf->user_index,
+				 htt_ul_user_stats_buf->rx_ulmumimo_mpdu_ok);
+
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_ulmumimo_mpdu_fail_%u = %u\n",
+				 htt_ul_user_stats_buf->user_index,
+				 htt_ul_user_stats_buf->rx_ulmumimo_mpdu_fail);
+	}
+
+	stats_req->buf_len = len;
+}
+
+/* Dump per-user UL OFDMA rx counters. The firmware emits this TLV once
+ * per user; the section header is printed only for user index 0.
+ * Output is appended to stats_req->buf.
+ */
+static inline void htt_print_ul_ofdma_user_stats(const u32 *tag_buf,
+						 struct debug_htt_stats_req *stats_req)
+{
+	struct htt_rx_pdev_ul_ofdma_user_stats_tlv *htt_ul_user_stats_buf =
+		(struct htt_rx_pdev_ul_ofdma_user_stats_tlv *)tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	if (htt_ul_user_stats_buf->user_index == 0) {
+		/* fix header typo: was "USER_STAS_TLV" */
+		len += scnprintf(buf + len, buf_len - len,
+				 "HTT_RX_PDEV_UL_OFDMA_USER_STATS_TLV:\n");
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "rx_ulofdma_non_data_ppdu_%u = %u\n",
+			 htt_ul_user_stats_buf->user_index,
+			 htt_ul_user_stats_buf->rx_ulofdma_non_data_ppdu);
+	len += scnprintf(buf + len, buf_len - len,
+			 "rx_ulofdma_data_ppdu_%u = %u\n",
+			 htt_ul_user_stats_buf->user_index,
+			 htt_ul_user_stats_buf->rx_ulofdma_data_ppdu);
+	len += scnprintf(buf + len, buf_len - len,
+			 "rx_ulofdma_mpdu_ok_%u = %u\n",
+			 htt_ul_user_stats_buf->user_index,
+			 htt_ul_user_stats_buf->rx_ulofdma_mpdu_ok);
+	len += scnprintf(buf + len, buf_len - len,
+			 "rx_ulofdma_mpdu_fail_%u = %u\n",
+			 htt_ul_user_stats_buf->user_index,
+			 htt_ul_user_stats_buf->rx_ulofdma_mpdu_fail);
+	len += scnprintf(buf + len, buf_len - len,
+			 "rx_ulofdma_non_data_nusers_%u = %u\n",
+			 htt_ul_user_stats_buf->user_index,
+			 htt_ul_user_stats_buf->rx_ulofdma_non_data_nusers);
+	len += scnprintf(buf + len, buf_len - len,
+			 "rx_ulofdma_data_nusers_%u = %u\n",
+			 htt_ul_user_stats_buf->user_index,
+			 htt_ul_user_stats_buf->rx_ulofdma_data_nusers);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump the UL OFDMA trigger rx stats TLV: MCS/GI/NSS/BW histograms,
+ * RU-size PPDU histograms, and per-station uplink RSSI tracking arrays.
+ * Output is appended to stats_req->buf.
+ */
+static inline void htt_print_ul_ofdma_trigger_stats(const u32 *tag_buf,
+						    struct debug_htt_stats_req *stats_req)
+{
+	struct htt_rx_pdev_ul_trigger_stats_tlv *htt_trigger_stats_buf =
+		(struct htt_rx_pdev_ul_trigger_stats_tlv *)tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	/* mac id is packed into the low bits of mac_id__word */
+	u32 mac_id = __le32_to_cpu(htt_trigger_stats_buf->mac_id__word);
+	u8 j;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_RX_PDEV_UL_TRIGGER_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id, HTT_STATS_MAC_ID));
+	/* add missing space: was "rx_11ax_ul_ofdma =%u" */
+	len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n",
+			 htt_trigger_stats_buf->rx_11ax_ul_ofdma);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->ul_ofdma_rx_mcs,
+			   "ul_ofdma_rx_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len, "\nul_ofdma_rx_gi[%u] = ", j);
+		PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->ul_ofdma_rx_gi[j],
+				   NULL, HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	}
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->ul_ofdma_rx_nss,
+			   "ul_ofdma_rx_nss", HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+			   "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->ul_ofdma_rx_bw,
+			   "ul_ofdma_rx_bw", HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+	/* index 0 = half-rate channel, index 1 = quarter-rate channel */
+	for (j = 0; j < HTT_RX_PDEV_STATS_NUM_REDUCED_CHAN_TYPES; j++) {
+		len += scnprintf(buf + len, buf_len - len, j == 0 ?
+				 "\nhalf_ul_ofdma_rx_bw = " :
+				 "\nquarter_ul_ofdma_rx_bw = ");
+		PRINT_ARRAY_TO_BUF(buf, len,
+				   htt_trigger_stats_buf->reduced_ul_ofdma_rx_bw[j],
+				   NULL, HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+	}
+	len += scnprintf(buf + len, buf_len - len, "\nul_ofdma_rx_stbc = %u",
+			 htt_trigger_stats_buf->ul_ofdma_rx_stbc);
+	len += scnprintf(buf + len, buf_len - len, "\nul_ofdma_rx_ldpc = %u",
+			 htt_trigger_stats_buf->ul_ofdma_rx_ldpc);
+
+	/* The data/non-data RU-size labels were swapped relative to the
+	 * arrays they printed; each label now matches its source array.
+	 */
+	PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->rx_ulofdma_data_ru_size_ppdu,
+			   "rx_ulofdma_data_ru_size_ppdu",
+			   HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_trigger_stats_buf->rx_ulofdma_non_data_ru_size_ppdu,
+			   "rx_ulofdma_non_data_ru_size_ppdu",
+			   HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->uplink_sta_aid,
+			   "rx_rssi_track_sta_aid", HTT_RX_UL_MAX_UPLINK_RSSI_TRACK,
+			   "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->uplink_sta_target_rssi,
+			   "rx_sta_target_rssi", HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->uplink_sta_fd_rssi,
+			   "rx_sta_fd_rssi", HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_trigger_stats_buf->uplink_sta_power_headroom,
+			   "rx_sta_power_headroom", HTT_RX_UL_MAX_UPLINK_RSSI_TRACK,
+			   "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump the RX flow search engine (FSE) stats TLV: software
+ * enable/disable and cache-invalidate counters, followed by hardware
+ * cache hit/search counters and occupancy / pending-search histograms.
+ * Output is appended to stats_req->buf.
+ */
+static inline void htt_print_rx_fse_stats_tlv(const u32 *tag_buf,
+					      struct debug_htt_stats_req *stats_req)
+{
+	struct htt_rx_fse_stats_tlv *htt_stats_buf =
+		(struct htt_rx_fse_stats_tlv *)tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_STATS_RX_FSE_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "=== Software RX FSE STATS ===\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "Enable count  = %u\n", htt_stats_buf->fse_enable_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			 "Disable count = %u\n", htt_stats_buf->fse_disable_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			 "Cache Invalidate Entry Count   = %u\n",
+			 htt_stats_buf->fse_cache_invalidate_entry_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			 "Full Cache Invalidate Count    = %u\n",
+			 htt_stats_buf->fse_full_cache_invalidate_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			 "=== Hardware RX FSE STATS ===\n");
+	len += scnprintf(buf + len, buf_len - len, "Cache hits Count = %u\n",
+			 htt_stats_buf->fse_num_cache_hits_cnt);
+	len += scnprintf(buf + len, buf_len - len, "Cache No. of searches = %u\n",
+			 htt_stats_buf->fse_num_searches_cnt);
+	/* 10-bucket histogram: [0], [1-16], ..., [113-127], [128] entries */
+	len += scnprintf(buf + len, buf_len - len, "Cache occupancy Peak Count:\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 " [0] = %u [1-16] = %u [17-32] = %u "
+			 "[33-48] = %u [49-64] = %u [65-80] = %u "
+			 "[81-96] = %u [97-112] = %u [113-127] = %u "
+			 "[128] = %u\n",
+			 htt_stats_buf->fse_cache_occupancy_peak_cnt[0],
+			 htt_stats_buf->fse_cache_occupancy_peak_cnt[1],
+			 htt_stats_buf->fse_cache_occupancy_peak_cnt[2],
+			 htt_stats_buf->fse_cache_occupancy_peak_cnt[3],
+			 htt_stats_buf->fse_cache_occupancy_peak_cnt[4],
+			 htt_stats_buf->fse_cache_occupancy_peak_cnt[5],
+			 htt_stats_buf->fse_cache_occupancy_peak_cnt[6],
+			 htt_stats_buf->fse_cache_occupancy_peak_cnt[7],
+			 htt_stats_buf->fse_cache_occupancy_peak_cnt[8],
+			 htt_stats_buf->fse_cache_occupancy_peak_cnt[9]);
+	len += scnprintf(buf + len, buf_len - len,
+			 "Cache occupancy Current Count:\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 " [0] = %u [1-16] = %u [17-32] = %u "
+			 "[33-48] = %u [49-64] = %u [65-80] = %u "
+			 "[81-96] = %u [97-112] = %u [113-127] = %u "
+			 "[128] = %u\n",
+			 htt_stats_buf->fse_cache_occupancy_curr_cnt[0],
+			 htt_stats_buf->fse_cache_occupancy_curr_cnt[1],
+			 htt_stats_buf->fse_cache_occupancy_curr_cnt[2],
+			 htt_stats_buf->fse_cache_occupancy_curr_cnt[3],
+			 htt_stats_buf->fse_cache_occupancy_curr_cnt[4],
+			 htt_stats_buf->fse_cache_occupancy_curr_cnt[5],
+			 htt_stats_buf->fse_cache_occupancy_curr_cnt[6],
+			 htt_stats_buf->fse_cache_occupancy_curr_cnt[7],
+			 htt_stats_buf->fse_cache_occupancy_curr_cnt[8],
+			 htt_stats_buf->fse_cache_occupancy_curr_cnt[9]);
+	/* 6-bucket histogram of searches-squared statistic */
+	len += scnprintf(buf + len, buf_len - len, "Cache search Square Count:\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 " [0] = %u [1-50] = %u [51-100] = %u "
+			 "[101-200] = %u [201-255] = %u [256] = %u\n",
+			 htt_stats_buf->fse_search_stat_square_cnt[0],
+			 htt_stats_buf->fse_search_stat_square_cnt[1],
+			 htt_stats_buf->fse_search_stat_square_cnt[2],
+			 htt_stats_buf->fse_search_stat_square_cnt[3],
+			 htt_stats_buf->fse_search_stat_square_cnt[4],
+			 htt_stats_buf->fse_search_stat_square_cnt[5]);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "Cache search Peak Pending Count:\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 " [0] = %u [1-2] = %u [3-4] = %u "
+			 "[Greater/Equal to 5] = %u\n",
+			 htt_stats_buf->fse_search_stat_peak_cnt[0],
+			 htt_stats_buf->fse_search_stat_peak_cnt[1],
+			 htt_stats_buf->fse_search_stat_peak_cnt[2],
+			 htt_stats_buf->fse_search_stat_peak_cnt[3]);
+	len += scnprintf(buf + len, buf_len - len,
+			 "Cache search Number of Pending Count:\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 " [0] = %u [1-2] = %u [3-4] = %u "
+			 "[Greater/Equal to 5] = %u\n",
+			 htt_stats_buf->fse_search_stat_search_pending_cnt[0],
+			 htt_stats_buf->fse_search_stat_search_pending_cnt[1],
+			 htt_stats_buf->fse_search_stat_search_pending_cnt[2],
+			 htt_stats_buf->fse_search_stat_search_pending_cnt[3]);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump the pdev TX rate TXBF stats TLV: legacy OFDM rate counters and
+ * SU open-loop / implicit-BF / explicit-TXBF breakdowns per MCS, NSS
+ * and bandwidth (including half/quarter-rate reduced channels).
+ * Output is appended to stats_req->buf.
+ */
+static inline void
+htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf,
+				      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_pdev_txrate_txbf_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int i, j;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_STATS_PDEV_TX_RATE_TXBF_STATS:\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
+			 "                   24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u\n",
+			 htt_stats_buf->tx_legacy_ofdm_rate[0],
+			 htt_stats_buf->tx_legacy_ofdm_rate[1],
+			 htt_stats_buf->tx_legacy_ofdm_rate[2],
+			 htt_stats_buf->tx_legacy_ofdm_rate[3],
+			 htt_stats_buf->tx_legacy_ofdm_rate[4],
+			 htt_stats_buf->tx_legacy_ofdm_rate[5],
+			 htt_stats_buf->tx_legacy_ofdm_rate[6],
+			 htt_stats_buf->tx_legacy_ofdm_rate[7]);
+
+	len += scnprintf(buf + len, buf_len - len, "tx_ol_mcs = ");
+	for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+		len += scnprintf(buf + len, buf_len - len,
+				 "%d:%u,", i, htt_stats_buf->tx_su_ol_mcs[i]);
+
+	len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_mcs = ");
+	for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+		len += scnprintf(buf + len, buf_len - len,
+				 "%d:%u,", i, htt_stats_buf->tx_su_ibf_mcs[i]);
+
+	/* add missing space after '=' for consistency with the other labels */
+	len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_mcs = ");
+	for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+		len += scnprintf(buf + len, buf_len - len,
+				 "%d:%u,", i, htt_stats_buf->tx_su_txbf_mcs[i]);
+
+	len += scnprintf(buf + len, buf_len - len, "\ntx_ol_nss = ");
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+		len += scnprintf(buf + len, buf_len - len,
+				 "%d:%u,", i, htt_stats_buf->tx_su_ol_nss[i]);
+
+	len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_nss = ");
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+		len += scnprintf(buf + len, buf_len - len,
+				 "%d:%u,", i, htt_stats_buf->tx_su_ibf_nss[i]);
+
+	len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_nss = ");
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+		len += scnprintf(buf + len, buf_len - len,
+				 "%d:%u,", i, htt_stats_buf->tx_su_txbf_nss[i]);
+
+	len += scnprintf(buf + len, buf_len - len, "\ntx_ol_bw = ");
+	for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+		len += scnprintf(buf + len, buf_len - len,
+				 "%d:%u,", i, htt_stats_buf->tx_su_ol_bw[i]);
+
+	/* j == 0: half-rate channel, j == 1: quarter-rate channel */
+	for (j = 0; j < HTT_TX_TXBF_RATE_STATS_NUM_REDUCED_CHAN_TYPES; j++) {
+		len += scnprintf(buf + len, buf_len - len, j == 0 ?
+				 "\nhalf_tx_ol_bw = " : "\nquarter_tx_ol_bw = ");
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->reduced_tx_su_ol_bw[j],
+				   NULL, HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS, "\n");
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_bw = ");
+	for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+		len += scnprintf(buf + len, buf_len - len,
+				 "%d:%u,", i, htt_stats_buf->tx_su_ibf_bw[i]);
+
+	for (j = 0; j < HTT_TX_TXBF_RATE_STATS_NUM_REDUCED_CHAN_TYPES; j++) {
+		len += scnprintf(buf + len, buf_len - len, j == 0 ?
+				 "\nhalf_tx_ibf_bw = " : "\nquarter_tx_ibf_bw = ");
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->reduced_tx_su_ibf_bw[j],
+				   NULL, HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS, "\n");
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_bw = ");
+	for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+		len += scnprintf(buf + len, buf_len - len,
+				 "%d:%u,", i, htt_stats_buf->tx_su_txbf_bw[i]);
+
+	for (j = 0; j < HTT_TX_TXBF_RATE_STATS_NUM_REDUCED_CHAN_TYPES; j++) {
+		len += scnprintf(buf + len, buf_len - len, j == 0 ?
+				 "\nhalf_tx_txbf_bw = " : "\nquarter_tx_txbf_bw = ");
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->reduced_tx_su_txbf_bw[j],
+				   NULL, HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS, "\n");
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_per_rate_stats_tlv(const void *tag_buf,
+				struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_per_rate_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	char mode_prefix[][3] = {"su", "mu"};
+	int i = 0, j = 0;
+
+	if (htt_stats_buf->rc_mode == HTT_STATS_RC_MODE_DLSU) {
+		len += scnprintf(buf + len, buf_len - len, "HTT_TX_PER_STATS:\n");
+		len += scnprintf(buf + len, buf_len - len, "PER_STATS_SU:\n");
+	} else if (htt_stats_buf->rc_mode == HTT_STATS_RC_MODE_DLMUMIMO) {
+		len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_MUMIMO:\n");
+		j = 1;
+	} else {
+		return;
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\nPER per BW:\n");
+
+	len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s", mode_prefix[j]);
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_BW_COUNTERS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 " %u:%u ",
+				 i, htt_stats_buf->per_bw[i].ppdus_tried);
+	}
+	/*
+	 * Now i = HTT_TX_PDEV_STATS_NUM_BW_COUNTERS.
+	 * Adding 320 MHz ppdus tried.
+	 */
+	len += scnprintf(buf + len, buf_len - len,
+			 " %u:%u\n", i, htt_stats_buf->per_bw320.ppdus_tried);
+
+	len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s", mode_prefix[j]);
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_BW_COUNTERS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 " %u:%u ",
+				 i, htt_stats_buf->per_bw[i].ppdus_ack_failed);
+	}
+	/*
+	 * Now i = HTT_TX_PDEV_STATS_NUM_BW_COUNTERS.
+	 * Adding 320 MHz ppdus ack failed.
+	 */
+	len += scnprintf(buf + len, buf_len - len,
+			 " %u:%u\n", i, htt_stats_buf->per_bw320.ppdus_ack_failed);
+
+	len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s", mode_prefix[j]);
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_BW_COUNTERS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 " %u:%u ",
+				 i, htt_stats_buf->per_bw[i].mpdus_tried);
+	}
+	/*
+	 * Now i = HTT_TX_PDEV_STATS_NUM_BW_COUNTERS.
+	 * Adding 320 MHz mpdus tried.
+	 */
+	len += scnprintf(buf + len, buf_len - len,
+			 " %u:%u\n", i, htt_stats_buf->per_bw320.mpdus_tried);
+
+
+	len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s", mode_prefix[j]);
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_BW_COUNTERS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 " %u:%u ",
+				 i, htt_stats_buf->per_bw[i].mpdus_failed);
+	}
+	/*
+	 * Now i = HTT_TX_PDEV_STATS_NUM_BW_COUNTERS.
+	 * Adding 320 MHz mpdus failed.
+	 */
+	len += scnprintf(buf + len, buf_len - len,
+			 " %u:%u\n", i, htt_stats_buf->per_bw320.mpdus_failed);
+
+	len += scnprintf(buf + len, buf_len - len, "\nPER per NSS:\n");
+
+	len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s", mode_prefix[j]);
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 " %u:%u ",
+				 i, htt_stats_buf->per_nss[i].ppdus_tried);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\nppdus_ack_fialed_%s", mode_prefix[j]);
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 " %u:%u ",
+				 i, htt_stats_buf->per_nss[i].ppdus_ack_failed);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\nmpdus_tried_%s", mode_prefix[j]);
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 " %u:%u ",
+				 i, htt_stats_buf->per_nss[i].mpdus_tried);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\nmpdus_failed_%s", mode_prefix[j]);
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 " %u:%u ",
+				 i, htt_stats_buf->per_nss[i].mpdus_failed);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\nPER per Mcs:\n");
+
+	len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s", mode_prefix[j]);
+	for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 " %u:%u ",
+				 i, htt_stats_buf->per_mcs[i].ppdus_tried);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\nppdus_ack_failed_%s", mode_prefix[j]);
+	for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 " %u:%u ",
+				 i, htt_stats_buf->per_mcs[i].ppdus_ack_failed);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\nmpdus_tried_%s", mode_prefix[j]);
+	for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 " %u:%u ",
+				 i, htt_stats_buf->per_mcs[i].mpdus_tried);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\nmpdus_failed_%s", mode_prefix[j]);
+	for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 " %u:%u ",
+				 i, htt_stats_buf->per_mcs[i].mpdus_failed);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	if (htt_stats_buf->rc_mode == HTT_STATS_RC_MODE_DLMUMIMO) {
+		len += scnprintf(buf + len, buf_len - len, "last_probed_bw  = %u\n",
+				 htt_stats_buf->last_probed_bw);
+		len += scnprintf(buf + len, buf_len - len, "last_probed_nss = %u\n",
+				 htt_stats_buf->last_probed_nss);
+		len += scnprintf(buf + len, buf_len - len, "last_probed_mcs = %u\n",
+				 htt_stats_buf->last_probed_mcs);
+	}
+
+	stats_req->buf_len = len;
+}
+
+/* Dump per-OFDMA-user 11ax NDPA frame counters
+ * (queued / tried / flushed / error) for each user slot.
+ */
+static
+inline void htt_print_txbf_ofdma_ndpa_stats_tlv(const void *tag_buf,
+						struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_txbf_ofdma_ndpa_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int i;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_NDPA_STATS_TLV:\n");
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_ndpa_queued_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_ndpa_queued[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_ndpa_tried_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_ndpa_tried[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_ndpa_flushed_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_ndpa_flushed[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_ndpa_err_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_ndpa_err[i]);
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	stats_req->buf_len = len;
+}
+
+/* Dump per-OFDMA-user 11ax NDP frame counters
+ * (queued / tried / flushed / error) for each user slot.
+ */
+static
+inline void htt_print_txbf_ofdma_ndp_stats_tlv(const void *tag_buf,
+					       struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_txbf_ofdma_ndp_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int i;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_NDP_STATS_TLV:\n");
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_ndp_queued_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_ndp_queued[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_ndp_tried_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_ndp_tried[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_ndp_flushed_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_ndp_flushed[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_ndp_err_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_ndp_err[i]);
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	stats_req->buf_len = len;
+}
+
+/* Dump per-OFDMA-user 11ax BR-Poll counters (queued / tried / flushed /
+ * error) plus the number of CBF frames received per user slot.
+ */
+static
+inline void htt_print_txbf_ofdma_brp_stats_tlv(const void *tag_buf,
+					       struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_txbf_ofdma_brp_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int i;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_BRP_STATS_TLV:\n");
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_brpoll_queued_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_brpoll_queued[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_brpoll_tried_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_brpoll_tried[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_brpoll_flushed_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_brpoll_flushed[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_brp_err_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_brp_err[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_brp_err_num_cbf_rcvd_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_brp_err_num_cbf_rcvd[i]);
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	stats_req->buf_len = len;
+}
+
+/* Dump per-OFDMA-user steering counters: steered/open-loop PPDU counts
+ * and prefetch / sounding / forced-sounding user counts per user slot.
+ */
+static
+inline void htt_print_txbf_ofdma_steer_stats_tlv(const void *tag_buf,
+						 struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_txbf_ofdma_steer_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int i;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_STEER_STATS_TLV:\n");
+	for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_num_ppdu_steer_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_num_ppdu_steer[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_num_ppdu_ol_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_num_ppdu_ol[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_num_usrs_prefetch_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_num_usrs_prefetch[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_num_usrs_sound_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_num_usrs_sound[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ofdma_num_usrs_force_sound_user%d = %u\n",
+				 i, htt_stats_buf->ax_ofdma_num_usrs_force_sound[i]);
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	stats_req->buf_len = len;
+}
+
+/* Dump the variable-length 11ax OFDMA NDPA stats TLV. For each counter
+ * type, only non-zero "index:value" pairs are printed; if every entry
+ * is zero, a "<name> = NONE" line is emitted instead.
+ *
+ * NOTE(review): when entries are non-zero, the values are printed
+ * without a leading field-name label — confirm this output format is
+ * intended.
+ */
+static inline void
+htt_print_txbf_ofdma_ax_ndpa_stats_tlv(const void *tag_buf,
+				       struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_txbf_ofdma_ax_ndpa_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	/* number of valid entries in the ax_ndpa[] array, from the TLV */
+	u32 num_elements = htt_stats_buf->num_elems_ax_ndpa_arr;
+	int i, null_output;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_NDPA_STATS_TLV:\n");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->ax_ndpa[i].ax_ofdma_ndpa_queued) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+					 "  %u:%u, ", i + 1,
+					 htt_stats_buf->ax_ndpa[i].ax_ofdma_ndpa_queued);
+		}
+	}
+	if (null_output)
+		len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndpa_queued = %s\n", "NONE");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->ax_ndpa[i].ax_ofdma_ndpa_tried) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+					 "  %u:%u, ", i + 1,
+					 htt_stats_buf->ax_ndpa[i].ax_ofdma_ndpa_tried);
+		}
+	}
+	if (null_output)
+		len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndpa_tried = %s\n", "NONE");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->ax_ndpa[i].ax_ofdma_ndpa_flushed) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+					 "  %u:%u, ", i + 1,
+					 htt_stats_buf->ax_ndpa[i].ax_ofdma_ndpa_flushed);
+		}
+	}
+	if (null_output)
+		len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndpa_flushed = %s\n", "NONE");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->ax_ndpa[i].ax_ofdma_ndpa_err) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+					 "  %u:%u, ", i + 1,
+					 htt_stats_buf->ax_ndpa[i].ax_ofdma_ndpa_err);
+		}
+	}
+	if (null_output)
+		len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndpa_err = %s\n", "NONE");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump the variable-length 11ax OFDMA NDP stats TLV. For each counter
+ * type, only non-zero "index:value" pairs are printed; if every entry
+ * is zero, a "<name> = NONE" line is emitted instead.
+ *
+ * NOTE(review): non-zero values are printed without a leading
+ * field-name label — confirm this output format is intended.
+ */
+static inline void
+htt_print_txbf_ofdma_ax_ndp_stats_tlv(const void *tag_buf,
+				      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_txbf_ofdma_ax_ndp_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	/* number of valid entries in the ax_ndp[] array, from the TLV */
+	u32 num_elements = htt_stats_buf->num_elems_ax_ndp_arr;
+	int i, null_output;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_NDP_STATS_TLV:\n");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->ax_ndp[i].ax_ofdma_ndp_queued) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+					 "  %u:%u, ", i + 1,
+					 htt_stats_buf->ax_ndp[i].ax_ofdma_ndp_queued);
+		}
+	}
+	if (null_output)
+		len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndp_queued = %s\n", "NONE");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->ax_ndp[i].ax_ofdma_ndp_tried) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+					 "  %u:%u, ", i + 1,
+					 htt_stats_buf->ax_ndp[i].ax_ofdma_ndp_tried);
+		}
+	}
+	if (null_output)
+		len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndp_tried = %s\n", "NONE");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->ax_ndp[i].ax_ofdma_ndp_flushed) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+					 "  %u:%u, ", i + 1,
+					 htt_stats_buf->ax_ndp[i].ax_ofdma_ndp_flushed);
+		}
+	}
+	if (null_output)
+		len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndp_flushed = %s\n", "NONE");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->ax_ndp[i].ax_ofdma_ndp_err) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+					 "  %u:%u, ", i + 1,
+					 htt_stats_buf->ax_ndp[i].ax_ofdma_ndp_err);
+		}
+	}
+	if (null_output)
+		len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndp_err = %s\n", "NONE");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump the variable-length 11ax OFDMA BRP stats TLV. For each counter
+ * type (BR-Poll queued/tried/flushed, BRP errors, CBF frames received),
+ * only non-zero "index:value" pairs are printed; if every entry is
+ * zero, a "<name> = NONE" line is emitted instead.
+ *
+ * NOTE(review): non-zero values are printed without a leading
+ * field-name label — confirm this output format is intended.
+ */
+static inline void
+htt_print_txbf_ofdma_ax_brp_stats_tlv(const void *tag_buf,
+				      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_txbf_ofdma_ax_brp_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	/* number of valid entries in the ax_brp[] array, from the TLV */
+	u32 num_elements = htt_stats_buf->num_elems_ax_brp_arr;
+	int i, null_output;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_BRP_STATS_TLV:\n");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->ax_brp[i].ax_ofdma_brpoll_queued) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+					 "  %u:%u, ", i + 1,
+					 htt_stats_buf->ax_brp[i].ax_ofdma_brpoll_queued);
+		}
+	}
+	if (null_output)
+		len += scnprintf(buf + len, buf_len - len, "ax_ofdma_brpoll_queued = %s\n", "NONE");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->ax_brp[i].ax_ofdma_brpoll_tried) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+					 "  %u:%u, ", i + 1,
+					 htt_stats_buf->ax_brp[i].ax_ofdma_brpoll_tried);
+		}
+	}
+	if (null_output)
+		len += scnprintf(buf + len, buf_len - len, "ax_ofdma_brpoll_tried = %s\n", "NONE");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->ax_brp[i].ax_ofdma_brpoll_flushed) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+					 "  %u:%u, ", i + 1,
+					 htt_stats_buf->ax_brp[i].ax_ofdma_brpoll_flushed);
+		}
+	}
+	if (null_output)
+		len += scnprintf(buf + len, buf_len - len, "ax_ofdma_brpoll_flushed = %s\n", "NONE");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->ax_brp[i].ax_ofdma_brp_err) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+					 "  %u:%u, ", i + 1,
+					 htt_stats_buf->ax_brp[i].ax_ofdma_brp_err);
+		}
+	}
+	if (null_output)
+		len += scnprintf(buf + len, buf_len - len, "ax_ofdma_brp_err = %s\n", "NONE");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->ax_brp[i].ax_ofdma_brp_err_num_cbf_rcvd) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+					 "  %u:%u, ", i + 1,
+					 htt_stats_buf->ax_brp[i].ax_ofdma_brp_err_num_cbf_rcvd);
+		}
+	}
+	if (null_output)
+		len += scnprintf(buf + len, buf_len - len, "ax_ofdma_brp_err_num_cbf_rcvd = %s\n", "NONE");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_TXBF_OFDMA_AX_STEER_STATS_TLV (11ax OFDMA steering/sounding
+ * counters).  Non-zero per-element counters are printed as 1-based
+ * "index:value" pairs; an all-zero array collapses to "<name> = NONE".
+ */
+static inline void
+htt_print_txbf_ofdma_ax_steer_stats_tlv(const void *tag_buf,
+				      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_txbf_ofdma_ax_steer_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 num_elements = htt_stats_buf->num_elems_ax_steer_arr;
+	int i, null_output;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_STEER_STATS_TLV:\n");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->ax_steer[i].ax_ofdma_num_ppdu_steer) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+					 "  %u:%u, ", i + 1,
+					 htt_stats_buf->ax_steer[i].ax_ofdma_num_ppdu_steer);
+		}
+	}
+	if (null_output)
+		len += scnprintf(buf + len, buf_len - len, "ax_ofdma_num_ppdu_steer = %s\n", "NONE");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->ax_steer[i].ax_ofdma_num_usrs_prefetch) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+					 "  %u:%u, ", i + 1,
+					 htt_stats_buf->ax_steer[i].ax_ofdma_num_usrs_prefetch);
+		}
+	}
+	if (null_output)
+		len += scnprintf(buf + len, buf_len - len, "ax_ofdma_num_usrs_prefetch = %s\n", "NONE");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->ax_steer[i].ax_ofdma_num_usrs_sound) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+					 "  %u:%u, ", i + 1,
+					 htt_stats_buf->ax_steer[i].ax_ofdma_num_usrs_sound);
+		}
+	}
+	if (null_output)
+		len += scnprintf(buf + len, buf_len - len, "ax_ofdma_num_usrs_sound = %s\n", "NONE");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->ax_steer[i].ax_ofdma_num_usrs_force_sound) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+					 "  %u:%u, ", i + 1,
+					 htt_stats_buf->ax_steer[i].ax_ofdma_num_usrs_force_sound);
+		}
+	}
+	if (null_output)
+		len += scnprintf(buf + len, buf_len - len, "ax_ofdma_num_usrs_force_sound = %s\n", "NONE");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_TXBF_OFDMA_BE_NDPA_STATS_TLV (11be OFDMA NDPA counters:
+ * queued/tried/flushed/err).  Non-zero per-element counters are printed
+ * as 1-based "index:value" pairs; an all-zero array collapses to a
+ * single "<name> = NONE" line.
+ */
+static inline void
+htt_print_txbf_ofdma_be_ndpa_stats_tlv(const void *tag_buf,
+				       struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_txbf_ofdma_be_ndpa_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int i, null_output;
+	u32 num_elements = htt_stats_buf->num_elems_be_ndpa_arr;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TXBF_OFDMA_BE_NDPA_STATS_TLV:\n");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->be_ndpa[i].be_ofdma_ndpa_queued) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+				" %u:%u,", i + 1,
+				htt_stats_buf->be_ndpa[i].be_ofdma_ndpa_queued);
+		}
+	}
+	if (null_output) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_ndpa_queued = %s\n", "NONE");
+	}
+
+	null_output = 1;
+
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->be_ndpa[i].be_ofdma_ndpa_tried) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+				" %u:%u,", i + 1,
+				htt_stats_buf->be_ndpa[i].be_ofdma_ndpa_tried);
+		}
+	}
+	if (null_output) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_ndpa_tried = %s\n", "NONE");
+	}
+
+	null_output = 1;
+
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->be_ndpa[i].be_ofdma_ndpa_flushed) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+				" %u:%u,", i + 1,
+				htt_stats_buf->be_ndpa[i].be_ofdma_ndpa_flushed);
+		}
+	}
+	if (null_output) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_ndpa_flushed = %s\n", "NONE");
+	}
+
+	null_output = 1;
+
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->be_ndpa[i].be_ofdma_ndpa_err) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+				" %u:%u,", i + 1,
+				htt_stats_buf->be_ndpa[i].be_ofdma_ndpa_err);
+		}
+	}
+	if (null_output) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_ndpa_err = %s\n", "NONE");
+	}
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_TXBF_OFDMA_BE_NDP_STATS_TLV (11be OFDMA NDP counters).
+ * Non-zero per-element counters are printed as 1-based "index:value"
+ * pairs; an all-zero array collapses to "<name> = NONE".
+ * NOTE(review): unlike the NDPA/BRP printers this one emits only
+ * queued/flushed/err -- presumably the TLV carries no "tried" counter;
+ * verify against the struct definition.
+ */
+static inline void
+htt_print_txbf_ofdma_be_ndp_stats_tlv(const void *tag_buf,
+				      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_txbf_ofdma_be_ndp_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int i, null_output;
+
+	u32 num_elements = htt_stats_buf->num_elems_be_ndp_arr;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_BE_NDP_STATS_TLV:\n");
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->be_ndp[i].be_ofdma_ndp_queued) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+				" %u:%u,", i + 1,
+				htt_stats_buf->be_ndp[i].be_ofdma_ndp_queued);
+		}
+	}
+	if (null_output) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_ndp_queued = %s\n", "NONE");
+	}
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->be_ndp[i].be_ofdma_ndp_flushed) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+				" %u:%u,", i + 1,
+				htt_stats_buf->be_ndp[i].be_ofdma_ndp_flushed);
+		}
+	}
+	if (null_output) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_ndp_flushed = %s\n", "NONE");
+	}
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->be_ndp[i].be_ofdma_ndp_err) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+				" %u:%u,", i + 1,
+				htt_stats_buf->be_ndp[i].be_ofdma_ndp_err);
+		}
+	}
+	if (null_output) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_ndp_err = %s\n", "NONE");
+	}
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_TXBF_OFDMA_BE_BRP_STATS_TLV (11be OFDMA BRPOLL counters,
+ * including CBF-receive error count).  Non-zero per-element counters are
+ * printed as 1-based "index:value" pairs; an all-zero array collapses
+ * to "<name> = NONE".
+ */
+static inline void
+htt_print_txbf_ofdma_be_brp_stats_tlv(const void *tag_buf,
+				      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_txbf_ofdma_be_brp_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int i, null_output;
+
+	u32 num_elements = htt_stats_buf->num_elems_be_brp_arr;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_BE_BRP_STATS_TLV:\n");
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->be_brp[i].be_ofdma_brpoll_queued) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+				" %u:%u,", i + 1,
+				htt_stats_buf->be_brp[i].be_ofdma_brpoll_queued);
+		}
+	}
+	if (null_output) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_brpoll_queued = %s\n", "NONE");
+	}
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->be_brp[i].be_ofdma_brpoll_tried) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+				" %u:%u,", i + 1,
+				htt_stats_buf->be_brp[i].be_ofdma_brpoll_tried);
+		}
+	}
+	if (null_output) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_brpoll_tried = %s\n", "NONE");
+	}
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->be_brp[i].be_ofdma_brpoll_flushed) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+				" %u:%u,", i + 1,
+				htt_stats_buf->be_brp[i].be_ofdma_brpoll_flushed);
+		}
+	}
+	if (null_output) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_brpoll_flushed = %s\n", "NONE");
+	}
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->be_brp[i].be_ofdma_brp_err) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+				" %u:%u,", i + 1,
+				htt_stats_buf->be_brp[i].be_ofdma_brp_err);
+		}
+	}
+	if (null_output) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_brp_err = %s\n", "NONE");
+	}
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->be_brp[i].be_ofdma_brp_err_num_cbf_rcvd) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+				" %u:%u,", i + 1,
+				htt_stats_buf->be_brp[i].be_ofdma_brp_err_num_cbf_rcvd);
+		}
+	}
+	if (null_output) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_brp_err_num_cbf_rcvd = %s\n", "NONE");
+	}
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_TXBF_OFDMA_BE_STEER_STATS_TLV (11be OFDMA steering/sounding
+ * counters).  Non-zero per-element counters are printed as 1-based
+ * "index:value" pairs; an all-zero array collapses to "<name> = NONE".
+ */
+static inline void
+htt_print_txbf_ofdma_be_steer_stats_tlv(const void *tag_buf,
+					struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_txbf_ofdma_be_steer_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int i, null_output;
+
+	u32 num_elements = htt_stats_buf->num_elems_be_steer_arr;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TXBF_OFDMA_BE_STEER_STATS_TLV:\n");
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->be_steer[i].be_ofdma_num_ppdu_steer) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+				" %u:%u,", i + 1,
+				htt_stats_buf->be_steer[i].be_ofdma_num_ppdu_steer);
+		}
+	}
+	if (null_output) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_num_ppdu_steer = %s\n", "NONE");
+	}
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->be_steer[i].be_ofdma_num_ppdu_ol) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+				" %u:%u,", i + 1,
+				htt_stats_buf->be_steer[i].be_ofdma_num_ppdu_ol);
+		}
+	}
+	if (null_output) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_num_ppdu_ol = %s\n", "NONE");
+	}
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->be_steer[i].be_ofdma_num_usrs_prefetch) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+				" %u:%u,", i + 1,
+				htt_stats_buf->be_steer[i].be_ofdma_num_usrs_prefetch);
+		}
+	}
+	if (null_output) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_num_usrs_prefetch = %s\n", "NONE");
+	}
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->be_steer[i].be_ofdma_num_usrs_sound) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+				" %u:%u,", i + 1,
+				htt_stats_buf->be_steer[i].be_ofdma_num_usrs_sound);
+		}
+	}
+	if (null_output) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_num_usrs_sound = %s\n", "NONE");
+	}
+
+	null_output = 1;
+	for (i = 0; i < num_elements; i++) {
+		if (htt_stats_buf->be_steer[i].be_ofdma_num_usrs_force_sound) {
+			null_output = 0;
+			len += scnprintf(buf + len, buf_len - len,
+				" %u:%u,", i + 1,
+				htt_stats_buf->be_steer[i].be_ofdma_num_usrs_force_sound);
+		}
+	}
+	if (null_output) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_ofdma_num_usrs_force_sound = %s\n", "NONE");
+	}
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_DMAC_RESET_STATS_TLV: DMAC reset count, split hi/lo 32-bit
+ * millisecond timestamps for reset/disengage/engage events, disengage
+ * count and the drain destination ring mask.  One "name = value" line
+ * per field.
+ */
+static inline void
+htt_print_dmac_reset_stats_tlv(const void *tag_buf,
+			       struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_dmac_reset_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_DMAC_RESET_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "reset_count = %u\n",
+			 htt_stats_buf->reset_count);
+	len += scnprintf(buf + len, buf_len - len, "reset_time_hi_ms = 0x%x\n",
+			 htt_stats_buf->reset_time_hi_ms);
+	len += scnprintf(buf + len, buf_len - len, "reset_time_lo_ms = 0x%x\n",
+			 htt_stats_buf->reset_time_lo_ms);
+	len += scnprintf(buf + len, buf_len - len, "disengage_time_hi_ms = 0x%x\n",
+			 htt_stats_buf->disengage_time_hi_ms);
+	len += scnprintf(buf + len, buf_len - len, "disengage_time_lo_ms = 0x%x\n",
+			 htt_stats_buf->disengage_time_lo_ms);
+	len += scnprintf(buf + len, buf_len - len, "engage_time_hi_ms = 0x%x\n",
+			 htt_stats_buf->engage_time_hi_ms);
+	len += scnprintf(buf + len, buf_len - len, "engage_time_lo_ms = 0x%x\n",
+			 htt_stats_buf->engage_time_lo_ms);
+	len += scnprintf(buf + len, buf_len - len, "disengage_count = %u\n",
+			 htt_stats_buf->disengage_count);
+	len += scnprintf(buf + len, buf_len - len, "drain_dest_ring_mask = %u\n",
+			 htt_stats_buf->drain_dest_ring_mask);
+	stats_req->buf_len = len;
+}
+
+/* Dump one HTT_RX_PDEV_BE_UL_OFDMA_USER_STATS_TLV: per-user 11be UL
+ * OFDMA RX counters (PPDU/MPDU/user counts), each printed as
+ * "<counter>_<user_index> = <value>".  The TLV header line is emitted
+ * only for user index 0 so consecutive per-user TLVs share one header.
+ */
+static inline void
+htt_print_be_ul_ofdma_user_stats(const void *tag_buf,
+				 struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_rx_pdev_be_ul_ofdma_user_stats_tlv *htt_ul_user_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	if (htt_ul_user_stats_buf->user_index == 0) {
+		/* Fixed typo: header previously read "..._USER_STAS_TLV" */
+		len += scnprintf(buf + len, buf_len - len,
+				"HTT_RX_PDEV_BE_UL_OFDMA_USER_STATS_TLV\n");
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_rx_ulofdma_non_data_ppdu_%u = %u\n",
+			 htt_ul_user_stats_buf->user_index,
+			 htt_ul_user_stats_buf->be_rx_ulofdma_non_data_ppdu);
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_rx_ulofdma_data_ppdu_%u = %u\n",
+			 htt_ul_user_stats_buf->user_index,
+			 htt_ul_user_stats_buf->be_rx_ulofdma_data_ppdu);
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_rx_ulofdma_mpdu_ok_%u = %u\n",
+			 htt_ul_user_stats_buf->user_index,
+			 htt_ul_user_stats_buf->be_rx_ulofdma_mpdu_ok);
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_rx_ulofdma_mpdu_fail_%u = %u\n",
+			 htt_ul_user_stats_buf->user_index,
+			 htt_ul_user_stats_buf->be_rx_ulofdma_mpdu_fail);
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_rx_ulofdma_non_data_nusers_%u = %u\n",
+			 htt_ul_user_stats_buf->user_index,
+			 htt_ul_user_stats_buf->be_rx_ulofdma_non_data_nusers);
+	len += scnprintf(buf + len, buf_len - len,
+			 "be_rx_ulofdma_data_nusers_%u = %u\n",
+			 htt_ul_user_stats_buf->user_index,
+			 htt_ul_user_stats_buf->be_rx_ulofdma_data_nusers);
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_PHY_TPC_STATS_TLV: transmit power control state per pdev.
+ * Scalar fields are printed one per line ("tpc_stats : name = value");
+ * the per-chain and per-subband arrays are printed inline as
+ * "index:value," lists.
+ */
+static inline void
+htt_print_phy_tpc_stats_tlv(const void *tag_buf,
+			    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_phy_tpc_stats_tlv *htt_stats_phy_tpc_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int i;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PHY_TPC_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "tpc_stats : pdev_id = %u\n",
+			 htt_stats_phy_tpc_stats_buf->pdev_id);
+	len += scnprintf(buf + len, buf_len - len,
+			 "tpc_stats : tx_power_scale = %u\n",
+			 htt_stats_phy_tpc_stats_buf->tx_power_scale);
+	len += scnprintf(buf + len, buf_len - len,
+			 "tpc_stats : tx_power_scale_db = %u\n",
+			 htt_stats_phy_tpc_stats_buf->tx_power_scale_db);
+	len += scnprintf(buf + len, buf_len - len,
+			 "tpc_stats : min_negative_tx_power = %d\n",
+			 htt_stats_phy_tpc_stats_buf->min_negative_tx_power);
+	len += scnprintf(buf + len, buf_len - len,
+			 "tpc_stats : reg_ctl_domain = %u\n",
+			 htt_stats_phy_tpc_stats_buf->reg_ctl_domain);
+	len += scnprintf(buf + len, buf_len - len,
+			 "tpc_stats : twice_max_rd_power = %u\n",
+			 htt_stats_phy_tpc_stats_buf->twice_max_rd_power);
+	len += scnprintf(buf + len, buf_len - len,
+			 "tpc_stats : max_tx_power = %u\n",
+			 htt_stats_phy_tpc_stats_buf->max_tx_power);
+	len += scnprintf(buf + len, buf_len - len,
+			 "tpc_stats : home_max_tx_power = %u\n",
+			 htt_stats_phy_tpc_stats_buf->home_max_tx_power);
+	len += scnprintf(buf + len, buf_len - len,
+			 "tpc_stats : psd_power = %d\n",
+			 htt_stats_phy_tpc_stats_buf->psd_power);
+	len += scnprintf(buf + len, buf_len - len,
+			 "tpc_stats : eirp_power = %u\n",
+			 htt_stats_phy_tpc_stats_buf->eirp_power);
+	len += scnprintf(buf + len, buf_len - len,
+			 "tpc_stats : power_type_6ghz = %u\n",
+			 htt_stats_phy_tpc_stats_buf->power_type_6ghz);
+
+	/* Per-chain regulatory power limits (signed dBm-style values). */
+	len += scnprintf(buf + len, buf_len - len,
+			 "tpc_stats : max_reg_allowed_power = ");
+	for (i = 0; i < HTT_STATS_MAX_CHAINS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 " %u:%d,", i,
+				 htt_stats_phy_tpc_stats_buf->max_reg_allowed_power[i]);
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "\ntpc_stats : max_reg_allowed_power_6g = ");
+	for (i = 0; i < HTT_STATS_MAX_CHAINS; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 " %u:%d,", i,
+				 htt_stats_phy_tpc_stats_buf->max_reg_allowed_power_6g[i]);
+	}
+
+	/* Per-subband center frequency and TX power tables. */
+	len += scnprintf(buf + len, buf_len - len,
+			 "\ntpc_stats : sub_band_cfreq = ");
+	for (i = 0; i < HTT_MAX_CH_PWR_INFO_SIZE; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 " %u:%u,", i,
+				 htt_stats_phy_tpc_stats_buf->sub_band_cfreq[i]);
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "\ntpc_stats : sub_band_txpower = ");
+	for (i = 0; i < HTT_MAX_CH_PWR_INFO_SIZE; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 " %u:%u,", i,
+				 htt_stats_phy_tpc_stats_buf->sub_band_txpower[i]);
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "\n=========================================== \n");
+	stats_req->buf_len = len;
+}
+
+/* Map an HTT stats direction enum to its short string form ("tx"/"rx").
+ * Unknown values return "unknown" and append a warning line to the
+ * stats buffer (which is why stats_req is taken and buf_len updated).
+ */
+static const char *htt_stats_direction_abbrev(enum htt_stats_direction direction,
+					      struct debug_htt_stats_req *stats_req)
+{
+	const char *direction_str = "unknown";
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	switch (direction) {
+	case HTT_STATS_DIRECTION_TX:
+		direction_str = "tx";
+		break;
+	case HTT_STATS_DIRECTION_RX:
+		direction_str = "rx";
+		break;
+	default:
+		len += scnprintf(buf + len, buf_len - len,
+			"warning: encountered unexpected HTT_STATS_DIRECTION: %d",
+			direction);
+		/* direction_str already initialized to "unknown";
+		 * redundant reassignment removed.
+		 */
+	}
+
+	stats_req->buf_len = len;
+	return direction_str;
+}
+
+/* Map an HTT stats PPDU type enum to its short string form
+ * ("su"/"dl_mu_mimo"/...).  Unknown values return "unknown" and append
+ * a warning line to the stats buffer.
+ */
+static const char *htt_stats_ppdu_type_abbrev(enum htt_stats_ppdu_type ppdu_type,
+					      struct debug_htt_stats_req *stats_req)
+{
+	const char *ppdu_type_str = "unknown";
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	switch (ppdu_type) {
+	case HTT_STATS_PPDU_TYPE_MODE_SU:
+		ppdu_type_str = "su";
+		break;
+	case HTT_STATS_PPDU_TYPE_DL_MU_MIMO:
+		ppdu_type_str = "dl_mu_mimo";
+		break;
+	case HTT_STATS_PPDU_TYPE_UL_MU_MIMO:
+		ppdu_type_str = "ul_mu_mimo";
+		break;
+	case HTT_STATS_PPDU_TYPE_DL_MU_OFDMA:
+		ppdu_type_str = "dl_mu_ofdma";
+		break;
+	case HTT_STATS_PPDU_TYPE_UL_MU_OFDMA:
+		ppdu_type_str = "ul_mu_ofdma";
+		break;
+	default:
+		len += scnprintf(buf + len, buf_len - len,
+			"warning: encountered unexpected HTT_STATS_PPDU_TYPE: %d",
+			ppdu_type);
+	}
+
+	stats_req->buf_len = len;
+	return ppdu_type_str;
+}
+
+/* Map an HTT stats preamble type enum to the 802.11 generation
+ * abbreviation used in stat names ("ofdm"/"cck"/"ht"/"ac"/"ax"/"be").
+ * Unknown values return "unknown" and append a warning line to the
+ * stats buffer.
+ */
+static const char *htt_stats_pream_type_abbrev(enum htt_stats_param_type pream_type,
+					       struct debug_htt_stats_req *stats_req)
+{
+	const char *pream_type_str = "unknown";
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	switch (pream_type) {
+	case HTT_STATS_PREAM_OFDM:
+		pream_type_str = "ofdm";
+		break;
+	case HTT_STATS_PREAM_CCK:
+		pream_type_str = "cck";
+		break;
+	case HTT_STATS_PREAM_HT:
+		pream_type_str = "ht";
+		break;
+	case HTT_STATS_PREAM_VHT:
+		pream_type_str = "ac";
+		break;
+	case HTT_STATS_PREAM_HE:
+		pream_type_str = "ax";
+		break;
+	case HTT_STATS_PREAM_EHT:
+		pream_type_str = "be";
+		break;
+	default:
+		len += scnprintf(buf + len, buf_len - len,
+			"warning: encountered unexpected HTT_STATS_PREAM_TYPE: %d",
+			pream_type);
+	}
+
+	stats_req->buf_len = len;
+	return pream_type_str;
+}
+
+/* Dump HTT_PDEV_PUNCTURE_STATS_TLV: preamble-puncturing statistics for
+ * one direction/preamble/ppdu-type combination.  Stat names are
+ * prefixed "<dir>_<pream>_<ppdu>_" via the abbrev helpers (which may
+ * themselves append warning text for unknown enum values).  The
+ * per-subband loop is bounded by both the firmware-reported
+ * subband_count and the fixed array size.
+ */
+static inline void
+htt_print_puncture_stats_tlv(const void *tag_buf,
+			     struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_pdev_puncture_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int i;
+	const char *direction_str = NULL;
+	const char *preamble_str = NULL;
+	const char *ppdu_type_str = NULL;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_PDEV_PUNCTURE_STATS_TLV:\n");
+
+	direction_str = htt_stats_direction_abbrev(htt_stats_buf->direction, stats_req);
+	ppdu_type_str = htt_stats_ppdu_type_abbrev(htt_stats_buf->ppdu_type, stats_req);
+	preamble_str = htt_stats_pream_type_abbrev(htt_stats_buf->preamble, stats_req);
+
+	/* Re-read len? No: the abbrev helpers update stats_req->buf_len,
+	 * not our local len -- NOTE(review): any warning text they append
+	 * is overwritten by the next scnprintf below; confirm intended.
+	 */
+	len += scnprintf(buf + len, buf_len - len,
+			 "mac_id = %u\n",
+			 HTT_PDEV_PUNCTURE_STATS_MAC_ID_GET(htt_stats_buf->mac_id__word));
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "%s_%s_%s_last_used_pattern_mask: 0x%08x\n",
+			 direction_str, preamble_str, ppdu_type_str,
+			 htt_stats_buf->last_used_pattern_mask);
+
+	for (i = 0; (i < HTT_PUNCTURE_STATS_MAX_SUBBAND_COUNT) &&
+			(i < htt_stats_buf->subband_count); ++i) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "%s_%s_%s_num_subbands_used_cnt_%02d: %u\n",
+				 direction_str, preamble_str, ppdu_type_str,
+				 i + 1, htt_stats_buf->num_subbands_used_cnt[i]);
+	}
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_ML_PEER_DETAILS_TLV: multi-link peer identity and link
+ * configuration.  The remote MLD MAC address is reassembled
+ * byte-by-byte from the packed l32/h16 words (lowest byte first);
+ * the remaining fields are bitfields extracted from msg_dword_1/2 via
+ * the HTT_ML_PEER_DETAILS_*_GET accessors.
+ */
+static inline void
+htt_print_ml_peer_details_stats_tlv(const void *tag_buf,
+				    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_ml_peer_details_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_ML_PEER_DETAILS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "========================\n");
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "remote_mld_mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
+			 (htt_stats_buf->remote_mld_mac_addr.mac_addr_l32 & 0xFF),
+			 (htt_stats_buf->remote_mld_mac_addr.mac_addr_l32 & 0xFF00) >> 8,
+			 (htt_stats_buf->remote_mld_mac_addr.mac_addr_l32 & 0xFF0000) >> 16,
+			 (htt_stats_buf->remote_mld_mac_addr.mac_addr_l32 & 0xFF000000) >> 24,
+			 (htt_stats_buf->remote_mld_mac_addr.mac_addr_h16 & 0xFF),
+			 (htt_stats_buf->remote_mld_mac_addr.mac_addr_h16 & 0xFF00) >> 8);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "ml_peer_flags = 0x%x\n",
+			 htt_stats_buf->ml_peer_flags);
+
+	len += scnprintf(buf + len, buf_len - len,
+		"num_links = %u\n",
+		HTT_ML_PEER_DETAILS_NUM_LINKS_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+		"ml_peer_id = %u\n",
+		HTT_ML_PEER_DETAILS_ML_PEER_ID_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+		"primary_link_idx = %u\n",
+		HTT_ML_PEER_DETAILS_PRIMARY_LINK_IDX_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+		"primary_chip_id = %u\n",
+		HTT_ML_PEER_DETAILS_PRIMARY_CHIP_ID_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+		"link_init_count = %u\n",
+		HTT_ML_PEER_DETAILS_LINK_INIT_COUNT_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+		"non_str = %u\n",
+		HTT_ML_PEER_DETAILS_NON_STR_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+		"emlsr = %u\n",
+		HTT_ML_PEER_DETAILS_EMLSR_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+		"is_stako = %u\n",
+		HTT_ML_PEER_DETAILS_IS_STA_KO_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+		"num_local_links = %u\n",
+		HTT_ML_PEER_DETAILS_NUM_LOCAL_LINKS_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+		"allocated = %u\n",
+		HTT_ML_PEER_DETAILS_ALLOCATED_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+		"participating_chips_bitmap = 0x%x\n",
+		HTT_ML_PEER_DETAILS_PARTICIPATING_CHIPS_BITMAP_GET(
+			htt_stats_buf->msg_dword_2));
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "=========================================== \n");
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_ML_PEER_EXT_TLV: extended multi-link peer details
+ * (association IPC / scheduled delete counters and MLD AST index),
+ * all extracted as bitfields from msg_dword_1.
+ */
+static inline void
+htt_print_ml_peer_ext_stats_tlv(const void *tag_buf,
+				struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_ml_peer_ext_details_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_ML_PEER_EXT_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "====================\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "peer_assoc_ipc_recvd    = %u\n",
+			 HTT_ML_PEER_EXT_DETAILS_PEER_ASSOC_IPC_RECVD_GET(
+				 htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+			 "sched_peer_delete_recvd = %u\n",
+			 HTT_ML_PEER_EXT_DETAILS_SCHED_PEER_DELETE_RECVD_GET(
+				 htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+			 "mld_ast_index           = %u\n",
+			 HTT_ML_PEER_EXT_DETAILS_MLD_AST_INDEX_GET(htt_stats_buf->msg_dword_1));
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "=========================================== \n");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_ML_LINK_INFO_TLV: per-link state of a multi-link peer.
+ * Link flags and IDs come from msg_dword_1, peer/vdev IDs from
+ * msg_dword_2 (all via HTT_ML_LINK_INFO_*_GET bitfield accessors),
+ * plus the primary TID bitmask.
+ */
+static inline void
+htt_print_ml_link_info_stats_tlv(const void *tag_buf,
+				 struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_ml_link_info_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_ML_LINK_INFO_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "=====================\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "valid             = %u\n",
+			 HTT_ML_LINK_INFO_VALID_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+			 "active            = %u\n",
+			 HTT_ML_LINK_INFO_ACTIVE_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+			 "primary           = %u\n",
+			 HTT_ML_LINK_INFO_PRIMARY_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+			 "assoc_link        = %u\n",
+			 HTT_ML_LINK_INFO_ASSOC_LINK_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+			 "chip_id           = %u\n",
+			 HTT_ML_LINK_INFO_CHIP_ID_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+			 "ieee_link_id      = %u\n",
+			 HTT_ML_LINK_INFO_IEEE_LINK_ID_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+			 "hw_link_id        = %u\n",
+			 HTT_ML_LINK_INFO_HW_LINK_ID_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+			 "logical_link_id   = %u\n",
+			 HTT_ML_LINK_INFO_LOGICAL_LINK_ID_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+			 "master_link       = %u\n",
+			 HTT_ML_LINK_INFO_MASTER_LINK_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+			 "anchor_link       = %u\n",
+			 HTT_ML_LINK_INFO_ANCHOR_LINK_GET(htt_stats_buf->msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len,
+			 "initialized       = %u\n",
+			 HTT_ML_LINK_INFO_INITIALIZED_GET(htt_stats_buf->msg_dword_1));
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "sw_peer_id        = %u\n",
+			 HTT_ML_LINK_INFO_SW_PEER_ID_GET(htt_stats_buf->msg_dword_2));
+	len += scnprintf(buf + len, buf_len - len,
+			 "vdev_id           = %u\n",
+			 HTT_ML_LINK_INFO_VDEV_ID_GET(htt_stats_buf->msg_dword_2));
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "primary_tid_mask  = 0x%x\n",
+			 htt_stats_buf->primary_tid_mask);
+	len += scnprintf(buf + len, buf_len - len,
+			 "=========================================== \n");
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_TX_PDEV_PPDU_DUR_STATS_TLV: pdev uptime and TX
+ * success/fail time (split hi/lo 32-bit microsecond counters) plus a
+ * TX PPDU duration histogram.  The histogram is formatted into a
+ * scratch string in two halves to keep each half within
+ * HTT_MAX_STRING_LEN.
+ * NOTE(review): snprintf returns the would-be length, so on truncation
+ * "index" can exceed HTT_MAX_STRING_LEN and the next
+ * "HTT_MAX_STRING_LEN - index" underflows -- confirm the bin count
+ * guarantees no truncation, or switch to scnprintf.
+ */
+static inline void
+htt_print_tx_pdev_ppdu_dur_stats_tlv(const void *tag_buf,
+				     struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_ppdu_dur_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8  i, j;
+	u16 index = 0;
+	char data[HTT_MAX_STRING_LEN] = {0};
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_PPDU_DUR_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "pdev_up_time_us_low = %u\n",
+			 htt_stats_buf->pdev_up_time_us_low);
+	len += scnprintf(buf + len, buf_len - len, "pdev_up_time_us_high = %u\n",
+			 htt_stats_buf->pdev_up_time_us_high);
+	len += scnprintf(buf + len, buf_len - len, "tx_success_time_us_low = %u\n",
+			 htt_stats_buf->tx_success_time_us_low);
+	len += scnprintf(buf + len, buf_len - len, "tx_success_time_us_high = %u\n",
+			 htt_stats_buf->tx_success_time_us_high);
+	len += scnprintf(buf + len, buf_len - len, "tx_fail_time_us_low = %u\n",
+			 htt_stats_buf->tx_fail_time_us_low);
+	len += scnprintf(buf + len, buf_len - len, "tx_fail_time_us_high = %u\n",
+			 htt_stats_buf->tx_fail_time_us_high);
+
+	/* Split the buffer store mechanism into two to avoid data buffer overflow
+	 */
+	for (i = 0; i < HTT_PDEV_STATS_PPDU_DUR_HIST_BINS >> 1; i++) {
+		index += snprintf(&data[index],
+				  HTT_MAX_STRING_LEN - index,
+				  " %u-%u : %u,",
+				  i * HTT_PDEV_STATS_PPDU_DUR_HIST_INTERVAL_US,
+				  (i + 1) * HTT_PDEV_STATS_PPDU_DUR_HIST_INTERVAL_US,
+				  htt_stats_buf->tx_ppdu_dur_hist[i]);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "tx_ppdu_dur_hist_us_0 = %s\n", data);
+	memset(data, '\0', sizeof(char) * HTT_MAX_STRING_LEN);
+	index = 0;
+
+	/* Second half continues from bin i (= BINS/2) to the last bin. */
+	for (j = i; j < HTT_PDEV_STATS_PPDU_DUR_HIST_BINS; j++) {
+		index += snprintf(&data[index],
+				  HTT_MAX_STRING_LEN - index,
+				  " %u-%u : %u,",
+				  j * HTT_PDEV_STATS_PPDU_DUR_HIST_INTERVAL_US,
+				  (j + 1) * HTT_PDEV_STATS_PPDU_DUR_HIST_INTERVAL_US,
+				  htt_stats_buf->tx_ppdu_dur_hist[j]);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "tx_ppdu_dur_hist_us_1 = %s\n", data);
+	len += scnprintf(buf + len, buf_len - len,
+			 "=========================================== \n");
+	stats_req->buf_len = len;
+}
+
+/* Dump HTT_RX_PDEV_PPDU_DUR_STATS_TLV: RX PPDU duration histogram,
+ * formatted into a scratch string in two halves to keep each half
+ * within HTT_MAX_STRING_LEN.
+ * NOTE(review): snprintf returns the would-be length, so on truncation
+ * "index" can exceed HTT_MAX_STRING_LEN and the next
+ * "HTT_MAX_STRING_LEN - index" underflows -- confirm the bin count
+ * guarantees no truncation, or switch to scnprintf.
+ */
+static inline void
+htt_print_rx_pdev_ppdu_dur_stats_tlv(const void *tag_buf,
+				     struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_rx_pdev_ppdu_dur_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8  i, j;
+	u16 index = 0;
+	char data[HTT_MAX_STRING_LEN] = {0};
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_PPDU_DUR_STATS_TLV:\n");
+
+	/* Split the buffer store mechanism into two to avoid data buffer overflow
+	 */
+	for (i = 0; i < HTT_PDEV_STATS_PPDU_DUR_HIST_BINS >> 1; i++) {
+		index += snprintf(&data[index],
+				  HTT_MAX_STRING_LEN - index,
+				  " %u-%u : %u,",
+				  i * HTT_PDEV_STATS_PPDU_DUR_HIST_INTERVAL_US,
+				  (i + 1) * HTT_PDEV_STATS_PPDU_DUR_HIST_INTERVAL_US,
+				  htt_stats_buf->rx_ppdu_dur_hist[i]);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "rx_ppdu_dur_hist_us_0 = %s\n", data);
+	memset(data, '\0', sizeof(char) * HTT_MAX_STRING_LEN);
+	index = 0;
+
+	/* Second half continues from bin i (= BINS/2) to the last bin. */
+	for (j = i; j < HTT_PDEV_STATS_PPDU_DUR_HIST_BINS; j++) {
+		index += snprintf(&data[index],
+				  HTT_MAX_STRING_LEN - index,
+				  " %u-%u : %u,",
+				  j * HTT_PDEV_STATS_PPDU_DUR_HIST_INTERVAL_US,
+				  (j + 1) * HTT_PDEV_STATS_PPDU_DUR_HIST_INTERVAL_US,
+				  htt_stats_buf->rx_ppdu_dur_hist[j]);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "rx_ppdu_dur_hist_us_1 = %s\n", data);
+	len += scnprintf(buf + len, buf_len - len,
+			 "=========================================== \n");
+	stats_req->buf_len = len;
+}
+
+/* Dump the per-AC DL-OFDMA scheduler algorithm counters (rate-based and
+ * channel-access-latency-based enable/disable/probe/monitor decisions)
+ * into the debugfs stats buffer.  Each array holds HTT_NUM_AC_WMM
+ * entries, one per WMM access category.
+ *
+ * NOTE(review): unlike the sibling printers, no "HTT_..._TLV:" banner
+ * is emitted before the arrays — confirm whether that is intentional.
+ */
+static inline void
+htt_print_pdev_sched_algo_ofdma_stats_tlv(const void *tag_buf,
+					  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_pdev_sched_algo_ofdma_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->rate_based_dlofdma_enabled_count,
+			   "rate_based_dlofdma_enabled_count",
+			   HTT_NUM_AC_WMM, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->rate_based_dlofdma_disabled_count,
+			   "rate_based_dlofdma_disabled_count",
+			   HTT_NUM_AC_WMM, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->rate_based_dlofdma_probing_count,
+			   "rate_based_dlofdma_probing_count",
+			   HTT_NUM_AC_WMM, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->rate_based_dlofdma_monitoring_count,
+			   "rate_based_dlofdma_monitoring_count",
+			   HTT_NUM_AC_WMM, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->chan_acc_lat_based_dlofdma_enabled_count,
+			   "chan_acc_lat_based_dlofdma_enabled_count",
+			   HTT_NUM_AC_WMM, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->chan_acc_lat_based_dlofdma_disabled_count,
+			   "chan_acc_lat_based_dlofdma_disabled_count",
+			   HTT_NUM_AC_WMM, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->chan_acc_lat_based_dlofdma_monitoring_count,
+			   "chan_acc_lat_based_dlofdma_monitoring_count",
+			   HTT_NUM_AC_WMM, "\n");
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "=========================================== \n");
+	stats_req->buf_len = len;
+}
+
+/* Dump the per-profile calibration latency table into the debugfs
+ * stats buffer: one header row per profile, then one row per enabled
+ * calibration index with count/min/max/last/total, a 3-bucket
+ * histogram, and power-fail counters.
+ *
+ * Fix: print the bounded local copy 'latency_prof_stat_name' instead
+ * of the raw firmware string.  The copy is made precisely because
+ * latency_prof_name[] from firmware may not be NUL-terminated
+ * (HTT_STATS_MAX_PROF_STATS_NAME_LEN bytes into a +1-sized,
+ * zero-initialized buffer); the original code built the copy and then
+ * never used it, passing the unterminated source to scnprintf("%-32s").
+ *
+ * NOTE(review): the loop starts at i = 1, skipping profile 0 —
+ * presumably index 0 is reserved; confirm against the firmware
+ * interface definition.
+ */
+static inline void
+htt_print_latency_prof_cal_stats_tlv(const void *tag_buf,
+				     struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_latency_prof_cal_stats_tlv *htt_stats_buf = tag_buf;
+	char latency_prof_stat_name[HTT_STATS_MAX_PROF_STATS_NAME_LEN + 1] = {0};
+	u32 i, j;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_STATS_LATENCY_PROF_CAL_TLV:\n");
+	for (i = 1; i < HTT_STATS_MAX_PROF_CAL; i++) {
+		/* Copy into a buffer one byte larger than the name field so
+		 * the string is always NUL-terminated before printing.
+		 */
+		memcpy(latency_prof_stat_name,
+		       (htt_stats_buf->latency_prof_name[i]),
+		       HTT_STATS_MAX_PROF_STATS_NAME_LEN);
+
+		len += scnprintf(buf + len, buf_len - len,
+				"%-32s",
+				latency_prof_stat_name);
+		len += scnprintf(buf + len, buf_len - len,
+				 "|%9s|%8s|%8s|%8s|%8s|%8s|%10s|%14s|%8s|%8s|%8s|\n",
+				 "cal_index", "cnt", "min", "max", "last", "tot",
+				 "hist_intvl", "hist", "pf_last", "pf_tot",
+				 "pf_max");
+
+		for (j = 0; j < htt_stats_buf->CalCnt[i]; j++) {
+			len += scnprintf(buf + len, buf_len - len,
+					 "|%9u|%8u|%8u|%8u|%8u|%8u|%10u|%4u:%4u:%4u|%8u|%8u|%8u|\n",
+					 htt_stats_buf->enabledCalIdx[i][j],
+					 htt_stats_buf->cnt[i][j],
+					 htt_stats_buf->min[i][j],
+					 htt_stats_buf->max[i][j],
+					 htt_stats_buf->last[i][j],
+					 htt_stats_buf->tot[i][j],
+					 htt_stats_buf->hist_intvl[i][j],
+					 htt_stats_buf->hist[i][j][0],
+					 htt_stats_buf->hist[i][j][1],
+					 htt_stats_buf->hist[i][j][2],
+					 htt_stats_buf->pf_last[i][j],
+					 htt_stats_buf->pf_tot[i][j],
+					 htt_stats_buf->pf_max[i][j]);
+		}
+	}
+	len += scnprintf(buf + len, buf_len - len,
+			 "=========================================== \n");
+	stats_req->buf_len = len;
+}
+
+/* Dump the AP's EDCA parameter selections for UL MU-MIMO and UL OFDMA
+ * into the debugfs stats buffer — one aggressiveness-level counter per
+ * WMM access category (HTT_NUM_AC_WMM entries each).
+ */
+static inline void
+htt_print_tx_pdev_ap_edca_params_stats_tlv_v(const void *tag_buf,
+					     struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_ap_edca_params_stats_tlv_v *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int i;
+
+	len += scnprintf(buf + len, buf_len - len, "\nAP EDCA PARAMETERS FOR UL MUMIMO:\n");
+	for (i = 0; i < HTT_NUM_AC_WMM; i++) {
+		len += scnprintf(buf + len, buf_len - len, "ul_mumimo_less_aggressive[%u] = %u\n",
+			i, htt_stats_buf->ul_mumimo_less_aggressive[i]);
+		len += scnprintf(buf + len, buf_len - len, "ul_mumimo_medium_aggressive[%u] = %u\n",
+			i, htt_stats_buf->ul_mumimo_medium_aggressive[i]);
+		len += scnprintf(buf + len, buf_len - len, "ul_mumimo_highly_aggressive[%u] = %u\n",
+			i, htt_stats_buf->ul_mumimo_highly_aggressive[i]);
+		len += scnprintf(buf + len, buf_len - len, "ul_mumimo_default_relaxed[%u] = %u\n",
+			i, htt_stats_buf->ul_mumimo_default_relaxed[i]);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "AP EDCA PARAMETERS FOR UL OFDMA:\n");
+	for (i = 0; i < HTT_NUM_AC_WMM; i++) {
+		len += scnprintf(buf + len, buf_len - len, "ul_muofdma_less_aggressive[%u] = %u\n",
+			i, htt_stats_buf->ul_muofdma_less_aggressive[i]);
+		len += scnprintf(buf + len, buf_len - len, "ul_muofdma_medium_aggressive[%u] = %u\n",
+			i, htt_stats_buf->ul_muofdma_medium_aggressive[i]);
+		len += scnprintf(buf + len, buf_len - len, "ul_muofdma_highly_aggressive[%u] = %u\n",
+			i, htt_stats_buf->ul_muofdma_highly_aggressive[i]);
+		len += scnprintf(buf + len, buf_len - len, "ul_muofdma_default_relaxed[%u] = %u\n",
+			i, htt_stats_buf->ul_muofdma_default_relaxed[i]);
+	}
+	len += scnprintf(buf + len, buf_len - len,
+			 "=========================================== \n");
+	stats_req->buf_len = len;
+}
+
+/* Dump 11ax (HE) OFDMA TxBF steering MPDU counters — tried/failed for
+ * both RBO and SIFS steering — into the debugfs stats buffer.
+ */
+static inline void
+htt_print_txbf_ofdma_ax_steer_mpdu_stats_tlv(const void *tag_buf,
+					     struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_txbf_ofdma_ax_steer_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_STEER_MPDU_STATS_TLV:\n");
+
+	len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_tried = %u\n",
+			 htt_stats_buf->ax_ofdma_rbo_steer_mpdus_tried);
+	len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_failed = %u\n",
+			 htt_stats_buf->ax_ofdma_rbo_steer_mpdus_failed);
+	len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_tried = %u\n",
+			 htt_stats_buf->ax_ofdma_sifs_steer_mpdus_tried);
+	len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_failed = %u\n",
+			 htt_stats_buf->ax_ofdma_sifs_steer_mpdus_failed);
+	len += scnprintf(buf + len, buf_len - len,
+			 "=========================================== \n");
+	stats_req->buf_len = len;
+}
+
+/* Dump 11be (EHT) OFDMA TxBF steering MPDU counters — tried/failed for
+ * both RBO and SIFS steering — into the debugfs stats buffer.  Mirrors
+ * the 11ax variant above but reads the be_ofdma_* fields.
+ */
+static inline void
+htt_print_txbf_ofdma_be_steer_mpdu_stats_tlv(const void *tag_buf,
+					     struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_txbf_ofdma_be_steer_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_BE_STEER_MPDU_STATS_TLV:\n");
+
+	len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_tried = %u\n",
+			 htt_stats_buf->be_ofdma_rbo_steer_mpdus_tried);
+	len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_failed = %u\n",
+			 htt_stats_buf->be_ofdma_rbo_steer_mpdus_failed);
+	len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_tried = %u\n",
+			 htt_stats_buf->be_ofdma_sifs_steer_mpdus_tried);
+	len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_failed = %u\n",
+			 htt_stats_buf->be_ofdma_sifs_steer_mpdus_failed);
+	len += scnprintf(buf + len, buf_len - len,
+			 "=========================================== \n");
+	stats_req->buf_len = len;
+}
+
+/* Dump per-peer 11ax OFDMA trigger statistics (basic/BSR/MU-BAR trigger
+ * counts, error counts, and per-trigger PER counts) plus airtime and
+ * queue-depth arrays into the debugfs stats buffer.
+ */
+static inline void
+htt_print_peer_ax_ofdma_stats(const void *tag_buf,
+			      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_peer_ax_ofdma_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PEER_AX_OFDMA_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "peer_id = %u\n",
+			 htt_stats_buf->peer_id);
+	len += scnprintf(buf + len, buf_len - len, "ax_basic_trig_count = %u\n",
+			 htt_stats_buf->ax_basic_trig_count);
+	len += scnprintf(buf + len, buf_len - len, "ax_basic_trig_err = %u\n",
+			 htt_stats_buf->ax_basic_trig_err);
+	len += scnprintf(buf + len, buf_len - len, "ax_bsr_trig_count = %u\n",
+			 htt_stats_buf->ax_bsr_trig_count);
+	len += scnprintf(buf + len, buf_len - len, "ax_bsr_trig_err = %u\n",
+			 htt_stats_buf->ax_bsr_trig_err);
+	len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trig_count = %u\n",
+			 htt_stats_buf->ax_mu_bar_trig_count);
+	len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trig_err = %u\n",
+			 htt_stats_buf->ax_mu_bar_trig_err);
+	len += scnprintf(buf + len, buf_len - len, "ax_basic_trig_with_per = %u\n",
+			 htt_stats_buf->ax_basic_trig_with_per);
+	len += scnprintf(buf + len, buf_len - len, "ax_bsr_trig_with_per = %u\n",
+			 htt_stats_buf->ax_bsr_trig_with_per);
+	len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trig_with_per = %u\n",
+			 htt_stats_buf->ax_mu_bar_trig_with_per);
+
+	/* Two-element arrays: presumably one entry per flag state —
+	 * TODO(review) confirm the index semantics against the HTT spec.
+	 */
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->is_airtime_large_for_dl_ofdma,
+			   "is_airtime_large_for_dl_ofdma", 2, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->is_airtime_large_for_ul_ofdma,
+			   "is_airtime_large_for_ul_ofdma", 2, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->last_updated_dl_qdepth,
+			   "last_updated_dl_qdepth", HTT_NUM_AC_WMM, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->last_updated_ul_qdepth,
+			   "last_updated_ul_qdepth", HTT_NUM_AC_WMM, "\n");
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "=========================================== \n");
+	stats_req->buf_len = len;
+}
+
+/* Dump the MU EDCA parameter arrays (relaxed / MU-MIMO aggressive /
+ * MU-OFDMA relaxed / latency / PSD-boost), one HTT_NUM_AC_WMM-sized
+ * array each, into the debugfs stats buffer.
+ *
+ * Fixes:
+ *  - drop the 'edca_buf' kmalloc(GFP_ATOMIC): it was allocated, never
+ *    used, and freed again — pure dead weight, and worse, an allocation
+ *    failure made the function silently skip all output via 'goto fail';
+ *  - correct the "irelaxed_mu_edca" label typo to "relaxed_mu_edca" so
+ *    the printed name matches the field.
+ */
+static inline void
+htt_print_tx_pdev_mu_edca_params_stats_tlv_v(const void *tag_buf,
+					     struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_mu_edca_params_stats_tlv_v *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_MU_EDCA_PARAMS_STATS_TLV:\n");
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->relaxed_mu_edca,
+			   "relaxed_mu_edca",
+			   HTT_NUM_AC_WMM, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->mumimo_aggressive_mu_edca,
+			   "mumimo_aggressive_mu_edca",
+			   HTT_NUM_AC_WMM, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->muofdma_relaxed_mu_edca,
+			   "muofdma_relaxed_mu_edca",
+			   HTT_NUM_AC_WMM, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->latency_mu_edca,
+			   "latency_mu_edca",
+			   HTT_NUM_AC_WMM, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->psd_boost_mu_edca,
+			   "psd_boost_mu_edca",
+			   HTT_NUM_AC_WMM, "\n");
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "=========================================== \n");
+	stats_req->buf_len = len;
+}
+
+/* Dump the "ODD" mandatory pdev TLV: a grab-bag of tx/rx scheduler,
+ * HWQ, reset, MCS/NSS/BW histogram and suspend counters, rendered as
+ * plain "name = value" lines and PRINT_ARRAY_TO_BUF arrays.
+ */
+static inline void
+htt_print_odd_pdev_mandatory_tlv(const void *tag_buf,
+				 struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_odd_mandatory_pdev_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_ODD_PDEV_MANDATORY_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
+			 htt_stats_buf->hw_queued);
+	len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
+			 htt_stats_buf->hw_reaped);
+	len += scnprintf(buf + len, buf_len - len, "hw_paused = %u\n",
+			 htt_stats_buf->hw_paused);
+	len += scnprintf(buf + len, buf_len - len, "hw_filt = %u\n",
+			 htt_stats_buf->hw_filt);
+	len += scnprintf(buf + len, buf_len - len, "seq_posted = %u\n",
+			 htt_stats_buf->seq_posted);
+	len += scnprintf(buf + len, buf_len - len, "seq_completed = %u\n",
+			 htt_stats_buf->seq_completed);
+	len += scnprintf(buf + len, buf_len - len, "underrun = %u\n",
+			 htt_stats_buf->underrun);
+	len += scnprintf(buf + len, buf_len - len, "hw_flush = %u\n",
+			 htt_stats_buf->hw_flush);
+	len += scnprintf(buf + len, buf_len - len, "next_seq_posted_dsr = %u\n",
+			 htt_stats_buf->next_seq_posted_dsr);
+	len += scnprintf(buf + len, buf_len - len, "seq_posted_isr = %u\n",
+			 htt_stats_buf->seq_posted_isr);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u\n",
+			 htt_stats_buf->mpdu_cnt_fcs_ok);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u\n",
+			 htt_stats_buf->mpdu_cnt_fcs_err);
+	len += scnprintf(buf + len, buf_len - len, "msdu_count_tqm = %u\n",
+			 htt_stats_buf->msdu_count_tqm);
+	len += scnprintf(buf + len, buf_len - len, "mpdu_count_tqm = %u\n",
+			 htt_stats_buf->mpdu_count_tqm);
+	len += scnprintf(buf + len, buf_len - len, "mpdus_ack_failed = %u\n",
+			 htt_stats_buf->mpdus_ack_failed);
+	len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u\n",
+			 htt_stats_buf->num_data_ppdus_tried_ota);
+	len += scnprintf(buf + len, buf_len - len, "ppdu_ok = %u\n",
+			 htt_stats_buf->ppdu_ok);
+	len += scnprintf(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u\n",
+			 htt_stats_buf->num_total_ppdus_tried_ota);
+	len += scnprintf(buf + len, buf_len - len, "thermal_suspend_cnt = %u\n",
+			 htt_stats_buf->thermal_suspend_cnt);
+	len += scnprintf(buf + len, buf_len - len, "dfs_suspend_cnt = %u\n",
+			 htt_stats_buf->dfs_suspend_cnt);
+	len += scnprintf(buf + len, buf_len - len, "tx_abort_suspend_cnt = %u\n",
+			 htt_stats_buf->tx_abort_suspend_cnt);
+	len += scnprintf(buf + len, buf_len - len, "suspended_txq_mask = %u\n",
+			 htt_stats_buf->suspended_txq_mask);
+	len += scnprintf(buf + len, buf_len - len, "last_suspend_reason = %u\n",
+			 htt_stats_buf->last_suspend_reason);
+	len += scnprintf(buf + len, buf_len - len, "seq_failed_queueing = %u\n",
+			 htt_stats_buf->seq_failed_queueing);
+	len += scnprintf(buf + len, buf_len - len, "seq_restarted = %u\n",
+			 htt_stats_buf->seq_restarted);
+	len += scnprintf(buf + len, buf_len - len, "seq_txop_repost_stop = %u\n",
+			 htt_stats_buf->seq_txop_repost_stop);
+	len += scnprintf(buf + len, buf_len - len, "next_seq_cancel = %u\n",
+			 htt_stats_buf->next_seq_cancel);
+	len += scnprintf(buf + len, buf_len - len, "seq_min_msdu_repost_stop = %u\n",
+			 htt_stats_buf->seq_min_msdu_repost_stop);
+	len += scnprintf(buf + len, buf_len - len, "total_phy_err_cnt = %u\n",
+			 htt_stats_buf->total_phy_err_cnt);
+	len += scnprintf(buf + len, buf_len - len, "ppdu_recvd = %u\n",
+			 htt_stats_buf->ppdu_recvd);
+	len += scnprintf(buf + len, buf_len - len, "tcp_msdu_cnt = %u\n",
+			 htt_stats_buf->tcp_msdu_cnt);
+	len += scnprintf(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u\n",
+			 htt_stats_buf->tcp_ack_msdu_cnt);
+	len += scnprintf(buf + len, buf_len - len, "udp_msdu_cnt = %u \n",
+			 htt_stats_buf->udp_msdu_cnt);
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->fw_tx_mgmt_subtype,
+			   "fw_tx_mgmt_subtype",
+			   HTT_STATS_SUBTYPE_MAX, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->fw_rx_mgmt_subtype,
+			   "fw_rx_mgmt_subtype",
+			   HTT_STATS_SUBTYPE_MAX, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->fw_ring_mpdu_err,
+			   "fw_ring_mpdu_err",
+			   HTT_STATS_SUBTYPE_MAX, "\n");
+
+	/* NOTE(review): fw_rx_mgmt_subtype is printed a second time here,
+	 * under the same label but with length HTT_RX_STATS_RXDMA_MAX_ERR.
+	 * This looks like a copy-paste slip — presumably an rxdma error
+	 * array was intended; confirm against the TLV struct definition.
+	 */
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->fw_rx_mgmt_subtype,
+			   "fw_rx_mgmt_subtype",
+			   HTT_RX_STATS_RXDMA_MAX_ERR, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->urrn_stats,
+			   "urrn_stats",
+			   HTT_TX_PDEV_MAX_URRN_STATS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->sifs_status,
+			   "sifs_status",
+			   HTT_TX_PDEV_MAX_SIFS_BURST_STATS, "\n");
+
+	/* NOTE(review): label says "sifs_hist_status" but the data printed
+	 * is fw_rx_mgmt_subtype again — almost certainly the wrong field;
+	 * confirm against the TLV struct definition before relying on this
+	 * output.
+	 */
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->fw_rx_mgmt_subtype,
+			   "sifs_hist_status",
+			   HTT_TX_PDEV_SIFS_BURST_HIST_STATS, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "rx_suspend_cnt = %u\n",
+			 htt_stats_buf->rx_suspend_cnt);
+	len += scnprintf(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u\n",
+			 htt_stats_buf->rx_suspend_fail_cnt);
+	len += scnprintf(buf + len, buf_len - len, "rx_resume_cnt = %u\n",
+			 htt_stats_buf->rx_resume_cnt);
+	len += scnprintf(buf + len, buf_len - len, "rx_resume_fail_cnt = %u \n",
+			 htt_stats_buf->rx_resume_fail_cnt);
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->hwq_beacon_cmd_result,
+			   "hwq_beacon_cmd_result",
+			   HTT_TX_HWQ_MAX_CMD_RESULT_STATS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->hwq_voice_cmd_result,
+			   "hwq_voice_cmd_result",
+			   HTT_TX_HWQ_MAX_CMD_RESULT_STATS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->hwq_video_cmd_result,
+			   "hwq_video_cmd_result",
+			   HTT_TX_HWQ_MAX_CMD_RESULT_STATS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->hwq_best_effort_cmd_result,
+			   "hwq_best_effort_cmd_result",
+			   HTT_TX_HWQ_MAX_CMD_RESULT_STATS, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "hwq_beacon_mpdu_tried_cnt = %u\n",
+			 htt_stats_buf->hwq_beacon_mpdu_tried_cnt);
+	len += scnprintf(buf + len, buf_len - len, "hwq_voice_mpdu_tried_cnt = %u\n",
+			 htt_stats_buf->hwq_voice_mpdu_tried_cnt);
+	len += scnprintf(buf + len, buf_len - len, "hwq_video_mpdu_tried_cnt = %u\n",
+			 htt_stats_buf->hwq_video_mpdu_tried_cnt);
+	len += scnprintf(buf + len, buf_len - len, "hwq_best_effort_mpdu_tried_cnt = %u\n",
+			 htt_stats_buf->hwq_best_effort_mpdu_tried_cnt);
+	len += scnprintf(buf + len, buf_len - len, "hwq_beacon_mpdu_queued_cnt = %u\n",
+			 htt_stats_buf->hwq_beacon_mpdu_queued_cnt);
+	len += scnprintf(buf + len, buf_len - len, "hwq_voice_mpdu_queued_cnt = %u\n",
+			 htt_stats_buf->hwq_voice_mpdu_queued_cnt);
+	len += scnprintf(buf + len, buf_len - len, "hwq_video_mpdu_queued_cnt = %u\n",
+			 htt_stats_buf->hwq_video_mpdu_queued_cnt);
+	len += scnprintf(buf + len, buf_len - len, "hwq_best_effort_mpdu_queued_cnt = %u\n",
+			 htt_stats_buf->hwq_best_effort_mpdu_queued_cnt);
+	len += scnprintf(buf + len, buf_len - len, "hwq_beacon_mpdu_ack_fail_cnt = %u\n",
+			 htt_stats_buf->hwq_beacon_mpdu_ack_fail_cnt);
+	len += scnprintf(buf + len, buf_len - len, "hwq_voice_mpdu_ack_fail_cnt = %u\n",
+			 htt_stats_buf->hwq_voice_mpdu_ack_fail_cnt);
+	len += scnprintf(buf + len, buf_len - len, "hwq_video_mpdu_ack_fail_cnt = %u\n",
+			 htt_stats_buf->hwq_video_mpdu_ack_fail_cnt);
+	len += scnprintf(buf + len, buf_len - len, "hwq_best_effort_mpdu_ack_fail_cnt = %u\n",
+			 htt_stats_buf->hwq_best_effort_mpdu_ack_fail_cnt);
+	len += scnprintf(buf + len, buf_len - len, "pdev_resets = %u\n",
+			 htt_stats_buf->pdev_resets);
+	len += scnprintf(buf + len, buf_len - len, "phy_warm_reset = %u\n",
+			 htt_stats_buf->phy_warm_reset);
+	len += scnprintf(buf + len, buf_len - len, "hwsch_reset_count = %u\n",
+			 htt_stats_buf->hwsch_reset_count);
+	len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_ucode_trig = %u\n",
+			 htt_stats_buf->phy_warm_reset_ucode_trig);
+	len += scnprintf(buf + len, buf_len - len, "mac_cold_reset = %u\n",
+			 htt_stats_buf->mac_cold_reset);
+	len += scnprintf(buf + len, buf_len - len, "mac_warm_reset = %u\n",
+			 htt_stats_buf->mac_warm_reset);
+	len += scnprintf(buf + len, buf_len - len, "mac_warm_reset_restore_cal = %u\n",
+			 htt_stats_buf->mac_warm_reset_restore_cal);
+	len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_m3_ssr = %u\n",
+			 htt_stats_buf->phy_warm_reset_m3_ssr);
+	len += scnprintf(buf + len, buf_len - len, "fw_rx_rings_reset = %u\n",
+			 htt_stats_buf->fw_rx_rings_reset);
+	len += scnprintf(buf + len, buf_len - len, "tx_flush = %u\n",
+			 htt_stats_buf->tx_flush);
+	len += scnprintf(buf + len, buf_len - len, "hwsch_dev_reset_war = %u\n",
+			 htt_stats_buf->hwsch_dev_reset_war);
+	len += scnprintf(buf + len, buf_len - len, "mac_cold_reset_restore_cal = %u\n",
+			 htt_stats_buf->mac_cold_reset_restore_cal);
+	len += scnprintf(buf + len, buf_len - len, "mac_only_reset = %u\n",
+			 htt_stats_buf->mac_only_reset);
+	len += scnprintf(buf + len, buf_len - len, "mac_sfm_reset = %u\n",
+			 htt_stats_buf->mac_sfm_reset);
+	len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
+			 htt_stats_buf->rx_ldpc);
+	len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u \n",
+			 htt_stats_buf->tx_ldpc);
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->gen_mpdu_end_reason,
+			   "gen_mpdu_end_reason",
+			   HTT_TX_TQM_MAX_GEN_MPDU_END_REASON, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->list_mpdu_end_reason,
+			   "list_mpdu_end_reason",
+			   HTT_TX_TQM_MAX_GEN_MPDU_END_REASON, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->tx_mcs,
+			   "tx_mcs",
+			   (HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS +
+			   HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS +
+			   HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS), "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->tx_nss,
+			   "tx_nss",
+			   HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->tx_bw,
+			   "tx_bw",
+			   HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->half_tx_bw,
+			   "half_tx_bw",
+			   HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->quarter_tx_bw,
+			   "quarter_tx_bw",
+			   HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->tx_su_punctured_mode,
+			   "tx_su_punctured_mode",
+			   HTT_TX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->rx_mcs,
+			   "rx_mcs",
+			   (HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS +
+			   HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS +
+			   HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS), "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->rx_nss,
+			   "rx_nss",
+			   HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->rx_bw,
+			   "rx_bw",
+			   HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->rx_stbc,
+			   "rx_stbc",
+			   (HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS +
+			    HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS +
+			    HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS), "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+			 htt_stats_buf->rts_cnt);
+	len += scnprintf(buf + len, buf_len - len, "rts_success = %u\n",
+			 htt_stats_buf->rts_success);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "=================================================\n");
+	stats_req->buf_len = len;
+}
+
+/* Dump MBSSID control-frame counters (basic/BSR triggers, MU-RTS and
+ * UL MU-MIMO triggers, each split across-BSS vs within-BSS) into the
+ * debugfs stats buffer.
+ *
+ * NOTE(review): no "HTT_..._TLV:" banner is printed before mac_id,
+ * unlike the sibling printers — confirm whether that is intentional.
+ */
+static inline void
+htt_print_pdev_mbssid_ctrl_frame_stats_tlv(const void *tag_buf,
+					   struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_pdev_mbssid_ctrl_frame_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 HTT_STATS_CMN_MAC_ID_GET(htt_stats_buf->mac_id__word));
+
+	len += scnprintf(buf + len, buf_len - len, "basic_trigger_across_bss_count = %u\n",
+			 htt_stats_buf->basic_trigger_across_bss);
+	len += scnprintf(buf + len, buf_len - len, "basic_trigger_within_bss_count = %u\n",
+			 htt_stats_buf->basic_trigger_within_bss);
+	len += scnprintf(buf + len, buf_len - len, "bsr_trigger_across_bss_count = %u\n",
+			 htt_stats_buf->bsr_trigger_across_bss);
+	len += scnprintf(buf + len, buf_len - len, "bsr_trigger_within_bss_count = %u\n",
+			 htt_stats_buf->bsr_trigger_within_bss);
+	len += scnprintf(buf + len, buf_len - len, "MU_RTS_across_bss_count = %u\n",
+			 htt_stats_buf->mu_rts_across_bss);
+	len += scnprintf(buf + len, buf_len - len, "MU_RTS_within_bss_count = %u\n",
+			 htt_stats_buf->mu_rts_within_bss);
+	len += scnprintf(buf + len, buf_len - len, "UL_MUMIMO_trigger_across_bss_count = %u\n",
+			 htt_stats_buf->ul_mumimo_trigger_across_bss);
+	len += scnprintf(buf + len, buf_len - len, "UL_MUMIMO_trigger_within_bss_count = %u\n",
+			 htt_stats_buf->ul_mumimo_trigger_within_bss);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "=================================================\n");
+	stats_req->buf_len = len;
+}
+
+/* Dump the variable-length MLO abort counter TLV into the debugfs
+ * stats buffer.  tag_len is the TLV payload size in bytes; the
+ * counters are u32s, so 'tag_len >> 2' is the element count.
+ */
+static inline void
+htt_print_tx_pdev_stats_mlo_abort_tlv_v(const void *tag_buf,
+					u16 tag_len,
+					struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_stats_mlo_abort_tlv_v *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 num_elements = tag_len >> 2;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_MLO_ABORT_TLV_V:\n");
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->mlo_abort_cnt,
+			   "mlo_abort_cnt",
+			   num_elements, "\n");
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "=================================================\n");
+	stats_req->buf_len = len;
+}
+
+/* Dump the variable-length MLO TXOP abort counter TLV into the debugfs
+ * stats buffer.  Same layout logic as the MLO abort TLV above:
+ * 'tag_len >> 2' u32 counters.
+ */
+static inline void
+htt_print_tx_pdev_stats_mlo_txop_abort_tlv_v(const void *tag_buf,
+					     u16 tag_len,
+					     struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_pdev_stats_mlo_txop_abort_tlv_v *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 num_elements = tag_len >> 2;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_MLO_TXOP_ABORT_TLV_V:\n");
+	PRINT_ARRAY_TO_BUF(buf, len,
+			   htt_stats_buf->mlo_txop_abort_cnt,
+			   "mlo_txop_abort_cnt",
+			   num_elements, "\n");
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "=================================================\n");
+	stats_req->buf_len = len;
+}
+
+/* Dump PHY-layer error/abort counters plus the per-bucket rx packet,
+ * CRC-pass, block-error and OTA-error count arrays into the debugfs
+ * stats buffer.
+ */
+static inline void htt_print_phy_counters_tlv(const void *tag_buf,
+					      struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_phy_counters_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int i;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n",
+			 htt_stats_buf->rx_ofdma_timing_err_cnt);
+	len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n",
+			 htt_stats_buf->rx_cck_fail_cnt);
+	len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n",
+			 htt_stats_buf->mactx_abort_cnt);
+	len += scnprintf(buf + len, buf_len - len, "macrx_abort_cnt = %u\n",
+			 htt_stats_buf->macrx_abort_cnt);
+	len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n",
+			 htt_stats_buf->phytx_abort_cnt);
+	len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n",
+			 htt_stats_buf->phyrx_abort_cnt);
+	len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n",
+			 htt_stats_buf->phyrx_defer_abort_cnt);
+	len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n",
+			 htt_stats_buf->rx_gain_adj_lstf_event_cnt);
+	len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n",
+			 htt_stats_buf->rx_gain_adj_non_legacy_cnt);
+
+	for (i = 0; i < HTT_MAX_RX_PKT_CNT; i++)
+		len += scnprintf(buf + len, buf_len - len, "rx_pkt_cnt[%d] = %u\n",
+				 i, htt_stats_buf->rx_pkt_cnt[i]);
+
+	for (i = 0; i < HTT_MAX_RX_PKT_CRC_PASS_CNT; i++)
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_pkt_crc_pass_cnt[%d] = %u\n",
+				 i, htt_stats_buf->rx_pkt_crc_pass_cnt[i]);
+
+	for (i = 0; i < HTT_MAX_PER_BLK_ERR_CNT; i++)
+		len += scnprintf(buf + len, buf_len - len,
+				 "per_blk_err_cnt[%d] = %u\n",
+				 i, htt_stats_buf->per_blk_err_cnt[i]);
+
+	for (i = 0; i < HTT_MAX_RX_OTA_ERR_CNT; i++)
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_ota_err_cnt[%d] = %u\n",
+				 i, htt_stats_buf->rx_ota_err_cnt[i]);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "=================================================\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump general PHY stats into the debugfs stats buffer: per-chain noise
+ * floors (BDF-calibrated and runtime, printed signed with %d), radar
+ * and ANI state, and the current channel/bandwidth configuration.
+ */
+static inline void htt_print_phy_stats_tlv(const void *tag_buf,
+					   struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_phy_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int i;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n");
+	for (i = 0; i < HTT_STATS_MAX_CHAINS; i++)
+		len += scnprintf(buf + len, buf_len - len, "bdf_nf_chain[%d] = %d\n",
+				 i, htt_stats_buf->nf_chain[i]);
+
+	for (i = 0; i < HTT_STATS_MAX_CHAINS; i++)
+		len += scnprintf(buf + len, buf_len - len, "runtime_nf_chain[%d] = %d\n",
+				 i, htt_stats_buf->runtime_nf_chain[i]);
+
+	/* False-radar count reported against firmware uptime in minutes. */
+	len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u / %u (mins)\n",
+			 htt_stats_buf->false_radar_cnt,
+			 htt_stats_buf->fw_run_time);
+	len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n",
+			 htt_stats_buf->radar_cs_cnt);
+	len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n",
+			 htt_stats_buf->ani_level);
+
+	len += scnprintf(buf + len, buf_len - len, "current operating bw = %u\n",
+			 htt_stats_buf->current_operating_width);
+	len += scnprintf(buf + len, buf_len - len, "current device bw = %u\n",
+			 htt_stats_buf->current_device_width);
+	len += scnprintf(buf + len, buf_len - len, "last radar type = %u\n",
+			 htt_stats_buf->last_radar_type);
+	len += scnprintf(buf + len, buf_len - len, "dfs regulatory domain = %u\n",
+			 htt_stats_buf->dfs_reg_domain);
+	len += scnprintf(buf + len, buf_len - len, "radar mask bit = %u\n",
+			 htt_stats_buf->radar_mask_bit);
+	len += scnprintf(buf + len, buf_len - len, "radar rssi = %d\n",
+			 htt_stats_buf->radar_rssi);
+	len += scnprintf(buf + len, buf_len - len, "radar dfs flags = %u\n",
+			 htt_stats_buf->radar_dfs_flags);
+	len += scnprintf(buf + len, buf_len - len, "operating center freq = %u\n",
+			 htt_stats_buf->band_center_frequency_operating);
+	len += scnprintf(buf + len, buf_len - len, "device center freq = %u\n",
+			 htt_stats_buf->band_center_frequency_device);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "=================================================\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Dump PHY reset counters (CF-active-low pass/fail, vreg power-offs,
+ * forced calibrations, RF-mode-switch power-offs) for one pdev into
+ * the debugfs stats buffer.
+ */
+static inline void htt_print_phy_reset_counters_tlv(const void *tag_buf,
+						    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_phy_reset_counters_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_COUNTERS_TLV:\n");
+
+	len += scnprintf(buf + len, buf_len - len, "reset_counts : pdev_id = %u\n",
+			 htt_stats_buf->pdev_id);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_counts : cf_active_low_fail_cnt = %u\n",
+			 htt_stats_buf->cf_active_low_fail_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_counts : cf_active_low_pass_cnt = %u\n",
+			 htt_stats_buf->cf_active_low_pass_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_counts : phy_off_through_vreg_cnt = %u\n",
+			 htt_stats_buf->phy_off_through_vreg_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_counts : force_calibration_cnt = %u\n",
+			 htt_stats_buf->force_calibration_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_counts : rf_mode_switch_phy_off_cnt = %u\n",
+			 htt_stats_buf->rf_mode_switch_phy_off_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			 "=================================================\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Append the HTT_PHY_RESET_STATS_TLV section: a snapshot of the channel and
+ * PHY configuration captured at the last PHY reset (channel/freqs/phy mode,
+ * reset cause, chain masks, DFS/scan enables, CCA/desense thresholds).
+ * Appends at stats_req->buf_len and stores the new length back on return.
+ */
+static inline void htt_print_phy_reset_stats_tlv(const void *tag_buf,
+						 struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_phy_reset_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_STATS_TLV:\n");
+
+	len += scnprintf(buf + len, buf_len - len, "reset_stats : pdev_id = %u\n",
+			 htt_stats_buf->pdev_id);
+	len += scnprintf(buf + len, buf_len - len, "reset_stats : chan_mhz = %u\n",
+			 htt_stats_buf->chan_mhz);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : chan_band_center_freq1 = %u\n",
+			 htt_stats_buf->chan_band_center_freq1);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : chan_band_center_freq2 = %u\n",
+			 htt_stats_buf->chan_band_center_freq2);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : chan_phy_mode = %u\n",
+			 htt_stats_buf->chan_phy_mode);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : chan_flags = 0x%0x\n",
+			 htt_stats_buf->chan_flags);
+	len += scnprintf(buf + len, buf_len - len, "reset_stats : chan_num = %u\n",
+			 htt_stats_buf->chan_num);
+	len += scnprintf(buf + len, buf_len - len, "reset_stats : reset_cause = 0x%0x\n",
+			 htt_stats_buf->reset_cause);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : prev_reset_cause = 0x%0x\n",
+			 htt_stats_buf->prev_reset_cause);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : phy_warm_reset_src = 0x%0x\n",
+			 htt_stats_buf->phy_warm_reset_src);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : rx_gain_tbl_mode = %d\n",
+			 htt_stats_buf->rx_gain_tbl_mode);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : xbar_val = 0x%0x\n",
+			 htt_stats_buf->xbar_val);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : force_calibration = %u\n",
+			 htt_stats_buf->force_calibration);
+	len += scnprintf(buf + len, buf_len - len, "reset_stats : phyrf_mode = %u\n",
+			 htt_stats_buf->phyrf_mode);
+	len += scnprintf(buf + len, buf_len - len, "reset_stats : phy_homechan = %u\n",
+			 htt_stats_buf->phy_homechan);
+	/* tx/rx chain masks and per-block enable masks printed in hex */
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : phy_tx_ch_mask = 0x%0x\n",
+			 htt_stats_buf->phy_tx_ch_mask);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : phy_rx_ch_mask = 0x%0x\n",
+			 htt_stats_buf->phy_rx_ch_mask);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : phybb_ini_mask = 0x%0x\n",
+			 htt_stats_buf->phybb_ini_mask);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : phyrf_ini_mask = 0x%0x\n",
+			 htt_stats_buf->phyrf_ini_mask);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : phy_dfs_en_mask = 0x%0x\n",
+			 htt_stats_buf->phy_dfs_en_mask);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : phy_sscan_en_mask = 0x%0x\n",
+			 htt_stats_buf->phy_sscan_en_mask);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : phy_synth_sel_mask = 0x%0x\n",
+			 htt_stats_buf->phy_synth_sel_mask);
+	len += scnprintf(buf + len, buf_len - len, "reset_stats : phy_adfs_freq = %u\n",
+			 htt_stats_buf->phy_adfs_freq);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : cck_fir_settings = 0x%0x\n",
+			 htt_stats_buf->cck_fir_settings);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : phy_dyn_pri_chan = %u\n",
+			 htt_stats_buf->phy_dyn_pri_chan);
+	len += scnprintf(buf + len, buf_len - len, "reset_stats : cca_thresh = 0x%0x\n",
+			 htt_stats_buf->cca_thresh);
+	len += scnprintf(buf + len, buf_len - len, "reset_stats : dyn_cca_status = %u\n",
+			 htt_stats_buf->dyn_cca_status);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : rxdesense_thresh_hw = 0x%x\n",
+			 htt_stats_buf->rxdesense_thresh_hw);
+	len += scnprintf(buf + len, buf_len - len,
+			 "reset_stats : rxdesense_thresh_sw = 0x%x\n",
+			 htt_stats_buf->rxdesense_thresh_sw);
+	len += scnprintf(buf + len, buf_len - len,
+			 "=================================================\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Append the HTT_SOC_COMMON_STATS_TLV section: the SoC-wide count of MSDUs
+ * dropped for invalid peers, carried by firmware as two 32-bit halves.
+ */
+static inline void
+htt_print_soc_txrx_stats_common_tlv(const void *tag_buf,
+				    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_t2h_soc_txrx_stats_common_tlv *stats = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_SOC_COMMON_STATS_TLV:\n");
+	/* print hi word first so the halves read as one 64-bit hex value */
+	len += scnprintf(buf + len, buf_len - len, "soc_drop_count = 0x%08x%08x\n",
+			 stats->inv_peers_msdu_drop_count_hi,
+			 stats->inv_peers_msdu_drop_count_lo);
+	len += scnprintf(buf + len, buf_len - len, "================================\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Append the HTT_VDEV_TXRX_STATS_TLV section: per-vdev hardware tx/rx MSDU
+ * and byte counters plus tx drop counters, each carried as hi/lo 32-bit
+ * halves and printed as one 64-bit hex value (hi word first).
+ */
+static inline void
+htt_print_vdev_txrx_stats_hw_tlv(const void *tag_buf,
+				 struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_t2h_vdev_txrx_stats_hw_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_VDEV_TXRX_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n",
+			 htt_stats_buf->vdev_id);
+
+	len += scnprintf(buf + len, buf_len - len, "rx_msdu_byte_cnt = 0x%08x%08x\n",
+			 htt_stats_buf->rx_msdu_byte_cnt_hi,
+			 htt_stats_buf->rx_msdu_byte_cnt_lo);
+
+	len += scnprintf(buf + len, buf_len - len, "rx_msdu_cnt = 0x%08x%08x\n",
+			 htt_stats_buf->rx_msdu_cnt_hi,
+			 htt_stats_buf->rx_msdu_cnt_lo);
+
+	len += scnprintf(buf + len, buf_len - len, "tx_msdu_byte_cnt = 0x%08x%08x\n",
+			 htt_stats_buf->tx_msdu_byte_cnt_hi,
+			 htt_stats_buf->tx_msdu_byte_cnt_lo);
+
+	len += scnprintf(buf + len, buf_len - len, "tx_msdu_cnt = 0x%08x%08x\n",
+			 htt_stats_buf->tx_msdu_cnt_hi,
+			 htt_stats_buf->tx_msdu_cnt_lo);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "tx_msdu_excessive_retry_discard_cnt = 0x%08x%08x\n",
+			 htt_stats_buf->tx_msdu_excessive_retry_discard_cnt_hi,
+			 htt_stats_buf->tx_msdu_excessive_retry_discard_cnt_lo);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "tx_msdu_cong_ctrl_drop_cnt = 0x%08x%08x\n",
+			 htt_stats_buf->tx_msdu_cong_ctrl_drop_cnt_hi,
+			 htt_stats_buf->tx_msdu_cong_ctrl_drop_cnt_lo);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "tx_msdu_ttl_expire_drop_cnt = 0x%08x%08x\n",
+			 htt_stats_buf->tx_msdu_ttl_expire_drop_cnt_hi,
+			 htt_stats_buf->tx_msdu_ttl_expire_drop_cnt_lo);
+
+	len += scnprintf(buf + len, buf_len - len, "===============================\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Append the HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG section: the peer MAC
+ * address followed by per-subtype management-frame tx and rx counters,
+ * labelled with the 802.11 frame-control subtype names.
+ */
+static
+inline void htt_print_peer_ctrl_path_txrx_stats_tlv(const void *tag_buf,
+						    struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_peer_ctrl_path_txrx_stats_tlv *htt_stat_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int i;
+	/* names for the 16 mgmt frame-control subtype slots, index == subtype */
+	const char *mgmt_frm_type[ATH12K_STATS_MGMT_FRM_TYPE_MAX] = {"ASSOC_REQ",
+								     "ASSOC_RES",
+								     "REASSOC_REQ",
+								     "REASSOC_RES",
+								     "PRB_REQ", "PRB_RES",
+								     "RESV", "RESV",
+								     "BCN", "ATIM",
+								     "DISASSOC", "AUTH",
+								     "DAUTH", "ACTN",
+								     "RESV", "RESV", };
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG:\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "peer_mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
+			 htt_stat_buf->peer_mac_addr[0], htt_stat_buf->peer_mac_addr[1],
+			 htt_stat_buf->peer_mac_addr[2], htt_stat_buf->peer_mac_addr[3],
+			 htt_stat_buf->peer_mac_addr[4], htt_stat_buf->peer_mac_addr[5]);
+
+	len += scnprintf(buf + len, buf_len - len, "peer_tx_mgmt_fc_subtype:\n");
+	for (i = 0; i < ATH12K_STATS_MGMT_FRM_TYPE_MAX; i++)
+		len += scnprintf(buf + len, buf_len - len, "\t%s[%d]:%u\n",
+				 mgmt_frm_type[i], i,
+				 /* was peer_rx_mgmt_subtype: copy/paste bug made
+				  * the tx section repeat the rx counters
+				  */
+				 htt_stat_buf->peer_tx_mgmt_subtype[i]);
+
+	len += scnprintf(buf + len, buf_len - len, "peer_rx_mgmt_fc_subtype:\n");
+	for (i = 0; i < ATH12K_STATS_MGMT_FRM_TYPE_MAX; i++)
+		len += scnprintf(buf + len, buf_len - len, "\t%s[%d]:%u\n",
+				 mgmt_frm_type[i], i,
+				 htt_stat_buf->peer_rx_mgmt_subtype[i]);
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Append the HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG section: per-pdev
+ * firmware mgmt-frame tx counters indexed by subtype, via the shared
+ * PRINT_ARRAY_TO_BUF helper macro (which updates 'len' in place).
+ */
+static
+inline void htt_print_pdev_ctrl_path_tx_stats_tlv(const void *tag_buf,
+						  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_pdev_ctrl_path_tx_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG:\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_tx_mgmt_subtype,
+			   "fw_tx_mgmt_subtype", HTT_STATS_SUBTYPE_MAX, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Append the HTT_SCHED_TXQ_SUPERCYCLE_TRIGGER_V section: the variable-length
+ * supercycle trigger histogram. The element count is derived from the TLV
+ * payload length (tag_len / 4 u32 words) and clamped to the table maximum.
+ */
+static inline void
+htt_print_sched_txq_supercycle_trigger_tlv(const void *tag_buf, u16 tag_len,
+					   struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_sched_txq_supercycle_triggers_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	/* tag_len is in bytes; >> 2 converts to u32 element count */
+	u16 num_elems = min_t(u16, (tag_len >> 2), HTT_SCHED_SUPERCYCLE_TRIGGER_MAX);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_SCHED_TXQ_SUPERCYCLE_TRIGGER_V:\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->supercycle_triggers,
+			   "supercycle_triggers", num_elems, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Append the HTT_HW_WAR_STATS_TLV section: the mac_id followed by one line
+ * per hardware-workaround counter. The number of counters is variable and
+ * derived from the TLV payload length.
+ */
+static inline void htt_print_hw_war_tlv(const void *tag_buf, u16 tag_len,
+					struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_hw_war_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8 i;
+	u32 mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word);
+	u32 tag_words = tag_len >> 2;	/* payload length in u32 words */
+
+	tag_words--;         /* first word beyond TLV header is for mac_id */
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_HW_WAR_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id, HTT_STATS_MAC_ID));
+
+	/* remaining words are the per-WAR counters */
+	for (i = 0; i < tag_words; i++) {
+		len += scnprintf(buf + len, buf_len - len, "hw_war %u = %u\n",
+				 i, htt_stats_buf->hw_wars[i]);
+	}
+
+	stats_req->buf_len = len;
+}
+
+/* Append the HTT_PEER_SCHED_STATS_TLV section: per-peer scheduler counters.
+ * The tx/rx active durations arrive as hi/lo 32-bit microsecond halves;
+ * they are combined into a u64 and converted to milliseconds with div_u64.
+ */
+static inline void htt_print_peer_sched_stats_tlv(const void *tag_buf,
+						  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_peer_sched_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PEER_SCHED_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "peer_id = %u\n",
+			 htt_stats_buf->peer_id);
+	len += scnprintf(buf + len, buf_len - len, "num_sched_dl = %u\n",
+			 htt_stats_buf->num_sched_dl);
+	len += scnprintf(buf + len, buf_len - len, "num_sched_ul = %u\n",
+			 htt_stats_buf->num_sched_ul);
+	/* us -> ms conversion: divide the assembled 64-bit value by 1000 */
+	len += scnprintf(buf + len, buf_len - len, "peer_tx_active_dur_ms = %llu\n",
+		   (div_u64((htt_stats_buf->peer_tx_active_dur_us_low |
+		   ((unsigned long long)htt_stats_buf->peer_tx_active_dur_us_high << 32)),
+		   1000)));
+	len += scnprintf(buf + len, buf_len - len, "peer_rx_active_dur_ms = %llu\n",
+		   (div_u64((htt_stats_buf->peer_rx_active_dur_us_low |
+		   ((unsigned long long)htt_stats_buf->peer_rx_active_dur_us_high << 32)),
+		   1000)));
+	len += scnprintf(buf + len, buf_len - len, "peer_curr_rate_kbps = %u\n",
+			 htt_stats_buf->peer_curr_rate_kbps);
+
+	stats_req->buf_len = len;
+}
+
+/* Append the STA UL OFDMA stats section: trigger-type counters, per-HWQ
+ * response counters, UL OFDMA tx MCS/NSS/GI/BW histograms and MU-EDCA
+ * parameters. NOTE(review): several entries print the values first and the
+ * label on the following line — this mirrors the firmware stat layout and
+ * is kept as-is.
+ */
+static void htt_print_sta_ul_ofdma_stats_tlv(const void *tag_buf,
+					     struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_print_sta_ul_ofdma_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int i, j;
+
+	len += scnprintf(buf + len, buf_len - len, "========STA UL OFDMA STATS=======\n");
+
+	len += scnprintf(buf + len, buf_len - len, "pdev ID = %d\n",
+			 htt_stats_buf->pdev_id);
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_trigger_type,
+			   "STA HW Trigger Type", HTT_STA_UL_OFDMA_NUM_TRIG_TYPE, "\n");
+
+	/* 11ax trigger-type counters, fixed 6-slot layout */
+	len += scnprintf(buf + len, buf_len - len,
+			 " BASIC:%u, BRPOLL:%u, MUBAR:%u, MURTS:%u BSRP:%u Others:%u",
+			 htt_stats_buf->ax_trigger_type[0],
+			 htt_stats_buf->ax_trigger_type[1],
+			 htt_stats_buf->ax_trigger_type[2],
+			 htt_stats_buf->ax_trigger_type[3],
+			 htt_stats_buf->ax_trigger_type[4],
+			 htt_stats_buf->ax_trigger_type[5]);
+	len += scnprintf(buf + len, buf_len - len, "11ax Trigger Type\n");
+
+	len += scnprintf(buf + len, buf_len - len,
+			 " HIPRI:%u, LOWPRI:%u, BSR:%u",
+			 htt_stats_buf->num_data_ppdu_responded_per_hwq[0],
+			 htt_stats_buf->num_data_ppdu_responded_per_hwq[1],
+			 htt_stats_buf->num_data_ppdu_responded_per_hwq[2]);
+	len += scnprintf(buf + len, buf_len - len, "Data PPDU Resp per HWQ\n");
+
+	len += scnprintf(buf + len, buf_len - len,
+			 " HIPRI:%u, LOWPRI:%u, BSR:%u",
+			 htt_stats_buf->num_null_delimiters_responded_per_hwq[0],
+			 htt_stats_buf->num_null_delimiters_responded_per_hwq[1],
+			 htt_stats_buf->num_null_delimiters_responded_per_hwq[2]);
+	len += scnprintf(buf + len, buf_len - len, "Null Delim Resp per HWQ\n");
+
+	len += scnprintf(buf + len, buf_len - len,
+			 " Data:%u, NullDelim:%u",
+			 htt_stats_buf->num_total_trig_responses[0],
+			 htt_stats_buf->num_total_trig_responses[1]);
+	len += scnprintf(buf + len, buf_len - len, "Trigger Resp Status\n");
+
+	len += scnprintf(buf + len, buf_len - len, "Last Trigger RX Time Interval = %u\n",
+			 htt_stats_buf->last_trig_rx_time_delta_ms);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_tx_mcs,
+			   "ul_ofdma_tx_mcs",
+			   HTT_STA_UL_OFDMA_NUM_MCS_COUNTERS, "\n");
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_tx_nss,
+			   "ul_ofdma_tx_nss",
+			   HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+
+	/* one MCS histogram row per guard-interval setting */
+	for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len, "ul_ofdma_tx_gi[%u]", j);
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_tx_gi[j], NULL,
+				   HTT_STA_UL_OFDMA_NUM_MCS_COUNTERS, "\n");
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "ul_ofdma_tx_ldpc = %u\n",
+			 htt_stats_buf->ul_ofdma_tx_ldpc);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_tx_bw,
+			   "ul_ofdma_tx_bw",
+			   HTT_STA_UL_OFDMA_NUM_BW_COUNTERS, "\n");
+
+	/* index 0 = half-rate channel types, index 1 = quarter-rate */
+	for (i = 0; i < HTT_STA_UL_OFDMA_NUM_REDUCED_CHAN_TYPES; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+			  i == 0 ? "half_ul_ofdma_tx_bw" : "quarter_ul_ofdma_tx_bw");
+		PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->reduced_ul_ofdma_tx_bw[i],
+				   NULL, HTT_STA_UL_OFDMA_NUM_BW_COUNTERS, "\n");
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "Trig Based Tx PPDU = %u\n",
+			 htt_stats_buf->trig_based_ppdu_tx);
+	len += scnprintf(buf + len, buf_len - len, "RBO Based Tx PPDU = %u\n",
+			 htt_stats_buf->rbo_based_ppdu_tx);
+	len += scnprintf(buf + len, buf_len - len, "MU to SU EDCA Switch Count = %u\n",
+			 htt_stats_buf->mu_edca_to_su_edca_switch_count);
+	len += scnprintf(buf + len, buf_len - len, "MU EDCA Params Apply Count = %u\n",
+			 htt_stats_buf->num_mu_edca_param_apply_count);
+
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->current_edca_hwq_mode,
+			   "current_edca_hwq_mode[AC]",
+			   HTT_NUM_AC_WMM, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->current_cw_min,
+			   "current_cw_min",
+			   HTT_NUM_AC_WMM, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->current_cw_max,
+			   "current_cw_max",
+			   HTT_NUM_AC_WMM, "\n");
+	PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->current_aifs,
+			   "current_aifs",
+			   HTT_NUM_AC_WMM, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "=============================\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Append the HTT_VDEV_RTT_RESP_STATS_TLV section: per-vdev counters for the
+ * FTM responder role (FTM tx outcomes, FTMR/iFTMR rx, rejections and
+ * terminations).
+ */
+static inline void
+htt_print_vdev_rtt_resp_stats_tlv(const void *tag_buf,
+				  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_vdev_rtt_resp_stats_tlv *stats = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_VDEV_RTT_RESP_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n",
+			 stats->vdev_id);
+	len += scnprintf(buf + len, buf_len - len, "tx_ftm_suc = %u\n",
+			 stats->tx_ftm_suc);
+	len += scnprintf(buf + len, buf_len - len, "tx_ftm_suc_retry = %u\n",
+			 stats->tx_ftm_suc_retry);
+	len += scnprintf(buf + len, buf_len - len, "tx_ftm_fail = %u\n",
+			 stats->tx_ftm_fail);
+	len += scnprintf(buf + len, buf_len - len, "rx_ftmr_cnt = %u\n",
+			 stats->rx_ftmr_cnt);
+	len += scnprintf(buf + len, buf_len - len, "rx_ftmr_dup_cnt = %u\n",
+			 stats->rx_ftmr_dup_cnt);
+	len += scnprintf(buf + len, buf_len - len, "rx_iftmr_cnt = %u\n",
+			 stats->rx_iftmr_cnt);
+	len += scnprintf(buf + len, buf_len - len, "rx_iftmr_dup_cnt = %u\n",
+			 stats->rx_iftmr_dup_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			 "initiator_active_responder_rejected_cnt = %u\n",
+			 stats->initiator_active_responder_rejected_cnt);
+	len += scnprintf(buf + len, buf_len - len, "responder_terminate_cnt = %u\n",
+			 stats->responder_terminate_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			 "=================================================\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Append the HTT_VDEV_RTT_INIT_STATS_TLV section: per-vdev counters for the
+ * FTM initiator role (FTMR tx outcomes, FTM rx, terminations and
+ * measurement requests).
+ */
+static inline void
+htt_print_vdev_rtt_init_stats_tlv(const void *tag_buf,
+				  struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_vdev_rtt_init_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_VDEV_RTT_INIT_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "vdev_id  = %u\n",
+			 htt_stats_buf->vdev_id);
+	len += scnprintf(buf + len, buf_len - len, "tx_ftmr_cnt  = %u\n",
+			 htt_stats_buf->tx_ftmr_cnt);
+	len += scnprintf(buf + len, buf_len - len, "tx_ftmr_fail  = %u\n",
+			 htt_stats_buf->tx_ftmr_fail);
+	len += scnprintf(buf + len, buf_len - len, "tx_ftmr_suc_retry  = %u\n",
+			 htt_stats_buf->tx_ftmr_suc_retry);
+	len += scnprintf(buf + len, buf_len - len, "rx_ftm_cnt  = %u\n",
+			 htt_stats_buf->rx_ftm_cnt);
+	len += scnprintf(buf + len, buf_len - len, "initiator_terminate_cnt  = %u\n",
+			 htt_stats_buf->initiator_terminate_cnt);
+	len += scnprintf(buf + len, buf_len - len, "tx_meas_req_count = %u\n",
+			 htt_stats_buf->tx_meas_req_count);
+	len += scnprintf(buf + len, buf_len - len, "===============================\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Append the HTT_PKTLOG_AND_HTT_RING_STATS_TLV section: drop counters for
+ * the various pktlog streams (lite, TQM, PPDU stats/ctrl, SW events).
+ */
+static inline void
+htt_print_pktlog_and_htt_ring_stats_tlv(const void *tag_buf,
+					struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_pktlog_and_htt_ring_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_PKTLOG_AND_HTT_RING_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "pktlog_lite_drop_cnt = %u\n",
+			 htt_stats_buf->pktlog_lite_drop_cnt);
+	len += scnprintf(buf + len, buf_len - len, "pktlog_tqm_drop_cnt = %u\n",
+			 htt_stats_buf->pktlog_tqm_drop_cnt);
+	len += scnprintf(buf + len, buf_len - len, "pktlog_ppdu_stats_drop_cnt = %u\n",
+			 htt_stats_buf->pktlog_ppdu_stats_drop_cnt);
+	len += scnprintf(buf + len, buf_len - len, "pktlog_ppdu_ctrl_drop_cnt = %u\n",
+			 htt_stats_buf->pktlog_ppdu_ctrl_drop_cnt);
+	len += scnprintf(buf + len, buf_len - len, "pktlog_sw_events_drop_cnt = %u\n",
+			 htt_stats_buf->pktlog_sw_events_drop_cnt);
+	len += scnprintf(buf + len, buf_len - len,
+			 "=================================================\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Append the HTT_DLPAGER_STATS_TLV section: download-pager lock/free page
+ * totals plus the lock and unlock history rings. Each history entry carries
+ * a 64-bit microsecond timestamp split into lsb/msb halves.
+ */
+static inline void htt_print_dlpager_stats_tlv(const void *tag_buf,
+					       struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_dl_pager_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 msg_dword_1 = htt_stats_buf->msg_dword_1;
+	u32 msg_dword_3 = htt_stats_buf->msg_dword_3;
+	u8 pg_locked = HTT_STATS_PAGE_LOCKED;
+	u8 pg_unlocked = HTT_STATS_PAGE_UNLOCKED;
+	int i;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_DLPAGER_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "=================================================\n");
+	len += scnprintf(buf + len, buf_len - len, "ASYNC locked pages = %u\n",
+			 HTT_DLPAGER_ASYNC_LOCK_PAGE_COUNT_GET(msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len, "SYNC locked pages = %u\n",
+			 HTT_DLPAGER_SYNC_LOCK_PAGE_COUNT_GET(msg_dword_1));
+	len += scnprintf(buf + len, buf_len - len, "Total locked pages = %u\n",
+			 HTT_DLPAGER_TOTAL_LOCKED_PAGES_GET(htt_stats_buf->msg_dword_2));
+	len += scnprintf(buf + len, buf_len - len, "Total free pages = %u\n",
+			 HTT_DLPAGER_TOTAL_FREE_PAGES_GET(htt_stats_buf->msg_dword_2));
+	len += scnprintf(buf + len, buf_len - len,
+			 "=================================================\n");
+	len += scnprintf(buf + len, buf_len - len, "LOCKED PAGES HISTORY\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "=================================================\n");
+	/* the firmware index points one past the last written slot;
+	 * index 0 therefore wraps back to MAX_HIST - 1
+	 */
+	len += scnprintf(buf + len, buf_len - len, "last_locked_page_idx = %u\n",
+			 (HTT_DLPAGER_LAST_LOCKED_PAGE_IDX_GET(msg_dword_3)) ?
+			 (HTT_DLPAGER_LAST_LOCKED_PAGE_IDX_GET(msg_dword_3) - 1) :
+						(HTT_DLPAGER_STATS_MAX_HIST - 1));
+
+	for (i = 0; i < HTT_DLPAGER_STATS_MAX_HIST; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "Index - %u : Page Number - %u : Num of pages - %u : Timestamp - %llu us\n",
+				 i,
+				 htt_stats_buf->last_pages_info[pg_locked][i].page_num,
+				 htt_stats_buf->last_pages_info
+						[pg_locked][i].num_of_pages,
+				 ((htt_stats_buf->last_pages_info
+						[pg_locked][i].timestamp_lsbs) |
+				 (((unsigned long long)htt_stats_buf->last_pages_info
+						[pg_locked][i].timestamp_msbs) << 32)));
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "=================================================\n");
+	len += scnprintf(buf + len, buf_len - len, "UNLOCKED PAGES HISTORY\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "=================================================\n");
+	len += scnprintf(buf + len, buf_len - len, "last_unlocked_page_idx = %u\n",
+			(HTT_DLPAGER_LAST_UNLOCKED_PAGE_IDX_GET(msg_dword_3)) ?
+			(HTT_DLPAGER_LAST_UNLOCKED_PAGE_IDX_GET(msg_dword_3) - 1) :
+						(HTT_DLPAGER_STATS_MAX_HIST - 1));
+
+	for (i = 0; i < HTT_DLPAGER_STATS_MAX_HIST; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "Index - %u : Page Number - %u : Num of pages - %u : Timestamp - %llu us\n",
+				 i, htt_stats_buf->last_pages_info
+							[pg_unlocked][i].page_num,
+				 htt_stats_buf->last_pages_info
+							[pg_unlocked][i].num_of_pages,
+				 ((htt_stats_buf->last_pages_info
+							[pg_unlocked][i].timestamp_lsbs) |
+				 (((unsigned long long)htt_stats_buf->last_pages_info
+						[pg_unlocked][i].timestamp_msbs) << 32)));
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "=================================================\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Append the tx PER histogram stats: three scalar counters followed by the
+ * per_histogram_cnt[] array rendered 25 buckets per output line into a
+ * scratch string. The final chunk runs from bucket 75 up to
+ * HTT_TX_PDEV_STATS_NUM_PER_COUNTERS.
+ */
+static inline void htt_print_histogram_stats_tlv(const void *tag_buf,
+						 struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_tx_histogram_stats_tlv *htt_stats_buf = tag_buf;
+	u8 i = 0;
+	u16 index = 0;	/* write position inside the scratch line buffer */
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	char data[HTT_HISTOGRAM_STATS_LEN] = {0};
+
+	len += scnprintf(buf + len, buf_len - len, "low_latency_rate_cnt =  %u\n",
+			 htt_stats_buf->low_latency_rate_cnt);
+
+	len += scnprintf(buf + len, buf_len - len, "su_burst_rate_drop_cnt = %u\n",
+			 htt_stats_buf->su_burst_rate_drop_cnt);
+
+	len += scnprintf(buf + len, buf_len - len, "su_burst_rate_drop_fail_cnt = %u\n",
+			 htt_stats_buf->su_burst_rate_drop_fail_cnt);
+
+	len += scnprintf(buf + len, buf_len - len, "PER_HISTOGRAM_STATS\n");
+
+	/* buckets 0..24 */
+	index = 0;
+	memset(data, 0x0, HTT_HISTOGRAM_STATS_LEN);
+
+	for (i = 0 ; i < 25; i++) {
+		index += scnprintf(&data[index],
+				   HTT_HISTOGRAM_STATS_LEN - index,
+				   " %u:%u,", i, htt_stats_buf->per_histogram_cnt[i]);
+		if (index >= HTT_HISTOGRAM_STATS_LEN)
+			break;
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "Per_histogram_cnt: %s\n", data);
+
+	/* buckets 25..49 */
+	index = 0;
+	memset(data, 0x0, HTT_HISTOGRAM_STATS_LEN);
+
+	for (i = 25 ; i < 50; i++) {
+		index += scnprintf(&data[index],
+				   HTT_HISTOGRAM_STATS_LEN - index,
+				   " %u:%u,", i, htt_stats_buf->per_histogram_cnt[i]);
+		if (index >= HTT_HISTOGRAM_STATS_LEN)
+			break;
+	}
+
+	len += scnprintf(buf + len, buf_len - len, " %s\n", data);
+
+	/* buckets 50..74 */
+	index = 0;
+	memset(data, 0x0, HTT_HISTOGRAM_STATS_LEN);
+
+	for (i = 50 ; i < 75; i++) {
+		index += scnprintf(&data[index],
+				   HTT_HISTOGRAM_STATS_LEN - index,
+				   " %u:%u,", i, htt_stats_buf->per_histogram_cnt[i]);
+		if (index >= HTT_HISTOGRAM_STATS_LEN)
+			break;
+	}
+
+	len += scnprintf(buf + len, buf_len - len, " %s\n", data);
+
+	/* buckets 75..NUM_PER_COUNTERS-1 */
+	for (i = 75 ; i < HTT_TX_PDEV_STATS_NUM_PER_COUNTERS; i++) {
+		index += scnprintf(&data[index],
+				   HTT_HISTOGRAM_STATS_LEN - index,
+				   " %u:%u,", i, htt_stats_buf->per_histogram_cnt[i]);
+		if (index >= HTT_HISTOGRAM_STATS_LEN)
+			break;
+	}
+
+	len += scnprintf(buf + len, buf_len - len, " %s\n", data);
+
+	stats_req->buf_len = len;
+}
+
+/* Append the HTT_UMAC_SSR_STATS_TLV section: UMAC subsystem-recovery (SSR)
+ * counters and millisecond timestamps of the most recent recovery phases.
+ */
+static void htt_print_umac_ssr_stats_tlv(const void *tag_buf,
+					 struct debug_htt_stats_req *stats_req)
+{
+	const struct htt_umac_ssr_stats_tlv *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+
+	/* the header was missing its '\n', running it into the first
+	 * counter line; every sibling printer terminates its header
+	 */
+	len += scnprintf(buf + len, buf_len - len, "HTT_UMAC_SSR_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "total_done = %u\n",
+			 htt_stats_buf->total_done);
+	len += scnprintf(buf + len, buf_len - len, "trigger_requests_count = %u\n",
+			 htt_stats_buf->trigger_requests_count);
+	len += scnprintf(buf + len, buf_len - len, "total_trig_dropped = %u\n",
+			 htt_stats_buf->total_trig_dropped);
+	len += scnprintf(buf + len, buf_len - len, "umac_disengaged_count = %u\n",
+			 htt_stats_buf->umac_disengaged_count);
+	len += scnprintf(buf + len, buf_len - len, "umac_soft_reset_count = %u\n",
+			 htt_stats_buf->umac_soft_reset_count);
+	len += scnprintf(buf + len, buf_len - len, "umac_engaged_count = %u\n",
+			 htt_stats_buf->umac_engaged_count);
+	/* timestamps below are milliseconds of the last occurrence */
+	len += scnprintf(buf + len, buf_len - len, "last_trigger_request_ms = %u\n",
+			 htt_stats_buf->last_trigger_request_ms);
+	len += scnprintf(buf + len, buf_len - len, "last_start_ms = %u\n",
+			 htt_stats_buf->last_start_ms);
+	len += scnprintf(buf + len, buf_len - len, "last_start_disengage_umac_ms = %u\n",
+			 htt_stats_buf->last_start_disengage_umac_ms);
+	len += scnprintf(buf + len, buf_len - len, "last_enter_ssr_platform_thread_ms = %u\n",
+			 htt_stats_buf->last_enter_ssr_platform_thread_ms);
+	len += scnprintf(buf + len, buf_len - len, "last_exit_ssr_platform_thread_ms = %u\n",
+			 htt_stats_buf->last_exit_ssr_platform_thread_ms);
+	len += scnprintf(buf + len, buf_len - len, "last_start_engage_umac_ms = %u\n",
+			 htt_stats_buf->last_start_engage_umac_ms);
+	len += scnprintf(buf + len, buf_len - len, "post_reset_tqm_sync_cmd_completion_ms = %u\n",
+			 htt_stats_buf->post_reset_tqm_sync_cmd_completion_ms);
+	len += scnprintf(buf + len, buf_len - len, "last_done_successful_ms = %u\n",
+			 htt_stats_buf->last_done_successful_ms);
+	len += scnprintf(buf + len, buf_len - len, "last_e2e_delta_ms = %u\n",
+			 htt_stats_buf->last_e2e_delta_ms);
+	len += scnprintf(buf + len, buf_len - len, "max_e2e_delta_ms = %u\n",
+			 htt_stats_buf->max_e2e_delta_ms);
+	len += scnprintf(buf + len, buf_len - len, "trigger_count_for_umac_hang = %u\n",
+			 htt_stats_buf->trigger_count_for_umac_hang);
+	len += scnprintf(buf + len, buf_len - len, "trigger_count_for_mlo_quick_ssr = %u\n",
+			 htt_stats_buf->trigger_count_for_mlo_quick_ssr);
+	len += scnprintf(buf + len, buf_len - len, "trigger_count_for_unknown_signature = %u\n",
+			 htt_stats_buf->trigger_count_for_unknown_signature);
+	len += scnprintf(buf + len, buf_len - len, "htt_sync_mlo_initiate_umac_recovery_ms = %u\n",
+			 htt_stats_buf->htt_sync_mlo_initiate_umac_recovery_ms);
+	len += scnprintf(buf + len, buf_len - len, "htt_sync_do_pre_reset_ms = %u\n",
+			 htt_stats_buf->htt_sync_do_pre_reset_ms);
+	len += scnprintf(buf + len, buf_len - len, "htt_sync_do_post_reset_start_ms = %u\n",
+			 htt_stats_buf->htt_sync_do_post_reset_start_ms);
+	len += scnprintf(buf + len, buf_len - len, "htt_sync_do_post_reset_complete_ms = %u\n",
+			 htt_stats_buf->htt_sync_do_post_reset_complete_ms);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "=================================================\n");
+
+	stats_req->buf_len = len;
+}
+
+static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
+					  u16 tag, u16 len, const void *tag_buf,
+					  void *user_data)
+{
+	struct debug_htt_stats_req *stats_req = user_data;
+
+	switch (tag) {
+	case HTT_STATS_TX_PDEV_CMN_TAG:
+		htt_print_tx_pdev_stats_cmn_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TX_PDEV_UNDERRUN_TAG:
+		htt_print_tx_pdev_stats_urrn_tlv_v(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_TX_PDEV_SIFS_TAG:
+		htt_print_tx_pdev_stats_sifs_tlv_v(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_TX_PDEV_FLUSH_TAG:
+		htt_print_tx_pdev_stats_flush_tlv_v(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_TX_PDEV_PHY_ERR_TAG:
+		htt_print_tx_pdev_stats_phy_err_tlv_v(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_TX_PDEV_SIFS_HIST_TAG:
+		htt_print_tx_pdev_stats_sifs_hist_tlv_v(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_TX_PDEV_TX_PPDU_STATS_TAG:
+		htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_MU_PPDU_DIST_TAG:
+		htt_print_tx_pdev_mu_ppdu_dist_stats_tlv_v(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_PDEV_TRIED_MPDU_CNT_HIST_TAG:
+		htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(tag_buf, len,
+								  stats_req);
+		break;
+
+	case HTT_STATS_STRING_TAG:
+		htt_print_stats_string_tlv(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_TX_HWQ_CMN_TAG:
+		htt_print_tx_hwq_stats_cmn_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_HWQ_DIFS_LATENCY_TAG:
+		htt_print_tx_hwq_difs_latency_stats_tlv_v(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_TX_HWQ_CMD_RESULT_TAG:
+		htt_print_tx_hwq_cmd_result_stats_tlv_v(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_TX_HWQ_CMD_STALL_TAG:
+		htt_print_tx_hwq_cmd_stall_stats_tlv_v(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_TX_HWQ_FES_STATUS_TAG:
+		htt_print_tx_hwq_fes_result_stats_tlv_v(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_TX_HWQ_TRIED_MPDU_CNT_HIST_TAG:
+		htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_TX_HWQ_TXOP_USED_CNT_HIST_TAG:
+		htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_TX_TQM_GEN_MPDU_TAG:
+		htt_print_tx_tqm_gen_mpdu_stats_tlv_v(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_TX_TQM_LIST_MPDU_TAG:
+		htt_print_tx_tqm_list_mpdu_stats_tlv_v(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG:
+		htt_print_tx_tqm_list_mpdu_cnt_tlv_v(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_TX_TQM_CMN_TAG:
+		htt_print_tx_tqm_cmn_stats_tlv(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_TX_TQM_PDEV_TAG:
+		htt_print_tx_tqm_pdev_stats_tlv_v(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_TQM_CMDQ_STATUS_TAG:
+		htt_print_tx_tqm_cmdq_status_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_DE_EAPOL_PACKETS_TAG:
+		htt_print_tx_de_eapol_packets_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG:
+		htt_print_tx_de_classify_failed_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_DE_CLASSIFY_STATS_TAG:
+		htt_print_tx_de_classify_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG:
+		htt_print_tx_de_classify_status_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG:
+		htt_print_tx_de_enqueue_packets_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG:
+		htt_print_tx_de_enqueue_discard_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_DE_FW2WBM_RING_FULL_HIST_TAG:
+		htt_print_tx_de_fw2wbm_ring_full_hist_tlv(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_TX_DE_CMN_TAG:
+		htt_print_tx_de_cmn_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_RING_IF_TAG:
+		htt_print_ring_if_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG:
+		htt_print_tx_pdev_mu_mimo_sch_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_PDEV_MUMIMO_GRP_STATS_TAG:
+		htt_print_tx_pdev_mumimo_grp_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_PDEV_DL_MU_OFDMA_STATS_TAG:
+		htt_print_tx_pdev_dl_mu_ofdma_sch_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_PDEV_UL_MU_OFDMA_STATS_TAG:
+		htt_print_tx_pdev_ul_mu_ofdma_sch_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_PDEV_DL_MU_MIMO_STATS_TAG:
+		htt_print_tx_pdev_dl_mu_mimo_sch_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_PDEV_UL_MU_MIMO_STATS_TAG:
+		htt_print_tx_pdev_ul_mu_mimo_sch_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_SFM_CMN_TAG:
+		htt_print_sfm_cmn_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_SRING_STATS_TAG:
+		htt_print_sring_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_RX_PDEV_FW_STATS_TAG:
+		htt_print_rx_pdev_fw_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG:
+		htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG:
+		htt_print_rx_pdev_fw_mpdu_drop_tlv_v(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_RX_SOC_FW_STATS_TAG:
+		htt_print_rx_soc_fw_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG:
+		htt_print_rx_soc_fw_refill_ring_empty_tlv_v(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG:
+		htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(
+				tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_RX_REFILL_RXDMA_ERR_TAG:
+		htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(
+				tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_RX_REFILL_REO_ERR_TAG:
+		htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(
+				tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_RX_REO_RESOURCE_STATS_TAG:
+		htt_print_rx_reo_debug_stats_tlv_v(
+				tag_buf, stats_req);
+		break;
+	case HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG:
+		htt_print_rx_pdev_fw_stats_phy_err_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_PDEV_RATE_STATS_TAG:
+		htt_print_tx_pdev_rate_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_RX_PDEV_RATE_STATS_TAG:
+		htt_print_rx_pdev_rate_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_RX_PDEV_RATE_EXT_STATS_TAG:
+		htt_print_rx_pdev_rate_ext_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG:
+		htt_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TX_SCHED_CMN_TAG:
+		htt_print_stats_tx_sched_cmn_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_PDEV_MPDU_STATS_TAG:
+		htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG:
+		htt_print_sched_txq_cmd_posted_tlv_v(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_RING_IF_CMN_TAG:
+		htt_print_ring_if_cmn_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_SFM_CLIENT_USER_TAG:
+		htt_print_sfm_client_user_tlv_v(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_SFM_CLIENT_TAG:
+		htt_print_sfm_client_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_TQM_ERROR_STATS_TAG:
+		htt_print_tx_tqm_error_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG:
+		htt_print_sched_txq_cmd_reaped_tlv_v(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_SRING_CMN_TAG:
+		htt_print_sring_cmn_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_SOUNDING_STATS_TAG:
+		htt_print_tx_sounding_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG:
+		htt_print_tx_selfgen_ac_err_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_SELFGEN_CMN_STATS_TAG:
+		htt_print_tx_selfgen_cmn_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_SELFGEN_AC_STATS_TAG:
+		htt_print_tx_selfgen_ac_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_SELFGEN_AX_STATS_TAG:
+		htt_print_tx_selfgen_ax_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG:
+		htt_print_tx_selfgen_ax_err_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_HWQ_MUMIMO_SCH_STATS_TAG:
+		htt_print_tx_hwq_mu_mimo_sch_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_HWQ_MUMIMO_MPDU_STATS_TAG:
+		htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_HWQ_MUMIMO_CMN_STATS_TAG:
+		htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_HW_INTR_MISC_TAG:
+		htt_print_hw_stats_intr_misc_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_HW_WD_TIMEOUT_TAG:
+		htt_print_hw_stats_wd_timeout_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_HW_PDEV_ERRS_TAG:
+		htt_print_hw_stats_pdev_errs_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_COUNTER_NAME_TAG:
+		htt_print_counter_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_TID_DETAILS_TAG:
+		htt_print_tx_tid_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_TID_DETAILS_V1_TAG:
+		htt_print_tx_tid_stats_v1_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_RX_TID_DETAILS_TAG:
+		htt_print_rx_tid_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_PEER_STATS_CMN_TAG:
+		htt_print_peer_stats_cmn_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_PEER_DETAILS_TAG:
+		htt_print_peer_details_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_PEER_MSDU_FLOWQ_TAG:
+		htt_print_msdu_flow_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_PEER_TX_RATE_STATS_TAG:
+		htt_print_tx_peer_rate_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_PEER_RX_RATE_STATS_TAG:
+		htt_print_rx_peer_rate_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_DE_COMPL_STATS_TAG:
+		htt_print_tx_de_compl_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_PDEV_CCA_1SEC_HIST_TAG:
+	case HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG:
+	case HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG:
+		htt_print_pdev_cca_stats_hist_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_PDEV_CCA_COUNTERS_TAG:
+		htt_print_pdev_stats_cca_counters_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_WHAL_TX_TAG:
+		htt_print_hw_stats_whal_tx_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_PDEV_TWT_SESSIONS_TAG:
+		htt_print_pdev_stats_twt_sessions_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_PDEV_TWT_SESSION_TAG:
+		htt_print_pdev_stats_twt_session_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG:
+		htt_print_sched_txq_sched_order_su_tlv_v(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG:
+		htt_print_sched_txq_sched_ineligibility_tlv_v(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_PDEV_OBSS_PD_TAG:
+		htt_print_pdev_obss_pd_stats_tlv_v(tag_buf, stats_req);
+		break;
+	case HTT_STATS_HW_WAR_TAG:
+		htt_print_hw_war_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_RING_BACKPRESSURE_STATS_TAG:
+		htt_print_backpressure_stats_tlv_v(tag_buf, user_data);
+		break;
+
+	case HTT_STATS_TX_PDEV_BE_DL_MU_OFDMA_STATS_TAG:
+		htt_print_tx_pdev_be_dl_mu_ofdma_sch_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_PDEV_BE_UL_MU_OFDMA_STATS_TAG:
+		htt_print_tx_pdev_be_ul_mu_ofdma_sch_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG:
+		htt_print_tx_pdev_rate_stats_be_ofdma_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_RX_PDEV_UL_MUMIMO_TRIG_BE_STATS_TAG:
+		htt_print_ul_mumimo_trig_be_stats(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_SELFGEN_BE_STATS_TAG:
+		htt_print_tx_selfgen_be_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_SELFGEN_BE_ERR_STATS_TAG:
+		htt_print_tx_selfgen_be_err_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_SELFGEN_BE_SCHED_STATUS_STATS_TAG:
+		htt_print_tx_selfgen_be_sched_status_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_PDEV_BE_UL_MU_MIMO_STATS_TAG:
+		htt_print_tx_pdev_be_ul_mu_mimo_sch_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_RX_PDEV_BE_UL_MIMO_USER_STATS_TAG:
+		htt_print_be_ul_mimo_user_stats(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_RX_PDEV_BE_UL_TRIG_STATS_TAG:
+		htt_print_be_ul_ofdma_trigger_stats(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TX_PDEV_BE_RATE_STATS_TAG:
+		htt_print_tx_pdev_be_rate_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_LATENCY_PROF_STATS_TAG:
+		htt_print_latency_prof_stats_tlv_v(tag_buf, user_data);
+		break;
+	case HTT_STATS_LATENCY_CTX_TAG:
+		htt_print_latency_prof_ctx_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_LATENCY_CNT_TAG:
+		htt_print_latency_prof_cnt(tag_buf, stats_req);
+		break;
+	case HTT_STATS_RX_PDEV_UL_TRIG_STATS_TAG:
+		htt_print_ul_ofdma_trigger_stats(tag_buf, stats_req);
+		break;
+	case HTT_STATS_RX_PDEV_UL_OFDMA_USER_STATS_TAG:
+		htt_print_ul_ofdma_user_stats(tag_buf, stats_req);
+		break;
+	case HTT_STATS_RX_PDEV_UL_MIMO_USER_STATS_TAG:
+		htt_print_ul_mimo_user_stats(tag_buf, stats_req);
+		break;
+	case HTT_STATS_RX_PDEV_UL_MUMIMO_TRIG_STATS_TAG:
+		htt_print_ul_mumimo_trig_stats(tag_buf, stats_req);
+		break;
+	case HTT_STATS_RX_FSE_STATS_TAG:
+		htt_print_rx_fse_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_PEER_SCHED_STATS_TAG:
+		htt_print_peer_sched_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG:
+		htt_print_sched_txq_supercycle_trigger_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG:
+		htt_print_pdev_ctrl_path_tx_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG:
+		htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_NDPA_STATS_TAG:
+		htt_print_txbf_ofdma_ndpa_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case  HTT_STATS_PER_RATE_STATS_TAG:
+		htt_print_tx_per_rate_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TXBF_OFDMA_NDP_STATS_TAG:
+		htt_print_txbf_ofdma_ndp_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_BRP_STATS_TAG:
+		htt_print_txbf_ofdma_brp_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_STEER_STATS_TAG:
+		htt_print_txbf_ofdma_steer_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG:
+		htt_print_tx_selfgen_ac_sched_status_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG:
+		htt_print_tx_selfgen_ax_sched_status_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_UNAVAILABLE_ERROR_STATS_TAG:
+		htt_print_unavailable_error_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_UNSUPPORTED_ERROR_STATS_TAG:
+		htt_print_unsupported_error_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_VDEV_RTT_RESP_STATS_TAG:
+		htt_print_vdev_rtt_resp_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_VDEV_RTT_INIT_STATS_TAG:
+		htt_print_vdev_rtt_init_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_PKTLOG_AND_HTT_RING_STATS_TAG:
+		htt_print_pktlog_and_htt_ring_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_DLPAGER_STATS_TAG:
+		htt_print_dlpager_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_PHY_COUNTERS_TAG:
+		htt_print_phy_counters_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_PHY_STATS_TAG:
+		htt_print_phy_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_STA_UL_OFDMA_STATS_TAG:
+		htt_print_sta_ul_ofdma_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_PHY_RESET_COUNTERS_TAG:
+		htt_print_phy_reset_counters_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_PHY_RESET_STATS_TAG:
+		htt_print_phy_reset_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
+		htt_print_soc_txrx_stats_common_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_VDEV_TXRX_STATS_HW_STATS_TAG:
+		htt_print_vdev_txrx_stats_hw_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_AST_ENTRY_TAG:
+		htt_print_ast_entry_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG:
+		htt_print_peer_ctrl_path_txrx_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_AX_NDPA_STATS_TAG:
+		htt_print_txbf_ofdma_ax_ndpa_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_AX_NDP_STATS_TAG:
+		htt_print_txbf_ofdma_ax_ndp_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_AX_BRP_STATS_TAG:
+		htt_print_txbf_ofdma_ax_brp_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_AX_STEER_STATS_TAG:
+		htt_print_txbf_ofdma_ax_steer_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_BE_NDPA_STATS_TAG:
+		htt_print_txbf_ofdma_be_ndpa_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_BE_NDP_STATS_TAG:
+		htt_print_txbf_ofdma_be_ndp_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_BE_BRP_STATS_TAG:
+		htt_print_txbf_ofdma_be_brp_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_BE_STEER_STATS_TAG:
+		htt_print_txbf_ofdma_be_steer_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_DMAC_RESET_STATS_TAG:
+		htt_print_dmac_reset_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_RX_PDEV_BE_UL_OFDMA_USER_STATS_TAG:
+		htt_print_be_ul_ofdma_user_stats(tag_buf, stats_req);
+		break;
+	case HTT_STATS_PHY_TPC_STATS_TAG:
+		htt_print_phy_tpc_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_PDEV_PUNCTURE_STATS_TAG:
+		htt_print_puncture_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_ML_PEER_DETAILS_TAG:
+		htt_print_ml_peer_details_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_ML_PEER_EXT_DETAILS_TAG:
+		htt_print_ml_peer_ext_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_ML_LINK_INFO_DETAILS_TAG:
+		htt_print_ml_link_info_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TX_PDEV_PPDU_DUR_TAG:
+		htt_print_tx_pdev_ppdu_dur_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_RX_PDEV_PPDU_DUR_TAG:
+		htt_print_rx_pdev_ppdu_dur_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_PDEV_SCHED_ALGO_OFDMA_STATS_TAG:
+		htt_print_pdev_sched_algo_ofdma_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_LATENCY_PROF_CAL_STATS_TAG:
+		htt_print_latency_prof_cal_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TX_PDEV_AP_EDCA_PARAMS_STATS_TAG:
+		htt_print_tx_pdev_ap_edca_params_stats_tlv_v(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_AX_STEER_MPDU_STATS_TAG:
+		htt_print_txbf_ofdma_ax_steer_mpdu_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_BE_STEER_MPDU_STATS_TAG:
+		htt_print_txbf_ofdma_be_steer_mpdu_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_PEER_AX_OFDMA_STATS_TAG:
+		htt_print_peer_ax_ofdma_stats(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TX_PDEV_MU_EDCA_PARAMS_STATS_TAG:
+		htt_print_tx_pdev_mu_edca_params_stats_tlv_v(tag_buf, stats_req);
+		break;
+	case HTT_STATS_ODD_PDEV_MANDATORY_TAG:
+		htt_print_odd_pdev_mandatory_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG:
+		htt_print_pdev_mbssid_ctrl_frame_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_TX_PDEV_MLO_ABORT_TAG:
+		htt_print_tx_pdev_stats_mlo_abort_tlv_v(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_TX_PDEV_MLO_TXOP_ABORT_TAG:
+		htt_print_tx_pdev_stats_mlo_txop_abort_tlv_v(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_TX_PDEV_SAWF_RATE_STATS_TAG:
+		htt_print_histogram_stats_tlv(tag_buf, stats_req);
+		break;
+	case HTT_STATS_UMAC_SSR_TAG:
+		htt_print_umac_ssr_stats_tlv(tag_buf, stats_req);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab,
+					  struct sk_buff *skb)
+{
+	struct ath12k_htt_extd_stats_msg *msg;
+	struct debug_htt_stats_req *stats_req;
+	struct ath12k *ar;
+	u32 len;
+	u64 cookie;
+	int ret;
+	bool send_completion = false;
+	u8 pdev_id;
+
+	msg = (struct ath12k_htt_extd_stats_msg *)skb->data;
+	cookie = __le64_to_cpu(msg->cookie);
+
+	if (u64_get_bits(cookie, HTT_STATS_COOKIE_MSB) != HTT_STATS_MAGIC_VALUE) {
+		ath12k_warn(ab, "received invalid htt ext stats event\n");
+		return;
+	}
+
+	pdev_id = u64_get_bits(cookie, HTT_STATS_COOKIE_LSB);
+	rcu_read_lock();
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+	rcu_read_unlock();
+	if (!ar) {
+		ath12k_warn(ab, "failed to get ar for pdev_id %d\n", pdev_id);
+		return;
+	}
+
+	stats_req = ar->debug.htt_stats.stats_req;
+	if (!stats_req)
+		return;
+
+	spin_lock_bh(&ar->debug.htt_stats.lock);
+
+	stats_req->done = u32_get_bits(__le32_to_cpu(msg->info1),
+				       HTT_T2H_EXT_STATS_INFO1_DONE);
+	if (stats_req->done)
+		send_completion = true;
+
+	spin_unlock_bh(&ar->debug.htt_stats.lock);
+
+	len = u32_get_bits(__le32_to_cpu(msg->info1), HTT_T2H_EXT_STATS_INFO1_LENGTH);
+	ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
+				     ath12k_dbg_htt_ext_stats_parse,
+				     stats_req);
+	if (ret)
+		ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
+
+	if (send_completion)
+		complete(&stats_req->cmpln);
+}
+
+static ssize_t ath12k_read_htt_stats_type(struct file *file,
+					  char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	char buf[32];
+	size_t len;
+
+	len = scnprintf(buf, sizeof(buf), "%u\n", ar->debug.htt_stats.type);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath12k_write_htt_stats_type(struct file *file,
+					   const char __user *user_buf,
+					   size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	u32 type;
+	unsigned int cfg_param[4] = {0};
+	int ret, num_args;
+	u8 *buf;
+
+	buf = vzalloc(count);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, user_buf, count)) {
+		vfree(buf);
+		return -EFAULT;
+	}
+
+	num_args = sscanf(buf, "%u %u %u %u %u\n", &type, &cfg_param[0],
+			  &cfg_param[1], &cfg_param[2], &cfg_param[3]);
+	vfree(buf);
+
+	if (type >= ATH12K_DBG_HTT_NUM_EXT_STATS)
+		return -E2BIG;
+
+	if (type == ATH12K_DBG_HTT_EXT_STATS_RESET)
+		return -EPERM;
+
+	ar->debug.htt_stats.type = type;
+	ar->debug.htt_stats.cfg_param[0] = cfg_param[0];
+	ar->debug.htt_stats.cfg_param[1] = cfg_param[1];
+	ar->debug.htt_stats.cfg_param[2] = cfg_param[2];
+	ar->debug.htt_stats.cfg_param[3] = cfg_param[3];
+
+	ret = count;
+	return ret;
+}
+
+static const struct file_operations fops_htt_stats_type = {
+	.read = ath12k_read_htt_stats_type,
+	.write = ath12k_write_htt_stats_type,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static int ath12k_prep_htt_stats_cfg_params(struct ath12k *ar, u8 type,
+					    const u8 *mac_addr,
+					    struct htt_ext_stats_cfg_params *cfg_params)
+{
+	if (!cfg_params)
+		return -EINVAL;
+
+	switch (type) {
+	case ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ:
+	case ATH12K_DBG_HTT_EXT_STATS_TX_MU_HWQ:
+		cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_HWQS;
+		break;
+	case ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED:
+		cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_TXQS;
+		break;
+	case ATH12K_DBG_HTT_EXT_STATS_TQM_CMDQ:
+		cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_CMDQS;
+		break;
+	case ATH12K_DBG_HTT_EXT_STATS_PEER_INFO:
+		cfg_params->cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
+		cfg_params->cfg0 |= FIELD_PREP(GENMASK(15, 1),
+					HTT_PEER_STATS_REQ_MODE_FLUSH_TQM);
+		cfg_params->cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE;
+		cfg_params->cfg2 |= FIELD_PREP(GENMASK(7, 0), mac_addr[0]);
+		cfg_params->cfg2 |= FIELD_PREP(GENMASK(15, 8), mac_addr[1]);
+		cfg_params->cfg2 |= FIELD_PREP(GENMASK(23, 16), mac_addr[2]);
+		cfg_params->cfg2 |= FIELD_PREP(GENMASK(31, 24), mac_addr[3]);
+		cfg_params->cfg3 |= FIELD_PREP(GENMASK(7, 0), mac_addr[4]);
+		cfg_params->cfg3 |= FIELD_PREP(GENMASK(15, 8), mac_addr[5]);
+		break;
+	case ATH12K_DBG_HTT_EXT_STATS_RING_IF_INFO:
+	case ATH12K_DBG_HTT_EXT_STATS_SRNG_INFO:
+		cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_RINGS;
+		break;
+	case ATH12K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST:
+		cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS;
+		break;
+	case ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS:
+		cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE;
+		break;
+	case ATH12K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO:
+		cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+int ath12k_debugfs_htt_stats_req(struct ath12k *ar)
+{
+	struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req;
+	u8 type = stats_req->type;
+	u64 cookie = 0;
+	int ret, pdev_id = ar->pdev->pdev_id;
+	struct htt_ext_stats_cfg_params cfg_params = { 0 };
+
+	init_completion(&stats_req->cmpln);
+
+	stats_req->done = false;
+	stats_req->pdev_id = pdev_id;
+
+	cookie = FIELD_PREP(HTT_STATS_COOKIE_MSB, HTT_STATS_MAGIC_VALUE) |
+		 FIELD_PREP(HTT_STATS_COOKIE_LSB, pdev_id);
+
+	ret = ath12k_prep_htt_stats_cfg_params(ar, type, stats_req->peer_addr,
+					       &cfg_params);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set htt stats cfg params: %d\n", ret);
+		return ret;
+	}
+	if (stats_req->override_cfg_param) {
+		cfg_params.cfg0 = stats_req->cfg_param[0];
+		cfg_params.cfg1 = stats_req->cfg_param[1];
+		cfg_params.cfg2 = stats_req->cfg_param[2];
+		cfg_params.cfg3 = stats_req->cfg_param[3];
+	}
+
+	ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar, type, &cfg_params, cookie);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+		return ret;
+	}
+
+	while (!wait_for_completion_timeout(&stats_req->cmpln, 3 * HZ)) {
+		spin_lock_bh(&ar->debug.htt_stats.lock);
+		if (!stats_req->done) {
+			stats_req->done = true;
+			spin_unlock_bh(&ar->debug.htt_stats.lock);
+			ath12k_warn(ar->ab, "stats request timed out\n");
+			return -ETIMEDOUT;
+		}
+		spin_unlock_bh(&ar->debug.htt_stats.lock);
+	}
+
+	return 0;
+}
+
+static int ath12k_open_htt_stats(struct inode *inode, struct file *file)
+{
+	struct ath12k *ar = inode->i_private;
+	struct debug_htt_stats_req *stats_req;
+	u8 type = ar->debug.htt_stats.type;
+	int ret;
+
+	if (type == ATH12K_DBG_HTT_EXT_STATS_RESET ||
+	    type == ATH12K_DBG_HTT_EXT_STATS_PEER_INFO ||
+	    type == ATH12K_DBG_HTT_EXT_PEER_CTRL_PATH_TXRX_STATS)
+		return -EPERM;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH12K_STATE_ON &&
+	    ar->ab->fw_mode != ATH12K_FIRMWARE_MODE_FTM) {
+		ret = -ENETDOWN;
+		goto err_unlock;
+	}
+
+	if (ar->debug.htt_stats.stats_req) {
+		ret = -EAGAIN;
+		goto err_unlock;
+	}
+
+	stats_req = vzalloc(sizeof(*stats_req) + ATH12K_HTT_STATS_BUF_SIZE);
+	if (!stats_req) {
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
+
+	ar->debug.htt_stats.stats_req = stats_req;
+	stats_req->type = type;
+	stats_req->cfg_param[0] = ar->debug.htt_stats.cfg_param[0];
+	stats_req->cfg_param[1] = ar->debug.htt_stats.cfg_param[1];
+	stats_req->cfg_param[2] = ar->debug.htt_stats.cfg_param[2];
+	stats_req->cfg_param[3] = ar->debug.htt_stats.cfg_param[3];
+	stats_req->override_cfg_param = !!stats_req->cfg_param[0] ||
+					!!stats_req->cfg_param[1] ||
+					!!stats_req->cfg_param[2] ||
+					!!stats_req->cfg_param[3];
+
+	ret = ath12k_debugfs_htt_stats_req(ar);
+	if (ret < 0)
+		goto out;
+
+	file->private_data = stats_req;
+
+	mutex_unlock(&ar->conf_mutex);
+
+	return 0;
+out:
+	vfree(stats_req);
+	ar->debug.htt_stats.stats_req = NULL;
+err_unlock:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static int ath12k_release_htt_stats(struct inode *inode, struct file *file)
+{
+	struct ath12k *ar = inode->i_private;
+
+	mutex_lock(&ar->conf_mutex);
+	vfree(file->private_data);
+	ar->debug.htt_stats.stats_req = NULL;
+	mutex_unlock(&ar->conf_mutex);
+
+	return 0;
+}
+
+static ssize_t ath12k_read_htt_stats(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct debug_htt_stats_req *stats_req = file->private_data;
+	char *buf;
+	u32 length = 0;
+
+	buf = stats_req->buf;
+	length = min_t(u32, stats_req->buf_len, ATH12K_HTT_STATS_BUF_SIZE);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, length);
+}
+
+static const struct file_operations fops_dump_htt_stats = {
+	.open = ath12k_open_htt_stats,
+	.release = ath12k_release_htt_stats,
+	.read = ath12k_read_htt_stats,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath12k_read_htt_stats_reset(struct file *file,
+					   char __user *user_buf,
+					   size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	char buf[32];
+	size_t len;
+
+	len = scnprintf(buf, sizeof(buf), "%u\n", ar->debug.htt_stats.reset);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath12k_write_htt_stats_reset(struct file *file,
+					    const char __user *user_buf,
+					    size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	u8 type;
+	struct htt_ext_stats_cfg_params cfg_params = { 0 };
+	int ret;
+
+	ret = kstrtou8_from_user(user_buf, count, 0, &type);
+	if (ret)
+		return ret;
+
+	if (type >= ATH12K_DBG_HTT_NUM_EXT_STATS ||
+	    type == ATH12K_DBG_HTT_EXT_STATS_RESET)
+		return -E2BIG;
+
+	mutex_lock(&ar->conf_mutex);
+	cfg_params.cfg0 = HTT_STAT_DEFAULT_RESET_START_OFFSET;
+	cfg_params.cfg1 = 1 << (cfg_params.cfg0 + type);
+	ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar,
+						 ATH12K_DBG_HTT_EXT_STATS_RESET,
+						 &cfg_params,
+						 0ULL);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+		mutex_unlock(&ar->conf_mutex);
+		return ret;
+	}
+
+	ar->debug.htt_stats.reset = type;
+	mutex_unlock(&ar->conf_mutex);
+
+	ret = count;
+
+	return ret;
+}
+
+static const struct file_operations fops_htt_stats_reset = {
+	.read = ath12k_read_htt_stats_reset,
+	.write = ath12k_write_htt_stats_reset,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+void ath12k_debugfs_htt_stats_init(struct ath12k *ar)
+{
+	spin_lock_init(&ar->debug.htt_stats.lock);
+	debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev,
+			    ar, &fops_htt_stats_type);
+	debugfs_create_file("htt_stats", 0400, ar->debug.debugfs_pdev,
+			    ar, &fops_dump_htt_stats);
+	debugfs_create_file("htt_stats_reset", 0600, ar->debug.debugfs_pdev,
+			    ar, &fops_htt_stats_reset);
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h	2024-03-18 14:40:14.847741224 +0100
@@ -0,0 +1,4277 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef DEBUG_HTT_STATS_H
+#define DEBUG_HTT_STATS_H
+
+#define HTT_STATS_COOKIE_LSB    GENMASK_ULL(31, 0)
+#define HTT_STATS_COOKIE_MSB    GENMASK_ULL(63, 32)
+#define HTT_STATS_MAGIC_VALUE   0xF0F0F0F0
+#define HTT_TLV_HDR_LEN                4
+
+enum htt_tlv_tag_t {
+	HTT_STATS_TX_PDEV_CMN_TAG                           = 0,
+	HTT_STATS_TX_PDEV_UNDERRUN_TAG                      = 1,
+	HTT_STATS_TX_PDEV_SIFS_TAG                          = 2,
+	HTT_STATS_TX_PDEV_FLUSH_TAG                         = 3,
+	HTT_STATS_TX_PDEV_PHY_ERR_TAG                       = 4,
+	HTT_STATS_STRING_TAG                                = 5,
+	HTT_STATS_TX_HWQ_CMN_TAG                            = 6,
+	HTT_STATS_TX_HWQ_DIFS_LATENCY_TAG                   = 7,
+	HTT_STATS_TX_HWQ_CMD_RESULT_TAG                     = 8,
+	HTT_STATS_TX_HWQ_CMD_STALL_TAG                      = 9,
+	HTT_STATS_TX_HWQ_FES_STATUS_TAG                     = 10,
+	HTT_STATS_TX_TQM_GEN_MPDU_TAG                       = 11,
+	HTT_STATS_TX_TQM_LIST_MPDU_TAG                      = 12,
+	HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG                  = 13,
+	HTT_STATS_TX_TQM_CMN_TAG                            = 14,
+	HTT_STATS_TX_TQM_PDEV_TAG                           = 15,
+	HTT_STATS_TX_TQM_CMDQ_STATUS_TAG                    = 16,
+	HTT_STATS_TX_DE_EAPOL_PACKETS_TAG                   = 17,
+	HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG                 = 18,
+	HTT_STATS_TX_DE_CLASSIFY_STATS_TAG                  = 19,
+	HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG                 = 20,
+	HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG                 = 21,
+	HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG                 = 22,
+	HTT_STATS_TX_DE_CMN_TAG                             = 23,
+	HTT_STATS_RING_IF_TAG                               = 24,
+	HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG                 = 25,
+	HTT_STATS_SFM_CMN_TAG                               = 26,
+	HTT_STATS_SRING_STATS_TAG                           = 27,
+	HTT_STATS_RX_PDEV_FW_STATS_TAG                      = 28,
+	HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG              = 29,
+	HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG                  = 30,
+	HTT_STATS_RX_SOC_FW_STATS_TAG                       = 31,
+	HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG           = 32,
+	HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG      = 33,
+	HTT_STATS_TX_PDEV_RATE_STATS_TAG                    = 34,
+	HTT_STATS_RX_PDEV_RATE_STATS_TAG                    = 35,
+	HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG           = 36,
+	HTT_STATS_TX_SCHED_CMN_TAG                          = 37,
+	HTT_STATS_TX_PDEV_MUMIMO_MPDU_STATS_TAG             = 38,
+	HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG                  = 39,
+	HTT_STATS_RING_IF_CMN_TAG                           = 40,
+	HTT_STATS_SFM_CLIENT_USER_TAG                       = 41,
+	HTT_STATS_SFM_CLIENT_TAG                            = 42,
+	HTT_STATS_TX_TQM_ERROR_STATS_TAG                    = 43,
+	HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG                  = 44,
+	HTT_STATS_SRING_CMN_TAG                             = 45,
+	HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG               = 46,
+	HTT_STATS_TX_SELFGEN_CMN_STATS_TAG                  = 47,
+	HTT_STATS_TX_SELFGEN_AC_STATS_TAG                   = 48,
+	HTT_STATS_TX_SELFGEN_AX_STATS_TAG                   = 49,
+	HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG               = 50,
+	HTT_STATS_TX_HWQ_MUMIMO_SCH_STATS_TAG               = 51,
+	HTT_STATS_TX_HWQ_MUMIMO_MPDU_STATS_TAG              = 52,
+	HTT_STATS_TX_HWQ_MUMIMO_CMN_STATS_TAG               = 53,
+	HTT_STATS_HW_INTR_MISC_TAG                          = 54,
+	HTT_STATS_HW_WD_TIMEOUT_TAG                         = 55,
+	HTT_STATS_HW_PDEV_ERRS_TAG                          = 56,
+	HTT_STATS_COUNTER_NAME_TAG                          = 57,
+	HTT_STATS_TX_TID_DETAILS_TAG                        = 58,
+	HTT_STATS_RX_TID_DETAILS_TAG                        = 59,
+	HTT_STATS_PEER_STATS_CMN_TAG                        = 60,
+	HTT_STATS_PEER_DETAILS_TAG                          = 61,
+	HTT_STATS_PEER_TX_RATE_STATS_TAG                    = 62,
+	HTT_STATS_PEER_RX_RATE_STATS_TAG                    = 63,
+	HTT_STATS_PEER_MSDU_FLOWQ_TAG                       = 64,
+	HTT_STATS_TX_DE_COMPL_STATS_TAG                     = 65,
+	HTT_STATS_WHAL_TX_TAG                               = 66,
+	HTT_STATS_TX_PDEV_SIFS_HIST_TAG                     = 67,
+	HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG              = 68,
+	HTT_STATS_TX_TID_DETAILS_V1_TAG                     = 69,
+	HTT_STATS_PDEV_CCA_1SEC_HIST_TAG                    = 70,
+	HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG                 = 71,
+	HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG              = 72,
+	HTT_STATS_PDEV_CCA_COUNTERS_TAG                     = 73,
+	HTT_STATS_TX_PDEV_MPDU_STATS_TAG                    = 74,
+	HTT_STATS_PDEV_TWT_SESSIONS_TAG                     = 75,
+	HTT_STATS_PDEV_TWT_SESSION_TAG                      = 76,
+	HTT_STATS_RX_REFILL_RXDMA_ERR_TAG                   = 77,
+	HTT_STATS_RX_REFILL_REO_ERR_TAG                     = 78,
+	HTT_STATS_RX_REO_RESOURCE_STATS_TAG                 = 79,
+	HTT_STATS_TX_SOUNDING_STATS_TAG                     = 80,
+	HTT_STATS_TX_PDEV_TX_PPDU_STATS_TAG                 = 81,
+	HTT_STATS_TX_PDEV_TRIED_MPDU_CNT_HIST_TAG           = 82,
+	HTT_STATS_TX_HWQ_TRIED_MPDU_CNT_HIST_TAG            = 83,
+	HTT_STATS_TX_HWQ_TXOP_USED_CNT_HIST_TAG             = 84,
+	HTT_STATS_TX_DE_FW2WBM_RING_FULL_HIST_TAG           = 85,
+	HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG              = 86,
+	HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG         = 87,
+	HTT_STATS_PDEV_OBSS_PD_TAG                          = 88,
+	HTT_STATS_HW_WAR_TAG				    = 89,
+	HTT_STATS_RING_BACKPRESSURE_STATS_TAG		    = 90,
+	HTT_STATS_LATENCY_PROF_STATS_TAG		    = 91,
+	HTT_STATS_LATENCY_CTX_TAG			    = 92,
+	HTT_STATS_LATENCY_CNT_TAG			    = 93,
+	HTT_STATS_RX_PDEV_UL_TRIG_STATS_TAG		    = 94,
+	HTT_STATS_RX_PDEV_UL_OFDMA_USER_STATS_TAG	    = 95,
+	HTT_STATS_RX_PDEV_UL_MIMO_USER_STATS_TAG	    = 96,
+	HTT_STATS_RX_PDEV_UL_MUMIMO_TRIG_STATS_TAG	    = 97,
+	HTT_STATS_RX_FSE_STATS_TAG			    = 98,
+	HTT_STATS_PEER_SCHED_STATS_TAG			    = 99,
+	HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG	    = 100,
+	HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG		    = 101,
+	HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG		    = 102,
+	HTT_STATS_RX_PDEV_RATE_EXT_STATS_TAG		    = 103,
+	HTT_STATS_TX_PDEV_DL_MU_MIMO_STATS_TAG		    = 104,
+	HTT_STATS_TX_PDEV_UL_MU_MIMO_STATS_TAG		    = 105,
+	HTT_STATS_TX_PDEV_DL_MU_OFDMA_STATS_TAG		    = 106,
+	HTT_STATS_TX_PDEV_UL_MU_OFDMA_STATS_TAG		    = 107,
+	HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG		    = 108,
+	HTT_STATS_UNSUPPORTED_ERROR_STATS_TAG		    = 109,
+	HTT_STATS_UNAVAILABLE_ERROR_STATS_TAG		    = 110,
+	HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG	    = 111,
+	HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG	    = 112,
+	HTT_STATS_TXBF_OFDMA_NDPA_STATS_TAG		    = 113,
+	HTT_STATS_TXBF_OFDMA_NDP_STATS_TAG		    = 114,
+	HTT_STATS_TXBF_OFDMA_BRP_STATS_TAG		    = 115,
+	HTT_STATS_TXBF_OFDMA_STEER_STATS_TAG		    = 116,
+	HTT_STATS_STA_UL_OFDMA_STATS_TAG		    = 117,
+	HTT_STATS_VDEV_RTT_RESP_STATS_TAG		    = 118,
+	HTT_STATS_PKTLOG_AND_HTT_RING_STATS_TAG		    = 119,
+	HTT_STATS_DLPAGER_STATS_TAG			    = 120,
+	HTT_STATS_PHY_COUNTERS_TAG			    = 121,
+	HTT_STATS_PHY_STATS_TAG				    = 122,
+	HTT_STATS_PHY_RESET_COUNTERS_TAG		    = 123,
+	HTT_STATS_PHY_RESET_STATS_TAG			    = 124,
+	HTT_STATS_SOC_TXRX_STATS_COMMON_TAG		    = 125,
+	HTT_STATS_VDEV_TXRX_STATS_HW_STATS_TAG		    = 126,
+	HTT_STATS_VDEV_RTT_INIT_STATS_TAG		    = 127,
+	HTT_STATS_PER_RATE_STATS_TAG			    = 128,
+	HTT_STATS_MU_PPDU_DIST_TAG			    = 129,
+	HTT_STATS_TX_PDEV_MUMIMO_GRP_STATS_TAG		    = 130,
+	/* 11be stats */
+	HTT_STATS_TX_PDEV_BE_RATE_STATS_TAG		    = 131,
+	HTT_STATS_AST_ENTRY_TAG				    = 132,
+	HTT_STATS_TX_PDEV_BE_DL_MU_OFDMA_STATS_TAG	    = 133,
+	HTT_STATS_TX_PDEV_BE_UL_MU_OFDMA_STATS_TAG	    = 134,
+	HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG	    = 135,
+	HTT_STATS_RX_PDEV_UL_MUMIMO_TRIG_BE_STATS_TAG	    = 136,
+	HTT_STATS_TX_SELFGEN_BE_ERR_STATS_TAG		    = 137,
+	HTT_STATS_TX_SELFGEN_BE_STATS_TAG		    = 138,
+	HTT_STATS_TX_SELFGEN_BE_SCHED_STATUS_STATS_TAG	    = 139,
+	HTT_STATS_TX_PDEV_BE_UL_MU_MIMO_STATS_TAG	    = 140,
+	HTT_STATS_RX_PDEV_BE_UL_MIMO_USER_STATS_TAG	    = 141,
+	HTT_STATS_RX_RING_STATS_TAG			    = 142,
+	HTT_STATS_RX_PDEV_BE_UL_TRIG_STATS_TAG		    = 143,
+	HTT_STATS_TX_PDEV_SAWF_RATE_STATS_TAG		    = 144,
+	HTT_STATS_STRM_GEN_MPDUS_TAG			    = 145,
+	HTT_STATS_STRM_GEN_MPDUS_DETAILS_TAG		    = 146,
+	HTT_STATS_TXBF_OFDMA_AX_NDPA_STATS_TAG		    = 147,
+	HTT_STATS_TXBF_OFDMA_AX_NDP_STATS_TAG		    = 148,
+	HTT_STATS_TXBF_OFDMA_AX_BRP_STATS_TAG		    = 149,
+	HTT_STATS_TXBF_OFDMA_AX_STEER_STATS_TAG		    = 150,
+	HTT_STATS_TXBF_OFDMA_BE_NDPA_STATS_TAG		    = 151,
+	HTT_STATS_TXBF_OFDMA_BE_NDP_STATS_TAG		    = 152,
+	HTT_STATS_TXBF_OFDMA_BE_BRP_STATS_TAG		    = 153,
+	HTT_STATS_TXBF_OFDMA_BE_STEER_STATS_TAG		    = 154,
+	HTT_STATS_DMAC_RESET_STATS_TAG			    = 155,
+	HTT_STATS_RX_PDEV_BE_UL_OFDMA_USER_STATS_TAG	    = 156,
+	HTT_STATS_PHY_TPC_STATS_TAG			    = 157,
+	HTT_STATS_PDEV_PUNCTURE_STATS_TAG		    = 158,
+	HTT_STATS_ML_PEER_DETAILS_TAG			    = 159,
+	HTT_STATS_ML_PEER_EXT_DETAILS_TAG		    = 160,
+	HTT_STATS_ML_LINK_INFO_DETAILS_TAG		    = 161,
+	HTT_STATS_TX_PDEV_PPDU_DUR_TAG			    = 162,
+	HTT_STATS_RX_PDEV_PPDU_DUR_TAG			    = 163,
+	HTT_STATS_ODD_PDEV_MANDATORY_TAG		    = 164,
+	HTT_STATS_PDEV_SCHED_ALGO_OFDMA_STATS_TAG	    = 165,
+	HTT_DBG_ODD_MANDATORY_MUMIMO_TAG		    = 166,
+	HTT_DBG_ODD_MANDATORY_MUOFDMA_TAG		    = 167,
+	HTT_STATS_LATENCY_PROF_CAL_STATS_TAG		    = 168,
+	HTT_STATS_TX_PDEV_MUEDCA_PARAMS_STATS_TAG	    = 169,
+	HTT_STATS_PDEV_BW_MGR_STATS_TAG			    = 170,
+	HTT_STATS_TX_PDEV_AP_EDCA_PARAMS_STATS_TAG	    = 171,
+	HTT_STATS_TXBF_OFDMA_AX_STEER_MPDU_STATS_TAG	    = 172,
+	HTT_STATS_TXBF_OFDMA_BE_STEER_MPDU_STATS_TAG	    = 173,
+	HTT_STATS_PEER_AX_OFDMA_STATS_TAG		    = 174,
+	HTT_STATS_TX_PDEV_MU_EDCA_PARAMS_STATS_TAG	    = 175,
+	HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG	    = 176,
+	HTT_STATS_TX_PDEV_MLO_ABORT_TAG			    = 177,
+	HTT_STATS_TX_PDEV_MLO_TXOP_ABORT_TAG		    = 178,
+	HTT_STATS_UMAC_SSR_TAG				    = 179,
+	HTT_STATS_MAX_TAG,
+};
+
+#define HTT_STATS_MAX_STRING_SZ32            4
+#define HTT_STATS_MACID_INVALID              0xff
+#define HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS     10
+#define HTT_TX_HWQ_MAX_CMD_RESULT_STATS      13
+#define HTT_TX_HWQ_MAX_CMD_STALL_STATS       5
+#define HTT_TX_HWQ_MAX_FES_RESULT_STATS      10
+#define HTT_PDEV_STATS_PPDU_DUR_HIST_BINS 16
+#define HTT_PDEV_STATS_PPDU_DUR_HIST_INTERVAL_US 250
+
+enum htt_tx_pdev_underrun_enum {
+	HTT_STATS_TX_PDEV_NO_DATA_UNDERRUN           = 0,
+	HTT_STATS_TX_PDEV_DATA_UNDERRUN_BETWEEN_MPDU = 1,
+	HTT_STATS_TX_PDEV_DATA_UNDERRUN_WITHIN_MPDU  = 2,
+	HTT_TX_PDEV_MAX_URRN_STATS                   = 3,
+};
+
+enum htt_ppdu_stats_ru_size {
+	HTT_PPDU_STATS_RU_26,
+	HTT_PPDU_STATS_RU_52,
+	HTT_PPDU_STATS_RU_52_26,
+	HTT_PPDU_STATS_RU_106,
+	HTT_PPDU_STATS_RU_106_26,
+	HTT_PPDU_STATS_RU_242,
+	HTT_PPDU_STATS_RU_484,
+	HTT_PPDU_STATS_RU_484_242,
+	HTT_PPDU_STATS_RU_996,
+	HTT_PPDU_STATS_RU_996_484,
+	HTT_PPDU_STATS_RU_996_484_242,
+	HTT_PPDU_STATS_RU_996x2,
+	HTT_PPDU_STATS_RU_996x2_484,
+	HTT_PPDU_STATS_RU_996x3,
+	HTT_PPDU_STATS_RU_996x3_484,
+	HTT_PPDU_STATS_RU_996x4,
+};
+
+#define HTT_TX_PDEV_MAX_FLUSH_REASON_STATS     150
+#define HTT_TX_PDEV_MAX_SIFS_BURST_STATS       9
+#define HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS  10
+#define HTT_TX_PDEV_MAX_PHY_ERR_STATS          18
+#define HTT_TX_PDEV_SCHED_TX_MODE_MAX          4
+#define HTT_TX_PDEV_NUM_SCHED_ORDER_LOG        20
+
+#define HTT_RX_STATS_REFILL_MAX_RING         4
+#define HTT_RX_STATS_RXDMA_MAX_ERR           16
+#define HTT_RX_STATS_FW_DROP_REASON_MAX      16
+
+/* Bytes stored in little endian order */
+/* Length should be multiple of DWORD */
+struct htt_stats_string_tlv {
+	u32 data[0]; /* Can be variable length */
+} __packed;
+
+#define HTT_STATS_MAC_ID	GENMASK(7, 0)
+
+/* == TX PDEV STATS == */
+struct htt_tx_pdev_stats_cmn_tlv {
+	u32 mac_id__word;
+	u32 hw_queued;
+	u32 hw_reaped;
+	u32 underrun;
+	u32 hw_paused;
+	u32 hw_flush;
+	u32 hw_filt;
+	u32 tx_abort;
+	u32 mpdu_requed;
+	u32 tx_xretry;
+	u32 data_rc;
+	u32 mpdu_dropped_xretry;
+	u32 illgl_rate_phy_err;
+	u32 cont_xretry;
+	u32 tx_timeout;
+	u32 pdev_resets;
+	u32 phy_underrun;
+	u32 txop_ovf;
+	u32 seq_posted;
+	u32 seq_failed_queueing;
+	u32 seq_completed;
+	u32 seq_restarted;
+	u32 mu_seq_posted;
+	u32 seq_switch_hw_paused;
+	u32 next_seq_posted_dsr;
+	u32 seq_posted_isr;
+	u32 seq_ctrl_cached;
+	u32 mpdu_count_tqm;
+	u32 msdu_count_tqm;
+	u32 mpdu_removed_tqm;
+	u32 msdu_removed_tqm;
+	u32 mpdus_sw_flush;
+	u32 mpdus_hw_filter;
+	u32 mpdus_truncated;
+	u32 mpdus_ack_failed;
+	u32 mpdus_expired;
+	u32 mpdus_seq_hw_retry;
+	u32 ack_tlv_proc;
+	u32 coex_abort_mpdu_cnt_valid;
+	u32 coex_abort_mpdu_cnt;
+	u32 num_total_ppdus_tried_ota;
+	u32 num_data_ppdus_tried_ota;
+	u32 local_ctrl_mgmt_enqued;
+	u32 local_ctrl_mgmt_freed;
+	u32 local_data_enqued;
+	u32 local_data_freed;
+	u32 mpdu_tried;
+	u32 isr_wait_seq_posted;
+
+	u32 tx_active_dur_us_low;
+	u32 tx_active_dur_us_high;
+	u32 remove_mpdus_max_retries;
+	u32 comp_delivered;
+	u32 ppdu_ok;
+	u32 self_triggers;
+	u32 tx_time_dur_data;
+	u32 seq_qdepth_repost_stop;
+	u32 mu_seq_min_msdu_repost_stop;
+	u32 seq_min_msdu_repost_stop;
+	u32 seq_txop_repost_stop;
+	u32 next_seq_cancel;
+	u32 fes_offsets_err_cnt;
+	u32 num_mu_peer_blacklisted;
+	u32 mu_ofdma_seq_posted;
+	u32 ul_mumimo_seq_posted;
+	u32 ul_ofdma_seq_posted;
+
+	u32 thermal_suspend_cnt;
+	u32 dfs_suspend_cnt;
+	u32 tx_abort_suspend_cnt;
+	u32 tgt_specific_opaque_txq_suspend_info;
+	u32 last_suspend_reason;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_urrn_tlv_v {
+	u32 urrn_stats[0]; /* HTT_TX_PDEV_MAX_URRN_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_flush_tlv_v {
+	u32 flush_errs[0]; /* HTT_TX_PDEV_MAX_FLUSH_REASON_STATS */
+};
+
+#define HTT_TX_PDEV_STATS_MLO_ABORT_TLV_SZ(_num_elems) (sizeof(u32) * (_num_elems))
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_mlo_abort_tlv_v {
+	u32 mlo_abort_cnt[0]; /* HTT_TX_PDEV_MAX_MLO_ABORT_REASON_STATS */
+};
+
+#define HTT_TX_PDEV_STATS_MLO_TXOP_ABORT_TLV_SZ(_num_elems) (sizeof(u32) * (_num_elems))
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_mlo_txop_abort_tlv_v {
+	u32 mlo_txop_abort_cnt[0]; /* HTT_TX_PDEV_MAX_MLO_ABORT_REASON_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_sifs_tlv_v {
+	u32 sifs_status[0]; /* HTT_TX_PDEV_MAX_SIFS_BURST_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_phy_err_tlv_v {
+	u32  phy_errs[0]; /* HTT_TX_PDEV_MAX_PHY_ERR_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_sifs_hist_tlv_v {
+	u32 sifs_hist_status[0]; /* HTT_TX_PDEV_SIFS_BURST_HIST_STATS */
+};
+
+struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v {
+	u32 num_data_ppdus_legacy_su;
+	u32 num_data_ppdus_ac_su;
+	u32 num_data_ppdus_ax_su;
+	u32 num_data_ppdus_ac_su_txbf;
+	u32 num_data_ppdus_ax_su_txbf;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size.
+ *
+ *  Tried_mpdu_cnt_hist is the histogram of MPDU tries per HWQ.
+ *  The tries here is the count of the MPDUs within a PPDU that the
+ *  HW had attempted to transmit on air, for the HWSCH Schedule
+ *  command submitted by FW. It is not the retry attempts.
+ *  The histogram bins are 0-29, 30-59, 60-89 and so on. There are
+ *  10 bins in this histogram. They are defined in FW using the
+ *  following macros
+ *  #define WAL_MAX_TRIED_MPDU_CNT_HISTOGRAM 9
+ *  #define WAL_TRIED_MPDU_CNT_HISTOGRAM_INTERVAL 30
+ */
+struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v {
+	u32 hist_bin_size;
+	u32 tried_mpdu_cnt_hist[]; /* HTT_TX_PDEV_TRIED_MPDU_CNT_HIST */
+};
+
+/* == SOC ERROR STATS == */
+
+/* =============== PDEV ERROR STATS ============== */
+#define HTT_STATS_MAX_HW_INTR_NAME_LEN 8
+struct htt_hw_stats_intr_misc_tlv {
+	/* Stored as little endian */
+	u8 hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN];
+	u32 mask;
+	u32 count;
+};
+
+#define HTT_STATS_MAX_HW_MODULE_NAME_LEN 8
+struct htt_hw_stats_wd_timeout_tlv {
+	/* Stored as little endian */
+	u8 hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN];
+	u32 count;
+};
+
+struct htt_hw_stats_pdev_errs_tlv {
+	u32    mac_id__word; /* BIT [ 7 :  0] : mac_id */
+	u32    tx_abort;
+	u32    tx_abort_fail_count;
+	u32    rx_abort;
+	u32    rx_abort_fail_count;
+	u32    warm_reset;
+	u32    cold_reset;
+	u32    tx_flush;
+	u32    tx_glb_reset;
+	u32    tx_txq_reset;
+	u32    rx_timeout_reset;
+	u32    mac_cold_reset_restore_cal;
+	u32    mac_cold_reset;
+	u32    mac_warm_reset;
+	u32    mac_only_reset;
+	u32    phy_warm_reset;
+	u32    phy_warm_reset_ucode_trig;
+	u32    mac_warm_reset_restore_cal;
+	u32    mac_sfm_reset;
+	u32    phy_warm_reset_m3_ssr;
+	u32    phy_warm_reset_reason_phy_m3;
+	u32    phy_warm_reset_reason_tx_hw_stuck;
+	u32    phy_warm_reset_reason_num_cca_rx_frame_stuck;
+	u32    phy_warm_reset_reason_wal_rx_recovery_rst_rx_busy;
+	u32    phy_warm_reset_reason_wal_rx_recovery_rst_mac_hang;
+	u32    phy_warm_reset_reason_mac_reset_converted_phy_reset;
+	u32    wal_rx_recovery_rst_mac_hang_count;
+	u32    wal_rx_recovery_rst_known_sig_count;
+	u32    wal_rx_recovery_rst_no_rx_count;
+	u32    wal_rx_recovery_rst_no_rx_consecutive_count;
+	u32    wal_rx_recovery_rst_rx_busy_count;
+	u32    wal_rx_recovery_rst_phy_mac_hang_count;
+	u32    rx_flush_cnt;
+	u32    phy_warm_reset_reason_tx_lifetime_expiry_cca_stuck;
+	u32    phy_warm_reset_reason_tx_consecutive_flush9_war;
+	u32    phy_warm_reset_reason_tx_hwsch_reset_war;
+	u32    phy_warm_reset_reason_hwsch_wdog_or_cca_wdog_war;
+	u32    fw_rx_rings_reset;
+};
+
+struct htt_hw_stats_whal_tx_tlv {
+	u32 mac_id__word;
+	u32 last_unpause_ppdu_id;
+	u32 hwsch_unpause_wait_tqm_write;
+	u32 hwsch_dummy_tlv_skipped;
+	u32 hwsch_misaligned_offset_received;
+	u32 hwsch_reset_count;
+	u32 hwsch_dev_reset_war;
+	u32 hwsch_delayed_pause;
+	u32 hwsch_long_delayed_pause;
+	u32 sch_rx_ppdu_no_response;
+	u32 sch_selfgen_response;
+	u32 sch_rx_sifs_resp_trigger;
+};
+
+/* ============ PEER STATS ============ */
+#define	HTT_MSDU_FLOW_STATS_TX_FLOW_NO	GENMASK(15, 0)
+#define	HTT_MSDU_FLOW_STATS_TID_NUM	GENMASK(19, 16)
+#define	HTT_MSDU_FLOW_STATS_DROP_RULE	BIT(20)
+
+struct htt_msdu_flow_stats_tlv {
+	u32 last_update_timestamp;
+	u32 last_add_timestamp;
+	u32 last_remove_timestamp;
+	u32 total_processed_msdu_count;
+	u32 cur_msdu_count_in_flowq;
+	u32 sw_peer_id;
+	u32 tx_flow_no__tid_num__drop_rule;
+	u32 last_cycle_enqueue_count;
+	u32 last_cycle_dequeue_count;
+	u32 last_cycle_drop_count;
+	u32 current_drop_th;
+};
+
+#define MAX_HTT_TID_NAME 8
+
+#define	HTT_TX_TID_STATS_SW_PEER_ID		GENMASK(15, 0)
+#define	HTT_TX_TID_STATS_TID_NUM		GENMASK(31, 16)
+#define	HTT_TX_TID_STATS_NUM_SCHED_PENDING	GENMASK(7, 0)
+#define	HTT_TX_TID_STATS_NUM_PPDU_IN_HWQ	GENMASK(15, 8)
+
+/* Tidq stats */
+struct htt_tx_tid_stats_tlv {
+	/* Stored as little endian */
+	u8     tid_name[MAX_HTT_TID_NAME];
+	u32 sw_peer_id__tid_num;
+	u32 num_sched_pending__num_ppdu_in_hwq;
+	u32 tid_flags;
+	u32 hw_queued;
+	u32 hw_reaped;
+	u32 mpdus_hw_filter;
+
+	u32 qdepth_bytes;
+	u32 qdepth_num_msdu;
+	u32 qdepth_num_mpdu;
+	u32 last_scheduled_tsmp;
+	u32 pause_module_id;
+	u32 block_module_id;
+	u32 tid_tx_airtime;
+};
+
+#define	HTT_TX_TID_STATS_V1_SW_PEER_ID		GENMASK(15, 0)
+#define	HTT_TX_TID_STATS_V1_TID_NUM		GENMASK(31, 16)
+#define	HTT_TX_TID_STATS_V1_NUM_SCHED_PENDING	GENMASK(7, 0)
+#define	HTT_TX_TID_STATS_V1_NUM_PPDU_IN_HWQ	GENMASK(15, 8)
+
+/* Tidq stats */
+struct htt_tx_tid_stats_v1_tlv {
+	/* Stored as little endian */
+	u8 tid_name[MAX_HTT_TID_NAME];
+	u32 sw_peer_id__tid_num;
+	u32 num_sched_pending__num_ppdu_in_hwq;
+	u32 tid_flags;
+	u32 max_qdepth_bytes;
+	u32 max_qdepth_n_msdus;
+	u32 rsvd;
+
+	u32 qdepth_bytes;
+	u32 qdepth_num_msdu;
+	u32 qdepth_num_mpdu;
+	u32 last_scheduled_tsmp;
+	u32 pause_module_id;
+	u32 block_module_id;
+	u32 tid_tx_airtime;
+	u32 allow_n_flags;
+	u32 sendn_frms_allowed;
+};
+
+#define	HTT_RX_TID_STATS_SW_PEER_ID	GENMASK(15, 0)
+#define	HTT_RX_TID_STATS_TID_NUM	GENMASK(31, 16)
+
+struct htt_rx_tid_stats_tlv {
+	u32 sw_peer_id__tid_num;
+	u8 tid_name[MAX_HTT_TID_NAME];
+	u32 dup_in_reorder;
+	u32 dup_past_outside_window;
+	u32 dup_past_within_window;
+	u32 rxdesc_err_decrypt;
+	u32 tid_rx_airtime;
+};
+
+#define HTT_MAX_COUNTER_NAME 8
+struct htt_counter_tlv {
+	u8 counter_name[HTT_MAX_COUNTER_NAME];
+	u32 count;
+};
+
+struct htt_peer_stats_cmn_tlv {
+	u32 ppdu_cnt;
+	u32 mpdu_cnt;
+	u32 msdu_cnt;
+	u32 pause_bitmap;
+	u32 block_bitmap;
+	u32 current_timestamp;
+	u32 peer_tx_airtime;
+	u32 peer_rx_airtime;
+	s32 rssi;
+	u32 peer_enqueued_count_low;
+	u32 peer_enqueued_count_high;
+	u32 peer_dequeued_count_low;
+	u32 peer_dequeued_count_high;
+	u32 peer_dropped_count_low;
+	u32 peer_dropped_count_high;
+	u32 ppdu_transmitted_bytes_low;
+	u32 ppdu_transmitted_bytes_high;
+	u32 peer_ttl_removed_count;
+	u32 inactive_time;
+	u32 remove_mpdus_max_retries;
+};
+
+#define HTT_PEER_DETAILS_VDEV_ID	GENMASK(7, 0)
+#define HTT_PEER_DETAILS_PDEV_ID	GENMASK(15, 8)
+#define HTT_PEER_DETAILS_AST_IDX	GENMASK(31, 16)
+
+struct htt_peer_details_tlv {
+	u32 peer_type;
+	u32 sw_peer_id;
+	u32 vdev_pdev_ast_idx;
+	struct htt_mac_addr mac_addr;
+	u32 peer_flags;
+	u32 qpeer_flags;
+};
+
+struct htt_ast_entry_tlv {
+	u32 sw_peer_id;
+	u32 ast_index;
+	u8 pdev_id;
+	u8 vdev_id;
+	u8 next_hop;
+	u8 mcast;
+	u8 monitor_direct;
+	u8 mesh_sta;
+	u8 mec;
+	u8 intra_bss;
+	u32 reserved;
+	struct htt_mac_addr mac_addr;
+};
+
+enum htt_stats_param_type {
+	HTT_STATS_PREAM_OFDM,
+	HTT_STATS_PREAM_CCK,
+	HTT_STATS_PREAM_HT,
+	HTT_STATS_PREAM_VHT,
+	HTT_STATS_PREAM_HE,
+	HTT_STATS_PREAM_EHT,
+	HTT_STATS_PREAM_RSVD1,
+
+	HTT_STATS_PREAM_COUNT,
+};
+
+enum htt_stats_direction {
+	HTT_STATS_DIRECTION_TX,
+	HTT_STATS_DIRECTION_RX,
+};
+
+enum htt_stats_ppdu_type {
+	HTT_STATS_PPDU_TYPE_MODE_SU,
+	HTT_STATS_PPDU_TYPE_DL_MU_MIMO,
+	HTT_STATS_PPDU_TYPE_UL_MU_MIMO,
+	HTT_STATS_PPDU_TYPE_DL_MU_OFDMA,
+	HTT_STATS_PPDU_TYPE_UL_MU_OFDMA,
+};
+
+#define HTT_TX_PEER_STATS_NUM_MCS_COUNTERS        12
+#define HTT_TX_PEER_STATS_NUM_EXTRA_MCS_COUNTERS   2
+#define HTT_TX_PEER_STATS_NUM_GI_COUNTERS          4
+#define HTT_TX_PEER_STATS_NUM_DCM_COUNTERS         5
+#define HTT_RX_PEER_STATS_NUM_BW_EXT_COUNTERS      4
+#define HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS      4
+#define HTT_TX_PEER_STATS_NUM_BW_COUNTERS          4
+#define HTT_TX_PEER_STATS_NUM_SPATIAL_STREAMS      8
+#define HTT_TX_PEER_STATS_NUM_PREAMBLE_TYPES       HTT_STATS_PREAM_COUNT
+
+struct htt_tx_peer_rate_stats_tlv {
+	u32 tx_ldpc;
+	u32 rts_cnt;
+	u32 ack_rssi;
+
+	u32 tx_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+	u32 tx_su_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+	u32 tx_mu_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+	/* element 0,1, ...7 -> NSS 1,2, ...8 */
+	u32 tx_nss[HTT_TX_PEER_STATS_NUM_SPATIAL_STREAMS];
+	/* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+	u32 tx_bw[HTT_TX_PEER_STATS_NUM_BW_COUNTERS];
+	u32 tx_stbc[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+	u32 tx_pream[HTT_TX_PEER_STATS_NUM_PREAMBLE_TYPES];
+
+	/* Counters to track number of tx packets in each GI
+	 * (400us, 800us, 1600us & 3200us) in each mcs (0-11)
+	 */
+	u32 tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS][HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+
+	/* Counters to track packets in dcm mcs (MCS 0, 1, 3, 4) */
+	u32 tx_dcm[HTT_TX_PEER_STATS_NUM_DCM_COUNTERS];
+
+};
+
+#define HTT_RX_PEER_STATS_NUM_MCS_COUNTERS        12
+#define HTT_RX_PEER_STATS_NUM_EXTRA_MCS_COUNTERS   2
+#define HTT_RX_PEER_STATS_NUM_GI_COUNTERS          4
+#define HTT_RX_PEER_STATS_NUM_DCM_COUNTERS         5
+#define HTT_RX_PEER_STATS_NUM_BW_COUNTERS          4
+#define HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS      8
+#define HTT_RX_PEER_STATS_NUM_PREAMBLE_TYPES       HTT_STATS_PREAM_COUNT
+
+struct htt_rx_peer_rate_stats_tlv {
+	u32 nsts;
+
+	/* Number of rx ldpc packets */
+	u32 rx_ldpc;
+	/* Number of rx rts packets */
+	u32 rts_cnt;
+
+	u32 rssi_mgmt; /* units = dB above noise floor */
+	u32 rssi_data; /* units = dB above noise floor */
+	u32 rssi_comb; /* units = dB above noise floor */
+	u32 rx_mcs[HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+	/* element 0,1, ...7 -> NSS 1,2, ...8 */
+	u32 rx_nss[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS];
+	u32 rx_dcm[HTT_RX_PEER_STATS_NUM_DCM_COUNTERS];
+	u32 rx_stbc[HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+	/* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+	u32 rx_bw[HTT_RX_PEER_STATS_NUM_BW_COUNTERS];
+	u32 rx_pream[HTT_RX_PEER_STATS_NUM_PREAMBLE_TYPES];
+	/* units = dB above noise floor */
+	u8 rssi_chain[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS]
+		     [HTT_RX_PEER_STATS_NUM_BW_COUNTERS];
+
+	/* Counters to track number of rx packets in each GI in each mcs (0-11) */
+	u32 rx_gi[HTT_RX_PEER_STATS_NUM_GI_COUNTERS]
+		 [HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+	u32 rx_ulofdma_non_data_ppdu;
+	u32 rx_ulofdma_data_ppdu;
+	u32 rx_ulofdma_mpdu_ok;
+	u32 rx_ulofdma_mpdu_fail;
+	s8  rx_ul_fd_rssi[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS];
+
+	u32 per_chain_rssi_pkt_type;
+	s8  rx_per_chain_rssi_in_dbm[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS]
+				    [HTT_RX_PEER_STATS_NUM_BW_COUNTERS];
+	u32 rx_ulmumimo_non_data_ppdu;
+	u32 rx_ulmumimo_data_ppdu;
+	u32 rx_ulmumimo_mpdu_ok;
+	u32 rx_ulmumimo_mpdu_fail;
+	u8  rssi_chain_ext[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS]
+			  [HTT_RX_PEER_STATS_NUM_BW_EXT_COUNTERS];
+
+	/* Stats for MCS 12/13 */
+	u32 rx_mcs_ext[HTT_RX_PEER_STATS_NUM_EXTRA_MCS_COUNTERS];
+	u32 rx_stbc_ext[HTT_RX_PEER_STATS_NUM_EXTRA_MCS_COUNTERS];
+	u32 rx_gi_ext[HTT_RX_PEER_STATS_NUM_GI_COUNTERS]
+		     [HTT_RX_PEER_STATS_NUM_EXTRA_MCS_COUNTERS];
+};
+
+enum htt_peer_stats_req_mode {
+	HTT_PEER_STATS_REQ_MODE_NO_QUERY,
+	HTT_PEER_STATS_REQ_MODE_QUERY_TQM,
+	HTT_PEER_STATS_REQ_MODE_FLUSH_TQM,
+};
+
+enum htt_peer_stats_tlv_enum {
+	HTT_PEER_STATS_CMN_TLV       = 0,
+	HTT_PEER_DETAILS_TLV         = 1,
+	HTT_TX_PEER_RATE_STATS_TLV   = 2,
+	HTT_RX_PEER_RATE_STATS_TLV   = 3,
+	HTT_TX_TID_STATS_TLV         = 4,
+	HTT_RX_TID_STATS_TLV         = 5,
+	HTT_MSDU_FLOW_STATS_TLV      = 6,
+	HTT_PEER_SCHED_STATS_TLV     = 7,
+
+	HTT_PEER_STATS_MAX_TLV       = 31,
+};
+
+/* =========== MUMIMO HWQ stats =========== */
+/* MU MIMO stats per hwQ */
+struct htt_tx_hwq_mu_mimo_sch_stats_tlv {
+	u32 mu_mimo_sch_posted;
+	u32 mu_mimo_sch_failed;
+	u32 mu_mimo_ppdu_posted;
+};
+
+struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv {
+	u32 mu_mimo_mpdus_queued_usr;
+	u32 mu_mimo_mpdus_tried_usr;
+	u32 mu_mimo_mpdus_failed_usr;
+	u32 mu_mimo_mpdus_requeued_usr;
+	u32 mu_mimo_err_no_ba_usr;
+	u32 mu_mimo_mpdu_underrun_usr;
+	u32 mu_mimo_ampdu_underrun_usr;
+};
+
+#define	HTT_TX_HWQ_STATS_MAC_ID	GENMASK(7, 0)
+#define	HTT_TX_HWQ_STATS_HWQ_ID	GENMASK(15, 8)
+
+struct htt_tx_hwq_mu_mimo_cmn_stats_tlv {
+	u32 mac_id__hwq_id__word;
+};
+
+/* == TX HWQ STATS == */
+struct htt_tx_hwq_stats_cmn_tlv {
+	u32 mac_id__hwq_id__word;
+
+	/* PPDU level stats */
+	u32 xretry;
+	u32 underrun_cnt;
+	u32 flush_cnt;
+	u32 filt_cnt;
+	u32 null_mpdu_bmap;
+	u32 user_ack_failure;
+	u32 ack_tlv_proc;
+	u32 sched_id_proc;
+	u32 null_mpdu_tx_count;
+	u32 mpdu_bmap_not_recvd;
+
+	/* Selfgen stats per hwQ */
+	u32 num_bar;
+	u32 rts;
+	u32 cts2self;
+	u32 qos_null;
+
+	/* MPDU level stats */
+	u32 mpdu_tried_cnt;
+	u32 mpdu_queued_cnt;
+	u32 mpdu_ack_fail_cnt;
+	u32 mpdu_filt_cnt;
+	u32 false_mpdu_ack_count;
+
+	u32 txq_timeout;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_difs_latency_stats_tlv_v {
+	u32 hist_intvl;
+	/* Histogram of ppdu post to hwsch -> cmd status received */
+	u32 difs_latency_hist[]; /* HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_cmd_result_stats_tlv_v {
+	/* Histogram of sched cmd result */
+	u32 cmd_result[0]; /* HTT_TX_HWQ_MAX_CMD_RESULT_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_cmd_stall_stats_tlv_v {
+	/* Histogram of various pause conditions */
+	u32 cmd_stall_status[0]; /* HTT_TX_HWQ_MAX_CMD_STALL_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_fes_result_stats_tlv_v {
+	/* Histogram of number of user fes result */
+	u32 fes_result[0]; /* HTT_TX_HWQ_MAX_FES_RESULT_STATS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size
+ *
+ *  The hwq_tried_mpdu_cnt_hist is a histogram of MPDU tries per HWQ.
+ *  The tries here is the count of the MPDUs within a PPDU that the HW
+ *  had attempted to transmit on air, for the HWSCH Schedule command
+ *  submitted by FW in this HWQ. It is not the retry attempts. The
+ *  histogram bins are 0-29, 30-59, 60-89 and so on. There are 10 bins
+ *  in this histogram.
+ *  They are defined in FW using the following macros
+ *  #define WAL_MAX_TRIED_MPDU_CNT_HISTOGRAM 9
+ *  #define WAL_TRIED_MPDU_CNT_HISTOGRAM_INTERVAL 30
+ */
+struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v {
+	u32 hist_bin_size;
+	/* Histogram of number of mpdus on tried mpdu */
+	u32 tried_mpdu_cnt_hist[]; /* HTT_TX_HWQ_TRIED_MPDU_CNT_HIST */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size
+ *
+ * The txop_used_cnt_hist is the histogram of txop per burst. After
+ * completing the burst, we identify the txop used in the burst and
+ * incr the corresponding bin.
+ * Each bin represents 1ms & we have 10 bins in this histogram.
+ * They are defined in FW using the following macros
+ * #define WAL_MAX_TXOP_USED_CNT_HISTOGRAM 10
+ * #define WAL_TXOP_USED_HISTOGRAM_INTERVAL 1000 ( 1 ms )
+ */
+struct htt_tx_hwq_txop_used_cnt_hist_tlv_v {
+	/* Histogram of txop used cnt */
+	u32 txop_used_cnt_hist[0]; /* HTT_TX_HWQ_TXOP_USED_CNT_HIST */
+};
+
+/* == TX SELFGEN STATS == */
+
+enum htt_tx_mumimo_grp_invalid_reason_code_stats {
+	HTT_TX_MUMIMO_GRP_VALID,
+	HTT_TX_MUMIMO_GRP_INVALID_NUM_MU_USERS_EXCEEDED_MU_MAX_USERS,
+	HTT_TX_MUMIMO_GRP_INVALID_SCHED_ALGO_NOT_MU_COMPATIBLE_GID,
+	HTT_TX_MUMIMO_GRP_INVALID_NON_PRIMARY_GRP,
+	HTT_TX_MUMIMO_GRP_INVALID_ZERO_CANDIDATES,
+	HTT_TX_MUMIMO_GRP_INVALID_MORE_CANDIDATES,
+	HTT_TX_MUMIMO_GRP_INVALID_GROUP_SIZE_EXCEED_NSS,
+	HTT_TX_MUMIMO_GRP_INVALID_GROUP_INELIGIBLE,
+	HTT_TX_MUMIMO_GRP_INVALID,
+	HTT_TX_MUMIMO_GRP_INVALID_GROUP_EFF_MU_TPUT_OMBPS,
+	HTT_TX_MUMIMO_GRP_INVALID_MAX_REASON_CODE,
+};
+
+#define HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS    4
+#define HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS    8
+#define HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS       74
+#define HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS    8
+#define HTT_STATS_MAX_MUMIMO_GRP_SZ		      8
+#define HTT_STATS_MUMIMO_TPUT_NUM_BINS 10
+#define HTT_STATS_MAX_INVALID_REASON_CODE \
+	HTT_TX_MUMIMO_GRP_INVALID_MAX_REASON_CODE
+/* Reasons stated in htt_tx_mumimo_grp_invalid_reason_code_stats */
+#define HTT_TX_NUM_MUMIMO_GRP_INVALID_WORDS \
+	(HTT_STATS_MAX_MUMIMO_GRP_SZ * HTT_STATS_MAX_INVALID_REASON_CODE)
+
+/* Counters for self-generated frames (BAR, RTS, CTS2SELF, QoS NULL, ...)
+ * common to all PHY modes.
+ */
+struct htt_tx_selfgen_cmn_stats_tlv {
+	/* BIT[7:0]: mac_id, remaining bits reserved — same packing
+	 * convention as the other *_cmn TLVs in this file.
+	 */
+	u32 mac_id__word;
+	u32 su_bar;
+	u32 rts;
+	u32 cts2self;
+	u32 qos_null;
+	u32 delayed_bar_1; /* MU user 1 */
+	u32 delayed_bar_2; /* MU user 2 */
+	u32 delayed_bar_3; /* MU user 3 */
+	u32 delayed_bar_4; /* MU user 4 */
+	u32 delayed_bar_5; /* MU user 5 */
+	u32 delayed_bar_6; /* MU user 6 */
+	u32 delayed_bar_7; /* MU user 7 */
+	u32 bar_with_tqm_head_seq_num;
+	u32 bar_with_tid_seq_num;
+	u32 su_sw_rts_queued;
+	u32 su_sw_rts_tried;
+	u32 su_sw_rts_err;
+	u32 su_sw_rts_flushed;
+	u32 su_sw_rts_rcvd_cts_diff_bw;
+};
+
+/* 11AC self-generated sounding frame counters: completed (top half)
+ * and queued (bottom half) NDPA/NDP/BR-poll counts.
+ */
+struct htt_tx_selfgen_ac_stats_tlv {
+	/* 11AC */
+	u32 ac_su_ndpa;
+	u32 ac_su_ndp;
+	u32 ac_mu_mimo_ndpa;
+	u32 ac_mu_mimo_ndp;
+	u32 ac_mu_mimo_brpoll_1; /* MU user 1 */
+	u32 ac_mu_mimo_brpoll_2; /* MU user 2 */
+	u32 ac_mu_mimo_brpoll_3; /* MU user 3 */
+	u32 ac_su_ndpa_queued;
+	u32 ac_su_ndp_queued;
+	u32 ac_mu_mimo_ndpa_queued;
+	u32 ac_mu_mimo_ndp_queued;
+	u32 ac_mu_mimo_brpoll_1_queued;
+	u32 ac_mu_mimo_brpoll_2_queued;
+	u32 ac_mu_mimo_brpoll_3_queued;
+};
+
+/* 11AX self-generated sounding and trigger frame counters */
+struct htt_tx_selfgen_ax_stats_tlv {
+	/* 11AX */
+	u32 ax_su_ndpa;
+	u32 ax_su_ndp;
+	u32 ax_mu_mimo_ndpa;
+	u32 ax_mu_mimo_ndp;
+	/* Per-user BR-poll counters: the array member aliases the
+	 * deprecated individually-named fields (array index i == user i+1).
+	 */
+	union {
+		struct {
+			/* deprecated old names */
+			u32 ax_mu_mimo_brpoll_1;
+			u32 ax_mu_mimo_brpoll_2;
+			u32 ax_mu_mimo_brpoll_3;
+			u32 ax_mu_mimo_brpoll_4;
+			u32 ax_mu_mimo_brpoll_5;
+			u32 ax_mu_mimo_brpoll_6;
+			u32 ax_mu_mimo_brpoll_7;
+		};
+		u32 ax_mu_mimo_brpoll[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS - 1];
+	};
+	u32 ax_basic_trigger;
+	u32 ax_bsr_trigger;
+	u32 ax_mu_bar_trigger;
+	u32 ax_mu_rts_trigger;
+	u32 ax_su_ndpa_queued;
+	u32 ax_su_ndp_queued;
+	u32 ax_mu_mimo_ndpa_queued;
+	u32 ax_mu_mimo_ndp_queued;
+	u32 ax_mu_mimo_brpoll_queued[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS - 1];
+	u32 ax_ul_mumimo_trigger[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
+};
+
+/* 11AC self-generated frame failure counters: errored (top half) and
+ * flushed (bottom half) NDPA/NDP/BR-poll counts.
+ */
+struct htt_tx_selfgen_ac_err_stats_tlv {
+	/* 11AC error stats */
+	u32 ac_su_ndp_err;
+	u32 ac_su_ndpa_err;
+	u32 ac_mu_mimo_ndpa_err;
+	u32 ac_mu_mimo_ndp_err;
+	u32 ac_mu_mimo_brp1_err;
+	u32 ac_mu_mimo_brp2_err;
+	u32 ac_mu_mimo_brp3_err;
+	u32 ac_su_ndp_flushed;
+	u32 ac_su_ndpa_flushed;
+	u32 ac_mu_mimo_ndpa_flushed;
+	u32 ac_mu_mimo_ndp_flushed;
+	u32 ac_mu_mimo_brp1_flushed;
+	u32 ac_mu_mimo_brp2_flushed;
+	u32 ac_mu_mimo_brp3_flushed;
+};
+
+/* 11AX self-generated frame failure counters.
+ *
+ * Indentation fix only: the anonymous union's array member and closing
+ * brace were mis-indented (array at struct level, "};" at column 0),
+ * making the code read as if the struct ended mid-definition.  Now
+ * matches the parallel union in htt_tx_selfgen_ax_stats_tlv.  No token
+ * or layout change.
+ */
+struct htt_tx_selfgen_ax_err_stats_tlv {
+	/* 11AX error stats */
+	u32 ax_su_ndp_err;
+	u32 ax_su_ndpa_err;
+	u32 ax_mu_mimo_ndpa_err;
+	u32 ax_mu_mimo_ndp_err;
+	/* Per-user BR-poll error counters: the array member aliases the
+	 * deprecated individually-named fields (array index i == user i+1).
+	 */
+	union {
+		struct {
+			/* deprecated old names */
+			u32 ax_mu_mimo_brp1_err;
+			u32 ax_mu_mimo_brp2_err;
+			u32 ax_mu_mimo_brp3_err;
+			u32 ax_mu_mimo_brp4_err;
+			u32 ax_mu_mimo_brp5_err;
+			u32 ax_mu_mimo_brp6_err;
+			u32 ax_mu_mimo_brp7_err;
+		};
+		u32 ax_mu_mimo_brp_err[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS - 1];
+	};
+	u32 ax_basic_trigger_err;
+	u32 ax_bsr_trigger_err;
+	u32 ax_mu_bar_trigger_err;
+	u32 ax_mu_rts_trigger_err;
+	u32 ax_ulmumimo_trigger_err;
+	u32 ax_mu_mimo_brp_err_num_cbf_received[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
+	u32 ax_su_ndpa_flushed;
+	u32 ax_su_ndp_flushed;
+	u32 ax_mu_mimo_ndpa_flushed;
+	u32 ax_mu_mimo_ndp_flushed;
+	u32 ax_mu_mimo_brpoll_flushed[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS - 1];
+	u32 ax_ul_mumimo_trigger_err[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
+};
+
+/* == TX MU STATS == */
+/* NOTE(review): these repeat the identical-valued selfgen defines above
+ * (benign redefinitions); only the _BE_ variant is new here.  Presumably
+ * kept to mirror the firmware header layout — verify before de-duplicating.
+ */
+#define HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS 4
+#define HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS 8
+#define HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS 8
+#define HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS    74
+#define HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS 8
+#define HTT_STATS_MAX_MUMIMO_GRP_SZ 8
+
+/* DL MU-MIMO / OFDMA scheduler counters (per pdev) */
+struct htt_tx_pdev_mu_mimo_sch_stats_tlv {
+	/* mu-mimo sw sched cmd stats */
+	u32 mu_mimo_sch_posted;
+	u32 mu_mimo_sch_failed;
+	/* MU PPDU stats per hwQ */
+	u32 mu_mimo_ppdu_posted;
+	/*
+	 * Counts the number of users in each transmission of
+	 * the given TX mode.
+	 *
+	 * Index is the number of users - 1.
+	 */
+	u32 ac_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
+	u32 ax_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
+	u32 ax_ofdma_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ul_ofdma_basic_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ul_ofdma_bsr_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ul_ofdma_bar_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ul_ofdma_brp_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ul_mumimo_basic_sch_nusers[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS];
+	u32 ax_ul_mumimo_brp_sch_nusers[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS];
+	/* Schedules posted, bucketed by MU group size (index = size - 1) */
+	u32 ac_mu_mimo_sch_posted_per_grp_sz[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
+	u32 ax_mu_mimo_sch_posted_per_grp_sz[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
+	u32 be_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS];
+	u32 be_mu_mimo_sch_posted_per_grp_sz[HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS];
+	u32 ac_mu_mimo_sch_posted_per_grp_sz_ext[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
+};
+
+/* Histograms of DL/UL MU-MIMO grouping decisions */
+struct htt_tx_pdev_mumimo_grp_stats_tlv {
+	u32 dl_mumimo_grp_best_grp_size[HTT_STATS_MAX_MUMIMO_GRP_SZ];
+	u32 dl_mumimo_grp_best_num_usrs[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
+	u32 dl_mumimo_grp_eligible[HTT_STATS_MAX_MUMIMO_GRP_SZ];
+	u32 dl_mumimo_grp_ineligible[HTT_STATS_MAX_MUMIMO_GRP_SZ];
+	/* Flattened [grp_sz][invalid reason code] counters; reason codes
+	 * are htt_tx_mumimo_grp_invalid_reason_code_stats values.
+	 */
+	u32 dl_mumimo_grp_invalid[HTT_TX_NUM_MUMIMO_GRP_INVALID_WORDS];
+	u32 dl_mumimo_grp_tputs[HTT_STATS_MUMIMO_TPUT_NUM_BINS];
+	u32 ul_mumimo_grp_best_grp_size[HTT_STATS_MAX_MUMIMO_GRP_SZ];
+	u32 ul_mumimo_grp_best_num_usrs[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
+	u32 ul_mumimo_grp_tputs[HTT_STATS_MUMIMO_TPUT_NUM_BINS];
+};
+
+/* Per-user MPDU counters, grouped by TX mode: 11AC MU-MIMO,
+ * 11AX MU-MIMO and 11AX OFDMA.
+ */
+struct htt_tx_pdev_mu_mimo_mpdu_stats_tlv {
+	u32 mu_mimo_mpdus_queued_usr;
+	u32 mu_mimo_mpdus_tried_usr;
+	u32 mu_mimo_mpdus_failed_usr;
+	u32 mu_mimo_mpdus_requeued_usr;
+	u32 mu_mimo_err_no_ba_usr;
+	u32 mu_mimo_mpdu_underrun_usr;
+	u32 mu_mimo_ampdu_underrun_usr;
+
+	u32 ax_mu_mimo_mpdus_queued_usr;
+	u32 ax_mu_mimo_mpdus_tried_usr;
+	u32 ax_mu_mimo_mpdus_failed_usr;
+	u32 ax_mu_mimo_mpdus_requeued_usr;
+	u32 ax_mu_mimo_err_no_ba_usr;
+	u32 ax_mu_mimo_mpdu_underrun_usr;
+	u32 ax_mu_mimo_ampdu_underrun_usr;
+
+	u32 ax_ofdma_mpdus_queued_usr;
+	u32 ax_ofdma_mpdus_tried_usr;
+	u32 ax_ofdma_mpdus_failed_usr;
+	u32 ax_ofdma_mpdus_requeued_usr;
+	u32 ax_ofdma_err_no_ba_usr;
+	u32 ax_ofdma_mpdu_underrun_usr;
+	u32 ax_ofdma_ampdu_underrun_usr;
+};
+
+/* [0]- nr4 , [1]- nr8 */
+#define HTT_STATS_NUM_NR_BINS 2
+/* Termination status stated in htt_tx_wal_tx_isr_sched_status */
+#define HTT_STATS_MAX_NUM_SCHED_STATUS  9
+#define HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST 10
+/* Flattened [NR bin][sched status] and [NR bin][ppdu-per-burst] sizes */
+#define HTT_STATS_MAX_NUM_SCHED_STATUS_WORDS \
+	(HTT_STATS_NUM_NR_BINS * HTT_STATS_MAX_NUM_SCHED_STATUS)
+#define HTT_STATS_MAX_MU_PPDU_PER_BURST_WORDS \
+	(HTT_STATS_NUM_NR_BINS * HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST)
+
+enum htt_stats_hw_mode {
+	HTT_STATS_HWMODE_AC = 0,
+	HTT_STATS_HWMODE_AX = 1,
+};
+
+/* MU PPDU distribution per burst, split by NR bin (nr4/nr8) */
+struct htt_tx_pdev_mu_ppdu_dist_stats_tlv_v {
+	u32 hw_mode; /* HTT_STATS_HWMODE_xx */
+	u32 mu_mimo_num_seq_term_status[HTT_STATS_MAX_MU_PPDU_PER_BURST_WORDS];
+	u32 mu_mimo_num_ppdu_completed_per_burst[HTT_STATS_MAX_MU_PPDU_PER_BURST_WORDS];
+	u32 mu_mimo_num_seq_posted[HTT_STATS_NUM_NR_BINS];
+	u32 mu_mimo_num_ppdu_posted_per_burst[HTT_STATS_MAX_MU_PPDU_PER_BURST_WORDS];
+};
+
+#define HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC  1
+#define HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX  2
+#define HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX 3
+
+/* Per-user MPDU counters, tagged with the user index and TX sched mode */
+struct htt_tx_pdev_mpdu_stats_tlv {
+	/* mpdu level stats */
+	u32 mpdus_queued_usr;
+	u32 mpdus_tried_usr;
+	u32 mpdus_failed_usr;
+	u32 mpdus_requeued_usr;
+	u32 err_no_ba_usr;
+	u32 mpdu_underrun_usr;
+	u32 ampdu_underrun_usr;
+	u32 user_index;
+	u32 tx_sched_mode; /* HTT_STATS_TX_SCHED_MODE_xxx */
+};
+
+/* == TX SCHED STATS == */
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_cmd_posted_tlv_v {
+	u32 sched_cmd_posted[0]; /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_cmd_reaped_tlv_v {
+	u32 sched_cmd_reaped[0]; /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_sched_order_su_tlv_v {
+	u32 sched_order_su[0]; /* HTT_TX_PDEV_NUM_SCHED_ORDER_LOG */
+};
+
+/* Reasons a TID was skipped/removed by (or was eligible for) the TX
+ * scheduler; indexes htt_sched_txq_sched_ineligibility_tlv_v below.
+ */
+enum htt_sched_txq_sched_ineligibility_tlv_enum {
+	HTT_SCHED_TID_SKIP_SCHED_MASK_DISABLED = 0,
+	HTT_SCHED_TID_SKIP_NOTIFY_MPDU,
+	HTT_SCHED_TID_SKIP_MPDU_STATE_INVALID,
+	HTT_SCHED_TID_SKIP_SCHED_DISABLED,
+	HTT_SCHED_TID_SKIP_TQM_BYPASS_CMD_PENDING,
+	HTT_SCHED_TID_SKIP_SECOND_SU_SCHEDULE,
+
+	HTT_SCHED_TID_SKIP_CMD_SLOT_NOT_AVAIL,
+	HTT_SCHED_TID_SKIP_NO_ENQ,
+	HTT_SCHED_TID_SKIP_LOW_ENQ,
+	HTT_SCHED_TID_SKIP_PAUSED,
+	HTT_SCHED_TID_SKIP_UL,
+	HTT_SCHED_TID_REMOVE_PAUSED,
+	HTT_SCHED_TID_REMOVE_NO_ENQ,
+	HTT_SCHED_TID_REMOVE_UL,
+	HTT_SCHED_TID_QUERY,
+	HTT_SCHED_TID_SU_ONLY,
+	HTT_SCHED_TID_ELIGIBLE,
+	HTT_SCHED_INELIGIBILITY_MAX,
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_sched_ineligibility_tlv_v {
+	/* indexed by htt_sched_txq_sched_ineligibility_tlv_enum */
+	u32 sched_ineligibility[0];
+};
+
+#define	HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID	GENMASK(7, 0)
+#define	HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID	GENMASK(15, 8)
+
+struct htt_tx_pdev_stats_sched_per_txq_tlv {
+	u32 mac_id__txq_id__word;
+	u32 sched_policy;
+	u32 last_sched_cmd_posted_timestamp;
+	u32 last_sched_cmd_compl_timestamp;
+	u32 sched_2_tac_lwm_count;
+	u32 sched_2_tac_ring_full;
+	u32 sched_cmd_post_failure;
+	u32 num_active_tids;
+	u32 num_ps_schedules;
+	u32 sched_cmds_pending;
+	u32 num_tid_register;
+	u32 num_tid_unregister;
+	u32 num_qstats_queried;
+	u32 qstats_update_pending;
+	u32 last_qstats_query_timestamp;
+	u32 num_tqm_cmdq_full;
+	u32 num_de_sched_algo_trigger;
+	u32 num_rt_sched_algo_trigger;
+	u32 num_tqm_sched_algo_trigger;
+	u32 notify_sched;
+	u32 dur_based_sendn_term;
+	u32 su_notify2_sched;
+	u32 su_optimal_queued_msdus_sched;
+	u32 su_delay_timeout_sched;
+	u32 su_min_txtime_sched_delay;
+	u32 su_no_delay;
+	u32 num_supercycles;
+	u32 num_subcycles_with_sort;
+	u32 num_subcycles_no_sort;
+};
+
+struct htt_stats_tx_sched_cmn_tlv {
+	/* BIT [ 7 :  0]   :- mac_id
+	 * BIT [31 :  8]   :- reserved
+	 */
+	u32 mac_id__word;
+	/* Current timestamp */
+	u32 current_timestamp;
+};
+
+/* == TQM STATS == */
+#define HTT_TX_TQM_MAX_GEN_MPDU_END_REASON          16
+#define HTT_TX_TQM_MAX_LIST_MPDU_END_REASON         16
+#define HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS 16
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_gen_mpdu_stats_tlv_v {
+	u32 gen_mpdu_end_reason[0]; /* HTT_TX_TQM_MAX_GEN_MPDU_END_REASON */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_list_mpdu_stats_tlv_v {
+	u32 list_mpdu_end_reason[0]; /* HTT_TX_TQM_MAX_LIST_MPDU_END_REASON */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_list_mpdu_cnt_tlv_v {
+	u32 list_mpdu_cnt_hist[0];
+			/* HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS */
+};
+
+/* Per-pdev TQM counters: MSDU/MPDU population and per-command-type
+ * activity counters.
+ */
+struct htt_tx_tqm_pdev_stats_tlv_v {
+	u32 msdu_count;
+	u32 mpdu_count;
+	u32 remove_msdu;
+	u32 remove_mpdu;
+	u32 remove_msdu_ttl;
+	u32 send_bar;
+	u32 bar_sync;
+	u32 notify_mpdu;
+	u32 sync_cmd;
+	u32 write_cmd;
+	u32 hwsch_trigger;
+	u32 ack_tlv_proc;
+	u32 gen_mpdu_cmd;
+	u32 gen_list_cmd;
+	u32 remove_mpdu_cmd;
+	u32 remove_mpdu_tried_cmd;
+	u32 mpdu_queue_stats_cmd;
+	u32 mpdu_head_info_cmd;
+	u32 msdu_flow_stats_cmd;
+	u32 remove_msdu_cmd;
+	u32 remove_msdu_ttl_cmd;
+	u32 flush_cache_cmd;
+	u32 update_mpduq_cmd;
+	u32 enqueue;
+	u32 enqueue_notify;
+	u32 notify_mpdu_at_head;
+	u32 notify_mpdu_state_valid;
+	/*
+	 * On receiving TQM_FLOW_NOT_EMPTY_STATUS from TQM, (on MSDUs being enqueued
+	 * the flow is non empty), if the number of MSDUs is greater than the threshold,
+	 * notify is incremented. UDP_THRESH counters are for UDP MSDUs, and NONUDP are
+	 * for non-UDP MSDUs.
+	 * MSDUQ_SWNOTIFY_UDP_THRESH1 threshold    - sched_udp_notify1 is incremented
+	 * MSDUQ_SWNOTIFY_UDP_THRESH2 threshold    - sched_udp_notify2 is incremented
+	 * MSDUQ_SWNOTIFY_NONUDP_THRESH1 threshold - sched_nonudp_notify1 is incremented
+	 * MSDUQ_SWNOTIFY_NONUDP_THRESH2 threshold - sched_nonudp_notify2 is incremented
+	 *
+	 * Notify signifies that we trigger the scheduler.
+	 */
+	u32 sched_udp_notify1;
+	u32 sched_udp_notify2;
+	u32 sched_nonudp_notify1;
+	u32 sched_nonudp_notify2;
+};
+
+/* Global TQM counters common to all pdevs */
+struct htt_tx_tqm_cmn_stats_tlv {
+	u32 mac_id__word;
+	u32 max_cmdq_id;
+	u32 list_mpdu_cnt_hist_intvl;
+
+	/* Global stats */
+	u32 add_msdu;
+	u32 q_empty;
+	u32 q_not_empty;
+	u32 drop_notification;
+	u32 desc_threshold;
+	u32 hwsch_tqm_invalid_status;
+	u32 missed_tqm_gen_mpdus;
+	u32 tqm_active_tids;
+	u32 tqm_inactive_tids;
+	u32 tqm_active_msduq_flows;
+};
+
+/* TQM error and reset-recovery debug counters */
+struct htt_tx_tqm_error_stats_tlv {
+	/* Error stats */
+	u32 q_empty_failure;
+	u32 q_not_empty_failure;
+	u32 add_msdu_failure;
+	/* TQM reset debug stats */
+	u32 tqm_cache_ctl_err;
+	u32 tqm_soft_reset;
+	u32 tqm_reset_total_num_in_use_link_descs;
+	u32 tqm_reset_worst_case_num_lost_link_descs;
+	u32 tqm_reset_worst_case_num_lost_host_tx_bufs_count;
+	u32 tqm_reset_num_in_use_link_descs_internal_tqm;
+	u32 tqm_reset_num_in_use_link_descs_wbm_idle_link_ring;
+	u32 tqm_reset_time_to_tqm_hang_delta_ms;
+	u32 tqm_reset_recovery_time_ms;
+	u32 tqm_reset_num_peers_hdl;
+	u32 tqm_reset_cumm_dirty_hw_mpduq_proc_cnt;
+	u32 tqm_reset_cumm_dirty_hw_msduq_proc;
+	u32 tqm_reset_flush_cache_cmd_su_cnt;
+	u32 tqm_reset_flush_cache_cmd_other_cnt;
+	u32 tqm_reset_flush_cache_cmd_trig_type;
+	u32 tqm_reset_flush_cache_cmd_trig_cfg;
+	u32 tqm_reset_flush_cache_cmd_skip_cmd_status_null;
+};
+
+/* == TQM CMDQ stats == */
+/* Bit layout of htt_tx_tqm_cmdq_status_tlv.mac_id__cmdq_id__word */
+#define	HTT_TX_TQM_CMDQ_STATUS_MAC_ID	GENMASK(7, 0)
+#define	HTT_TX_TQM_CMDQ_STATUS_CMDQ_ID	GENMASK(15, 8)
+
+/* Per-command-queue TQM command counters */
+struct htt_tx_tqm_cmdq_status_tlv {
+	u32 mac_id__cmdq_id__word;
+	u32 sync_cmd;
+	u32 write_cmd;
+	u32 gen_mpdu_cmd;
+	u32 mpdu_queue_stats_cmd;
+	u32 mpdu_head_info_cmd;
+	u32 msdu_flow_stats_cmd;
+	u32 remove_mpdu_cmd;
+	u32 remove_msdu_cmd;
+	u32 flush_cache_cmd;
+	u32 update_mpduq_cmd;
+	u32 update_msduq_cmd;
+};
+
+/* == TX-DE STATS == */
+/* Structures for tx de stats */
+
+/* EAPOL / 4-way-handshake frame counters seen by the TX data engine */
+struct htt_tx_de_eapol_packets_stats_tlv {
+	u32 m1_packets;
+	u32 m2_packets;
+	u32 m3_packets;
+	u32 m4_packets;
+	u32 g1_packets;
+	u32 g2_packets;
+	u32 rc4_packets;
+	u32 eap_packets;
+	u32 eapol_start_packets;
+	u32 eapol_logoff_packets;
+	u32 eapol_encap_asf_packets;
+};
+
+/* Reasons TX classification failed, one counter per cause */
+struct htt_tx_de_classify_failed_stats_tlv {
+	u32 ap_bss_peer_not_found;
+	u32 ap_bcast_mcast_no_peer;
+	u32 sta_delete_in_progress;
+	u32 ibss_no_bss_peer;
+	u32 invalid_vdev_type;
+	u32 invalid_ast_peer_entry;
+	u32 peer_entry_invalid;
+	u32 ethertype_not_ip;
+	u32 eapol_lookup_failed;
+	u32 qpeer_not_allow_data;
+	u32 fse_tid_override;
+	u32 ipv6_jumbogram_zero_length;
+	u32 qos_to_non_qos_in_prog;
+	u32 ap_bcast_mcast_eapol;
+	u32 unicast_on_ap_bss_peer;
+	u32 ap_vdev_invalid;
+	u32 incomplete_llc;
+	u32 eapol_duplicate_m3;
+	u32 eapol_duplicate_m4;
+};
+
+/* TX classification results: packet types, HTT metadata validity and
+ * flow-search-engine (FSE) activity.
+ */
+struct htt_tx_de_classify_stats_tlv {
+	u32 arp_packets;
+	u32 igmp_packets;
+	u32 dhcp_packets;
+	u32 host_inspected;
+	u32 htt_included;
+	u32 htt_valid_mcs;
+	u32 htt_valid_nss;
+	u32 htt_valid_preamble_type;
+	u32 htt_valid_chainmask;
+	u32 htt_valid_guard_interval;
+	u32 htt_valid_retries;
+	u32 htt_valid_bw_info;
+	u32 htt_valid_power;
+	u32 htt_valid_key_flags;
+	u32 htt_valid_no_encryption;
+	u32 fse_entry_count;
+	u32 fse_priority_be;
+	u32 fse_priority_high;
+	u32 fse_priority_low;
+	u32 fse_traffic_ptrn_be;
+	u32 fse_traffic_ptrn_over_sub;
+	u32 fse_traffic_ptrn_bursty;
+	u32 fse_traffic_ptrn_interactive;
+	u32 fse_traffic_ptrn_periodic;
+	u32 fse_hwqueue_alloc;
+	u32 fse_hwqueue_created;
+	u32 fse_hwqueue_send_to_host;
+	u32 mcast_entry;
+	u32 bcast_entry;
+	u32 htt_update_peer_cache;
+	u32 htt_learning_frame;
+	u32 fse_invalid_peer;
+	/*
+	 * mec_notify is HTT TX WBM multicast echo check notification
+	 * from firmware to host.  FW sends SA addresses to host for all
+	 * multicast/broadcast packets received on STA side.
+	 */
+	u32    mec_notify;
+};
+
+/* Classification completion status counters */
+struct htt_tx_de_classify_status_stats_tlv {
+	u32 eok;
+	u32 classify_done;
+	u32 lookup_failed;
+	u32 send_host_dhcp;
+	u32 send_host_mcast;
+	u32 send_host_unknown_dest;
+	u32 send_host;
+	u32 status_invalid;
+};
+
+/* Packets enqueued by the TX data engine (direct to TQM or bypass) */
+struct htt_tx_de_enqueue_packets_stats_tlv {
+	u32 enqueued_pkts;
+	u32 to_tqm;
+	u32 to_tqm_bypass;
+};
+
+/* Packets discarded at enqueue time */
+struct htt_tx_de_enqueue_discard_stats_tlv {
+	u32 discarded_pkts;
+	u32 local_frames;
+	u32 is_ext_msdu;
+};
+
+/* TX completion frame counters */
+struct htt_tx_de_compl_stats_tlv {
+	u32 tcl_dummy_frame;
+	u32 tqm_dummy_frame;
+	u32 tqm_notify_frame;
+	u32 fw2wbm_enq;
+	u32 tqm_bypass_frame;
+};
+
+/*
+ *  The htt_tx_de_fw2wbm_ring_full_hist_tlv is a histogram of time we waited
+ *  for the fw2wbm ring buffer.  We are requesting a buffer in the FW2WBM
+ *  release ring, which may fail due to non-availability of a buffer.  Hence
+ *  we sleep for 200us & again request for it.  This is a histogram of the
+ *  time we wait, with a bin of 200ms & there are 10 bins (2 seconds max).
+ *  They are defined by the following macros in FW
+ *  #define ENTRIES_PER_BIN_COUNT 1000  // per bin 1000 * 200us = 200ms
+ *  #define RING_FULL_BIN_ENTRIES (WAL_TX_DE_FW2WBM_ALLOC_TIMEOUT_COUNT /
+ *                               ENTRIES_PER_BIN_COUNT)
+ */
+struct htt_tx_de_fw2wbm_ring_full_hist_tlv {
+	/* Variable length; see histogram description above */
+	u32 fw2wbm_ring_full_hist[0];
+};
+
+/* Global TX data engine counters */
+struct htt_tx_de_cmn_stats_tlv {
+	u32   mac_id__word;
+
+	/* Global Stats */
+	u32   tcl2fw_entry_count;
+	u32   not_to_fw;
+	u32   invalid_pdev_vdev_peer;
+	u32   tcl_res_invalid_addrx;
+	u32   wbm2fw_entry_count;
+	u32   invalid_pdev;
+	u32   tcl_res_addrx_timeout;
+	u32   invalid_vdev;
+	u32   invalid_tcl_exp_frame_desc;
+};
+
+/* == RING-IF STATS == */
+#define HTT_STATS_LOW_WM_BINS      5
+#define HTT_STATS_HIGH_WM_BINS     5
+
+/* Bit layouts of the packed index/threshold words below */
+#define HTT_RING_IF_STATS_NUM_ELEMS		GENMASK(15, 0)
+#define	HTT_RING_IF_STATS_PREFETCH_TAIL_INDEX	GENMASK(31, 16)
+#define HTT_RING_IF_STATS_HEAD_IDX		GENMASK(15, 0)
+#define HTT_RING_IF_STATS_TAIL_IDX		GENMASK(31, 16)
+#define HTT_RING_IF_STATS_SHADOW_HEAD_IDX	GENMASK(15, 0)
+#define HTT_RING_IF_STATS_SHADOW_TAIL_IDX	GENMASK(31, 16)
+#define HTT_RING_IF_STATS_LWM_THRESH		GENMASK(15, 0)
+#define HTT_RING_IF_STATS_HWM_THRESH		GENMASK(31, 16)
+
+/* State and watermark counters for one ring interface */
+struct htt_ring_if_stats_tlv {
+	u32 base_addr; /* DWORD aligned base memory address of the ring */
+	u32 elem_size;
+	u32 num_elems__prefetch_tail_idx;
+	u32 head_idx__tail_idx;
+	u32 shadow_head_idx__shadow_tail_idx;
+	u32 num_tail_incr;
+	u32 lwm_thresh__hwm_thresh;
+	u32 overrun_hit_count;
+	u32 underrun_hit_count;
+	u32 prod_blockwait_count;
+	u32 cons_blockwait_count;
+	u32 low_wm_hit_count[HTT_STATS_LOW_WM_BINS];
+	u32 high_wm_hit_count[HTT_STATS_HIGH_WM_BINS];
+};
+
+/* Common header for the ring-if stats group */
+struct htt_ring_if_cmn_tlv {
+	u32 mac_id__word;
+	u32 num_records;
+};
+
+/* == SFM STATS == */
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sfm_client_user_tlv_v {
+	/* Number of DWORDS used per user and per client */
+	u32 dwords_used_by_user_n[0];
+};
+
+/* Buffer accounting for one SFM client */
+struct htt_sfm_client_tlv {
+	/* Client ID */
+	u32 client_id;
+	/* Minimum number of buffers */
+	u32 buf_min;
+	/* Maximum number of buffers */
+	u32 buf_max;
+	/* Number of Busy buffers */
+	u32 buf_busy;
+	/* Number of Allocated buffers */
+	u32 buf_alloc;
+	/* Number of Available/Usable buffers */
+	u32 buf_avail;
+	/* Number of users */
+	u32 num_users;
+};
+
+/* SFM (shared buffer manager) global state */
+struct htt_sfm_cmn_tlv {
+	u32 mac_id__word;
+	/* Indicates the total number of 128 byte buffers
+	 * in the CMEM that are available for buffer sharing
+	 */
+	u32 buf_total;
+	/* Indicates for certain client or all the clients
+	 * there is no dowrd saved in SFM, refer to SFM_R1_MEM_EMPTY
+	 */
+	u32 mem_empty;
+	/* DEALLOCATE_BUFFERS, refer to register SFM_R0_DEALLOCATE_BUFFERS */
+	u32 deallocate_bufs;
+	/* Number of Records */
+	u32 num_records;
+};
+
+/* == SRNG STATS == */
+/* Bit layouts of the packed words in htt_sring_stats_tlv */
+#define	HTT_SRING_STATS_MAC_ID			GENMASK(7, 0)
+#define HTT_SRING_STATS_RING_ID			GENMASK(15, 8)
+#define HTT_SRING_STATS_ARENA			GENMASK(23, 16)
+#define HTT_SRING_STATS_EP			BIT(24)
+#define HTT_SRING_STATS_NUM_AVAIL_WORDS		GENMASK(15, 0)
+#define HTT_SRING_STATS_NUM_VALID_WORDS		GENMASK(31, 16)
+#define HTT_SRING_STATS_HEAD_PTR		GENMASK(15, 0)
+#define HTT_SRING_STATS_TAIL_PTR		GENMASK(31, 16)
+#define HTT_SRING_STATS_CONSUMER_EMPTY		GENMASK(15, 0)
+#define HTT_SRING_STATS_PRODUCER_FULL		GENMASK(31, 16)
+#define HTT_SRING_STATS_PREFETCH_COUNT		GENMASK(15, 0)
+#define HTT_SRING_STATS_INTERNAL_TAIL_PTR	GENMASK(31, 16)
+
+/* State snapshot of one SRNG ring; fields packed per the masks above */
+struct htt_sring_stats_tlv {
+	u32 mac_id__ring_id__arena__ep;
+	u32 base_addr_lsb; /* DWORD aligned base memory address of the ring */
+	u32 base_addr_msb;
+	u32 ring_size;
+	u32 elem_size;
+
+	u32 num_avail_words__num_valid_words;
+	u32 head_ptr__tail_ptr;
+	u32 consumer_empty__producer_full;
+	u32 prefetch_count__internal_tail_ptr;
+};
+
+/* Common header for the SRNG stats group */
+struct htt_sring_cmn_tlv {
+	u32 num_records;
+};
+
+/* == PDEV TX RATE CTRL STATS == */
+/* Array dimensions for htt_tx_pdev_rate_stats_tlv */
+#define HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS        12
+#define HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS   2
+#define HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS  2
+#define HTT_TX_PDEV_STATS_NUM_GI_COUNTERS          4
+#define HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS         5
+#define HTT_TX_PDEV_STATS_NUM_BW_COUNTERS          4
+#define HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS      8
+#define HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES       HTT_STATS_PREAM_COUNT
+#define HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS     4
+#define HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS    8
+#define HTT_TX_PDEV_STATS_NUM_LTF                  4
+#define HTT_TX_PDEV_STATS_NUM_11AX_TRIGGER_TYPES   6
+
+/* Flattened [bw][AX MU user] sounding counter count */
+#define HTT_TX_NUM_OF_SOUNDING_STATS_WORDS \
+	(HTT_TX_PDEV_STATS_NUM_BW_COUNTERS * \
+	 HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS)
+
+#define HTT_TX_PDEV_STATS_NUM_MCS_DROP_COUNTERS \
+	(HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS + \
+	 HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS + \
+	 HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS)
+
+#define HTT_TX_PDEV_STATS_NUM_PER_COUNTERS 101
+
+/*
+ * Introduce new TX counters to support 320MHz support and punctured modes
+ */
+enum HTT_TX_PDEV_STATS_NUM_PUNCTURED_MODE_TYPE {
+	HTT_TX_PDEV_STATS_PUNCTURED_NONE = 0,
+	HTT_TX_PDEV_STATS_PUNCTURED_20 = 1,
+	HTT_TX_PDEV_STATS_PUNCTURED_40 = 2,
+	HTT_TX_PDEV_STATS_PUNCTURED_80 = 3,
+	HTT_TX_PDEV_STATS_PUNCTURED_120 = 4,
+	HTT_TX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS = 5
+};
+
+#define HTT_TX_PDEV_STATS_NUM_REDUCED_CHAN_TYPES 2 /* 0 - Half, 1 - Quarter */
+/* 11be related updates */
+#define HTT_TX_PDEV_STATS_NUM_BE_MCS_COUNTERS 16 /* 0...13,-2,-1 */
+#define HTT_TX_PDEV_STATS_NUM_BE_BW_COUNTERS  5  /* 20,40,80,160,320 MHz */
+#define HTT_TX_PDEV_STATS_NUM_11BE_TRIGGER_TYPES 6
+
+#define HTT_TX_PDEV_STATS_NUM_HE_SIG_B_MCS_COUNTERS 6
+#define HTT_TX_PDEV_STATS_NUM_EHT_SIG_MCS_COUNTERS 4
+
+/* 11ax DL OFDMA resource-unit sizes, smallest to largest */
+enum HTT_TX_PDEV_STATS_AX_RU_SIZE {
+	HTT_TX_PDEV_STATS_AX_RU_SIZE_26,
+	HTT_TX_PDEV_STATS_AX_RU_SIZE_52,
+	HTT_TX_PDEV_STATS_AX_RU_SIZE_106,
+	HTT_TX_PDEV_STATS_AX_RU_SIZE_242,
+	HTT_TX_PDEV_STATS_AX_RU_SIZE_484,
+	HTT_TX_PDEV_STATS_AX_RU_SIZE_996,
+	HTT_TX_PDEV_STATS_AX_RU_SIZE_996x2,
+	HTT_TX_PDEV_STATS_NUM_AX_RU_SIZE_COUNTERS,
+};
+
+/* 11be (EHT) resource-unit sizes, including multi-RU combinations */
+enum HTT_TX_PDEV_STATS_BE_RU_SIZE {
+	HTT_TX_PDEV_STATS_BE_RU_SIZE_26,
+	HTT_TX_PDEV_STATS_BE_RU_SIZE_52,
+	HTT_TX_PDEV_STATS_BE_RU_SIZE_52_26,
+	HTT_TX_PDEV_STATS_BE_RU_SIZE_106,
+	HTT_TX_PDEV_STATS_BE_RU_SIZE_106_26,
+	HTT_TX_PDEV_STATS_BE_RU_SIZE_242,
+	HTT_TX_PDEV_STATS_BE_RU_SIZE_484,
+	HTT_TX_PDEV_STATS_BE_RU_SIZE_484_242,
+	HTT_TX_PDEV_STATS_BE_RU_SIZE_996,
+	HTT_TX_PDEV_STATS_BE_RU_SIZE_996_484,
+	HTT_TX_PDEV_STATS_BE_RU_SIZE_996_484_242,
+	HTT_TX_PDEV_STATS_BE_RU_SIZE_996x2,
+	HTT_TX_PDEV_STATS_BE_RU_SIZE_996x2_484,
+	HTT_TX_PDEV_STATS_BE_RU_SIZE_996x3,
+	HTT_TX_PDEV_STATS_BE_RU_SIZE_996x3_484,
+	HTT_TX_PDEV_STATS_BE_RU_SIZE_996x4,
+	HTT_TX_PDEV_STATS_NUM_BE_RU_SIZE_COUNTERS,
+};
+
+/* Per-pdev TX rate-control counters: MCS/NSS/BW/GI distributions for
+ * SU, MU-MIMO (AC/AX) and OFDMA transmissions.
+ */
+struct htt_tx_pdev_rate_stats_tlv {
+	u32 mac_id__word;
+	u32 tx_ldpc;
+	u32 rts_cnt;
+	/* RSSI value of last ack packet (units = dB above noise floor) */
+	u32 ack_rssi;
+
+	u32 tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+	u32 tx_su_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 tx_mu_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+	/* element 0,1, ...7 -> NSS 1,2, ...8 */
+	u32 tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	/* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+	u32 tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+	u32 tx_stbc[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 tx_pream[HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+
+	/* Counters to track number of tx packets
+	 * in each GI (400us, 800us, 1600us & 3200us) in each mcs (0-11)
+	 */
+	u32 tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS][HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+	/* Counters to track packets in dcm mcs (MCS 0, 1, 3, 4) */
+	u32 tx_dcm[HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS];
+	/* Number of CTS-acknowledged RTS packets */
+	u32 rts_success;
+
+	/*
+	 * Counters for legacy 11a and 11b transmissions.
+	 *
+	 * The index corresponds to:
+	 *
+	 * CCK: 0: 1 Mbps, 1: 2 Mbps, 2: 5.5 Mbps, 3: 11 Mbps
+	 *
+	 * OFDM: 0: 6 Mbps, 1: 9 Mbps, 2: 12 Mbps, 3: 18 Mbps,
+	 *       4: 24 Mbps, 5: 36 Mbps, 6: 48 Mbps, 7: 54 Mbps
+	 */
+	u32 tx_legacy_cck_rate[HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+	u32 tx_legacy_ofdm_rate[HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+
+	u32 ac_mu_mimo_tx_ldpc;
+	u32 ax_mu_mimo_tx_ldpc;
+	u32 ofdma_tx_ldpc;
+
+	/*
+	 * Counters for 11ax HE LTF selection during TX.
+	 *
+	 * The index corresponds to:
+	 *
+	 * 0: unused, 1: 1x LTF, 2: 2x LTF, 3: 4x LTF
+	 */
+	u32 tx_he_ltf[HTT_TX_PDEV_STATS_NUM_LTF];
+
+	u32 ac_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 ax_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 ofdma_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+	u32 ac_mu_mimo_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	u32 ax_mu_mimo_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	u32 ofdma_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+
+	u32 ac_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+	u32 ax_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+	u32 ofdma_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+
+	u32 ac_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+			    [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 ax_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+			    [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 ofdma_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+		       [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 trigger_type_11ax[HTT_TX_PDEV_STATS_NUM_11AX_TRIGGER_TYPES];
+	u32 tx_11ax_su_ext;
+	/* "_ext" counters cover the extra MCS indices (12, 13) */
+	u32 tx_mcs_ext[HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+	u32 tx_stbc_ext[HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+	u32 tx_gi_ext[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+		     [HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+	u32 ax_mu_mimo_tx_mcs_ext[HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+	u32 ofdma_tx_mcs_ext[HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+	u32 ax_mu_mimo_tx_gi_ext[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+				[HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+	u32 ofdma_tx_gi_ext[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+			   [HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+	u32 tx_mcs_ext_2[HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
+	u32 tx_bw_320mhz;
+	u32 tx_gi_ext_2[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+		       [HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
+	u32 tx_su_punctured_mode[HTT_TX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS];
+	u32 reduced_tx_bw[HTT_TX_PDEV_STATS_NUM_REDUCED_CHAN_TYPES][HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+	/** 11AC VHT DL MU MIMO TX BW stats at reduced channel config */
+	u32 reduced_ac_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_REDUCED_CHAN_TYPES][HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+	/** 11AX HE DL MU MIMO TX BW stats at reduced channel config */
+	u32 reduced_ax_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_REDUCED_CHAN_TYPES][HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+	/** 11AX HE DL MU OFDMA TX BW stats at reduced channel config */
+	u32 reduced_ax_mu_ofdma_tx_bw[HTT_TX_PDEV_STATS_NUM_REDUCED_CHAN_TYPES][HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+	/** 11AX HE DL MU OFDMA TX RU Size stats */
+	u32 ofdma_tx_ru_size[HTT_TX_PDEV_STATS_NUM_AX_RU_SIZE_COUNTERS];
+	/** 11AX HE DL MU OFDMA HE-SIG-B MCS stats */
+	u32 ofdma_he_sig_b_mcs[HTT_TX_PDEV_STATS_NUM_HE_SIG_B_MCS_COUNTERS];
+	/** 11AX HE SU data + embedded trigger PPDU success stats (stats for HETP ack success PPDU cnt) */
+	u32 ax_su_embedded_trigger_data_ppdu;
+	/** 11AX HE SU data + embedded trigger PPDU failure stats (stats for HETP ack failure PPDU cnt) */
+	u32 ax_su_embedded_trigger_data_ppdu_err;
+	/** sta side trigger stats */
+	u32 trigger_type_11be[HTT_TX_PDEV_STATS_NUM_11BE_TRIGGER_TYPES];
+};
+
+/* == PDEV RX RATE CTRL STATS == */
+/* Array dimensions for htt_rx_pdev_rate_stats_tlv */
+#define HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS     4
+#define HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS    8
+#define HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS        12
+#define HTT_RX_PDEV_STATS_NUM_GI_COUNTERS          4
+#define HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS         5
+#define HTT_RX_PDEV_STATS_NUM_BW_COUNTERS          4
+#define HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS      4
+#define HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS      8
+#define HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES       HTT_STATS_PREAM_COUNT
+#define HTT_RX_PDEV_MAX_OFDMA_NUM_USER             8
+#define HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS 16
+
+#define HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS 2 /* 12, 13 */
+#define HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS 2 /* 14, 15 */
+#define HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT 14 /* 0-13 */
+#define HTT_RX_PDEV_STATS_TOTAL_BW_COUNTERS \
+    (HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS + HTT_RX_PDEV_STATS_NUM_BW_COUNTERS)
+#define HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS 5 /* 20, 40, 80, 160, 320Mhz */
+#define HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS 8
+#define HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER 8
+#define HTT_RX_PDEV_STATS_NUM_BE_MCS_COUNTERS 16 /* 0-13, -2, -1 */
+#define HTT_RX_PDEV_STATS_NUM_BE_BW_COUNTERS  5  /* 20,40,80,160,320 MHz */
+#define HTT_RX_PDEV_STATS_NUM_BW_EXT_2_COUNTERS 8
+
+/* HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS:
+ * RU size index 0: HTT_UL_OFDMA_V0_RU_SIZE_RU_26
+ * RU size index 1: HTT_UL_OFDMA_V0_RU_SIZE_RU_52
+ * RU size index 2: HTT_UL_OFDMA_V0_RU_SIZE_RU_106
+ * RU size index 3: HTT_UL_OFDMA_V0_RU_SIZE_RU_242
+ * RU size index 4: HTT_UL_OFDMA_V0_RU_SIZE_RU_484
+ * RU size index 5: HTT_UL_OFDMA_V0_RU_SIZE_RU_996
+ */
+#define HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS 6
+/* HTT_RX_PDEV_STATS_NUM_RU_SIZE_160MHZ_CNTRS:
+ * RU size index 0: HTT_UL_OFDMA_V0_RU_SIZE_RU_26
+ * RU size index 1: HTT_UL_OFDMA_V0_RU_SIZE_RU_52
+ * RU size index 2: HTT_UL_OFDMA_V0_RU_SIZE_RU_106
+ * RU size index 3: HTT_UL_OFDMA_V0_RU_SIZE_RU_242
+ * RU size index 4: HTT_UL_OFDMA_V0_RU_SIZE_RU_484
+ * RU size index 5: HTT_UL_OFDMA_V0_RU_SIZE_RU_996
+ * RU size index 6: HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2
+ */
+#define HTT_RX_PDEV_STATS_NUM_RU_SIZE_160MHZ_CNTRS 7 /* includes 996x2 */
+
+#define HTT_RX_PDEV_STATS_NUM_REDUCED_CHAN_TYPES       2
+
+/* 11be (EHT) RX resource-unit sizes, including multi-RU combinations */
+enum HTT_RX_PDEV_STATS_BE_RU_SIZE {
+    HTT_RX_PDEV_STATS_BE_RU_SIZE_26,
+    HTT_RX_PDEV_STATS_BE_RU_SIZE_52,
+    HTT_RX_PDEV_STATS_BE_RU_SIZE_52_26,
+    HTT_RX_PDEV_STATS_BE_RU_SIZE_106,
+    HTT_RX_PDEV_STATS_BE_RU_SIZE_106_26,
+    HTT_RX_PDEV_STATS_BE_RU_SIZE_242,
+    HTT_RX_PDEV_STATS_BE_RU_SIZE_484,
+    HTT_RX_PDEV_STATS_BE_RU_SIZE_484_242,
+    HTT_RX_PDEV_STATS_BE_RU_SIZE_996,
+    HTT_RX_PDEV_STATS_BE_RU_SIZE_996_484,
+    HTT_RX_PDEV_STATS_BE_RU_SIZE_996_484_242,
+    HTT_RX_PDEV_STATS_BE_RU_SIZE_996x2,
+    HTT_RX_PDEV_STATS_BE_RU_SIZE_996x2_484,
+    HTT_RX_PDEV_STATS_BE_RU_SIZE_996x3,
+    HTT_RX_PDEV_STATS_BE_RU_SIZE_996x3_484,
+    HTT_RX_PDEV_STATS_BE_RU_SIZE_996x4,
+    HTT_RX_PDEV_STATS_NUM_BE_RU_SIZE_COUNTERS,
+};
+
+/* Introduce new RX counters to support 320MHZ support and punctured modes */
+typedef enum {
+	HTT_RX_PDEV_STATS_PUNCTURED_NONE = 0,
+	HTT_RX_PDEV_STATS_PUNCTURED_20 = 1,
+	HTT_RX_PDEV_STATS_PUNCTURED_40 = 2,
+	HTT_RX_PDEV_STATS_PUNCTURED_80 = 3,
+	HTT_RX_PDEV_STATS_PUNCTURED_120 = 4,
+	HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS = 5
+} HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_TYPE;
+
+struct htt_rx_pdev_rate_stats_tlv {
+	u32 mac_id__word;
+	u32 nsts;
+
+	u32 rx_ldpc;
+	u32 rts_cnt;
+
+	u32 rssi_mgmt; /* units = dB above noise floor */
+	u32 rssi_data; /* units = dB above noise floor */
+	u32 rssi_comb; /* units = dB above noise floor */
+	u32 rx_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	/* element 0,1, ...7 -> NSS 1,2, ...8 */
+	u32 rx_nss[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	u32 rx_dcm[HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS];
+	u32 rx_stbc[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	/* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+	u32 rx_bw[HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+	u32 rx_pream[HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+	u8 rssi_chain[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+		     [HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+					/* units = dB above noise floor */
+
+	/* Counters to track number of rx packets
+	 * in each GI in each mcs (0-11)
+	 */
+	u32 rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS][HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	s32 rssi_in_dbm; /* rx Signal Strength value in dBm unit */
+
+	u32 rx_11ax_su_ext;
+	u32 rx_11ac_mumimo;
+	u32 rx_11ax_mumimo;
+	u32 rx_11ax_ofdma;
+	u32 txbf;
+	u32 rx_legacy_cck_rate[HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+	u32 rx_legacy_ofdm_rate[HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+	u32 rx_active_dur_us_low;
+	u32 rx_active_dur_us_high;
+
+	u32 rx_11ax_ul_ofdma;
+
+	u32 ul_ofdma_rx_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 ul_ofdma_rx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+			  [HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 ul_ofdma_rx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	u32 ul_ofdma_rx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+	u32 ul_ofdma_rx_stbc;
+	u32 ul_ofdma_rx_ldpc;
+
+	/* record the stats for each user index */
+	u32 rx_ulofdma_non_data_ppdu[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* ppdu level */
+	u32 rx_ulofdma_data_ppdu[HTT_RX_PDEV_MAX_OFDMA_NUM_USER];     /* ppdu level */
+	u32 rx_ulofdma_mpdu_ok[HTT_RX_PDEV_MAX_OFDMA_NUM_USER];       /* mpdu level */
+	u32 rx_ulofdma_mpdu_fail[HTT_RX_PDEV_MAX_OFDMA_NUM_USER];     /* mpdu level */
+
+	u32 nss_count;
+	u32 pilot_count;
+	/* RxEVM stats in dB */
+	s32 rx_pilot_evm_db[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+			   [HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS];
+	/* rx_pilot_evm_db_mean:
+	 * EVM mean across pilots, computed as
+	 *     mean(10*log10(rx_pilot_evm_linear)) = mean(rx_pilot_evm_db)
+	 */
+	s32 rx_pilot_evm_db_mean[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	s8 rx_ul_fd_rssi[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+			[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* dBm units */
+	/* per_chain_rssi_pkt_type:
+	 * This field shows what type of rx frame the per-chain RSSI was computed
+	 * on, by recording the frame type and sub-type as bit-fields within this
+	 * field:
+	 * BIT [3 : 0]    :- IEEE80211_FC0_TYPE
+	 * BIT [7 : 4]    :- IEEE80211_FC0_SUBTYPE
+	 * BIT [31 : 8]   :- Reserved
+	 */
+	u32 per_chain_rssi_pkt_type;
+	s8 rx_per_chain_rssi_in_dbm[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+				   [HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+	u32 rx_su_ndpa;
+	u32 rx_11ax_su_txbf_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 rx_mu_ndpa;
+	u32 rx_11ax_mu_txbf_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 rx_br_poll;
+	u32 rx_11ax_dl_ofdma_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 rx_11ax_dl_ofdma_ru[HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS];
+
+	u32 rx_ulmumimo_non_data_ppdu[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+	u32 rx_ulmumimo_data_ppdu[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+	u32 rx_ulmumimo_mpdu_ok[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+	u32 rx_ulmumimo_mpdu_fail[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+	u32 rx_ulofdma_non_data_nusers[HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+	u32 rx_ulofdma_data_nusers[HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+};
+
+struct htt_rx_pdev_rate_ext_stats_tlv {
+	u8 rssi_chain_ext[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+			  [HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS];
+	s8 rx_per_chain_rssi_ext_in_dbm[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+				       [HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS];
+	s32 rssi_mcast_in_dbm;
+	s32 rssi_mgmt_in_dbm;
+	u32 rx_mcs_ext[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+	u32 rx_stbc_ext[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+	u32 rx_gi_ext[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
+		     [HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+	u32 ul_ofdma_rx_mcs_ext[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+	u32 ul_ofdma_rx_gi_ext[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+			      [HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+	u32 rx_11ax_su_txbf_mcs_ext[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+	u32 rx_11ax_mu_txbf_mcs_ext[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+	u32 rx_11ax_dl_ofdma_mcs_ext[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+	u32 rx_mcs_ext_2[HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
+	u32 rx_bw_ext[HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS];
+	u32 rx_gi_ext_2[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
+		       [HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
+	u32 rx_su_punctured_mode[HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS];
+	u32 reduced_rx_bw[HTT_RX_PDEV_STATS_NUM_REDUCED_CHAN_TYPES][HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+	u8 rssi_chain_ext_2[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS][HTT_RX_PDEV_STATS_NUM_BW_EXT_2_COUNTERS];
+	s8 rx_per_chain_rssi_ext_2_in_dbm[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+					 [HTT_RX_PDEV_STATS_NUM_BW_EXT_2_COUNTERS];
+};
+
+#define HTT_STATS_CMN_MAC_ID_M 0x000000ff
+#define HTT_STATS_CMN_MAC_ID_S 0
+
+#define HTT_STATS_CMN_MAC_ID_GET(_var)			\
+	(((_var) & HTT_STATS_CMN_MAC_ID_M) >>		\
+	 HTT_STATS_CMN_MAC_ID_S)
+
+#define HTT_STATS_CMN_MAC_ID_SET(_var, _val)			\
+	do {							\
+		HTT_CHECK_SET_VAL(HTT_STATS_CMN_MAC_ID, _val);	\
+		((_var) |= ((_val) << HTT_STATS_CMN_MAC_ID_S));	\
+	} while (0)
+
+#define HTT_RX_UL_MAX_UPLINK_RSSI_TRACK 5
+
+/* == RX PDEV/SOC STATS == */
+struct htt_rx_soc_fw_stats_tlv {
+	u32 fw_reo_ring_data_msdu;
+	u32 fw_to_host_data_msdu_bcmc;
+	u32 fw_to_host_data_msdu_uc;
+	u32 ofld_remote_data_buf_recycle_cnt;
+	u32 ofld_remote_free_buf_indication_cnt;
+
+	u32 ofld_buf_to_host_data_msdu_uc;
+	u32 reo_fw_ring_to_host_data_msdu_uc;
+
+	u32 wbm_sw_ring_reap;
+	u32 wbm_forward_to_host_cnt;
+	u32 wbm_target_recycle_cnt;
+
+	u32 target_refill_ring_recycle_cnt;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_empty_tlv_v {
+	u32 refill_ring_empty_cnt[0]; /* HTT_RX_STATS_REFILL_MAX_RING */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v {
+	u32 refill_ring_num_refill[0]; /* HTT_RX_STATS_REFILL_MAX_RING */
+};
+
+/* RXDMA error code from WBM released packets */
+enum htt_rx_rxdma_error_code_enum {
+	HTT_RX_RXDMA_OVERFLOW_ERR                           = 0,
+	HTT_RX_RXDMA_MPDU_LENGTH_ERR                        = 1,
+	HTT_RX_RXDMA_FCS_ERR                                = 2,
+	HTT_RX_RXDMA_DECRYPT_ERR                            = 3,
+	HTT_RX_RXDMA_TKIP_MIC_ERR                           = 4,
+	HTT_RX_RXDMA_UNECRYPTED_ERR                         = 5,
+	HTT_RX_RXDMA_MSDU_LEN_ERR                           = 6,
+	HTT_RX_RXDMA_MSDU_LIMIT_ERR                         = 7,
+	HTT_RX_RXDMA_WIFI_PARSE_ERR                         = 8,
+	HTT_RX_RXDMA_AMSDU_PARSE_ERR                        = 9,
+	HTT_RX_RXDMA_SA_TIMEOUT_ERR                         = 10,
+	HTT_RX_RXDMA_DA_TIMEOUT_ERR                         = 11,
+	HTT_RX_RXDMA_FLOW_TIMEOUT_ERR                       = 12,
+	HTT_RX_RXDMA_FLUSH_REQUEST                          = 13,
+	HTT_RX_RXDMA_ERR_CODE_RVSD0                         = 14,
+	HTT_RX_RXDMA_ERR_CODE_RVSD1                         = 15,
+
+	/* This MAX_ERR_CODE should not be used in any host/target messages,
+	 * so that even though it is defined within a host/target interface
+	 * definition header file, it isn't actually part of the host/target
+	 * interface, and thus can be modified.
+	 */
+	HTT_RX_RXDMA_MAX_ERR_CODE
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v {
+	u32 rxdma_err[0]; /* HTT_RX_RXDMA_MAX_ERR_CODE */
+};
+
+/* REO error code from WBM released packets */
+enum htt_rx_reo_error_code_enum {
+	HTT_RX_REO_QUEUE_DESC_ADDR_ZERO                     = 0,
+	HTT_RX_REO_QUEUE_DESC_NOT_VALID                     = 1,
+	HTT_RX_AMPDU_IN_NON_BA                              = 2,
+	HTT_RX_NON_BA_DUPLICATE                             = 3,
+	HTT_RX_BA_DUPLICATE                                 = 4,
+	HTT_RX_REGULAR_FRAME_2K_JUMP                        = 5,
+	HTT_RX_BAR_FRAME_2K_JUMP                            = 6,
+	HTT_RX_REGULAR_FRAME_OOR                            = 7,
+	HTT_RX_BAR_FRAME_OOR                                = 8,
+	HTT_RX_BAR_FRAME_NO_BA_SESSION                      = 9,
+	HTT_RX_BAR_FRAME_SN_EQUALS_SSN                      = 10,
+	HTT_RX_PN_CHECK_FAILED                              = 11,
+	HTT_RX_2K_ERROR_HANDLING_FLAG_SET                   = 12,
+	HTT_RX_PN_ERROR_HANDLING_FLAG_SET                   = 13,
+	HTT_RX_QUEUE_DESCRIPTOR_BLOCKED_SET                 = 14,
+	HTT_RX_REO_ERR_CODE_RVSD                            = 15,
+
+	/* This MAX_ERR_CODE should not be used in any host/target messages,
+	 * so that even though it is defined within a host/target interface
+	 * definition header file, it isn't actually part of the host/target
+	 * interface, and thus can be modified.
+	 */
+	HTT_RX_REO_MAX_ERR_CODE
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v {
+	u32 reo_err[0]; /* HTT_RX_REO_MAX_ERR_CODE */
+};
+
+/* == RX PDEV STATS == */
+#define HTT_STATS_SUBTYPE_MAX     16
+
+struct htt_rx_pdev_fw_stats_tlv {
+	u32 mac_id__word;
+	u32 ppdu_recvd;
+	u32 mpdu_cnt_fcs_ok;
+	u32 mpdu_cnt_fcs_err;
+	u32 tcp_msdu_cnt;
+	u32 tcp_ack_msdu_cnt;
+	u32 udp_msdu_cnt;
+	u32 other_msdu_cnt;
+	u32 fw_ring_mpdu_ind;
+	u32 fw_ring_mgmt_subtype[HTT_STATS_SUBTYPE_MAX];
+	u32 fw_ring_ctrl_subtype[HTT_STATS_SUBTYPE_MAX];
+	u32 fw_ring_mcast_data_msdu;
+	u32 fw_ring_bcast_data_msdu;
+	u32 fw_ring_ucast_data_msdu;
+	u32 fw_ring_null_data_msdu;
+	u32 fw_ring_mpdu_drop;
+	u32 ofld_local_data_ind_cnt;
+	u32 ofld_local_data_buf_recycle_cnt;
+	u32 drx_local_data_ind_cnt;
+	u32 drx_local_data_buf_recycle_cnt;
+	u32 local_nondata_ind_cnt;
+	u32 local_nondata_buf_recycle_cnt;
+
+	u32 fw_status_buf_ring_refill_cnt;
+	u32 fw_status_buf_ring_empty_cnt;
+	u32 fw_pkt_buf_ring_refill_cnt;
+	u32 fw_pkt_buf_ring_empty_cnt;
+	u32 fw_link_buf_ring_refill_cnt;
+	u32 fw_link_buf_ring_empty_cnt;
+
+	u32 host_pkt_buf_ring_refill_cnt;
+	u32 host_pkt_buf_ring_empty_cnt;
+	u32 mon_pkt_buf_ring_refill_cnt;
+	u32 mon_pkt_buf_ring_empty_cnt;
+	u32 mon_status_buf_ring_refill_cnt;
+	u32 mon_status_buf_ring_empty_cnt;
+	u32 mon_desc_buf_ring_refill_cnt;
+	u32 mon_desc_buf_ring_empty_cnt;
+	u32 mon_dest_ring_update_cnt;
+	u32 mon_dest_ring_full_cnt;
+
+	u32 rx_suspend_cnt;
+	u32 rx_suspend_fail_cnt;
+	u32 rx_resume_cnt;
+	u32 rx_resume_fail_cnt;
+	u32 rx_ring_switch_cnt;
+	u32 rx_ring_restore_cnt;
+	u32 rx_flush_cnt;
+	u32 rx_recovery_reset_cnt;
+};
+
+#define HTT_STATS_PHY_ERR_MAX 43
+
+struct htt_rx_pdev_fw_stats_phy_err_tlv {
+	u32 mac_id__word;
+	u32 total_phy_err_cnt;
+	/* Counts of different types of phy errs
+	 * The mapping of PHY error types to phy_err array elements is HW dependent.
+	 * The only currently-supported mapping is shown below:
+	 *
+	 * 0 phyrx_err_phy_off Reception aborted due to receiving a PHY_OFF TLV
+	 * 1 phyrx_err_synth_off
+	 * 2 phyrx_err_ofdma_timing
+	 * 3 phyrx_err_ofdma_signal_parity
+	 * 4 phyrx_err_ofdma_rate_illegal
+	 * 5 phyrx_err_ofdma_length_illegal
+	 * 6 phyrx_err_ofdma_restart
+	 * 7 phyrx_err_ofdma_service
+	 * 8 phyrx_err_ppdu_ofdma_power_drop
+	 * 9 phyrx_err_cck_blokker
+	 * 10 phyrx_err_cck_timing
+	 * 11 phyrx_err_cck_header_crc
+	 * 12 phyrx_err_cck_rate_illegal
+	 * 13 phyrx_err_cck_length_illegal
+	 * 14 phyrx_err_cck_restart
+	 * 15 phyrx_err_cck_service
+	 * 16 phyrx_err_cck_power_drop
+	 * 17 phyrx_err_ht_crc_err
+	 * 18 phyrx_err_ht_length_illegal
+	 * 19 phyrx_err_ht_rate_illegal
+	 * 20 phyrx_err_ht_zlf
+	 * 21 phyrx_err_false_radar_ext
+	 * 22 phyrx_err_green_field
+	 * 23 phyrx_err_bw_gt_dyn_bw
+	 * 24 phyrx_err_leg_ht_mismatch
+	 * 25 phyrx_err_vht_crc_error
+	 * 26 phyrx_err_vht_siga_unsupported
+	 * 27 phyrx_err_vht_lsig_len_invalid
+	 * 28 phyrx_err_vht_ndp_or_zlf
+	 * 29 phyrx_err_vht_nsym_lt_zero
+	 * 30 phyrx_err_vht_rx_extra_symbol_mismatch
+	 * 31 phyrx_err_vht_rx_skip_group_id0
+	 * 32 phyrx_err_vht_rx_skip_group_id1to62
+	 * 33 phyrx_err_vht_rx_skip_group_id63
+	 * 34 phyrx_err_ofdm_ldpc_decoder_disabled
+	 * 35 phyrx_err_defer_nap
+	 * 36 phyrx_err_fdomain_timeout
+	 * 37 phyrx_err_lsig_rel_check
+	 * 38 phyrx_err_bt_collision
+	 * 39 phyrx_err_unsupported_mu_feedback
+	 * 40 phyrx_err_ppdu_tx_interrupt_rx
+	 * 41 phyrx_err_unsupported_cbf
+	 * 42 phyrx_err_other
+	 */
+	u32 phy_err[HTT_STATS_PHY_ERR_MAX];
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v {
+	/* Num error MPDU for each RxDMA error type  */
+	u32 fw_ring_mpdu_err[0]; /* HTT_RX_STATS_RXDMA_MAX_ERR */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_pdev_fw_mpdu_drop_tlv_v {
+	/* Num MPDU dropped  */
+	u32 fw_mpdu_drop[0]; /* HTT_RX_STATS_FW_DROP_REASON_MAX */
+};
+
+#define HTT_PDEV_CCA_STATS_TX_FRAME_INFO_PRESENT               (0x1)
+#define HTT_PDEV_CCA_STATS_RX_FRAME_INFO_PRESENT               (0x2)
+#define HTT_PDEV_CCA_STATS_RX_CLEAR_INFO_PRESENT               (0x4)
+#define HTT_PDEV_CCA_STATS_MY_RX_FRAME_INFO_PRESENT            (0x8)
+#define HTT_PDEV_CCA_STATS_USEC_CNT_INFO_PRESENT              (0x10)
+#define HTT_PDEV_CCA_STATS_MED_RX_IDLE_INFO_PRESENT           (0x20)
+#define HTT_PDEV_CCA_STATS_MED_TX_IDLE_GLOBAL_INFO_PRESENT    (0x40)
+#define HTT_PDEV_CCA_STATS_CCA_OBBS_USEC_INFO_PRESENT         (0x80)
+
+struct htt_pdev_stats_cca_counters_tlv {
+	/* Below values are obtained from the HW Cycles counter registers */
+	u32 tx_frame_usec;
+	u32 rx_frame_usec;
+	u32 rx_clear_usec;
+	u32 my_rx_frame_usec;
+	u32 usec_cnt;
+	u32 med_rx_idle_usec;
+	u32 med_tx_idle_global_usec;
+	u32 cca_obss_usec;
+};
+
+struct htt_pdev_cca_stats_hist_v1_tlv {
+	u32    chan_num;
+	/* num of CCA records (Num of htt_pdev_stats_cca_counters_tlv) */
+	u32    num_records;
+	u32    valid_cca_counters_bitmap;
+	u32    collection_interval;
+
+	/* This will be followed by an array which contains the CCA stats
+	 * collected in the last N intervals,
+	 * if the indication is for last N intervals CCA stats.
+	 * Then the pdev_cca_stats[0] element contains the oldest CCA stats
+	 * and pdev_cca_stats[N-1] will have the most recent CCA stats.
+	 * htt_pdev_stats_cca_counters_tlv cca_hist_tlv[1];
+	 */
+};
+
+struct htt_pdev_stats_twt_session_tlv {
+	u32 vdev_id;
+	struct htt_mac_addr peer_mac;
+	u32 flow_id_flags;
+
+	/* TWT_DIALOG_ID_UNAVAILABLE is used
+	 * when TWT session is not initiated by host
+	 */
+	u32 dialog_id;
+	u32 wake_dura_us;
+	u32 wake_intvl_us;
+	u32 sp_offset_us;
+};
+
+struct htt_pdev_stats_twt_sessions_tlv {
+	u32 pdev_id;
+	u32 num_sessions;
+	struct htt_pdev_stats_twt_session_tlv twt_session[];
+};
+
+enum htt_rx_reo_resource_sample_id_enum {
+	/* Global link descriptor queued in REO */
+	HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_0           = 0,
+	HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_1           = 1,
+	HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_2           = 2,
+	/* Number of queue descriptors of this aging group */
+	HTT_RX_REO_RESOURCE_BUFFERS_USED_AC0                   = 3,
+	HTT_RX_REO_RESOURCE_BUFFERS_USED_AC1                   = 4,
+	HTT_RX_REO_RESOURCE_BUFFERS_USED_AC2                   = 5,
+	HTT_RX_REO_RESOURCE_BUFFERS_USED_AC3                   = 6,
+	/* Total number of MSDUs buffered in AC */
+	HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC0               = 7,
+	HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC1               = 8,
+	HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC2               = 9,
+	HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC3               = 10,
+
+	HTT_RX_REO_RESOURCE_STATS_MAX                          = 16
+};
+
+struct htt_rx_reo_resource_stats_tlv_v {
+	/* Variable based on the Number of records. HTT_RX_REO_RESOURCE_STATS_MAX */
+	u32 sample_id;
+	u32 total_max;
+	u32 total_avg;
+	u32 total_sample;
+	u32 non_zeros_avg;
+	u32 non_zeros_sample;
+	u32 last_non_zeros_max;
+	u32 last_non_zeros_min;
+	u32 last_non_zeros_avg;
+	u32 last_non_zeros_sample;
+};
+
+/* == TX SOUNDING STATS == */
+
+enum htt_txbf_sound_steer_modes {
+	HTT_IMPLICIT_TXBF_STEER_STATS                = 0,
+	HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS        = 1,
+	HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS         = 2,
+	HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS        = 3,
+	HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS         = 4,
+	HTT_TXBF_MAX_NUM_OF_MODES                    = 5
+};
+
+enum htt_stats_sounding_tx_mode {
+	HTT_TX_AC_SOUNDING_MODE                      = 0,
+	HTT_TX_AX_SOUNDING_MODE                      = 1,
+};
+
+struct htt_tx_sounding_stats_tlv {
+	u32 tx_sounding_mode; /* HTT_TX_XX_SOUNDING_MODE */
+	/* Counts number of soundings for all steering modes in each bw */
+	u32 cbf_20[HTT_TXBF_MAX_NUM_OF_MODES];
+	u32 cbf_40[HTT_TXBF_MAX_NUM_OF_MODES];
+	u32 cbf_80[HTT_TXBF_MAX_NUM_OF_MODES];
+	u32 cbf_160[HTT_TXBF_MAX_NUM_OF_MODES];
+	/*
+	 * The sounding array is a 2-D array stored as an 1-D array of
+	 * u32. The stats for a particular user/bw combination is
+	 * referenced with the following:
+	 *
+	 *          sounding[(user * max_bw) + bw]
+	 *
+	 * ... where max_bw == 4 for 160mhz
+	 */
+	u32 sounding[HTT_TX_NUM_OF_SOUNDING_STATS_WORDS];
+};
+
+struct htt_pdev_obss_pd_stats_tlv {
+	u32 num_obss_tx_ppdu_success;
+	u32 num_obss_tx_ppdu_failure;
+	u32 num_sr_tx_transmissions;
+	u32 num_spatial_reuse_opportunities;
+	u32 num_non_srg_opportunities;
+	u32 num_non_srg_ppdu_tried;
+	u32 num_non_srg_ppdu_success;
+	u32 num_srg_opportunities;
+	u32 num_srg_ppdu_tried;
+	u32 num_srg_ppdu_success;
+	u32 num_psr_opportunities;
+	u32 num_psr_ppdu_tried;
+	u32 num_psr_ppdu_success;
+};
+
+struct htt_ring_backpressure_stats_tlv {
+	u32 pdev_id;
+	u32 current_head_idx;
+	u32 current_tail_idx;
+	u32 num_htt_msgs_sent;
+	/* Time in milliseconds for which the ring has been in
+	 * its current backpressure condition
+	 */
+	u32 backpressure_time_ms;
+	/* backpressure_hist - histogram showing how many times
+	 * different degrees of backpressure duration occurred:
+	 * Index 0 indicates the number of times ring was
+	 * continuously in backpressure state for 100 - 200ms.
+	 * Index 1 indicates the number of times ring was
+	 * continuously in backpressure state for 200 - 300ms.
+	 * Index 2 indicates the number of times ring was
+	 * continuously in backpressure state for 300 - 400ms.
+	 * Index 3 indicates the number of times ring was
+	 * continuously in backpressure state for 400 - 500ms.
+	 * Index 4 indicates the number of times ring was
+	 * continuously in backpressure state beyond 500ms.
+	 */
+	u32 backpressure_hist[5];
+};
+
+/* 11BE STATS TLVs */
+enum htt_tx_err_status_t {
+	HTT_TXERR_NONE,
+	HTT_TXERR_RESP,	/* response timeout, mismatch,
+			 * BW mismatch, mimo ctrl mismatch,
+			 * CRC error.. */
+	HTT_TXERR_FILT,	/* blocked by tx filtering */
+	HTT_TXERR_FIFO,	/* fifo, misc errors in HW */
+	HTT_TXERR_SWABORT, /* software initiated abort (TX_ABORT) */
+
+	HTT_TXERR_RESERVED1,
+	HTT_TXERR_RESERVED2,
+	HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS = 7,
+
+	HTT_TXERR_INVALID = 0xff,
+};
+
+/* Matching enum for htt_tx_selfgen_sch_tsflag_error_stats */
+enum htt_tx_selfgen_sch_tsflag_error_stats {
+	HTT_TX_SELFGEN_SCH_TSFLAG_FLUSH_RCVD_ERR,
+	HTT_TX_SELFGEN_SCH_TSFLAG_FILT_SCHED_CMD_ERR,
+	HTT_TX_SELFGEN_SCH_TSFLAG_RESP_MISMATCH_ERR,
+	HTT_TX_SELFGEN_SCH_TSFLAG_RESP_CBF_MIMO_CTRL_MISMATCH_ERR,
+	HTT_TX_SELFGEN_SCH_TSFLAG_RESP_CBF_BW_MISMATCH_ERR,
+	HTT_TX_SELFGEN_SCH_TSFLAG_RETRY_COUNT_FAIL_ERR,
+	HTT_TX_SELFGEN_SCH_TSFLAG_RESP_TOO_LATE_RECEIVED_ERR,
+	HTT_TX_SELFGEN_SCH_TSFLAG_SIFS_STALL_NO_NEXT_CMD_ERR,
+
+	HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS = 8,
+	HTT_TX_SELFGEN_SCH_TSFLAG_ERROR_STATS_VALID = 8
+};
+
+/* 0...13,-2,-1 */
+#define HTT_TX_PDEV_STATS_NUM_BE_MCS_COUNTERS 16
+/* 20,40,80,160,320 MHz */
+#define HTT_TX_PDEV_STATS_NUM_BE_BW_COUNTERS  5
+
+#define HTT_TX_PDEV_STATS_NUM_HE_SIG_B_MCS_COUNTERS 6
+#define HTT_TX_PDEV_STATS_NUM_EHT_SIG_MCS_COUNTERS 4
+
+struct htt_tx_selfgen_be_stats_tlv {
+	u32 be_su_ndpa;
+	u32 be_su_ndp;
+	u32 be_mu_mimo_ndpa;
+	u32 be_mu_mimo_ndp;
+	u32 be_mu_mimo_brpoll[HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS - 1];
+	u32 be_basic_trigger;
+	u32 be_bsr_trigger;
+	u32 be_mu_bar_trigger;
+	u32 be_mu_rts_trigger;
+	u32 be_ulmumimo_trigger;
+	u32 be_su_ndpa_queued;
+	u32 be_su_ndp_queued;
+	u32 be_mu_mimo_ndpa_queued;
+	u32 be_mu_mimo_ndp_queued;
+	u32 be_mu_mimo_brpoll_queued[HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS - 1];
+	u32 be_ul_mumimo_trigger[HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS];
+};
+
+struct htt_tx_selfgen_be_err_stats_tlv {
+	u32 be_su_ndp_err;
+	u32 be_su_ndpa_err;
+	u32 be_mu_mimo_ndpa_err;
+	u32 be_mu_mimo_ndp_err;
+	u32 be_mu_mimo_brp_err[HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS - 1];
+	u32 be_basic_trigger_err;
+	u32 be_bsr_trigger_err;
+	u32 be_mu_bar_trigger_err;
+	u32 be_mu_rts_trigger_err;
+	u32 be_ulmumimo_trigger_err;
+	u32 be_mu_mimo_brp_err_num_cbf_received[HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS];
+	u32 be_su_ndpa_flushed;
+	u32 be_su_ndp_flushed;
+	u32 be_mu_mimo_ndpa_flushed;
+	u32 be_mu_mimo_ndp_flushed;
+	u32 be_mu_mimo_brpoll_flushed[HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS - 1];
+	u32 be_ul_mumimo_trigger_err[HTT_TX_PDEV_STATS_NUM_BE_MUMIMO_USER_STATS];
+};
+
+struct htt_tx_selfgen_be_sched_status_stats_tlv {
+	u32 be_su_ndpa_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	u32 be_su_ndp_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	u32 be_su_ndp_sch_flag_err[HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS];
+	u32 be_mu_mimo_ndpa_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	u32 be_mu_mimo_ndp_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	u32 be_mu_mimo_ndp_sch_flag_err[HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS];
+	u32 be_mu_brp_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	u32 be_mu_brp_sch_flag_err[HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS];
+	u32 be_mu_bar_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	u32 be_mu_bar_sch_flag_err[HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS];
+	u32 be_basic_trig_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	u32 be_basic_trig_sch_flag_err[HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS];
+	u32 be_ulmumimo_trig_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	u32 be_ulmumimo_trig_sch_flag_err[HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS];
+};
+
+struct htt_tx_pdev_be_dl_mu_ofdma_sch_stats_tlv {
+	u32 be_mu_ofdma_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+struct htt_tx_pdev_be_ul_mu_ofdma_sch_stats_tlv {
+	u32 be_ul_mu_ofdma_basic_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 be_ul_mu_ofdma_bsr_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 be_ul_mu_ofdma_bar_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 be_ul_mu_ofdma_brp_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+struct htt_tx_pdev_be_ul_mu_mimo_sch_stats_tlv {
+	u32 be_ul_mu_mimo_basic_sch_nusers[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS];
+	u32 be_ul_mu_mimo_brp_sch_nusers[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS];
+};
+
+struct htt_tx_pdev_rate_stats_be_tlv {
+	u32 be_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_BE_MCS_COUNTERS];
+	u32 be_mu_mimo_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	u32 be_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_BE_BW_COUNTERS];
+	u32 be_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS][HTT_TX_PDEV_STATS_NUM_BE_MCS_COUNTERS];
+	u32 be_mu_mimo_tx_ldpc;
+};
+
+struct htt_tx_pdev_rate_stats_be_ofdma_tlv {
+	u32 mac_id__word;
+	u32 be_ofdma_tx_ldpc;
+	u32 be_ofdma_tx_mcs[HTT_TX_PDEV_STATS_NUM_BE_MCS_COUNTERS];
+	u32 be_ofdma_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	u32 be_ofdma_tx_bw[HTT_TX_PDEV_STATS_NUM_BE_BW_COUNTERS];
+	u32 be_ofdma_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS][HTT_TX_PDEV_STATS_NUM_BE_MCS_COUNTERS];
+	u32 be_ofdma_tx_ru_size[HTT_TX_PDEV_STATS_NUM_BE_RU_SIZE_COUNTERS];
+	u32 be_ofdma_eht_sig_mcs[HTT_TX_PDEV_STATS_NUM_EHT_SIG_MCS_COUNTERS];
+};
+
+#define HTT_RX_UL_MAX_UPLINK_RSSI_TRACK 5
+struct htt_rx_pdev_be_ul_trigger_stats_tlv {
+	u32 mac_id__word;
+
+	u32 rx_11be_ul_ofdma;
+	u32 be_ul_ofdma_rx_mcs[HTT_RX_PDEV_STATS_NUM_BE_MCS_COUNTERS];
+	u32 be_ul_ofdma_rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS][HTT_RX_PDEV_STATS_NUM_BE_MCS_COUNTERS];
+	u32 be_ul_ofdma_rx_nss[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	u32 be_ul_ofdma_rx_bw[HTT_RX_PDEV_STATS_NUM_BE_BW_COUNTERS];
+	u32 be_ul_ofdma_rx_stbc;
+	u32 be_ul_ofdma_rx_ldpc;
+
+	/*
+	 * These are arrays to hold the number of PPDUs that we received per RU.
+	 * E.g. PPDUs (data or non data) received in RU26 will be incremented in
+	 * array offset 0 and similarly RU52 will be incremented in array offset 1
+	 */
+	/* PPDU level */
+	u32 be_rx_ulofdma_data_ru_size_ppdu[HTT_RX_PDEV_STATS_NUM_BE_RU_SIZE_COUNTERS];
+	u32 be_rx_ulofdma_non_data_ru_size_ppdu[HTT_RX_PDEV_STATS_NUM_BE_RU_SIZE_COUNTERS];
+
+	/*
+	 * These arrays hold Target RSSI (rx power the AP wants),
+	 * FD RSSI (rx power the AP sees) & Power headroom values of STAs
+	 * which can be identified by AIDs, during trigger based RX.
+	 * Array acts as a circular buffer and holds values for last 5 STAs
+	 * in the same order as RX.
+	 */
+	/*
+	 * STA AID array for identifying which STA the
+	 * Target-RSSI / FD-RSSI / pwr headroom stats are for
+	 */
+	u32 be_uplink_sta_aid[HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+
+	/*
+	 * Trig Target RSSI for STA AID in same index - UNIT(dBm)
+	 */
+	s32 be_uplink_sta_target_rssi[HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+
+	/*
+	 * Trig FD RSSI from STA AID in same index - UNIT(dBm)
+	 */
+	s32 be_uplink_sta_fd_rssi[HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+
+	/*
+	 * Trig power headroom for STA AID in same idx - UNIT(dB)
+	 */
+	u32 be_uplink_sta_power_headroom[HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+};
+
+struct htt_rx_pdev_be_ul_mimo_user_stats_tlv {
+	u32 user_index;
+	u32 be_rx_ulmumimo_non_data_ppdu;
+	u32 be_rx_ulmumimo_data_ppdu;
+	u32 be_rx_ulmumimo_mpdu_ok;
+	u32 be_rx_ulmumimo_mpdu_fail;
+};
+
+struct htt_tx_histogram_stats_tlv {
+	u32 rate_retry_mcs_drop_cnt;
+	u32 mcs_drop_rate[HTT_TX_PDEV_STATS_NUM_MCS_DROP_COUNTERS];
+	u32 per_histogram_cnt[HTT_TX_PDEV_STATS_NUM_PER_COUNTERS];
+	u32 low_latency_rate_cnt;
+	u32 su_burst_rate_drop_cnt;
+	u32 su_burst_rate_drop_fail_cnt;
+};
+
+struct htt_rx_pdev_ul_mumimo_trig_be_stats_tlv {
+	u32 mac_id__word;
+
+	/* Number of times UL MUMIMO RX packets received */
+	u32 rx_11be_ul_mumimo;
+
+	/* 11BE EHT UL MU-MIMO RX TB PPDU MCS stats */
+	u32 be_ul_mumimo_rx_mcs[HTT_RX_PDEV_STATS_NUM_BE_MCS_COUNTERS];
+	/*
+	 * 11BE EHT UL MU-MIMO RX GI & LTF stats.
+	 * Index 0 indicates 1xLTF + 1.6 msec GI
+	 * Index 1 indicates 2xLTF + 1.6 msec GI
+	 * Index 2 indicates 4xLTF + 3.2 msec GI
+	 */
+	u32 be_ul_mumimo_rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS][HTT_RX_PDEV_STATS_NUM_BE_MCS_COUNTERS];
+	/*
+	 * 11BE EHT UL MU-MIMO RX TB PPDU NSS stats
+	 * (Increments the individual user NSS in the UL MU MIMO PPDU received)
+	 */
+	u32 be_ul_mumimo_rx_nss[HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS];
+	/* 11BE EHT UL MU-MIMO RX TB PPDU BW stats */
+	u32 be_ul_mumimo_rx_bw[HTT_RX_PDEV_STATS_NUM_BE_BW_COUNTERS];
+	/* Number of times UL MUMIMO TB PPDUs received with STBC */
+	u32 be_ul_mumimo_rx_stbc;
+	/* Number of times UL MUMIMO TB PPDUs received with LDPC */
+	u32 be_ul_mumimo_rx_ldpc;
+
+	/* RSSI in dBm for Rx TB PPDUs */
+	s8 be_rx_ul_mumimo_chain_rssi_in_dbm[HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS][HTT_RX_PDEV_STATS_NUM_BE_BW_COUNTERS];
+	/* Target RSSI programmed in UL MUMIMO triggers (units dBm) */
+	s8 be_rx_ul_mumimo_target_rssi[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER][HTT_RX_PDEV_STATS_NUM_BE_BW_COUNTERS];
+	/* FD RSSI measured for Rx UL TB PPDUs (units dBm) */
+	s8 be_rx_ul_mumimo_fd_rssi[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER][HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS];
+	/* Average pilot EVM measured for RX UL TB PPDU */
+	s8 be_rx_ulmumimo_pilot_evm_dB_mean[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER][HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS];
+};
+
+#ifdef CONFIG_ATH12K_DEBUGFS
+
+void ath12k_debugfs_htt_stats_init(struct ath12k *ar);
+void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab,
+					  struct sk_buff *skb);
+int ath12k_debugfs_htt_stats_req(struct ath12k *ar);
+
+#else /* CONFIG_ATH12K_DEBUGFS */
+
+static inline void ath12k_debugfs_htt_stats_init(struct ath12k *ar)
+{
+}
+
+static inline void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab,
+							struct sk_buff *skb)
+{
+}
+
+static inline int ath12k_debugfs_htt_stats_req(struct ath12k *ar)
+{
+	return 0;
+}
+
+#endif /* CONFIG_ATH12K_DEBUGFS */
+
+
+struct htt_rx_pdev_ul_mumimo_trig_stats_tlv {
+	u32 mac_id__word;
+	u32 rx_11ax_ul_mumimo;
+	u32 ul_mumimo_rx_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 ul_mumimo_rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
+			   [HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 ul_mumimo_rx_nss[HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS];
+	u32 ul_mumimo_rx_bw[HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+	u32 ul_mumimo_rx_stbc;
+	u32 ul_mumimo_rx_ldpc;
+	u32 ul_mumimo_rx_mcs_ext[HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+	u32 ul_mumimo_rx_gi_ext[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
+			       [HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+	s8  rx_ul_mumimo_chain_rssi_in_dbm[HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS]
+					  [HTT_RX_PDEV_STATS_TOTAL_BW_COUNTERS];
+	s8  rx_ul_mumimo_target_rssi[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS]
+				    [HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+	s8  rx_ul_mumimo_fd_rssi[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS]
+				[HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS];
+	s8  rx_ulmumimo_pilot_evm_db_mean[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS]
+					 [HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS];
+	u32 reduced_ul_mumimo_rx_bw[HTT_RX_PDEV_STATS_NUM_REDUCED_CHAN_TYPES]
+				   [HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+};
+
+struct htt_rx_pdev_ul_mimo_user_stats_tlv {
+	u32 user_index;
+	u32 rx_ulmumimo_non_data_ppdu;
+	u32 rx_ulmumimo_data_ppdu;
+	u32 rx_ulmumimo_mpdu_ok;
+	u32 rx_ulmumimo_mpdu_fail;
+};
+
+struct htt_rx_pdev_ul_ofdma_user_stats_tlv {
+	u32 user_index;
+	u32 rx_ulofdma_non_data_ppdu;
+	u32 rx_ulofdma_data_ppdu;
+	u32 rx_ulofdma_mpdu_ok;
+	u32 rx_ulofdma_mpdu_fail;
+	u32 rx_ulofdma_non_data_nusers;
+	u32 rx_ulofdma_data_nusers;
+};
+
+#define HTT_RX_UL_MAX_UPLINK_RSSI_TRACK 5
+
+struct htt_rx_pdev_ul_trigger_stats_tlv {
+	u32 mac_id__word;
+	u32 rx_11ax_ul_ofdma;
+	u32 ul_ofdma_rx_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 ul_ofdma_rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
+			  [HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	u32 ul_ofdma_rx_nss[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	u32 ul_ofdma_rx_bw[HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+	u32 ul_ofdma_rx_stbc;
+	u32 ul_ofdma_rx_ldpc;
+	u32 rx_ulofdma_data_ru_size_ppdu[HTT_RX_PDEV_STATS_NUM_RU_SIZE_160MHZ_CNTRS];
+	u32 rx_ulofdma_non_data_ru_size_ppdu[HTT_RX_PDEV_STATS_NUM_RU_SIZE_160MHZ_CNTRS];
+
+	u32 uplink_sta_aid[HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+	u32 uplink_sta_target_rssi[HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+	u32 uplink_sta_fd_rssi[HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+	u32 uplink_sta_power_headroom[HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+	u32 reduced_ul_ofdma_rx_bw[HTT_RX_PDEV_STATS_NUM_REDUCED_CHAN_TYPES]
+				  [HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+};
+
+#define HTT_LATENCY_PROFILE_MAX_HIST        3
+#define HTT_STATS_MAX_PROF_STATS_NAME_LEN  32
+#define HTT_INTERRUPTS_LATENCY_PROFILE_MAX_HIST 3
+/* HTT stats TLV: one named latency profile (min/max/last/total/avg in the
+ * firmware's units) plus a HTT_LATENCY_PROFILE_MAX_HIST-bin histogram
+ * bucketed by hist_intvl.
+ */
+struct htt_latency_prof_stats_tlv {
+	u32 print_header;
+	u8 latency_prof_name[HTT_STATS_MAX_PROF_STATS_NAME_LEN];
+	u32 cnt;
+	u32 min;
+	u32 max;
+	u32 last;
+	u32 tot;
+	u32 avg;
+	u32 hist_intvl;
+	u32 hist[HTT_LATENCY_PROFILE_MAX_HIST];
+};
+
+/* HTT stats TLV: traffic context captured alongside the latency profiles. */
+struct htt_latency_prof_ctx_tlv {
+	u32 duration;
+	u32 tx_msdu_cnt;
+	u32 tx_mpdu_cnt;
+	u32 tx_ppdu_cnt;
+	u32 rx_msdu_cnt;
+	u32 rx_mpdu_cnt;
+};
+
+/* HTT stats TLV: number of times latency profiling was enabled. */
+struct htt_latency_prof_cnt_tlv {
+	u32 prof_enable_cnt;
+};
+
+/* Histogram sizes for the rx flow search engine (FSE) stats below. */
+#define HTT_RX_MAX_PEAK_OCCUPANCY_INDEX		10
+#define HTT_RX_MAX_CURRENT_OCCUPANCY_INDEX	10
+#define HTT_RX_SQUARE_INDEX			6
+#define HTT_RX_MAX_PEAK_SEARCH_INDEX		4
+#define HTT_RX_MAX_PENDING_SEARCH_INDEX		4
+
+/* HTT stats TLV: rx flow search engine enable/invalidate/hit/search counters
+ * and cache-occupancy histograms.
+ */
+struct htt_rx_fse_stats_tlv {
+	u32 fse_enable_cnt;
+	u32 fse_disable_cnt;
+	u32 fse_cache_invalidate_entry_cnt;
+	u32 fse_full_cache_invalidate_cnt;
+	u32 fse_num_cache_hits_cnt;
+	u32 fse_num_searches_cnt;
+	u32 fse_cache_occupancy_peak_cnt[HTT_RX_MAX_PEAK_OCCUPANCY_INDEX];
+	u32 fse_cache_occupancy_curr_cnt[HTT_RX_MAX_CURRENT_OCCUPANCY_INDEX];
+	u32 fse_search_stat_square_cnt[HTT_RX_SQUARE_INDEX];
+	u32 fse_search_stat_peak_cnt[HTT_RX_MAX_PEAK_SEARCH_INDEX];
+	u32 fse_search_stat_search_pending_cnt[HTT_RX_MAX_PENDING_SEARCH_INDEX];
+};
+
+/* == PDEV RX RATE EXT STATS == */
+/* Array bounds for the extended rate / TXBF rate stats TLVs below. */
+#define HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT 14
+/* HTT_RX_PEER_STATS_NUM_BW_EXT_COUNTERS:
+ * bw index 4 (bw ext index 0): rssi_ext80_low20_chain0
+ * bw index 5 (bw ext index 1): rssi_ext80_low_high20_chain0
+ * bw index 6 (bw ext index 2): rssi_ext80_high_low20_chain0
+ * bw index 7 (bw ext index 3): rssi_ext80_high20_chain0
+ */
+#define HTT_RX_PEER_STATS_NUM_BW_EXT_COUNTERS 4
+#define HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS 4
+#define HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS 14
+#define HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS 5
+
+#define HTT_TX_TXBF_RATE_STATS_NUM_REDUCED_CHAN_TYPES 2
+
+/* HTT stats TLV: tx rate counters for SU transmissions, split by beamforming
+ * mode — txbf (explicit), ibf (implicit), ol (open loop) — over MCS, NSS and
+ * bandwidth, plus legacy OFDM rates.
+ */
+struct htt_pdev_txrate_txbf_stats_tlv {
+	u32 tx_su_txbf_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+	u32 tx_su_ibf_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+	u32 tx_su_ol_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+	u32 tx_su_txbf_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	u32 tx_su_ibf_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	u32 tx_su_ol_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	u32 tx_su_txbf_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+	u32 tx_su_ibf_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+	u32 tx_su_ol_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+	u32 tx_legacy_ofdm_rate[HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+	u32 reduced_tx_su_txbf_bw[HTT_TX_TXBF_RATE_STATS_NUM_REDUCED_CHAN_TYPES]
+				 [HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+	u32 reduced_tx_su_ibf_bw[HTT_TX_TXBF_RATE_STATS_NUM_REDUCED_CHAN_TYPES]
+				[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+	u32 reduced_tx_su_ol_bw[HTT_TX_TXBF_RATE_STATS_NUM_REDUCED_CHAN_TYPES]
+			       [HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+};
+
+/* HTT stats TLV: 11ax OFDMA NDPA queued/tried/flushed/error counters,
+ * indexed by OFDMA user count.
+ */
+struct htt_txbf_ofdma_ndpa_stats_tlv {
+	u32 ax_ofdma_ndpa_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ofdma_ndpa_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ofdma_ndpa_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ofdma_ndpa_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+/* HTT stats TLV: 11ax OFDMA NDP counters, same layout as the NDPA TLV. */
+struct htt_txbf_ofdma_ndp_stats_tlv {
+	u32 ax_ofdma_ndp_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ofdma_ndp_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ofdma_ndp_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ofdma_ndp_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+/* HTT stats TLV: 11ax OFDMA BR-poll counters; the cbf_rcvd histogram has one
+ * extra bucket.
+ */
+struct htt_txbf_ofdma_brp_stats_tlv {
+	u32 ax_ofdma_brpoll_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ofdma_brpoll_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ofdma_brpoll_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ofdma_brp_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ofdma_brp_err_num_cbf_rcvd[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS + 1];
+};
+
+/* HTT stats TLV: 11ax OFDMA steering/sounding counters per user count. */
+struct htt_txbf_ofdma_steer_stats_tlv {
+	u32 ax_ofdma_num_ppdu_steer[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ofdma_num_ppdu_ol[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ofdma_num_usrs_prefetch[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ofdma_num_usrs_sound[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ofdma_num_usrs_force_sound[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+/* Per-element 11ax OFDMA NDPA counters; num_elems_* / arr_elem_size_* in the
+ * enclosing TLVs describe how many elements follow and their size, so the
+ * trailing [1] arrays are variable-length in the firmware message.
+ */
+struct htt_txbf_ofdma_ax_ndpa_stats_elem_t {
+	u32 ax_ofdma_ndpa_queued;
+	u32 ax_ofdma_ndpa_tried;
+	u32 ax_ofdma_ndpa_flushed;
+	u32 ax_ofdma_ndpa_err;
+};
+
+struct htt_txbf_ofdma_ax_ndpa_stats_tlv {
+	u32 num_elems_ax_ndpa_arr;
+	u32 arr_elem_size_ax_ndpa;
+	struct htt_txbf_ofdma_ax_ndpa_stats_elem_t ax_ndpa[1];
+};
+
+/* Per-element 11ax OFDMA NDP counters. */
+struct htt_txbf_ofdma_ax_ndp_stats_elem_t {
+	u32 ax_ofdma_ndp_queued;
+	u32 ax_ofdma_ndp_tried;
+	u32 ax_ofdma_ndp_flushed;
+	u32 ax_ofdma_ndp_err;
+};
+
+struct htt_txbf_ofdma_ax_ndp_stats_tlv {
+	u32 num_elems_ax_ndp_arr;
+	u32 arr_elem_size_ax_ndp;
+	struct htt_txbf_ofdma_ax_ndp_stats_elem_t ax_ndp[1];
+};
+
+/* Per-element 11ax OFDMA BR-poll counters. */
+struct htt_txbf_ofdma_ax_brp_stats_elem_t {
+	u32 ax_ofdma_brpoll_queued;
+	u32 ax_ofdma_brpoll_tried;
+	u32 ax_ofdma_brpoll_flushed;
+	u32 ax_ofdma_brp_err;
+	u32 ax_ofdma_brp_err_num_cbf_rcvd;
+};
+
+struct htt_txbf_ofdma_ax_brp_stats_tlv {
+	u32 num_elems_ax_brp_arr;
+	u32 arr_elem_size_ax_brp;
+	struct htt_txbf_ofdma_ax_brp_stats_elem_t ax_brp[1];
+};
+
+/* Per-element 11ax OFDMA steering/sounding counters. */
+struct htt_txbf_ofdma_ax_steer_stats_elem_t {
+	u32 ax_ofdma_num_ppdu_steer;
+	u32 ax_ofdma_num_ppdu_ol;
+	u32 ax_ofdma_num_usrs_prefetch;
+	u32 ax_ofdma_num_usrs_sound;
+	u32 ax_ofdma_num_usrs_force_sound;
+};
+
+struct htt_txbf_ofdma_ax_steer_stats_tlv {
+	u32 num_elems_ax_steer_arr;
+	u32 arr_elem_size_ax_steer;
+	struct htt_txbf_ofdma_ax_steer_stats_elem_t ax_steer[1];
+};
+
+struct htt_txbf_ofdma_ax_steer_mpdu_stats_tlv {
+	/* 11AX HE OFDMA MPDUs tried in rbo steering */
+	u32 ax_ofdma_rbo_steer_mpdus_tried;
+	/* 11AX HE OFDMA MPDUs failed in rbo steering */
+	u32 ax_ofdma_rbo_steer_mpdus_failed;
+	/* 11AX HE OFDMA MPDUs tried in sifs steering */
+	u32 ax_ofdma_sifs_steer_mpdus_tried;
+	/* 11AX HE OFDMA MPDUs failed in sifs steering */
+	u32 ax_ofdma_sifs_steer_mpdus_failed;
+};
+
+/* 11be (EHT) counterparts of the ax OFDMA elem/TLV pairs above; the trailing
+ * [1] arrays are variable-length, sized by num_elems_* / arr_elem_size_*.
+ */
+struct htt_txbf_ofdma_be_ndpa_stats_elem_t {
+	u32 be_ofdma_ndpa_queued;
+	u32 be_ofdma_ndpa_tried;
+	u32 be_ofdma_ndpa_flushed;
+	u32 be_ofdma_ndpa_err;
+};
+
+struct htt_txbf_ofdma_be_ndpa_stats_tlv {
+	u32 num_elems_be_ndpa_arr;
+	u32 arr_elem_size_be_ndpa;
+	struct htt_txbf_ofdma_be_ndpa_stats_elem_t be_ndpa[1];
+};
+
+struct htt_txbf_ofdma_be_ndp_stats_elem_t {
+	u32 be_ofdma_ndp_queued;
+	u32 be_ofdma_ndp_tried;
+	u32 be_ofdma_ndp_flushed;
+	u32 be_ofdma_ndp_err;
+};
+
+struct htt_txbf_ofdma_be_ndp_stats_tlv {
+	u32 num_elems_be_ndp_arr;
+	u32 arr_elem_size_be_ndp;
+	struct htt_txbf_ofdma_be_ndp_stats_elem_t be_ndp[1];
+};
+
+struct htt_txbf_ofdma_be_brp_stats_elem_t {
+	u32 be_ofdma_brpoll_queued;
+	u32 be_ofdma_brpoll_tried;
+	u32 be_ofdma_brpoll_flushed;
+	u32 be_ofdma_brp_err;
+	u32 be_ofdma_brp_err_num_cbf_rcvd;
+};
+
+struct htt_txbf_ofdma_be_brp_stats_tlv {
+	u32 num_elems_be_brp_arr;
+	u32 arr_elem_size_be_brp;
+	struct htt_txbf_ofdma_be_brp_stats_elem_t be_brp[1];
+};
+
+struct htt_txbf_ofdma_be_steer_stats_elem_t {
+	u32 be_ofdma_num_ppdu_steer;
+	u32 be_ofdma_num_ppdu_ol;
+	u32 be_ofdma_num_usrs_prefetch;
+	u32 be_ofdma_num_usrs_sound;
+	u32 be_ofdma_num_usrs_force_sound;
+};
+
+struct htt_txbf_ofdma_be_steer_stats_tlv {
+	u32 num_elems_be_steer_arr;
+	u32 arr_elem_size_be_steer;
+	struct htt_txbf_ofdma_be_steer_stats_elem_t be_steer[1];
+};
+
+struct htt_txbf_ofdma_be_steer_mpdu_stats_tlv {
+	/* 11BE EHT OFDMA MPDUs tried in rbo steering */
+	u32 be_ofdma_rbo_steer_mpdus_tried;
+	/* 11BE EHT OFDMA MPDUs failed in rbo steering */
+	u32 be_ofdma_rbo_steer_mpdus_failed;
+	/* 11BE EHT OFDMA MPDUs tried in sifs steering */
+	u32 be_ofdma_sifs_steer_mpdus_tried;
+	/* 11BE EHT OFDMA MPDUs failed in sifs steering */
+	u32 be_ofdma_sifs_steer_mpdus_failed;
+};
+
+/* HTT stats TLV: DMAC reset counters with 64-bit timestamps split into
+ * lo/hi 32-bit halves (in ms).
+ */
+struct htt_dmac_reset_stats_tlv {
+	u32 reset_count;
+	u32 reset_time_lo_ms;
+	u32 reset_time_hi_ms;
+	u32 disengage_time_lo_ms;
+	u32 disengage_time_hi_ms;
+	u32 engage_time_lo_ms;
+	u32 engage_time_hi_ms;
+	u32 disengage_count;
+	u32 engage_count;
+	u32 drain_dest_ring_mask;
+};
+
+/* HTT stats TLV: 11be UL OFDMA rx counters for one user_index (11be
+ * counterpart of htt_rx_pdev_ul_ofdma_user_stats_tlv).
+ */
+struct htt_rx_pdev_be_ul_ofdma_user_stats_tlv {
+	u32 user_index;
+	u32 be_rx_ulofdma_non_data_ppdu;
+	u32 be_rx_ulofdma_data_ppdu;
+	u32 be_rx_ulofdma_mpdu_ok;
+	u32 be_rx_ulofdma_mpdu_fail;
+	u32 be_rx_ulofdma_non_data_nusers;
+	u32 be_rx_ulofdma_data_nusers;
+};
+
+/* Array bounds shared by the phy/puncture stats TLVs below. */
+#define HTT_MAX_RX_PKT_CNT 8
+#define HTT_MAX_RX_PKT_CRC_PASS_CNT 8
+#define HTT_MAX_PER_BLK_ERR_CNT 20
+#define HTT_MAX_RX_OTA_ERR_CNT 14
+#define HTT_STATS_MAX_CHAINS 8
+#define HTT_MAX_CH_PWR_INFO_SIZE 16
+#define HTT_PUNCTURE_STATS_MAX_SUBBAND_COUNT 32
+
+/* mac_id lives in bits [7:0] of htt_pdev_puncture_stats_tlv::mac_id__word. */
+#define HTT_PDEV_PUNCTURE_STATS_MAC_ID_M 0x000000ff
+#define HTT_PDEV_PUNCTURE_STATS_MAC_ID_S 0
+
+#define HTT_PDEV_PUNCTURE_STATS_MAC_ID_GET(_var) \
+	(((_var) & HTT_PDEV_PUNCTURE_STATS_MAC_ID_M) >> \
+	 HTT_PDEV_PUNCTURE_STATS_MAC_ID_S)
+#define HTT_PDEV_PUNCTURE_STATS_MAC_ID_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_PDEV_PUNCTURE_STATS_MAC_ID, _val); \
+		((_var) |= ((_val) << HTT_PDEV_PUNCTURE_STATS_MAC_ID_S)); \
+	} while (0)
+
+/* HTT stats TLV: transmit power control state — regulatory limits per chain,
+ * current/max tx power and per-sub-band centre frequency / tx power tables.
+ */
+struct htt_phy_tpc_stats_tlv {
+	u32 pdev_id;
+	u32 tx_power_scale;
+	u32 tx_power_scale_db;
+	u32 min_negative_tx_power;
+	u32 reg_ctl_domain;
+	u32 max_reg_allowed_power[HTT_STATS_MAX_CHAINS];
+	u32 max_reg_allowed_power_6g[HTT_STATS_MAX_CHAINS];
+	u32 twice_max_rd_power;
+	u32 max_tx_power;
+	u32 home_max_tx_power;
+	u32 psd_power;
+	u32 eirp_power;
+	u32 power_type_6ghz;
+	u32 sub_band_cfreq[HTT_MAX_CH_PWR_INFO_SIZE];
+	u32 sub_band_txpower[HTT_MAX_CH_PWR_INFO_SIZE];
+};
+
+/* HTT stats TLV: preamble-puncturing usage; num_subbands_used_cnt[i] counts
+ * PPDUs that used i sub-bands.  mac_id accessors:
+ * HTT_PDEV_PUNCTURE_STATS_MAC_ID_GET/_SET.
+ */
+struct htt_pdev_puncture_stats_tlv {
+	union {
+		struct {
+			u32 mac_id:8,
+			    reserved:24;
+		};
+		u32 mac_id__word;
+	};
+	u32 direction;
+	u32 preamble;
+	u32 ppdu_type;
+	u32 subband_count;
+	u32 last_used_pattern_mask;
+	u32 num_subbands_used_cnt[HTT_PUNCTURE_STATS_MAX_SUBBAND_COUNT];
+};
+
+/* Calibration profile indices for htt_latency_prof_cal_stats_tlv arrays. */
+enum {
+	HTT_STATS_CAL_PROF_COLD_BOOT = 0,
+	HTT_STATS_CAL_PROF_FULL_CHAN_SWITCH = 1,
+	HTT_STATS_CAL_PROF_SCAN_CHAN_SWITCH = 2,
+	HTT_STATS_CAL_PROF_DPD_SPLIT_CAL = 3,
+
+	HTT_STATS_MAX_PROF_CAL = 4,
+};
+
+#define HTT_STATS_MAX_CAL_IDX_CNT 8
+/* HTT stats TLV: per-[cal profile][cal index] calibration latency and
+ * page-fault statistics; times are in us.
+ */
+struct htt_latency_prof_cal_stats_tlv {
+
+	u8 latency_prof_name[HTT_STATS_MAX_PROF_CAL][HTT_STATS_MAX_PROF_STATS_NAME_LEN];
+
+	/** To verify whether prof cal is enabled or not */
+	u32 enable;
+
+	/** current pdev_id */
+	u32 pdev_id;
+
+	/** The cnt is incremented when each time the calindex takes place */
+	u32 cnt[HTT_STATS_MAX_PROF_CAL][HTT_STATS_MAX_CAL_IDX_CNT];
+
+	/** Minimum time taken to complete the calibration - in us */
+	u32 min[HTT_STATS_MAX_PROF_CAL][HTT_STATS_MAX_CAL_IDX_CNT];
+
+	/** Maximum time taken to complete the calibration -in us */
+	u32 max[HTT_STATS_MAX_PROF_CAL][HTT_STATS_MAX_CAL_IDX_CNT];
+
+	/** Time taken by the cal for its final time execution - in us */
+	u32 last[HTT_STATS_MAX_PROF_CAL][HTT_STATS_MAX_CAL_IDX_CNT];
+
+	/** Total time taken - in us */
+	u32 tot[HTT_STATS_MAX_PROF_CAL][HTT_STATS_MAX_CAL_IDX_CNT];
+
+	/** hist_intvl - by default will be set to 2000 us */
+	u32 hist_intvl[HTT_STATS_MAX_PROF_CAL][HTT_STATS_MAX_CAL_IDX_CNT];
+
+	/**
+	 * If last is less than hist_intvl, then hist[0]++,
+	 * If last is less than hist_intvl << 1, then hist[1]++,
+	 * otherwise hist[2]++.
+	 */
+	u32 hist[HTT_STATS_MAX_PROF_CAL][HTT_STATS_MAX_CAL_IDX_CNT][HTT_INTERRUPTS_LATENCY_PROFILE_MAX_HIST];
+
+	/** Pf_last will log the current no of page faults */
+	u32 pf_last[HTT_STATS_MAX_PROF_CAL][HTT_STATS_MAX_CAL_IDX_CNT];
+
+	/** Sum of all page faults happened */
+	u32 pf_tot[HTT_STATS_MAX_PROF_CAL][HTT_STATS_MAX_CAL_IDX_CNT];
+
+	/** If pf_last > pf_max then pf_max = pf_last */
+	u32 pf_max[HTT_STATS_MAX_PROF_CAL][HTT_STATS_MAX_CAL_IDX_CNT];
+
+	/**
+	 * For each cal profile, only certain no of cal indices were invoked,
+	 * this member will store what all the indices got invoked per each
+	 * cal profile
+	 */
+	u32 enabledCalIdx[HTT_STATS_MAX_PROF_CAL][HTT_STATS_MAX_CAL_IDX_CNT];
+
+	/** No of indices invoked per each cal profile */
+	u32 CalCnt[HTT_STATS_MAX_PROF_CAL];
+};
+
+/* Bitfield masks/shifts for htt_ml_peer_details_tlv::msg_dword_1 (first
+ * group) and ::msg_dword_2 (participating_chips_bitmap).
+ */
+#define HTT_ML_PEER_DETAILS_NUM_LINKS_M			0x00000003
+#define HTT_ML_PEER_DETAILS_NUM_LINKS_S			0
+#define HTT_ML_PEER_DETAILS_ML_PEER_ID_M		0x00003FFC
+#define HTT_ML_PEER_DETAILS_ML_PEER_ID_S		2
+#define HTT_ML_PEER_DETAILS_PRIMARY_LINK_IDX_M		0x0001C000
+#define HTT_ML_PEER_DETAILS_PRIMARY_LINK_IDX_S		14
+#define HTT_ML_PEER_DETAILS_PRIMARY_CHIP_ID_M		0x00060000
+#define HTT_ML_PEER_DETAILS_PRIMARY_CHIP_ID_S		17
+#define HTT_ML_PEER_DETAILS_LINK_INIT_COUNT_M		0x00380000
+#define HTT_ML_PEER_DETAILS_LINK_INIT_COUNT_S		19
+#define HTT_ML_PEER_DETAILS_NON_STR_M			0x00400000
+#define HTT_ML_PEER_DETAILS_NON_STR_S			22
+#define HTT_ML_PEER_DETAILS_EMLSR_M			0x00800000
+#define HTT_ML_PEER_DETAILS_EMLSR_S			23
+#define HTT_ML_PEER_DETAILS_IS_STA_KO_M			0x01000000
+#define HTT_ML_PEER_DETAILS_IS_STA_KO_S			24
+#define HTT_ML_PEER_DETAILS_NUM_LOCAL_LINKS_M		0x06000000
+#define HTT_ML_PEER_DETAILS_NUM_LOCAL_LINKS_S		25
+#define HTT_ML_PEER_DETAILS_ALLOCATED_M			0x08000000
+#define HTT_ML_PEER_DETAILS_ALLOCATED_S			27
+
+#define HTT_ML_PEER_DETAILS_PARTICIPATING_CHIPS_BITMAP_M	0x000000ff
+#define HTT_ML_PEER_DETAILS_PARTICIPATING_CHIPS_BITMAP_S	0
+
+/* GET/SET accessors for the ML peer-details bitfields above; each SET clears
+ * the field before OR-ing the new value and range-checks it via
+ * HTT_CHECK_SET_VAL.
+ */
+#define HTT_ML_PEER_DETAILS_NUM_LINKS_GET(_var) \
+	(((_var) & HTT_ML_PEER_DETAILS_NUM_LINKS_M) >> \
+	 HTT_ML_PEER_DETAILS_NUM_LINKS_S)
+
+#define HTT_ML_PEER_DETAILS_NUM_LINKS_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_PEER_DETAILS_NUM_LINKS, _val); \
+		((_var) &= ~(HTT_ML_PEER_DETAILS_NUM_LINKS_M)); \
+		((_var) |= ((_val) << HTT_ML_PEER_DETAILS_NUM_LINKS_S)); \
+	} while (0)
+
+#define HTT_ML_PEER_DETAILS_ML_PEER_ID_GET(_var) \
+	(((_var) & HTT_ML_PEER_DETAILS_ML_PEER_ID_M) >> \
+	 HTT_ML_PEER_DETAILS_ML_PEER_ID_S)
+
+#define HTT_ML_PEER_DETAILS_ML_PEER_ID_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_PEER_DETAILS_ML_PEER_ID, _val); \
+		((_var) &= ~(HTT_ML_PEER_DETAILS_ML_PEER_ID_M)); \
+		((_var) |= ((_val) << HTT_ML_PEER_DETAILS_ML_PEER_ID_S)); \
+	} while (0)
+
+#define HTT_ML_PEER_DETAILS_PRIMARY_LINK_IDX_GET(_var) \
+	(((_var) & HTT_ML_PEER_DETAILS_PRIMARY_LINK_IDX_M) >> \
+	 HTT_ML_PEER_DETAILS_PRIMARY_LINK_IDX_S)
+
+#define HTT_ML_PEER_DETAILS_PRIMARY_LINK_IDX_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_PEER_DETAILS_PRIMARY_LINK_IDX, _val); \
+		((_var) &= ~(HTT_ML_PEER_DETAILS_PRIMARY_LINK_IDX_M)); \
+		((_var) |= ((_val) << HTT_ML_PEER_DETAILS_PRIMARY_LINK_IDX_S)); \
+	} while (0)
+#define HTT_ML_PEER_DETAILS_PRIMARY_CHIP_ID_GET(_var) \
+	(((_var) & HTT_ML_PEER_DETAILS_PRIMARY_CHIP_ID_M) >> \
+	 HTT_ML_PEER_DETAILS_PRIMARY_CHIP_ID_S)
+
+#define HTT_ML_PEER_DETAILS_PRIMARY_CHIP_ID_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_PEER_DETAILS_PRIMARY_CHIP_ID, _val); \
+		((_var) &= ~(HTT_ML_PEER_DETAILS_PRIMARY_CHIP_ID_M)); \
+		((_var) |= ((_val) << HTT_ML_PEER_DETAILS_PRIMARY_CHIP_ID_S)); \
+	} while (0)
+
+#define HTT_ML_PEER_DETAILS_LINK_INIT_COUNT_GET(_var) \
+	(((_var) & HTT_ML_PEER_DETAILS_LINK_INIT_COUNT_M) >> \
+	 HTT_ML_PEER_DETAILS_LINK_INIT_COUNT_S)
+
+#define HTT_ML_PEER_DETAILS_LINK_INIT_COUNT_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_PEER_DETAILS_LINK_INIT_COUNT, _val); \
+		((_var) &= ~(HTT_ML_PEER_DETAILS_LINK_INIT_COUNT_M)); \
+		((_var) |= ((_val) << HTT_ML_PEER_DETAILS_LINK_INIT_COUNT_S)); \
+	} while (0)
+
+#define HTT_ML_PEER_DETAILS_NON_STR_GET(_var) \
+	(((_var) & HTT_ML_PEER_DETAILS_NON_STR_M) >> \
+	 HTT_ML_PEER_DETAILS_NON_STR_S)
+
+#define HTT_ML_PEER_DETAILS_NON_STR_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_PEER_DETAILS_NON_STR, _val); \
+		((_var) &= ~(HTT_ML_PEER_DETAILS_NON_STR_M)); \
+		((_var) |= ((_val) << HTT_ML_PEER_DETAILS_NON_STR_S)); \
+	} while (0)
+
+#define HTT_ML_PEER_DETAILS_EMLSR_GET(_var) \
+	(((_var) & HTT_ML_PEER_DETAILS_EMLSR_M) >> \
+	 HTT_ML_PEER_DETAILS_EMLSR_S)
+
+#define HTT_ML_PEER_DETAILS_EMLSR_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_PEER_DETAILS_EMLSR, _val); \
+		((_var) &= ~(HTT_ML_PEER_DETAILS_EMLSR_M)); \
+		((_var) |= ((_val) << HTT_ML_PEER_DETAILS_EMLSR_S)); \
+	} while (0)
+
+#define HTT_ML_PEER_DETAILS_IS_STA_KO_GET(_var) \
+	(((_var) & HTT_ML_PEER_DETAILS_IS_STA_KO_M) >> \
+	 HTT_ML_PEER_DETAILS_IS_STA_KO_S)
+
+#define HTT_ML_PEER_DETAILS_IS_STA_KO_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_PEER_DETAILS_IS_STA_KO, _val); \
+		((_var) &= ~(HTT_ML_PEER_DETAILS_IS_STA_KO_M)); \
+		((_var) |= ((_val) << HTT_ML_PEER_DETAILS_IS_STA_KO_S)); \
+	} while (0)
+
+#define HTT_ML_PEER_DETAILS_NUM_LOCAL_LINKS_GET(_var) \
+	(((_var) & HTT_ML_PEER_DETAILS_NUM_LOCAL_LINKS_M) >> \
+	 HTT_ML_PEER_DETAILS_NUM_LOCAL_LINKS_S)
+#define HTT_ML_PEER_DETAILS_NUM_LOCAL_LINKS_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_PEER_DETAILS_NUM_LOCAL_LINKS, _val); \
+		((_var) &= ~(HTT_ML_PEER_DETAILS_NUM_LOCAL_LINKS_M)); \
+		((_var) |= ((_val) << HTT_ML_PEER_DETAILS_NUM_LOCAL_LINKS_S)); \
+	} while (0)
+
+#define HTT_ML_PEER_DETAILS_ALLOCATED_GET(_var) \
+	(((_var) & HTT_ML_PEER_DETAILS_ALLOCATED_M) >> \
+	 HTT_ML_PEER_DETAILS_ALLOCATED_S)
+
+#define HTT_ML_PEER_DETAILS_ALLOCATED_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_PEER_DETAILS_ALLOCATED, _val); \
+		((_var) &= ~(HTT_ML_PEER_DETAILS_ALLOCATED_M)); \
+		((_var) |= ((_val) << HTT_ML_PEER_DETAILS_ALLOCATED_S)); \
+	} while (0)
+
+#define HTT_ML_PEER_DETAILS_PARTICIPATING_CHIPS_BITMAP_GET(_var) \
+	(((_var) & HTT_ML_PEER_DETAILS_PARTICIPATING_CHIPS_BITMAP_M) >> \
+	 HTT_ML_PEER_DETAILS_PARTICIPATING_CHIPS_BITMAP_S)
+
+#define HTT_ML_PEER_DETAILS_PARTICIPATING_CHIPS_BITMAP_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_PEER_DETAILS_PARTICIPATING_CHIPS_BITMAP, _val); \
+		((_var) &= ~(HTT_ML_PEER_DETAILS_PARTICIPATING_CHIPS_BITMAP_M)); \
+		((_var) |= ((_val) << HTT_ML_PEER_DETAILS_PARTICIPATING_CHIPS_BITMAP_S)); \
+	} while (0)
+
+/* HTT stats TLV: multi-link (MLO) peer details; bitfields mirror the
+ * HTT_ML_PEER_DETAILS_* masks above (anonymous-struct view is a
+ * host-endianness convenience over the raw dwords).
+ */
+struct htt_ml_peer_details_tlv {
+	struct htt_mac_addr remote_mld_mac_addr;
+	union {
+		struct {
+			u32 num_links:2,
+			    ml_peer_id:12,
+			    primary_link_idx:3,
+			    primary_chip_id:2,
+			    link_init_count:3,
+			    non_str:1,
+			    emlsr:1,
+			    is_sta_ko:1,
+			    num_local_links:2,
+			    allocated:1,
+			    reserved:4;
+		};
+		u32 msg_dword_1;
+	};
+
+	union {
+		struct {
+			u32 participating_chips_bitmap:8,
+			    reserved1:24;
+		};
+		u32 msg_dword_2;
+	};
+
+	u32 ml_peer_flags;
+};
+
+/* Masks/shifts and GET/SET accessors for
+ * htt_ml_peer_ext_details_tlv::msg_dword_1.
+ */
+#define HTT_ML_PEER_EXT_DETAILS_PEER_ASSOC_IPC_RECVD_M		0x0000003F
+#define HTT_ML_PEER_EXT_DETAILS_PEER_ASSOC_IPC_RECVD_S		0
+#define HTT_ML_PEER_EXT_DETAILS_SCHED_PEER_DELETE_RECVD_M	0x00000FC0
+#define HTT_ML_PEER_EXT_DETAILS_SCHED_PEER_DELETE_RECVD_S	6
+#define HTT_ML_PEER_EXT_DETAILS_MLD_AST_INDEX_M			0x0FFFF000
+#define HTT_ML_PEER_EXT_DETAILS_MLD_AST_INDEX_S			12
+
+#define HTT_ML_PEER_EXT_DETAILS_PEER_ASSOC_IPC_RECVD_GET(_var) \
+	(((_var) & HTT_ML_PEER_EXT_DETAILS_PEER_ASSOC_IPC_RECVD_M) >> \
+	 HTT_ML_PEER_EXT_DETAILS_PEER_ASSOC_IPC_RECVD_S)
+
+#define HTT_ML_PEER_EXT_DETAILS_PEER_ASSOC_IPC_RECVD_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_PEER_EXT_DETAILS_PEER_ASSOC_IPC_RECVD, _val); \
+		((_var) &= ~(HTT_ML_PEER_EXT_DETAILS_PEER_ASSOC_IPC_RECVD_M)); \
+		((_var) |= ((_val) << HTT_ML_PEER_EXT_DETAILS_PEER_ASSOC_IPC_RECVD_S)); \
+	} while (0)
+
+#define HTT_ML_PEER_EXT_DETAILS_SCHED_PEER_DELETE_RECVD_GET(_var) \
+	(((_var) & HTT_ML_PEER_EXT_DETAILS_SCHED_PEER_DELETE_RECVD_M) >> \
+	 HTT_ML_PEER_EXT_DETAILS_SCHED_PEER_DELETE_RECVD_S)
+
+#define HTT_ML_PEER_EXT_DETAILS_SCHED_PEER_DELETE_RECVD_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_PEER_EXT_DETAILS_SCHED_PEER_DELETE_RECVD, _val); \
+		((_var) &= ~(HTT_ML_PEER_EXT_DETAILS_SCHED_PEER_DELETE_RECVD_M)); \
+		((_var) |= ((_val) << HTT_ML_PEER_EXT_DETAILS_SCHED_PEER_DELETE_RECVD_S)); \
+	} while (0)
+
+#define HTT_ML_PEER_EXT_DETAILS_MLD_AST_INDEX_GET(_var) \
+	(((_var) & HTT_ML_PEER_EXT_DETAILS_MLD_AST_INDEX_M) >> \
+	 HTT_ML_PEER_EXT_DETAILS_MLD_AST_INDEX_S)
+
+#define HTT_ML_PEER_EXT_DETAILS_MLD_AST_INDEX_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_PEER_EXT_DETAILS_MLD_AST_INDEX, _val); \
+		((_var) &= ~(HTT_ML_PEER_EXT_DETAILS_MLD_AST_INDEX_M)); \
+		((_var) |= ((_val) << HTT_ML_PEER_EXT_DETAILS_MLD_AST_INDEX_S)); \
+	} while (0)
+
+/* HTT stats TLV: extended ML peer details; bitfields mirror the
+ * HTT_ML_PEER_EXT_DETAILS_* masks above.
+ */
+struct htt_ml_peer_ext_details_tlv {
+	union {
+		struct {
+			u32 peer_assoc_ipc_recvd:6,
+			    sched_peer_delete_recvd:6,
+			    mld_ast_index:16,
+			    reserved:4;
+		};
+		u32 msg_dword_1;
+	};
+};
+
+/* Bitfield masks/shifts for htt_ml_link_info_tlv: first group covers
+ * msg_dword_1, the SW_PEER_ID/VDEV_ID pair covers msg_dword_2.
+ */
+#define HTT_ML_LINK_INFO_VALID_M		0x00000001
+#define HTT_ML_LINK_INFO_VALID_S		0
+#define HTT_ML_LINK_INFO_ACTIVE_M		0x00000002
+#define HTT_ML_LINK_INFO_ACTIVE_S		1
+#define HTT_ML_LINK_INFO_PRIMARY_M		0x00000004
+#define HTT_ML_LINK_INFO_PRIMARY_S		2
+#define HTT_ML_LINK_INFO_ASSOC_LINK_M		0x00000008
+#define HTT_ML_LINK_INFO_ASSOC_LINK_S		3
+#define HTT_ML_LINK_INFO_CHIP_ID_M		0x00000070
+#define HTT_ML_LINK_INFO_CHIP_ID_S		4
+#define HTT_ML_LINK_INFO_IEEE_LINK_ID_M		0x00007F80
+#define HTT_ML_LINK_INFO_IEEE_LINK_ID_S		7
+#define HTT_ML_LINK_INFO_HW_LINK_ID_M		0x00038000
+#define HTT_ML_LINK_INFO_HW_LINK_ID_S		15
+#define HTT_ML_LINK_INFO_LOGICAL_LINK_ID_M	0x000C0000
+#define HTT_ML_LINK_INFO_LOGICAL_LINK_ID_S	18
+#define HTT_ML_LINK_INFO_MASTER_LINK_M		0x00100000
+#define HTT_ML_LINK_INFO_MASTER_LINK_S		20
+#define HTT_ML_LINK_INFO_ANCHOR_LINK_M		0x00200000
+#define HTT_ML_LINK_INFO_ANCHOR_LINK_S		21
+#define HTT_ML_LINK_INFO_INITIALIZED_M		0x00400000
+#define HTT_ML_LINK_INFO_INITIALIZED_S		22
+
+#define HTT_ML_LINK_INFO_SW_PEER_ID_M		0x0000ffff
+#define HTT_ML_LINK_INFO_SW_PEER_ID_S		0
+#define HTT_ML_LINK_INFO_VDEV_ID_M		0x00ff0000
+#define HTT_ML_LINK_INFO_VDEV_ID_S		16
+
+/* GET/SET accessors for the ML link-info bitfields above; each SET clears the
+ * field before OR-ing the new value and range-checks it via
+ * HTT_CHECK_SET_VAL.
+ */
+#define HTT_ML_LINK_INFO_VALID_GET(_var) \
+	(((_var) & HTT_ML_LINK_INFO_VALID_M) >> \
+	 HTT_ML_LINK_INFO_VALID_S)
+
+#define HTT_ML_LINK_INFO_VALID_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_LINK_INFO_VALID, _val); \
+		((_var) &= ~(HTT_ML_LINK_INFO_VALID_M)); \
+		((_var) |= ((_val) << HTT_ML_LINK_INFO_VALID_S)); \
+	} while (0)
+
+#define HTT_ML_LINK_INFO_ACTIVE_GET(_var) \
+	(((_var) & HTT_ML_LINK_INFO_ACTIVE_M) >> \
+	 HTT_ML_LINK_INFO_ACTIVE_S)
+
+#define HTT_ML_LINK_INFO_ACTIVE_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_LINK_INFO_ACTIVE, _val); \
+		((_var) &= ~(HTT_ML_LINK_INFO_ACTIVE_M)); \
+		((_var) |= ((_val) << HTT_ML_LINK_INFO_ACTIVE_S)); \
+	} while (0)
+
+#define HTT_ML_LINK_INFO_PRIMARY_GET(_var) \
+	(((_var) & HTT_ML_LINK_INFO_PRIMARY_M) >> \
+	 HTT_ML_LINK_INFO_PRIMARY_S)
+
+#define HTT_ML_LINK_INFO_PRIMARY_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_LINK_INFO_PRIMARY, _val); \
+		((_var) &= ~(HTT_ML_LINK_INFO_PRIMARY_M)); \
+		((_var) |= ((_val) << HTT_ML_LINK_INFO_PRIMARY_S)); \
+	} while (0)
+
+#define HTT_ML_LINK_INFO_ASSOC_LINK_GET(_var) \
+	(((_var) & HTT_ML_LINK_INFO_ASSOC_LINK_M) >> \
+	 HTT_ML_LINK_INFO_ASSOC_LINK_S)
+
+#define HTT_ML_LINK_INFO_ASSOC_LINK_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_LINK_INFO_ASSOC_LINK, _val); \
+		((_var) &= ~(HTT_ML_LINK_INFO_ASSOC_LINK_M)); \
+		((_var) |= ((_val) << HTT_ML_LINK_INFO_ASSOC_LINK_S)); \
+	} while (0)
+
+#define HTT_ML_LINK_INFO_CHIP_ID_GET(_var) \
+	(((_var) & HTT_ML_LINK_INFO_CHIP_ID_M) >> \
+	 HTT_ML_LINK_INFO_CHIP_ID_S)
+
+#define HTT_ML_LINK_INFO_CHIP_ID_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_LINK_INFO_CHIP_ID, _val); \
+		((_var) &= ~(HTT_ML_LINK_INFO_CHIP_ID_M)); \
+		((_var) |= ((_val) << HTT_ML_LINK_INFO_CHIP_ID_S)); \
+	} while (0)
+
+#define HTT_ML_LINK_INFO_IEEE_LINK_ID_GET(_var) \
+	(((_var) & HTT_ML_LINK_INFO_IEEE_LINK_ID_M) >> \
+	 HTT_ML_LINK_INFO_IEEE_LINK_ID_S)
+
+#define HTT_ML_LINK_INFO_IEEE_LINK_ID_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_LINK_INFO_IEEE_LINK_ID, _val); \
+		((_var) &= ~(HTT_ML_LINK_INFO_IEEE_LINK_ID_M)); \
+		((_var) |= ((_val) << HTT_ML_LINK_INFO_IEEE_LINK_ID_S)); \
+	} while (0)
+
+#define HTT_ML_LINK_INFO_HW_LINK_ID_GET(_var) \
+	(((_var) & HTT_ML_LINK_INFO_HW_LINK_ID_M) >> \
+	 HTT_ML_LINK_INFO_HW_LINK_ID_S)
+
+#define HTT_ML_LINK_INFO_HW_LINK_ID_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_LINK_INFO_HW_LINK_ID, _val); \
+		((_var) &= ~(HTT_ML_LINK_INFO_HW_LINK_ID_M)); \
+		((_var) |= ((_val) << HTT_ML_LINK_INFO_HW_LINK_ID_S)); \
+	} while (0)
+
+#define HTT_ML_LINK_INFO_LOGICAL_LINK_ID_GET(_var) \
+	(((_var) & HTT_ML_LINK_INFO_LOGICAL_LINK_ID_M) >> \
+	 HTT_ML_LINK_INFO_LOGICAL_LINK_ID_S)
+
+#define HTT_ML_LINK_INFO_LOGICAL_LINK_ID_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_LINK_INFO_LOGICAL_LINK_ID, _val); \
+		((_var) &= ~(HTT_ML_LINK_INFO_LOGICAL_LINK_ID_M)); \
+		((_var) |= ((_val) << HTT_ML_LINK_INFO_LOGICAL_LINK_ID_S)); \
+	} while (0)
+
+#define HTT_ML_LINK_INFO_MASTER_LINK_GET(_var) \
+	(((_var) & HTT_ML_LINK_INFO_MASTER_LINK_M) >> \
+	 HTT_ML_LINK_INFO_MASTER_LINK_S)
+
+#define HTT_ML_LINK_INFO_MASTER_LINK_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_LINK_INFO_MASTER_LINK, _val); \
+		((_var) &= ~(HTT_ML_LINK_INFO_MASTER_LINK_M)); \
+		((_var) |= ((_val) << HTT_ML_LINK_INFO_MASTER_LINK_S)); \
+	} while (0)
+
+#define HTT_ML_LINK_INFO_ANCHOR_LINK_GET(_var) \
+	(((_var) & HTT_ML_LINK_INFO_ANCHOR_LINK_M) >> \
+	 HTT_ML_LINK_INFO_ANCHOR_LINK_S)
+
+#define HTT_ML_LINK_INFO_ANCHOR_LINK_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_LINK_INFO_ANCHOR_LINK, _val); \
+		((_var) &= ~(HTT_ML_LINK_INFO_ANCHOR_LINK_M)); \
+		((_var) |= ((_val) << HTT_ML_LINK_INFO_ANCHOR_LINK_S)); \
+	} while (0)
+
+#define HTT_ML_LINK_INFO_INITIALIZED_GET(_var) \
+	(((_var) & HTT_ML_LINK_INFO_INITIALIZED_M) >> \
+	 HTT_ML_LINK_INFO_INITIALIZED_S)
+
+#define HTT_ML_LINK_INFO_INITIALIZED_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_LINK_INFO_INITIALIZED, _val); \
+		((_var) &= ~(HTT_ML_LINK_INFO_INITIALIZED_M)); \
+		((_var) |= ((_val) << HTT_ML_LINK_INFO_INITIALIZED_S)); \
+	} while (0)
+
+#define HTT_ML_LINK_INFO_SW_PEER_ID_GET(_var) \
+	(((_var) & HTT_ML_LINK_INFO_SW_PEER_ID_M) >> \
+	 HTT_ML_LINK_INFO_SW_PEER_ID_S)
+
+#define HTT_ML_LINK_INFO_SW_PEER_ID_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_LINK_INFO_SW_PEER_ID, _val); \
+		((_var) &= ~(HTT_ML_LINK_INFO_SW_PEER_ID_M)); \
+		((_var) |= ((_val) << HTT_ML_LINK_INFO_SW_PEER_ID_S)); \
+	} while (0)
+
+#define HTT_ML_LINK_INFO_VDEV_ID_GET(_var) \
+	(((_var) & HTT_ML_LINK_INFO_VDEV_ID_M) >> \
+	 HTT_ML_LINK_INFO_VDEV_ID_S)
+
+#define HTT_ML_LINK_INFO_VDEV_ID_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_ML_LINK_INFO_VDEV_ID, _val); \
+		((_var) &= ~(HTT_ML_LINK_INFO_VDEV_ID_M)); \
+		((_var) |= ((_val) << HTT_ML_LINK_INFO_VDEV_ID_S)); \
+	} while (0)
+
+/* HTT stats TLV: per-link ML info; bitfields mirror the HTT_ML_LINK_INFO_*
+ * masks above.
+ */
+struct htt_ml_link_info_tlv {
+	union {
+		struct {
+			u32 valid:1,
+			    active:1,
+			    primary:1,
+			    assoc_link:1,
+			    chip_id:3,
+			    ieee_link_id:8,
+			    hw_link_id:3,
+			    logical_link_id:2,
+			    master_link:1,
+			    anchor_link:1,
+			    initialized:1,
+			    reserved:9;
+		};
+		u32 msg_dword_1;
+	};
+
+	union {
+		struct {
+			u32 sw_peer_id:16,
+			    vdev_id:8,
+			    reserved1:8;
+		};
+		u32 msg_dword_2;
+	};
+
+	u32 primary_tid_mask;
+};
+
+/* HTT stats TLV: tx PPDU duration histogram plus 64-bit success/fail/up-time
+ * accumulators split into lo/hi 32-bit halves (in us).
+ */
+struct htt_tx_pdev_ppdu_dur_stats_tlv {
+	/** Tx PPDU duration histogram */
+	u32 tx_ppdu_dur_hist[HTT_PDEV_STATS_PPDU_DUR_HIST_BINS];
+	u32 tx_success_time_us_low;
+	u32 tx_success_time_us_high;
+	u32 tx_fail_time_us_low;
+	u32 tx_fail_time_us_high;
+	u32 pdev_up_time_us_low;
+	u32 pdev_up_time_us_high;
+};
+
+struct htt_rx_pdev_ppdu_dur_stats_tlv {
+	/** Rx PPDU duration histogram (original comment said "Tx" — copy-paste
+	 * error; the field is rx_ppdu_dur_hist).
+	 */
+	u32 rx_ppdu_dur_hist[HTT_PDEV_STATS_PPDU_DUR_HIST_BINS];
+};
+
+#define HTT_TX_PDEV_SIFS_BURST_HIST_STATS 10
+/* Size in bytes of a sifs-hist TLV holding _num_elems u32 counters. */
+#define HTT_TX_PDEV_STATS_SIFS_HIST_TLV_SZ(_num_elems) (sizeof(u32) * (_num_elems))
+
+/* HTT stats TLV: MBSSID control-frame counters, split by whether the trigger
+ * targets stations across or within the transmitting BSS.
+ */
+struct htt_pdev_mbssid_ctrl_frame_stats_tlv {
+	/** mac_id__word:
+	 * BIT [ 7 :  0]   :- mac_id
+	 *                    Use the HTT_STATS_CMN_MAC_ID_GET,_SET macros to
+	 *                    read/write this bitfield.
+	 * BIT [31 :  8]   :- reserved
+	 */
+	u32 mac_id__word;
+	u32 basic_trigger_across_bss;
+	u32 basic_trigger_within_bss;
+	u32 bsr_trigger_across_bss;
+	u32 bsr_trigger_within_bss;
+	u32 mu_rts_across_bss;
+	u32 mu_rts_within_bss;
+	u32 ul_mumimo_trigger_across_bss;
+	u32 ul_mumimo_trigger_within_bss;
+};
+
+/* HTT stats TLV: "ODD mandatory" pdev summary — an aggregate of tx/rx queue,
+ * reset, suspend, HWQ and rate counters collected in a single TLV.
+ */
+struct htt_odd_mandatory_pdev_stats_tlv {
+	u32 hw_queued;
+	u32 hw_reaped;
+	u32 hw_paused;
+	u32 hw_filt;
+	u32 seq_posted;
+	u32 seq_completed;
+	u32 underrun;
+	u32 hw_flush;
+	u32 next_seq_posted_dsr;
+	u32 seq_posted_isr;
+	u32 mpdu_cnt_fcs_ok;
+	u32 mpdu_cnt_fcs_err;
+	u32 msdu_count_tqm;
+	u32 mpdu_count_tqm;
+	u32 mpdus_ack_failed;
+	u32 num_data_ppdus_tried_ota;
+	u32 ppdu_ok;
+	u32 num_total_ppdus_tried_ota;
+	u32 thermal_suspend_cnt;
+	u32 dfs_suspend_cnt;
+	u32 tx_abort_suspend_cnt;
+	u32 suspended_txq_mask;
+	u32 last_suspend_reason;
+	u32 seq_failed_queueing;
+	u32 seq_restarted;
+	u32 seq_txop_repost_stop;
+	u32 next_seq_cancel;
+	u32 seq_min_msdu_repost_stop;
+	u32 total_phy_err_cnt;
+	u32 ppdu_recvd;
+	u32 tcp_msdu_cnt;
+	u32 tcp_ack_msdu_cnt;
+	u32 udp_msdu_cnt;
+	u32 fw_tx_mgmt_subtype[HTT_STATS_SUBTYPE_MAX];
+	u32 fw_rx_mgmt_subtype[HTT_STATS_SUBTYPE_MAX];
+	u32 fw_ring_mpdu_err[HTT_RX_STATS_RXDMA_MAX_ERR];
+	u32 urrn_stats[HTT_TX_PDEV_MAX_URRN_STATS];
+	u32 sifs_status[HTT_TX_PDEV_MAX_SIFS_BURST_STATS];
+	u32 sifs_hist_status[HTT_TX_PDEV_SIFS_BURST_HIST_STATS];
+	u32 rx_suspend_cnt;
+	u32 rx_suspend_fail_cnt;
+	u32 rx_resume_cnt;
+	u32 rx_resume_fail_cnt;
+	u32 hwq_beacon_cmd_result[HTT_TX_HWQ_MAX_CMD_RESULT_STATS];
+	u32 hwq_voice_cmd_result[HTT_TX_HWQ_MAX_CMD_RESULT_STATS];
+	u32 hwq_video_cmd_result[HTT_TX_HWQ_MAX_CMD_RESULT_STATS];
+	u32 hwq_best_effort_cmd_result[HTT_TX_HWQ_MAX_CMD_RESULT_STATS];
+	u32 hwq_beacon_mpdu_tried_cnt;
+	u32 hwq_voice_mpdu_tried_cnt;
+	u32 hwq_video_mpdu_tried_cnt;
+	u32 hwq_best_effort_mpdu_tried_cnt;
+	u32 hwq_beacon_mpdu_queued_cnt;
+	u32 hwq_voice_mpdu_queued_cnt;
+	u32 hwq_video_mpdu_queued_cnt;
+	u32 hwq_best_effort_mpdu_queued_cnt;
+	u32 hwq_beacon_mpdu_ack_fail_cnt;
+	u32 hwq_voice_mpdu_ack_fail_cnt;
+	u32 hwq_video_mpdu_ack_fail_cnt;
+	u32 hwq_best_effort_mpdu_ack_fail_cnt;
+	u32 pdev_resets;
+	u32 phy_warm_reset;
+	u32 hwsch_reset_count;
+	u32 phy_warm_reset_ucode_trig;
+	u32 mac_cold_reset;
+	u32 mac_warm_reset;
+	u32 mac_warm_reset_restore_cal;
+	u32 phy_warm_reset_m3_ssr;
+	u32 fw_rx_rings_reset;
+	u32 tx_flush;
+	u32 hwsch_dev_reset_war;
+	u32 mac_cold_reset_restore_cal;
+	u32 mac_only_reset;
+	u32 mac_sfm_reset;
+	u32 tx_ldpc; /* Number of tx PPDUs with LDPC coding */
+	u32 rx_ldpc; /* Number of rx PPDUs with LDPC coding */
+	u32 gen_mpdu_end_reason[HTT_TX_TQM_MAX_GEN_MPDU_END_REASON];
+	u32 list_mpdu_end_reason[HTT_TX_TQM_MAX_LIST_MPDU_END_REASON];
+	u32 tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS + HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS + HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
+	u32 tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	u32 tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+	u32 half_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+	u32 quarter_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+	u32 tx_su_punctured_mode[HTT_TX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS];
+	u32 rx_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS + HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS + HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
+	u32 rx_nss[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	u32 rx_bw[HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+	u32 rx_stbc[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS + HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS + HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
+	u32 rts_cnt;
+	u32 rts_success;
+};
+
+/* HTT stats TLV: PHY-level error/abort counters and rx packet histograms. */
+struct htt_phy_counters_tlv {
+	u32 rx_ofdma_timing_err_cnt;
+	u32 rx_cck_fail_cnt;
+	u32 mactx_abort_cnt;
+	u32 macrx_abort_cnt;
+	u32 phytx_abort_cnt;
+	u32 phyrx_abort_cnt;
+	u32 phyrx_defer_abort_cnt;
+	u32 rx_gain_adj_lstf_event_cnt;
+	u32 rx_gain_adj_non_legacy_cnt;
+	u32 rx_pkt_cnt[HTT_MAX_RX_PKT_CNT];
+	u32 rx_pkt_crc_pass_cnt[HTT_MAX_RX_PKT_CRC_PASS_CNT];
+	u32 per_blk_err_cnt[HTT_MAX_PER_BLK_ERR_CNT];
+	u32 rx_ota_err_cnt[HTT_MAX_RX_OTA_ERR_CNT];
+};
+
+/* HTT stats TLV: PHY state — per-chain noise floor (signed), radar/DFS info
+ * and current operating/device bandwidth and centre frequencies.
+ */
+struct htt_phy_stats_tlv {
+	s32 nf_chain[HTT_STATS_MAX_CHAINS];
+	u32 false_radar_cnt;
+	u32 radar_cs_cnt;
+	s32 ani_level;
+	u32 fw_run_time;
+	s32 runtime_nf_chain[HTT_STATS_MAX_CHAINS];
+	u32 current_operating_width;
+	u32 current_device_width;
+	u32 last_radar_type;
+	u32 dfs_reg_domain;
+	u32 radar_mask_bit;
+	s32 radar_rssi;
+	u32 radar_dfs_flags;
+	u32 band_center_frequency_operating;
+	u32 band_center_frequency_device;
+};
+
+struct htt_phy_reset_counters_tlv {
+	u32 pdev_id;
+	u32 cf_active_low_fail_cnt;
+	u32 cf_active_low_pass_cnt;
+	u32 phy_off_through_vreg_cnt;
+	u32 force_calibration_cnt;
+	u32 rf_mode_switch_phy_off_cnt;
+};
+
+struct htt_phy_reset_stats_tlv {
+	u32 pdev_id;
+	u32 chan_mhz;
+	u32 chan_band_center_freq1;
+	u32 chan_band_center_freq2;
+	u32 chan_phy_mode;
+	u32 chan_flags;
+	u32 chan_num;
+	u32 reset_cause;
+	u32 prev_reset_cause;
+	u32 phy_warm_reset_src;
+	u32 rx_gain_tbl_mode;
+	u32 xbar_val;
+	u32 force_calibration;
+	u32 phyrf_mode;
+	u32 phy_homechan;
+	u32 phy_tx_ch_mask;
+	u32 phy_rx_ch_mask;
+	u32 phybb_ini_mask;
+	u32 phyrf_ini_mask;
+	u32 phy_dfs_en_mask;
+	u32 phy_sscan_en_mask;
+	u32 phy_synth_sel_mask;
+	u32 phy_adfs_freq;
+	u32 cck_fir_settings;
+	u32 phy_dyn_pri_chan;
+	u32 cca_thresh;
+	u32 dyn_cca_status;
+	u32 rxdesense_thresh_hw;
+	u32 rxdesense_thresh_sw;
+};
+
+struct htt_peer_ctrl_path_txrx_stats_tlv {
+	u8 peer_mac_addr[6];
+	u8 rsvd[2];
+	u32 peer_tx_mgmt_subtype[ATH12K_STATS_MGMT_FRM_TYPE_MAX];
+	u32 peer_rx_mgmt_subtype[ATH12K_STATS_MGMT_FRM_TYPE_MAX];
+};
+
+struct htt_pdev_ctrl_path_tx_stats_tlv {
+	/* Num MGMT MPDU transmitted by the target */
+	u32 fw_tx_mgmt_subtype[HTT_STATS_SUBTYPE_MAX];
+};
+
+/* Reasons a scheduler supercycle was triggered; values index the
+ * supercycle_triggers[] counter array of
+ * struct htt_sched_txq_supercycle_triggers_tlv.
+ */
+enum htt_sched_txq_supercycle_triggers_tlv_enum {
+	HTT_SCHED_SUPERCYCLE_TRIGGER_NONE = 0,
+	HTT_SCHED_SUPERCYCLE_TRIGGER_FORCED,
+	HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_TIDQ_ENTRIES,
+	HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_ACTIVE_TIDS,
+	HTT_SCHED_SUPERCYCLE_TRIGGER_MAX_ITR_REACHED,
+	HTT_SCHED_SUPERCYCLE_TRIGGER_DUR_THRESHOLD_REACHED,
+	HTT_SCHED_SUPERCYCLE_TRIGGER_TWT_TRIGGER,
+
+	HTT_SCHED_SUPERCYCLE_TRIGGER_MAX,
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_supercycle_triggers_tlv {
+	/* One counter per htt_sched_txq_supercycle_triggers_tlv_enum value
+	 * (up to HTT_SCHED_SUPERCYCLE_TRIGGER_MAX); declared as a C99
+	 * flexible array member instead of the deprecated zero-length
+	 * array, since the actual length comes from the TLV length field.
+	 */
+	u32 supercycle_triggers[];
+};
+
+struct htt_hw_war_stats_tlv {
+	/* BIT[7:0] mac_id, remaining bits reserved - same packing as the
+	 * mac_id__word union used in htt_pdev_sched_algo_ofdma_stats_tlv.
+	 */
+	u32 mac_id__word;
+	/* HW workaround counters; declared [1] but presumably variable
+	 * length like the other TLVs in this header, with the actual count
+	 * taken from the TLV length field - TODO confirm.
+	 */
+	u32 hw_wars[1];
+};
+
+struct htt_peer_sched_stats_tlv {
+	u32 peer_id;
+	u32 num_sched_dl;
+	u32 num_sched_ul;
+	u32 peer_tx_active_dur_us_low;
+	u32 peer_tx_active_dur_us_high;
+	u32 peer_rx_active_dur_us_low;
+	u32 peer_rx_active_dur_us_high;
+	u32 peer_curr_rate_kbps;
+};
+
+struct htt_vdev_rtt_resp_stats_tlv {
+	/* No of Fine Timing Measurement frames transmitted successfully */
+	u32 tx_ftm_suc;
+	/* No of Fine Timing Measurement frames transmitted successfully after retry */
+	u32 tx_ftm_suc_retry;
+	/* No of Fine Timing Measurement frames not transmitted successfully */
+	u32 tx_ftm_fail;
+	/* No of Fine Timing Measurement Request frames received, including initial,
+	 * non-initial, and duplicates
+	 */
+	u32 rx_ftmr_cnt;
+	/* No of duplicate Fine Timing Measurement Request frames received, including
+	 * both initial and non-initial
+	 */
+	u32 rx_ftmr_dup_cnt;
+	/* No of initial Fine Timing Measurement Request frames received */
+	u32 rx_iftmr_cnt;
+	/* No of duplicate initial Fine Timing Measurement Request frames received */
+	u32 rx_iftmr_dup_cnt;
+	/* No of responder sessions rejected when initiator was active */
+	u32 initiator_active_responder_rejected_cnt;
+	/* Responder terminate count */
+	u32 responder_terminate_cnt;
+	u32 vdev_id;
+};
+
+struct htt_vdev_rtt_init_stats_tlv {
+	u32 vdev_id;
+	/* No of Fine Timing Measurement request frames transmitted successfully */
+	u32 tx_ftmr_cnt;
+	/* No of Fine Timing Measurement request frames not transmitted successfully */
+	u32 tx_ftmr_fail;
+	/* No of Fine Timing Measurement request frames transmitted successfully
+	 * after retry
+	 */
+	u32 tx_ftmr_suc_retry;
+	/* No of Fine Timing Measurement frames received, including initial, non-initial
+	 * and duplicates
+	 */
+	u32 rx_ftm_cnt;
+	/* Initiator Terminate count */
+	u32 initiator_terminate_cnt;
+	u32 tx_meas_req_count;
+};
+
+struct htt_pktlog_and_htt_ring_stats_tlv {
+	/* No of pktlog payloads that were dropped in htt_ppdu_stats path */
+	u32 pktlog_lite_drop_cnt;
+	/* No of pktlog payloads that were dropped in TQM path */
+	u32 pktlog_tqm_drop_cnt;
+	/* No of pktlog ppdu stats payloads that were dropped */
+	u32 pktlog_ppdu_stats_drop_cnt;
+	/* No of pktlog ppdu ctrl payloads that were dropped */
+	u32 pktlog_ppdu_ctrl_drop_cnt;
+	/* No of pktlog sw events payloads that were dropped */
+	u32 pktlog_sw_events_drop_cnt;
+};
+
+#define HTT_DLPAGER_STATS_MAX_HIST            10
+#define HTT_DLPAGER_ASYNC_LOCKED_PAGE_COUNT_M 0x000000FF
+#define HTT_DLPAGER_ASYNC_LOCKED_PAGE_COUNT_S 0
+#define HTT_DLPAGER_SYNC_LOCKED_PAGE_COUNT_M  0x0000FF00
+#define HTT_DLPAGER_SYNC_LOCKED_PAGE_COUNT_S  8
+#define HTT_DLPAGER_TOTAL_LOCKED_PAGES_M      0x0000FFFF
+#define HTT_DLPAGER_TOTAL_LOCKED_PAGES_S      0
+#define HTT_DLPAGER_TOTAL_FREE_PAGES_M        0xFFFF0000
+#define HTT_DLPAGER_TOTAL_FREE_PAGES_S        16
+#define HTT_DLPAGER_LAST_LOCKED_PAGE_IDX_M    0x0000FFFF
+#define HTT_DLPAGER_LAST_LOCKED_PAGE_IDX_S    0
+#define HTT_DLPAGER_LAST_UNLOCKED_PAGE_IDX_M  0xFFFF0000
+#define HTT_DLPAGER_LAST_UNLOCKED_PAGE_IDX_S  16
+
+#define HTT_DLPAGER_ASYNC_LOCK_PAGE_COUNT_GET(_var) \
+	(((_var) & HTT_DLPAGER_ASYNC_LOCKED_PAGE_COUNT_M) >> \
+	HTT_DLPAGER_ASYNC_LOCKED_PAGE_COUNT_S)
+
+#define HTT_DLPAGER_ASYNC_LOCK_PAGE_COUNT_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_DLPAGER_ASYNC_LOCKED_PAGE_COUNT, _val); \
+		((_var) &= ~(HTT_DLPAGER_ASYNC_LOCKED_PAGE_COUNT_M));\
+		((_var) |= ((_val) << HTT_DLPAGER_ASYNC_LOCKED_PAGE_COUNT_S)); \
+	} while (0)
+
+#define HTT_DLPAGER_SYNC_LOCK_PAGE_COUNT_GET(_var) \
+	(((_var) & HTT_DLPAGER_SYNC_LOCKED_PAGE_COUNT_M) >> \
+	HTT_DLPAGER_SYNC_LOCKED_PAGE_COUNT_S)
+
+#define HTT_DLPAGER_SYNC_LOCK_PAGE_COUNT_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_DLPAGER_SYNC_LOCKED_PAGE_COUNT, _val); \
+		((_var) &= ~(HTT_DLPAGER_SYNC_LOCKED_PAGE_COUNT_M));\
+		((_var) |= ((_val) << HTT_DLPAGER_SYNC_LOCKED_PAGE_COUNT_S)); \
+	} while (0)
+
+#define HTT_DLPAGER_TOTAL_LOCKED_PAGES_GET(_var) \
+	(((_var) & HTT_DLPAGER_TOTAL_LOCKED_PAGES_M) >> \
+	HTT_DLPAGER_TOTAL_LOCKED_PAGES_S)
+
+#define HTT_DLPAGER_TOTAL_LOCKED_PAGES_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_DLPAGER_TOTAL_LOCKED_PAGES, _val); \
+		((_var) &= ~(HTT_DLPAGER_TOTAL_LOCKED_PAGES_M)); \
+		((_var) |= ((_val) << HTT_DLPAGER_TOTAL_LOCKED_PAGES_S)); \
+	} while (0)
+
+#define HTT_DLPAGER_TOTAL_FREE_PAGES_GET(_var) \
+	(((_var) & HTT_DLPAGER_TOTAL_FREE_PAGES_M) >> \
+	HTT_DLPAGER_TOTAL_FREE_PAGES_S)
+
+#define HTT_DLPAGER_TOTAL_FREE_PAGES_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_DLPAGER_TOTAL_FREE_PAGES, _val); \
+		((_var) &= ~(HTT_DLPAGER_TOTAL_FREE_PAGES_M)); \
+		((_var) |= ((_val) << HTT_DLPAGER_TOTAL_FREE_PAGES_S)); \
+	} while (0)
+
+#define HTT_DLPAGER_LAST_LOCKED_PAGE_IDX_GET(_var) \
+	(((_var) & HTT_DLPAGER_LAST_LOCKED_PAGE_IDX_M) >> \
+	HTT_DLPAGER_LAST_LOCKED_PAGE_IDX_S)
+
+#define HTT_DLPAGER_LAST_LOCKED_PAGE_IDX_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_DLPAGER_LAST_LOCKED_PAGE_IDX, _val); \
+		((_var) &= ~(HTT_DLPAGER_LAST_LOCKED_PAGE_IDX_M)); \
+		((_var) |= ((_val) << HTT_DLPAGER_LAST_LOCKED_PAGE_IDX_S)); \
+	} while (0)
+
+#define HTT_DLPAGER_LAST_UNLOCKED_PAGE_IDX_GET(_var) \
+	(((_var) & HTT_DLPAGER_LAST_UNLOCKED_PAGE_IDX_M) >> \
+	HTT_DLPAGER_LAST_UNLOCKED_PAGE_IDX_S)
+
+#define HTT_DLPAGER_LAST_UNLOCKED_PAGE_IDX_SET(_var, _val) \
+	do { \
+		HTT_CHECK_SET_VAL(HTT_DLPAGER_LAST_UNLOCKED_PAGE_IDX, _val); \
+		((_var) &= ~(HTT_DLPAGER_LAST_UNLOCKED_PAGE_IDX_M)); \
+		((_var) |= ((_val) << HTT_DLPAGER_LAST_UNLOCKED_PAGE_IDX_S)); \
+	} while (0)
+
+/* Index values for the first dimension of
+ * htt_dl_pager_stats_tlv::last_pages_info[][HTT_DLPAGER_STATS_MAX_HIST].
+ */
+enum {
+	HTT_STATS_PAGE_LOCKED = 0,
+	HTT_STATS_PAGE_UNLOCKED = 1,
+	HTT_STATS_NUM_PAGE_LOCK_STATES
+};
+
+struct htt_dl_pager_stats_tlv {
+	/* msg_dword_1 bitfields:
+	 *     async_lock                 : 8,
+	 *     sync_lock                  : 8,
+	 *     reserved                   : 16;
+	 */
+	u32 msg_dword_1;
+	/* mst_dword_2 bitfields:
+	 *     total_locked_pages         : 16,
+	 *     total_free_pages           : 16;
+	 */
+	u32 msg_dword_2;
+	/* msg_dword_3 bitfields:
+	 *     last_locked_page_idx       : 16,
+	 *     last_unlocked_page_idx     : 16;
+	 */
+	u32 msg_dword_3;
+
+	struct {
+		u32 page_num;
+		u32 num_of_pages;
+		/* timestamp is in microsecond units, from SoC timer clock */
+		u32 timestamp_lsbs;
+		u32 timestamp_msbs;
+	} last_pages_info[HTT_STATS_NUM_PAGE_LOCK_STATES][HTT_DLPAGER_STATS_MAX_HIST];
+};
+
+struct htt_stats_error_tlv_v {
+	u32 htt_stats_type;
+};
+
+#define HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS 7
+
+struct htt_tx_selfgen_ac_sched_status_stats_tlv {
+	/* 11AC VHT SU NDPA scheduler completion status reason code */
+	u32 ac_su_ndpa_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	/* 11AC VHT SU NDP scheduler completion status reason code */
+	u32 ac_su_ndp_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	/* 11AC VHT SU NDP scheduler error code */
+	u32 ac_su_ndp_sch_flag_err[HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS];
+	/* 11AC VHT MU MIMO NDPA scheduler completion status reason code */
+	u32 ac_mu_mimo_ndpa_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	/* 11AC VHT MU MIMO NDP scheduler completion status reason code */
+	u32 ac_mu_mimo_ndp_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	/* 11AC VHT MU MIMO NDP scheduler error code */
+	u32 ac_mu_mimo_ndp_sch_flag_err[HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS];
+	/* 11AC VHT MU MIMO BRPOLL scheduler completion status reason code */
+	u32 ac_mu_mimo_brp_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	/* 11AC VHT MU MIMO BRPOLL scheduler error code */
+	u32 ac_mu_mimo_brp_sch_flag_err[HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS];
+};
+
+struct htt_tx_selfgen_ax_sched_status_stats_tlv {
+	/* 11AX HE SU NDPA scheduler completion status reason code */
+	u32 ax_su_ndpa_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	/* 11AX SU NDP scheduler completion status reason code */
+	u32 ax_su_ndp_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	/* 11AX HE SU NDP scheduler error code */
+	u32 ax_su_ndp_sch_flag_err[HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS];
+	/* 11AX HE MU MIMO NDPA scheduler completion status reason code */
+	u32 ax_mu_mimo_ndpa_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	/* 11AX HE MU MIMO NDP scheduler completion status reason code */
+	u32 ax_mu_mimo_ndp_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	/* 11AX HE MU MIMO NDP scheduler error code */
+	u32 ax_mu_mimo_ndp_sch_flag_err[HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS];
+	/* 11AX HE MU MIMO MU BRPOLL scheduler completion status reason code */
+	u32 ax_mu_brp_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	/* 11AX HE MU MIMO MU BRPOLL scheduler error code */
+	u32 ax_mu_brp_sch_flag_err[HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS];
+	/* 11AX HE MU BAR scheduler completion status reason code */
+	u32 ax_mu_bar_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	/* 11AX HE MU BAR scheduler error code */
+	u32 ax_mu_bar_sch_flag_err[HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS];
+	/* 11AX HE UL OFDMA Basic Trigger scheduler completion status reason code */
+	u32 ax_basic_trig_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	/* 11AX HE UL OFDMA Basic Trigger scheduler error code */
+	u32 ax_basic_trig_sch_flag_err[HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS];
+	/* 11AX HE UL MUMIMO Basic Trigger scheduler completion status reason code */
+	u32 ax_ulmumimo_trig_sch_status[HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
+	/* 11AX HE UL MUMIMO Basic Trigger scheduler error code */
+	u32 ax_ulmumimo_trig_sch_flag_err[HTT_TX_SELFGEN_NUM_SCH_TSFLAG_ERROR_STATS];
+};
+
+enum htt_stats_rc_mode {
+	HTT_STATS_RC_MODE_DLSU	   = 0,
+	HTT_STATS_RC_MODE_DLMUMIMO = 1,
+};
+
+struct htt_tx_rate_stats_t {
+	u32 ppdus_tried;
+	u32 ppdus_ack_failed;
+	u32 mpdus_tried;
+	u32 mpdus_failed;
+};
+
+struct htt_tx_per_rate_stats_tlv {
+	u32 rc_mode;
+	u32 last_probed_mcs;
+	u32 last_probed_nss;
+	u32 last_probed_bw;
+
+	struct htt_tx_rate_stats_t per_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+	struct htt_tx_rate_stats_t per_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	struct htt_tx_rate_stats_t per_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+
+	/** 320MHz extension for PER */
+	struct htt_tx_rate_stats_t per_bw320;
+};
+
+struct htt_tx_pdev_dl_mu_ofdma_sch_stats_tlv {
+	u32 ax_mu_ofdma_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+struct htt_tx_pdev_ul_mu_ofdma_sch_stats_tlv {
+	u32 ax_ul_mu_ofdma_basic_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ul_mu_ofdma_bsr_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ul_mu_ofdma_bar_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	u32 ax_ul_mu_ofdma_brp_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+struct htt_t2h_soc_txrx_stats_common_tlv {
+	u32 inv_peers_msdu_drop_count_hi;
+	u32 inv_peers_msdu_drop_count_lo;
+};
+
+struct htt_t2h_vdev_txrx_stats_hw_stats_tlv {
+	u32 vdev_id;
+	u32 rx_msdu_byte_cnt_hi;
+	u32 rx_msdu_byte_cnt_lo;
+	u32 rx_msdu_cnt_hi;
+	u32 rx_msdu_cnt_lo;
+	u32 tx_msdu_byte_cnt_hi;
+	u32 tx_msdu_byte_cnt_lo;
+	u32 tx_msdu_cnt_hi;
+	u32 tx_msdu_cnt_lo;
+	u32 tx_msdu_excessive_retry_discard_cnt_hi;
+	u32 tx_msdu_excessive_retry_discard_cnt_lo;
+	u32 tx_msdu_cong_ctrl_drop_cnt_hi;
+	u32 tx_msdu_cong_ctrl_drop_cnt_lo;
+	u32 tx_msdu_ttl_expire_drop_cnt_hi;
+	u32 tx_msdu_ttl_expire_drop_cnt_lo;
+};
+
+struct htt_tx_pdev_dl_mu_mimo_sch_stats_tlv {
+	/* Number of MU MIMO schedules posted to HW */
+	u32 mu_mimo_sch_posted;
+	/* Number of MU MIMO schedules failed to post */
+	u32 mu_mimo_sch_failed;
+	/* Number of MU MIMO PPDUs posted to HW */
+	u32 mu_mimo_ppdu_posted;
+	/*
+	 * This is the common description for the below sch stats.
+	 * Counts the number of transmissions of each number of MU users
+	 * in each TX mode.
+	 * The array index is the "number of users - 1".
+	 * For example, ac_mu_mimo_sch_nusers[1] counts the number of 11AC MU2
+	 * TX PPDUs, ac_mu_mimo_sch_nusers[2] counts the number of 11AC MU3
+	 * TX PPDUs and so on.
+	 * The same is applicable for the other TX mode stats.
+	 */
+	/* Represents the count for 11AC DL MU MIMO sequences */
+	u32 ac_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
+	/* Represents the count for 11AX DL MU MIMO sequences */
+	u32 ax_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
+	/* Number of 11AC DL MU MIMO schedules posted per group size */
+	u32 ac_mu_mimo_sch_posted_per_grp_sz[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
+	/* Number of 11AX DL MU MIMO schedules posted per group size */
+	u32 ax_mu_mimo_sch_posted_per_grp_sz[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
+};
+
+struct htt_tx_pdev_ul_mu_mimo_sch_stats_tlv {
+	/* Represents the count for 11AX UL MU MIMO sequences with Basic Triggers */
+	u32 ax_ul_mu_mimo_basic_sch_nusers[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS];
+	/* Represents the count for 11AX UL MU MIMO sequences with BRP Triggers */
+	u32 ax_ul_mu_mimo_brp_sch_nusers[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS];
+};
+
+/* UL RESP Queues 0 - HIPRI, 1 - LOPRI & 2 - BSR */
+#define HTT_STA_UL_OFDMA_NUM_UL_QUEUES 3
+
+/* Actual resp type sent by STA for trigger
+ * 0 - HE TB PPDU, 1 - NULL Delimiter
+ */
+#define HTT_STA_UL_OFDMA_NUM_RESP_END_TYPE 2
+
+/* Counter for MCS 0-13 */
+#define HTT_STA_UL_OFDMA_NUM_MCS_COUNTERS 14
+
+/* Counters BW 20,40,80,160,320 */
+#define HTT_STA_UL_OFDMA_NUM_BW_COUNTERS 5
+
+/* 0 - Half, 1 - Quarter */
+#define HTT_STA_UL_OFDMA_NUM_REDUCED_CHAN_TYPES 2
+
+#define HTT_NUM_AC_WMM	0x4
+
+enum HTT_STA_UL_OFDMA_RX_TRIG_TYPE {
+	HTT_ULTRIG_QBOOST_TRIGGER = 0,
+	HTT_ULTRIG_PSPOLL_TRIGGER,
+	HTT_ULTRIG_UAPSD_TRIGGER,
+	HTT_ULTRIG_11AX_TRIGGER,
+	HTT_ULTRIG_11AX_WILDCARD_TRIGGER,
+	HTT_ULTRIG_11AX_UNASSOC_WILDCARD_TRIGGER,
+	HTT_STA_UL_OFDMA_NUM_TRIG_TYPE,
+};
+
+enum HTT_STA_UL_OFDMA_11AX_TRIG_TYPE {
+	HTT_11AX_TRIGGER_BASIC_E		= 0,
+	HTT_11AX_TRIGGER_BRPOLL_E		= 1,
+	HTT_11AX_TRIGGER_MU_BAR_E		= 2,
+	HTT_11AX_TRIGGER_MU_RTS_E		= 3,
+	HTT_11AX_TRIGGER_BUFFER_SIZE_E		= 4,
+	HTT_11AX_TRIGGER_GCR_MU_BAR_E		= 5,
+	HTT_11AX_TRIGGER_BQRP_E			= 6,
+	HTT_11AX_TRIGGER_NDP_FB_REPORT_POLL_E	= 7,
+	HTT_11AX_TRIGGER_RESERVED_8_E		= 8,
+	HTT_11AX_TRIGGER_RESERVED_9_E		= 9,
+	HTT_11AX_TRIGGER_RESERVED_10_E		= 10,
+	HTT_11AX_TRIGGER_RESERVED_11_E		= 11,
+	HTT_11AX_TRIGGER_RESERVED_12_E		= 12,
+	HTT_11AX_TRIGGER_RESERVED_13_E		= 13,
+	HTT_11AX_TRIGGER_RESERVED_14_E		= 14,
+	HTT_11AX_TRIGGER_RESERVED_15_E		= 15,
+	HTT_STA_UL_OFDMA_NUM_11AX_TRIG_TYPE,
+};
+
+struct htt_print_sta_ul_ofdma_stats_tlv {
+	u32 pdev_id;
+	/* Trigger Type reported by HWSCH on RX reception
+	 * Each index populate enum HTT_STA_UL_OFDMA_RX_TRIG_TYPE
+	 */
+	u32 rx_trigger_type[HTT_STA_UL_OFDMA_NUM_TRIG_TYPE];
+	/* 11AX Trigger Type on RX reception
+	 * Each index populate enum HTT_STA_UL_OFDMA_11AX_TRIG_TYPE
+	 */
+	u32 ax_trigger_type[HTT_STA_UL_OFDMA_NUM_11AX_TRIG_TYPE];
+	/* Num data PPDUs/Delims responded to trigs. per HWQ for UL RESP */
+	u32 num_data_ppdu_responded_per_hwq[HTT_STA_UL_OFDMA_NUM_UL_QUEUES];
+	u32 num_null_delimiters_responded_per_hwq[HTT_STA_UL_OFDMA_NUM_UL_QUEUES];
+	/* Overall UL STA RESP Status 0 - HE TB PPDU, 1 - NULL Delimiter
+	 * Super set of num_data_ppdu_responded_per_hwq,
+	 * num_null_delimiters_responded_per_hwq
+	 */
+	u32 num_total_trig_responses[HTT_STA_UL_OFDMA_NUM_RESP_END_TYPE];
+	/* Time interval between current time ms and last successful trigger RX
+	 * 0xFFFFFFFF denotes no trig received / timestamp roll back
+	 */
+	u32 last_trig_rx_time_delta_ms;
+	/* Rate Statistics for UL OFDMA
+	 * UL TB PPDU TX MCS, NSS, GI, BW from STA HWQ
+	 */
+	u32 ul_ofdma_tx_mcs[HTT_STA_UL_OFDMA_NUM_MCS_COUNTERS];
+	u32 ul_ofdma_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	u32 ul_ofdma_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+			  [HTT_STA_UL_OFDMA_NUM_MCS_COUNTERS];
+	u32 ul_ofdma_tx_ldpc;
+	u32 ul_ofdma_tx_bw[HTT_STA_UL_OFDMA_NUM_BW_COUNTERS];
+
+	/* Trig based PPDU TX/ RBO based PPDU TX Count */
+	u32 trig_based_ppdu_tx;
+	u32 rbo_based_ppdu_tx;
+	/* Switch MU EDCA to SU EDCA Count */
+	u32 mu_edca_to_su_edca_switch_count;
+	/* Num MU EDCA applied Count */
+	u32 num_mu_edca_param_apply_count;
+
+	/* Current MU EDCA Parameters for WMM ACs
+	 * Mode - 0 - SU EDCA, 1- MU EDCA
+	 */
+	u32 current_edca_hwq_mode[HTT_NUM_AC_WMM];
+	/* Contention Window minimum. Range: 1 - 10 */
+	u32 current_cw_min[HTT_NUM_AC_WMM];
+	/* Contention Window maximum. Range: 1 - 10 */
+	u32 current_cw_max[HTT_NUM_AC_WMM];
+	/* AIFS value - 0 -255 */
+	u32 current_aifs[HTT_NUM_AC_WMM];
+	u32 reduced_ul_ofdma_tx_bw[HTT_STA_UL_OFDMA_NUM_REDUCED_CHAN_TYPES]
+				  [HTT_STA_UL_OFDMA_NUM_BW_COUNTERS];
+};
+
+struct htt_tx_pdev_mu_edca_params_stats_tlv_v {
+	u32 relaxed_mu_edca[HTT_NUM_AC_WMM];
+	u32 mumimo_aggressive_mu_edca[HTT_NUM_AC_WMM];
+	u32 mumimo_relaxed_mu_edca[HTT_NUM_AC_WMM];
+	u32 muofdma_aggressive_mu_edca[HTT_NUM_AC_WMM];
+	u32 muofdma_relaxed_mu_edca[HTT_NUM_AC_WMM];
+	u32 latency_mu_edca[HTT_NUM_AC_WMM];
+	u32 psd_boost_mu_edca[HTT_NUM_AC_WMM];
+};
+
+struct htt_tx_pdev_ap_edca_params_stats_tlv_v {
+	u32 ul_mumimo_less_aggressive[HTT_NUM_AC_WMM];
+	u32 ul_mumimo_medium_aggressive[HTT_NUM_AC_WMM];
+	u32 ul_mumimo_highly_aggressive[HTT_NUM_AC_WMM];
+	u32 ul_mumimo_default_relaxed[HTT_NUM_AC_WMM];
+	u32 ul_muofdma_less_aggressive[HTT_NUM_AC_WMM];
+	u32 ul_muofdma_medium_aggressive[HTT_NUM_AC_WMM];
+	u32 ul_muofdma_highly_aggressive[HTT_NUM_AC_WMM];
+	u32 ul_muofdma_default_relaxed[HTT_NUM_AC_WMM];
+};
+
+struct htt_peer_ax_ofdma_stats_tlv {
+	u32 peer_id;
+	u32 ax_basic_trig_count;
+	u32 ax_basic_trig_err;
+	u32 ax_bsr_trig_count;
+	u32 ax_bsr_trig_err;
+	u32 ax_mu_bar_trig_count;
+	u32 ax_mu_bar_trig_err;
+	u32 ax_basic_trig_with_per;
+	u32 ax_bsr_trig_with_per;
+	u32 ax_mu_bar_trig_with_per;
+	/* is_airtime_large_for_dl_ofdma, is_airtime_large_for_ul_ofdma
+	 * These fields contain 2 counters each.  The first element in each
+	 * array counts how many times the airtime is short enough to use
+	 * OFDMA, and the second element in each array counts how many times the
+	 * airtime is too large to select OFDMA for the PPDUs involving the peer.
+	 */
+	u32 is_airtime_large_for_dl_ofdma[2];
+	u32 is_airtime_large_for_ul_ofdma[2];
+	/* Last updated value of DL and UL queue depths for each peer per AC */
+	u32 last_updated_dl_qdepth[HTT_NUM_AC_WMM];
+	u32 last_updated_ul_qdepth[HTT_NUM_AC_WMM];
+};
+
+struct htt_pdev_sched_algo_ofdma_stats_tlv {
+	/**
+	 * BIT [ 7 :  0]   :- mac_id
+	 * BIT [31 :  8]   :- reserved
+	 */
+	union {
+		struct {
+			u32 mac_id:8,
+			reserved:24;
+		};
+		u32 mac_id__word;
+	};
+	u32 rate_based_dlofdma_enabled_count[HTT_NUM_AC_WMM];
+	u32 rate_based_dlofdma_disabled_count[HTT_NUM_AC_WMM];
+	u32 rate_based_dlofdma_probing_count[HTT_NUM_AC_WMM];
+	u32 rate_based_dlofdma_monitoring_count[HTT_NUM_AC_WMM];
+	u32 chan_acc_lat_based_dlofdma_enabled_count[HTT_NUM_AC_WMM];
+	u32 chan_acc_lat_based_dlofdma_disabled_count[HTT_NUM_AC_WMM];
+	u32 chan_acc_lat_based_dlofdma_monitoring_count[HTT_NUM_AC_WMM];
+	u32 downgrade_to_dl_su_ru_alloc_fail[HTT_NUM_AC_WMM];
+	u32 candidate_list_single_user_disable_ofdma[HTT_NUM_AC_WMM];
+	u32 dl_cand_list_dropped_high_ul_qos_weight[HTT_NUM_AC_WMM];
+	u32 ax_dlofdma_disabled_due_to_pipelining[HTT_NUM_AC_WMM];
+	u32 dlofdma_disabled_su_only_eligible[HTT_NUM_AC_WMM];
+	u32 dlofdma_disabled_consec_no_mpdus_tried[HTT_NUM_AC_WMM];
+	u32 dlofdma_disabled_consec_no_mpdus_success[HTT_NUM_AC_WMM];
+};
+
+struct htt_umac_ssr_stats_tlv {
+	u32 total_done;
+	u32 trigger_requests_count;
+	u32 total_trig_dropped;
+	u32 umac_disengaged_count;
+	u32 umac_soft_reset_count;
+	u32 umac_engaged_count;
+	u32 last_trigger_request_ms;
+	u32 last_start_ms;
+	u32 last_start_disengage_umac_ms;
+	u32 last_enter_ssr_platform_thread_ms;
+	u32 last_exit_ssr_platform_thread_ms;
+	u32 last_start_engage_umac_ms;
+	u32 last_done_successful_ms;
+	u32 last_e2e_delta_ms;
+	u32 max_e2e_delta_ms;
+	u32 trigger_count_for_umac_hang;
+	u32 trigger_count_for_mlo_quick_ssr;
+	u32 trigger_count_for_unknown_signature;
+	u32 post_reset_tqm_sync_cmd_completion_ms;
+	u32 htt_sync_mlo_initiate_umac_recovery_ms;
+	u32 htt_sync_do_pre_reset_ms;
+	u32 htt_sync_do_post_reset_start_ms;
+	u32 htt_sync_do_post_reset_complete_ms;
+};
+
+#endif
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/debugfs_sta.c	2024-01-19 17:01:19.857846811 +0100
@@ -0,0 +1,1929 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+
+#include "debugfs_sta.h"
+#include "core.h"
+#include "peer.h"
+#include "debug.h"
+#include "dp_tx.h"
+#include "debugfs_htt_stats.h"
+#include "debugfs.h"
+
+/* ath12k_debugfs_sta_add_tx_stats() - fold one per-peer TX stats report into
+ * the station's cumulative debugfs histograms.
+ * @arsta: link station whose tx_stats are updated (no-op if not allocated)
+ * @peer_stats: success/failure/retry byte and packet counts for the report
+ * @legacy_rate_idx: rate index to use when txrate carries no MCS-family flag
+ *
+ * Buckets the counters by rate family (EHT/HE/VHT/HT/legacy), bandwidth,
+ * NSS, guard interval, RU allocation and PPDU type.  In every histogram
+ * pair, index [0] accumulates bytes and index [1] accumulates packets.
+ */
+void ath12k_debugfs_sta_add_tx_stats(struct ath12k_link_sta *arsta,
+				     struct ath12k_per_peer_tx_stats *peer_stats,
+				     u8 legacy_rate_idx)
+{
+	struct rate_info *txrate = &arsta->txrate;
+	struct ath12k_htt_tx_stats *tx_stats;
+	int gi, mcs, bw, nss, ru_type, ppdu_type;
+
+	if (!arsta->tx_stats)
+		return;
+
+	tx_stats = arsta->tx_stats;
+	/* gi is the short-GI flag extracted as 0/1 and used as a GI index */
+	gi = FIELD_GET(RATE_INFO_FLAGS_SHORT_GI, arsta->txrate.flags);
+	mcs = txrate->mcs;
+	bw = ath12k_mac_mac80211_bw_to_ath12k_bw(txrate->bw);
+	nss = txrate->nss - 1;
+
+/* Shorthand for the per-report-type (SUCC/FAIL/RETRY/AMPDU) stats bucket */
+#define STATS_OP_FMT(name) tx_stats->stats[ATH12K_STATS_TYPE_##name]
+
+	/* Per-MCS accumulation, dispatched on the rate family flag */
+	if (txrate->flags & RATE_INFO_FLAGS_EHT_MCS) {
+		STATS_OP_FMT(SUCC).eht[0][mcs] += peer_stats->succ_bytes;
+		STATS_OP_FMT(SUCC).eht[1][mcs] += peer_stats->succ_pkts;
+		STATS_OP_FMT(FAIL).eht[0][mcs] += peer_stats->failed_bytes;
+		STATS_OP_FMT(FAIL).eht[1][mcs] += peer_stats->failed_pkts;
+		STATS_OP_FMT(RETRY).eht[0][mcs] += peer_stats->retry_bytes;
+		STATS_OP_FMT(RETRY).eht[1][mcs] += peer_stats->retry_pkts;
+	} else if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) {
+		STATS_OP_FMT(SUCC).he[0][mcs] += peer_stats->succ_bytes;
+		STATS_OP_FMT(SUCC).he[1][mcs] += peer_stats->succ_pkts;
+		STATS_OP_FMT(FAIL).he[0][mcs] += peer_stats->failed_bytes;
+		STATS_OP_FMT(FAIL).he[1][mcs] += peer_stats->failed_pkts;
+		STATS_OP_FMT(RETRY).he[0][mcs] += peer_stats->retry_bytes;
+		STATS_OP_FMT(RETRY).he[1][mcs] += peer_stats->retry_pkts;
+	} else if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
+		STATS_OP_FMT(SUCC).vht[0][mcs] += peer_stats->succ_bytes;
+		STATS_OP_FMT(SUCC).vht[1][mcs] += peer_stats->succ_pkts;
+		STATS_OP_FMT(FAIL).vht[0][mcs] += peer_stats->failed_bytes;
+		STATS_OP_FMT(FAIL).vht[1][mcs] += peer_stats->failed_pkts;
+		STATS_OP_FMT(RETRY).vht[0][mcs] += peer_stats->retry_bytes;
+		STATS_OP_FMT(RETRY).vht[1][mcs] += peer_stats->retry_pkts;
+	} else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
+		STATS_OP_FMT(SUCC).ht[0][mcs] += peer_stats->succ_bytes;
+		STATS_OP_FMT(SUCC).ht[1][mcs] += peer_stats->succ_pkts;
+		STATS_OP_FMT(FAIL).ht[0][mcs] += peer_stats->failed_bytes;
+		STATS_OP_FMT(FAIL).ht[1][mcs] += peer_stats->failed_pkts;
+		STATS_OP_FMT(RETRY).ht[0][mcs] += peer_stats->retry_bytes;
+		STATS_OP_FMT(RETRY).ht[1][mcs] += peer_stats->retry_pkts;
+	} else {
+		/* No MCS flag set: legacy (CCK/OFDM) rate, indexed by the
+		 * caller-supplied rate-table index.
+		 */
+		mcs = legacy_rate_idx;
+
+		STATS_OP_FMT(SUCC).legacy[0][mcs] += peer_stats->succ_bytes;
+		STATS_OP_FMT(SUCC).legacy[1][mcs] += peer_stats->succ_pkts;
+		STATS_OP_FMT(FAIL).legacy[0][mcs] += peer_stats->failed_bytes;
+		STATS_OP_FMT(FAIL).legacy[1][mcs] += peer_stats->failed_pkts;
+		STATS_OP_FMT(RETRY).legacy[0][mcs] += peer_stats->retry_bytes;
+		STATS_OP_FMT(RETRY).legacy[1][mcs] += peer_stats->retry_pkts;
+	}
+
+	/* RU-location counters only apply to HE/EHT MU OFDMA PPDU types */
+	ppdu_type = peer_stats->ppdu_type;
+	if ((ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
+	     ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) &&
+	     (txrate->flags & RATE_INFO_FLAGS_HE_MCS ||
+	      txrate->flags & RATE_INFO_FLAGS_EHT_MCS)) {
+		ru_type = peer_stats->ru_tones;
+
+		/* Bounds guard: ru_loc[] is only sized up to the 996-tone
+		 * allocation; larger EHT allocations are not recorded here.
+		 */
+		if (ru_type <= NL80211_RATE_INFO_HE_RU_ALLOC_996) {
+			STATS_OP_FMT(SUCC).ru_loc[0][ru_type] += peer_stats->succ_bytes;
+			STATS_OP_FMT(SUCC).ru_loc[1][ru_type] += peer_stats->succ_pkts;
+			STATS_OP_FMT(FAIL).ru_loc[0][ru_type] += peer_stats->failed_bytes;
+			STATS_OP_FMT(FAIL).ru_loc[1][ru_type] += peer_stats->failed_pkts;
+			STATS_OP_FMT(RETRY).ru_loc[0][ru_type] += peer_stats->retry_bytes;
+			STATS_OP_FMT(RETRY).ru_loc[1][ru_type] += peer_stats->retry_pkts;
+			if (peer_stats->is_ampdu) {
+				STATS_OP_FMT(AMPDU).ru_loc[0][ru_type] +=
+					peer_stats->succ_bytes + peer_stats->retry_bytes;
+				STATS_OP_FMT(AMPDU).ru_loc[1][ru_type] +=
+					peer_stats->succ_pkts + peer_stats->retry_pkts;
+			}
+		}
+	}
+
+	/* Per-PPDU-type counters (SU/MU variants, see HTT_PPDU_STATS_PPDU_TYPE_*) */
+	if (ppdu_type < HTT_PPDU_STATS_PPDU_TYPE_MAX) {
+		STATS_OP_FMT(SUCC).transmit_type[0][ppdu_type] += peer_stats->succ_bytes;
+		STATS_OP_FMT(SUCC).transmit_type[1][ppdu_type] += peer_stats->succ_pkts;
+		STATS_OP_FMT(FAIL).transmit_type[0][ppdu_type] +=
+							peer_stats->failed_bytes;
+		STATS_OP_FMT(FAIL).transmit_type[1][ppdu_type] += peer_stats->failed_pkts;
+		STATS_OP_FMT(RETRY).transmit_type[0][ppdu_type] +=
+							peer_stats->retry_bytes;
+		STATS_OP_FMT(RETRY).transmit_type[1][ppdu_type] += peer_stats->retry_pkts;
+		if (peer_stats->is_ampdu) {
+			/* A-MPDU buckets count successful plus retried traffic */
+			STATS_OP_FMT(AMPDU).transmit_type[0][ppdu_type] +=
+				peer_stats->succ_bytes + peer_stats->retry_bytes;
+			STATS_OP_FMT(AMPDU).transmit_type[1][ppdu_type] +=
+				peer_stats->succ_pkts + peer_stats->retry_pkts;
+		}
+	}
+
+	if (peer_stats->is_ampdu) {
+		/* ba_fails from an aggregate are block-ack failures */
+		tx_stats->ba_fails += peer_stats->ba_fails;
+
+		/* NOTE(review): unlike the per-rate dispatch above, VHT is
+		 * the fallthrough branch here (there is no explicit
+		 * RATE_INFO_FLAGS_VHT_MCS test), so a legacy-rate report
+		 * flagged is_ampdu would land in the vht bucket - confirm
+		 * this ordering is intentional.
+		 */
+		if (txrate->flags & RATE_INFO_FLAGS_EHT_MCS) {
+			STATS_OP_FMT(AMPDU).eht[0][mcs] +=
+			peer_stats->succ_bytes + peer_stats->retry_bytes;
+			STATS_OP_FMT(AMPDU).eht[1][mcs] +=
+			peer_stats->succ_pkts + peer_stats->retry_pkts;
+		} else if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) {
+			STATS_OP_FMT(AMPDU).he[0][mcs] +=
+			peer_stats->succ_bytes + peer_stats->retry_bytes;
+			STATS_OP_FMT(AMPDU).he[1][mcs] +=
+			peer_stats->succ_pkts + peer_stats->retry_pkts;
+		} else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
+			STATS_OP_FMT(AMPDU).ht[0][mcs] +=
+			peer_stats->succ_bytes + peer_stats->retry_bytes;
+			STATS_OP_FMT(AMPDU).ht[1][mcs] +=
+			peer_stats->succ_pkts + peer_stats->retry_pkts;
+		} else {
+			STATS_OP_FMT(AMPDU).vht[0][mcs] +=
+			peer_stats->succ_bytes + peer_stats->retry_bytes;
+			STATS_OP_FMT(AMPDU).vht[1][mcs] +=
+			peer_stats->succ_pkts + peer_stats->retry_pkts;
+		}
+		STATS_OP_FMT(AMPDU).bw[0][bw] +=
+			peer_stats->succ_bytes + peer_stats->retry_bytes;
+		STATS_OP_FMT(AMPDU).nss[0][nss] +=
+			peer_stats->succ_bytes + peer_stats->retry_bytes;
+		STATS_OP_FMT(AMPDU).gi[0][gi] +=
+			peer_stats->succ_bytes + peer_stats->retry_bytes;
+		STATS_OP_FMT(AMPDU).bw[1][bw] +=
+			peer_stats->succ_pkts + peer_stats->retry_pkts;
+		STATS_OP_FMT(AMPDU).nss[1][nss] +=
+			peer_stats->succ_pkts + peer_stats->retry_pkts;
+		STATS_OP_FMT(AMPDU).gi[1][gi] +=
+			peer_stats->succ_pkts + peer_stats->retry_pkts;
+	} else {
+		/* Non-aggregated: the same counter reports ack failures */
+		tx_stats->ack_fails += peer_stats->ba_fails;
+	}
+
+	/* Bandwidth / NSS / GI histograms, bytes ([0]) and packets ([1]) */
+	STATS_OP_FMT(SUCC).bw[0][bw] += peer_stats->succ_bytes;
+	STATS_OP_FMT(SUCC).nss[0][nss] += peer_stats->succ_bytes;
+	STATS_OP_FMT(SUCC).gi[0][gi] += peer_stats->succ_bytes;
+
+	STATS_OP_FMT(SUCC).bw[1][bw] += peer_stats->succ_pkts;
+	STATS_OP_FMT(SUCC).nss[1][nss] += peer_stats->succ_pkts;
+	STATS_OP_FMT(SUCC).gi[1][gi] += peer_stats->succ_pkts;
+
+	STATS_OP_FMT(FAIL).bw[0][bw] += peer_stats->failed_bytes;
+	STATS_OP_FMT(FAIL).nss[0][nss] += peer_stats->failed_bytes;
+	STATS_OP_FMT(FAIL).gi[0][gi] += peer_stats->failed_bytes;
+
+	STATS_OP_FMT(FAIL).bw[1][bw] += peer_stats->failed_pkts;
+	STATS_OP_FMT(FAIL).nss[1][nss] += peer_stats->failed_pkts;
+	STATS_OP_FMT(FAIL).gi[1][gi] += peer_stats->failed_pkts;
+
+	STATS_OP_FMT(RETRY).bw[0][bw] += peer_stats->retry_bytes;
+	STATS_OP_FMT(RETRY).nss[0][nss] += peer_stats->retry_bytes;
+	STATS_OP_FMT(RETRY).gi[0][gi] += peer_stats->retry_bytes;
+
+	STATS_OP_FMT(RETRY).bw[1][bw] += peer_stats->retry_pkts;
+	STATS_OP_FMT(RETRY).nss[1][nss] += peer_stats->retry_pkts;
+	STATS_OP_FMT(RETRY).gi[1][gi] += peer_stats->retry_pkts;
+
+	tx_stats->tx_duration += peer_stats->duration;
+
+	/* Last-seen RU allocation (snapshot, not accumulated) */
+	tx_stats->ru_start = peer_stats->ru_start;
+	tx_stats->ru_tones = peer_stats->ru_tones;
+
+	/* Record this peer's 1-based position within its MU group for
+	 * non-SU PPDUs.
+	 * NOTE(review): the `& (MAX_MU_GROUP_ID - 1)` test (assuming
+	 * MAX_MU_GROUP_ID is a power of two) skips group ids that are
+	 * multiples of MAX_MU_GROUP_ID, including 0 - confirm this filter
+	 * is intended rather than a bounds mask.
+	 */
+	if (peer_stats->mu_grpid <= MAX_MU_GROUP_ID &&
+	    peer_stats->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
+		if (peer_stats->mu_grpid & (MAX_MU_GROUP_ID - 1))
+			tx_stats->mu_group[peer_stats->mu_grpid] =
+						(peer_stats->mu_pos + 1);
+	}
+}
+
+/* Debugfs hook invoked on TX completion: forwards the HAL TX status to
+ * the datapath stats updater.
+ */
+void ath12k_debugfs_sta_update_txcompl(struct ath12k *ar,
+				       struct hal_tx_status *ts)
+{
+	ath12k_dp_tx_update_txcompl(ar, ts);
+}
+
+/* debugfs read handler for the per-link-station "tx_stats" file.
+ *
+ * Formats the accumulated tx rate statistics (per MCS / BW / NSS / GI / RU,
+ * split into byte and packet counters), the MU group membership table and
+ * the WBM tx completion counters into a text buffer and hands it to
+ * userspace.  Lock order: ah->conf_mutex -> ar->conf_mutex -> ar->data_lock.
+ */
+static ssize_t ath12k_dbg_sta_dump_tx_stats(struct file *file,
+					    char __user *user_buf,
+					    size_t count, loff_t *ppos)
+{
+	struct ieee80211_link_sta *link_sta = file->private_data;
+	struct ieee80211_sta *sta = link_sta->sta;
+	u8 link_id = link_sta->link_id;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_hw *ah = ahsta->ahvif->ah;
+	struct ath12k_link_sta *arsta;
+	struct ath12k *ar;
+	struct ath12k_htt_data_stats *stats;
+	static const char *str_name[ATH12K_STATS_TYPE_MAX] = {"success", "fail",
+							      "retry", "ampdu"};
+	static const char *str[ATH12K_COUNTER_TYPE_MAX] = {"bytes", "packets"};
+	int len = 0, i, j, k, retval = 0;
+	const int size = 2 * 4096;
+	char *buf, mu_group_id[MAX_MU_GROUP_LENGTH] = {0};
+	u32 index;
+	char *fields[] = {[HAL_WBM_REL_HTT_TX_COMP_STATUS_OK] = "Acked pkt count",
+			  [HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL] = "Status ttl pkt count",
+			  [HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP] = "Dropped pkt count",
+			  [HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ] = "Reinj pkt count",
+			  [HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT] = "Inspect pkt count",
+			  [HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY] = "MEC notify pkt count"};
+	int idx;
+
+	mutex_lock(&ah->conf_mutex);
+
+	/* Bail out if the link no longer exists on this station */
+	if (!(BIT(link_id) & ahsta->links_map)) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	arsta = ahsta->link[link_id];
+
+	if (!arsta || !arsta->arvif->ar) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	ar = arsta->arvif->ar;
+
+	if (!arsta->tx_stats || !arsta->wbm_tx_stats) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+	/* One section per stats type (success/fail/retry/ampdu), each split
+	 * into a bytes sub-section and a packets sub-section.
+	 */
+	for (k = 0; k < ATH12K_STATS_TYPE_MAX; k++) {
+		for (j = 0; j < ATH12K_COUNTER_TYPE_MAX; j++) {
+			stats = &arsta->tx_stats->stats[k];
+			len += scnprintf(buf + len, size - len, "%s_%s\n",
+					 str_name[k],
+					 str[j]);
+			len += scnprintf(buf + len, size - len, "==========\n");
+			len += scnprintf(buf + len, size - len,
+					 " EHT MCS %s\n\t",
+					 str[j]);
+			for (i = 0; i < ATH12K_EHT_MCS_NUM; i++)
+				len += scnprintf(buf + len, size - len,
+						 "%llu ",
+						 stats->eht[j][i]);
+			len += scnprintf(buf + len, size - len, "\n");
+			len += scnprintf(buf + len, size - len,
+					 " HE MCS %s\n\t",
+					 str[j]);
+			for (i = 0; i < ATH12K_HE_MCS_NUM; i++)
+				len += scnprintf(buf + len, size - len,
+						 "%llu ",
+						 stats->he[j][i]);
+			len += scnprintf(buf + len, size - len, "\n");
+			len += scnprintf(buf + len, size - len,
+					 " VHT MCS %s\n\t",
+					 str[j]);
+			for (i = 0; i < ATH12K_VHT_MCS_NUM; i++)
+				len += scnprintf(buf + len, size - len,
+						 "%llu ",
+						 stats->vht[j][i]);
+			len += scnprintf(buf + len, size - len, "\n");
+			len += scnprintf(buf + len, size - len, " HT MCS %s\n\t",
+					 str[j]);
+			for (i = 0; i < ATH12K_HT_MCS_NUM; i++)
+				len += scnprintf(buf + len, size - len,
+						 "%llu ", stats->ht[j][i]);
+			len += scnprintf(buf + len, size - len, "\n");
+			len += scnprintf(buf + len, size - len,
+					 " BW %s (20,40,80,160,320 MHz)\n", str[j]);
+			len += scnprintf(buf + len, size - len,
+					 "\t%llu %llu %llu %llu %llu\n",
+					 stats->bw[j][0], stats->bw[j][1],
+					 stats->bw[j][2], stats->bw[j][3],
+					 stats->bw[j][4]);
+			len += scnprintf(buf + len, size - len,
+					 " NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
+			len += scnprintf(buf + len, size - len,
+					 "\t%llu %llu %llu %llu\n",
+					 stats->nss[j][0], stats->nss[j][1],
+					 stats->nss[j][2], stats->nss[j][3]);
+			len += scnprintf(buf + len, size - len,
+					 " GI %s (0.4us,0.8us,1.6us,3.2us)\n",
+					 str[j]);
+			len += scnprintf(buf + len, size - len,
+					 "\t%llu %llu %llu %llu\n",
+					 stats->gi[j][0], stats->gi[j][1],
+					 stats->gi[j][2], stats->gi[j][3]);
+			len += scnprintf(buf + len, size - len,
+					 " legacy rate %s (1,2 ... Mbps)\n  ",
+					 str[j]);
+			for (i = 0; i < ATH12K_LEGACY_NUM; i++)
+				len += scnprintf(buf + len, size - len, "%llu ",
+						 stats->legacy[j][i]);
+
+			len += scnprintf(buf + len, size - len, "\n ru %s:\n", str[j]);
+			len += scnprintf(buf + len, size - len,
+					 "\tru 26: %llu\n", stats->ru_loc[j][0]);
+			len += scnprintf(buf + len, size - len,
+					 "\tru 52: %llu\n", stats->ru_loc[j][1]);
+			len += scnprintf(buf + len, size - len,
+					 "\tru 106: %llu\n", stats->ru_loc[j][2]);
+			len += scnprintf(buf + len, size - len,
+					 "\tru 242: %llu\n", stats->ru_loc[j][3]);
+			len += scnprintf(buf + len, size - len,
+					 "\tru 484: %llu\n", stats->ru_loc[j][4]);
+			len += scnprintf(buf + len, size - len,
+					 "\tru 996: %llu\n", stats->ru_loc[j][5]);
+
+			len += scnprintf(buf + len, size - len,
+					 " ppdu type %s:\n", str[j]);
+			/* fail/retry stats only distinguish SU vs OFDMA */
+			if (k == ATH12K_STATS_TYPE_FAIL ||
+			    k == ATH12K_STATS_TYPE_RETRY) {
+				len += scnprintf(buf + len, size - len,
+						 "\tSU/MIMO: %llu\n",
+						 stats->transmit_type[j][0]);
+				len += scnprintf(buf + len, size - len,
+						 "\tOFDMA/OFDMA_MIMO: %llu\n",
+						 stats->transmit_type[j][2]);
+			} else {
+				len += scnprintf(buf + len, size - len,
+						 "\tSU: %llu\n",
+						 stats->transmit_type[j][0]);
+				len += scnprintf(buf + len, size - len,
+						 "\tMIMO: %llu\n",
+						 stats->transmit_type[j][1]);
+				len += scnprintf(buf + len, size - len,
+						 "\tOFDMA: %llu\n",
+						 stats->transmit_type[j][2]);
+				len += scnprintf(buf + len, size - len,
+						 "\tOFDMA_MIMO: %llu\n",
+						 stats->transmit_type[j][3]);
+			}
+		}
+	}
+
+	len += scnprintf(buf + len, size - len, "\n");
+
+	for (i = 0; i < MAX_MU_GROUP_ID;) {
+		index = 0;
+		for (j = 0; j < MAX_MU_GROUP_SHOW && i < MAX_MU_GROUP_ID; j++) {
+			/* scnprintf, not snprintf: snprintf returns the
+			 * would-be length, so on truncation 'index' could
+			 * grow past the buffer and MAX_MU_GROUP_LENGTH - index
+			 * would underflow to a huge size_t, writing out of
+			 * bounds.
+			 */
+			index += scnprintf(&mu_group_id[index],
+					  MAX_MU_GROUP_LENGTH - index,
+					  " %d",
+					  arsta->tx_stats->mu_group[i]);
+			i++;
+		}
+		len += scnprintf(buf + len, size - len,
+				 "User position list for GID %02d->%d: [%s]\n",
+				 i - MAX_MU_GROUP_SHOW, i - 1, mu_group_id);
+	}
+	len += scnprintf(buf + len, size - len,
+			 "\nLast Packet RU index [%d], Size [%d]\n",
+			 arsta->tx_stats->ru_start, arsta->tx_stats->ru_tones);
+
+	len += scnprintf(buf + len, size - len,
+			 "\nTX duration\n %llu usecs\n",
+			 arsta->tx_stats->tx_duration);
+	len += scnprintf(buf + len, size - len,
+			 "BA fails\n %llu\n", arsta->tx_stats->ba_fails);
+	len += scnprintf(buf + len, size - len,
+			 "ack fails\n %llu\n\n", arsta->tx_stats->ack_fails);
+
+	len += scnprintf(buf + len, size - len, "WBM tx completion stats of data pkts :\n");
+	for (idx = 0; idx <= HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY; idx++) {
+		len += scnprintf(buf + len, size - len,
+				 "%-23s :  %llu\n",
+				 fields[idx],
+				 arsta->wbm_tx_stats->wbm_tx_comp_stats[idx]);
+	}
+
+	spin_unlock_bh(&ar->data_lock);
+
+	/* scnprintf never lets len exceed size - 1; clamp kept as belt and
+	 * braces.
+	 */
+	if (len > size)
+		len = size;
+	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+
+	mutex_unlock(&ar->conf_mutex);
+	mutex_unlock(&ah->conf_mutex);
+	return retval;
+}
+
+/* debugfs "tx_stats": read-only dump of the per-link tx rate statistics */
+static const struct file_operations fops_tx_stats = {
+	.read = ath12k_dbg_sta_dump_tx_stats,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* debugfs read handler for the per-link-station "rx_stats" file.
+ *
+ * Dumps the accumulated rx statistics: MSDU/MPDU counters, preamble and
+ * reception-type histograms, per-MCS/NSS/GI/BW packet and byte counters,
+ * a sparse BW x GI x NSS rate table and the RU allocation counters.
+ * Lock order: ah->conf_mutex -> ar->conf_mutex -> ar->ab->base_lock.
+ */
+static ssize_t ath12k_dbg_sta_dump_rx_stats(struct file *file,
+					    char __user *user_buf,
+					    size_t count, loff_t *ppos)
+{
+	struct ieee80211_link_sta *link_sta = file->private_data;
+	struct ieee80211_sta *sta = link_sta->sta;
+	u8 link_id = link_sta->link_id;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_hw *ah = ahsta->ahvif->ah;
+	struct ath12k_link_sta *arsta;
+	struct ath12k *ar;
+	struct ath12k_rx_peer_stats *rx_stats;
+	int len = 0, i, retval = 0;
+	const int size = 4 * 4096;
+	char *buf;
+	int mcs = 0, bw = 0, nss = 0, gi = 0, bw_num = 0, num_run, found;
+	static const char *legacy_rate_str[HAL_RX_MAX_NUM_LEGACY_RATES] = {
+					"1Mbps", "2Mbps", "5.5Mbps", "6Mbps",
+					"9Mbps", "11Mbps", "12Mbps", "18Mbps",
+					"24Mbps", "36 Mbps", "48Mbps", "54Mbps"};
+
+	mutex_lock(&ah->conf_mutex);
+
+	/* Bail out if the link no longer exists on this station */
+	if (!(BIT(link_id) & ahsta->links_map)) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	arsta = ahsta->link[link_id];
+
+	if (!arsta || !arsta->arvif->ar) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	ar = arsta->arvif->ar;
+
+	rx_stats = arsta->rx_stats;
+
+	if (!rx_stats) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&ar->conf_mutex);
+	spin_lock_bh(&ar->ab->base_lock);
+
+	len += scnprintf(buf + len, size - len, "RX peer stats:\n");
+	len += scnprintf(buf + len, size - len, "Num of MSDUs: %llu\n",
+			 rx_stats->num_msdu);
+	len += scnprintf(buf + len, size - len, "Num of MSDUs with TCP L4: %llu\n",
+			 rx_stats->tcp_msdu_count);
+	len += scnprintf(buf + len, size - len, "Num of MSDUs with UDP L4: %llu\n",
+			 rx_stats->udp_msdu_count);
+	len += scnprintf(buf + len, size - len, "Num of MSDUs part of AMPDU: %llu\n",
+			 rx_stats->ampdu_msdu_count);
+	len += scnprintf(buf + len, size - len, "Num of MSDUs not part of AMPDU: %llu\n",
+			 rx_stats->non_ampdu_msdu_count);
+	len += scnprintf(buf + len, size - len, "Num of MSDUs using STBC: %llu\n",
+			 rx_stats->stbc_count);
+	len += scnprintf(buf + len, size - len, "Num of MSDUs beamformed: %llu\n",
+			 rx_stats->beamformed_count);
+	len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS ok: %llu\n",
+			 rx_stats->num_mpdu_fcs_ok);
+	len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS error: %llu\n",
+			 rx_stats->num_mpdu_fcs_err);
+	/* Fix: the original format had "11BE%llu" without the separating
+	 * space every other field here carries.
+	 * NOTE(review): pream_cnt[5] is skipped (index 6 used for 11BE) --
+	 * presumably an unused slot in the preamble enum; confirm against
+	 * the HAL preamble definitions.
+	 */
+	len += scnprintf(buf + len, size - len,
+			 "preamble: 11A %llu 11B %llu 11N %llu 11AC %llu 11AX %llu 11BE %llu\n",
+			 rx_stats->pream_cnt[0], rx_stats->pream_cnt[1],
+			 rx_stats->pream_cnt[2], rx_stats->pream_cnt[3],
+			 rx_stats->pream_cnt[4], rx_stats->pream_cnt[6]);
+	len += scnprintf(buf + len, size - len,
+			 "reception type: SU %llu MU_MIMO %llu MU_OFDMA %llu MU_OFDMA_MIMO %llu\n",
+			 rx_stats->reception_type[0], rx_stats->reception_type[1],
+			 rx_stats->reception_type[2], rx_stats->reception_type[3]);
+	len += scnprintf(buf + len, size - len, "TID(0-15) Legacy TID(16):");
+	for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
+		len += scnprintf(buf + len, size - len, "%llu ", rx_stats->tid_count[i]);
+	len += scnprintf(buf + len, size - len, "\nRX Duration:%llu\n",
+			 rx_stats->rx_duration);
+
+	len += scnprintf(buf + len, size - len, "\nRX success packet stats:\n");
+	len += scnprintf(buf + len, size - len, "\nEHT packet stats:\n");
+	for (i = 0; i <= HAL_RX_MAX_MCS_BE; i++)
+		len += scnprintf(buf + len, size - len, "MCS %d: %llu%s", i,
+				 rx_stats->pkt_stats.be_mcs_count[i],
+				 (i + 1) % 7 ? "\t" : "\n");
+	len += scnprintf(buf + len, size - len, "\nHE packet stats:\n");
+	for (i = 0; i <= HAL_RX_MAX_MCS_HE; i++)
+		len += scnprintf(buf + len, size - len, "MCS %d: %llu%s", i,
+				 rx_stats->pkt_stats.he_mcs_count[i],
+				 (i + 1) % 6 ? "\t" : "\n");
+	len += scnprintf(buf + len, size - len, "\nVHT packet stats:\n");
+	for (i = 0; i <= HAL_RX_MAX_MCS_VHT; i++)
+		len += scnprintf(buf + len, size - len, "MCS %d: %llu%s", i,
+				 rx_stats->pkt_stats.vht_mcs_count[i],
+				 (i + 1) % 5 ? "\t" : "\n");
+	len += scnprintf(buf + len, size - len, "\nHT packet stats:\n");
+	for (i = 0; i <= HAL_RX_MAX_MCS_HT; i++)
+		len += scnprintf(buf + len, size - len, "MCS %d: %llu%s", i,
+				 rx_stats->pkt_stats.ht_mcs_count[i],
+				 (i + 1) % 8 ? "\t" : "\n");
+	len += scnprintf(buf + len, size - len, "\nLegacy rate packet stats:\n");
+	for (i = 0; i < HAL_RX_MAX_NUM_LEGACY_RATES; i++)
+		len += scnprintf(buf + len, size - len, "%s: %llu%s", legacy_rate_str[i],
+				 rx_stats->pkt_stats.legacy_count[i],
+				 (i + 1) % 4 ? "\t" : "\n");
+	len += scnprintf(buf + len, size - len, "\nNSS packet stats:\n");
+	for (i = 0; i < HAL_RX_MAX_NSS; i++)
+		len += scnprintf(buf + len, size - len, "%dx%d: %llu ", i + 1, i + 1,
+				 rx_stats->pkt_stats.nss_count[i]);
+	len += scnprintf(buf + len, size - len,
+			 "\n\nGI: 0.8us %llu 0.4us %llu 1.6us %llu 3.2us %llu\n",
+			 rx_stats->pkt_stats.gi_count[0],
+			 rx_stats->pkt_stats.gi_count[1],
+			 rx_stats->pkt_stats.gi_count[2],
+			 rx_stats->pkt_stats.gi_count[3]);
+	len += scnprintf(buf + len, size - len,
+			 "BW: 20Mhz %llu 40Mhz %llu 80Mhz %llu 160Mhz %llu 320Mhz %llu\n",
+			 rx_stats->pkt_stats.bw_count[0],
+			 rx_stats->pkt_stats.bw_count[1],
+			 rx_stats->pkt_stats.bw_count[2],
+			 rx_stats->pkt_stats.bw_count[3],
+			 rx_stats->pkt_stats.bw_count[4]);
+	len += scnprintf(buf + len, size - len, "\nRate Table (packets):\n");
+	num_run = HAL_RX_BW_MAX * HAL_RX_GI_MAX * HAL_RX_MAX_NSS;
+
+	/* Walk every (bw, gi, nss) combination (nss fastest, bw slowest) and
+	 * print a row only if at least one MCS counter is non-zero.
+	 */
+	for (i = 0; i < num_run; i++) {
+		found = 0;
+		for (mcs = 0; mcs < (HAL_RX_MAX_MCS_HT + 1); mcs++)
+			if (rx_stats->pkt_stats.rx_rate[bw][gi][nss][mcs]) {
+				found = 1;
+				break;
+			}
+
+		if (found) {
+			switch (bw) {
+			case 0:
+				bw_num = 20;
+				break;
+			case 1:
+				bw_num = 40;
+				break;
+			case 2:
+				bw_num = 80;
+				break;
+			case 3:
+				bw_num = 160;
+				break;
+			case 4:
+				bw_num = 320;
+				break;
+			}
+			len += scnprintf(buf + len, size - len, "\n%d Mhz gi %d us %dx%d : ",
+					 bw_num, gi, nss + 1, nss + 1);
+			for (mcs = 0; mcs < (HAL_RX_MAX_MCS_HT + 1); mcs++) {
+				if (rx_stats->pkt_stats.rx_rate[bw][gi][nss][mcs])
+					len += scnprintf(buf + len, size - len, " %d:%llu",
+							 mcs, rx_stats->pkt_stats.rx_rate[bw][gi][nss][mcs]);
+			}
+		}
+		if (nss++ >= HAL_RX_MAX_NSS - 1) {
+			nss = 0;
+			if (gi++ >= HAL_RX_GI_MAX - 1) {
+				gi = 0;
+				if (bw < HAL_RX_BW_MAX - 1)
+					bw++;
+			}
+		}
+	}
+
+	len += scnprintf(buf + len, size - len, "\nRX success byte stats:\n");
+	len += scnprintf(buf + len, size - len, "\nEHT byte stats:\n");
+	for (i = 0; i <= HAL_RX_MAX_MCS_BE; i++)
+		len += scnprintf(buf + len, size - len, "MCS %d: %llu%s", i,
+				 rx_stats->byte_stats.be_mcs_count[i],
+				 (i + 1) % 7 ? "\t" : "\n");
+	len += scnprintf(buf + len, size - len, "\nHE byte stats:\n");
+	for (i = 0; i <= HAL_RX_MAX_MCS_HE; i++)
+		len += scnprintf(buf + len, size - len, "MCS %d: %llu%s", i,
+				 rx_stats->byte_stats.he_mcs_count[i],
+				 (i + 1) % 6 ? "\t" : "\n");
+
+	len += scnprintf(buf + len, size - len, "\nVHT byte stats:\n");
+	for (i = 0; i <= HAL_RX_MAX_MCS_VHT; i++)
+		len += scnprintf(buf + len, size - len, "MCS %d: %llu%s", i,
+				 rx_stats->byte_stats.vht_mcs_count[i],
+				 (i + 1) % 5 ? "\t" : "\n");
+	len += scnprintf(buf + len, size - len, "\nHT byte stats:\n");
+	for (i = 0; i <= HAL_RX_MAX_MCS_HT; i++)
+		len += scnprintf(buf + len, size - len, "MCS %d: %llu%s", i,
+				 rx_stats->byte_stats.ht_mcs_count[i],
+				 (i + 1) % 8 ? "\t" : "\n");
+	len += scnprintf(buf + len, size - len, "\nLegacy rate byte stats:\n");
+	for (i = 0; i < HAL_RX_MAX_NUM_LEGACY_RATES; i++)
+		len += scnprintf(buf + len, size - len, "%s: %llu%s", legacy_rate_str[i],
+				 rx_stats->byte_stats.legacy_count[i],
+				 (i + 1) % 4 ? "\t" : "\n");
+	len += scnprintf(buf + len, size - len, "\nNSS byte stats:\n");
+	for (i = 0; i < HAL_RX_MAX_NSS; i++)
+		len += scnprintf(buf + len, size - len, "%dx%d: %llu ", i + 1, i + 1,
+				 rx_stats->byte_stats.nss_count[i]);
+	len += scnprintf(buf + len, size - len,
+			 "\n\nGI: 0.8us %llu 0.4us %llu 1.6us %llu 3.2us %llu\n",
+			 rx_stats->byte_stats.gi_count[0],
+			 rx_stats->byte_stats.gi_count[1],
+			 rx_stats->byte_stats.gi_count[2],
+			 rx_stats->byte_stats.gi_count[3]);
+	len += scnprintf(buf + len, size - len,
+			 "BW: 20Mhz %llu 40Mhz %llu 80Mhz %llu 160Mhz %llu 320Mhz %llu\n",
+			 rx_stats->byte_stats.bw_count[0],
+			 rx_stats->byte_stats.bw_count[1],
+			 rx_stats->byte_stats.bw_count[2],
+			 rx_stats->byte_stats.bw_count[3],
+			 rx_stats->byte_stats.bw_count[4]);
+	len += scnprintf(buf + len, size - len, "\nRate Table (bytes):\n");
+
+	/* Same sparse (bw, gi, nss) walk as above, on the byte counters */
+	bw = 0;
+	gi = 0;
+	nss = 0;
+	for (i = 0; i < num_run; i++) {
+		found = 0;
+		for (mcs = 0; mcs < (HAL_RX_MAX_MCS_HT + 1); mcs++)
+			if (rx_stats->byte_stats.rx_rate[bw][gi][nss][mcs]) {
+				found = 1;
+				break;
+			}
+
+		if (found) {
+			switch (bw) {
+			case 0:
+				bw_num = 20;
+				break;
+			case 1:
+				bw_num = 40;
+				break;
+			case 2:
+				bw_num = 80;
+				break;
+			case 3:
+				bw_num = 160;
+				break;
+			case 4:
+				bw_num = 320;
+				break;
+			}
+			len += scnprintf(buf + len, size - len, "\n%d Mhz gi %d us %dx%d : ",
+					 bw_num, gi, nss + 1, nss + 1);
+			for (mcs = 0; mcs < (HAL_RX_MAX_MCS_HT + 1); mcs++) {
+				if (rx_stats->byte_stats.rx_rate[bw][gi][nss][mcs])
+					len += scnprintf(buf + len, size - len, " %d:%llu", mcs, rx_stats->byte_stats.rx_rate[bw][gi][nss][mcs]);
+			}
+		}
+
+		if (nss++ >= HAL_RX_MAX_NSS - 1) {
+			nss = 0;
+			if (gi++ >= HAL_RX_GI_MAX - 1) {
+				gi = 0;
+				if (bw < HAL_RX_BW_MAX - 1)
+					bw++;
+			}
+		}
+	}
+	len += scnprintf(buf + len, size - len, "\n");
+	len += scnprintf(buf + len, size - len,
+			 "\nDCM: %llu\nRU26:  %llu\nRU52:  %llu\nRU106: %llu\nRU242: %llu\nRU484: %llu\nRU996: %llu\n",
+			 rx_stats->dcm_count, rx_stats->ru_alloc_cnt[0],
+			 rx_stats->ru_alloc_cnt[1], rx_stats->ru_alloc_cnt[2],
+			 rx_stats->ru_alloc_cnt[3], rx_stats->ru_alloc_cnt[4],
+			 rx_stats->ru_alloc_cnt[5]);
+
+	len += scnprintf(buf + len, size - len, "\n");
+
+	spin_unlock_bh(&ar->ab->base_lock);
+
+	/* scnprintf never lets len exceed size - 1; clamp kept as belt and
+	 * braces.
+	 */
+	if (len > size)
+		len = size;
+	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+
+	mutex_unlock(&ar->conf_mutex);
+	mutex_unlock(&ah->conf_mutex);
+	return retval;
+}
+
+/* debugfs "rx_stats": read-only dump of the per-link rx statistics */
+static const struct file_operations fops_rx_stats = {
+	.read = ath12k_dbg_sta_dump_rx_stats,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* open() handler for the per-link-station "htt_peer_stats" file.
+ *
+ * Allocates a stats request buffer, publishes it in
+ * ar->debug.htt_stats.stats_req and fires the HTT stats request; read()
+ * then returns the firmware response and release() frees the buffer.
+ * Returns -EPERM unless the globally-selected HTT stats type is one of
+ * the per-peer types.
+ */
+static int
+ath12k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file)
+{
+	struct ieee80211_link_sta *link_sta = inode->i_private;
+	struct ieee80211_sta *sta = link_sta->sta;
+	u8 link_id = link_sta->link_id;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_hw *ah = ahsta->ahvif->ah;
+	struct ath12k_link_sta *arsta;
+	struct ath12k *ar;
+	struct debug_htt_stats_req *stats_req;
+	int type;
+	int ret;
+
+	mutex_lock(&ah->conf_mutex);
+
+	if (!(BIT(link_id) & ahsta->links_map)) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	arsta = ahsta->link[link_id];
+
+	if (!arsta || !arsta->arvif->ar) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	ar = arsta->arvif->ar;
+
+	type = ar->debug.htt_stats.type;
+	/* Only the per-peer stats types make sense through this file.  The
+	 * original additionally tested "|| type == RESET", which is dead:
+	 * RESET differs from both peer types, so the first clause already
+	 * rejects it.
+	 */
+	if (type != ATH12K_DBG_HTT_EXT_STATS_PEER_INFO &&
+	    type != ATH12K_DBG_HTT_EXT_PEER_CTRL_PATH_TXRX_STATS) {
+		mutex_unlock(&ah->conf_mutex);
+		return -EPERM;
+	}
+
+	stats_req = vzalloc(sizeof(*stats_req) + ATH12K_HTT_STATS_BUF_SIZE);
+	if (!stats_req) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&ar->conf_mutex);
+	ar->debug.htt_stats.stats_req = stats_req;
+	stats_req->type = ATH12K_DBG_HTT_EXT_STATS_PEER_INFO;
+	memcpy(stats_req->peer_addr, link_sta->addr, ETH_ALEN);
+	ret = ath12k_debugfs_htt_stats_req(ar);
+	if (ret < 0) {
+		/* Clear the published pointer while still holding
+		 * ar->conf_mutex; the original cleared it after dropping the
+		 * lock, leaving a window where another requester could see a
+		 * stats_req that is about to be freed.
+		 */
+		ar->debug.htt_stats.stats_req = NULL;
+		mutex_unlock(&ar->conf_mutex);
+		vfree(stats_req);
+		mutex_unlock(&ah->conf_mutex);
+		return ret;
+	}
+	mutex_unlock(&ar->conf_mutex);
+
+	file->private_data = stats_req;
+	mutex_unlock(&ah->conf_mutex);
+	return 0;
+}
+
+/* release() handler for "htt_peer_stats": frees the stats request buffer
+ * allocated in open() and clears the pointer published in
+ * ar->debug.htt_stats.stats_req.
+ */
+static int
+ath12k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file)
+{
+	struct ieee80211_link_sta *link_sta = inode->i_private;
+	struct ieee80211_sta *sta = link_sta->sta;
+	u8 link_id = link_sta->link_id;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_hw *ah = ahsta->ahvif->ah;
+	struct ath12k_link_sta *arsta = NULL;
+	struct ath12k *ar;
+
+	mutex_lock(&ah->conf_mutex);
+
+	if (BIT(link_id) & ahsta->links_map)
+		arsta = ahsta->link[link_id];
+
+	if (!arsta || !arsta->arvif->ar) {
+		/* The link went away between open() and release().  The
+		 * buffer from open() must still be freed here: the original
+		 * early returns leaked it and left
+		 * ar->debug.htt_stats.stats_req dangling.
+		 */
+		vfree(file->private_data);
+		file->private_data = NULL;
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	ar = arsta->arvif->ar;
+	mutex_lock(&ar->conf_mutex);
+	vfree(file->private_data);
+	file->private_data = NULL;
+	ar->debug.htt_stats.stats_req = NULL;
+	mutex_unlock(&ar->conf_mutex);
+	mutex_unlock(&ah->conf_mutex);
+
+	return 0;
+}
+
+/* read() handler for "htt_peer_stats": copy the HTT response buffer that
+ * open() requested (and the event handler filled in) out to userspace.
+ */
+static ssize_t ath12k_dbg_sta_read_htt_peer_stats(struct file *file,
+						  char __user *user_buf,
+						  size_t count, loff_t *ppos)
+{
+	struct debug_htt_stats_req *stats_req = file->private_data;
+	u32 bytes_avail;
+
+	/* Never expose more than the response buffer actually holds */
+	bytes_avail = min_t(u32, stats_req->buf_len, ATH12K_HTT_STATS_BUF_SIZE);
+	return simple_read_from_buffer(user_buf, count, ppos,
+				       stats_req->buf, bytes_avail);
+}
+
+/* debugfs "htt_peer_stats": request on open, read response, free on close */
+static const struct file_operations fops_htt_peer_stats = {
+	.open = ath12k_dbg_sta_open_htt_peer_stats,
+	.release = ath12k_dbg_sta_release_htt_peer_stats,
+	.read = ath12k_dbg_sta_read_htt_peer_stats,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* write() handler for the per-link-station "peer_pktlog" file.
+ *
+ * Parses an integer enable flag from userspace, records it (with the peer
+ * MAC) in ar->debug and pushes a peer-based pktlog filter to the firmware
+ * via WMI.  Fails with -ENETDOWN unless the radio is started.
+ * Lock order: ah->conf_mutex -> ar->conf_mutex.
+ */
+static ssize_t ath12k_dbg_sta_write_peer_pktlog(struct file *file,
+						const char __user *buf,
+						size_t count, loff_t *ppos)
+{
+	struct ieee80211_link_sta *link_sta = file->private_data;
+	struct ieee80211_sta *sta = link_sta->sta;
+	u8 link_id = link_sta->link_id;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_hw *ah = ahsta->ahvif->ah;
+	struct ath12k_link_sta *arsta;
+	struct ath12k *ar;
+	int ret, enable;
+
+	mutex_lock(&ah->conf_mutex);
+
+	/* Bail out if the link no longer exists on this station */
+	if (!(BIT(link_id) & ahsta->links_map)) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	arsta = ahsta->link[link_id];
+
+	if (!arsta || !arsta->arvif->ar) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	ar = arsta->arvif->ar;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH12K_STATE_ON) {
+		ret = -ENETDOWN;
+		goto out;
+	}
+
+	ret = kstrtoint_from_user(buf, count, 0, &enable);
+	if (ret)
+		goto out;
+
+	/* NOTE(review): the flag and address are recorded before the WMI
+	 * call; if the call below fails they stay set even though the
+	 * firmware filter was not applied -- confirm this is intended.
+	 */
+	ar->debug.pktlog_peer_valid = enable;
+	memcpy(ar->debug.pktlog_peer_addr, link_sta->addr, ETH_ALEN);
+
+	/* Send peer based pktlog enable/disable */
+	ret = ath12k_wmi_pdev_peer_pktlog_filter(ar, link_sta->addr, enable);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set peer pktlog filter %pM: %d\n",
+			    sta->addr, ret);
+		goto out;
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "peer pktlog filter set to %d\n",
+		   enable);
+	ret = count;
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+	mutex_unlock(&ah->conf_mutex);
+	return ret;
+}
+
+/* read() handler for "peer_pktlog": report the currently recorded enable
+ * flag and peer MAC address as "%08x %pM".
+ */
+static ssize_t ath12k_dbg_sta_read_peer_pktlog(struct file *file,
+					       char __user *ubuf,
+					       size_t count, loff_t *ppos)
+{
+	struct ieee80211_link_sta *link_sta = file->private_data;
+	struct ieee80211_sta *sta = link_sta->sta;
+	u8 link_id = link_sta->link_id;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_hw *ah = ahsta->ahvif->ah;
+	struct ath12k_link_sta *arsta = NULL;
+	struct ath12k *ar;
+	char line[32] = {0};
+	int written;
+
+	mutex_lock(&ah->conf_mutex);
+
+	if (BIT(link_id) & ahsta->links_map)
+		arsta = ahsta->link[link_id];
+
+	/* Link gone, never set up, or not bound to a radio */
+	if (!arsta || !arsta->arvif->ar) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	ar = arsta->arvif->ar;
+
+	mutex_lock(&ar->conf_mutex);
+	written = scnprintf(line, sizeof(line), "%08x %pM\n",
+			    ar->debug.pktlog_peer_valid,
+			    ar->debug.pktlog_peer_addr);
+	mutex_unlock(&ar->conf_mutex);
+	mutex_unlock(&ah->conf_mutex);
+
+	return simple_read_from_buffer(ubuf, count, ppos, line, written);
+}
+
+/* debugfs "peer_pktlog": write enables/disables the filter, read reports it */
+static const struct file_operations fops_peer_pktlog = {
+	.write = ath12k_dbg_sta_write_peer_pktlog,
+	.read = ath12k_dbg_sta_read_peer_pktlog,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* write() handler for "delba": parse "<tid> <initiator> <reason>" and send
+ * a DELBA for this station.  Only acted upon when the radio is up and the
+ * station is in manual aggregation mode; a send failure is logged but the
+ * write still counts as consumed.
+ */
+static ssize_t ath12k_dbg_sta_write_delba(struct file *file,
+					  const char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct ieee80211_sta *sta = file->private_data;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_link_sta *arsta = &ahsta->deflink;
+	struct ath12k *ar = arsta->arvif->ar;
+	char cmd[64] = {0};
+	u32 tid, initiator, reason;
+	int ret;
+
+	ret = simple_write_to_buffer(cmd, sizeof(cmd) - 1, ppos,
+				     user_buf, count);
+	if (ret <= 0)
+		return ret;
+
+	if (sscanf(cmd, "%u %u %u", &tid, &initiator, &reason) != 3)
+		return -EINVAL;
+
+	/* Only QoS TIDs 0..15 may carry a block-ack session */
+	if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+	if (ar->state == ATH12K_STATE_ON &&
+	    ahsta->aggr_mode == ATH12K_DBG_AGGR_MODE_MANUAL) {
+		ret = ath12k_wmi_delba_send(ar, arsta->arvif->vdev_id,
+					    sta->addr, tid, initiator, reason);
+		if (ret)
+			ath12k_warn(ar->ab, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
+				    arsta->arvif->vdev_id, sta->addr, tid,
+				    initiator, reason);
+	}
+	mutex_unlock(&ar->conf_mutex);
+
+	return count;
+}
+
+/* debugfs "delba": write-only trigger for a manual DELBA */
+static const struct file_operations fops_delba = {
+	.write = ath12k_dbg_sta_write_delba,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* write() handler for "addba_resp": parse "<tid> <status>" and force the
+ * ADDBA response status for this station.  Only acted upon when the radio
+ * is up and the station is in manual aggregation mode; a send failure is
+ * logged but the write still counts as consumed.
+ */
+static ssize_t ath12k_dbg_sta_write_addba_resp(struct file *file,
+					       const char __user *user_buf,
+					       size_t count, loff_t *ppos)
+{
+	struct ieee80211_sta *sta = file->private_data;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_link_sta *arsta = &ahsta->deflink;
+	struct ath12k *ar = arsta->arvif->ar;
+	char cmd[64] = {0};
+	u32 tid, status;
+	int ret;
+
+	ret = simple_write_to_buffer(cmd, sizeof(cmd) - 1, ppos,
+				     user_buf, count);
+	if (ret <= 0)
+		return ret;
+
+	if (sscanf(cmd, "%u %u", &tid, &status) != 2)
+		return -EINVAL;
+
+	/* Only QoS TIDs 0..15 may carry a block-ack session */
+	if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+	if (ar->state == ATH12K_STATE_ON &&
+	    ahsta->aggr_mode == ATH12K_DBG_AGGR_MODE_MANUAL) {
+		ret = ath12k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id,
+						sta->addr, tid, status);
+		if (ret)
+			ath12k_warn(ar->ab, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n",
+				    arsta->arvif->vdev_id, sta->addr, tid,
+				    status);
+	}
+	mutex_unlock(&ar->conf_mutex);
+
+	return count;
+}
+
+/* debugfs "addba_resp": write-only trigger for a manual ADDBA response */
+static const struct file_operations fops_addba_resp = {
+	.write = ath12k_dbg_sta_write_addba_resp,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* write() handler for "addba": parse "<tid> <buf_size>" and start an ADDBA
+ * negotiation for this station.  Only acted upon when the radio is up and
+ * the station is in manual aggregation mode; a send failure is logged but
+ * the write still counts as consumed.
+ */
+static ssize_t ath12k_dbg_sta_write_addba(struct file *file,
+					  const char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct ieee80211_sta *sta = file->private_data;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_link_sta *arsta = &ahsta->deflink;
+	struct ath12k *ar = arsta->arvif->ar;
+	char cmd[64] = {0};
+	u32 tid, buf_size;
+	int ret;
+
+	ret = simple_write_to_buffer(cmd, sizeof(cmd) - 1, ppos,
+				     user_buf, count);
+	if (ret <= 0)
+		return ret;
+
+	if (sscanf(cmd, "%u %u", &tid, &buf_size) != 2)
+		return -EINVAL;
+
+	/* Only QoS TIDs 0..15 may carry a block-ack session */
+	if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+	if (ar->state == ATH12K_STATE_ON &&
+	    ahsta->aggr_mode == ATH12K_DBG_AGGR_MODE_MANUAL) {
+		ret = ath12k_wmi_addba_send(ar, arsta->arvif->vdev_id,
+					    sta->addr, tid, buf_size);
+		if (ret)
+			ath12k_warn(ar->ab, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
+				    arsta->arvif->vdev_id, sta->addr, tid,
+				    buf_size);
+	}
+	mutex_unlock(&ar->conf_mutex);
+
+	return count;
+}
+
+/* debugfs "addba": write-only trigger for a manual ADDBA request */
+static const struct file_operations fops_addba = {
+	.write = ath12k_dbg_sta_write_addba,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* read() handler for "aggr_mode": report whether BA aggregation for this
+ * station is driven automatically or manually, plus the legal values.
+ */
+static ssize_t ath12k_dbg_sta_read_aggr_mode(struct file *file,
+					     char __user *user_buf,
+					     size_t count, loff_t *ppos)
+{
+	struct ieee80211_sta *sta = file->private_data;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_link_sta *arsta = &ahsta->deflink;
+	struct ath12k *ar = arsta->arvif->ar;
+	char out[64];
+	int written;
+
+	/* Snapshot the mode under conf_mutex for a consistent report */
+	mutex_lock(&ar->conf_mutex);
+	written = scnprintf(out, sizeof(out),
+			    "aggregation mode: %s\n\n%s\n%s\n",
+			    ahsta->aggr_mode == ATH12K_DBG_AGGR_MODE_AUTO ?
+			    "auto" : "manual", "auto = 0", "manual = 1");
+	mutex_unlock(&ar->conf_mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, out, written);
+}
+
+/* write() handler for "aggr_mode": switch the station between automatic
+ * (0) and manual (1) BA aggregation.  Changing modes clears any existing
+ * ADDBA sessions via WMI first.
+ */
+static ssize_t ath12k_dbg_sta_write_aggr_mode(struct file *file,
+					      const char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ieee80211_sta *sta = file->private_data;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_link_sta *arsta = &ahsta->deflink;
+	struct ath12k *ar = arsta->arvif->ar;
+	u32 aggr_mode;
+	int ret;
+
+	if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
+		return -EINVAL;
+
+	if (aggr_mode >= ATH12K_DBG_AGGR_MODE_MAX)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+	/* No-op when the radio is down or the mode is unchanged */
+	if (ar->state != ATH12K_STATE_ON ||
+	    aggr_mode == ahsta->aggr_mode) {
+		ret = count;
+		goto out;
+	}
+
+	ret = ath12k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to clear addba session ret: %d\n",
+			    ret);
+		goto out;
+	}
+
+	ahsta->aggr_mode = aggr_mode;
+	/* Report the write as consumed.  The original fell through and
+	 * returned the 0 from the successful WMI call, which makes
+	 * userspace write() retry forever.
+	 */
+	ret = count;
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* debugfs "aggr_mode": read reports, write switches auto/manual BA mode */
+static const struct file_operations fops_aggr_mode = {
+	.read = ath12k_dbg_sta_read_aggr_mode,
+	.write = ath12k_dbg_sta_write_aggr_mode,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* write() handler for "htt_peer_stats_reset": writing a non-zero value
+ * sends an HTT ext-stats request that flushes/resets the firmware's
+ * per-peer stats for this link station.  Writing 0 is a no-op.
+ */
+static ssize_t
+ath12k_write_htt_peer_stats_reset(struct file *file,
+				  const char __user *user_buf,
+				  size_t count, loff_t *ppos)
+{
+	struct ieee80211_link_sta *link_sta = file->private_data;
+	struct ieee80211_sta *sta = link_sta->sta;
+	u8 link_id = link_sta->link_id;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_hw *ah = ahsta->ahvif->ah;
+	struct ath12k_link_sta *arsta;
+	struct ath12k *ar;
+	struct htt_ext_stats_cfg_params cfg_params = { 0 };
+	int ret;
+	u8 type;
+
+	ret = kstrtou8_from_user(user_buf, count, 0, &type);
+	if (ret)
+		return ret;
+
+	/* Writing 0 is a deliberate no-op, but the whole write must be
+	 * reported as consumed: the original returned 0 here, which makes
+	 * userspace write() retry forever.
+	 */
+	if (!type)
+		return count;
+
+	mutex_lock(&ah->conf_mutex);
+
+	if (!(BIT(link_id) & ahsta->links_map)) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	arsta = ahsta->link[link_id];
+
+	if (!arsta || !arsta->arvif->ar) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	ar = arsta->arvif->ar;
+
+	mutex_lock(&ar->conf_mutex);
+	/* cfg0: request mode (flush TQM) keyed by peer MAC address */
+	cfg_params.cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
+	cfg_params.cfg0 |= FIELD_PREP(GENMASK(15, 1),
+				HTT_PEER_STATS_REQ_MODE_FLUSH_TQM);
+
+	cfg_params.cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE;
+
+	/* cfg2/cfg3: peer MAC packed little-endian, one byte per field */
+	cfg_params.cfg2 |= FIELD_PREP(GENMASK(7, 0), link_sta->addr[0]);
+	cfg_params.cfg2 |= FIELD_PREP(GENMASK(15, 8), link_sta->addr[1]);
+	cfg_params.cfg2 |= FIELD_PREP(GENMASK(23, 16), link_sta->addr[2]);
+	cfg_params.cfg2 |= FIELD_PREP(GENMASK(31, 24), link_sta->addr[3]);
+
+	cfg_params.cfg3 |= FIELD_PREP(GENMASK(7, 0), link_sta->addr[4]);
+	cfg_params.cfg3 |= FIELD_PREP(GENMASK(15, 8), link_sta->addr[5]);
+
+	cfg_params.cfg3 |= ATH12K_HTT_PEER_STATS_RESET;
+
+	ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar,
+						 ATH12K_DBG_HTT_EXT_STATS_PEER_INFO,
+						 &cfg_params,
+						 0ULL);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to send htt peer stats request: %d\n", ret);
+		mutex_unlock(&ar->conf_mutex);
+		mutex_unlock(&ah->conf_mutex);
+		return ret;
+	}
+
+	mutex_unlock(&ar->conf_mutex);
+	mutex_unlock(&ah->conf_mutex);
+
+	ret = count;
+
+	return ret;
+}
+
+/* debugfs "htt_peer_stats_reset": write-only trigger for a firmware-side
+ * per-peer stats reset.
+ */
+static const struct file_operations fops_htt_peer_stats_reset = {
+	.write = ath12k_write_htt_peer_stats_reset,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* read() handler for "driver_tx_pkts_flow": report how many tx packets
+ * entered the driver from mac80211 versus how many were handed to the
+ * hardware, to spot packets stuck inside the driver.
+ */
+static ssize_t
+ath12k_dbg_sta_dump_driver_tx_pkts_flow(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct ieee80211_link_sta *link_sta = file->private_data;
+	struct ieee80211_sta *sta = link_sta->sta;
+	u8 link_id = link_sta->link_id;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_hw *ah = ahsta->ahvif->ah;
+	struct ath12k_link_sta *arsta;
+	struct ath12k *ar;
+	int len = 0, ret_val;
+	const int size = ATH12K_DRV_TX_STATS_SIZE;
+	char *buf;
+
+	mutex_lock(&ah->conf_mutex);
+
+	if (!(BIT(link_id) & ahsta->links_map)) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	arsta = ahsta->link[link_id];
+
+	if (!arsta || !arsta->arvif->ar) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	ar = arsta->arvif->ar;
+
+	if (!arsta->tx_stats) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	/* use 'size' rather than repeating the macro */
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&ar->conf_mutex);
+	spin_lock_bh(&ar->ab->base_lock);
+	len += scnprintf(buf + len, size - len,
+			 "Tx packets inflow from mac80211: %u\n",
+			 atomic_read(&arsta->drv_tx_pkts.pkts_in));
+	len += scnprintf(buf + len, size - len,
+			 "Tx packets outflow to HW: %u\n",
+			 atomic_read(&arsta->drv_tx_pkts.pkts_out));
+	spin_unlock_bh(&ar->ab->base_lock);
+
+	if (len > size)
+		len = size;
+
+	/* Pass user_buf straight through: the original's (char *) cast
+	 * stripped the __user address-space annotation.
+	 */
+	ret_val = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+
+	mutex_unlock(&ar->conf_mutex);
+	mutex_unlock(&ah->conf_mutex);
+	return ret_val;
+}
+
+/* debugfs file ops for "driver_tx_pkts_flow" (read-only) */
+static const struct file_operations fops_driver_tx_pkts_flow = {
+	.read = ath12k_dbg_sta_dump_driver_tx_pkts_flow,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* debugfs "reset_tx_stats" write handler: writing "1" clears all per-link
+ * tx statistics (extended stats, driver flow counters and wbm completion
+ * stats); any other value is rejected.
+ */
+static ssize_t ath12k_dbg_sta_reset_tx_stats(struct file *file,
+					     const char __user *buf,
+					     size_t count, loff_t *ppos)
+{
+	struct ieee80211_link_sta *link_sta = file->private_data;
+	struct ieee80211_sta *sta = link_sta->sta;
+	u8 link_id = link_sta->link_id;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_hw *ah = ahsta->ahvif->ah;
+	struct ath12k_link_sta *arsta;
+	struct ath12k *ar;
+	int ret, reset;
+
+	ret = kstrtoint_from_user(buf, count, 0, &reset);
+	if (ret)
+		return ret;
+
+	/* only "1" is valid; the old "!reset || reset > 1" test wrongly
+	 * accepted negative values
+	 */
+	if (reset != 1)
+		return -EINVAL;
+
+	mutex_lock(&ah->conf_mutex);
+
+	if (!(BIT(link_id) & ahsta->links_map)) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	arsta = ahsta->link[link_id];
+
+	if (!arsta || !arsta->arvif->ar) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	ar = arsta->arvif->ar;
+
+	if (!arsta->tx_stats || !arsta->wbm_tx_stats) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+	spin_lock_bh(&ar->ab->base_lock);
+	memset(arsta->tx_stats, 0, sizeof(*arsta->tx_stats));
+	atomic_set(&arsta->drv_tx_pkts.pkts_in, 0);
+	atomic_set(&arsta->drv_tx_pkts.pkts_out, 0);
+	/* clear the whole wbm tx stats object; the old code cleared only
+	 * the wbm_tx_comp_stats member but with the size of the full
+	 * struct, which could write past the end of that member
+	 */
+	memset(arsta->wbm_tx_stats, 0, sizeof(*arsta->wbm_tx_stats));
+	spin_unlock_bh(&ar->ab->base_lock);
+
+	mutex_unlock(&ah->conf_mutex);
+
+	return count;
+}
+
+/* debugfs file ops for "reset_tx_stats" (write-only) */
+static const struct file_operations fops_reset_tx_stats = {
+	.write = ath12k_dbg_sta_reset_tx_stats,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* debugfs "driver_rx_pkts_flow" read handler: reports the number of rx
+ * packets received from the HW and the number delivered out of the
+ * driver for this link sta.
+ */
+static ssize_t
+ath12k_dbg_sta_dump_driver_rx_pkts_flow(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct ieee80211_link_sta *link_sta = file->private_data;
+	struct ieee80211_sta *sta = link_sta->sta;
+	u8 link_id = link_sta->link_id;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_hw *ah = ahsta->ahvif->ah;
+	struct ath12k_link_sta *arsta;
+	struct ath12k *ar;
+	struct ath12k_rx_peer_stats *rx_stats;
+	int len = 0, ret_val = 0;
+	const int size = 1024;
+	char *buf;
+
+	/* ah->conf_mutex keeps the link -> ar mapping stable for the dump */
+	mutex_lock(&ah->conf_mutex);
+
+	if (!(BIT(link_id) & ahsta->links_map)) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	arsta = ahsta->link[link_id];
+
+	if (!arsta || !arsta->arvif->ar) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	ar = arsta->arvif->ar;
+
+	/* rx_stats is only used here as an "rx stats enabled" indicator */
+	rx_stats = arsta->rx_stats;
+	if (!rx_stats) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&ar->conf_mutex);
+	spin_lock_bh(&ar->ab->base_lock);
+
+	len += scnprintf(buf + len, size - len,
+			 "Rx packets inflow from HW: %u\n",
+			 atomic_read(&arsta->drv_rx_pkts.pkts_frm_hw));
+	len += scnprintf(buf + len, size - len,
+			 "Rx packets outflow from driver: %u\n",
+			 atomic_read(&arsta->drv_rx_pkts.pkts_out));
+
+	len += scnprintf(buf + len, size - len, "\n");
+
+	spin_unlock_bh(&ar->ab->base_lock);
+
+	/* scnprintf never writes past size, this is just belt-and-braces */
+	if (len > size)
+		len = size;
+
+	ret_val = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+
+	mutex_unlock(&ar->conf_mutex);
+	mutex_unlock(&ah->conf_mutex);
+	return ret_val;
+}
+
+/* debugfs file ops for "driver_rx_pkts_flow" (read-only) */
+static const struct file_operations fops_driver_rx_pkts_flow = {
+	.read = ath12k_dbg_sta_dump_driver_rx_pkts_flow,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* debugfs "reset_rx_stats" write handler: writing "1" clears the per-link
+ * rx statistics and driver rx flow counters; any other value is rejected.
+ */
+static ssize_t ath12k_dbg_sta_reset_rx_stats(struct file *file,
+					     const char __user *buf,
+					     size_t count, loff_t *ppos)
+{
+	struct ieee80211_link_sta *link_sta = file->private_data;
+	struct ieee80211_sta *sta = link_sta->sta;
+	u8 link_id = link_sta->link_id;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_hw *ah = ahsta->ahvif->ah;
+	struct ath12k_link_sta *arsta;
+	struct ath12k *ar;
+	int ret, reset;
+
+	ret = kstrtoint_from_user(buf, count, 0, &reset);
+	if (ret)
+		return ret;
+
+	/* only "1" is valid; the old "!reset || reset > 1" test wrongly
+	 * accepted negative values
+	 */
+	if (reset != 1)
+		return -EINVAL;
+
+	mutex_lock(&ah->conf_mutex);
+
+	if (!(BIT(link_id) & ahsta->links_map)) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	arsta = ahsta->link[link_id];
+
+	if (!arsta || !arsta->arvif->ar) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	ar = arsta->arvif->ar;
+	if (!arsta->rx_stats) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	spin_lock_bh(&ar->ab->base_lock);
+	memset(arsta->rx_stats, 0, sizeof(*arsta->rx_stats));
+	atomic_set(&arsta->drv_rx_pkts.pkts_frm_hw, 0);
+	atomic_set(&arsta->drv_rx_pkts.pkts_out, 0);
+	spin_unlock_bh(&ar->ab->base_lock);
+	mutex_unlock(&ah->conf_mutex);
+
+	return count;
+}
+
+/* debugfs file ops for "reset_rx_stats" (write-only) */
+static const struct file_operations fops_reset_rx_stats = {
+	.write = ath12k_dbg_sta_reset_rx_stats,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+#ifdef CONFIG_ATH12K_SAWF
+/* Append the SAWF tx delay statistics of one (tid, msdu queue) pair to
+ * buf and return the updated length.  Caller must hold ab->base_lock.
+ */
+static int ath12k_fill_sawf_tx_delay_stats(struct ath12k_base *ab, struct ath12k_peer *peer,
+					   char *buf, int len, int size,
+					   u8 q_id, u8 tid)
+{
+	struct sawf_tx_delay_stats *tx_delay_stats;
+
+	lockdep_assert_held(&ab->base_lock);
+
+	/* sawf_stats is embedded in the peer, so taking its address can
+	 * never yield NULL; the old "!sawf_stats" check was dead code
+	 */
+	tx_delay_stats = &peer->sawf_stats.tx_delay_stats[tid][q_id];
+
+	len += scnprintf(buf + len, size - len, "Delay Bins\n");
+	len += scnprintf(buf + len, size - len, "Min %u\n", tx_delay_stats->delay_hist.min);
+	len += scnprintf(buf + len, size - len, "Max %u\n", tx_delay_stats->delay_hist.max);
+	len += scnprintf(buf + len, size - len, "Avg %u\n", tx_delay_stats->delay_hist.avg);
+	len += scnprintf(buf + len, size - len, "NWDelay moving avg %u\n", tx_delay_stats->nwdelay_avg);
+	len += scnprintf(buf + len, size - len, "SWDelay moving avg %u\n", tx_delay_stats->swdelay_avg);
+	len += scnprintf(buf + len, size - len, "HWDelay moving avg %u\n", tx_delay_stats->hwdelay_avg);
+	len += scnprintf(buf + len, size - len, "Delay Bound Success %llu\n", tx_delay_stats->success);
+	len += scnprintf(buf + len, size - len, "Delay Bound Failure %llu\n", tx_delay_stats->failure);
+
+	return len;
+}
+
+/* debugfs "sawf_tx_delay_stats" read handler: dumps the tx delay stats
+ * for the msdu queue bound to the configured service class, or for all
+ * user defined queues when no service class is set on the link sta.
+ */
+static ssize_t ath12k_dbg_sta_dump_sawf_tx_delay_stats(struct file *file,
+						       char __user *user_buf,
+						       size_t count, loff_t *ppos)
+{
+	struct ieee80211_link_sta *link_sta = file->private_data;
+	struct ieee80211_sta *sta = link_sta->sta;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_sawf_peer_ctx *peer_ctx = NULL;
+	const int size = 2 * ATH12K_SAWF_STATS_SIZE;
+	struct ath12k_hw *ah = ahsta->ahvif->ah;
+	struct ath12k_sawf_ctx *sawf_ctx;
+	u8 q_id, tid, max_usr_def_q_sawf;
+	u8 link_id = link_sta->link_id;
+	struct ath12k_link_sta *arsta;
+	struct ath12k_peer *peer;
+	struct ath12k *ar;
+	int len = 0;
+	char *buf;
+
+	mutex_lock(&ah->conf_mutex);
+
+	if (!(BIT(link_id) & ahsta->links_map)) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	arsta = ahsta->link[link_id];
+
+	if (!arsta || !arsta->arvif->ar) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	ar = arsta->arvif->ar;
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&ar->conf_mutex);
+	spin_lock_bh(&ar->ab->base_lock);
+
+	peer = ath12k_peer_find(ar->ab, arsta->arvif->vdev_id, arsta->addr);
+	if (!peer) {
+		/* message fixed: was copy-pasted from the rc update path */
+		ath12k_warn(ar->ab, "failed to find peer %pM on vdev %i for sawf tx delay stats\n",
+			   arsta->addr, arsta->arvif->vdev_id);
+		spin_unlock_bh(&ar->ab->base_lock);
+		len = -ENOENT;
+		goto unlock;
+	}
+
+	peer_ctx = &peer->sawf_ctx_peer;
+
+	sawf_ctx = ath12k_get_sawf_context();
+	/* peer_ctx is the address of an embedded member and can never be
+	 * NULL; only the global sawf context needs checking
+	 */
+	if (!sawf_ctx) {
+		spin_unlock_bh(&ar->ab->base_lock);
+		len = -EINVAL;
+		goto unlock;
+	}
+
+	max_usr_def_q_sawf = sawf_ctx->max_msduq_per_tid -
+					sawf_ctx->default_msduq_per_tid;
+
+	if (!arsta->sawf_svc_id)
+		goto dump_stats;
+
+	/* dump only the queue matching the configured service class */
+	for (tid = 0; tid < ATH12K_SAWF_MAX_TID_SUPPORT; tid++) {
+		for (q_id = 0; q_id < max_usr_def_q_sawf; q_id++) {
+			if (arsta->sawf_svc_id == peer_ctx->msduq_table[tid][q_id].svc_id) {
+				len = ath12k_fill_sawf_tx_delay_stats(ar->ab, peer, buf, len, size, q_id, tid);
+				goto exit;
+			}
+		}
+	}
+
+dump_stats:
+	for (tid = 0; tid < ATH12K_SAWF_MAX_TID_SUPPORT; tid++) {
+		for (q_id = 0; q_id < max_usr_def_q_sawf; q_id++)
+			len = ath12k_fill_sawf_tx_delay_stats(ar->ab, peer, buf, len, size, q_id, tid);
+	}
+
+exit:
+	spin_unlock_bh(&ar->ab->base_lock);
+	if (len > size)
+		len = size;
+	if (len)
+		len = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+unlock:
+	mutex_unlock(&ar->conf_mutex);
+	mutex_unlock(&ah->conf_mutex);
+	kfree(buf);
+	return len;
+}
+
+/* debugfs file ops for "sawf_tx_delay_stats" (read-only) */
+static const struct file_operations fops_sawf_tx_delay_stats = {
+	.read = ath12k_dbg_sta_dump_sawf_tx_delay_stats,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* Append the SAWF tx statistics of one (tid, msdu queue) pair to buf and
+ * return the updated length.  Caller must hold ab->base_lock.
+ */
+static int ath12k_fill_sawf_tx_stats(struct ath12k_base *ab, struct ath12k_peer *peer,
+				     char *buf, int len, int size,
+				     u8 q_id, u8 tid)
+{
+	struct sawf_fw_mpdu_stats *svc_intval_stats;
+	struct sawf_fw_mpdu_stats *burst_size_stats;
+	struct sawf_tx_stats *tx_stats;
+
+	lockdep_assert_held(&ab->base_lock);
+
+	/* sawf_stats is embedded in the peer; the old "!sawf_stats" NULL
+	 * check on its address was dead code and has been dropped
+	 */
+	tx_stats = &peer->sawf_stats.tx_stats[tid][q_id];
+
+	svc_intval_stats = &tx_stats->svc_intval_stats;
+	burst_size_stats = &tx_stats->burst_size_stats;
+
+	len += scnprintf(buf + len, size - len, "tx_succ_pkts: %u\n",
+			 tx_stats->tx_success.num_pkts);
+	len += scnprintf(buf + len, size - len, "tx_succ_bytes: %llu\n",
+			 tx_stats->tx_success.bytes);
+	len += scnprintf(buf + len, size - len, "tx_ingress_pkts: %u\n",
+			 tx_stats->tx_ingress.num_pkts);
+	len += scnprintf(buf + len, size - len, "tx_ingress_bytes: %llu\n",
+			 tx_stats->tx_ingress.bytes);
+	len += scnprintf(buf + len, size - len, "tx_remove_mpdu_pkts: %u\n",
+			 tx_stats->dropped.fw_rem.num_pkts);
+	len += scnprintf(buf + len, size - len, "tx_remove_mpdu_bytes: %llu\n",
+			 tx_stats->dropped.fw_rem.bytes);
+	len += scnprintf(buf + len, size - len, "tx_remove_tx_pkts: %u\n",
+			 tx_stats->dropped.fw_rem_tx);
+	len += scnprintf(buf + len, size - len, "tx_remove_notx_pkts: %u\n",
+			 tx_stats->dropped.fw_rem_notx);
+	len += scnprintf(buf + len, size - len, "tx_remove_aged_pkts: %u\n",
+			 tx_stats->dropped.age_out);
+	len += scnprintf(buf + len, size - len, "tx_remove_fw_reason1: %u\n",
+			 tx_stats->dropped.fw_reason1);
+	len += scnprintf(buf + len, size - len, "tx_remove_fw_reason2: %u\n",
+			 tx_stats->dropped.fw_reason2);
+	len += scnprintf(buf + len, size - len, "tx_remove_fw_reason3: %u\n",
+			 tx_stats->dropped.fw_reason3);
+	len += scnprintf(buf + len, size - len, "tx_failed: %u\n",
+			 tx_stats->tx_failed);
+	len += scnprintf(buf + len, size - len, "queue_depth: %u\n",
+			 tx_stats->queue_depth);
+
+	/* svc_intval_stats and burst_size_stats are member addresses and
+	 * therefore always non-NULL; the old conditionals were dead code,
+	 * so print them unconditionally
+	 */
+	len += scnprintf(buf + len, size - len, "Service intvl success_cnt : %llu\n",
+			 svc_intval_stats->success_cnt);
+	len += scnprintf(buf + len, size - len, "Service intvl failure_cnt : %llu\n",
+			 svc_intval_stats->failure_cnt);
+
+	len += scnprintf(buf + len, size - len, "Burst Size success_cnt : %llu\n",
+			 burst_size_stats->success_cnt);
+	len += scnprintf(buf + len, size - len, "Burst Size failure_cnt : %llu\n",
+			 burst_size_stats->failure_cnt);
+
+	return len;
+}
+
+/* debugfs "sawf_tx_stats" read handler: dumps the tx stats for the msdu
+ * queue bound to the configured service class, or for all user defined
+ * queues when no service class is set on the link sta.
+ */
+static ssize_t ath12k_dbg_sta_dump_sawf_tx_stats(struct file *file,
+						 char __user *user_buf,
+						 size_t count, loff_t *ppos)
+{
+	struct ieee80211_link_sta *link_sta = file->private_data;
+	struct ieee80211_sta *sta = link_sta->sta;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_sawf_peer_ctx *peer_ctx = NULL;
+	const int size = 2 * ATH12K_SAWF_STATS_SIZE;
+	struct ath12k_hw *ah = ahsta->ahvif->ah;
+	struct ath12k_sawf_ctx *sawf_ctx;
+	u8 q_id, tid, max_usr_def_q_sawf;
+	u8 link_id = link_sta->link_id;
+	struct ath12k_link_sta *arsta;
+	struct ath12k_peer *peer;
+	struct ath12k *ar;
+	int len = 0;
+	char *buf;
+
+	mutex_lock(&ah->conf_mutex);
+
+	if (!(BIT(link_id) & ahsta->links_map)) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	arsta = ahsta->link[link_id];
+
+	if (!arsta || !arsta->arvif->ar) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	ar = arsta->arvif->ar;
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&ar->conf_mutex);
+	spin_lock_bh(&ar->ab->base_lock);
+
+	peer = ath12k_peer_find(ar->ab, arsta->arvif->vdev_id, arsta->addr);
+	if (!peer) {
+		/* message fixed: was copy-pasted from the rc update path */
+		ath12k_warn(ar->ab, "failed to find peer %pM on vdev %i for sawf tx stats\n",
+			   arsta->addr, arsta->arvif->vdev_id);
+		spin_unlock_bh(&ar->ab->base_lock);
+		len = -ENOENT;
+		goto unlock;
+	}
+
+	peer_ctx = &peer->sawf_ctx_peer;
+
+	sawf_ctx = ath12k_get_sawf_context();
+	/* peer_ctx is the address of an embedded member and can never be
+	 * NULL; only the global sawf context needs checking
+	 */
+	if (!sawf_ctx) {
+		spin_unlock_bh(&ar->ab->base_lock);
+		len = -EINVAL;
+		goto unlock;
+	}
+
+	max_usr_def_q_sawf = sawf_ctx->max_msduq_per_tid -
+					sawf_ctx->default_msduq_per_tid;
+
+	if (!arsta->sawf_svc_id)
+		goto dump_stats;
+
+	/* dump only the queue matching the configured service class */
+	for (tid = 0; tid < ATH12K_SAWF_MAX_TID_SUPPORT; tid++) {
+		for (q_id = 0; q_id < max_usr_def_q_sawf; q_id++) {
+			if (arsta->sawf_svc_id == peer_ctx->msduq_table[tid][q_id].svc_id) {
+				len = ath12k_fill_sawf_tx_stats(ar->ab, peer, buf, len, size, q_id, tid);
+				goto exit;
+			}
+		}
+	}
+
+dump_stats:
+	for (tid = 0; tid < ATH12K_SAWF_MAX_TID_SUPPORT; tid++) {
+		for (q_id = 0; q_id < max_usr_def_q_sawf; q_id++)
+			len = ath12k_fill_sawf_tx_stats(ar->ab, peer, buf, len, size, q_id, tid);
+	}
+
+exit:
+	spin_unlock_bh(&ar->ab->base_lock);
+	if (len > size)
+		len = size;
+	if (len)
+		len = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+unlock:
+	mutex_unlock(&ar->conf_mutex);
+	mutex_unlock(&ah->conf_mutex);
+	kfree(buf);
+	return len;
+}
+
+/* debugfs file ops for "sawf_tx_stats" (read-only) */
+static const struct file_operations fops_sawf_tx_stats = {
+	.read = ath12k_dbg_sta_dump_sawf_tx_stats,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* debugfs "svc_id" write handler: selects which SAWF service class the
+ * stats dumps above filter on.  0 clears the filter; otherwise the value
+ * must lie in [ATH12K_SAWF_SVC_CLASS_MIN, ATH12K_SAWF_SVC_CLASS_MAX].
+ */
+static ssize_t ath12k_dbg_sta_set_svc_id(struct file *file,
+					 const char __user *buf,
+					 size_t count, loff_t *ppos)
+{
+	struct ieee80211_link_sta *link_sta = file->private_data;
+	struct ieee80211_sta *sta = link_sta->sta;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_hw *ah = ahsta->ahvif->ah;
+	u8 link_id = link_sta->link_id;
+	struct ath12k_link_sta *arsta;
+	struct ath12k *ar;
+	int ret, svc_id;
+
+	ret = kstrtoint_from_user(buf, count, 0, &svc_id);
+	if (ret)
+		return ret;
+
+	if (svc_id && (svc_id < ATH12K_SAWF_SVC_CLASS_MIN ||
+		       svc_id > ATH12K_SAWF_SVC_CLASS_MAX))
+		return -EINVAL;
+
+	mutex_lock(&ah->conf_mutex);
+
+	if (!(BIT(link_id) & ahsta->links_map)) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	arsta = ahsta->link[link_id];
+
+	if (!arsta || !arsta->arvif->ar) {
+		mutex_unlock(&ah->conf_mutex);
+		return -ENOENT;
+	}
+
+	ar = arsta->arvif->ar;
+
+	/* base_lock serializes against readers of sawf_svc_id in the dump
+	 * handlers
+	 */
+	spin_lock_bh(&ar->ab->base_lock);
+	arsta->sawf_svc_id = svc_id;
+	spin_unlock_bh(&ar->ab->base_lock);
+	mutex_unlock(&ah->conf_mutex);
+
+	return count;
+}
+
+/* debugfs file ops for "svc_id" (write-only) */
+static const struct file_operations fops_svc_id = {
+	.write = ath12k_dbg_sta_set_svc_id,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+#endif /* CONFIG_ATH12K_SAWF */
+
+/* mac80211 .sta_add_debugfs callback: creates the per-sta (MLD level)
+ * aggregation control files
+ */
+void ath12k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			       struct ieee80211_sta *sta, struct dentry *dir)
+{
+	debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode);
+	debugfs_create_file("addba", 0200, dir, sta, &fops_addba);
+	debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp);
+	debugfs_create_file("delba", 0200, dir, sta, &fops_delba);
+}
+
+/* mac80211 .link_sta_add_debugfs callback: creates the per-link sta
+ * stats files, gating optional ones on the enabled stats features and
+ * firmware service bits.
+ */
+void ath12k_debugfs_link_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+				    struct ieee80211_link_sta *link_sta, struct dentry *dir)
+{
+	u8 link_id = link_sta->link_id;
+	struct ath12k *ar;
+	struct ath12k_hw *ah = hw->priv;
+
+	mutex_lock(&ah->conf_mutex);
+
+	ar = ath12k_get_ar_by_vif(hw, vif, link_id);
+	if (!ar) {
+		mutex_unlock(&ah->conf_mutex);
+		return;
+	}
+
+	if (ath12k_debugfs_is_extd_tx_stats_enabled(ar)) {
+		debugfs_create_file("tx_stats", 0400, dir, link_sta,
+				    &fops_tx_stats);
+		debugfs_create_file("reset_tx_stats", 0600, dir, link_sta,
+				    &fops_reset_tx_stats);
+		debugfs_create_file("driver_tx_pkts_flow", 0400, dir, link_sta,
+				    &fops_driver_tx_pkts_flow);
+	}
+	if (ath12k_debugfs_is_extd_rx_stats_enabled(ar)) {
+		debugfs_create_file("rx_stats", 0400, dir, link_sta,
+				    &fops_rx_stats);
+		debugfs_create_file("reset_rx_stats", 0600, dir, link_sta,
+				    &fops_reset_rx_stats);
+		debugfs_create_file("driver_rx_pkts_flow", 0400, dir, link_sta,
+				    &fops_driver_rx_pkts_flow);
+	}
+
+	debugfs_create_file("htt_peer_stats", 0400, dir, link_sta,
+			    &fops_htt_peer_stats);
+
+	debugfs_create_file("peer_pktlog", 0644, dir, link_sta,
+			    &fops_peer_pktlog);
+
+	if (test_bit(WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET,
+		     ar->ab->wmi_ab.svc_map))
+		debugfs_create_file("htt_peer_stats_reset", 0600, dir, link_sta,
+				    &fops_htt_peer_stats_reset);
+
+#ifdef CONFIG_ATH12K_SAWF
+	if (ath12k_debugfs_is_sawf_stats_enabled(ar)) {
+		/* fops_svc_id only implements .write, so the file must be
+		 * created write-only (was wrongly 0400 / read-only)
+		 */
+		debugfs_create_file("svc_id", 0200, dir, link_sta,
+				    &fops_svc_id);
+		debugfs_create_file("sawf_tx_stats", 0400, dir, link_sta,
+				    &fops_sawf_tx_stats);
+		debugfs_create_file("sawf_tx_delay_stats", 0400, dir, link_sta,
+				    &fops_sawf_tx_delay_stats);
+	}
+#endif
+	mutex_unlock(&ah->conf_mutex);
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/debugfs_sta.h	2024-01-19 17:01:19.857846811 +0100
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH12K_DEBUGFS_STA_H_
+#define _ATH12K_DEBUGFS_STA_H_
+
+#include <net/mac80211.h>
+
+#include "core.h"
+#include "hal_tx.h"
+#include "dp_rx.h"
+
+#ifdef CONFIG_ATH12K_DEBUGFS
+
+/* mac80211 .sta_add_debugfs / .link_sta_add_debugfs callbacks */
+void ath12k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			       struct ieee80211_sta *sta, struct dentry *dir);
+void ath12k_debugfs_link_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+				    struct ieee80211_link_sta *link_sta, struct dentry *dir);
+/* stats accounting helpers called from the tx completion path */
+void ath12k_debugfs_sta_add_tx_stats(struct ath12k_link_sta *arsta,
+				     struct ath12k_per_peer_tx_stats *peer_stats,
+				     u8 legacy_rate_idx);
+void ath12k_debugfs_sta_update_txcompl(struct ath12k *ar,
+				       struct hal_tx_status *ts);
+
+#else /* CONFIG_ATH12K_DEBUGFS */
+
+/* NULL callbacks and empty inline stubs when debugfs is compiled out */
+#define ath12k_debugfs_sta_op_add NULL
+#define ath12k_debugfs_link_sta_op_add NULL
+
+static inline void
+ath12k_debugfs_sta_add_tx_stats(struct ath12k_link_sta *arsta,
+				struct ath12k_per_peer_tx_stats *peer_stats,
+				u8 legacy_rate_idx)
+{
+}
+
+static inline void ath12k_debugfs_sta_update_txcompl(struct ath12k *ar,
+						     struct hal_tx_status *ts)
+{
+}
+
+#endif /* CONFIG_ATH12K_DEBUGFS */
+
+#endif /* _ATH12K_DEBUGFS_STA_H_ */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/pcic.c	2024-04-19 16:04:28.957735776 +0200
@@ -0,0 +1,1033 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include "core.h"
+#include "pcic.h"
+#include "debug.h"
+#include "ppe.h"
+
+/* NAPI budget for tx completion groups; kept lower than the rx budget to
+ * prioritize tx enqueue (see ath12k_pcic_ext_config_gic_msi_irq)
+ */
+unsigned int tx_comp_budget = 0x7F;
+module_param_named(tx_comp_budget, tx_comp_budget, uint, 0644);
+MODULE_PARM_DESC(tx_comp_budget, "tx_comp_budget");
+
+/* default NAPI budget for rx processing per interrupt */
+unsigned int ath12k_napi_poll_budget = 0x7f;
+module_param_named(napi_budget, ath12k_napi_poll_budget, uint, 0644);
+MODULE_PARM_DESC(napi_budget, "Napi budget processing per rx intr");
+
+/* Human-readable names for each MSI vector, indexed by irq number.
+ * NOTE(review): "reo2ost-exception" looks like a typo for
+ * "reo2host-exception"; the string may be user visible, so confirm
+ * against firmware/tooling expectations before renaming.
+ */
+static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
+	"bhi",
+	"mhi-er0",
+	"mhi-er1",
+	"ce0",
+	"ce1",
+	"ce2",
+	"ce3",
+	"ce4",
+	"ce5",
+	"ce6",
+	"ce7",
+	"ce8",
+	"ce9",
+	"ce10",
+	"ce11",
+	"ce12",
+	"ce13",
+	"ce14",
+	"ce15",
+	"host2wbm-desc-feed",
+	"host2reo-re-injection",
+	"host2reo-command",
+	"host2rxdma-monitor-ring3",
+	"host2rxdma-monitor-ring2",
+	"host2rxdma-monitor-ring1",
+	"reo2ost-exception",
+	"wbm2host-rx-release",
+	"reo2host-status",
+	"reo2host-destination-ring4",
+	"reo2host-destination-ring3",
+	"reo2host-destination-ring2",
+	"reo2host-destination-ring1",
+	"rxdma2host-monitor-destination-mac3",
+	"rxdma2host-monitor-destination-mac2",
+	"rxdma2host-monitor-destination-mac1",
+	"ppdu-end-interrupts-mac3",
+	"ppdu-end-interrupts-mac2",
+	"ppdu-end-interrupts-mac1",
+	"rxdma2host-monitor-status-ring-mac3",
+	"rxdma2host-monitor-status-ring-mac2",
+	"rxdma2host-monitor-status-ring-mac1",
+	"host2rxdma-host-buf-ring-mac3",
+	"host2rxdma-host-buf-ring-mac2",
+	"host2rxdma-host-buf-ring-mac1",
+	"rxdma2host-destination-ring-mac3",
+	"rxdma2host-destination-ring-mac2",
+	"rxdma2host-destination-ring-mac1",
+	"host2tcl-input-ring4",
+	"host2tcl-input-ring3",
+	"host2tcl-input-ring2",
+	"host2tcl-input-ring1",
+	"wbm2host-tx-completions-ring4",
+	"wbm2host-tx-completions-ring3",
+	"wbm2host-tx-completions-ring2",
+	"wbm2host-tx-completions-ring1",
+	"tcl2host-status-ring",
+};
+
+/* Per-PCI-domain irq name buffers passed to (devm_)request_irq();
+ * presumably global so the name storage outlives the registration —
+ * request_irq() keeps the pointer rather than copying the string.
+ */
+char dp_irq_name[ATH12K_MAX_PCI_DOMAINS + 1][ATH12K_EXT_IRQ_DP_NUM_VECTORS][DP_IRQ_NAME_LEN] = {};
+char dp_pcic_irq_name[ATH12K_MAX_PCI_DOMAINS + 1][ATH12K_EXT_IRQ_DP_NUM_VECTORS][DP_IRQ_NAME_LEN] = {};
+char ce_irq_name[ATH12K_MAX_PCI_DOMAINS + 1][ATH12K_IRQ_NUM_MAX][DP_IRQ_NAME_LEN] = {};
+
+/* Program the static windows (window 2: CE registers, window 3: UMAC
+ * registers) of the PCI window register and enable windowing.
+ */
+void ath12k_pcic_config_static_window(struct ath12k_base *ab)
+{
+	u32 umac_window = u32_get_bits(HAL_SEQ_WCSS_UMAC_OFFSET, WINDOW_VALUE_MASK);
+	u32 ce_window = u32_get_bits(HAL_CE_WFSS_CE_REG_BASE, WINDOW_VALUE_MASK);
+	u32 window;
+
+	/* bits 12..17 select the UMAC window, bits 6..11 the CE window;
+	 * bits 0..5 (window 1) stay dynamic
+	 */
+	window = (umac_window << 12) | (ce_window << 6);
+
+	iowrite32(WINDOW_ENABLE_BIT | window, ab->mem + WINDOW_REG_ADDRESS);
+}
+
+/* Point dynamic window 1 at the window containing addr, preserving the
+ * static windows 2 and 3 in the upper bits, and wait (up to 10 ms) until
+ * the write is reflected by the hardware.
+ */
+static void ath12k_pcic_select_static_window(struct ath12k_base *ab, u32 addr)
+{
+	u32 window = u32_get_bits(addr, WINDOW_VALUE_MASK);
+	u32 prev_window, curr_window, cur_val;
+	volatile u32 read_val;
+	int retry = 0;
+
+	prev_window = readl_relaxed(ab->mem + WINDOW_REG_ADDRESS);
+
+	/* Only the last 6 bits (window 1) are changed; windows 2 and 3
+	 * are unaffected.
+	 */
+	curr_window = (prev_window & ~(0x3f)) | window;
+	cur_val = WINDOW_ENABLE_BIT | curr_window;
+
+	/* Skip the write only if the register already holds exactly the
+	 * final value.  The old code compared against the masked previous
+	 * value, which wrongly skipped the write whenever the requested
+	 * window was 0 even though the register still pointed elsewhere.
+	 */
+	if (cur_val == prev_window)
+		return;
+
+	writel_relaxed(cur_val, ab->mem + WINDOW_REG_ADDRESS);
+
+	read_val = readl_relaxed(ab->mem + WINDOW_REG_ADDRESS);
+
+	/* If value written is not yet reflected, wait till it is reflected */
+	while ((read_val != cur_val) && (retry < 10)) {
+		mdelay(1);
+		read_val = readl_relaxed(ab->mem + WINDOW_REG_ADDRESS);
+		retry++;
+	}
+	if (retry == 10)
+		ath12k_warn(ab, "Failed to set static window for cmem init\n");
+}
+
+/* Read a 32-bit CMEM register; addresses beyond the directly mapped
+ * range go through dynamic window 1.
+ */
+u32 ath12k_pcic_cmem_read32(struct ath12k_base *ab, u32 addr)
+{
+	u32 val;
+
+	if (addr < WINDOW_START)
+		return readl_relaxed(ab->mem + addr);
+
+	ath12k_pcic_select_static_window(ab, addr);
+
+	val = readl_relaxed(ab->mem + WINDOW_START + (addr & WINDOW_RANGE_MASK));
+
+	return val;
+}
+
+/* Write a 32-bit CMEM register; addresses beyond the directly mapped
+ * range go through dynamic window 1.
+ */
+void ath12k_pcic_cmem_write32(struct ath12k_base *ab, u32 addr, u32 value)
+{
+	if (addr < WINDOW_START) {
+		writel_relaxed(value, ab->mem + addr);
+		return;
+	}
+
+	ath12k_pcic_select_static_window(ab, addr);
+
+	writel_relaxed(value, ab->mem + WINDOW_START + (addr & WINDOW_RANGE_MASK));
+}
+
+/* Map a register offset to the BAR offset of the window covering it.
+ * "(offset ^ BASE) < WINDOW_RANGE_MASK" is true exactly when offset and
+ * BASE share all bits above the window range, i.e. offset falls inside
+ * that window.
+ */
+u32 ath12k_pcic_get_window_start(struct ath12k_base *ab, u32 offset)
+{
+	u32 window_start;
+
+	/* If offset lies within DP register range, use 3rd window */
+	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK)
+		window_start = 3 * WINDOW_START;
+	/* If offset lies within CE register range, use 2nd window */
+	else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
+		window_start = 2 * WINDOW_START;
+	else
+		window_start = WINDOW_START;
+
+	return window_start;
+}
+
+/* Release every DP ext group irq and tear down its NAPI context */
+static void ath12k_pcic_free_ext_irq(struct ath12k_base *ab)
+{
+	int i, j;
+
+	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX(ab); i++) {
+		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+		for (j = 0; j < irq_grp->num_irq; j++)
+			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+
+		netif_napi_del(&irq_grp->napi);
+	}
+}
+
+/* Release all CE irqs (skipping CEs that run without interrupts) and
+ * then all DP ext group irqs
+ */
+void ath12k_pcic_free_irq(struct ath12k_base *ab)
+{
+	int i, irq_idx;
+
+	for (i = 0; i < ab->hw_params->ce_count; i++) {
+		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+			continue;
+		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
+		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
+	}
+
+	ath12k_pcic_free_ext_irq(ab);
+}
+
+/* Free all irqs and then release the platform MSI vectors backing them */
+void ath12k_pcic_free_hybrid_irq(struct ath12k_base *ab)
+{
+	struct platform_device *pdev = ab->pdev;
+
+	ath12k_pcic_free_irq(ab);
+	platform_msi_domain_free_irqs(&pdev->dev);
+}
+
+/* Re-enable the irq line of one CE pipe */
+static void ath12k_pcic_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
+{
+	u32 irq_idx;
+
+	irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
+	enable_irq(ab->irq_num[irq_idx]);
+}
+
+/* Disable the irq line of one CE pipe without waiting for in-flight
+ * handlers (callable from irq context)
+ */
+static void ath12k_pcic_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
+{
+	u32 irq_idx;
+
+	irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
+	disable_irq_nosync(ab->irq_num[irq_idx]);
+}
+
+/* Disable every CE irq that was enabled (CEs flagged CE_ATTR_DIS_INTR
+ * never had one)
+ */
+static void ath12k_pcic_ce_irqs_disable(struct ath12k_base *ab)
+{
+	int i;
+
+	for (i = 0; i < ab->hw_params->ce_count; i++) {
+		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+			continue;
+		ath12k_pcic_ce_irq_disable(ab, i);
+	}
+}
+
+/* Bottom half for a CE irq: service the pipe, then re-enable the irq
+ * that the top half disabled
+ */
+static void ath12k_pcic_ce_tasklet(struct tasklet_struct *t)
+{
+	struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
+
+	ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
+	ath12k_pcic_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
+}
+
+/* Top half for a CE irq: mask the line and defer the work to the pipe's
+ * tasklet.  Interrupts arriving before CE pipe init completes are
+ * acknowledged but ignored.
+ */
+static irqreturn_t ath12k_pcic_ce_interrupt_handler(int irq, void *arg)
+{
+	struct ath12k_ce_pipe *ce_pipe = arg;
+	struct ath12k_base *ab = ce_pipe->ab;
+
+	if (unlikely(!ab->ce_pipe_init_done))
+		return IRQ_HANDLED;
+
+	/* last interrupt received for this CE */
+	ce_pipe->timestamp = jiffies;
+
+	ath12k_pcic_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
+	tasklet_schedule(&ce_pipe->intr_tq);
+
+	return IRQ_HANDLED;
+}
+
+/* Mask all irq lines of one DP ext group (non-blocking) */
+static void ath12k_pcic_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
+{
+	int i;
+
+	for (i = 0; i < irq_grp->num_irq; i++)
+		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+/* Mask every DP ext group's irqs, then quiesce and disable its NAPI so
+ * no poll runs after return
+ */
+static void __ath12k_pcic_ext_irq_disable(struct ath12k_base *sc)
+{
+	int i;
+
+	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX(sc); i++) {
+		struct ath12k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
+
+		ath12k_pcic_ext_grp_disable(irq_grp);
+
+		napi_synchronize(&irq_grp->napi);
+		napi_disable(&irq_grp->napi);
+	}
+}
+
+/* Unmask all irq lines of one DP ext group */
+static void ath12k_pcic_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
+{
+	int i;
+
+	for (i = 0; i < irq_grp->num_irq; i++)
+		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+/* Wait until no handler of any DP ext group irq is still running */
+static void ath12k_pcic_sync_ext_irqs(struct ath12k_base *ab)
+{
+	int i, j, irq_idx;
+
+	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX(ab); i++) {
+		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+		for (j = 0; j < irq_grp->num_irq; j++) {
+			irq_idx = irq_grp->irqs[j];
+			synchronize_irq(ab->irq_num[irq_idx]);
+		}
+	}
+}
+
+/* NAPI poll handler for a DP ext irq group: service the group's SRNGs
+ * and re-enable its irqs once all pending work is done.
+ */
+static int ath12k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
+						struct ath12k_ext_irq_grp,
+						napi);
+	struct ath12k_base *ab = irq_grp->ab;
+	int work_done;
+
+	work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
+	if (work_done < budget) {
+		/* only unmask the group's irqs if NAPI really completed */
+		if (likely(napi_complete_done(napi, work_done)))
+			ath12k_pcic_ext_grp_enable(irq_grp);
+	}
+
+	/* never report more than the budget back to the NAPI core */
+	if (work_done > budget)
+		work_done = budget;
+
+	return work_done;
+}
+
+/* Top half for a DP ext group irq: mask the group and hand processing
+ * over to its NAPI poll
+ */
+static irqreturn_t ath12k_pcic_ext_interrupt_handler(int irq, void *arg)
+{
+	struct ath12k_ext_irq_grp *irq_grp = arg;
+
+	ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq);
+
+	/* last interrupt received for this group */
+	irq_grp->timestamp = jiffies;
+
+	ath12k_pcic_ext_grp_disable(irq_grp);
+
+	napi_schedule(&irq_grp->napi);
+
+	return IRQ_HANDLED;
+}
+
+/* Enable every CE irq except for CEs that run without interrupts */
+void ath12k_pcic_ce_irqs_enable(struct ath12k_base *ab)
+{
+	int i;
+
+	for (i = 0; i < ab->hw_params->ce_count; i++) {
+		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+			continue;
+		ath12k_pcic_ce_irq_enable(ab, i);
+	}
+}
+
+/* Set up DP ext irq group i on a platform (GIC) MSI vector: pick the
+ * NAPI budget for the rings the group serves, register its NAPI poll
+ * and request the irq.  Groups serving PPE-DS rings hand their MSI data
+ * to the PPE instead of using NAPI.
+ */
+static int ath12k_pcic_ext_config_gic_msi_irq(struct ath12k_base *ab,
+					      struct platform_device *pdev,
+					      struct msi_desc *msi_desc, int i)
+{
+	u32 user_base_data = 0, base_vector = 0, base_idx;
+	struct ath12k_ext_irq_grp *irq_grp;
+	int j, budget, ret = 0, num_vectors = 0;
+	u8 userpd_id;
+	u32 num_irq = 0;
+
+	userpd_id = ab->userpd_id;
+	base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
+	ret = ath12k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
+						  &user_base_data, &base_vector);
+	if (ret < 0)
+		return ret;
+
+	irq_grp = &ab->ext_irq_grp[i];
+	irq_grp->ab = ab;
+	irq_grp->grp_id = i;
+	/* NAPI requires a netdev; use a dummy one per group */
+	init_dummy_netdev(&irq_grp->napi_ndev);
+
+	if (ab->hw_params->ring_mask->rx_mon_dest[i])
+		budget = NAPI_POLL_WEIGHT;
+	else
+		budget = ath12k_napi_poll_budget;
+
+	/* Apply a reduced budget for tx completion to prioritize tx enqueue operation */
+	if (ab->hw_params->ring_mask->tx[i])
+		budget = tx_comp_budget;
+
+	/* the group gets an irq only if it serves at least one ring */
+	if (ab->hw_params->ring_mask->tx[i] ||
+	    ab->hw_params->ring_mask->rx[i] ||
+	    ab->hw_params->ring_mask->rx_err[i] ||
+	    ab->hw_params->ring_mask->rx_wbm_rel[i] ||
+	    ab->hw_params->ring_mask->reo_status[i] ||
+	    ab->hw_params->ring_mask->host2rxdma[i] ||
+	    ab->hw_params->ring_mask->ppe2tcl[i] ||
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	    ab->hw_params->ring_mask->wbm2sw6_ppeds_tx_cmpln[i] ||
+#endif
+	    ab->hw_params->ring_mask->reo2ppe[i] ||
+	    ab->hw_params->ring_mask->rx_mon_dest[i]) {
+		num_irq = 1;
+	}
+
+	irq_grp->num_irq = num_irq;
+	irq_grp->irqs[0] = base_idx + i;
+
+	for (j = 0; j < irq_grp->num_irq; j++) {
+		int irq_idx = irq_grp->irqs[j];
+		int vector = (i % num_vectors);
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+		/* PPE-DS rings are driven by the PPE hardware, not NAPI:
+		 * only record the MSI data for them
+		 */
+		if (ab->hw_params->ring_mask->ppe2tcl[i] ||
+			ab->hw_params->ring_mask->wbm2sw6_ppeds_tx_cmpln[i] ||
+			ab->hw_params->ring_mask->reo2ppe[i]) {
+			ret = ath12k_pcic_get_msi_data(ab, msi_desc, i);
+			if (ret) {
+				ath12k_err(ab, "failed to get msi data for irq %d: %d",
+						msi_desc->irq, ret);
+				return ret;
+			}
+		} else {
+#endif
+			netif_napi_add_weight(&irq_grp->napi_ndev, &irq_grp->napi,
+		       ath12k_pcic_ext_grp_napi_poll, budget);
+
+			/* name buffer is global so it outlives the request */
+			scnprintf(dp_pcic_irq_name[userpd_id][i], DP_IRQ_NAME_LEN,
+				  "pcic%u_wlan_dp_%u", userpd_id, i);
+			irq_set_status_flags(msi_desc->irq, IRQ_DISABLE_UNLAZY);
+			ret = devm_request_irq(&pdev->dev, msi_desc->irq,
+					       ath12k_pcic_ext_interrupt_handler, IRQF_SHARED,
+					       dp_pcic_irq_name[userpd_id][i], irq_grp);
+			if (ret) {
+				ath12k_err(ab, "failed request irq %d: %d\n", irq_idx, ret);
+				return ret;
+			}
+			ab->irq_num[irq_idx] = msi_desc->irq;
+			ab->ipci.dp_irq_num[vector] = msi_desc->irq;
+			ab->ipci.dp_msi_data[i] = msi_desc->msg.data;
+			/* keep masked until the group is started */
+			disable_irq_nosync(ab->irq_num[irq_idx]);
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+		}
+#endif
+	}
+	return ret;
+}
+
+/* Set up the GIC-based MSI interrupt for CE pipe @i: initialize the
+ * pipe's tasklet, build a per-userpd irq name, request the irq carried
+ * by @msi_desc, and record the Linux irq number and MSI message data
+ * for later use.  The CE irq is left disabled; it is enabled when the
+ * CE pipes are started.  Returns 0 on success or a negative errno.
+ */
+static int ath12k_pcic_config_gic_msi_irq(struct ath12k_base *ab,
+					  struct platform_device *pdev,
+					  struct msi_desc *msi_desc, int i)
+{
+	struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+	int irq_idx, ret;
+	u8 userpd_id = ab->userpd_id;
+
+	tasklet_setup(&ce_pipe->intr_tq, ath12k_pcic_ce_tasklet);
+	irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
+
+	/* NOTE(review): prefix is "pci%u" while the DP path uses "pcic%u";
+	 * confirm the asymmetry is intentional.
+	 */
+	scnprintf(ce_irq_name[userpd_id][irq_idx], DP_IRQ_NAME_LEN,
+		  "pci%u_wlan_ce_%u", userpd_id, i);
+
+	ret = devm_request_irq(&pdev->dev, msi_desc->irq,
+			       ath12k_pcic_ce_interrupt_handler, IRQF_SHARED,
+			       ce_irq_name[userpd_id][irq_idx], ce_pipe);
+	if (ret) {
+		ath12k_warn(ab, "failed to request irq %d: %d\n", irq_idx, ret);
+		return ret;
+	}
+
+	ab->irq_num[irq_idx] = msi_desc->irq;
+	ab->ipci.ce_msi_data[i] = msi_desc->msg.data;
+	ath12k_pcic_ce_irq_disable(ab, i);
+
+	return ret;
+}
+
+/* Kill the per-pipe CE tasklets for every pipe whose interrupt is in
+ * use; pipes flagged CE_ATTR_DIS_INTR never had a tasklet scheduled.
+ */
+static void ath12k_pcic_kill_tasklets(struct ath12k_base *ab)
+{
+	int i;
+
+	for (i = 0; i < ab->hw_params->ce_count; i++) {
+		struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+			continue;
+
+		tasklet_kill(&ce_pipe->intr_tq);
+	}
+}
+
+/* Wait for any in-flight CE interrupt handlers to finish, for every CE
+ * pipe whose interrupt is in use.
+ */
+static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab)
+{
+	int i;
+	int irq_idx;
+
+	for (i = 0; i < ab->hw_params->ce_count; i++) {
+		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+			continue;
+
+		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
+		synchronize_irq(ab->irq_num[irq_idx]);
+	}
+}
+
+/* Quiesce CE interrupt processing: disable all CE irqs, wait for any
+ * running handlers to return, then kill the CE tasklets.
+ */
+void ath12k_pcic_ce_irq_disable_sync(struct ath12k_base *ab)
+{
+	ath12k_pcic_ce_irqs_disable(ab);
+	ath12k_pci_sync_ce_irqs(ab);
+	ath12k_pcic_kill_tasklets(ab);
+}
+
+/* Look up the CE pipes assigned to @service_id in the hw-specific
+ * service-to-CE map.  On success *ul_pipe/*dl_pipe hold the uplink
+ * (host->target) and downlink (target->host) pipe numbers;
+ * PIPEDIR_INOUT sets both to the same pipe.  Returns -ENOENT (after a
+ * WARN) if either direction is not found for the service.
+ */
+int ath12k_pcic_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
+				    u8 *ul_pipe, u8 *dl_pipe)
+{
+	const struct service_to_pipe *entry;
+	bool ul_set = false, dl_set = false;
+	int i;
+
+	for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
+		entry = &ab->hw_params->svc_to_ce_map[i];
+
+		if (__le32_to_cpu(entry->service_id) != service_id)
+			continue;
+
+		switch (__le32_to_cpu(entry->pipedir)) {
+		case PIPEDIR_NONE:
+			break;
+		case PIPEDIR_IN:
+			WARN_ON(dl_set);
+			*dl_pipe = __le32_to_cpu(entry->pipenum);
+			dl_set = true;
+			break;
+		case PIPEDIR_OUT:
+			WARN_ON(ul_set);
+			*ul_pipe = __le32_to_cpu(entry->pipenum);
+			ul_set = true;
+			break;
+		case PIPEDIR_INOUT:
+			WARN_ON(dl_set);
+			WARN_ON(ul_set);
+			*dl_pipe = __le32_to_cpu(entry->pipenum);
+			*ul_pipe = __le32_to_cpu(entry->pipenum);
+			dl_set = true;
+			ul_set = true;
+			break;
+		}
+	}
+
+	if (WARN_ON(!ul_set || !dl_set))
+		return -ENOENT;
+
+	return 0;
+}
+
+/* Return the Linux irq number backing MSI @vector. */
+int ath12k_pcic_get_msi_irq(struct ath12k_base *ab, unsigned int vector)
+{
+	return ab->msi.irqs[vector];
+}
+
+/* Look up the block of MSI vectors reserved for @user_name ("CE",
+ * "DP", ...) in the platform MSI config.  On success fills the vector
+ * count, the first MSI data value (base vector plus the endpoint's
+ * base data) and the base vector index, and returns 0; returns -EINVAL
+ * if @user_name is not present in the config.
+ */
+int ath12k_pcic_get_user_msi_assignment(struct ath12k_base *ab, char *user_name,
+					int *num_vectors, u32 *user_base_data,
+					u32 *base_vector)
+{
+	const struct ath12k_msi_config *msi_config = ab->msi.config;
+	int idx;
+
+	for (idx = 0; idx < msi_config->total_users; idx++) {
+		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
+			*num_vectors = msi_config->users[idx].num_vectors;
+			*user_base_data = msi_config->users[idx].base_vector
+				+ ab->msi.ep_base_data;
+			*base_vector = msi_config->users[idx].base_vector;
+
+			ath12k_dbg(ab, ATH12K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
+				   user_name, *num_vectors, *user_base_data,
+				   *base_vector);
+
+			return 0;
+		}
+	}
+
+	ath12k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
+
+	return -EINVAL;
+}
+
+/* Report the MSI target address previously captured from the endpoint. */
+void ath12k_pcic_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo,
+				 u32 *msi_addr_hi)
+{
+	*msi_addr_lo = ab->msi.addr_lo;
+	*msi_addr_hi = ab->msi.addr_hi;
+}
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+/* Request the PPE-DS irqs (PPE2TCL, REO2PPE, WBM2SW tx completion)
+ * whose Linux irq numbers were stashed earlier in ab->dp.ppeds_irq[].
+ * Each irq is requested with a per-node name and left disabled; it is
+ * enabled later through ath12k_pcic_ppeds_irq_enable().  Other ring
+ * types are accepted and ignored.  Returns 0 or a negative errno.
+ */
+int ath12k_pcic_ppeds_register_interrupts(struct ath12k_base *ab, int type, int vector,
+					int ring_num)
+{
+	/* initialized so the "stashed irq number missing" goto paths below
+	 * do not return an uninitialized value
+	 */
+	int ret = -EINVAL, irq;
+	u8 bus_id = ab->userpd_id;
+	struct platform_device *pdev = ab->pdev;
+
+	if (ab->ppeds_node_idx == -1) {
+		ath12k_err(ab, "invalid ppeds_node_idx in ppeds_register_interrupts\n");
+		return -EINVAL;
+	}
+
+	if (type == HAL_PPE2TCL) {
+		irq = ab->dp.ppeds_irq[PPEDS_IRQ_PPE2TCL];
+		if (!irq)
+			goto irq_fail;
+		irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+		/* NOTE(review): sizeof() spans the whole ppeds_irq_name array,
+		 * not one entry - confirm each entry is a full-size buffer.
+		 */
+		snprintf(&ab->dp.ppeds_irq_name[PPEDS_IRQ_PPE2TCL], sizeof(ab->dp.ppeds_irq_name),
+			 "pci%d_ppe2tcl_%d", bus_id, ab->ppeds_node_idx);
+		ret = devm_request_irq(&pdev->dev, irq,  ath12k_ds_ppe2tcl_irq_handler,
+				  IRQF_SHARED,
+			    ab->dp.ppeds_irq_name[PPEDS_IRQ_PPE2TCL], (void *)ath12k_dp_get_ppe_ds_ctxt(ab));
+		if (ret)
+			goto irq_fail;
+		ab->dp.ppeds_irq[PPEDS_IRQ_PPE2TCL] = irq;
+	} else if (type == HAL_REO2PPE) {
+		irq = ab->dp.ppeds_irq[PPEDS_IRQ_REO2PPE];
+		if (!irq)
+			goto irq_fail;
+		irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+		snprintf(&ab->dp.ppeds_irq_name[PPEDS_IRQ_REO2PPE], sizeof(ab->dp.ppeds_irq_name),
+			 "pci%d_reo2ppe_%d", bus_id, ab->ppeds_node_idx);
+		ret = devm_request_irq(&pdev->dev, irq,  ath12k_ds_reo2ppe_irq_handler,
+				  IRQF_SHARED,
+				  ab->dp.ppeds_irq_name[PPEDS_IRQ_REO2PPE], (void *)ath12k_dp_get_ppe_ds_ctxt(ab));
+		if (ret)
+			goto irq_fail;
+		ab->dp.ppeds_irq[PPEDS_IRQ_REO2PPE] = irq;
+	} else if (type == HAL_WBM2SW_RELEASE && ring_num == HAL_WBM2SW_PPEDS_TX_CMPLN_RING_NUM) {
+		irq = ab->dp.ppeds_irq[PPEDS_IRQ_PPE_WBM2SW_REL];
+		if (!irq)
+			goto irq_fail;
+		irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+		snprintf(&ab->dp.ppeds_irq_name[PPEDS_IRQ_PPE_WBM2SW_REL], sizeof(ab->dp.ppeds_irq_name),
+			 "pci%d_ppe_wbm_rel_%d", bus_id, ab->ppeds_node_idx);
+		ret = devm_request_irq(&pdev->dev, irq,  ath12k_dp_ppeds_handle_tx_comp,
+				  IRQF_SHARED,
+				  ab->dp.ppeds_irq_name[PPEDS_IRQ_PPE_WBM2SW_REL], (void *)ab);
+		if (ret)
+			goto irq_fail;
+		ab->dp.ppeds_irq[PPEDS_IRQ_PPE_WBM2SW_REL] = irq;
+	} else {
+		return 0;
+	}
+	disable_irq_nosync(irq);
+
+	return 0;
+
+irq_fail:
+	return ret;
+}
+
+/* Disable the PPE-DS irq of the given @type without waiting for a
+ * running handler to complete.
+ */
+void ath12k_pcic_ppeds_irq_disable(struct ath12k_base *ab, enum ppeds_irq_type type)
+{
+	disable_irq_nosync(ab->dp.ppeds_irq[type]);
+}
+
+/* Re-enable the PPE-DS irq of the given @type. */
+void ath12k_pcic_ppeds_irq_enable(struct ath12k_base *ab, enum ppeds_irq_type type)
+{
+	enable_irq(ab->dp.ppeds_irq[type]);
+}
+
+/* Disable and release the three PPE-DS irqs.  They were requested with
+ * devm_request_irq(), so they must be released with devm_free_irq();
+ * a bare free_irq() would leave a stale devres entry that the device's
+ * managed teardown would later try to free again.
+ */
+void ath12k_pcic_ppeds_free_interrupts(struct ath12k_base *ab)
+{
+	struct device *dev = &ab->pdev->dev;
+
+	disable_irq_nosync(ab->dp.ppeds_irq[PPEDS_IRQ_PPE2TCL]);
+	devm_free_irq(dev, ab->dp.ppeds_irq[PPEDS_IRQ_PPE2TCL],
+		      ath12k_dp_get_ppe_ds_ctxt(ab));
+
+	disable_irq_nosync(ab->dp.ppeds_irq[PPEDS_IRQ_REO2PPE]);
+	devm_free_irq(dev, ab->dp.ppeds_irq[PPEDS_IRQ_REO2PPE],
+		      ath12k_dp_get_ppe_ds_ctxt(ab));
+
+	disable_irq_nosync(ab->dp.ppeds_irq[PPEDS_IRQ_PPE_WBM2SW_REL]);
+	devm_free_irq(dev, ab->dp.ppeds_irq[PPEDS_IRQ_PPE_WBM2SW_REL], ab);
+}
+
+/* No-op handler used only to latch MSI address/data for PPE-DS rings. */
+irqreturn_t ath12k_pcic_dummy_irq_handler(int irq, void *context)
+{
+	return IRQ_HANDLED;
+}
+
+/* Probe the MSI message data for the PPE-DS ring mapped to group @i.
+ * On multi-platform devices the MSI address/data are only latched once
+ * an irq has been requested, so request a dummy handler on
+ * @msi_desc->irq, record the message data (consumed later by
+ * hal_srng_init()/srng MSI setup), then release the irq again.
+ * Returns 0 on success, -EINVAL if @i maps to no PPE-DS ring or the
+ * request fails.
+ */
+int ath12k_pcic_get_msi_data(struct ath12k_base *ab, struct msi_desc *msi_desc,
+		int i)
+{
+	int ret, type;
+	struct platform_device *pdev = ab->pdev;
+
+	if (ab->hw_params->ring_mask->ppe2tcl[i])
+		type = PPEDS_IRQ_PPE2TCL;
+	else if (ab->hw_params->ring_mask->reo2ppe[i])
+		type = PPEDS_IRQ_REO2PPE;
+	else if (ab->hw_params->ring_mask->wbm2sw6_ppeds_tx_cmpln[i])
+		type = PPEDS_IRQ_PPE_WBM2SW_REL;
+	else
+		return -EINVAL;
+
+	ab->dp.ppeds_irq[type] = msi_desc->irq;
+	ret = devm_request_irq(&pdev->dev, msi_desc->irq,
+				ath12k_pcic_dummy_irq_handler, IRQF_SHARED,
+				"dummy", (void *)ab);
+
+	if (ret)
+		return -EINVAL;
+
+	ab->ipci.dp_msi_data[i] = msi_desc->msg.data;
+	disable_irq_nosync(ab->dp.ppeds_irq[type]);
+	/* the irq was devm-requested above; release it through devm_free_irq()
+	 * so no stale devres entry is left behind
+	 */
+	devm_free_irq(&pdev->dev, ab->dp.ppeds_irq[type], (void *)ab);
+
+	return 0;
+}
+#endif
+
+/* Enable NAPI and the underlying irq for every DP ext irq group. */
+void ath12k_pcic_ext_irq_enable(struct ath12k_base *ab)
+{
+	int i;
+
+	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX(ab); i++) {
+		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+		napi_enable(&irq_grp->napi);
+		ath12k_pcic_ext_grp_enable(irq_grp);
+	}
+}
+
+/* Disable all DP ext irq groups and wait for running handlers. */
+void ath12k_pcic_ext_irq_disable(struct ath12k_base *ab)
+{
+	__ath12k_pcic_ext_irq_disable(ab);
+	ath12k_pcic_sync_ext_irqs(ab);
+}
+
+/* Stop CE processing: quiesce CE irqs/tasklets, then clean the pipes. */
+void ath12k_pcic_stop(struct ath12k_base *ab)
+{
+	ath12k_pcic_ce_irq_disable_sync(ab);
+	ath12k_ce_cleanup_pipes(ab);
+}
+
+/* Start CE processing: enable CE irqs and post rx buffers.  Always
+ * returns 0.
+ */
+int ath12k_pcic_start(struct ath12k_base *ab)
+{
+	ath12k_pcic_ce_irqs_enable(ab);
+	ath12k_ce_rx_post_buf(ab);
+
+	return 0;
+}
+
+/* Read a 32-bit register at @offset through the indirect access
+ * window; the window base for the offset is resolved first and the low
+ * WINDOW_RANGE_MASK bits index into it.
+ */
+u32 ath12k_pcic_ipci_read32(struct ath12k_base *ab, u32 offset)
+{
+	u32 val, window_start;
+
+	window_start = ath12k_pcic_get_window_start(ab, offset);
+	val = ioread32(ab->mem + window_start +
+		       (offset & WINDOW_RANGE_MASK));
+
+	return val;
+}
+
+/* Write @value to the 32-bit register at @offset through the indirect
+ * access window (see ath12k_pcic_ipci_read32()).
+ */
+void ath12k_pcic_ipci_write32(struct ath12k_base *ab, u32 offset, u32 value)
+{
+	u32 window_start;
+
+	window_start = ath12k_pcic_get_window_start(ab, offset);
+	iowrite32(value, ab->mem + window_start +
+		  (offset & WINDOW_RANGE_MASK));
+}
+
+/* Set up the DP "ext" irq groups for a PCI device: one NAPI context
+ * per group (reduced budget for tx-completion groups, default weight
+ * for rx-monitor groups), at most one MSI vector per group assigned
+ * round-robin from the "DP" vector block.  Irqs are named per PCI
+ * domain and left disabled until ath12k_pcic_ext_irq_enable() runs.
+ */
+static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
+{
+	int i, j, ret, num_vectors = 0;
+	u32 user_base_data = 0, base_vector = 0, base_idx, budget;
+	struct ath12k_pci *ar_pci = (struct ath12k_pci *)ab->drv_priv;
+
+	base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
+	ret = ath12k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
+						  &user_base_data, &base_vector);
+	if (ret < 0)
+		return ret;
+
+	if (ath12k_napi_poll_budget < NAPI_POLL_WEIGHT)
+		ath12k_napi_poll_budget = NAPI_POLL_WEIGHT;
+
+	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX(ab); i++) {
+		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+		u32 num_irq = 0;
+
+		irq_grp->ab = ab;
+		irq_grp->grp_id = i;
+		init_dummy_netdev(&irq_grp->napi_ndev);
+
+		if (ab->hw_params->ring_mask->rx_mon_dest[i])
+			budget = NAPI_POLL_WEIGHT;
+		else
+			budget = ath12k_napi_poll_budget;
+
+		/* Apply a reduced budget for tx completion to prioritize tx
+		 * enqueue operation
+		 */
+		if (ab->hw_params->ring_mask->tx[i])
+			budget = tx_comp_budget;
+		 netif_napi_add_weight(&irq_grp->napi_ndev, &irq_grp->napi,
+				       ath12k_pcic_ext_grp_napi_poll, budget);
+
+		/* a group only gets an irq if at least one ring is mapped
+		 * to it in the hw ring masks
+		 */
+		if (ab->hw_params->ring_mask->tx[i] ||
+		    ab->hw_params->ring_mask->rx[i] ||
+		    ab->hw_params->ring_mask->rx_err[i] ||
+		    ab->hw_params->ring_mask->rx_wbm_rel[i] ||
+		    ab->hw_params->ring_mask->reo_status[i] ||
+		    ab->hw_params->ring_mask->host2rxdma[i] ||
+		    ab->hw_params->ring_mask->ppe2tcl[i] ||
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+		    ab->hw_params->ring_mask->wbm2sw6_ppeds_tx_cmpln[i] ||
+#endif
+		    ab->hw_params->ring_mask->reo2ppe[i] ||
+		    ab->hw_params->ring_mask->rx_mon_dest[i]) {
+			num_irq = 1;
+		}
+
+		irq_grp->num_irq = num_irq;
+		irq_grp->irqs[0] = base_idx + i;
+
+		for (j = 0; j < irq_grp->num_irq; j++) {
+			int irq_idx = irq_grp->irqs[j];
+			/* DP vectors are shared round-robin across groups */
+			int vector = (i % num_vectors) + base_vector;
+			int irq = ath12k_hif_get_msi_irq(ab, vector);
+			u8 bus_id = pci_domain_nr(ar_pci->pdev->bus);
+
+			/* clamp out-of-range domains into the name table */
+			if (bus_id > ATH12K_MAX_PCI_DOMAINS) {
+				ath12k_dbg(ab, ATH12K_DBG_PCI, "bus_id:%d\n",
+					    bus_id);
+				bus_id = ATH12K_MAX_PCI_DOMAINS;
+			}
+
+			ab->irq_num[irq_idx] = irq;
+
+			ath12k_dbg(ab, ATH12K_DBG_PCI, "irq:%d group:%d\n", irq, i);
+
+			scnprintf(dp_irq_name[bus_id][i], DP_IRQ_NAME_LEN,
+				  "pci%u_wlan_dp_%u", bus_id, i);
+			ath12k_dbg(ab, ATH12K_DBG_PCI, "PCI bus id: pci:%d IRQ Name:%s\n",
+				   bus_id, dp_irq_name[bus_id][i]);
+			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+			ret = request_irq(irq, ath12k_pcic_ext_interrupt_handler,
+					  IRQF_SHARED,
+					  dp_irq_name[bus_id][i], irq_grp);
+			if (ret) {
+				ath12k_err(ab, "failed request irq %d: %d\n",
+					   vector, ret);
+				return ret;
+			}
+
+			disable_irq_nosync(ab->irq_num[irq_idx]);
+		}
+	}
+
+	return 0;
+}
+
+/* Request the CE irqs for a PCI device - one MSI vector per
+ * interrupt-enabled CE pipe, assigned round-robin from the "CE" MSI
+ * vector block - then configure the DP ext irq groups.  Every irq
+ * starts disabled.  Returns 0 or a negative errno.
+ */
+int ath12k_pcic_config_irq(struct ath12k_base *ab)
+{
+	struct ath12k_ce_pipe *ce_pipe;
+	u32 msi_data_start;
+	u32 msi_data_count, msi_data_idx;
+	u32 msi_irq_start;
+	unsigned int msi_data;
+	int irq, i, ret, irq_idx;
+
+	ret = ath12k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count,
+						  &msi_data_start, &msi_irq_start);
+	if (ret)
+		return ret;
+
+	/* Configure CE irqs */
+	for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
+		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+			continue;
+
+		msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
+		irq = ath12k_hif_get_msi_irq(ab, msi_data);
+		ce_pipe = &ab->ce.ce_pipe[i];
+
+		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
+
+		tasklet_setup(&ce_pipe->intr_tq, ath12k_pcic_ce_tasklet);
+
+		ret = request_irq(irq, ath12k_pcic_ce_interrupt_handler,
+				  IRQF_SHARED, irq_name[irq_idx],
+				  ce_pipe);
+		if (ret) {
+			ath12k_err(ab, "failed to request irq %d: %d\n",
+				   irq_idx, ret);
+			return ret;
+		}
+
+		ab->irq_num[irq_idx] = irq;
+		msi_data_idx++;
+
+		ath12k_pcic_ce_irq_disable(ab, i);
+	}
+
+	ret = ath12k_pci_ext_irq_config(ab);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/* platform-MSI write callback: cache the composed MSI message in the
+ * descriptor so irq setup can later read the address/data pair.
+ */
+static void ath12k_msi_msg_handler(struct msi_desc *desc, struct msi_msg *msg)
+{
+	desc->msg.address_lo = msg->address_lo;
+	desc->msg.address_hi = msg->address_hi;
+	desc->msg.data = msg->data;
+}
+
+/* Hybrid-bus (platform MSI) irq setup: allocate the platform MSI
+ * vectors and walk the allocated descriptors twice, assigning the "CE"
+ * block of vectors to CE pipes and the remaining vectors to the DP ext
+ * irq groups.  The endpoint MSI address and base data are captured
+ * from the first CE descriptor.  Only valid for USERPD_1/USERPD_2.
+ * TODO: fold the two descriptor walks into one loop.
+ */
+int ath12k_pcic_config_hybrid_irq(struct ath12k_base *ab)
+{
+	int ret;
+	struct platform_device *pdev = ab->pdev;
+	struct msi_desc *msi_desc;
+	bool ce_done = false;
+	int user_base_data, base_vector, num_vectors = 0;
+	int i = 0, j = 0, k = 0;
+
+	if (ab->userpd_id != USERPD_1 &&
+	    ab->userpd_id != USERPD_2) {
+		ath12k_warn(ab, "ath12k userpd invalid %d\n", ab->userpd_id);
+		return -ENODEV;
+	}
+
+	ab->msi.config = &ath12k_msi_config[ATH12K_MSI_CONFIG_IPCI];
+
+	ret = platform_msi_domain_alloc_irqs(&pdev->dev, ab->msi.config->total_vectors,
+					     ath12k_msi_msg_handler);
+
+	if (ret) {
+		ath12k_warn(ab, "failed to alloc irqs %d ab %pM\n", ret, ab);
+		return ret;
+	}
+
+	/* First pass: map the "CE" vector block to interrupt-enabled CE
+	 * pipes; later vectors go to DP ext irq groups.
+	 */
+	msi_for_each_desc(msi_desc, &pdev->dev, MSI_DESC_ALL) {
+		ret = ath12k_pcic_get_user_msi_assignment(ab, "CE", &num_vectors,
+							  &user_base_data, &base_vector);
+		if (ret < 0)
+			return ret;
+
+		if (i < base_vector) {
+			i++;
+			continue;
+		}
+		if (j < ab->hw_params->ce_count && i < (num_vectors + base_vector)) {
+			/* skip CE pipes that run without interrupts */
+			while (j < ab->hw_params->ce_count &&
+			       ath12k_ce_get_attr_flags(ab, j) & CE_ATTR_DIS_INTR) {
+				++j;
+			}
+
+			ret = ath12k_pcic_config_gic_msi_irq(ab, pdev, msi_desc, j);
+			if (ret) {
+				ath12k_warn(ab, "failed to request irq %d\n", ret);
+				return ret;
+			}
+
+			if (j == 0) {
+				/* first CE descriptor carries the endpoint
+				 * MSI address and base data
+				 */
+				ab->msi.addr_lo = msi_desc->msg.address_lo;
+				ab->msi.addr_hi = msi_desc->msg.address_hi;
+				ab->msi.ep_base_data = msi_desc->msg.data;
+				ath12k_info(ab, "msi ep base data %d\n", ab->msi.ep_base_data);
+			}
+
+			j++;
+			if (j != ab->hw_params->ce_count)
+				ce_done = false;
+
+		} else {
+			ret = ath12k_pcic_ext_config_gic_msi_irq(ab, pdev, msi_desc, k);
+			if (ret) {
+				ath12k_warn(ab, "failed to config ext msi irq %d\n", ret);
+				return ret;
+			}
+			k++;
+		}
+		i++;
+	}
+
+	i = 0;
+
+	/* Second pass: finish any CE pipes and DP groups not covered in
+	 * the first walk.
+	 */
+	msi_for_each_desc(msi_desc, &pdev->dev, MSI_DESC_ALL) {
+		ret = ath12k_pcic_get_user_msi_assignment(ab, "CE", &num_vectors,
+							  &user_base_data, &base_vector);
+		if (ret < 0)
+			return ret;
+
+		if (i < base_vector) {
+			i++;
+			continue;
+		}
+		if (i < (num_vectors + base_vector)) {
+			if (!ce_done && j < ab->hw_params->ce_count) {
+				while (j < ab->hw_params->ce_count &&
+				       ath12k_ce_get_attr_flags(ab, j) & CE_ATTR_DIS_INTR) {
+					j++;
+				}
+				if (j == ab->hw_params->ce_count) {
+					ce_done = true;
+					i++;
+					continue;
+				}
+
+				ret = ath12k_pcic_config_gic_msi_irq(ab, pdev, msi_desc, j);
+				if (ret) {
+					ath12k_warn(ab, "failed to request irq %d\n", ret);
+					return ret;
+				}
+				j++;
+			}
+		} else {
+			if (k >= ATH12K_EXT_IRQ_GRP_NUM_MAX(ab))
+				break;
+			ret = ath12k_pcic_ext_config_gic_msi_irq(ab, pdev, msi_desc, k);
+			if (ret) {
+				ath12k_warn(ab, "failed to config ext msi irq %d\n", ret);
+				return ret;
+			}
+			k++;
+		}
+		i++;
+	}
+	/* signal waiters that GIC MSI setup is complete */
+	ab->ipci.gic_enabled = 1;
+	wake_up(&ab->ipci.gic_msi_waitq);
+
+	return ret;
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/pcic.h	2024-01-19 17:01:19.869847139 +0100
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH12K_PCIC_H
+#define _ATH12K_PCIC_H
+
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include "core.h"
+#include "hif.h"
+#include <linux/msi.h>
+
+#define ATH12K_PCI_IRQ_CE0_OFFSET		3
+
+#define WINDOW_ENABLE_BIT		0x40000000
+#define WINDOW_REG_ADDRESS		0x310c
+#define WINDOW_VALUE_MASK		GENMASK(24, 19)
+#define WINDOW_START			0x80000
+#define WINDOW_RANGE_MASK		GENMASK(18, 0)
+
+#define ATH12K_MAX_PCI_DOMAINS          0x5
+#define DP_IRQ_NAME_LEN 20
+
+static const struct ath12k_msi_config ath12k_msi_config[] = {
+	{
+		/* MSI spec expects number of interrupts to be a power of 2 */
+		.total_vectors = 32,
+		.total_users = 3,
+		.users = (struct ath12k_msi_user[]) {
+			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+			{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
+			{ .name = "DP", .num_vectors = 16, .base_vector = 8 },
+		},
+	},
+	{
+		/* In DP, we use num_vectors as 9 (6 REGULAR DP INTERRUPTS + 3 PPEDS
+		 * INTERRUPTS)
+		 */
+		.total_vectors = 15,
+		.total_users = 3,
+		.users = (struct ath12k_msi_user[]) {
+			{ .name = "QDSS", .num_vectors = 1, .base_vector = 0 },
+			{ .name = "CE", .num_vectors = 5, .base_vector = 1 },
+			{ .name = "DP", .num_vectors = 9, .base_vector = 6 },
+		},
+	},
+};
+
+int ath12k_pcic_start(struct ath12k_base *ab);
+void ath12k_pcic_stop(struct ath12k_base *ab);
+void ath12k_pcic_ipci_write32(struct ath12k_base *ab, u32 offset, u32 value);
+u32 ath12k_pcic_ipci_read32(struct ath12k_base *ab, u32 offset);
+int ath12k_pcic_get_user_msi_assignment(struct ath12k_base *ab, char *user_name,
+					int *num_vectors, u32 *user_base_data,
+					u32 *base_vector);
+void ath12k_pcic_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo,
+				 u32 *msi_addr_hi);
+void ath12k_pcic_config_static_window(struct ath12k_base *ab);
+int ath12k_pcic_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
+				    u8 *ul_pipe, u8 *dl_pipe);
+void ath12k_pcic_free_hybrid_irq(struct ath12k_base *ab);
+void ath12k_pcic_cmem_write32(struct ath12k_base *ab, u32 addr,
+			      u32 value);
+u32 ath12k_pcic_cmem_read32(struct ath12k_base *ab, u32 addr);
+void ath12k_pcic_ext_irq_enable(struct ath12k_base *ab);
+void ath12k_pcic_ext_irq_disable(struct ath12k_base *ab);
+u32 ath12k_pcic_get_window_start(struct ath12k_base *ab, u32 offset);
+void ath12k_pcic_ce_irqs_enable(struct ath12k_base *ab);
+void ath12k_pcic_ce_irq_disable_sync(struct ath12k_base *ab);
+int ath12k_pcic_get_msi_irq(struct ath12k_base *ab, unsigned int vector);
+int ath12k_pcic_config_hybrid_irq(struct ath12k_base *ab);
+int ath12k_pcic_config_irq(struct ath12k_base *ab);
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+int ath12k_pcic_ppeds_register_interrupts(struct ath12k_base *ab, int type,
+					int vector, int ring_num);
+void ath12k_pcic_ppeds_free_interrupts(struct ath12k_base *ab);
+void ath12k_pcic_ppeds_irq_enable(struct ath12k_base *ab, enum ppeds_irq_type type);
+void ath12k_pcic_ppeds_irq_disable(struct ath12k_base *ab, enum ppeds_irq_type type);
+int ath12k_pcic_get_msi_data(struct ath12k_base *ab, struct msi_desc *msi_desc, int i);
+#endif
+void ath12k_pcic_free_irq(struct ath12k_base *ab);
+#endif /* _ATH12K_PCIC_H */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/pktlog.h	2024-03-18 14:40:14.859741552 +0100
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _PKTLOG_H_
+#define _PKTLOG_H_
+
+#ifdef CONFIG_ATH12K_PKTLOG
+#define CUR_PKTLOG_VER          10010  /* Packet log version */
+#define PKTLOG_MAGIC_NUM        7735225
+#define PKTLOG_NEW_MAGIC_NUM	2453506
+#define PKTLOG_MAGIC_NUM_FW_VERSION_SUPPORT 0xDECDF1F0
+
+/* Masks for setting pktlog events filters */
+#define ATH_PKTLOG_RX		0x000000001
+#define ATH_PKTLOG_TX		0x000000002
+#define ATH_PKTLOG_RCFIND	0x000000004
+#define ATH_PKTLOG_RCUPDATE	0x000000008
+
+#define ATH_DEBUGFS_PKTLOG_SIZE_DEFAULT (8 * 1024 * 1024)
+#define ATH_PKTLOG_FILTER_DEFAULT (ATH_PKTLOG_TX | ATH_PKTLOG_RX | \
+				   ATH_PKTLOG_RCFIND | ATH_PKTLOG_RCUPDATE)
+
+enum {
+	PKTLOG_FLG_FRM_TYPE_LOCAL_S = 0,
+	PKTLOG_FLG_FRM_TYPE_REMOTE_S,
+	PKTLOG_FLG_FRM_TYPE_CLONE_S,
+	PKTLOG_FLG_FRM_TYPE_UNKNOWN_S
+};
+
+struct ath12k_pktlog_hdr_arg {
+	u16 log_type;
+	u8 *payload;
+	u16 payload_size;
+	u8 *pktlog_hdr;
+};
+
+struct ath12k_pl_fw_info {
+	u32 pdev_id;
+	u8 software_image[40];
+	u8 chip_info[40];
+	u32 pktlog_defs_json_version;
+} __packed;
+
+struct ath12k_pktlog_decode_info {
+	u8 software_image[40];
+	u8 chip_info[40];
+	u32 pktlog_defs_json_version;
+};
+
+struct ath12k_pktlog_bufhdr {
+	u32 magic_num;  /* Used by post processing scripts */
+	u32 version;    /* Set to CUR_PKTLOG_VER */
+	u8 software_image[40];
+	u8 chip_info[40];
+	u32 pktlog_defs_json_version;
+};
+
+struct ath12k_pktlog_buf {
+	struct ath12k_pktlog_bufhdr bufhdr;
+	int rd_offset;
+	int wr_offset;
+	/* flexible array member; zero-length arrays are deprecated */
+	char log_data[];
+};
+
+struct ath12k_pktlog {
+	struct ath12k_pktlog_buf *buf;
+	u32 filter;
+	u32 buf_size;           /* Size of buffer in bytes */
+	spinlock_t lock;
+	u8 hdr_size;
+	u8 hdr_size_field_offset;
+	u32 fw_version_record;
+	u32 invalid_decode_info;
+};
+
+#endif /* CONFIG_ATH12K_PKTLOG */
+#endif /* _PKTLOG_H_ */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/ppe.h	2024-03-18 14:40:14.859741552 +0100
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_PPE_H
+#define ATH12K_PPE_H
+
+#define ATH12K_PPEDS_DEFAULT_POOL_ID 0
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+
+#define ATH12K_DP_PPEDS_NAPI_DONE_BIT	1
+#define ATH12K_DP_PPEDS_TX_COMP_NAPI_BIT	2
+
+/* start indices handed back when registering the PPE-DS rings */
+struct dp_ppe_ds_idxs {
+	u32 ppe2tcl_start_idx;
+	u32 reo2ppe_start_idx;
+};
+
+void ath12k_dp_srng_ppeds_cleanup(struct ath12k_base *ab);
+int ath12k_dp_srng_ppeds_setup(struct ath12k_base *ab);
+int ath12k_dp_ppeds_register_soc(struct ath12k_dp *dp,
+				 struct dp_ppe_ds_idxs *idx);
+void ath12k_dp_ppeds_stop(struct ath12k_base *ab);
+int ath12k_dp_ppeds_start(struct ath12k_base *ab);
+int ath12k_ppeds_detach(struct ath12k_base *ab);
+int ath12k_ppeds_attach(struct ath12k_base *ab);
+int ath12k_mac_op_ppeds_attach_vdev(struct ath12k_link_vif *arvif,
+				    void *vp_arg, int *ppe_vp_num,
+				    struct ieee80211_ppe_vp_ds_params *vp_params);
+void ath12k_mac_op_ppeds_detach_vdev(struct ath12k_link_vif *arvif,
+				     struct ieee80211_ppe_vp_ds_params *vp_params);
+void ath12k_dp_peer_ppeds_route_setup(struct ath12k *ar, struct ath12k_link_vif *arvif,
+				      struct ath12k_link_sta *arsta);
+int ath12k_ppeds_get_handle(struct ath12k_base *ab);
+void *ath12k_dp_get_ppe_ds_ctxt(struct ath12k_base *ab);
+irqreturn_t ath12k_ds_ppe2tcl_irq_handler(int irq, void *ctxt);
+irqreturn_t ath12k_ds_reo2ppe_irq_handler(int irq, void *ctxt);
+irqreturn_t ath12k_dp_ppeds_handle_tx_comp(int irq, void *ctxt);
+void ath12k_dp_ppeds_update_vp_entry(struct ath12k *ar,
+				     struct ath12k_link_vif *arvif);
+void ath12k_dp_ppeds_service_enable_disable(struct ath12k_base *ab,
+					    bool enable);
+void ath12k_dp_ppeds_interrupt_stop(struct ath12k_base *ab);
+void ath12k_dp_ppeds_interrupt_start(struct ath12k_base *ab);
+void ath12k_ppeds_partner_link_start_queues(struct ath12k *ar);
+void ath12k_ppeds_partner_link_stop_queues(struct ath12k *ar);
+#endif /* CONFIG_ATH12K_PPE_DS_SUPPORT */
+#endif /* ATH12K_PPE_H */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/sawf.h	2024-03-18 14:40:14.859741552 +0100
@@ -0,0 +1,902 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_SAWF_H
+#define ATH12K_SAWF_H
+
+struct hal_tx_status;
+struct stats_config;
+struct telemetry_sawftx_stats;
+struct telemetry_sawfdelay_stats;
+
+#ifdef CONFIG_ATH12K_SAWF
+#include <ath/ath_sawf.h>
+
+#define ATH12K_MAC_ADDR_SIZE	6
+#define DP_SAWF_INVALID_PARAM	-1
+
+#define ATH12K_SAWF_SVC_CLASS_MIN 1
+#define ATH12K_SAWF_SVC_CLASS_MAX 128
+#define ATH12K_SAWF_MAX_TID_SUPPORT 8
+#define MAX_Q_PER_TID 8
+/**
+ ** SAWF_metadata related information.
+ **/
+#define SAWF_VALID_TAG 0xAA
+#define SAWF_TAG_SHIFT	0x18
+
+/* Skb mark for SAWF */
+#define SAWF_MSDUQ_ID			GENMASK(5, 0)
+#define SAWF_PEER_ID			GENMASK(15, 6)
+#define SAWF_SERVICE_CLASS_ID		GENMASK(23, 16)
+#define SAWF_TAG_ID			GENMASK(31, 24)
+#define SAWF_NW_DELAY			GENMASK(23, 6)
+#define SAWF_NW_DELAY_SHIFT		0x6
+#define SAWF_NW_DELAY_MAX		0x3FFFF
+
+#define TID_FROM_Q_ID			GENMASK(2, 0)
+#define FLOW_OVERRIDE_FROM_Q_ID		BIT(3)
+#define WHO_CLASSIFY_INFO_FROM_Q_ID	GENMASK(5, 4)
+
+#define SAWF_PEER_MSDUQ_INVALID	0xFFFF
+#define SAWF_MSDUQ_ID_INVALID	0x3F
+#define SAWF_MSDUQ_ID		GENMASK(5, 0)
+#define SAWF_PEER_ID		GENMASK(15, 6)
+
+#define MSDUQID_TID_MASK	GENMASK(2, 0)
+#define MSDUQID_Q_MASK		GENMASK(5, 3)
+/*
+ * Min throughput limit 0 - 10 Gb/s
+ * Granularity: 1 Kb/s
+ */
+#define ATH12K_SAWF_MIN_MIN_THROUGHPUT 0
+#define ATH12K_SAWF_MAX_MIN_THROUGHPUT (10 * 1024 * 1024)
+
+/*
+ * Max throughput limit 0 - 10 Gb/s.
+ * Granularity: 1 Kb/s
+ */
+#define ATH12K_SAWF_MIN_MAX_THROUGHPUT 0
+#define ATH12K_SAWF_MAX_MAX_THROUGHPUT (10 * 1024 * 1024)
+
+/*
+ * Service interval limit 0 - 10 secs.
+ * Granularity: 100 µs
+ */
+#define ATH12K_SAWF_MIN_SVC_INTERVAL 0
+#define ATH12K_SAWF_MAX_SVC_INTERVAL (10 * 100 * 100)
+
+/*
+ * Burst size 0 - 16 MB.
+ * Granularity: 1 Byte.
+ */
+#define ATH12K_SAWF_MIN_BURST_SIZE 0
+#define ATH12K_SAWF_MAX_BURST_SIZE (16 * 1024 * 1024)
+/*
+ * Delay bound limit 0 - 10 secs
+ * Granularity: 100 µs
+ */
+#define ATH12K_SAWF_MIN_DELAY_BOUND 0
+#define ATH12K_SAWF_MAX_DELAY_BOUND (10 * 100 * 100)
+
+/*
+ * Msdu TTL limit 0 - 10 secs.
+ * Granularity: 100 µs
+ */
+#define ATH12K_SAWF_MIN_MSDU_TTL 0
+#define ATH12K_SAWF_MAX_MSDU_TTL (10 * 100 * 100)
+
+/*
+ * Priority limit 0 - 127.
+ * Higher the numerical value, higher is the priority.
+ */
+#define ATH12K_SAWF_MIN_PRIORITY 0
+#define ATH12K_SAWF_MAX_PRIORITY 127
+
+/*
+ * TID limit 0 - 7
+ */
+#define ATH12K_SAWF_MIN_TID 0
+#define ATH12K_SAWF_MAX_TID 7
+/*
+ * MSDU Loss Rate limit 0 - 100%.
+ * Granularity: 0.01%
+ */
+#define ATH12K_SAWF_MIN_MSDU_LOSS_RATE 0
+#define ATH12K_SAWF_MAX_MSDU_LOSS_RATE 10000
+
+#define ATH12K_SAWF_STATS_SIZE	4096
+
+enum SAWF_SVC_PARAM_DEFAULTS {
+	SAWF_SVC_PARAM_DEFAULT_MIN_THRUPUT    = 0,
+	SAWF_SVC_PARAM_DEFAULT_MAX_THRUPUT    = 0xffffffff,
+	SAWF_SVC_PARAM_DEFAULT_BURST_SIZE     = 0,
+	SAWF_SVC_PARAM_DEFAULT_SVC_INTERVAL   = 0xffffffff,
+	SAWF_SVC_PARAM_DEFAULT_DELAY_BOUND    = 0xffffffff,
+	SAWF_SVC_PARAM_DEFAULT_TIME_TO_LIVE   = 0xffffffff,
+	SAWF_SVC_PARAM_DEFAULT_PRIORITY       = 0,
+	SAWF_SVC_PARAM_DEFAULT_TID            = 0xffffffff,
+	SAWF_SVC_PARAM_DEFAULT_MSDU_LOSS_RATE = 0,
+	SAWF_SVC_PARAM_DEFAULT_UL_BURST_SIZE  = 0,
+	SAWF_SVC_PARAM_DEFAULT_UL_MIN_TPUT    = 0,
+	SAWF_SVC_PARAM_DEFAULT_UL_MAX_LATENCY = 0xffffffff,
+	SAWF_SVC_PARAM_DEFAULT_UL_SVC_INTERVAL = 0xffffffff,
+	SAWF_SVC_PARAM_DEFAULT_UL_OFDMA_DISABLE = 0,
+	SAWF_SVC_PARAM_DEFAULT_UL_MU_MIMO_DISABLE = 0,
+};
+
+#define DELAY_BOUND_ULTRA_LOW 10
+#define DELAY_BOUND_LOW 100
+#define DELAY_BOUND_MID 200
+#define DELAY_BOUND_HIGH 300
+#define SVC_INTERVAL_ULTRA_LOW 20
+#define SVC_INTERVAL_LOW 50
+#define TIME_TO_LIVE_ULTRA_LOW 20
+#define TIME_TO_LIVE_LOW 200
+#define TIME_TO_LIVE_MID 250
+
+/**
+ * struct ath12k_sawf_svc_params - Service Class Parameters
+ * @svc_id: Service ID
+ * @app_name: Service class name
+ * @min_throughput_rate: min throughput in kilobits per second
+ * @max_throughput_rate: max throughput in kilobits per second
+ * @burst_size:  burst size in bytes
+ * @service_interval: service interval
+ * @delay_bound: delay bound in milliseconds
+ * @msdu_ttl: MSDU Time-To-Live
+ * @priority: Priority
+ * @tid: TID
+ * @msdu_rate_loss: MSDU loss rate in parts per million
+ * @ul_service_interval: Uplink service interval
+ * @ul_burst_size: Uplink Burst Size
+ * @ul_min_tput: Uplink min_throughput
+ * @ul_max_latency: Uplink max latency
+ * @ul_ofdma_disable: Disable ofdma
+ * @ul_mu_mimo_disable: Disable MU MIMO
+ * @configured: indicates whether the service class is configured.
+ */
+
+struct ath12k_sawf_svc_params {
+	u8 svc_id;
+	u32 min_throughput_rate;
+	u32 max_throughput_rate;
+	u32 burst_size;
+	u32 service_interval;
+	u32 delay_bound;
+	u32 msdu_ttl;
+	u32 priority;
+	u32 tid;
+	u32 msdu_rate_loss;
+	u32 ul_service_interval;
+	u32 ul_burst_size;
+	u32 ul_min_tput;
+	u32 ul_max_latency;
+	bool ul_ofdma_disable;
+	bool ul_mu_mimo_disable;
+	bool configured;
+};
+
+struct ath12k_ul_params {
+	struct list_head list;
+	struct net_device *dest_dev;
+	struct net_device *src_dev;
+	u8 dst_mac[ETH_ALEN];
+	u8 src_mac[ETH_ALEN];
+	u8 fw_service_id;
+	u8 rv_service_id;
+	u8 add_or_sub;
+};
+
+/**
+ * struct ath12k_sawf_ctx- SAWF context
+ * @svc_classes: List of all service classes
+ */
+struct ath12k_sawf_ctx {
+	struct ath12k_sawf_svc_params svc_classes[ATH12K_SAWF_SVC_CLASS_MAX];
+	u32 max_msduq_per_tid;
+	u32 default_msduq_per_tid;
+	spinlock_t sawf_svc_lock;
+	struct workqueue_struct *workqueue;
+	struct work_struct ul_configure;
+	struct list_head list;
+};
+
+/**
+ * struct ath12k_msduq_map-  MSDU Q Map struct
+ * mapping of MSDUQ with service class ID
+ * @svc_id: Service Class ID
+ * @is_reserved: Flag to hold the allocation status.
+ * @msduq_id: unique id of the MSDUQ.
+ */
+struct ath12k_msduq_map {
+	u8 svc_id;
+	bool is_reserved;
+	u16 msduq_id;
+};
+
+/**
+ * struct ath12k_sawf_def_queue_report : default Q map report
+ */
+struct ath12k_sawf_def_queue_report {
+	u8 svc_class_id;
+};
+
+/**
+ * struct ath12k_sawf_peer_ctx
+ * A database to maintain the allocation status of user-defined MSDUQ.
+ * The allocation is mapped with the service class ID.
+ * @msduq_table: map table of msduq and svc id per peer
+ * @def_q_map: default q map report
+ * @telemetry_peer_ctx: pointer to hold the structure reference
+ * maintained in telemetry agent module.
+ */
+struct ath12k_sawf_peer_ctx {
+	struct ath12k_msduq_map msduq_table[ATH12K_SAWF_MAX_TID_SUPPORT][MAX_Q_PER_TID];
+	struct ath12k_sawf_def_queue_report def_q_map[ATH12K_SAWF_MAX_TID_SUPPORT];
+	void *telemetry_peer_ctx;
+};
+
+/* MSG_TYPE => HTT_H2T_SAWF_DEF_QUEUES_MAP_REQ
+ *
+ * @details
+ * The SAWF_DEF_QUEUES_MAP_REQ message is sent by the host to link
+ * the default MSDU queues for one of the TIDs within the specified peer
+ * to the specified service class.
+ * The TID is indirectly specified - each service class is associated
+ * with a TID.  All default MSDU queues for this peer-TID will be
+ * linked to the service class in question.
+ *
+ * |31                          16|15           8|7            0|
+ * |------------------------------+--------------+--------------|
+ * |             peer ID          | svc class ID |   msg type   |
+ * |------------------------------------------------------------|
+ * Header fields:
+ * dword0 - b'7:0       - msg_type: This will be set to
+ *                        0x1c (HTT_H2T_SAWF_DEF_QUEUES_MAP_REQ)
+ *          b'15:8      - service class ID
+ *          b'31:16     - peer ID
+ */
+struct ath12k_htt_h2t_sawf_def_q_map_req {
+	u32 info;
+};
+
+#define HTT_H2T_MSG_TYPE_ID			GENMASK(7, 0)
+#define HTT_H2T_SAWF_DEF_Q_MAP_SVC_ID		GENMASK(15, 8)
+#define HTT_H2T_SAWF_DEF_Q_UMAP_SVC_ID		GENMASK(15, 8)
+#define HTT_H2T_SAWF_DEF_Q_MAP_PEER_ID		GENMASK(31, 16)
+#define HTT_H2T_SAWF_DEF_Q_UMAP_PEER_ID		GENMASK(31, 16)
+#define HTT_H2T_SAWF_DEF_Q_MAP_TID_MASK_ID	GENMASK(15, 8)
+#define HTT_H2T_SAWF_DEF_Q_MAP_ETO_ID		BIT(0)
+/* MSG_TYPE => HTT_H2T_SAWF_DEF_QUEUES_UNMAP_REQ
+ *
+ * @details
+ * The SAWF_DEF_QUEUES_UNMAP_REQ message is sent by the host to
+ * remove the linkage of the specified peer-TID's MSDU queues to
+ * service classes.
+ *
+ * |31                          16|15           8|7            0|
+ * |------------------------------+--------------+--------------|
+ * |             peer ID          | svc class ID |   msg type   |
+ * |------------------------------------------------------------|
+ * Header fields:
+ * dword0 - b'7:0       - msg_type: This will be set to
+ *                        0x1d (HTT_H2T_SAWF_DEF_QUEUES_UNMAP_REQ)
+ *          b'15:8      - service class ID
+ *          b'31:16     - peer ID
+ *                        A HTT_H2T_SAWF_DEF_QUEUES_UNMAP_PEER_ID_WILDCARD
+ *                        value for peer ID indicates that the target should
+ *                        apply the UNMAP_REQ to all peers.
+ */
+struct ath12k_htt_h2t_sawf_def_q_unmap_req {
+	u32 info;
+};
+
+/* MSG_TYPE => HTT_H2T_SAWF_DEF_QUEUES_MAP_REPORT_REQ
+ *
+ * @details
+ * The SAWF_DEF_QUEUES_MAP_REPORT_REQ message is sent by the host to
+ * request the target to report what service class the default MSDU queues
+ * of the specified TIDs within the peer are linked to.
+ * The target will respond with a SAWF_DEF_QUEUES_MAP_REPORT_CONF message
+ * to report what service class (if any) the default MSDU queues for
+ * each of the specified TIDs are linked to.
+ *
+ * |31                          16|15           8|7        1|  0|
+ * |------------------------------+--------------+--------------|
+ * |             peer ID          |   TID mask   |   msg type   |
+ * |------------------------------------------------------------|
+ * |                           reserved                     |ETO|
+ * |------------------------------------------------------------|
+ * Header fields:
+ * dword0 - b'7:0       - msg_type: This will be set to
+ *                        0x1e (HTT_H2T_SAWF_DEF_QUEUES_MAP_REPORT_REQ)
+ *          b'15:8      - TID mask
+ *          b'31:16     - peer ID
+ * dword1 - b'0         - "Existing Tids Only" flag
+ *                        If this flag is set, the DEF_QUEUES_MAP_REPORT_CONF
+ *                        message generated by this REQ will only show the
+ *                        mapping for TIDs that actually exist in the target's
+ *                        peer object.
+ *                        Any TIDs that are covered by a MAP_REQ but which
+ *                        do not actually exist will be shown as being
+ *                        unmapped (i.e. svc class ID 0xff).
+ *                        If this flag is cleared, the MAP_REPORT_CONF message
+ *                        will consider not only the mapping of TIDs currently
+ *                        existing in the peer, but also the mapping that will
+ *                        be applied for any TID objects created within this
+ *                        peer in the future.
+ *          b'31:1      - reserved for future use
+ */
+struct ath12k_htt_h2t_sawf_def_q_map_report_req {
+	u32 info;
+	u32 info1;
+};
+
+#define HTT_T2H_SAWF_Q_MAP_REPORT_INFO0_PEER_ID	GENMASK(31, 16)
+#define HTT_T2H_SAWF_Q_MAP_REPORT_TID_ID	GENMASK(7, 0)
+#define HTT_T2H_SAWF_Q_MAP_REPORT_SVC_ID	GENMASK(15, 8)
+
+#define HTT_SAWF_SVC_CLASS_INVALID_ID 0xff
+
+struct ath12k_htt_t2h_sawf_q_map_report {
+	u32 info;
+	u32 tid_report[ATH12K_SAWF_MAX_TID_SUPPORT]; /* one svc-class map word per TID */
+} __packed;
+
+/**
+ * struct wmi_peer_latency_info_params - peer latency info params
+ * @peer_mac: peer mac address
+ * @service_interval: service interval in milliseconds
+ * @burst_size: burst size in bytes
+ * @latency_tid: tid associated with this latency information
+ * @ac: Access Category associated with this tid
+ * @ul_enable: Bit to indicate ul latency enable
+ * @dl_enable: Bit to indicate dl latency enable
+ * @flow_id: Flow id associated with tid
+ * @add_or_sub: Bit to indicate add/delete of latency params
+ * @sawf_ul_param: Bit to indicate if UL params are for SAWF/SCS
+ * @max_latency: Maximum latency in milliseconds
+ * @min_throughput: Minimum throughput in Kbps
+ */
+struct ath12k_sawf_wmi_peer_latency_param {
+	u8 svc_id;
+	u8 peer_mac[ETH_ALEN];
+	u32 service_interval;
+	u32 burst_size;
+	u32	latency_tid :8,
+		ac          :2,
+		ul_enable   :1,
+		dl_enable   :1,
+		flow_id     :4,
+		add_or_sub  :2,
+		sawf_ul_param :1,
+		ofdma_disable :1,
+		mu_mimo_disable :1,
+		reserved    :11;
+	u32 max_latency;
+	u32 min_throughput;
+};
+
+#define LATENCY_TID_INFO_TID_NUM		GENMASK(7, 0)
+#define LATENCY_TID_INFO_AC			GENMASK(9, 8)
+#define LATENCY_TID_INFO_DL_EN			BIT(10)
+#define LATENCY_TID_INFO_UL_EN			BIT(11)
+#define LATENCY_TID_INFO_BURST_SZ_SUM		GENMASK(13, 12)
+#define LATENCY_TID_INFO_MSDUQ_ID		GENMASK(17, 14)
+#define LATENCY_TID_INFO_UL_OFDMA_DISABLE	BIT(18)
+#define LATENCY_TID_INFO_UL_MU_MIMO_DISABLE	BIT(19)
+#define LATENCY_TID_INFO_SAWF_UL_PARAM		BIT(20)
+
+/* struct wmi_peer_tid_latency_config_fixed_param:
+ * Currently wmi_peer_tid_set_latency_request_fixed_param will be sent
+ * per TID per latency configured client.
+ * In future this command might come for multiple latency configured
+ * clients together.
+ * The clients are expected to be associated while receiving this command.
+ * @tlv_header
+ * 	TLV tag and len;
+ * @pdev_id
+ *	device ID
+ */
+struct wmi_peer_tid_latency_config_fixed_param {
+	__le32 tlv_header;
+	__le32 pdev_id;
+} __packed;
+
+/** struct wmi_tid_latency_info
+ * @tlv_header:
+ *	TLV Tag and Len
+ * @wmi_mac_addr destmac
+ * 	Mac address of end client
+ * @service_interval
+ * 	Maximum expected average delay between 2 schedules in milliseconds
+ * 	of given TID type when it has active traffic.
+ * 	0x0 is considered as invalid service interval.
+ * @burst_size_diff
+ * 	Cumulative number of bytes are expected to be transmitted or
+ * 	received in the service interval when this specific Peer-TID
+ * 	has active traffic.
+ * 	If cumulative number of bytes is 0x0, it is considered as
+ * 	invalid burst size.  In that case, firmware would try to transmit
+ * 	and receive as many bytes as it can for this specific Peer-TID.
+ * 	This burst size will be added or subtracted from vdev burst size
+ * 	based on burst size sum bit in latency tid info.
+ * 	The VDEV burst size will be considered to be 0 when no VDEV latency
+ * 	command is received.
+ * 	If host needs to set burst size for a peer then they can use the
+ * 	peer cmd and set burst size sum bit to 1.
+ * @max_latency
+ * 	The maximum end to end latency expectation, in milliseconds.
+ * 	If this value is 0x0, it shall be ignored.
+ * @max_per
+ * 	The maximum PER (as a percent) for the peer-TID, in range 1 - 100
+ * 	If this value is 0x0, it shall be ignored.
+ * @min_tput
+ * 	The minimum guaranteed throughput to the peer-TID, in Kbps.
+ * 	If this value is 0x0, it shall be ignored.
+ * @latency_tid_info
+ *  Bits 21-31      - Reserved (Shall be zero)
+ *  Bit  20         - Flag to indicate SAWF UL params (and not mesh latency)
+ *  Bit  19         - Disable UL MU-MIMO. If set, UL MU-MIMO is disabled
+ *                    for the specified AC. Note that TID level control is
+ *                    not possible for UL MU-MIMO (the granularity is AC).
+ *  Bit  18         - Disable UL OFDMA. If set, UL OFDMA is disabled for
+ *                    the specified AC. Note that TID level control is not
+ *                    possible for UL OFDMA (the granularity is AC).
+ *  Bits 14-17      - MSDU queue flow id within the TID for configuring
+ *                    latency info per MSDU flow queue
+ *  Bit  12-13      - burst size sum. Bit to indicate whether to add or
+ *                    subtract burst_size_diff from vdev cmd burst size:
+ *                    1 -> addition
+ *                    2 -> subtraction
+ *  Bit   11        - UL latency config indication.
+ *                    If this bit is set then this latency info will
+ *                    be used when triggering UL traffic.  Until the
+ *                    AC specified in bits 8-9 has transferred at least
+ *                    burst_size amount of UL data within the service
+ *                    period, the AP will continue sending UL triggers
+ *                    when the STA has data of the specified access
+ *                    category ready to transmit.
+ *                    Note that the TID specified in bits 0-7 does not
+ *                    apply to UL; the TID-to-AC mapping applied to DL
+ *                    data that can be adjusted by the TID specified
+ *                    in bits 0-7 and the AC specified in bits 8-9 is
+ *                    distinct from the TID-to-AC mapping applied to
+ *                    UL data.
+ *  Bit   10        - DL latency config indication. If the bit is set
+ *                    then DL TID will use this latency config.
+ *  Bits  8 - 9     - This bit has info on the custom AC of DL TID.
+ *                    Also if bit 11 is set, the AP will apply some
+ *                    of these latency specs (in particular, burst_size)
+ *                    to UL traffic for this AC, by sending UL triggers
+ *                    until the desired amount of data has been received
+ *                    within the service period.
+ *  Bits  0 - 7     - Specifies the TID of interest that corresponds
+ *                    to the AC specified in bits 8-9.  This can be
+ *                    used to adjust the TID-to-AC mapping applied to
+ *                    DL data (if bit 10 is set).
+ */
+struct wmi_tid_latency_info {
+	__le32 tlv_header;
+	struct wmi_mac_addr destmac;
+	__le32 service_interval;
+	__le32 burst_size_diff;
+	__le32 max_latency;
+	__le32 max_per;
+	__le32 min_tput;
+	__le32 latency_tid_info;
+} __packed;
+
+/**
+ * ath12k_sawf_stats_level - sawf stats level
+ * @ATH12K_SAWF_STATS_BASIC : sawf basic stats
+ * @ATH12K_SAWF_STATS_ADVNCD : sawf advanced stats
+ * @ATH12K_SAWF_STATS_LATENCY : sawf latency stats
+ */
+enum ath12k_sawf_stats_level {
+	ATH12K_SAWF_STATS_BASIC = BIT(0),
+	ATH12K_SAWF_STATS_ADVNCD = BIT(1),
+	ATH12K_SAWF_STATS_LATENCY = BIT(2),
+};
+
+#define ATH12K_SAWF_STATS_MAX (ATH12K_SAWF_STATS_BASIC | ATH12K_SAWF_STATS_ADVNCD | ATH12K_SAWF_STATS_LATENCY)
+
+/**
+ * struct sawf_fw_mpdu_stats- per-mpdu Tx success/failure snapshot
+ * @success_cnt: count of pkts successfully transmitted
+ * @failure_cnt: count of pkts failed to transmit
+ */
+struct sawf_fw_mpdu_stats {
+	u64 success_cnt;
+	u64 failure_cnt;
+};
+
+/**
+ * struct dp_pkt_info - packet info
+ * @num: number of packets
+ * @bytes: total number of bytes
+ */
+struct dp_pkt_info {
+	u32 num_pkts;
+	u64 bytes;
+};
+
+/**
+ * struct sawf_tx_stats- Tx stats
+ * @tx_success: transmit success stats
+ * @tx_ingress: enqueue success stats
+ * @dropped: detailed information for tx-drops
+ * @svc_intval_stats: success/failure stats per service-interval
+ * @burst_size_stats: success/failure stats per burst-size
+ * @tx_failed: tx failure count
+ * @queue_depth: transmit queue-depth
+ * @throughput: throughput
+ * @ingress_rate: ingress-rate
+ * @tid: tid used for transmit
+ * @msduq: MSDU queue used for transmit
+ */
+struct sawf_tx_stats {
+	struct dp_pkt_info tx_success;
+	struct dp_pkt_info tx_ingress;
+	struct {
+		struct dp_pkt_info fw_rem;
+		u32 fw_rem_notx;
+		u32 fw_rem_tx;
+		u32 age_out;
+		u32 fw_reason1;
+		u32 fw_reason2;
+		u32 fw_reason3;
+	} dropped;
+	struct sawf_fw_mpdu_stats svc_intval_stats;
+	struct sawf_fw_mpdu_stats burst_size_stats;
+	u32 tx_failed;
+	u32 queue_depth;
+	u32 throughput;
+	u32 ingress_rate;
+	u8 tid;
+	u8 msduq;
+};
+
+/*
+ * struct ath12k_delay_hist_stats (defined below) - delay histogram stats
+ * @max: Maximum observed delay
+ * @min: Minimum observed delay
+ * @avg: Average observed delay
+ */
+
+enum stats_if_hist_bucket_index {
+	HIST_BUCKET_0,
+	HIST_BUCKET_1,
+	HIST_BUCKET_2,
+	HIST_BUCKET_3,
+	HIST_BUCKET_4,
+	HIST_BUCKET_5,
+	HIST_BUCKET_6,
+	HIST_BUCKET_7,
+	HIST_BUCKET_8,
+	HIST_BUCKET_9,
+	HIST_BUCKET_10,
+	HIST_BUCKET_11,
+	HIST_BUCKET_12,
+	HIST_BUCKET_MAX,
+};
+
+enum stats_if_delay_bucket_index {
+	DELAY_BUCKET_0,
+	DELAY_BUCKET_1,
+	DELAY_BUCKET_2,
+	DELAY_BUCKET_3,
+	DELAY_BUCKET_4,
+	DELAY_BUCKET_5,
+	DELAY_BUCKET_6,
+	DELAY_BUCKET_7,
+	DELAY_BUCKET_8,
+	DELAY_BUCKET_9,
+	DELAY_BUCKET_10,
+	DELAY_BUCKET_11,
+	DELAY_BUCKET_12,
+	DELAY_BUCKET_MAX,
+};
+
+enum hist_types {
+	HIST_TYPE_SW_ENQEUE_DELAY,
+	HIST_TYPE_HW_COMP_DELAY,
+	HIST_TYPE_REAP_STACK,
+	HIST_TYPE_HW_TX_COMP_DELAY,
+	HIST_TYPE_MAX,
+};
+
+struct hist_bucket {
+	enum hist_types hist_type;
+	u64 freq[HIST_BUCKET_MAX];
+};
+
+struct ath12k_delay_hist_stats {
+	struct hist_bucket hist;
+	int max;
+	int min;
+	int avg;
+};
+
+/**
+ * struct sawf_tx_delay_stats- sawf Tx-delay stats
+ * @delay_hist: histogram for various delay-buckets
+ * @nwdelay_avg: moving average for nwdelay
+ * @swdelay_avg: moving average for swdelay
+ * @hwdelay_avg: moving average for hwdelay
+ * @num_pkt: count of pkts for which delay is calculated
+ * @nwdelay_win_total: total nwdelay for a window
+ * @swdelay_win_total: total swdelay for a window
+ * @hwdelay_win_total: total hwdelay for a window
+ * @success: count of pkts that met delay-bound
+ * @failure: count of pkts that did not meet delay-bound
+ */
+
+struct sawf_tx_delay_stats {
+	struct ath12k_delay_hist_stats delay_hist;
+
+	u32 nwdelay_avg;
+	u32 swdelay_avg;
+	u32 hwdelay_avg;
+	u32 num_pkt;
+	u64 nwdelay_win_total;
+	u64 swdelay_win_total;
+	u64 hwdelay_win_total;
+	u64 success;
+	u64 failure;
+};
+/**
+ * struct ath12k_sawf_stats - SAWF statistics
+ * @tx_stats: Tx stats per host MSDU queue, indexed by [TID][queue]
+ * @tx_delay_stats: Tx delay stats per host MSDU queue,
+ *		    indexed by [TID][queue]
+ */
+struct ath12k_sawf_stats {
+	struct sawf_tx_stats tx_stats[ATH12K_SAWF_MAX_TID_SUPPORT][MAX_Q_PER_TID];
+	struct sawf_tx_delay_stats tx_delay_stats[ATH12K_SAWF_MAX_TID_SUPPORT][MAX_Q_PER_TID];
+};
+
+/* MSG_TYPE => HTT_H2T_MSG_TYPE_STREAMING_STATS_REQ
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target message that requests the target to start or stop producing
+ * ongoing stats of the specified type.
+ *
+ * |31|30         |23          16|15           8|7            0|
+ * |-----------------------------------------------------------|
+ * |EN| reserved  | stats type   |    reserved  |   msg type   |
+ * |-----------------------------------------------------------|
+ * |                   config param [0]                        |
+ * |-----------------------------------------------------------|
+ * |                   config param [1]                        |
+ * |-----------------------------------------------------------|
+ * |                   config param [2]                        |
+ * |-----------------------------------------------------------|
+ * |                   config param [3]                        |
+ * |-----------------------------------------------------------|
+ * Where:
+ *   - EN is an enable/disable flag
+ * Header fields:
+ *   - MSG_TYPE
+ *     Bits 7:0
+ *     Purpose: identifies this is a streaming stats upload request message
+ *     Value: 0x20 (HTT_H2T_MSG_TYPE_STREAMING_STATS_REQ)
+ *   - STATS_TYPE
+ *     Bits 23:16
+ *     Purpose: identifies which FW statistics to upload
+ *     Value: Defined values are HTT_STRM_GEN_MPDUS_STATS for basic
+ *            stats and HTT_STRM_GEN_MPDUS_DETAILS_STAT for extended
+ *            stats.
+ *   - ENABLE
+ *     Bit 31
+ *     Purpose: enable/disable the target's ongoing stats of the specified type
+ *     Value:
+ *         0 - disable ongoing production of the specified stats type
+ *         1 - enable  ongoing production of the specified stats type
+ *   - CONFIG_PARAM [0]
+ *     Bits 31:0
+ *     Purpose: give an opaque configuration value to the specified stats type
+ *     Value: stats-type specific configuration value
+ *   - CONFIG_PARAM [1]
+ *     Bits 31:0
+ *     Purpose: give an opaque configuration value to the specified stats type
+ *     Value: stats-type specific configuration value
+ *   - CONFIG_PARAM [2]
+ *     Bits 31:0
+ *     Purpose: give an opaque configuration value to the specified stats type
+ *     Value: stats-type specific configuration value
+ *   - CONFIG_PARAM [3]
+ *     Bits 31:0
+ *     Purpose: give an opaque configuration value to the specified stats type
+ *     Value: stats-type specific configuration value
+ */
+
+#define HTT_STRM_GEN_MPDUS_STATS 43
+#define HTT_STRM_GEN_MPDUS_DETAILS_STATS 44
+#define HTT_H2T_MSG_TYPE_STREAMING_STATS_TYPE GENMASK(23, 16)
+#define HTT_H2T_MSG_TYPE_STREAMING_STATS_CONFIGURE BIT(31)
+
+struct ath12k_htt_h2t_sawf_streaming_req {
+	u32 info;
+	u32 config_param_0;
+	u32 config_param_1;
+	u32 config_param_2;
+	u32 config_param_3;
+};
+
+/**
+ * @brief target -> host streaming statistics upload
+ *
+ * MSG_TYPE => HTT_T2H_MSG_TYPE_STREAMING_STATS_IND
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host streaming stats upload indication message.
+ * The host can use a HTT_H2T_MSG_TYPE_STREAMING_STATS_REQ message to enable
+ * the target to produce an ongoing series of HTT_T2H_MSG_TYPE_STREAMING_STATS_IND
+ * STREAMING_STATS_IND messages, and can also use the
+ * HTT_H2T_MSG_TYPE_STREAMING_STATS_REQ message to halt the target's production of
+ * HTT_T2H_MSG_TYPE_STREAMING_STATS_IND messages.
+ *
+ * The HTT_T2H_MSG_TYPE_STREAMING_STATS_IND message contains a payload of TLVs
+ * containing the stats enabled by the host's HTT_H2T_MSG_TYPE_STREAMING_STATS_REQ
+ * message.
+ *
+ * |31                                           8|7             0|
+ * |--------------------------------------------------------------|
+ * |                   reserved                   |    msg type   |
+ * |--------------------------------------------------------------|
+ * |                   type-specific stats info                   |
+ * |--------------------------------------------------------------|
+ * Header fields:
+ *  - MSG_TYPE
+ *    Bits 7:0
+ *    Purpose: Identifies this as a streaming statistics upload indication
+ *             message.
+ *    Value: 0x2f (HTT_T2H_MSG_TYPE_STREAMING_STATS_IND)
+ */
+
+#define HTT_T2H_STREAMING_STATS_IND_HDR_SIZE 4
+#define SAWF_TTH_TID_MASK GENMASK(3, 0)
+#define SAWF_TTH_QTYPE_MASK GENMASK(7, 4)
+
+struct htt_stats_strm_gen_mpdus_tlv {
+/*
+ * |31     24|23      20|19       16|15                          0|
+ * |---------+----------+----------+------------------------------|
+ * |Reserved |   QTYPE  |   TID    |            Peer ID           |
+ * |--------------------------------------------------------------|
+ * |    svc interval failure       |   svc interval success       |
+ * |--------------------------------------------------------------|
+ * |    burst size failure         |   burst size success         |
+ * |--------------------------------------------------------------|
+*/
+	__le16 peer_id;
+	__le16 info;
+	__le16 svc_interval_success;
+	__le16 svc_interval_failure;
+	__le16 burst_size_success;
+	__le16 burst_size_failure;
+} __packed;
+
+struct htt_stats_strm_gen_mpdus_details_tlv {
+	__le16 peer_id;
+	__le16 info;
+	__le16 svc_interval_timestamp_prior_ms;
+	__le16 svc_interval_timestamp_now_ms;
+	__le16 svc_interval_interval_spec_ms;
+	__le16 svc_interval_interval_margin_ms;
+	/* consumed_bytes_orig:
+	 * Raw count (actually estimate) of how many bytes were removed
+	 * from the MSDU queue by the GEN_MPDUS operation.
+	 */
+	__le16 burst_size_consumed_bytes_orig;
+	/* consumed_bytes_final:
+	 * Adjusted count of removed bytes that incorporates normalizing
+	 * by the actual service interval compared to the expected
+	 * service interval.
+	 * This allows the burst size computation to be independent of
+	 * whether the target is doing GEN_MPDUS at only the service
+	 * interval, or substantially more often than the service
+	 * interval.
+	 *     consumed_bytes_final = consumed_bytes_orig /
+	 *         (svc_interval / ref_svc_interval)
+	 */
+	__le16 burst_size_consumed_bytes_final;
+	__le16 burst_size_remaining_bytes;
+	__le16 burst_size_reserved;
+	__le16 burst_size_burst_size_spec;
+	__le16 burst_size_margin_bytes;
+} __packed;
+
+extern bool ath12k_sawf_enable;
+struct ath12k_sawf_ctx *ath12k_get_sawf_context(void);
+void ath12k_sawf_init(struct ath12k_base *ab);
+void ath12k_sawf_deinit(struct ath12k_base *ab);
+bool ath12k_validate_sawf_param(struct ath12k_sawf_svc_params *params);
+bool ath12k_svc_id_configured(u8 svc_id);
+bool ath12k_svc_id_valid(u8 svc_id);
+void ath12k_update_svc_class(struct ath12k_sawf_svc_params *sawf_params);
+bool ath12k_disable_svc_class(u8 svc_id);
+int ath12k_get_tid(u8 svc_id);
+int ath12k_sawf_reconfigure_on_crash(struct ath12k_base *ab);
+int ath12k_create_send_svc_params(struct ath12k_sawf_svc_params *param);
+int ath12k_sawf_disable_config(u8 svc_id);
+void ath12k_send_ul_configs(struct work_struct *work);
+u16 ath12k_sawf_get_msduq(struct net_device *netdev,
+			  u8 *peer_mac, u32 service_id,
+			  u32 dscp, u32 rule_id);
+void ath12k_sawf_config_ul(struct net_device *dest_dev, u8 *dst_mac, struct net_device *src_dev,  u8 *src_mac,
+			   u8 fw_service_id, u8 rv_service_id,
+			   u8 add_or_sub);
+int ath12k_sawf_wmi_config_ul(struct ath12k_base *ab,
+			      struct ath12k_sawf_wmi_peer_latency_param *latency_info);
+struct ath12k_base *ath12k_sawf_get_ab_from_netdev(struct net_device *dev,
+						u8 *peer_mac,
+						u16 *peer_id);
+
+int ath12k_sawf_def_qmap_req(struct ath12k_base *ab,
+			     u8 svc_id, u16 peer_id);
+int ath12k_sawf_def_qunmap_req(struct ath12k_base *ab,
+			       u8 svc_id, u16 peer_id);
+int ath12k_sawf_def_qmap_report_req(struct ath12k_base *ab,
+				    u16 peer_id);
+
+void ath12k_htt_sawf_def_q_map_report_handler(struct ath12k_base *ab,
+					      struct sk_buff *skb);
+void ath12k_sawf_stats_update(struct ath12k *ar, struct sk_buff *skb,
+			      struct hal_tx_status *ts,
+			      struct ath12k_peer *peer, ktime_t timestamp);
+void ath12k_sawf_tx_enqueue_peer_stats(struct ath12k_base *ab,
+				       struct ath12k_peer *peer,
+				       u32 msduq_id, u32 len);
+int ath12k_htt_sawf_streaming_stats_configure(struct ath12k *ar,
+					      u8 stats_type,
+					      u8 configure,
+					      u32 config_param_0,
+					      u32 config_param_1,
+					      u32 config_param_2,
+					      u32 config_param_3);
+void ath12k_htt_sawf_streaming_stats_ind_handler(struct ath12k_base *ab,
+						 struct sk_buff *skb);
+int telemetry_extract_data(struct stats_config *cfg,
+			   struct telemetry_sawftx_stats *tx_stats,
+			   struct telemetry_sawfdelay_stats *delay_stats,
+			   struct ath12k_base *ab);
+int ath12k_telemetry_sla_reset_stats(u8 svc_id, u8 *peer_mac, u8 *mld_mac_addr,
+				     u8 set_clear);
+int ath12k_telemetry_get_sawf_tx_stats_tput(void *ptr, void *stats, u64 *in_bytes,
+					    u64 *in_cnt, u64 *tx_bytes,
+					    u64 *tx_cnt, u8 tid_v, u8 msduq_id);
+int ath12k_telemetry_get_sawf_tx_stats_mpdu(void *ptr, void *stats, u64 *svc_int_pass,
+					    u64 *svc_int_fail, u64 *burst_pass,
+					    u64 *burst_fail, u8 tid_v, u8 msduq_id);
+int ath12k_telemetry_get_sawf_tx_stats_drop(void *ptr, void *stats, u64 *pass,
+					    u64 *drop, u64 *drop_ttl,
+					    u8 tid, u8 msduq_id);
+void ath12k_telemetry_notify_breach(u8 *mac_addr, u8 svc_id, u8 param,
+				    bool set_clear, u8 tid);
+void ath12k_sdwf_fill_hbucket_type(struct ath12k_peer *peer);
+#else /* CONFIG_ATH12K_SAWF */
+
+static inline void ath12k_sawf_init(struct ath12k_base *ab) {
+	return;
+}
+
+static inline void ath12k_sawf_deinit(struct ath12k_base *ab) {
+	return;
+}
+
+static inline void ath12k_sawf_stats_update(struct ath12k *ar, struct sk_buff *skb,
+					    struct hal_tx_status *ts,
+			      		    struct ath12k_peer *peer,
+					    ktime_t timestamp)
+{
+	return;
+}
+#endif /* CONFIG_ATH12K_SAWF */
+#endif /* ATH12K_SAWF_H */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/spectral.h	2024-01-19 17:01:19.873847249 +0100
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_SPECTRAL_H
+#define ATH12K_SPECTRAL_H
+
+#include "../spectral_common.h"
+#include "dbring.h"
+
+/* enum ath12k_spectral_mode:
+ *
+ * @SPECTRAL_DISABLED: spectral mode is disabled
+ * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
+ *	something else.
+ * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
+ *	is performed manually.
+ */
+enum ath12k_spectral_mode {
+	ATH12K_SPECTRAL_DISABLED = 0,
+	ATH12K_SPECTRAL_BACKGROUND,
+	ATH12K_SPECTRAL_MANUAL,
+};
+
+struct ath12k_spectral {
+	struct ath12k_dbring rx_ring;
+	/* Protects enabled */
+	spinlock_t lock;
+	struct rchan *rfs_scan;	/* relay(fs) channel for spectral scan */
+	struct dentry *scan_ctl;
+	struct dentry *scan_count;
+	struct dentry *scan_bins;
+	enum ath12k_spectral_mode mode;
+	u16 count;
+	u8 fft_size;
+	bool enabled;
+	bool is_primary;
+	u32 ch_width;
+	struct wmi_spectral_capabilities_event spectral_cap;
+};
+
+#ifdef CONFIG_ATH12K_SPECTRAL
+
+int ath12k_spectral_init(struct ath12k_base *ab);
+void ath12k_spectral_deinit(struct ath12k_base *ab);
+int ath12k_spectral_vif_stop(struct ath12k_link_vif *arvif);
+void ath12k_spectral_reset_buffer(struct ath12k *ar);
+enum ath12k_spectral_mode ath12k_spectral_get_mode(struct ath12k *ar);
+struct ath12k_dbring *ath12k_spectral_get_dbring(struct ath12k *ar);
+
+#else
+
+static inline int ath12k_spectral_init(struct ath12k_base *ab)
+{
+	return 0;
+}
+
+static inline void ath12k_spectral_deinit(struct ath12k_base *ab)
+{
+}
+
+static inline int ath12k_spectral_vif_stop(struct ath12k_link_vif *arvif)
+{
+	return 0;
+}
+
+static inline void ath12k_spectral_reset_buffer(struct ath12k *ar)
+{
+}
+
+static inline
+enum ath12k_spectral_mode ath12k_spectral_get_mode(struct ath12k *ar)
+{
+	return ATH12K_SPECTRAL_DISABLED;
+}
+
+static inline
+struct ath12k_dbring *ath12k_spectral_get_dbring(struct ath12k *ar)
+{
+	return NULL;
+}
+
+#endif /* CONFIG_ATH12K_SPECTRAL */
+#endif /* ATH12K_SPECTRAL_H */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/telemetry.h	2024-01-19 17:01:19.873847249 +0100
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_TELEMETRY_H
+#define ATH12K_TELEMETRY_H
+
+#ifdef CONFIG_ATH12K_SAWF
+
+/**
+ * struct ath12k_sla_samples_cfg - telemetry sawf sla samples configuration
+ * @moving_avg_pkt: Number of packets per window to calculate moving average
+ * @moving_avg_win: Number of windows to calculate moving average
+ * @sla_num_pkt: Number of packets for SLA detection
+ * @sla_time_sec:  Number of seconds for SLA detection
+ */
+struct ath12k_sla_samples_cfg {
+	u32 moving_avg_pkt;
+	u32 moving_avg_win;
+	u32 sla_num_pkt;
+	u32 sla_time_sec;
+};
+
+/**
+ * struct ath12k_sla_thershold_cfg - telemetry sawf sla
+ * threshold configuration
+ * @svc_id: service class id
+ * @min_throughput_rate: min throughput threshold percentage
+ * @max_throughput_rate: max throughput threshold percentage
+ * @burst_size: burst size threshold percentage
+ * @service_interval: service interval threshold percentage
+ * @delay_bound: delay bound threshold percentage
+ * @msdu_ttl: MSDU Time-To-Live threshold percentage
+ * @msdu_rate_loss: MSDU loss rate threshold percentage
+ */
+struct ath12k_sla_thershold_cfg {
+	u8 svc_id;
+	u32 min_throughput_rate;
+	u32 max_throughput_rate;
+	u32 burst_size;
+	u32 service_interval;
+	u32 delay_bound;
+	u32 msdu_ttl;
+	u32 msdu_rate_loss;
+};
+
+
+/**
+ * enum ath12k_sla_detect - telemetry sawf sla
+ * breach detection option
+ * @SLA_DETECT_NUM_PACKET: Number of packets per window
+ * @SLA_DETECT_PER_SECOND: Number of windows
+ * @SLA_DETECT_MOV_AVG: Number of packets to calculate
+ *			moving average for SLA detection
+ * @SLA_DETECT_NUM_SECOND: Number of seconds for SLA detection
+ */
+enum ath12k_sla_detect {
+	SLA_DETECT_NUM_PACKET,
+	SLA_DETECT_PER_SECOND,
+	SLA_DETECT_MOV_AVG,
+	SLA_DETECT_NUM_SECOND,
+	SLA_DETECT_MAX,
+};
+
+/**
+ * struct ath12k_sla_detect_cfg - telemetry sawf sla
+ * breach detection configuration
+ * @sla_detect: sla detection option
+ * @min_throughput_rate: min throughput threshold percentage
+ * @max_throughput_rate: max throughput threshold percentage
+ * @burst_size: burst size threshold percentage
+ * @service_interval: service interval threshold percentage
+ * @delay_bound: delay bound threshold percentage
+ * @msdu_ttl: MSDU Time-To-Live threshold percentage
+ * @msdu_rate_loss: MSDU loss rate threshold percentage
+ */
+struct ath12k_sla_detect_cfg {
+	enum ath12k_sla_detect sla_detect;
+	u32 min_throughput_rate;
+	u32 max_throughput_rate;
+	u32 burst_size;
+	u32 service_interval;
+	u32 delay_bound;
+	u32 msdu_ttl;
+	u32 msdu_rate_loss;
+};
+
+/**
+ * struct ath12k_telemetry_ctx - Telemetry context
+ */
+struct ath12k_telemetry_ctx {
+	struct ath12k_sla_samples_cfg sla_samples_params;
+	struct ath12k_sla_thershold_cfg sla_thershold_params;
+	struct ath12k_sla_detect_cfg sla_detect_params;
+};
+
+void ath12k_telemetry_init(struct ath12k_base *ab);
+void ath12k_telemetry_deinit(struct ath12k_base *ab);
+struct ath12k_telemetry_ctx *get_telemetry_context(void);
+int ath12k_telemetry_sawf_sla_samples_config(struct ath12k_sla_samples_cfg param);
+int ath12k_telemetry_sawf_sla_thershold_config(struct ath12k_sla_thershold_cfg param);
+int ath12k_telemetry_sawf_sla_detection_config(struct ath12k_sla_detect_cfg param);
+bool ath12k_telemetry_get_sla_num_pkts(u32 *pkt_num);
+bool ath12k_telemetry_get_sla_mov_avg_num_pkt(u32 *mov_avg);
+#else /* CONFIG_ATH12K_SAWF */
+
+static inline void ath12k_telemetry_init(struct ath12k_base *ab) {
+	return;
+}
+
+static inline void ath12k_telemetry_deinit(struct ath12k_base *ab) {
+	return;
+}
+
+#endif /* CONFIG_ATH12K_SAWF */
+#endif /* ATH12K_TELEMETRY_H */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/telemetry_agent_if.h	2024-01-19 17:01:19.873847249 +0100
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_TELEMETRY_AGENT_IF_H
+#define ATH12K_TELEMETRY_AGENT_IF_H
+/* telemetry agent module init/deinit */
+u32 ath12k_telemetry_agent_init(void);
+u32 ath12k_telemetry_agent_deinit(void);
+/* per-{tid, msduq} SAWF statistics getters */
+int ath12k_sawf_get_tput_stats(void *soc, void *arg, u64 *in_bytes,
+			       u64 *in_cnt, u64 *tx_bytes,
+			       u64 *tx_cnt, u8 tid, u8 msduq);
+int ath12k_sawf_get_mpdu_stats(void *soc, void *arg, u64 *svc_int_pass,
+			       u64 *svc_int_fail, u64 *burst_pass,
+			       u64 *burst_fail, u8 tid, u8 msduq);
+int ath12k_sawf_get_drop_stats(void *soc, void *arg, u64 *pass,
+			       u64 *drop, u64 *drop_ttl,
+			       u8 tid, u8 msduq);
+void ath12k_sawf_notify_breach(u8 *mac_addr, u8 svc_id, u8 param,
+			       bool set_clear, u8 tid);
+void *ath12k_telemetry_peer_ctx_alloc(void *peer, void *sawf_stats,
+				      u8 *mac_addr,
+				      u8 svc_id, u8 hostq_id);
+void ath12k_telemetry_peer_ctx_free(void *telemetry_peer_ctx);
+bool ath12k_telemetry_update_tid_msduq(void *telemetry_peer_ctx,
+				       u8 hostq_id, u8 tid, u8 msduq_idx);
+bool ath12k_telemetry_set_mov_avg_params(u32 num_pkt, u32 num_win);
+bool ath12k_telemetry_set_sla_params(u32 num_pkt, u32 time_sec);
+bool ath12k_telemetry_set_sla_cfg(u8 svc_id, u8 min_tput_rate, u8 max_tput_rate,
+				  u8 burst_size, u8 svc_interval, u8 delay_bound,
+				  u8 msdu_ttl, u8 msdu_rate_loss);
+bool ath12k_telemetry_set_svclass_cfg(bool enable, u8 svc_id,
+				      u32 min_tput_rate,
+				      u32 max_tput_rate,
+				      u32 burst_size,
+				      u32 svc_interval,
+				      u32 delay_bound,
+				      u32 msdu_ttl,
+				      u32 msdu_rate_loss);
+bool ath12k_telemetry_set_sla_detect_cfg(u8 detect_type,
+					 u8 min_tput_rate,
+					 u8 max_tput_rate,
+					 u8 burst_size,
+					 u8 svc_interval,
+					 u8 delay_bound,
+					 u8 msdu_ttl,
+					 u8 msdu_rate_loss);
+bool ath12k_telemetry_update_delay(void *telemetry_ctx, u8 tid,
+				   u8 queue, u64 pass, u64 fail);
+bool ath12k_telemetry_update_delay_mvng(void *telemetry_ctx,
+					u8 tid, u8 queue,
+					u64 nwdelay_winavg,
+					u64 swdelay_winavg,
+					u64 hwdelay_winavg);
+bool ath12k_telemetry_update_msdu_drop(void *telemetry_ctx, u8 tid,
+				       u8 queue, u64 success,
+				       u64 failure_drop,
+				       u64 failure_ttl);
+bool ath12k_telemetry_get_rate(void *telemetry_ctx, u8 tid, u8 queue,
+			       u32 *egress_rate, u32 *ingress_rate);
+bool ath12k_telemetry_get_mov_avg(void *telemetry_ctx, u8 tid, u8 queue,
+				  u32 *nwdelay_avg, u32 *swdelay_avg,
+				  u32 *hwdelay_avg);
+int ath12k_telemetry_reset_peer_stats(u8 *peer_mac);
+#endif /* ATH12K_TELEMETRY_AGENT_IF_H */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/testmode.c	2024-01-19 17:01:19.877847358 +0100
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "testmode.h"
+#include <net/netlink.h>
+#include "debug.h"
+#include "wmi.h"
+#include "hw.h"
+#include "core.h"
+#include "hif.h"
+#include "testmode_i.h"
+
+#define ATH12K_FTM_SEGHDR_CURRENT_SEQ		GENMASK(3, 0)
+#define ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS	GENMASK(7, 4)
+
+static const struct nla_policy ath12k_tm_policy[ATH12K_TM_ATTR_MAX + 1] = {
+	[ATH12K_TM_ATTR_CMD]		= { .type = NLA_U32 },
+	[ATH12K_TM_ATTR_DATA]		= { .type = NLA_BINARY,
+					    .len = ATH12K_TM_DATA_MAX_LEN },
+	[ATH12K_TM_ATTR_WMI_CMDID]	= { .type = NLA_U32 },
+	[ATH12K_TM_ATTR_VERSION_MAJOR]	= { .type = NLA_U32 },
+	[ATH12K_TM_ATTR_VERSION_MINOR]	= { .type = NLA_U32 },
+	[ATH12K_TM_ATTR_FWLOG]		= { .type = NLA_BINARY,
+					    .len = 2048 },	/* same value as ATH12K_FTM_EVENT_MAX_BUF_LENGTH */
+	[ATH12K_TM_ATTR_LINK_IDX]		= { .type = NLA_U8 },
+	[ATH12K_TM_ATTR_DUAL_MAC]               = { .type = NLA_U8 },
+};
+
+/* Forward a firmware log buffer to userspace as a testmode event;
+ * silently dropped when no radio is active or allocation fails. */
+void ath12k_fwlog_write(struct ath12k_base *ab, u8 *data, int len)
+{
+	struct sk_buff *nl_skb;
+	int ret, i;
+	struct ath12k *ar = NULL;
+	struct ath12k_pdev *pdev;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		if (pdev && pdev->ar) {
+			ar = pdev->ar;
+			break;
+		}
+	}
+	if (!ar)
+		return;
+
+	nl_skb = cfg80211_testmode_alloc_event_skb(ar->ah->hw->wiphy, len, GFP_ATOMIC);
+	if (!nl_skb) {
+		ath12k_warn(ab, "failed to allocate skb for fwlog event\n");
+		return;
+	}
+
+	ret = nla_put(nl_skb, ATH12K_TM_ATTR_FWLOG, len, data);
+	if (ret) {
+		ath12k_warn(ab, "failed to put fwlog wmi event to nl: %d\n", ret);
+		kfree_skb(nl_skb);
+		return;
+	}
+
+	if (ab->ag->mlo_capable) {
+		ret = nla_put_u8(nl_skb, ATH12K_TM_ATTR_LINK_IDX, ar->link_idx);
+		if (ret) {
+			ath12k_warn(ab, "failed to put link idx wmi event to nl: %d\n", ret);
+			kfree_skb(nl_skb);
+			return;
+		}
+	}
+
+	if (ab->num_radios == 2) {	/* only check the put result when it was attempted */
+		ret = nla_put_u8(nl_skb, ATH12K_TM_ATTR_DUAL_MAC, ab->num_radios);
+		if (ret) {
+			ath12k_warn(ab, "failed to put dual mac wmi event to nl: %d\n", ret);
+			kfree_skb(nl_skb);
+			return;
+		}
+	}
+
+	cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
+
+/* Return the first radio found in ATH12K_STATE_TM; if none is in testmode
+ * state, the last probed pdev's radio (possibly NULL) is returned.
+ */
+struct ath12k *ath12k_tm_get_ar(struct ath12k_base *ab)
+{
+	struct ath12k_pdev *pdev;
+	struct ath12k *ar = NULL;
+	int i;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		ar = pdev->ar;
+		if (ar) {
+			if (ar->state == ATH12K_STATE_TM)
+				break;
+		}
+	}
+	return ar;
+}
+/* This function handles unsegmented events. Data in various events are aggregated
+ * in application layer, this event is unsegmented from host perspective.
+ */
+void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id, struct sk_buff *skb)
+{
+	struct sk_buff *nl_skb;
+	struct ath12k *ar;
+
+	ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+		   "testmode event wmi cmd_id %d skb length %d\n",
+		   cmd_id, skb->len);
+
+	ath12k_dbg_dump(ab, ATH12K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+	ar = ath12k_tm_get_ar(ab);
+	if (!ar) {
+		ath12k_warn(ab, "testmode event not handled due to invalid pdev\n");
+		return;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+
+	/* data_lock is held across alloc + put + event delivery */
+	nl_skb = cfg80211_testmode_alloc_event_skb(ar->ah->hw->wiphy,
+						   2 * nla_total_size(sizeof(u32)) +
+						   nla_total_size(skb->len),
+						   GFP_ATOMIC);
+	if (!nl_skb) {
+		ath12k_warn(ab,
+			    "failed to allocate skb for testmode wmi event\n");
+		goto out;
+	}
+	if (nla_put_u32(nl_skb, ATH12K_TM_ATTR_CMD, ATH12K_TM_CMD_WMI) ||
+	    nla_put_u32(nl_skb, ATH12K_TM_ATTR_WMI_CMDID, cmd_id) ||
+	    nla_put(nl_skb, ATH12K_TM_ATTR_DATA, skb->len, skb->data)) {
+		ath12k_warn(ab, "failed to populate testmode unsegmented event\n");
+		kfree_skb(nl_skb);
+		goto out;
+	}
+
+
+	cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+	spin_unlock_bh(&ar->data_lock);
+	return;
+
+out:
+	spin_unlock_bh(&ar->data_lock);
+	ath12k_warn(ab, "Failed to send testmode event to higher layers\n");
+}
+
+/* This function handles segmented events.
+ * Data of various events received from fw is aggregated and
+ * sent to application layer.
+ */
+int ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
+			    const struct wmi_ftm_event_msg *ftm_msg,
+			    u16 length)
+{
+	struct sk_buff *nl_skb;
+	int ret = 0;
+	struct ath12k *ar;
+	u8 const *buf_pos;
+	u16 datalen;
+	u8 total_segments, current_seq;
+	u32 data_pos;
+	u32 pdev_id;
+
+	ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+		   "testmode event wmi cmd_id %d ftm event msg %pK datalen %d\n",
+		   cmd_id, ftm_msg, length);
+	ath12k_dbg_dump(ab, ATH12K_DBG_TESTMODE, NULL, "", ftm_msg, length);
+	pdev_id = DP_HW2SW_MACID(ftm_msg->seg_hdr.pdev_id);
+
+	if (pdev_id >= ab->num_radios) {
+		ath12k_warn(ab, "testmode event not handled due to invalid pdev id\n");
+		return -EINVAL;
+	}
+
+	ar = ab->pdevs[pdev_id].ar;
+	if (!ar) {
+		ath12k_warn(ab, "testmode event not handled due to absence of pdev\n");
+		return -ENODEV;
+	}
+
+	current_seq = FIELD_GET(ATH12K_FTM_SEGHDR_CURRENT_SEQ,
+				ftm_msg->seg_hdr.segmentinfo);
+	total_segments = FIELD_GET(ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS,
+				   ftm_msg->seg_hdr.segmentinfo);
+	datalen = length - (sizeof(struct wmi_ftm_seg_hdr));
+	buf_pos = ftm_msg->data;
+
+	spin_lock_bh(&ar->data_lock);
+	if (current_seq == 0) {	/* first segment: reset reassembly state */
+		ab->ftm_event_obj.expected_seq = 0;
+		ab->ftm_event_obj.data_pos = 0;
+	}
+
+	data_pos = ab->ftm_event_obj.data_pos;
+
+	if ((data_pos + datalen) > ATH12K_FTM_EVENT_MAX_BUF_LENGTH) {
+		ath12k_warn(ab, "Invalid event length data_pos[%d] datalen[%d]\n",
+			    data_pos, datalen);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	memcpy(&ab->ftm_event_obj.eventdata[data_pos], buf_pos, datalen);
+	data_pos += datalen;
+
+	if (++ab->ftm_event_obj.expected_seq != total_segments) {
+		ab->ftm_event_obj.data_pos = data_pos;
+		ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+			   "partial data received current_seq[%d], total_seg[%d]\n",
+			   current_seq, total_segments);
+		goto out;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_TESTMODE, "total data length[%d] = [%d]\n",
+		   data_pos, ftm_msg->seg_hdr.len);
+	nl_skb = cfg80211_testmode_alloc_event_skb(ar->ah->hw->wiphy,
+						   2 * nla_total_size(sizeof(u32)) +
+						   nla_total_size(data_pos),
+						   GFP_ATOMIC);
+	if (!nl_skb) {
+		ath12k_warn(ab,
+			    "failed to allocate skb for testmode wmi event\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (nla_put_u32(nl_skb, ATH12K_TM_ATTR_CMD,
+			ATH12K_TM_CMD_WMI_FTM) ||
+	    nla_put_u32(nl_skb, ATH12K_TM_ATTR_WMI_CMDID, cmd_id) ||
+	    nla_put(nl_skb, ATH12K_TM_ATTR_DATA, data_pos,
+		    &ab->ftm_event_obj.eventdata[0])) {
+		ath12k_warn(ab, "failed to populate testmode event");
+		kfree_skb(nl_skb);
+		ret = -ENOBUFS;
+		goto out;
+	}
+
+	cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+out:
+	spin_unlock_bh(&ar->data_lock);
+	return ret;
+}
+static int ath12k_tm_cmd_get_version(struct ath12k *ar, struct nlattr *tb[])
+{
+	struct sk_buff *skb;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
+		   "testmode cmd get version_major %d version_minor %d\n",
+		   ATH12K_TESTMODE_VERSION_MAJOR,
+		   ATH12K_TESTMODE_VERSION_MINOR);
+
+	skb = cfg80211_testmode_alloc_reply_skb(ar->ah->hw->wiphy,
+						nla_total_size(sizeof(u32)));
+	if (!skb)
+		return -ENOMEM;
+
+	/* a failed nla_put means the reply skb ran out of room */
+	if (nla_put_u32(skb, ATH12K_TM_ATTR_VERSION_MAJOR,
+			ATH12K_TESTMODE_VERSION_MAJOR) ||
+	    nla_put_u32(skb, ATH12K_TM_ATTR_VERSION_MINOR,
+			ATH12K_TESTMODE_VERSION_MINOR)) {
+		kfree_skb(skb);
+		return -ENOBUFS;
+	}
+
+	return cfg80211_testmode_reply(skb);
+}
+
+static int ath12k_tm_cmd_testmode_start(struct ath12k *ar, struct nlattr *tb[])
+{
+	int ret;
+	ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE, " enter testmode cmd fw start\n");
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state == ATH12K_STATE_TM) {
+		ret = -EALREADY;
+		goto err;
+	}
+
+	/* start utf only when the driver is not in use */
+	if (ar->state != ATH12K_STATE_OFF) {
+		ret = -EBUSY;
+		goto err;
+	}
+
+	ar->ab->ftm_event_obj.eventdata =
+		kzalloc(ATH12K_FTM_EVENT_MAX_BUF_LENGTH, GFP_KERNEL);	/* reassembly buffer for segmented FTM events */
+	if (!ar->ab->ftm_event_obj.eventdata) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ar->state = ATH12K_STATE_TM;	/* radio is now dedicated to factory test mode */
+	ar->ftm_msgref = 0;	/* restart the FTM message reference counter */
+	mutex_unlock(&ar->conf_mutex);
+	ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE, " enter testmode cmd started\n");
+	return 0;
+err:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+/* Inject a userspace-built WMI command, fixing up pdev/vdev id in its TLV */
+static int ath12k_tm_cmd_wmi(struct ath12k *ar, struct nlattr *tb[],
+			     struct ieee80211_vif *vif, u8 link_id)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct sk_buff *skb;
+	struct ath12k_vif *ahvif;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_hw *ah = ar->ah;
+	u32 cmd_id, buf_len;
+	int ret, tag;
+	void *buf;
+	u32 *ptr;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (!tb[ATH12K_TM_ATTR_DATA]) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!tb[ATH12K_TM_ATTR_WMI_CMDID]) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	buf = nla_data(tb[ATH12K_TM_ATTR_DATA]);
+	buf_len = nla_len(tb[ATH12K_TM_ATTR_DATA]);
+	if (!buf_len) {
+		ath12k_warn(ar->ab, "No data present in testmode command\n");
+		ret = -EINVAL;
+		goto out;
+	}
+	cmd_id = nla_get_u32(tb[ATH12K_TM_ATTR_WMI_CMDID]);
+
+	ptr = (u32 *)buf;
+	tag = FIELD_GET(WMI_TLV_TAG, *ptr);
+	ptr++;
+
+	if (tag == WMI_TAG_PDEV_SET_PARAM_CMD)
+		*ptr = ar->pdev->pdev_id;	/* patch host pdev id into the TLV payload */
+
+	if (ar->ab->fw_mode != ATH12K_FIRMWARE_MODE_FTM &&
+	    (tag == WMI_TAG_VDEV_SET_PARAM_CMD || tag == WMI_TAG_UNIT_TEST_CMD)) {
+		if (vif) {
+			mutex_lock(&ah->conf_mutex);
+			ahvif = (struct ath12k_vif *)vif->drv_priv;
+			arvif = ahvif->link[link_id];
+			if (!arvif) {
+				ath12k_warn(ar->ab, "failed to find link interface\n");
+				mutex_unlock(&ah->conf_mutex);
+				ret = -EINVAL;
+				goto out;
+			}
+			*ptr = arvif->vdev_id;	/* patch host vdev id into the TLV payload */
+			mutex_unlock(&ah->conf_mutex);
+		} else {
+			ret = -EINVAL;
+			ath12k_warn(ar->ab, "vdev is not up for given vdev id, so failed to send wmi command (testmode): %d\n",
+				    ret);
+			goto out;
+		}
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
+		   "testmode cmd wmi cmd_id %d  buf length %d\n",
+		   cmd_id, buf_len);
+
+	ath12k_dbg_dump(ar->ab, ATH12K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+	if (!skb) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	memcpy(skb->data, buf, buf_len);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, cmd_id);
+	if (ret) {
+		dev_kfree_skb(skb);
+		ath12k_warn(ar->ab, "failed to transmit wmi command (testmode): %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = 0;
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static int ath12k_tm_cmd_process_ftm(struct ath12k *ar, struct nlattr *tb[])
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct sk_buff *skb;
+	u32 cmd_id, buf_len, hdr_info;
+	int ret;
+	void *buf;
+
+	/* NOTE(review): a zero-length payload sends nothing and returns 0 */
+	u8 segnumber = 0, seginfo;
+	u16 chunk_len, total_bytes, num_segments;
+	u8 *bufpos;
+	struct wmi_ftm_cmd *ftm_cmd;
+	mutex_lock(&ar->conf_mutex);
+	ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE, "ar->state  %d\n", ar->state);
+	if (ar->state != ATH12K_STATE_TM) {
+		ret = -ENETDOWN;
+		goto out;
+	}
+
+	if (!tb[ATH12K_TM_ATTR_DATA]) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	buf = nla_data(tb[ATH12K_TM_ATTR_DATA]);
+	buf_len = nla_len(tb[ATH12K_TM_ATTR_DATA]);
+	cmd_id = WMI_PDEV_UTF_CMDID;
+	ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
+		"testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
+		cmd_id, buf, buf_len);
+	ath12k_dbg_dump(ar->ab, ATH12K_DBG_TESTMODE, NULL, "", buf, buf_len);
+	bufpos = buf;
+	total_bytes = buf_len;
+	num_segments = total_bytes / MAX_WMI_UTF_LEN;
+	if (buf_len - (num_segments * MAX_WMI_UTF_LEN))
+		num_segments++;	/* account for the trailing partial chunk */
+
+	while (buf_len) {
+		if (buf_len > MAX_WMI_UTF_LEN)
+			chunk_len = MAX_WMI_UTF_LEN;	/* MAX message */
+		else
+			chunk_len = buf_len;
+
+		skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, (chunk_len +
+					sizeof(struct wmi_ftm_cmd)));
+
+		if (!skb) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		ftm_cmd = (struct wmi_ftm_cmd *)skb->data;
+		hdr_info = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+			FIELD_PREP(WMI_TLV_LEN, (chunk_len +
+				sizeof(struct wmi_ftm_seg_hdr)));
+		ftm_cmd->tlv_header = hdr_info;
+		ftm_cmd->seg_hdr.len = total_bytes;
+		ftm_cmd->seg_hdr.msgref = ar->ftm_msgref;
+		seginfo = FIELD_PREP(ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS, num_segments) |
+			FIELD_PREP(ATH12K_FTM_SEGHDR_CURRENT_SEQ, segnumber);
+		ftm_cmd->seg_hdr.segmentinfo = seginfo;
+		segnumber++;
+		memcpy(&ftm_cmd->data, bufpos, chunk_len);
+		ret = ath12k_wmi_cmd_send(wmi, skb, cmd_id);
+		if (ret) {
+			ath12k_warn(ar->ab, "ftm wmi command fail: %d\n", ret);
+			goto out;
+		}
+
+		buf_len -= chunk_len;
+		bufpos += chunk_len;
+	}
+	++ar->ftm_msgref;	/* one msgref per fully queued FTM message */
+	ret = 0;
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+/* cfg80211 testmode entry point: dispatches ATH12K_TM_CMD_* from userspace */
+int ath12k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  /*u8 link_id, */void *data, int len)
+{
+	u8 link_id = 0;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar = NULL;	/* stays NULL when no radio can be resolved */
+	struct ath12k_base *ab;
+	struct nlattr *tb[ATH12K_TM_ATTR_MAX + 1];
+	enum ath12k_tm_cmd cmd_type;
+	int ret = 0;
+
+	ret = nla_parse(tb, ATH12K_TM_ATTR_MAX, data, len, ath12k_tm_policy,
+			NULL);
+	if (ret)
+		return ret;
+
+	if (!tb[ATH12K_TM_ATTR_CMD])
+		return -EINVAL;
+
+	cmd_type = nla_get_u32(tb[ATH12K_TM_ATTR_CMD]);
+	mutex_lock(&ah->conf_mutex);
+
+	if (vif == NULL && (cmd_type == ATH12K_TM_CMD_WMI_FTM ||
+	    cmd_type == ATH12K_TM_CMD_TESTMODE_START ||
+	    cmd_type == ATH12K_TM_CMD_WMI)) {
+		if (ah->num_radio)
+			ar = ah->radio;
+	} else {
+		ar = ath12k_get_ar_by_vif(hw, vif, link_id);
+	}
+
+	if (!ar) {
+		ath12k_err(NULL,
+			   "unable to determine device\n");
+		mutex_unlock(&ah->conf_mutex);
+		return -EINVAL;
+	}
+
+	if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
+		ath12k_warn(ar->ab, "invalid link id specified\n");
+		mutex_unlock(&ah->conf_mutex);
+		return -EINVAL;
+	}
+
+	ab = ar->ab;
+	mutex_unlock(&ah->conf_mutex);
+
+	switch (cmd_type) {
+	case ATH12K_TM_CMD_WMI:
+		return ath12k_tm_cmd_wmi(ar, tb, vif, link_id);
+	case ATH12K_TM_CMD_TESTMODE_START:
+		return ath12k_tm_cmd_testmode_start(ar, tb);
+	case ATH12K_TM_CMD_GET_VERSION:
+		return ath12k_tm_cmd_get_version(ar, tb);
+	case ATH12K_TM_CMD_WMI_FTM:
+		set_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags);
+		return ath12k_tm_cmd_process_ftm(ar, tb);
+	case ATH12K_TM_CMD_TESTMODE_STOP:
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/testmode.h	2024-01-19 17:01:19.877847358 +0100
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "hif.h"
+
+#ifdef CONFIG_NL80211_TESTMODE
+
+void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id, struct sk_buff *skb);
+int ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
+			    const struct wmi_ftm_event_msg *ftm_msg,
+			    u16 length);
+int ath12k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  /* u8 link_id, */void *data, int len);
+void ath12k_fwlog_write(struct ath12k_base *ab, u8 *data, int len);
+#else /* !CONFIG_NL80211_TESTMODE */
+
+static inline void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id,
+				                    struct sk_buff *skb)
+{
+	return;
+}
+
+static inline int ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
+					  const struct wmi_ftm_event_msg *msg,
+					  u16 length)
+{
+	return 0;
+}
+static inline int ath12k_tm_cmd(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				/*u8 link_id, */void *data, int len)
+{
+	return 0;
+}
+
+static inline void ath12k_fwlog_write(struct ath12k_base *ab, u8 *data, int len)
+{
+
+}
+#endif /* CONFIG_NL80211_TESTMODE */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/testmode_i.h	2024-01-19 17:01:19.877847358 +0100
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/* "API" level of the ath12k testmode interface. Bump it after every
+ * incompatible interface change.
+ */
+#define ATH12K_TESTMODE_VERSION_MAJOR 1
+
+/* Bump this after every _compatible_ interface change, for example
+ * addition of a new command or an attribute.
+ */
+#define ATH12K_TESTMODE_VERSION_MINOR 0
+
+#define ATH12K_TM_DATA_MAX_LEN		5000
+#define ATH12K_FTM_EVENT_MAX_BUF_LENGTH 2048
+
+enum ath12k_tm_attr {
+	__ATH12K_TM_ATTR_INVALID		= 0,
+	ATH12K_TM_ATTR_CMD			= 1,
+	ATH12K_TM_ATTR_DATA			= 2,
+	ATH12K_TM_ATTR_WMI_CMDID		= 3,
+	ATH12K_TM_ATTR_VERSION_MAJOR		= 4,
+	ATH12K_TM_ATTR_VERSION_MINOR		= 5,
+	ATH12K_TM_ATTR_WMI_OP_VERSION		= 6,
+	ATH12K_TM_ATTR_FWLOG                    = 7,
+	ATH12K_TM_ATTR_LINK_IDX			= 8,
+	ATH12K_TM_ATTR_DUAL_MAC			= 9,
+
+	/* keep last */
+	__ATH12K_TM_ATTR_AFTER_LAST,
+	ATH12K_TM_ATTR_MAX		= __ATH12K_TM_ATTR_AFTER_LAST - 1,
+};
+
+/* All ath12k testmode interface commands specified in
+ * ATH12K_TM_ATTR_CMD
+ */
+enum ath12k_tm_cmd {
+	/* Returns the supported ath12k testmode interface version in
+	 * ATH12K_TM_ATTR_VERSION. Always guaranteed to work. User space
+	 * uses this to verify it's using the correct version of the
+	 * testmode interface
+	 */
+	ATH12K_TM_CMD_GET_VERSION = 0,
+
+	/* Boots the UTF firmware, the netdev interface must be down at the time. */
+	ATH12K_TM_CMD_TESTMODE_START = 1,
+
+	/* Shuts down the UTF firmware and puts the driver back into OFF state. */
+	ATH12K_TM_CMD_TESTMODE_STOP = 2,
+
+	/* The command used to transmit a WMI command to the firmware and
+	 * the event to receive WMI events from the firmware. Without
+	 * struct wmi_cmd_hdr header, only the WMI payload. Command id is
+	 * provided with ATH12K_TM_ATTR_WMI_CMDID and payload in
+	 * ATH12K_TM_ATTR_DATA.
+	 */
+	ATH12K_TM_CMD_WMI = 3,
+
+	/* The command used to transmit a FTM WMI command to the firmware
+	 * and the event to receive WMI events from the firmware. The data
+	 * received only contains the payload. Need to add the tlv
+	 * header and send the cmd to fw with command id WMI_PDEV_UTF_CMDID.
+	 */
+	ATH12K_TM_CMD_WMI_FTM = 4,
+};
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/thermal.c	2024-01-30 08:24:48.934651329 +0100
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include "core.h"
+#include "debug.h"
+
+static int
+ath12k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+				      unsigned long *state)
+{
+	*state = ATH12K_THERMAL_THROTTLE_MAX;	/* max duty-cycle-off percentage */
+
+	return 0;
+}
+
+static int
+ath12k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+				      unsigned long *state)
+{
+	struct ath12k *ar = cdev->devdata;
+
+	mutex_lock(&ar->conf_mutex);
+	*state = ar->thermal.throttle_state;	/* throttle_state is protected by conf_mutex */
+	mutex_unlock(&ar->conf_mutex);
+
+	return 0;
+}
+
+static int
+ath12k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+				      unsigned long throttle_state)
+{
+	struct ath12k *ar = cdev->devdata;
+	int ret;
+
+	if (throttle_state > ATH12K_THERMAL_THROTTLE_MAX) {
+		ath12k_warn(ar->ab, "throttle state %ld is exceeding the limit %d\n",
+			    throttle_state, ATH12K_THERMAL_THROTTLE_MAX);
+		return -EINVAL;
+	}
+	mutex_lock(&ar->conf_mutex);
+	ret = ath12k_thermal_set_throttling(ar, throttle_state);
+	if (ret == 0)
+		ar->thermal.throttle_state = throttle_state;	/* cache only on successful fw update */
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static const struct thermal_cooling_device_ops ath12k_thermal_ops = {	/* thermal core callbacks */
+	.get_max_state = ath12k_thermal_get_max_throttle_state,
+	.get_cur_state = ath12k_thermal_get_cur_throttle_state,
+	.set_cur_state = ath12k_thermal_set_cur_throttle_state,
+};
+
+static ssize_t ath12k_thermal_show_temp(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct ath12k *ar = dev_get_drvdata(dev);
+	int ret, temperature;
+	unsigned long time_left;
+
+	mutex_lock(&ar->conf_mutex);
+
+	/* Can't get temperature when the card is off, unless in FTM mode */
+	if (!ath12k_ftm_mode && ar->state != ATH12K_STATE_ON) {
+		ret = -ENETDOWN;
+		goto out;
+	}
+
+	reinit_completion(&ar->thermal.wmi_sync);	/* completed by ath12k_thermal_event_temperature() */
+	ret = ath12k_wmi_send_pdev_temperature_cmd(ar);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to read temperature %d\n", ret);
+		goto out;
+	}
+
+	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) {
+		ret = -ESHUTDOWN;
+		goto out;
+	}
+
+	time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
+						ATH12K_THERMAL_SYNC_TIMEOUT_HZ);
+	if (!time_left) {
+		ath12k_warn(ar->ab, "failed to synchronize thermal read\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+	temperature = ar->thermal.temperature;
+	spin_unlock_bh(&ar->data_lock);
+
+	/* display in millidegree Celsius; sysfs_emit is the preferred formatter */
+	ret = sysfs_emit(buf, "%d\n", temperature * 1000);
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+void ath12k_thermal_event_temperature(struct ath12k *ar, int temperature)
+{
+	spin_lock_bh(&ar->data_lock);
+	ar->thermal.temperature = temperature;
+	spin_unlock_bh(&ar->data_lock);
+	complete(&ar->thermal.wmi_sync);	/* wake the ath12k_thermal_show_temp() waiter */
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, ath12k_thermal_show_temp,
+			  NULL, 0);	/* read-only hwmon temperature input */
+
+static struct attribute *ath12k_hwmon_attrs[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(ath12k_hwmon);
+
+int ath12k_thermal_set_throttling(struct ath12k *ar, u32 throttle_state)
+{
+	struct ath12k_base *sc = ar->ab;
+	struct ath12k_wmi_thermal_mitigation_arg param;
+	int ret = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (ar->state != ATH12K_STATE_ON)	/* fw not running: nothing to configure */
+		return 0;
+
+	memset(&param, 0, sizeof(param));
+	param.pdev_id = ar->pdev->pdev_id;
+	param.enable = throttle_state ? 1 : 0;
+	param.dc = ATH12K_THERMAL_DEFAULT_DUTY_CYCLE;
+	param.dc_per_event = 0xFFFFFFFF;
+
+	param.levelconf[0].tmplwm = ATH12K_THERMAL_TEMP_LOW_MARK;
+	param.levelconf[0].tmphwm = ATH12K_THERMAL_TEMP_HIGH_MARK;
+	param.levelconf[0].dcoffpercent = throttle_state;	/* throttle level == % of duty cycle off */
+	param.levelconf[0].priority = 0; /* disable all data tx queues */
+	ret = ath12k_wmi_send_thermal_mitigation_cmd(ar, &param);
+	if (ret) {
+		ath12k_warn(sc, "failed to send thermal mitigation duty cycle %u ret %d\n",
+			    throttle_state, ret);
+	}
+	return ret;
+}
+
+int ath12k_thermal_register(struct ath12k_base *ab)
+{
+	struct thermal_cooling_device *cdev;
+	struct ath12k *ar;
+	struct ath12k_pdev *pdev;
+	struct ieee80211_hw *hw;
+	int i, ret;
+	char pdev_name[20];
+
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		ar = pdev->ar;
+		if (!ar)
+			continue;
+
+		hw = ar->ah->hw;
+		memset(pdev_name, 0, sizeof(pdev_name));
+
+		cdev = thermal_cooling_device_register("ath12k_thermal", ar,
+						       &ath12k_thermal_ops);
+
+		if (IS_ERR(cdev)) {
+			ath12k_err(ab, "failed to setup thermal device result: %ld\n",
+				   PTR_ERR(cdev));
+			ret = -EINVAL;
+			goto err_thermal_destroy;
+		}
+
+		snprintf(pdev_name, sizeof(pdev_name), "%s%d", "cooling_device",
+			 ar->link_idx);
+
+		ar->thermal.cdev = cdev;
+
+		ret = sysfs_create_link(&hw->wiphy->dev.kobj, &cdev->device.kobj,
+					pdev_name);
+		if (ret) {
+			ath12k_err(ab, "failed to create cooling device symlink\n");
+			goto err_thermal_destroy;
+		}
+
+		if (!IS_REACHABLE(CONFIG_HWMON))
+			continue;	/* no hwmon: still register cooling devices for the other radios */
+
+		ar->thermal.hwmon_dev = hwmon_device_register_with_groups(&hw->wiphy->dev,
+									  "ath12k_hwmon", ar,
+									  ath12k_hwmon_groups);
+		if (IS_ERR(ar->thermal.hwmon_dev)) {
+			ath12k_err(ar->ab, "failed to register hwmon device: %ld\n",
+				   PTR_ERR(ar->thermal.hwmon_dev));
+			ar->thermal.hwmon_dev = NULL;
+			ret = -EINVAL;
+			goto err_thermal_destroy;
+		}
+	}
+
+	return 0;
+
+err_thermal_destroy:
+	ath12k_thermal_unregister(ab);
+	return ret;
+}
+
+void ath12k_thermal_unregister(struct ath12k_base *ab)
+{
+	struct ath12k *ar;
+	struct ath12k_pdev *pdev;
+	struct ieee80211_hw *hw;
+	int i;
+	char pdev_name[20];
+
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		ar = pdev->ar;
+		if (!ar)
+			continue;
+
+		hw = ar->ah->hw;
+		memset(pdev_name, 0, sizeof(pdev_name));
+
+		snprintf(pdev_name, sizeof(pdev_name), "%s%d", "cooling_device",
+			 ar->link_idx);
+
+		/* teardown mirrors register order: hwmon, symlink, cooling device */
+		if (ar->thermal.hwmon_dev) {
+			hwmon_device_unregister(ar->thermal.hwmon_dev);
+			ar->thermal.hwmon_dev = NULL;
+		}
+
+		sysfs_remove_link(&hw->wiphy->dev.kobj, pdev_name);
+		thermal_cooling_device_unregister(ar->thermal.cdev);
+	}
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/thermal.h	2024-01-19 17:01:19.877847358 +0100
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH12K_THERMAL_
+#define _ATH12K_THERMAL_
+
+#define ATH12K_THERMAL_TEMP_LOW_MARK (-100)	/* parenthesized: negative literal */
+#define ATH12K_THERMAL_TEMP_HIGH_MARK 150
+#define ATH12K_THERMAL_THROTTLE_MAX     100
+#define ATH12K_THERMAL_DEFAULT_DUTY_CYCLE 100
+#define ATH12K_HWMON_NAME_LEN           15
+#define ATH12K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ)
+
+#define THERMAL_LEVELS  1
+struct thermal_mitigation_params {
+	u32 pdev_id;
+	u32 enable;
+	u32 dc;
+	u32 dc_per_event;
+	struct tt_level_config levelconf[THERMAL_LEVELS];
+};
+
+struct ath12k_thermal {
+	struct thermal_cooling_device *cdev;
+	struct completion wmi_sync;	/* signalled by ath12k_thermal_event_temperature() */
+	struct device *hwmon_dev;
+
+	/* protected by conf_mutex */
+	u32 throttle_state;
+	/* temperature value in Celsius degrees
+	 * protected by data_lock
+	 */
+	int temperature;
+};
+
+#if IS_REACHABLE(CONFIG_THERMAL)
+int ath12k_thermal_register(struct ath12k_base *sc);
+void ath12k_thermal_unregister(struct ath12k_base *sc);
+int ath12k_thermal_set_throttling(struct ath12k *ar, u32 throttle_state);
+void ath12k_thermal_event_temperature(struct ath12k *ar, int temperature);
+#else /* !IS_REACHABLE(CONFIG_THERMAL) */
+static inline int ath12k_thermal_register(struct ath12k_base *sc)
+{
+	return 0;
+}
+
+static inline void ath12k_thermal_unregister(struct ath12k_base *sc)
+{
+}
+
+static inline int ath12k_thermal_set_throttling(struct ath12k *ar, u32 throttle_state)
+{
+	return 0;
+}
+
+static inline void ath12k_thermal_event_temperature(struct ath12k *ar,
+						    int temperature)
+{
+}
+
+#endif /* IS_REACHABLE(CONFIG_THERMAL) */
+#endif /* _ATH12K_THERMAL_ */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/umac_reset.c	2024-03-18 14:40:14.859741552 +0100
@@ -0,0 +1,502 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/remoteproc.h>
+#include <linux/firmware.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include "core.h"
+#include "coredump.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "debug.h"
+#include "hif.h"
+#include "dp.h"
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+#include "ppe.h"
+#endif
+
+/* Send the HTT UMAC-reset prerequisite setup command to the target.
+ *
+ * Advertises the MSI vector and the host/firmware shared-memory region
+ * described by @params. Returns 0 on success or a negative errno; the
+ * skb is freed on send failure.
+ */
+int ath12k_htt_umac_reset_msg_send(struct ath12k_base *ab,
+				   struct ath12k_htt_umac_reset_setup_cmd_params *params)
+{
+	struct sk_buff *skb;
+	struct htt_dp_umac_reset_setup_req_cmd *cmd;
+	int ret;
+	int len = sizeof(*cmd);
+
+	skb = ath12k_htc_alloc_skb(ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, len);
+	cmd = (struct htt_dp_umac_reset_setup_req_cmd *)skb->data;
+	cmd->msg_info = u32_encode_bits(HTT_H2T_MSG_TYPE_UMAC_RESET_PREREQUISITE_SETUP,
+					HTT_H2T_MSG_TYPE_SET);
+	/* method 0 in both directions */
+	cmd->msg_info |= u32_encode_bits(0, HTT_H2T_MSG_METHOD);
+	cmd->msg_info |= u32_encode_bits(0, HTT_T2H_MSG_METHOD);
+	cmd->msi_data = params->msi_data;
+	cmd->msg_shared_mem.size = sizeof(struct htt_h2t_paddr_size);
+	cmd->msg_shared_mem.addr_lo = params->addr_lo;
+	cmd->msg_shared_mem.addr_hi = params->addr_hi;
+
+	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
+	if (ret) {
+		ath12k_warn(ab, "DP UMAC INIT msg send failed ret:%d\n", ret);
+		goto err_free;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_DP_UMAC_RESET, "DP UMAC INIT msg sent from host\n");
+	return 0;
+
+err_free:
+	dev_kfree_skb_any(skb);
+	return ret;
+}
+
+/* Send the HTT "start pre-reset" command.
+ *
+ * @is_initiator: this chip recorded itself as the recovery initiator.
+ * @is_target_recovery: the reset was triggered for target recovery.
+ * Returns 0 on success or a negative errno; frees the skb on failure.
+ */
+int ath12k_htt_umac_reset_send_start_pre_reset_cmd(struct ath12k_base *ab, int is_initiator,
+						   int is_target_recovery)
+{
+	struct sk_buff *skb;
+	struct h2t_umac_hang_recovery_start_pre_reset *cmd;
+	int ret;
+	int len = sizeof(*cmd);
+
+	skb = ath12k_htc_alloc_skb(ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, len);
+	cmd = (struct h2t_umac_hang_recovery_start_pre_reset *)skb->data;
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr = u32_encode_bits(HTT_H2T_MSG_TYPE_UMAC_RESET_START_PRE_RESET,
+				   HTT_H2T_UMAC_RESET_MSG_TYPE);
+	cmd->hdr |= u32_encode_bits(is_initiator, HTT_H2T_UMAC_RESET_IS_INITIATOR_SET);
+	cmd->hdr |= u32_encode_bits(is_target_recovery, HTT_H2T_UMAC_RESET_IS_TARGET_RECOVERY_SET);
+
+	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
+	if (ret) {
+		ath12k_warn(ab, "failed to send htt umac reset pre reset start: %d\n",
+			    ret);
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Return the index of the first interrupt context whose umac_dp_reset
+ * ring mask is non-zero, or 0 when none is set.
+ */
+int ath12k_get_umac_reset_intr_offset(struct ath12k_base *ab)
+{
+	int ctxt = 0;
+
+	while (ctxt < ATH12K_EXT_IRQ_NUM_MAX) {
+		if (ab->hw_params->ring_mask->umac_dp_reset[ctxt])
+			return ctxt;
+		ctxt++;
+	}
+
+	return 0;
+}
+
+/* Compute the MSI vector and the aligned shared-memory address (split
+ * into lo/hi words) for UMAC reset signalling, then send the setup
+ * message to the target.
+ */
+int ath12k_htt_umac_reset_setup_cmd(struct ath12k_base *ab)
+{
+	int msi_data_count;
+	struct ath12k_htt_umac_reset_setup_cmd_params params = {};
+	u32 msi_data_start, msi_irq_start;
+	int ret, intr_ctxt;
+	struct ath12k_dp_umac_reset *umac_reset = &ab->dp_umac_reset;
+
+	intr_ctxt = ath12k_get_umac_reset_intr_offset(ab);
+	ret = ath12k_hif_get_user_msi_vector(ab, "DP",
+					     &msi_data_count, &msi_data_start,
+					     &msi_irq_start);
+	/* fall back to the fixed IPC vector when no MSI info is available */
+	if (ret)
+		params.msi_data = ATH12K_UMAC_RESET_IPC;
+	else
+		params.msi_data = (intr_ctxt % msi_data_count) + msi_data_start;
+	params.addr_lo = umac_reset->shmem_paddr_aligned & HAL_ADDR_LSB_REG_MASK;
+	params.addr_hi = (u64)umac_reset->shmem_paddr_aligned >> HAL_ADDR_MSB_REG_SHIFT;
+
+	return ath12k_htt_umac_reset_msg_send(ab, &params);
+}
+
+/* Allocate and align the UMAC-reset shared-memory region, register the
+ * UMAC-reset interrupt and send the setup command to the target.
+ *
+ * No-op (returns 0) when the hardware does not support UMAC reset.
+ * On failure the IRQ and shared memory are released again.
+ */
+int ath12k_dp_umac_reset_init(struct ath12k_base *ab)
+{
+	struct ath12k_dp_umac_reset *umac_reset;
+	int alloc_size, ret;
+
+	if (!ab->hw_params->support_umac_reset)
+		return 0;
+
+	umac_reset = &ab->dp_umac_reset;
+	umac_reset->magic_num = ATH12K_DP_UMAC_RESET_SHMEM_MAGIC_NUM;
+
+	/* over-allocate so the region can be aligned by hand below */
+	alloc_size = sizeof(struct ath12k_dp_htt_umac_reset_recovery_msg_shmem_t) +
+			    ATH12K_DP_UMAC_RESET_SHMEM_ALIGN - 1;
+
+	umac_reset->shmem_vaddr_unaligned =
+		dma_alloc_coherent(ab->dev, alloc_size,
+				   &umac_reset->shmem_paddr_unaligned,
+				   GFP_KERNEL);
+	if (!umac_reset->shmem_vaddr_unaligned) {
+		ath12k_warn(ab, "Failed to allocate memory with size:%u\n", alloc_size);
+		return -ENOMEM;
+	}
+
+	umac_reset->shmem_vaddr_aligned =
+		PTR_ALIGN(umac_reset->shmem_vaddr_unaligned, ATH12K_DP_UMAC_RESET_SHMEM_ALIGN);
+	/* shift the DMA address by the same offset as the virtual one */
+	umac_reset->shmem_paddr_aligned =
+		umac_reset->shmem_paddr_unaligned + ((unsigned long)umac_reset->shmem_vaddr_aligned -
+				(unsigned long)umac_reset->shmem_vaddr_unaligned);
+	umac_reset->shmem_size = alloc_size;
+	umac_reset->shmem_vaddr_aligned->magic_num = ATH12K_DP_UMAC_RESET_SHMEM_MAGIC_NUM;
+	umac_reset->intr_offset = ath12k_get_umac_reset_intr_offset(ab);
+	memset(&umac_reset->ts, 0, sizeof(struct ath12k_umac_reset_ts));
+
+	ret = ath12k_hif_dp_umac_reset_irq_config(ab);
+	if (ret) {
+		ath12k_warn(ab, "Failed to register interrupt for UMAC RECOVERY\n");
+		goto shmem_free;
+	}
+
+	ret = ath12k_htt_umac_reset_setup_cmd(ab);
+	if (ret) {
+		ath12k_warn(ab, "Unable to setup UMAC RECOVERY\n");
+		goto free_irq;
+	}
+
+	ath12k_hif_dp_umac_reset_enable_irq(ab);
+	return 0;
+
+free_irq:
+	ath12k_hif_dp_umac_reset_free_irq(ab);
+shmem_free:
+	dma_free_coherent(ab->dev,
+			  umac_reset->shmem_size,
+			  umac_reset->shmem_vaddr_unaligned,
+			  umac_reset->shmem_paddr_unaligned);
+	umac_reset->shmem_vaddr_unaligned = NULL;
+	return ret;
+}
+
+/* Determine this chip's role for the pre-reset command.
+ *
+ * Sets *is_initiator when this chip is recorded as the recovery
+ * initiator and *is_target_recovery when BIT(1) is set in the shared
+ * MLO reset state; both default to false when no hw group exists.
+ * Reads are done under the MLO reset lock.
+ */
+void ath12k_umac_reset_pre_reset_validation(struct ath12k_base *ab, bool *is_initiator,
+					    bool *is_target_recovery)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_mlo_dp_umac_reset *mlo_umac_reset;
+	*is_initiator = *is_target_recovery = false;
+
+	if (!ag)
+		return;
+
+	mlo_umac_reset = &ag->mlo_umac_reset;
+
+	spin_lock_bh(&mlo_umac_reset->lock);
+	if (mlo_umac_reset->initiator_chip == ab->chip_id)
+		*is_initiator = true;
+	if (mlo_umac_reset->umac_reset_info & BIT(1))
+		*is_target_recovery = true;
+	spin_unlock_bh(&mlo_umac_reset->lock);
+}
+
+/* Clear the shared MLO UMAC-reset state once recovery has finished.
+ *
+ * Fixes from review: the original tested "!mlo_umac_reset" after taking
+ * the address of an embedded struct (can never be NULL) — guard on a
+ * missing hw group instead, matching
+ * ath12k_umac_reset_pre_reset_validation(); and the in-progress BIT(0)
+ * test is now performed under the lock like every other access.
+ */
+void ath12k_umac_reset_completion(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_mlo_dp_umac_reset *mlo_umac_reset;
+
+	if (!ag)
+		return;
+
+	mlo_umac_reset = &ag->mlo_umac_reset;
+
+	spin_lock_bh(&mlo_umac_reset->lock);
+	if (!(mlo_umac_reset->umac_reset_info & BIT(0))) {
+		spin_unlock_bh(&mlo_umac_reset->lock);
+		return;
+	}
+	mlo_umac_reset->umac_reset_info = 0;
+	mlo_umac_reset->initiator_chip = 0;
+	spin_unlock_bh(&mlo_umac_reset->lock);
+}
+
+/* Report completion of a host-side UMAC reset stage to the target,
+ * either via the HTT pre-reset command (TRIGGER_DONE) or by setting the
+ * matching done-bit in the shared-memory h2t word. Per-stage timestamps
+ * (ms) are recorded; once the final stage completes a summary is logged
+ * and the timestamps are cleared.
+ */
+void ath12k_umac_reset_send_htt(struct ath12k_base *ab, int tx_event)
+{
+	struct ath12k_dp_umac_reset *umac_reset = &ab->dp_umac_reset;
+	struct ath12k_dp_htt_umac_reset_recovery_msg_shmem_t *shmem_vaddr_aligned;
+	bool is_initiator, is_target_recovery;
+	int ret;
+
+	shmem_vaddr_aligned = umac_reset->shmem_vaddr_aligned;
+
+	switch (tx_event) {
+	case ATH12K_UMAC_RESET_TX_CMD_TRIGGER_DONE:
+		ath12k_umac_reset_pre_reset_validation(ab, &is_initiator,
+						       &is_target_recovery);
+		ret = ath12k_htt_umac_reset_send_start_pre_reset_cmd(ab,
+								     is_initiator,
+								     is_target_recovery);
+		ab->dp_umac_reset.ts.trigger_done = jiffies_to_msecs(jiffies);
+		if (ret)
+			ath12k_warn(ab, "Unable to send umac trigger\n");
+		break;
+	case ATH12K_UMAC_RESET_TX_CMD_PRE_RESET_DONE:
+		shmem_vaddr_aligned->h2t_msg = u32_encode_bits(1,
+						ATH12K_HTT_UMAC_RESET_MSG_SHMEM_PRE_RESET_DONE_SET);
+		ab->dp_umac_reset.ts.pre_reset_done = jiffies_to_msecs(jiffies);
+		break;
+	case ATH12K_UMAC_RESET_TX_CMD_POST_RESET_START_DONE:
+		shmem_vaddr_aligned->h2t_msg = u32_encode_bits(1,
+						ATH12K_HTT_UMAC_RESET_MSG_SHMEM_POST_RESET_START_DONE_SET);
+		ab->dp_umac_reset.ts.post_reset_done = jiffies_to_msecs(jiffies);
+		break;
+	case ATH12K_UMAC_RESET_TX_CMD_POST_RESET_COMPLETE_DONE:
+		shmem_vaddr_aligned->h2t_msg = u32_encode_bits(1,
+				ATH12K_HTT_UMAC_RESET_MSG_SHMEM_POST_RESET_COMPLETE_DONE);
+		ab->dp_umac_reset.ts.post_reset_complete_done = jiffies_to_msecs(jiffies);
+		break;
+	}
+
+	if (tx_event == ATH12K_UMAC_RESET_TX_CMD_POST_RESET_COMPLETE_DONE) {
+		ath12k_umac_reset_completion(ab);
+		ath12k_info(ab, "MLO UMAC Recovery completed\n");
+		ath12k_dbg(ab, ATH12K_DBG_DP_UMAC_RESET, "Time taken for trigger_start:%llums "
+			   "trigger_done: %llums pre_reset:%llums post_reset:%llums "
+			   "post_reset_complete:%llums",
+			   ab->dp_umac_reset.ts.trigger_start, ab->dp_umac_reset.ts.trigger_done,
+			   ab->dp_umac_reset.ts.pre_reset_done - ab->dp_umac_reset.ts.pre_reset_start,
+			   ab->dp_umac_reset.ts.post_reset_done - ab->dp_umac_reset.ts.post_reset_start,
+			   ab->dp_umac_reset.ts.post_reset_complete_done - ab->dp_umac_reset.ts.post_reset_complete_start);
+		memset(&umac_reset->ts, 0, sizeof(struct ath12k_umac_reset_ts));
+	}
+}
+
+/* Forward @tx_event to every chip in the hw group that is not itself
+ * undergoing recovery. Always returns 0.
+ */
+int ath12k_umac_reset_notify_target(struct ath12k_base *ab, int tx_event)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	int chip;
+
+	for (chip = 0; chip < ag->num_chip; chip++) {
+		struct ath12k_base *peer_ab = ag->ab[chip];
+
+		if (test_bit(ATH12K_FLAG_RECOVERY, &peer_ab->dev_flags))
+			continue;
+
+		ath12k_umac_reset_send_htt(peer_ab, tx_event);
+	}
+
+	return 0;
+}
+
+/* Mark the start of an MLO UMAC recovery in the shared group state.
+ *
+ * Records this chip as the initiator and resets the response counter.
+ * Bails out (with a warning) when a recovery is already in progress.
+ * Review fix: the original tested "!mlo_umac_reset" after taking the
+ * address of an embedded struct, which can never be NULL — guard on a
+ * missing hw group instead.
+ */
+void ath12k_umac_reset_initiate_recovery(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_mlo_dp_umac_reset *mlo_umac_reset;
+
+	if (!ag)
+		return;
+
+	mlo_umac_reset = &ag->mlo_umac_reset;
+
+	spin_lock_bh(&mlo_umac_reset->lock);
+
+	if (mlo_umac_reset->umac_reset_info &
+	    ATH12K_IS_UMAC_RESET_IN_PROGRESS) {
+		spin_unlock_bh(&mlo_umac_reset->lock);
+		ath12k_warn(ab, "UMAC RECOVERY IS IN PROGRESS\n");
+		return;
+	}
+	mlo_umac_reset->umac_reset_info = BIT(0); /* UMAC recovery is in progress */
+	mlo_umac_reset->umac_reset_info |= BIT(1); /* Target recovery */
+	atomic_set(&mlo_umac_reset->response_chip, 0);
+	mlo_umac_reset->initiator_chip = ab->chip_id;
+	spin_unlock_bh(&mlo_umac_reset->lock);
+}
+
+/* Broadcast @tx_event to partner chips once every started chip has
+ * checked in via response_chip, then clear the counter; until then just
+ * log and wait for the remaining chips. (Removed a redundant trailing
+ * "return;".)
+ */
+void ath12k_umac_reset_notify_target_sync_and_send(struct ath12k_base *ab,
+						   enum dp_umac_reset_tx_cmd tx_event)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_mlo_dp_umac_reset *mlo_umac_reset = &ag->mlo_umac_reset;
+
+	if (atomic_read(&mlo_umac_reset->response_chip) >= ab->ag->num_started) {
+		ath12k_dbg(ab, ATH12K_DBG_DP_UMAC_RESET, "response chip:%d num_started:%d sending notify\n",
+			   atomic_read(&mlo_umac_reset->response_chip), ab->ag->num_started);
+		ath12k_umac_reset_notify_target(ab, tx_event);
+		atomic_set(&mlo_umac_reset->response_chip, 0);
+	} else {
+		ath12k_dbg(ab, ATH12K_DBG_DP_UMAC_RESET, "response_chip:%d num_started:%d not matching.. hold on notify\n",
+			   atomic_read(&mlo_umac_reset->response_chip), ab->ag->num_started);
+	}
+}
+
+/* Send PRE_RESET_DONE to partner chips and clear the pre-reset
+ * in-progress flag. Skipped while the PPE-DS service loop is running —
+ * presumably re-invoked once it drains; confirm against the caller.
+ */
+void ath12k_umac_reset_notify_pre_reset_done(struct ath12k_base *ab)
+{
+	struct ath12k_dp *dp = &ab->dp;
+
+	if (dp->ppeds_service_running)
+		return;
+
+	ath12k_umac_reset_notify_target_sync_and_send(ab,
+						      ATH12K_UMAC_RESET_TX_CMD_PRE_RESET_DONE);
+	ab->dp_umac_reset.umac_pre_reset_in_prog = false;
+}
+
+/* First recovery stage on this chip: flag the pre-reset start, mask DP
+ * interrupts, count this chip in via response_chip, quiesce PPE-DS when
+ * compiled in, and — when PPE-DS is not enabled — report
+ * PRE_RESET_DONE immediately (otherwise the done notification comes
+ * later via ath12k_umac_reset_notify_pre_reset_done()).
+ */
+void ath12k_umac_reset_handle_pre_reset(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_mlo_dp_umac_reset *mlo_umac_reset = &ag->mlo_umac_reset;
+
+	set_bit(ATH12K_FLAG_UMAC_PRERESET_START, &ab->dev_flags);
+	ath12k_dp_reset_interrupt_mask(ab);
+	atomic_inc(&mlo_umac_reset->response_chip);
+
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	if (ab->ppeds_handle) {
+		ath12k_dp_ppeds_service_enable_disable(ab, true);
+		ab->dp_umac_reset.umac_pre_reset_in_prog = true;
+		ath12k_dp_ppeds_interrupt_stop(ab);
+		ath12k_dp_ppeds_stop(ab);
+		ath12k_dp_ppeds_service_enable_disable(ab, false);
+	}
+#endif
+	if (!test_bit(ATH12K_FLAG_PPE_DS_ENABLED, &ab->dev_flags))
+		ath12k_umac_reset_notify_target_sync_and_send(ab,
+							      ATH12K_UMAC_RESET_TX_CMD_PRE_RESET_DONE);
+	return;
+}
+
+/* Final recovery stage on this chip: clear the pre-reset flag, restore
+ * DP interrupts, restart PPE-DS when compiled in and active, report
+ * POST_RESET_COMPLETE_DONE to partner chips and re-establish peer TID
+ * state. (Removed a redundant trailing "return;".)
+ */
+void ath12k_umac_reset_handle_post_reset_complete(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_mlo_dp_umac_reset *mlo_umac_reset = &ag->mlo_umac_reset;
+
+	clear_bit(ATH12K_FLAG_UMAC_PRERESET_START, &ab->dev_flags);
+
+	atomic_inc(&mlo_umac_reset->response_chip);
+	ath12k_dp_restore_interrupt_mask(ab);
+#ifdef CONFIG_ATH12K_PPE_DS_SUPPORT
+	if (ab->ppeds_handle) {
+		ath12k_dp_ppeds_start(ab);
+		ath12k_dp_ppeds_interrupt_start(ab);
+	}
+#endif
+	ath12k_umac_reset_notify_target_sync_and_send(ab, ATH12K_UMAC_RESET_TX_CMD_POST_RESET_COMPLETE_DONE);
+	ath12k_dp_peer_tid_setup(ab);
+}
+
+/* Dispatch a decoded UMAC reset rx_event to its stage handler, stamping
+ * the corresponding start timestamp (ms) first. Unknown events are only
+ * logged. (Style fixes: "switch (" spacing, dropped redundant
+ * "return;".)
+ */
+void ath12k_dp_umac_reset_action(struct ath12k_base *ab,
+				 enum dp_umac_reset_recover_action rx_event)
+{
+	switch (rx_event) {
+	case ATH12K_UMAC_RESET_INIT_UMAC_RECOVERY:
+	case ATH12K_UMAC_RESET_INIT_TARGET_RECOVERY_SYNC_USING_UMAC:
+		ath12k_umac_reset_initiate_recovery(ab);
+		ab->dp_umac_reset.ts.trigger_start = jiffies_to_msecs(jiffies);
+		ath12k_umac_reset_notify_target(ab, ATH12K_UMAC_RESET_TX_CMD_TRIGGER_DONE);
+		break;
+	case ATH12K_UMAC_RESET_DO_POST_RESET_COMPLETE:
+		ab->dp_umac_reset.ts.post_reset_complete_start = jiffies_to_msecs(jiffies);
+		ath12k_umac_reset_handle_post_reset_complete(ab);
+		break;
+	case ATH12K_UMAC_RESET_DO_POST_RESET_START:
+		ab->dp_umac_reset.ts.post_reset_start = jiffies_to_msecs(jiffies);
+		ath12k_umac_reset_handle_post_reset_start(ab);
+		break;
+	case ATH12K_UMAC_RESET_DO_PRE_RESET:
+		ab->dp_umac_reset.ts.pre_reset_start = jiffies_to_msecs(jiffies);
+		ath12k_umac_reset_handle_pre_reset(ab);
+		break;
+	default:
+		ath12k_dbg(ab, ATH12K_DBG_DP_UMAC_RESET, "Unknown UMAC RESET event received\n");
+		break;
+	}
+}
+
+/* UMAC-reset interrupt bottom half: validate the shared-memory region,
+ * consume (zero) the t2h word so the event is not replayed, decode it
+ * into exactly one rx event and dispatch. More than one event bit set
+ * in a single message is treated as a protocol violation (WARN + drop).
+ */
+void ath12k_dp_umac_reset_handle(struct ath12k_base *ab)
+{
+	struct ath12k_dp_umac_reset *umac_reset = &ab->dp_umac_reset;
+	struct ath12k_dp_htt_umac_reset_recovery_msg_shmem_t *shmem_vaddr;
+	int rx_event, num_event = 0;
+	u32 t2h_msg;
+
+	shmem_vaddr = umac_reset->shmem_vaddr_aligned;
+	if (!shmem_vaddr) {
+		ath12k_dbg(ab, ATH12K_DBG_DP_UMAC_RESET, "Shared memory address NULL\n");
+		return;
+	}
+
+	if (shmem_vaddr->magic_num != umac_reset->magic_num) {
+		ath12k_dbg(ab, ATH12K_DBG_DP_UMAC_RESET, "Shared memory address is invalid shmem:0x%x u:0x%x\n",
+			   shmem_vaddr->magic_num, umac_reset->magic_num);
+		return;
+	}
+
+	t2h_msg = shmem_vaddr->t2h_msg;
+	shmem_vaddr->t2h_msg = 0;
+
+	rx_event = ATH12K_UMAC_RESET_RX_EVENT_NONE;
+
+	/* u32_get_bits() needs compile-time-constant masks, hence the
+	 * explicit if-chain rather than a mask/event table.
+	 */
+	if (u32_get_bits(t2h_msg, HTT_ATH12K_UMAC_RESET_T2H_INIT_UMAC_RECOVERY)) {
+		rx_event |= ATH12K_UMAC_RESET_INIT_UMAC_RECOVERY;
+		num_event++;
+	}
+
+	if (u32_get_bits(t2h_msg, HTT_ATH12K_UMAC_RESET_T2H_INIT_TARGET_RECOVERY_SYNC_USING_UMAC)) {
+		rx_event |= ATH12K_UMAC_RESET_INIT_TARGET_RECOVERY_SYNC_USING_UMAC;
+		num_event++;
+	}
+
+	if (u32_get_bits(t2h_msg, HTT_ATH12K_UMAC_RESET_T2H_DO_PRE_RESET)) {
+		rx_event |= ATH12K_UMAC_RESET_DO_PRE_RESET;
+		num_event++;
+	}
+
+	if (u32_get_bits(t2h_msg, HTT_ATH12K_UMAC_RESET_T2H_DO_POST_RESET_START)) {
+		rx_event |= ATH12K_UMAC_RESET_DO_POST_RESET_START;
+		num_event++;
+	}
+
+	if (u32_get_bits(t2h_msg, HTT_ATH12K_UMAC_RESET_T2H_DO_POST_RESET_COMPLETE)) {
+		rx_event |= ATH12K_UMAC_RESET_DO_POST_RESET_COMPLETE;
+		num_event++;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_DP_UMAC_RESET, "Deduced rx event:%d num:%d\n", rx_event, num_event);
+
+	if (num_event > 1) {
+		ath12k_dbg(ab, ATH12K_DBG_DP_UMAC_RESET, "Multiple event notified in single msg\n");
+		WARN_ON_ONCE(1);
+		return;
+	}
+
+	ath12k_dp_umac_reset_action(ab, rx_event);
+}
+
+/* Tear down UMAC-reset support: release the IRQ and free the shared
+ * memory allocated by ath12k_dp_umac_reset_init(). Safe to call when
+ * init failed or never ran (vaddr check). Review fix: dropped the dead
+ * "!umac_reset" test — &ab->dp_umac_reset can never be NULL.
+ */
+void ath12k_dp_umac_reset_deinit(struct ath12k_base *ab)
+{
+	struct ath12k_dp_umac_reset *umac_reset;
+
+	if (!ab->hw_params->support_umac_reset)
+		return;
+
+	umac_reset = &ab->dp_umac_reset;
+
+	ath12k_hif_dp_umac_reset_free_irq(ab);
+
+	if (umac_reset->shmem_vaddr_unaligned) {
+		dma_free_coherent(ab->dev,
+				  umac_reset->shmem_size,
+				  umac_reset->shmem_vaddr_unaligned,
+				  umac_reset->shmem_paddr_unaligned);
+		umac_reset->shmem_vaddr_unaligned = NULL;
+	}
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/vendor.c	2024-03-18 14:40:14.859741552 +0100
@@ -0,0 +1,1317 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+#include <net/netlink.h>
+#include <net/mac80211.h>
+#include "core.h"
+#include "debug.h"
+#include "mac.h"
+#include "telemetry.h"
+#include "sawf.h"
+#include "bondif.h"
+
+#ifdef CONFIG_ATH12K_SAWF
+/* Top-level SDWF per-phy attributes: an operation code plus nested
+ * parameter sets for service-class and SLA configuration.
+ */
+static const struct nla_policy
+ath12k_vendor_sdwf_phy_policy[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_MAX + 1] = {
+	[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_OPERATION] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SVC_PARAMS] = {.type = NLA_NESTED},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SLA_SAMPLES_PARAMS] = {.type = NLA_NESTED},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SLA_DETECT_PARAMS] = {.type = NLA_NESTED},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SLA_THRESHOLD_PARAMS] = {.type = NLA_NESTED},
+};
+
+/* Top-level SDWF per-netdev attributes: an operation code plus nested
+ * default-queue, streaming-stats, reset-stats and SLA-breach params.
+ */
+static const struct nla_policy
+ath12k_vendor_sdwf_dev_policy[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_MAX + 1] = {
+	[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_OPERATION] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_DEF_Q_PARAMS] = {.type = NLA_NESTED},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_STREAMING_STATS_PARAMS] = {.type = NLA_NESTED},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_RESET_STATS] = {.type = NLA_NESTED},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_SLA_BREACHED_PARAMS] = {.type = NLA_NESTED},
+};
+
+/* SDWF service-class attributes (downlink and uplink QoS parameters).
+ * Review fix: removed a stray space inside the
+ * "[... UL_MAX_LATENCY ]" designator.
+ */
+static const struct nla_policy
+ath12k_vendor_sdwf_svc_policy[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MAX + 1] = {
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_ID] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MIN_TP] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MAX_TP] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_BURST_SIZE] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_INTERVAL] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_DELAY_BOUND] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MSDU_TTL] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_PRIO] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_TID] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MSDU_RATE_LOSS] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_SVC_INTERVAL] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_MIN_TPUT] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_MAX_LATENCY] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_BURST_SIZE] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_OFDMA_DISABLE] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_MU_MIMO_DISABLE] = {.type = NLA_U8},
+};
+
+/* Default-queue map request attributes. MAC address arrives as a
+ * "xx:xx:xx:xx:xx:xx" string (17 chars + NUL), hence .len = 18.
+ */
+static const struct nla_policy
+ath12k_vendor_sdwf_def_qmap_req_policy[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_MAX + 1] = {
+	[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_SVC_ID] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_MAC_ADDR] = {.type = NLA_STRING, .len = 18},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_TID] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_STATUS] = {.type = NLA_U8},
+};
+
+/* SLA threshold configuration attributes.
+ * NOTE(review): "thershold" in the identifier is a typo for
+ * "threshold"; left unchanged because references to this symbol may
+ * exist outside this view.
+ */
+static const struct nla_policy
+ath12k_vendor_telemetry_sdwf_sla_thershold_config_policy[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_MAX + 1] = {
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_SVC_ID] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_MIN_TP] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_MAX_TP] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_BURST_SIZE] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_INTERVAL] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_DELAY_BOUND] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_MSDU_TTL] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_MSDU_RATE_LOSS] = {.type = NLA_U32},
+};
+
+/* SLA breach-detection configuration attributes. Review fix: removed a
+ * stray space inside the "[... MSDU_RATE_LOSS ]" designator.
+ */
+static const struct nla_policy
+ath12k_vendor_telemetry_sdwf_sla_detect_config_policy[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MAX + 1] = {
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_PARAM] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MIN_TP] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MAX_TP] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_BURST_SIZE] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_INTERVAL] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_DELAY_BOUND] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MSDU_TTL] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MSDU_RATE_LOSS] = {.type = NLA_U32},
+};
+
+/* SLA sampling configuration: moving-average window and per-SLA
+ * packet/time sample counts.
+ */
+static const struct nla_policy
+ath12k_vendor_telemetry_sdwf_sla_samples_config_policy[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_MAX + 1] = {
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_MOVING_AVG_PKT] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_MOVING_AVG_WIN] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_SLA_NUM_PKT] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_SLA_TIME_SEC] = {.type = NLA_U32},
+};
+
+/* Streaming-stats request attributes: basic/extended selectors plus an
+ * MLO link id.
+ */
+static const struct nla_policy
+ath12k_vendor_sawf_streaming[QCA_WLAN_VENDOR_ATTR_SDWF_STREAMING_MAX + 1] = {
+	[QCA_WLAN_VENDOR_ATTR_SDWF_STREAMING_BASIC_STATS] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_STREAMING_EXTND_STATS] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_MLO_LINK_ID] = {.type = NLA_U32},
+};
+
+/* Telemetry request attributes: level/object/type selectors, feature
+ * flags, an optional station MAC (binary, ETH_ALEN) and service id.
+ */
+static const struct nla_policy
+ath12k_telemetric_req_policy[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_MAX + 1] = {
+	[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_LEVEL] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_OBJECT] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_TYPE] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_AGGREGATE] = {.type = NLA_FLAG},
+	[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_FEATURE_FLAG] = {.type = NLA_U64},
+	[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_STA_MAC] = {.type = NLA_BINARY,
+						     .len = ETH_ALEN},
+	[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_SERVICEID] = {.type = NLA_U8},
+};
+
+/* SLA breach event attributes: peer link/MLD MACs (binary, ETH_ALEN),
+ * service id, breach type, set/clear state and access category.
+ */
+static const struct nla_policy
+ath12k_telemetric_sla_policy[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_MAX + 1] = {
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_PEER_MAC] = {.type = NLA_BINARY,
+								 .len = ETH_ALEN},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_SVC_ID] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_TYPE] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_SET_CLEAR] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_PEER_MLD_MAC] = {.type = NLA_BINARY,
+								     .len = ETH_ALEN},
+	[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_AC] = {.type = NLA_U8},
+};
+
+#endif
+
+/* SCS (Stream Classification Service) rule configuration attributes. */
+static const struct nla_policy
+ath12k_vendor_scs_config_policy[QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_MAX + 1] = {
+	[QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_DST_MAC_ADDR] = {.type = NLA_BINARY,
+							       .len = ETH_ALEN},
+	[QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_SERVICE_INTERVAL] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_BURST_SIZE] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_DELAY_BOUND] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_MINIMUM_DATA_RATE] = {.type = NLA_U32},
+	[QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_REQUEST_TYPE] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_USER_PRIORITY] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_ACCESS_CATEGORY] = {.type = NLA_U8},
+	[QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_DIRECTION] = {.type = NLA_U8},
+};
+
+#ifdef CONFIG_ATH12K_SAWF
+/* Direct-switch (DS) policy.
+ * NOTE(review): the array is sized by QCA_WLAN_VENDOR_ATTR_DS_MAX but
+ * indexed by QCA_WLAN_VENDOR_ATTR_CONFIG_EHT_MLO_MODE from a different
+ * attribute namespace — verify the two enums line up.
+ */
+static const struct nla_policy
+ath12k_ds_policy[QCA_WLAN_VENDOR_ATTR_DS_MAX + 1] = {
+	[QCA_WLAN_VENDOR_ATTR_CONFIG_EHT_MLO_MODE] = {.type = NLA_U8},
+};
+
+
+/* Fill @param with the default value for every SAWF service-class
+ * field (downlink parameters first, then uplink).
+ */
+static void ath12k_set_default_values(struct ath12k_sawf_svc_params *param)
+{
+	/* downlink defaults */
+	param->min_throughput_rate = SAWF_SVC_PARAM_DEFAULT_MIN_THRUPUT;
+	param->max_throughput_rate = SAWF_SVC_PARAM_DEFAULT_MAX_THRUPUT;
+	param->burst_size = SAWF_SVC_PARAM_DEFAULT_BURST_SIZE;
+	param->service_interval = SAWF_SVC_PARAM_DEFAULT_SVC_INTERVAL;
+	param->delay_bound = SAWF_SVC_PARAM_DEFAULT_DELAY_BOUND;
+	param->msdu_ttl = SAWF_SVC_PARAM_DEFAULT_TIME_TO_LIVE;
+	param->msdu_rate_loss = SAWF_SVC_PARAM_DEFAULT_MSDU_LOSS_RATE;
+	param->priority = SAWF_SVC_PARAM_DEFAULT_PRIORITY;
+	param->tid = SAWF_SVC_PARAM_DEFAULT_TID;
+	/* uplink defaults */
+	param->ul_service_interval = SAWF_SVC_PARAM_DEFAULT_UL_SVC_INTERVAL;
+	param->ul_burst_size = SAWF_SVC_PARAM_DEFAULT_UL_BURST_SIZE;
+	param->ul_min_tput = SAWF_SVC_PARAM_DEFAULT_UL_MIN_TPUT;
+	param->ul_max_latency = SAWF_SVC_PARAM_DEFAULT_UL_MAX_LATENCY;
+	param->ul_ofdma_disable = SAWF_SVC_PARAM_DEFAULT_UL_OFDMA_DISABLE;
+	param->ul_mu_mimo_disable = SAWF_SVC_PARAM_DEFAULT_UL_MU_MIMO_DISABLE;
+}
+
+/* Parse a nested SDWF service-class attribute set and create/send the
+ * service parameters. The service id is mandatory; every other field
+ * falls back to its default. Returns 0 or a negative errno.
+ * Review fix: removed stray spaces inside the
+ * "tb[... UL_MAX_LATENCY ]" index expressions.
+ */
+static int ath12k_vendor_set_sdwf_config(struct wiphy *wiphy,
+					 struct wireless_dev *wdev,
+					 struct nlattr *svc_params)
+{
+	struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MAX + 1];
+	struct ath12k_sawf_svc_params sdwf_param;
+	int ret = 0;
+
+	memset(&sdwf_param, 0, sizeof(struct ath12k_sawf_svc_params));
+
+	ret = nla_parse_nested(tb, QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MAX,
+				svc_params,
+				ath12k_vendor_sdwf_svc_policy, NULL);
+	if (ret) {
+		ath12k_err(NULL, "Invalid attribute with SAWF configure command\n");
+		return ret;
+	}
+
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_ID]) {
+		sdwf_param.svc_id = nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_ID]);
+	} else {
+		ath12k_err(NULL, "Mandatory attributes not available\n");
+		return -EINVAL;
+	}
+
+	/* start from defaults, then override with whatever was supplied */
+	ath12k_set_default_values(&sdwf_param);
+
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MIN_TP])
+		sdwf_param.min_throughput_rate = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MIN_TP]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MAX_TP])
+		sdwf_param.max_throughput_rate = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MAX_TP]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_BURST_SIZE])
+		sdwf_param.burst_size = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_BURST_SIZE]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_INTERVAL])
+		sdwf_param.service_interval = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_INTERVAL]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_DELAY_BOUND])
+		sdwf_param.delay_bound = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_DELAY_BOUND]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MSDU_TTL])
+		sdwf_param.msdu_ttl = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MSDU_TTL]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_PRIO])
+		sdwf_param.priority = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_PRIO]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_TID])
+		sdwf_param.tid = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_TID]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MSDU_RATE_LOSS])
+		sdwf_param.msdu_rate_loss = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MSDU_RATE_LOSS]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_SVC_INTERVAL])
+		sdwf_param.ul_service_interval =
+			nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_SVC_INTERVAL]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_BURST_SIZE])
+		sdwf_param.ul_burst_size =
+			nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_BURST_SIZE]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_MIN_TPUT])
+		sdwf_param.ul_min_tput =
+			nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_MIN_TPUT]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_MAX_LATENCY])
+		sdwf_param.ul_max_latency =
+			nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_MAX_LATENCY]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_OFDMA_DISABLE])
+		sdwf_param.ul_ofdma_disable =
+			nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_OFDMA_DISABLE]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_MU_MIMO_DISABLE])
+		sdwf_param.ul_mu_mimo_disable =
+			nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_MU_MIMO_DISABLE]);
+
+	ret = ath12k_create_send_svc_params(&sdwf_param);
+
+	return ret;
+}
+
+/* Parse the nested service-class attributes and disable the referenced
+ * service class. The service id attribute is mandatory.
+ * Returns 0 or a negative errno.
+ */
+static int ath12k_vendor_disable_sdwf_config(struct nlattr *svc_params)
+{
+	struct nlattr *attrs[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MAX + 1];
+	u8 svc_id;
+	int err;
+
+	err = nla_parse_nested(attrs, QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MAX,
+			       svc_params,
+			       ath12k_vendor_sdwf_svc_policy, NULL);
+	if (err) {
+		ath12k_err(NULL, "Invalid attributes with SAWF disable command\n");
+		return err;
+	}
+
+	if (!attrs[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_ID]) {
+		ath12k_err(NULL, "Mandatory attribute not available\n");
+		return -EINVAL;
+	}
+
+	svc_id = nla_get_u8(attrs[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_ID]);
+
+	return ath12k_sawf_disable_config(svc_id);
+}
+
+/* Netlink dump handler for SAWF/SDWF service-class configuration.
+ *
+ * With a service id supplied, emits that single (configured) class;
+ * otherwise iterates all configured classes, resuming from *storage
+ * across dump invocations and stopping early when the skb tailroom
+ * would be exceeded. Returns msg->len when data was added, 0 to end
+ * the dump, or a negative errno.
+ */
+static int ath12k_vendor_view_sdwf_config(struct wiphy *wiphy,
+					  struct wireless_dev *wdev,
+					  struct sk_buff *msg,
+					  const void *data,
+					  int data_len,
+					  unsigned long *storage)
+{
+	struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_MAX + 1];
+	struct nlattr *svc[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MAX + 1];
+	struct ath12k_sawf_ctx *sdwf_ctx;
+	struct ath12k_sawf_svc_params *svc_class;
+	struct nlattr *sdwf_svc_classes, *sdwf_svc_class;
+	int ret = 0, i, j = 0;
+	int tailroom = 0, nest_start_length = 0, nest_end_length = 0, nested_range = 0;
+	u8 svc_id = 0;
+
+	if (!ath12k_sawf_enable)
+		return -EOPNOTSUPP;
+
+	sdwf_ctx = ath12k_get_sawf_context();
+	if (!sdwf_ctx) {
+		ath12k_err(NULL, "SAWF context not available\n");
+		return -ENODATA;
+	}
+
+	ret = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_SDWF_PHY_MAX, data, data_len,
+			 ath12k_vendor_sdwf_phy_policy, NULL);
+
+	if (ret) {
+		ath12k_err(NULL, "Invalid attributes with SAWF commands\n");
+		return -EINVAL;
+	}
+
+	/* only the SVC_GET operation is valid for this handler */
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_OPERATION] &&
+	    nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_OPERATION]) == QCA_WLAN_VENDOR_SDWF_PHY_OPER_SVC_GET) {
+		if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SVC_PARAMS]) {
+			ret = nla_parse_nested(svc, QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MAX,
+					       tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SVC_PARAMS],
+					       ath12k_vendor_sdwf_svc_policy, NULL);
+			if (ret) {
+				ath12k_err(NULL, "Invalid attribute with SAWF view command\n");
+				return -EINVAL;
+			}
+			if (svc[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_ID]) {
+				bool isconfigured;
+
+				svc_id = nla_get_u8(svc[QCA_WLAN_VENDOR_ATTR_SDWF_SVC_ID]);
+				if (!ath12k_svc_id_valid(svc_id)) {
+					ath12k_err(NULL, "Invalid Service ID: %u\n", svc_id);
+					return -EINVAL;
+				}
+				spin_lock_bh(&sdwf_ctx->sawf_svc_lock);
+				isconfigured = ath12k_svc_id_configured(svc_id);
+				spin_unlock_bh(&sdwf_ctx->sawf_svc_lock);
+				if (!isconfigured)
+					return -EINVAL;
+			}
+		}
+	} else {
+		ath12k_err(NULL, "Invalid attribute with SAWF view command\n");
+		return -EINVAL;
+	}
+	/* return 0 to end the dump */
+	if (storage && (*storage == ATH12K_SAWF_SVC_CLASS_MAX))
+		return 0;
+
+	sdwf_svc_classes = nla_nest_start(msg,
+					  QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SVC_PARAMS);
+
+	if (!sdwf_svc_classes)
+		return -ENOBUFS;
+
+	tailroom = skb_tailroom(msg);
+	spin_lock_bh(&sdwf_ctx->sawf_svc_lock);
+	/* single class when svc_id is set (one pass), otherwise walk all
+	 * classes starting at the dump cursor while tailroom remains
+	 */
+	for (i = (svc_id) ? (svc_id - 1) : (*storage);
+	     i < ATH12K_SAWF_SVC_CLASS_MAX && tailroom > nested_range;
+	     i += (svc_id) ? (ATH12K_SAWF_SVC_CLASS_MAX) : (1)) {
+		if (!sdwf_ctx->svc_classes[i].configured)
+			continue;
+
+		svc_class = &sdwf_ctx->svc_classes[i];
+		nest_start_length = msg->len;
+		sdwf_svc_class = nla_nest_start(msg, j);
+		if (nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_SDWF_SVC_ID,
+			       svc_class->svc_id) ||
+		    nla_put_u32(msg,
+				QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MIN_TP,
+				svc_class->min_throughput_rate) ||
+		    nla_put_u32(msg,
+				QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MAX_TP,
+				svc_class->max_throughput_rate) ||
+		    nla_put_u32(msg,
+				QCA_WLAN_VENDOR_ATTR_SDWF_SVC_BURST_SIZE,
+				svc_class->burst_size) ||
+		    nla_put_u32(msg,
+				QCA_WLAN_VENDOR_ATTR_SDWF_SVC_INTERVAL,
+				svc_class->service_interval) ||
+		    nla_put_u32(msg,
+				QCA_WLAN_VENDOR_ATTR_SDWF_SVC_DELAY_BOUND,
+				svc_class->delay_bound) ||
+		    nla_put_u32(msg,
+				QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MSDU_TTL,
+				svc_class->msdu_ttl) ||
+		    nla_put_u32(msg,
+				QCA_WLAN_VENDOR_ATTR_SDWF_SVC_PRIO,
+				 svc_class->priority) ||
+		    nla_put_u32(msg, QCA_WLAN_VENDOR_ATTR_SDWF_SVC_TID,
+				svc_class->tid) ||
+		    nla_put_u32(msg,
+				QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MSDU_RATE_LOSS,
+				svc_class->msdu_rate_loss) ||
+		    nla_put_u32(msg,
+				QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_SVC_INTERVAL,
+				svc_class->ul_service_interval) ||
+		    nla_put_u32(msg,
+				QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_MIN_TPUT,
+				svc_class->ul_min_tput) ||
+		    nla_put_u32(msg,
+				QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_MAX_LATENCY,
+				svc_class->ul_max_latency) ||
+		    nla_put_u32(msg,
+				QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_BURST_SIZE,
+				svc_class->ul_burst_size) ||
+		    nla_put_u8(msg,
+				QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_OFDMA_DISABLE,
+				svc_class->ul_ofdma_disable) ||
+		    nla_put_u8(msg,
+				QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_MU_MIMO_DISABLE,
+				svc_class->ul_mu_mimo_disable))
+			goto nla_put_failure;
+
+		nest_end_length = nla_nest_end(msg, sdwf_svc_class);
+		nested_range = nest_end_length - nest_start_length;
+		tailroom -= nested_range;
+		j++;
+	}
+	spin_unlock_bh(&sdwf_ctx->sawf_svc_lock);
+	nla_nest_end(msg, sdwf_svc_classes);
+
+	*storage = (svc_id) ? (ATH12K_SAWF_SVC_CLASS_MAX) : (i);
+
+	if (!j)
+		return 0;
+
+	return msg->len;
+
+nla_put_failure:
+	spin_unlock_bh(&sdwf_ctx->sawf_svc_lock);
+	return -ENOBUFS;
+}
+
+/* Handle QCA_WLAN_VENDOR_SDWF_DEV_OPER_DEF_Q_MAP: map the peer identified
+ * by the optional MAC-address attribute (all-zero MAC when absent) to the
+ * default MSDU queue of the given service class.
+ *
+ * Returns 0 on success (or when no ath12k_base matches the netdev/peer),
+ * a negative errno otherwise.
+ */
+static int ath12k_vendor_sdwf_def_qmap_req(struct wireless_dev *wdev,
+					   struct nlattr *def_q_params)
+{
+	struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_MAX + 1];
+	struct ath12k_base *ab = NULL;
+	u8 svc_id;
+	u8 *mac_addr_p = NULL;
+	u16 peer_id = 0xFFFF;
+	u8 mac_addr[ATH12K_MAC_ADDR_SIZE] = { 0 };
+	int ret = 0;
+
+	ret = nla_parse_nested(tb, QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_MAX,
+			       def_q_params,
+			       ath12k_vendor_sdwf_def_qmap_req_policy, NULL);
+	if (ret) {
+		ath12k_warn(NULL, "invalid sawf def q map policy attribute\n");
+		return ret;
+	}
+
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_MAC_ADDR])	{
+		/* MAC arrives as a printable "xx:xx:..." string.
+		 * NOTE(review): assumes the attribute data is NUL-terminated;
+		 * confirm the policy uses NLA_NUL_STRING.
+		 */
+		mac_addr_p = nla_data(tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_MAC_ADDR]);
+		if (sscanf(mac_addr_p,
+			   "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+			   &mac_addr[0],
+			   &mac_addr[1],
+			   &mac_addr[2],
+			   &mac_addr[3],
+			   &mac_addr[4],
+			   &mac_addr[5]) != ATH12K_MAC_ADDR_SIZE) {
+			ath12k_warn(NULL, "invalid Macaddr %s\n", mac_addr_p);
+			/* was "return -1" (== -EPERM); -EINVAL describes the
+			 * malformed request accurately.
+			 */
+			return -EINVAL;
+		}
+	}
+
+	/* The service id is mandatory for a map request; nla_parse_nested()
+	 * does not guarantee presence, so dereferencing it unchecked would
+	 * crash on a request that omits the attribute.
+	 */
+	if (!tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_SVC_ID])
+		return -EINVAL;
+	svc_id = nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_SVC_ID]);
+
+	ath12k_dbg(NULL, ATH12K_DBG_SAWF,
+		   "Default Q map:svcid[%u]macaddr[%pM]\n", svc_id, mac_addr);
+
+	ab = ath12k_sawf_get_ab_from_netdev(wdev->netdev, mac_addr, &peer_id);
+	if (ab)
+		ret = ath12k_sawf_def_qmap_req(ab, svc_id, peer_id);
+
+	return ret;
+}
+
+/* Handle QCA_WLAN_VENDOR_SDWF_DEV_OPER_DEF_Q_UNMAP: undo a default MSDU
+ * queue mapping for the peer identified by the optional MAC-address
+ * attribute (all-zero MAC when absent).
+ *
+ * Returns 0 on success (or when no ath12k_base matches the netdev/peer),
+ * a negative errno otherwise.
+ */
+static int ath12k_vendor_sdwf_def_qunmap_req(struct wireless_dev *wdev,
+					     struct nlattr *def_q_params)
+{
+	struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_MAX + 1];
+	struct ath12k_base *ab;
+	u8 svc_id;
+	u16 peer_id = 0xFFFF;
+	u8 *mac_addr_p = NULL;
+	u8 mac_addr[ATH12K_MAC_ADDR_SIZE] = { 0 };
+	int ret = 0;
+
+	ret = nla_parse_nested(tb, QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_MAX,
+			       def_q_params,
+			       ath12k_vendor_sdwf_def_qmap_req_policy, NULL);
+	if (ret) {
+		ath12k_warn(NULL, "invalid sawf def q unmap policy attribute\n");
+		return ret;
+	}
+
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_MAC_ADDR]) {
+		/* MAC arrives as a printable "xx:xx:..." string.
+		 * NOTE(review): assumes the attribute data is NUL-terminated;
+		 * confirm the policy uses NLA_NUL_STRING.
+		 */
+		mac_addr_p = nla_data(tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_MAC_ADDR]);
+		if (sscanf(mac_addr_p,
+			   "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+			   &mac_addr[0],
+			   &mac_addr[1],
+			   &mac_addr[2],
+			   &mac_addr[3],
+			   &mac_addr[4],
+			   &mac_addr[5]) != ATH12K_MAC_ADDR_SIZE) {
+			ath12k_warn(NULL, "invalid Macaddr %s\n", mac_addr_p);
+			/* was "return -1" (== -EPERM); -EINVAL describes the
+			 * malformed request accurately.
+			 */
+			return -EINVAL;
+		}
+	}
+
+	/* The service id is mandatory; dereferencing the attribute without a
+	 * presence check would crash on a request that omits it.
+	 */
+	if (!tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_SVC_ID])
+		return -EINVAL;
+	svc_id = nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_SVC_ID]);
+
+	ath12k_dbg(NULL, ATH12K_DBG_SAWF,
+		   "Default Q unmap:svcid[%u]macaddr[%pM]\n", svc_id, mac_addr);
+
+	ab = ath12k_sawf_get_ab_from_netdev(wdev->netdev, mac_addr, &peer_id);
+
+	if (ab)
+		ret = ath12k_sawf_def_qunmap_req(ab, svc_id, peer_id);
+
+	return ret;
+}
+
+/* Handle QCA_WLAN_VENDOR_SDWF_DEV_OPER_DEF_Q_MAP_GET: request a report of
+ * the default queue mapping for the peer identified by the optional
+ * MAC-address attribute (all-zero MAC when absent).
+ *
+ * Returns 0 on success (or when no ath12k_base matches the netdev/peer),
+ * a negative errno on parse failure, -1 on a malformed MAC string.
+ */
+static int ath12k_vendor_sdwf_def_qmap_report_req(struct wireless_dev *wdev,
+						  struct nlattr *def_q_params)
+{
+	struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_MAX + 1];
+	struct ath12k_base *ab = NULL;
+	u16 peer_id = 0xFFFF;
+	u8 *mac_addr_p = NULL;
+	u8 mac_addr[ATH12K_MAC_ADDR_SIZE] = { 0 };
+	int ret = 0;
+
+	ret = nla_parse_nested(tb, QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_MAX,
+			       def_q_params,
+			       ath12k_vendor_sdwf_def_qmap_req_policy, NULL);
+	if (ret) {
+		ath12k_warn(NULL, "invalid sawf def q map report req policy attribute\n");
+		return ret;
+	}
+
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_MAC_ADDR])	{
+		/* MAC arrives as a printable "xx:xx:..." string.
+		 * NOTE(review): assumes the attribute data is NUL-terminated;
+		 * confirm the policy uses NLA_NUL_STRING.
+		 */
+		mac_addr_p = nla_data(tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_MAC_ADDR]);
+		if (sscanf(mac_addr_p,
+			   "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+			   &mac_addr[0],
+			   &mac_addr[1],
+			   &mac_addr[2],
+			   &mac_addr[3],
+			   &mac_addr[4],
+			   &mac_addr[5]) != ATH12K_MAC_ADDR_SIZE) {
+			ath12k_warn(NULL, "invalid Macaddr %s\n", mac_addr_p);
+			return -1;
+		}
+	}
+	ath12k_dbg(NULL, ATH12K_DBG_SAWF,
+		   "Default Q map report:macaddr[%pM]\n", mac_addr);
+
+	/* peer_id is resolved (or left at 0xFFFF) by the lookup helper. */
+	ab = ath12k_sawf_get_ab_from_netdev(wdev->netdev, mac_addr, &peer_id);
+	if (ab)
+		ret = ath12k_sawf_def_qmap_report_req(ab, peer_id);
+
+	return ret;
+}
+
+/* Program the SLA sampling window (moving-average packet count/window and
+ * SLA packet/time span) from the nested SLA_SAMPLES attribute.
+ *
+ * Returns 0 on success, -ENOSYS when SAWF is disabled, -EINVAL when a
+ * required attribute is missing, other negative errno on parse failure.
+ */
+static int ath12k_vendor_telemetry_sdwf_sla_samples_config(struct nlattr *sla_samples)
+{
+	struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_MAX + 1];
+	struct ath12k_sla_samples_cfg t_param;
+	int ret = 0;
+
+	if (!ath12k_sawf_enable) {
+		ath12k_warn(NULL, "sawf is not enabled \n");
+		return -ENOSYS;
+	}
+
+	memset(&t_param, 0, sizeof(struct ath12k_sla_samples_cfg));
+
+	ret = nla_parse_nested(tb, QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_MAX,
+			       sla_samples,
+			       ath12k_vendor_telemetry_sdwf_sla_samples_config_policy, NULL);
+	if (ret) {
+		ath12k_warn(NULL, "invalid set telemetry sla samples config policy attribute\n");
+		return ret;
+	}
+
+	/* All four attributes are needed to program the window;
+	 * nla_parse_nested() does not enforce presence, so the previous
+	 * unconditional nla_get_u32() calls crashed on a request omitting
+	 * any of them.
+	 */
+	if (!tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_MOVING_AVG_PKT] ||
+	    !tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_MOVING_AVG_WIN] ||
+	    !tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_SLA_NUM_PKT] ||
+	    !tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_SLA_TIME_SEC])
+		return -EINVAL;
+
+	t_param.moving_avg_pkt =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_MOVING_AVG_PKT]);
+	t_param.moving_avg_win =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_MOVING_AVG_WIN]);
+	t_param.sla_num_pkt =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_SLA_NUM_PKT]);
+	t_param.sla_time_sec =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_SLA_TIME_SEC]);
+
+	ret = ath12k_telemetry_sawf_sla_samples_config(t_param);
+	return ret;
+}
+
+/* Program per-service-class SLA threshold values from the nested
+ * SLA_THRESHOLD attribute. ("thershold" spelling is kept to match the
+ * driver-wide ath12k_sla_thershold_cfg/telemetry API names.)
+ *
+ * NOTE(review): every attribute below is dereferenced without a presence
+ * check; a request missing any of them would dereference NULL — confirm
+ * the policy marks them all as required.
+ *
+ * Returns 0 on success, a negative errno on parse or config failure.
+ */
+static int ath12k_vendor_telemetry_sdwf_sla_thershold_config(struct nlattr *sla_threshold)
+{
+	struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_MAX + 1];
+	struct ath12k_sla_thershold_cfg t_param;
+	int ret = 0;
+
+	memset(&t_param, 0, sizeof(struct ath12k_sla_thershold_cfg));
+
+	ret = nla_parse_nested(tb, QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_MAX,
+			       sla_threshold,
+			       ath12k_vendor_telemetry_sdwf_sla_thershold_config_policy, NULL);
+	if (ret) {
+		ath12k_warn(NULL, "invalid telemetry sla thershold config policy attribute\n");
+		return ret;
+	}
+
+	/* Copy each threshold straight into the telemetry config struct. */
+	t_param.svc_id =
+		nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_SVC_ID]);
+	t_param.min_throughput_rate =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_MIN_TP]);
+	t_param.max_throughput_rate =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_MAX_TP]);
+	t_param.burst_size =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_BURST_SIZE]);
+	t_param.service_interval =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_INTERVAL]);
+	t_param.delay_bound =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_DELAY_BOUND]);
+	t_param.msdu_ttl =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_MSDU_TTL]);
+	t_param.msdu_rate_loss =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_MSDU_RATE_LOSS]);
+
+	ret = ath12k_telemetry_sawf_sla_thershold_config(t_param);
+
+	return ret;
+}
+
+/* Configure SLA breach detection from the nested SLA_DETECT attribute.
+ *
+ * Returns 0 on success, -EINVAL when a required attribute is missing,
+ * other negative errno on parse or config failure.
+ */
+static int ath12k_vendor_telemetry_sdwf_sla_detection_config(struct nlattr *sla_detect)
+{
+	struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MAX + 1];
+	struct ath12k_sla_detect_cfg t_param;
+	int ret = 0;
+
+	memset(&t_param, 0, sizeof(struct ath12k_sla_detect_cfg));
+
+	ret = nla_parse_nested(tb, QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MAX,
+			       sla_detect,
+			       ath12k_vendor_telemetry_sdwf_sla_detect_config_policy, NULL);
+	if (ret) {
+		ath12k_warn(NULL, "invalid telemetry sdwf sla detection config policy attribute\n");
+		return ret;
+	}
+
+	/* nla_parse_nested() does not enforce presence; the previous
+	 * unconditional nla_get_*() calls crashed on a request omitting
+	 * any of these attributes.
+	 */
+	if (!tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_PARAM] ||
+	    !tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MIN_TP] ||
+	    !tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MAX_TP] ||
+	    !tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_BURST_SIZE] ||
+	    !tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_INTERVAL] ||
+	    !tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_DELAY_BOUND] ||
+	    !tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MSDU_TTL] ||
+	    !tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MSDU_RATE_LOSS])
+		return -EINVAL;
+
+	t_param.sla_detect =
+		nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_PARAM]);
+	t_param.min_throughput_rate =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MIN_TP]);
+	t_param.max_throughput_rate =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MAX_TP]);
+	t_param.burst_size =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_BURST_SIZE]);
+	t_param.service_interval =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_INTERVAL]);
+	t_param.delay_bound =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_DELAY_BOUND]);
+	t_param.msdu_ttl =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MSDU_TTL]);
+	/* Stray space inside the index bracket removed. */
+	t_param.msdu_rate_loss =
+		nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MSDU_RATE_LOSS]);
+
+	ret = ath12k_telemetry_sawf_sla_detection_config(t_param);
+
+	return ret;
+}
+
+/* Resolve the per-radio ath12k instance behind @wdev for @link_id.
+ *
+ * The lookup runs under the hw-abstraction conf_mutex so the vif-to-radio
+ * mapping cannot change underneath it. Returns NULL when the wdev has no
+ * ieee80211 vif, no driver private data, or no hw.
+ */
+static struct ath12k *ath12k_get_ar_from_wdev(struct wireless_dev *wdev, u8 link_id)
+{
+	struct ieee80211_vif *vif;
+	struct ath12k_vif *ahvif;
+	struct ieee80211_hw *hw;
+	struct ath12k *ar = NULL;
+
+	vif = wdev_to_ieee80211_vif(wdev);
+	if (!vif)
+		return NULL;
+
+	ahvif = (struct ath12k_vif *)vif->drv_priv;
+	if (!ahvif)
+		return NULL;
+
+	mutex_lock(&ahvif->ah->conf_mutex);
+	hw = ahvif->ah->hw;
+	if (hw)
+		ar = ath12k_get_ar_by_vif(hw, vif, link_id);
+	mutex_unlock(&ahvif->ah->conf_mutex);
+
+	return ar;
+}
+
+/* Enable/disable SAWF streaming statistics (basic MPDU stats and/or the
+ * extended per-MPDU details) on the radio behind @wdev.
+ *
+ * Link selection: for an MLO wdev the MLO_LINK_ID attribute is mandatory
+ * and must name a valid link; for a non-MLO wdev the attribute must be
+ * absent and link 0 is used.
+ *
+ * Returns 0 on success, -EINVAL on bad attributes, -ENOLINK for an
+ * unknown link, -ENODATA when no radio is found, or an HTT send error.
+ */
+static int ath12k_vendor_sdwf_streaming_stats_configure(struct wireless_dev *wdev,
+							struct nlattr *streaming_stats)
+{
+	struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SDWF_STREAMING_MAX + 1];
+	struct ath12k *ar = NULL;
+	int ret = 0;
+	u8 basic_stats_configure, extnd_stats_configure, link_id;
+
+
+	ret = nla_parse_nested(tb, QCA_WLAN_VENDOR_ATTR_SDWF_STREAMING_MAX,
+			       streaming_stats,
+			       ath12k_vendor_sawf_streaming, NULL);
+	if (ret) {
+		ath12k_warn(NULL, "invalid sawf streaming stats configuration\n");
+		return ret;
+	}
+
+	if (wdev->valid_links) { /* MLO case */
+		if (!tb[QCA_WLAN_VENDOR_ATTR_SDWF_MLO_LINK_ID])
+			return -EINVAL;
+		link_id = nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_SDWF_MLO_LINK_ID]);
+		if (!(wdev->valid_links & BIT(link_id)))
+			return -ENOLINK;
+	} else { /* NON-MLO case */
+		if (!tb[QCA_WLAN_VENDOR_ATTR_SDWF_MLO_LINK_ID])
+			link_id = 0;
+		else
+			return -EINVAL;
+	}
+
+	ar = ath12k_get_ar_from_wdev(wdev, link_id);
+	if (!ar)
+		return -ENODATA;
+
+	/* Both stat types may be toggled in one request; a failure on the
+	 * basic-stats configure aborts before touching the extended stats.
+	 */
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_STREAMING_BASIC_STATS]) {
+		basic_stats_configure = nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_SDWF_STREAMING_BASIC_STATS]);
+		ret = ath12k_htt_sawf_streaming_stats_configure(ar, HTT_STRM_GEN_MPDUS_STATS,
+								basic_stats_configure, 0, 0, 0, 0);
+		if (ret)
+			return ret;
+	}
+
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_STREAMING_EXTND_STATS]) {
+		extnd_stats_configure = nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_SDWF_STREAMING_EXTND_STATS]);
+		ret = ath12k_htt_sawf_streaming_stats_configure(ar, HTT_STRM_GEN_MPDUS_DETAILS_STATS,
+								extnd_stats_configure, 0, 0, 0, 0);
+	}
+
+	return ret;
+}
+
+/* Send the collected unified stats back to userspace, splitting the
+ * payload across as many vendor reply skbs as needed.
+ *
+ * Each iteration allocates a fresh reply skb, writes the fixed header
+ * attributes, then packs feature blobs from @stats until the skb runs out
+ * of room. A partially-sent feature records its resume offset in @storage
+ * and sets @data_pending so the next skb continues from the same feature;
+ * such follow-up replies carry the MULTI_REPLY flag so the receiver knows
+ * to keep reading.
+ *
+ * Returns the last cfg80211_vendor_cmd_reply() result (0 on success),
+ * -ENOMEM on allocation/put failure, -ECANCELED on a corrupted skb.
+ */
+static int telemetry_build_nd_send_reply_msg(struct wiphy *wiphy, struct stats_config *cfg,
+					     u8 *mac_addr, struct unified_stats *stats)
+{
+	struct sk_buff *skb = NULL;
+	void *data = NULL;
+	struct nlattr *attr;
+	u32 storage = 0, multiple_reply_len = 0;
+	int rem_len = 0, data_len = 0, nla_size = nla_total_size(0), ret = 0;
+	u8 i, feat = 0;
+	bool data_pending = false, multiple_reply_set = false;
+	char *vap="wifi";
+
+	do {
+		skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, NLMSG_DEFAULT_SIZE);
+		if (!skb)
+			return -ENOMEM;
+
+		/* Fixed header repeated in every reply fragment. */
+		if (nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_STATS_LEVEL, cfg->lvl) ||
+		    nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_STATS_OBJECT, STATS_OBJ_STA) ||
+		    nla_put(skb, QCA_WLAN_VENDOR_ATTR_STATS_OBJ_ID, ETH_ALEN, mac_addr) ||
+		    nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_STATS_SERVICEID, cfg->serviceid) ||
+		    nla_put_string(skb, QCA_WLAN_VENDOR_ATTR_STATS_PARENT_IF, vap) ||
+		    nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_STATS_TYPE, cfg->type)) {
+			kfree_skb(skb);
+			return -ENOMEM;
+		}
+
+		if (data_pending) {
+			multiple_reply_set = true;
+			data_pending = false;
+			/* Length to include the flag QCA_WLAN_VENDOR_ATTR_STATS_MULTI_REPLY */
+			multiple_reply_len = nla_size;
+		}
+
+		attr = nla_nest_start(skb, QCA_WLAN_VENDOR_ATTR_STATS_RECURSIVE);
+		/* Resume from the feature that was cut short last round. */
+		for (i = feat; i < STATS_FEAT_MAX; i++) {
+			if (!stats->feat[i] || !stats->size[i])
+				continue;
+
+			rem_len = skb_tailroom(skb);
+			if (rem_len < 0) {
+				ath12k_err(NULL, "SAWF: skb is corrupted\n");
+				kfree_skb(skb);
+				return -ECANCELED;
+			}
+			/* Reserve room for the MULTI_REPLY flag and the
+			 * nest-end header before sizing this chunk.
+			 */
+			rem_len -= multiple_reply_len;
+			rem_len -= nla_size;
+			rem_len = (rem_len > 0) ? rem_len : 0;
+
+			if (stats->size[i] >= rem_len) {
+				/* Feature does not fit: send what fits and
+				 * remember the offset for the next skb.
+				 */
+				data_len = rem_len;
+				data = stats->feat[i] + storage;
+				stats->size[i] -= rem_len;
+				storage += data_len;
+				feat = i;
+				data_pending = true;
+			} else {
+				data_len = stats->size[i];
+				data = stats->feat[i] + storage;
+				storage = 0;
+			}
+			/* Attribute type is the feature index shifted by one
+			 * (0 is reserved/invalid).
+			 */
+			if (nla_put(skb, i+1, data_len, data)) {
+				kfree_skb(skb);
+				return -ENOMEM;
+			}
+
+			if (data_pending)
+				break;
+		}
+
+		nla_nest_end(skb, attr);
+
+		if (multiple_reply_set && nla_put_flag(skb, QCA_WLAN_VENDOR_ATTR_STATS_MULTI_REPLY)) {
+			kfree_skb(skb);
+			return -ENOMEM;
+		}
+
+		ret = cfg80211_vendor_cmd_reply(skb);
+		if (ret) {
+			kfree_skb(skb);
+			ath12k_err(NULL, "SAWF: stats msg send failed with err=%d\n", ret);
+			return ret;
+		}
+	} while(data_pending);
+
+	return ret;
+}
+
+/* doit handler for QCA_NL80211_VENDOR_SUBCMD_TELEMETRIC_DATA: gather the
+ * requested SAWF tx/delay telemetry for a peer and stream it back with
+ * telemetry_build_nd_send_reply_msg().
+ *
+ * The feature-flag attribute is mandatory (no flags -> -EINVAL); the
+ * per-feature stat buffers are allocated only for the flags requested and
+ * always freed before returning.
+ *
+ * Returns 0 on success, -EOPNOTSUPP when SAWF is disabled, -ENODATA when
+ * no device matches the netdev/peer, -ENOMEM on allocation failure, or an
+ * extraction/reply error.
+ */
+static int ath12k_vendor_telemetry_getstats(struct wiphy *wiphy,
+					    struct wireless_dev *wdev,
+					    const void *data,
+					    int data_len)
+{
+	struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_MAX + 1];
+	struct stats_config cfg;
+	struct unified_stats *stats;
+	struct telemetry_sawfdelay_stats *delay_stats = NULL;
+	struct telemetry_sawftx_stats *tx_stats = NULL;
+	struct ath12k_base *ab = NULL;
+	int ret = 0, i;
+	u16 peer_id = 0xFFFF;
+	u8 mac_addr[ETH_ALEN] = { 0 };
+
+	if (!ath12k_sawf_enable)
+		return -EOPNOTSUPP;
+
+	ret = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_TELEMETRIC_MAX, data, data_len,
+			ath12k_telemetric_req_policy, NULL);
+
+	if (ret) {
+		ath12k_err(NULL, "Invalid attribute with telemetry getstats command\n");
+		return ret;
+	}
+
+	memset(&cfg, 0, sizeof(struct stats_config));
+
+	/* All request attributes are optional; absent ones leave the
+	 * zeroed defaults in cfg.
+	 */
+	if (wiphy)
+		cfg.wiphy = wiphy;
+	if (tb[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_LEVEL])
+		cfg.lvl = nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_LEVEL]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_OBJECT])
+		cfg.obj = nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_OBJECT]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_TYPE])
+		cfg.type = nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_TYPE]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_AGGREGATE])
+		cfg.aggregate = true;
+	if (tb[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_FEATURE_FLAG])
+		cfg.feat = nla_get_u64(tb[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_FEATURE_FLAG]);
+	if (tb[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_STA_MAC] &&
+	    (nla_len(tb[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_STA_MAC]) == ETH_ALEN)) {
+		memcpy(mac_addr, nla_data(tb[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_STA_MAC]), ETH_ALEN);
+		cfg.mac = mac_addr;
+	}
+	if (tb[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_SERVICEID])
+		cfg.serviceid = nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_TELEMETRIC_SERVICEID]);
+
+	/* Nothing requested: reject rather than reply with empty stats. */
+	if (!(cfg.feat))
+		return -EINVAL;
+
+	ab = ath12k_sawf_get_ab_from_netdev(wdev->netdev, mac_addr, &peer_id);
+	if (!ab)
+		return -ENODATA;
+
+	stats = kzalloc(sizeof(struct unified_stats), GFP_KERNEL);
+	if (!stats)
+		return -ENOMEM;
+
+	if (cfg.feat & STATS_FEAT_FLG_SAWFTX) {
+		tx_stats = kzalloc(sizeof(struct telemetry_sawftx_stats), GFP_KERNEL);
+		if (!tx_stats) {
+			ret = -ENOMEM;
+			goto end_stats;
+		}
+		stats->feat[STATS_FEAT_SAWFTX] = tx_stats;
+		stats->size[STATS_FEAT_SAWFTX] = sizeof(struct telemetry_sawftx_stats);
+	}
+
+	if (cfg.feat & STATS_FEAT_FLG_SAWFDELAY) {
+		delay_stats = kzalloc(sizeof(struct telemetry_sawfdelay_stats), GFP_KERNEL);
+		if (!delay_stats) {
+			ret = -ENOMEM;
+			goto end_stats;
+		}
+		stats->feat[STATS_FEAT_SAWFDELAY] = delay_stats;
+		stats->size[STATS_FEAT_SAWFDELAY] = sizeof(struct telemetry_sawfdelay_stats);
+	}
+
+	ret = telemetry_extract_data(&cfg, tx_stats, delay_stats, ab);
+	if (ret)
+		goto end_stats;
+
+	ret = telemetry_build_nd_send_reply_msg(wiphy, &cfg, mac_addr, stats);
+
+end_stats:
+	/* Free every per-feature buffer; stats->feat[] owns them here. */
+	for (i = 0; i < STATS_FEAT_MAX; i++) {
+		if (stats->feat[i])
+			kfree(stats->feat[i]);
+		stats->feat[i] = NULL;
+	}
+	kfree(stats);
+
+	return ret;
+}
+
+/* doit handler for QCA_NL80211_VENDOR_SUBCMD_SET_WIFI_CONFIGURATION:
+ * record on the wdev whether it is an MLO (multi-link) netdev, as told by
+ * the EHT_MLO_MODE attribute. A no-op outside the bonded-interface model.
+ *
+ * Returns 0 on success or no-op, a negative errno on parse failure.
+ */
+static int ath12k_vendor_ds_handler(struct wiphy *wihpy,
+			    struct wireless_dev *wdev,
+			    const void *data,
+			    int data_len)
+{
+	struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_DS_MAX + 1];
+	int mlo_mode = 0;
+	int err;
+
+	if (!g_bonded_interface_model)
+		return 0;
+
+	err = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_DS_MAX, data, data_len,
+			ath12k_ds_policy, NULL);
+	if (err) {
+		ath12k_err(NULL, "Invalid attribute with ds %d\n", err);
+		return err;
+	}
+
+	/* Attribute absent -> treat as non-MLO (0). */
+	if (tb[QCA_WLAN_VENDOR_ATTR_CONFIG_EHT_MLO_MODE])
+		mlo_mode = nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_CONFIG_EHT_MLO_MODE]);
+
+	ath12k_dbg(NULL, ATH12K_DBG_PPE,
+		   "Marking dev [%s] as mlo netdev. %d\n", wdev->netdev->name, mlo_mode);
+	wdev->ml_netdev = mlo_mode;
+
+	return 0;
+}
+
+/* Handle QCA_WLAN_VENDOR_SDWF_DEV_OPER_RESET_STATS: reset (or clear) the
+ * SLA telemetry stats for a service class and/or peer.
+ *
+ * Every attribute is optional; absent ones default to 0 / all-zero MAC.
+ *
+ * Returns the result of ath12k_telemetry_sla_reset_stats(), or a negative
+ * errno on parse failure.
+ */
+static int ath12k_vendor_telemetry_sla_reset_stats(struct nlattr *clr_stats)
+{
+	struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_MAX + 1];
+	int ret = 0;
+	/* svc_id and set_clear must be zero-initialized: the attributes are
+	 * optional, and the originals were passed down uninitialized when
+	 * the corresponding attribute was absent.
+	 */
+	u8 svc_id = 0, set_clear = 0;
+	u8 mac_addr[ETH_ALEN] = { 0 }, mld_mac_addr[ETH_ALEN] = { 0 };
+
+	ret = nla_parse_nested(tb, QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_MAX,
+			       clr_stats,
+			       ath12k_telemetric_sla_policy, NULL);
+
+	if (ret) {
+		ath12k_err(NULL, "Invalid attribute with telemetry sla reset stats command\n");
+		return ret;
+	}
+
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_SVC_ID])
+		svc_id = nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_SVC_ID]);
+
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_PEER_MAC] &&
+	    (nla_len(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_PEER_MAC]) == ETH_ALEN))
+		memcpy(mac_addr, nla_data(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_PEER_MAC]),
+		       ETH_ALEN);
+
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_PEER_MLD_MAC] &&
+	    (nla_len(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_PEER_MLD_MAC]) == ETH_ALEN))
+		memcpy(mld_mac_addr, nla_data(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_PEER_MLD_MAC]),
+		       ETH_ALEN);
+
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_SET_CLEAR])
+		set_clear = nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_SET_CLEAR]);
+
+	return ath12k_telemetry_sla_reset_stats(svc_id, mac_addr, mld_mac_addr,
+						set_clear);
+}
+
+/* Emit an SDWF_DEV_OPS vendor event telling userspace that the SLA for
+ * @svc_id on peer @mac_addr was breached (@set_clear set) or recovered
+ * (@set_clear clear). @tid is translated to its WMM access category;
+ * @mld_addr is included only when non-NULL.
+ */
+void ath12k_vendor_telemetry_notify_breach(struct ieee80211_vif *vif, u8 *mac_addr,
+					   u8 svc_id, u8 param, bool set_clear,
+					   u8 tid, u8 *mld_addr)
+{
+	struct nlattr *notify_params;
+	struct wireless_dev *wdev;
+	struct sk_buff *skb;
+	u8 access_category;
+
+	wdev = ieee80211_vif_to_wdev(vif);
+
+	if (!wdev)
+		return;
+
+	if (!wdev->wiphy)
+		return;
+
+	skb = cfg80211_vendor_event_alloc(wdev->wiphy, wdev, NLMSG_DEFAULT_SIZE,
+					  0, GFP_KERNEL);
+	if (!skb) {
+		ath12k_err(NULL, "No memory available to send notify breach event\n");
+		return;
+	}
+
+	/* Map TID (0-7) to access category. */
+	switch (tid) {
+	case 0:
+	case 3:
+		access_category = 0; //AC_BE
+		break;
+	case 1:
+	case 2:
+		access_category = 1; //AC_BK
+		break;
+	case 4:
+	case 5:
+		access_category = 2; //AC_VI
+		break;
+	case 6:
+	case 7:
+		access_category = 3; //AC_VO
+		break;
+	default:
+		ath12k_err(NULL, "Invalid TID = %u for notifying breach event\n", tid);
+		goto err;
+	}
+
+	notify_params = nla_nest_start(skb, QCA_WLAN_VENDOR_ATTR_SDWF_DEV_SLA_BREACHED_PARAMS);
+	/* nla_nest_start() returns NULL when the skb is out of room; the
+	 * original dereferenced it unchecked via the nla_put() chain below.
+	 */
+	if (!notify_params)
+		goto err;
+
+	if (nla_put(skb, QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_PEER_MAC, ETH_ALEN, mac_addr) ||
+	    (mld_addr && nla_put(skb, QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_PEER_MLD_MAC,
+	    ETH_ALEN, mld_addr)) ||
+	    nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_SVC_ID, svc_id) ||
+	    nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_TYPE, param) ||
+	    nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_SET_CLEAR, set_clear) ||
+	    nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_AC, access_category)) {
+		ath12k_err(NULL, "No memory available at NL to send notify breach event\n");
+		goto err;
+	}
+
+	nla_nest_end(skb, notify_params);
+	cfg80211_vendor_event(skb, GFP_KERNEL);
+	return;
+err:
+	/* skb is an sk_buff from cfg80211_vendor_event_alloc(); it must be
+	 * released with kfree_skb(), not kfree() as the original did —
+	 * kfree() on an skb corrupts the slab caches behind sk_buff.
+	 */
+	kfree_skb(skb);
+}
+
+/* doit handler for QCA_NL80211_VENDOR_SUBCMD_SDWF_PHY_OPS: dispatch the
+ * radio-level SAWF operation named by the PHY_OPERATION attribute to the
+ * matching sub-handler (service class set/delete, SLA samples, breach
+ * detection, thresholds). Each operation requires its own nested
+ * parameter attribute; a missing one is rejected with -EINVAL.
+ *
+ * Returns 0 on success, -EOPNOTSUPP when SAWF is disabled, -EINVAL for a
+ * missing/unknown operation, or the sub-handler's error.
+ */
+static int ath12k_vendor_sdwf_phy_operations(struct wiphy *wiphy,
+					     struct wireless_dev *wdev,
+					     const void *data,
+					     int data_len)
+{
+	struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_MAX + 1];
+	u8 sdwf_oper;
+	int ret = 0;
+
+	if (!ath12k_sawf_enable)
+		return -EOPNOTSUPP;
+
+	ret = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_SDWF_PHY_MAX, data, data_len,
+			 ath12k_vendor_sdwf_phy_policy, NULL);
+	if (ret) {
+		ath12k_err(NULL, "Invalid attributes with SAWF radio level commands\n");
+		goto end;
+	}
+
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_OPERATION]) {
+		sdwf_oper = nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_OPERATION]);
+	} else {
+		ath12k_err(NULL, "SAWF radio level operation missing\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	switch (sdwf_oper) {
+	case QCA_WLAN_VENDOR_SDWF_PHY_OPER_SVC_SET:
+		if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SVC_PARAMS]) {
+			ret = ath12k_vendor_set_sdwf_config(wiphy, wdev,
+							    tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SVC_PARAMS]);
+		} else {
+			ath12k_err(NULL, "SAWF svc parameters missing\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	case QCA_WLAN_VENDOR_SDWF_PHY_OPER_SVC_DEL:
+		if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SVC_PARAMS]) {
+			ret = ath12k_vendor_disable_sdwf_config(tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SVC_PARAMS]);
+		} else {
+			ath12k_err(NULL, "SAWF service id missing with delete operation\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	case QCA_WLAN_VENDOR_SDWF_PHY_OPER_SLA_SAMPLES_SET:
+		if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SLA_SAMPLES_PARAMS]) {
+			ret = ath12k_vendor_telemetry_sdwf_sla_samples_config(tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SLA_SAMPLES_PARAMS]);
+		} else {
+			ath12k_err(NULL, "SAWF sla samples parameters missing\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	case QCA_WLAN_VENDOR_SDWF_PHY_OPER_SLA_BREACH_DETECTION_SET:
+		if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SLA_DETECT_PARAMS]) {
+			ret = ath12k_vendor_telemetry_sdwf_sla_detection_config(tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SLA_DETECT_PARAMS]);
+		} else {
+			ath12k_err(NULL, "SAWF sla breach detect parameters missing\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	case QCA_WLAN_VENDOR_SDWF_PHY_OPER_SLA_THRESHOLD_SET:
+		if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SLA_THRESHOLD_PARAMS]) {
+			ret = ath12k_vendor_telemetry_sdwf_sla_thershold_config(tb[QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SLA_THRESHOLD_PARAMS]);
+		} else {
+			ath12k_err(NULL, "SAWF sla threshold parameters missing\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	default:
+		/* Note: OPER_SVC_GET is served by the dumpit path, not here. */
+		ath12k_err(NULL, "Invalid operation with SAWF radio level commands\n");
+		ret = -EINVAL;
+	}
+end:
+	return ret;
+}
+
+/* doit handler for QCA_NL80211_VENDOR_SUBCMD_SDWF_DEV_OPS: dispatch the
+ * device-level SAWF operation named by the DEV_OPERATION attribute
+ * (default-queue map/unmap/report, streaming stats, stats reset) to the
+ * matching sub-handler. Each operation requires its own nested parameter
+ * attribute; a missing one is rejected with -EINVAL.
+ *
+ * Returns 0 on success, -EOPNOTSUPP when SAWF is disabled, -EINVAL for a
+ * missing/unknown operation, or the sub-handler's error.
+ */
+static int ath12k_vendor_sdwf_dev_operations(struct wiphy *wiphy,
+					     struct wireless_dev *wdev,
+					     const void *data,
+					     int data_len)
+{
+	struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_MAX + 1];
+	u8 sdwf_oper;
+	int ret = 0;
+
+	if (!ath12k_sawf_enable)
+		return -EOPNOTSUPP;
+
+	ret = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_SDWF_DEV_MAX, data, data_len,
+			ath12k_vendor_sdwf_dev_policy, NULL);
+	if (ret) {
+		ath12k_err(NULL, "Invalid attributes with SAWF device level commands\n");
+		goto end;
+	}
+
+	if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_OPERATION]) {
+		sdwf_oper = nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_OPERATION]);
+	} else {
+		ath12k_err(NULL, "SAWF device level operation missing\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	switch (sdwf_oper) {
+	case QCA_WLAN_VENDOR_SDWF_DEV_OPER_DEF_Q_MAP:
+		if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_DEF_Q_PARAMS]) {
+			ret = ath12k_vendor_sdwf_def_qmap_req(wdev,
+							      tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_DEF_Q_PARAMS]);
+		} else {
+			ath12k_err(NULL, "SAWF default Queue map parameters missing\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	case QCA_WLAN_VENDOR_SDWF_DEV_OPER_DEF_Q_UNMAP:
+		if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_DEF_Q_PARAMS]) {
+			ret = ath12k_vendor_sdwf_def_qunmap_req(wdev, tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_DEF_Q_PARAMS]);
+		} else {
+			ath12k_err(NULL, "SAWF default Queue unmap parameters missing\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	case QCA_WLAN_VENDOR_SDWF_DEV_OPER_DEF_Q_MAP_GET:
+		if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_DEF_Q_PARAMS]) {
+			ret = ath12k_vendor_sdwf_def_qmap_report_req(wdev, tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_DEF_Q_PARAMS]);
+		} else {
+			ath12k_err(NULL, "SAWF default Queue map report parameters missing\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	case QCA_WLAN_VENDOR_SDWF_DEV_OPER_STREAMING_STATS:
+		if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_STREAMING_STATS_PARAMS]) {
+			ret = ath12k_vendor_sdwf_streaming_stats_configure(wdev, tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_STREAMING_STATS_PARAMS]);
+		} else {
+			/* Error message fixed: was "streaming statsparameters". */
+			ath12k_err(NULL, "SAWF default streaming stats parameters missing\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	case QCA_WLAN_VENDOR_SDWF_DEV_OPER_RESET_STATS:
+		if (tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_RESET_STATS]) {
+			ret = ath12k_vendor_telemetry_sla_reset_stats(tb[QCA_WLAN_VENDOR_ATTR_SDWF_DEV_RESET_STATS]);
+		} else {
+			ath12k_err(NULL, "SAWF clear telemetry stats parameters missing\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	default:
+		ath12k_err(NULL, "Invalid operation = %d with SAWF device level commands\n", sdwf_oper);
+		ret = -EINVAL;
+	}
+end:
+	return ret;
+}
+#endif /* CONFIG_ATH12K_SAWF */
+
+/* Fetch optional vendor attribute QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_<attr>
+ * from the parsed table @info into @res. Leaves @res untouched when the
+ * attribute is absent; @bit (8/32) selects nla_get_u8()/nla_get_u32().
+ */
+#define NL_VENDOR_GET_ATTR(res, info, attr, bit) \
+	do { \
+		struct nlattr *nl_attr; \
+		nl_attr = info[QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_##attr]; \
+		if (nl_attr) \
+			res = nla_get_u##bit(nl_attr); \
+	} while (0)
+
+/* doit handler for QCA_NL80211_VENDOR_SUBCMD_SCS_RULE_CONFIG: collect the
+ * SCS latency/QoS parameters from the request (every attribute optional,
+ * absent ones stay zero) and hand them to mac80211-facing code.
+ *
+ * Returns 0 on success, a negative errno on parse or apply failure.
+ */
+static int ath12k_vendor_set_scs_qos_params(struct wiphy *wihpy,
+					    struct wireless_dev *wdev,
+					    const void *data,
+					    int data_len)
+{
+	struct nlattr *info[QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_MAX + 1];
+	struct ath12k_latency_params params = { 0 };
+	struct nlattr *mac_attr;
+	int err;
+
+	err = nla_parse(info, QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_MAX,
+			data, data_len, ath12k_vendor_scs_config_policy, NULL);
+	if (err) {
+		ath12k_err(NULL, "Invalid SCS attributes\n");
+		return err;
+	}
+
+	/* Scalar knobs first, then the peer MAC. */
+	NL_VENDOR_GET_ATTR(params.service_interval, info, SERVICE_INTERVAL, 32);
+	NL_VENDOR_GET_ATTR(params.burst_size, info, BURST_SIZE, 32);
+	NL_VENDOR_GET_ATTR(params.delay_bound, info, DELAY_BOUND, 32);
+	NL_VENDOR_GET_ATTR(params.min_data_rate, info, MINIMUM_DATA_RATE, 32);
+	NL_VENDOR_GET_ATTR(params.req_type, info, REQUEST_TYPE, 8);
+	NL_VENDOR_GET_ATTR(params.user_priority, info, USER_PRIORITY, 8);
+	NL_VENDOR_GET_ATTR(params.ac, info, ACCESS_CATEGORY, 8);
+	NL_VENDOR_GET_ATTR(params.direction, info, DIRECTION, 8);
+
+	mac_attr = info[QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_DST_MAC_ADDR];
+	if (mac_attr)
+		memcpy(params.peer_mac, nla_data(mac_attr), ETH_ALEN);
+
+	return ath12k_mac_op_set_scs(wdev, &params);
+}
+
+/* Vendor commands exposed on the wiphy (registered from
+ * ath12k_vendor_register()). SAWF/telemetry commands are compiled in only
+ * with CONFIG_ATH12K_SAWF.
+ */
+static struct wiphy_vendor_command ath12k_vendor_commands[] = {
+#ifdef CONFIG_ATH12K_SAWF
+	/* Radio-level SAWF ops; dumpit serves OPER_SVC_GET-style views. */
+	{
+		.info.vendor_id = QCA_NL80211_VENDOR_ID,
+		.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_SDWF_PHY_OPS,
+		.doit = ath12k_vendor_sdwf_phy_operations,
+		.dumpit = ath12k_vendor_view_sdwf_config,
+		.policy = ath12k_vendor_sdwf_phy_policy,
+		.maxattr = QCA_WLAN_VENDOR_ATTR_SDWF_PHY_MAX,
+	},
+	/* Device-level SAWF ops (queue map/unmap, streaming stats, reset). */
+	{
+		.info.vendor_id = QCA_NL80211_VENDOR_ID,
+		.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_SDWF_DEV_OPS,
+		.doit = ath12k_vendor_sdwf_dev_operations,
+		.policy = ath12k_vendor_sdwf_dev_policy,
+		.maxattr = QCA_WLAN_VENDOR_ATTR_SDWF_DEV_MAX,
+		.flags = WIPHY_VENDOR_CMD_NEED_NETDEV,
+	},
+	/* Per-peer SAWF telemetry retrieval. */
+	{
+		.info.vendor_id = QCA_NL80211_VENDOR_ID,
+		.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_TELEMETRIC_DATA,
+		.doit = ath12k_vendor_telemetry_getstats,
+		.policy = ath12k_telemetric_req_policy,
+		.maxattr = QCA_WLAN_VENDOR_ATTR_TELEMETRIC_MAX,
+		.flags = WIPHY_VENDOR_CMD_NEED_NETDEV,
+	},
+	/* MLO-netdev marking for the bonded-interface (DS) model. */
+	{
+		.info.vendor_id = QCA_NL80211_VENDOR_ID,
+		.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_SET_WIFI_CONFIGURATION,
+		.doit = ath12k_vendor_ds_handler,
+		.policy = ath12k_ds_policy,
+		.maxattr = QCA_WLAN_VENDOR_ATTR_DS_MAX,
+		.flags = WIPHY_VENDOR_CMD_NEED_NETDEV,
+	},
+#endif /* CONFIG_ATH12K_SAWF */
+	/* SCS latency/QoS rule configuration. */
+	{
+		.info.vendor_id = QCA_NL80211_VENDOR_ID,
+		.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_SCS_RULE_CONFIG,
+		.doit = ath12k_vendor_set_scs_qos_params,
+		.policy = ath12k_vendor_scs_config_policy,
+		.maxattr = QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_MAX,
+		.flags = WIPHY_VENDOR_CMD_NEED_NETDEV,
+	},
+};
+
+/* Vendor events advertised on the wiphy. Index 0 is the SLA-breach
+ * notification sent by ath12k_vendor_telemetry_notify_breach().
+ */
+static const struct nl80211_vendor_cmd_info ath12k_vendor_events[] = {
+#ifdef CONFIG_ATH12K_SAWF
+	{
+		.vendor_id = QCA_NL80211_VENDOR_ID,
+		.subcmd = QCA_NL80211_VENDOR_SUBCMD_SDWF_DEV_OPS,
+	},
+#endif
+};
+
+/* Publish the driver's vendor command and event tables on the wiphy of
+ * @ah. Always succeeds (returns 0).
+ */
+int ath12k_vendor_register(struct ath12k_hw *ah)
+{
+	struct wiphy *wiphy = ah->hw->wiphy;
+
+	wiphy->vendor_commands = ath12k_vendor_commands;
+	wiphy->n_vendor_commands = ARRAY_SIZE(ath12k_vendor_commands);
+	wiphy->vendor_events = ath12k_vendor_events;
+	wiphy->n_vendor_events = ARRAY_SIZE(ath12k_vendor_events);
+
+	return 0;
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/ath12k/vendor.h	2024-03-18 14:40:14.859741552 +0100
@@ -0,0 +1,590 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+#ifndef ATH12K_VENDOR_H
+#define ATH12K_VENDOR_H
+
+#define QCA_NL80211_VENDOR_ID 0x001374
+
+enum qca_nl80211_vendor_subcmds {
+	/* Wi-Fi configuration subcommand */
+	QCA_NL80211_VENDOR_SUBCMD_SET_WIFI_CONFIGURATION = 74,
+	QCA_NL80211_VENDOR_SUBCMD_SCS_RULE_CONFIG = 218,
+#ifdef CONFIG_ATH12K_SAWF
+	QCA_NL80211_VENDOR_SUBCMD_SDWF_PHY_OPS = 235,
+	QCA_NL80211_VENDOR_SUBCMD_SDWF_DEV_OPS = 236,
+	QCA_NL80211_VENDOR_SUBCMD_TELEMETRIC_DATA = 334,
+#endif
+};
+
+#ifdef CONFIG_ATH12K_SAWF
+enum qca_wlan_vendor_attr_sdwf_phy {
+	QCA_WLAN_VENDOR_ATTR_SDWF_PHY_INVALID = 0,
+	QCA_WLAN_VENDOR_ATTR_SDWF_PHY_OPERATION = 1,
+	QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SVC_PARAMS = 2,
+	QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SLA_SAMPLES_PARAMS = 3,
+	QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SLA_DETECT_PARAMS = 4,
+	QCA_WLAN_VENDOR_ATTR_SDWF_PHY_SLA_THRESHOLD_PARAMS = 5,
+
+	/* keep last */
+	QCA_WLAN_VENDOR_ATTR_SDWF_PHY_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_SDWF_PHY_MAX =
+	QCA_WLAN_VENDOR_ATTR_SDWF_PHY_AFTER_LAST - 1,
+};
+
+enum qca_wlan_vendor_sdwf_phy_oper {
+	QCA_WLAN_VENDOR_SDWF_PHY_OPER_SVC_SET = 0,
+	QCA_WLAN_VENDOR_SDWF_PHY_OPER_SVC_DEL = 1,
+	QCA_WLAN_VENDOR_SDWF_PHY_OPER_SVC_GET = 2,
+	QCA_WLAN_VENDOR_SDWF_PHY_OPER_SLA_SAMPLES_SET = 3,
+	QCA_WLAN_VENDOR_SDWF_PHY_OPER_SLA_BREACH_DETECTION_SET = 4,
+	QCA_WLAN_VENDOR_SDWF_PHY_OPER_SLA_THRESHOLD_SET = 5,
+};
+
+enum qca_wlan_vendor_attr_sdwf_svc {
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_INVALID = 0,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_ID = 1,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MIN_TP = 2,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MAX_TP = 3,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_BURST_SIZE = 4,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_INTERVAL = 5,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_DELAY_BOUND = 6,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MSDU_TTL = 7,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_PRIO = 8,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_TID = 9,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MSDU_RATE_LOSS = 10,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_SVC_INTERVAL = 11,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_MIN_TPUT = 12,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_MAX_LATENCY = 13,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_BURST_SIZE = 14,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_OFDMA_DISABLE = 15,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_UL_MU_MIMO_DISABLE = 16,
+	/* The below are used by MCC */
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_BUFFER_LATENCY_TOLERANCE = 17,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_TX_TRIGGER_DSCP = 18,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_TX_REPLACE_DSCP = 19,
+
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_MAX =
+	QCA_WLAN_VENDOR_ATTR_SDWF_SVC_AFTER_LAST - 1,
+};
+
+enum qca_wlan_vendor_attr_sdwf_sla_samples {
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_INVALID = 0,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_MOVING_AVG_PKT = 1,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_MOVING_AVG_WIN = 2,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_SLA_NUM_PKT = 3,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_SLA_TIME_SEC = 4,
+
+	/* keep last */
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_MAX =
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_SAMPLES_AFTER_LAST - 1,
+};
+
+enum qca_wlan_vendor_attr_sdwf_sla_detect {
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_INVALID = 0,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_PARAM = 1,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MIN_TP = 2,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MAX_TP = 3,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_BURST_SIZE = 4,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_INTERVAL = 5,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_DELAY_BOUND = 6,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MSDU_TTL = 7,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MSDU_RATE_LOSS = 8,
+
+	/* keep last */
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_MAX =
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_DETECT_AFTER_LAST - 1,
+};
+
+enum qca_wlan_vendor_sdwf_sla_detect_param {
+	QCA_WLAN_VENDOR_SDWF_SLA_DETECT_PARAM_NUM_PACKET,
+	QCA_WLAN_VENDOR_SDWF_SLA_DETECT_PARAM_PER_SECOND,
+	QCA_WLAN_VENDOR_SDWF_SLA_DETECT_PARAM_MOV_AVG,
+	QCA_WLAN_VENDOR_SDWF_SLA_DETECT_PARAM_NUM_SECOND,
+	QCA_WLAN_VENDOR_SDWF_SLA_DETECT_PARAM_MAX,
+};
+
+enum qca_wlan_vendor_attr_sdwf_sla_threshold {
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_INVALID = 0,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_SVC_ID = 1,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_MIN_TP = 2,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_MAX_TP = 3,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_BURST_SIZE = 4,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_INTERVAL = 5,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_DELAY_BOUND = 6,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_MSDU_TTL = 7,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_MSDU_RATE_LOSS = 8,
+
+	/* keep last */
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_MAX =
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_THRESHOLD_AFTER_LAST - 1,
+};
+
+enum qca_wlan_vendor_attr_sdwf_dev {
+	QCA_WLAN_VENDOR_ATTR_SDWF_DEV_INVALID = 0,
+	QCA_WLAN_VENDOR_ATTR_SDWF_DEV_OPERATION = 1,
+	QCA_WLAN_VENDOR_ATTR_SDWF_DEV_DEF_Q_PARAMS = 2,
+	QCA_WLAN_VENDOR_ATTR_SDWF_DEV_STREAMING_STATS_PARAMS = 3,
+	QCA_WLAN_VENDOR_ATTR_SDWF_DEV_RESET_STATS = 4,
+	QCA_WLAN_VENDOR_ATTR_SDWF_DEV_SLA_BREACHED_PARAMS = 5,
+
+	/* keep last */
+	QCA_WLAN_VENDOR_ATTR_SDWF_DEV_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_SDWF_DEV_MAX =
+	QCA_WLAN_VENDOR_ATTR_SDWF_DEV_AFTER_LAST - 1,
+};
+
+enum qca_wlan_vendor_sdwf_dev_oper {
+	QCA_WLAN_VENDOR_SDWF_DEV_OPER_DEF_Q_MAP = 0,
+	QCA_WLAN_VENDOR_SDWF_DEV_OPER_DEF_Q_UNMAP = 1,
+	QCA_WLAN_VENDOR_SDWF_DEV_OPER_DEF_Q_MAP_GET = 2,
+	QCA_WLAN_VENDOR_SDWF_DEV_OPER_STREAMING_STATS = 3,
+	QCA_WLAN_VENDOR_SDWF_DEV_OPER_RESET_STATS = 4,
+	QCA_WLAN_VENDOR_SDWF_DEV_OPER_BREACH_DETECTED = 5,
+};
+
+enum qca_wlan_vendor_attr_sdwf_def_q_map {
+	QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_INVALID = 0,
+	QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_SVC_ID = 1,
+	QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_MAC_ADDR = 2,
+	QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_TID = 3,
+	QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_STATUS = 4,
+
+	/* keep last */
+	QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_MAX =
+	QCA_WLAN_VENDOR_ATTR_SDWF_DEF_Q_MAP_AFTER_LAST - 1,
+};
+
+enum qca_wlan_vendor_attr_sdwf_streaming_stats {
+	QCA_WLAN_VENDOR_ATTR_SDWF_STREAMING_INVALID = 0,
+	QCA_WLAN_VENDOR_ATTR_SDWF_STREAMING_BASIC_STATS = 1,
+	QCA_WLAN_VENDOR_ATTR_SDWF_STREAMING_EXTND_STATS = 2,
+	QCA_WLAN_VENDOR_ATTR_SDWF_MLO_LINK_ID = 3,
+
+	/* keep last */
+	QCA_WLAN_VENDOR_ATTR_SDWF_STREAMING_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_SDWF_STREAMING_MAX =
+	QCA_WLAN_VENDOR_ATTR_SDWF_STREAMING_AFTER_LAST - 1,
+};
+
+enum qca_wlan_vendor_attr_sdwf_sla_breach_param {
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_INVALID = 0,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_PEER_MAC = 1,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_SVC_ID = 2,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_TYPE = 3,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_SET_CLEAR = 4,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_PEER_MLD_MAC = 5,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_AC = 6,
+
+	/* Keep last */
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_MAX =
+	QCA_WLAN_VENDOR_ATTR_SDWF_SLA_BREACH_PARAM_AFTER_LAST - 1
+};
+
+enum qca_wlan_vendor_sdwf_sla_breach_type {
+	QCA_WLAN_VENDOR_SDWF_SLA_BREACH_PARAM_TYPE_INVALID = 0,
+	QCA_WLAN_VENDOR_SDWF_SLA_BREACH_PARAM_TYPE_MIN_THROUGHPUT,
+	QCA_WLAN_VENDOR_SDWF_SLA_BREACH_PARAM_TYPE_MAX_THROUGHPUT,
+	QCA_WLAN_VENDOR_SDWF_SLA_BREACH_PARAM_TYPE_BURST_SIZE,
+	QCA_WLAN_VENDOR_SDWF_SLA_BREACH_PARAM_TYPE_SERVICE_INTERVAL,
+	QCA_WLAN_VENDOR_SDWF_SLA_BREACH_PARAM_TYPE_DELAY_BOUND,
+	QCA_WLAN_VENDOR_SDWF_SLA_BREACH_PARAM_TYPE_MSDU_TTL,
+	QCA_WLAN_VENDOR_SDWF_SLA_BREACH_PARAM_TYPE_MSDU_LOSS,
+	QCA_WLAN_VENDOR_SDWF_SLA_BREACH_PARAM_TYPE_MAX,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_telemetric: Defines attributes to be used in
+ * request message of QCA_NL80211_VENDOR_SUBCMD_TELEMETRIC_DATA vendor command.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_TELEMETRIC_LEVEL: Defines stats levels like Basic or
+ * Advance or Debug.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_TELEMETRIC_OBJECT: Defines stats objects like STA or
+ * VAP or Radio or SoC.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_TELEMETRIC_TYPE: Defines stats types like Data or
+ * control.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_TELEMETRIC_AGGREGATE: Defines aggregation flag for
+ * driver aggregation.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_TELEMETRIC_FEATURE_FLAG: Defines feature flags for
+ * which stats is requested.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_TELEMETRIC_STA_MAC: Defines STA MAC Address if the
+ * request is for particular STA object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_TELEMETRIC_SERVICEID: Defines serviceid for sawf stats.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_TELEMETRIC_MAX: Defines maximum attribute counts to be
+ * used in QCA_NL80211_VENDOR_SUBCMD_TELEMETRIC_DATA vendor command request.
+ */
+enum qca_wlan_vendor_attr_telemetric {
+	QCA_WLAN_VENDOR_ATTR_TELEMETRIC_LEVEL = 1,
+	QCA_WLAN_VENDOR_ATTR_TELEMETRIC_OBJECT,
+	QCA_WLAN_VENDOR_ATTR_TELEMETRIC_TYPE,
+	QCA_WLAN_VENDOR_ATTR_TELEMETRIC_AGGREGATE,
+	QCA_WLAN_VENDOR_ATTR_TELEMETRIC_FEATURE_FLAG,
+	QCA_WLAN_VENDOR_ATTR_TELEMETRIC_STA_MAC,
+	QCA_WLAN_VENDOR_ATTR_TELEMETRIC_SERVICEID,
+	QCA_WLAN_VENDOR_ATTR_TELEMETRIC_MLD_LINK,
+
+	QCA_WLAN_VENDOR_ATTR_TELEMETRIC_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_TELEMETRIC_MAX =
+		QCA_WLAN_VENDOR_ATTR_TELEMETRIC_AFTER_LAST -1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_stats: Defines attributes to be used in response of
+ * QCA_NL80211_VENDOR_SUBCMD_TELEMETRIC_DATA vendor command.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_STATS_LEVEL: Used for stats levels like Basic or
+ * Advance or Debug.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_STATS_OBJECT: Required (u8)
+ * Used with the command, carrying stats, to specify for which stats_object enum.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_STATS_OBJ_ID: Used for Object ID like for STA MAC
+ * address or for VAP or Radio or SoC respective interface name.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_STATS_SERVICEID: Used for sawf levels stats like per
+ * peer or per peer per serviceclass.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_STATS_PARENT_IF: Used for Parent Object interface name
+ * like for STA VAP name, for VAP Radio interface name and for Radio SoC
+ * interface name.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_STATS_TYPE: Used for stats types like Data or
+ * control.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_STATS_RECURSIVE: Required (NESTED Flag)
+ * Used with the command to specify the nested stats.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_STATS_MULTI_REPLY: Set this flag if current reply
+ * message is holding data from previous reply.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_STATS_MAX: Defines maximum attributes can be used in
+ * QCA_NL80211_VENDOR_SUBCMD_TELEMETRIC_DATA vendor command response.
+ */
+
+enum qca_wlan_vendor_attr_stats {
+	QCA_WLAN_VENDOR_ATTR_STATS_LEVEL = 1,
+	QCA_WLAN_VENDOR_ATTR_STATS_OBJECT,
+	QCA_WLAN_VENDOR_ATTR_STATS_OBJ_ID,
+	QCA_WLAN_VENDOR_ATTR_STATS_SERVICEID,
+	QCA_WLAN_VENDOR_ATTR_STATS_PARENT_IF,
+	QCA_WLAN_VENDOR_ATTR_STATS_TYPE,
+	QCA_WLAN_VENDOR_ATTR_STATS_RECURSIVE,
+	QCA_WLAN_VENDOR_ATTR_STATS_MULTI_REPLY,
+	QCA_WLAN_VENDOR_ATTR_STATS_MAX,
+};
+
+struct telemetry_sawf_tx_stat {
+	struct dp_pkt_info tx_success;
+	struct dp_pkt_info tx_ingress;
+	struct {
+		struct dp_pkt_info fw_rem;
+		u32 fw_rem_notx;
+		u32 fw_rem_tx;
+		u32 age_out;
+		u32 fw_reason1;
+		u32 fw_reason2;
+		u32 fw_reason3;
+	} dropped;
+	struct sawf_fw_mpdu_stats svc_intval_stats;
+	struct sawf_fw_mpdu_stats burst_size_stats;
+	u32 tx_failed;
+	u32 queue_depth;
+	u32 throughput;
+	u32 ingress_rate;
+};
+
+struct telemetry_sawftx_stats {
+	struct telemetry_sawf_tx_stat tx[ATH12K_SAWF_MAX_TID_SUPPORT][MAX_Q_PER_TID];
+	u8 tid;
+	u8 msduq;
+};
+
+struct telemetry_sawf_delay_stat {
+	struct ath12k_delay_hist_stats delay_hist;
+	u8 cur_win;
+	u32 nwdelay_avg;
+	u32 swdelay_avg;
+	u32 hwdelay_avg;
+	u64 delay_bound_success;
+	u64 delay_bound_failure;
+};
+
+struct telemetry_sawfdelay_stats {
+	struct telemetry_sawf_delay_stat delay[ATH12K_SAWF_MAX_TID_SUPPORT][MAX_Q_PER_TID];
+	u8 tid;
+	u8 msduq;
+};
+
+/**
+ * enum qca_wlan_vendor_attr_feat: Defines nested attributes to be used in
+ * response of QCA_NL80211_VENDOR_SUBCMD_TELEMETRIC_DATA vendor command.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_ME: Used for Multicast Enhancement stats for a
+ * particular stats object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_RX: Used for Rx stats for a particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_TX: Used for Tx stats for a particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_AST: Used for AST stats for a particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_CFR: Used for CFR stats for a particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_FWD: Used for BSS stats for a particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_RAW: Used for RAW mode stats for a particular
+ * object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_TSO: Used for TSO stats for a particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_TWT: Used for TWT stats for a particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_VOW: Used for VOW stats for a particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_WDI: Used for WDI stats for a particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_WMI: Used for WMI stats for a particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_IGMP: Used for IGMP stats for a particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_LINK: Used for Link related stats for a particular
+ * object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_MESH: Used for Mesh related stats for a particular
+ * object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_RATE: Used for Rate stats for a particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_NAWDS: Used for NAWDS related stats for a
+ * particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_DELAY: Used for DELAY related stats for a
+ * particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_JITTER: Used for JITTER related stats for a
+ * particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_TXCAP: Used for TXCAP related stats for a
+ * particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_MONITOR: Used for MONITOR related stats for a
+ * particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_SAWFDELAY: Used for SAWFDELAY related stats for a
+ * particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_SAWFTX: Used for SAWFTX related stats for a
+ * particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_DETER: Used for DETERMINISTIC related stats for a
+ * particular object.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FEAT_MAX: Defines Maximum count of feature attributes.
+ */
+enum qca_wlan_vendor_attr_feat {
+	QCA_WLAN_VENDOR_ATTR_FEAT_ME = 1,
+	QCA_WLAN_VENDOR_ATTR_FEAT_RX,
+	QCA_WLAN_VENDOR_ATTR_FEAT_TX,
+	QCA_WLAN_VENDOR_ATTR_FEAT_AST,
+	QCA_WLAN_VENDOR_ATTR_FEAT_CFR,
+	QCA_WLAN_VENDOR_ATTR_FEAT_FWD,
+	QCA_WLAN_VENDOR_ATTR_FEAT_RAW,
+	QCA_WLAN_VENDOR_ATTR_FEAT_TSO,
+	QCA_WLAN_VENDOR_ATTR_FEAT_TWT,
+	QCA_WLAN_VENDOR_ATTR_FEAT_VOW,
+	QCA_WLAN_VENDOR_ATTR_FEAT_WDI,
+	QCA_WLAN_VENDOR_ATTR_FEAT_WMI,
+	QCA_WLAN_VENDOR_ATTR_FEAT_IGMP,
+	QCA_WLAN_VENDOR_ATTR_FEAT_LINK,
+	QCA_WLAN_VENDOR_ATTR_FEAT_MESH,
+	QCA_WLAN_VENDOR_ATTR_FEAT_RATE,
+	QCA_WLAN_VENDOR_ATTR_FEAT_NAWDS,
+	QCA_WLAN_VENDOR_ATTR_FEAT_DELAY,
+	QCA_WLAN_VENDOR_ATTR_FEAT_JITTER,
+	QCA_WLAN_VENDOR_ATTR_FEAT_TXCAP,
+	QCA_WLAN_VENDOR_ATTR_FEAT_MONITOR,
+	QCA_WLAN_VENDOR_ATTR_FEAT_SAWFDELAY,
+	QCA_WLAN_VENDOR_ATTR_FEAT_SAWFTX,
+	QCA_WLAN_VENDOR_ATTR_FEAT_DETER,
+	/**
+	 * New attribute must be added before this.
+	 * Also define the corresponding feature
+	 * index in enum stats_feat.
+	 */
+	QCA_WLAN_VENDOR_ATTR_FEAT_MAX,
+};
+
+enum stats_feat {
+	STATS_FEAT_ME,
+	STATS_FEAT_RX,
+	STATS_FEAT_TX,
+	STATS_FEAT_AST,
+	STATS_FEAT_CFR,
+	STATS_FEAT_FWD,
+	STATS_FEAT_RAW,
+	STATS_FEAT_TSO,
+	STATS_FEAT_TWT,
+	STATS_FEAT_VOW,
+	STATS_FEAT_WDI,
+	STATS_FEAT_WMI,
+	STATS_FEAT_IGMP,
+	STATS_FEAT_LINK,
+	STATS_FEAT_MESH,
+	STATS_FEAT_RATE,
+	STATS_FEAT_NAWDS,
+	STATS_FEAT_DELAY,
+	STATS_FEAT_JITTER,
+	STATS_FEAT_TXCAP,
+	STATS_FEAT_MONITOR,
+	STATS_FEAT_SAWFDELAY,
+	STATS_FEAT_SAWFTX,
+	STATS_FEAT_DETER,
+	STATS_FEAT_MAX,
+};
+
+/**
+ * enum stats_level: Defines detailing levels
+ * @STATS_LVL_BASIC:    Very minimal stats data
+ * @STATS_LVL_ADVANCE:  Mostly feature specific stats data
+ * @STATS_LVL_DEBUG:    Stats data for debug purpose
+ * @STATS_LVL_MAX:      Max supported Stats levels
+ */
+enum stats_level {
+	STATS_LVL_BASIC,
+	STATS_LVL_ADVANCE,
+	STATS_LVL_DEBUG,
+	STATS_LVL_MAX = STATS_LVL_DEBUG,
+};
+
+/**
+ * enum stats_object: Defines the Stats specific to object
+ * @STATS_OBJ_STA:   Stats for station/peer associated to AP
+ * @STATS_OBJ_VAP:   Stats for VAP
+ * @STATS_OBJ_MLD:   Stats for MLD group
+ * @STATS_OBJ_RADIO: Stats for particular Radio
+ * @STATS_OBJ_AP:    Stats for SoC
+ * @STATS_OBJ_MAX:   Max supported objects
+ */
+enum stats_object {
+	STATS_OBJ_STA,
+	STATS_OBJ_VAP,
+	STATS_OBJ_MLD,
+	STATS_OBJ_RADIO,
+	STATS_OBJ_AP,
+	STATS_OBJ_MAX = STATS_OBJ_AP,
+};
+
+/**
+ * enum stats_type: Defines the Stats for specific category
+ * @STATS_TYPE_DATA: Stats for Data frames
+ * @STATS_TYPE_CTRL: Stats for Control/Management frames
+ * @STATS_TYPE_MAX:  Max supported types
+ */
+enum stats_type {
+	STATS_TYPE_DATA,
+	STATS_TYPE_CTRL,
+	STATS_TYPE_MAX = STATS_TYPE_CTRL,
+};
+
+/**
+ * struct stats_config: Structure to hold user configurations
+ * @wiphy:  Pointer to wiphy structure which came as part of User request
+ * @feat:  Feat flag set to dedicated bit of this field
+ * @lvl:  Requested level of Stats (i.e. Basic, Advance or Debug)
+ * @obj:  Requested stats for object (i.e. AP, Radio, Vap or STA)
+ * @type:  Requested stats category
+ * @aggregate: Aggregate in driver
+ * @serviceid: service id for checking the level of sawf stats
+ */
+struct stats_config {
+	struct wiphy *wiphy;
+	u64 feat;
+	enum stats_level lvl;
+	enum stats_object obj;
+	enum stats_type type;
+	bool aggregate;
+	u8 serviceid;
+	u8 *mac;
+};
+
+#define STATS_FEAT_FLG_SAWFDELAY 0x00400000
+#define STATS_FEAT_FLG_SAWFTX 0x00800000
+/**
+ * struct unified_stats: Structure to carry all feature specific stats in driver
+ *                       level for stats response setup
+ * All features are void pointers and its corresponding sizes.
+ * This can hold Basic or Advance or Debug structures independently.
+ */
+struct unified_stats {
+	void *feat[STATS_FEAT_MAX];
+	u_int32_t size[STATS_FEAT_MAX];
+};
+
+enum qca_wlan_vendor_attr_ds {
+	QCA_WLAN_VENDOR_ATTR_DS_INVALID = 0,
+	QCA_WLAN_VENDOR_ATTR_CONFIG_EHT_MLO_MODE = 90,
+	/* Keep last */
+	QCA_WLAN_VENDOR_ATTR_DS_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_DS_MAX =
+		QCA_WLAN_VENDOR_ATTR_DS_AFTER_LAST - 1
+};
+
+void ath12k_vendor_telemetry_notify_breach(struct ieee80211_vif *vif, u8 *mac_addr,
+					   u8 svc_id, u8 param, bool set_clear,
+					   u8 tid, u8 *mld_addr);
+#endif /* CONFIG_ATH12K_SAWF */
+
+int ath12k_vendor_register(struct ath12k_hw *ah);
+
+enum qca_wlan_vendor_attr_scs_rule_config {
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_INVALID = 0,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_RULE_ID = 1,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_REQUEST_TYPE = 2,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_OUTPUT_TID = 3,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_CLASSIFIER_TYPE = 4,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_TCLAS4_VERSION = 5,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_TCLAS4_SRC_IPV4_ADDR = 6,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_TCLAS4_DST_IPV4_ADDR = 7,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_TCLAS4_SRC_IPV6_ADDR = 8,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_TCLAS4_DST_IPV6_ADDR = 9,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_TCLAS4_SRC_PORT = 10,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_TCLAS4_DST_PORT = 11,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_TCLAS4_DSCP = 12,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_TCLAS4_NEXT_HEADER = 13,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_TCLAS4_FLOW_LABEL = 14,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_TCLAS10_PROTOCOL_INSTANCE = 15,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_TCLAS10_NEXT_HEADER = 16,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_TCLAS10_FILTER_MASK = 17,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_TCLAS10_FILTER_VALUE = 18,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_SERVICE_CLASS_ID = 19,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_DST_MAC_ADDR = 20,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_NETDEV_IF_INDEX = 21,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_SERVICE_INTERVAL = 22,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_BURST_SIZE = 23,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_DELAY_BOUND = 24,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_MINIMUM_DATA_RATE = 25,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_USER_PRIORITY = 26,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_ACCESS_CATEGORY = 27,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_DIRECTION = 28,
+
+	/* Keep last */
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_AFTER_LAST,
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_MAX =
+	QCA_WLAN_VENDOR_ATTR_SCS_RULE_CONFIG_AFTER_LAST - 1,
+};
+
+#endif /* ATH12K_VENDOR_H */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/net/wireless/ath/testmode_i.h	2023-10-05 12:33:41.379635169 +0200
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/* "API" level of the ath testmode interface. Bump it after every
+ * incompatible interface change.
+ */
+#define ATH_TESTMODE_VERSION_MAJOR 1
+
+/* Bump this after every _compatible_ interface change, for example
+ * addition of a new command or an attribute.
+ */
+#define ATH_TESTMODE_VERSION_MINOR 0
+
+#define ATH_TM_DATA_MAX_LEN		5000
+#define ATH_FTM_EVENT_MAX_BUF_LENGTH 	2048
+
+enum ath_tm_attr {
+	__ATH_TM_ATTR_INVALID		= 0,
+	ATH_TM_ATTR_CMD			= 1,
+	ATH_TM_ATTR_DATA		= 2,
+	ATH_TM_ATTR_WMI_CMDID		= 3,
+	ATH_TM_ATTR_VERSION_MAJOR	= 4,
+	ATH_TM_ATTR_VERSION_MINOR	= 5,
+	ATH_TM_ATTR_WMI_OP_VERSION	= 6,
+	ATH_TM_ATTR_FWLOG		= 7,
+	ATH_TM_ATTR_DUAL_MAC		= 9,
+
+	/* keep last */
+	__ATH_TM_ATTR_AFTER_LAST,
+	ATH_TM_ATTR_MAX			= __ATH_TM_ATTR_AFTER_LAST - 1,
+};
+
+/* All ath testmode interface commands specified in
+ * ATH_TM_ATTR_CMD
+ */
+enum ath_tm_cmd {
+	/* Returns the supported ath testmode interface version in
+	 * ATH_TM_ATTR_VERSION. Always guaranteed to work. User space
+	 * uses this to verify it's using the correct version of the
+	 * testmode interface
+	 */
+	ATH_TM_CMD_GET_VERSION = 0,
+
+	/* Set ar state to test mode. */
+	ATH_TM_CMD_TESTMODE_START = 1,
+
+	/* Set ar state back into OFF state. */
+	ATH_TM_CMD_TESTMODE_STOP = 2,
+
+	/* The command used to transmit a WMI command to the firmware and
+	 * the event to receive WMI events from the firmware. Without
+	 * struct wmi_cmd_hdr header, only the WMI payload. Command id is
+	 * provided with ATH_TM_ATTR_WMI_CMDID and payload in
+	 * ATH_TM_ATTR_DATA.
+	 */
+	ATH_TM_CMD_WMI = 3,
+
+	/* The command used to transmit a FTM WMI command to the firmware
+	 * and the event to receive WMI events from the firmware. The data
+	 * received only contain the payload. Need to add the tlv
+	 * header and send the cmd to fw with commandid WMI_PDEV_UTF_CMDID.
+	 */
+	ATH_TM_CMD_WMI_FTM = 4,
+};
diff -Nruw linux-6.4-fbx/drivers/net/wireless/marvell/mwl8k_new./Makefile linux-6.4-fbx/drivers/net/wireless/marvell/mwl8k_new/Makefile
--- linux-6.4-fbx/drivers/net/wireless/marvell/mwl8k_new./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/net/wireless/marvell/mwl8k_new/Makefile	2023-03-10 17:18:24.474042758 +0100
@@ -0,0 +1,12 @@
+mwl8k_new-$(CONFIG_DEBUG_FS) += debugfs.o
+mwl8k_new-y += fw.o
+mwl8k_new-y += main.o
+mwl8k_new-y += utils.o
+
+mwl8k_new-y += svc_console.o
+mwl8k_new-y += svc_dma_test.o
+mwl8k_new-y += svc_vtty.o
+
+mwl8k_new-y += wifi_core.o
+
+obj-$(CONFIG_MWL8K_NEW)	+= mwl8k_new.o
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/of/configfs.c	2023-02-27 20:55:42.852893915 +0100
@@ -0,0 +1,279 @@
+/*
+ * Configfs entries for device-tree
+ *
+ * Copyright (C) 2013 - Pantelis Antoniou <panto@antoniou-consulting.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/ctype.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/configfs.h>
+#include <linux/types.h>
+#include <linux/stat.h>
+#include <linux/limits.h>
+#include <linux/file.h>
+#include <linux/vmalloc.h>
+#include <linux/firmware.h>
+#include <linux/sizes.h>
+
+#include "of_private.h"
+
+struct cfs_overlay_item {
+	struct config_item	item;
+
+	char			path[PATH_MAX];
+
+	const struct firmware	*fw;
+	struct device_node	*overlay;
+	int			ov_id;
+
+	void			*dtbo;
+	int			dtbo_size;
+};
+
+static inline struct cfs_overlay_item *to_cfs_overlay_item(
+		struct config_item *item)
+{
+	return item ? container_of(item, struct cfs_overlay_item, item) : NULL;
+}
+
+static ssize_t cfs_overlay_item_path_show(struct config_item *item,
+		char *page)
+{
+	struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+	return sprintf(page, "%s\n", overlay->path);
+}
+
+static ssize_t cfs_overlay_item_path_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+	const char *p = page;
+	char *s;
+	int err;
+
+	/* if it's set do not allow changes */
+	if (overlay->path[0] != '\0' || overlay->dtbo_size > 0)
+		return -EPERM;
+
+	/* copy to path buffer (and make sure it's always zero terminated */
+	count = snprintf(overlay->path, sizeof(overlay->path) - 1, "%s", p);
+	overlay->path[sizeof(overlay->path) - 1] = '\0';
+
+	/* strip trailing newlines */
+	s = overlay->path + strlen(overlay->path);
+	while (s > overlay->path && *--s == '\n')
+		*s = '\0';
+
+	pr_debug("%s: path is '%s'\n", __func__, overlay->path);
+
+	err = request_firmware(&overlay->fw, overlay->path, NULL);
+	if (err != 0)
+		goto out_err;
+
+	err = of_overlay_fdt_apply((void *)overlay->fw->data,
+				   overlay->fw->size,
+				   &overlay->ov_id);
+	if (err != 0)
+		goto out_err;
+
+	return count;
+
+out_err:
+
+	release_firmware(overlay->fw);
+	overlay->fw = NULL;
+
+	overlay->path[0] = '\0';
+	return err;
+}
+
+static ssize_t cfs_overlay_item_status_show(struct config_item *item,
+		char *page)
+{
+	struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+
+	return sprintf(page, "%s\n",
+			overlay->ov_id >= 0 ? "applied" : "unapplied");
+}
+
+CONFIGFS_ATTR(cfs_overlay_item_, path);
+CONFIGFS_ATTR_RO(cfs_overlay_item_, status);
+
+static struct configfs_attribute *cfs_overlay_attrs[] = {
+	&cfs_overlay_item_attr_path,
+	&cfs_overlay_item_attr_status,
+	NULL,
+};
+
+ssize_t cfs_overlay_item_dtbo_read(struct config_item *item,
+		void *buf, size_t max_count)
+{
+	struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+
+	pr_debug("%s: buf=%p max_count=%zu\n", __func__,
+			buf, max_count);
+
+	if (overlay->dtbo == NULL)
+		return 0;
+
+	/* copy if buffer provided */
+	if (buf != NULL) {
+		/* the buffer must be large enough */
+		if (overlay->dtbo_size > max_count)
+			return -ENOSPC;
+
+		memcpy(buf, overlay->dtbo, overlay->dtbo_size);
+	}
+
+	return overlay->dtbo_size;
+}
+
+ssize_t cfs_overlay_item_dtbo_write(struct config_item *item,
+		const void *buf, size_t count)
+{
+	struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+	int err;
+
+	/* if it's set do not allow changes */
+	if (overlay->path[0] != '\0' || overlay->dtbo_size > 0)
+		return -EPERM;
+
+	/* copy the contents */
+	overlay->dtbo = kmemdup(buf, count, GFP_KERNEL);
+	if (overlay->dtbo == NULL)
+		return -ENOMEM;
+
+	overlay->dtbo_size = count;
+
+	err = of_overlay_fdt_apply(overlay->dtbo,
+				   overlay->dtbo_size,
+				   &overlay->ov_id);
+	if (err != 0)
+		goto out_err;
+
+	return count;
+
+out_err:
+	kfree(overlay->dtbo);
+	overlay->dtbo = NULL;
+	overlay->dtbo_size = 0;
+
+	return err;
+}
+
+CONFIGFS_BIN_ATTR(cfs_overlay_item_, dtbo, NULL, SZ_1M);
+
+static struct configfs_bin_attribute *cfs_overlay_bin_attrs[] = {
+	&cfs_overlay_item_attr_dtbo,
+	NULL,
+};
+
+static void cfs_overlay_release(struct config_item *item)
+{
+	struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+
+	if (overlay->ov_id >= 0)
+		of_overlay_remove(&overlay->ov_id);
+	if (overlay->fw)
+		release_firmware(overlay->fw);
+	/* kfree with NULL is safe */
+	kfree(overlay->dtbo);
+	kfree(overlay);
+}
+
+static struct configfs_item_operations cfs_overlay_item_ops = {
+	.release	= cfs_overlay_release,
+};
+
+static struct config_item_type cfs_overlay_type = {
+	.ct_item_ops	= &cfs_overlay_item_ops,
+	.ct_attrs	= cfs_overlay_attrs,
+	.ct_bin_attrs	= cfs_overlay_bin_attrs,
+	.ct_owner	= THIS_MODULE,
+};
+
+static struct config_item *cfs_overlay_group_make_item(
+		struct config_group *group, const char *name)
+{
+	struct cfs_overlay_item *overlay;
+
+	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+	if (!overlay)
+		return ERR_PTR(-ENOMEM);
+	overlay->ov_id = -1;
+
+	config_item_init_type_name(&overlay->item, name, &cfs_overlay_type);
+	return &overlay->item;
+}
+
+static void cfs_overlay_group_drop_item(struct config_group *group,
+		struct config_item *item)
+{
+	struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+
+	config_item_put(&overlay->item);
+}
+
+static struct configfs_group_operations overlays_ops = {
+	.make_item	= cfs_overlay_group_make_item,
+	.drop_item	= cfs_overlay_group_drop_item,
+};
+
+static struct config_item_type overlays_type = {
+	.ct_group_ops   = &overlays_ops,
+	.ct_owner       = THIS_MODULE,
+};
+
+static struct configfs_group_operations of_cfs_ops = {
+	/* empty - we don't allow anything to be created */
+};
+
+static struct config_item_type of_cfs_type = {
+	.ct_group_ops   = &of_cfs_ops,
+	.ct_owner       = THIS_MODULE,
+};
+
+struct config_group of_cfs_overlay_group;
+
+static struct configfs_subsystem of_cfs_subsys = {
+	.su_group = {
+		.cg_item = {
+			.ci_namebuf = "device-tree",
+			.ci_type = &of_cfs_type,
+		},
+	},
+	.su_mutex = __MUTEX_INITIALIZER(of_cfs_subsys.su_mutex),
+};
+
+static int __init of_cfs_init(void)
+{
+	int ret;
+
+	pr_info("%s\n", __func__);
+
+	config_group_init(&of_cfs_subsys.su_group);
+	config_group_init_type_name(&of_cfs_overlay_group, "overlays",
+			&overlays_type);
+	configfs_add_default_group(&of_cfs_overlay_group,
+			&of_cfs_subsys.su_group);
+
+	ret = configfs_register_subsystem(&of_cfs_subsys);
+	if (ret != 0) {
+		pr_err("%s: failed to register subsys\n", __func__);
+		goto out;
+	}
+	pr_info("%s: OK\n", __func__);
+out:
+	return ret;
+}
+late_initcall(of_cfs_init);
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/phy/xdsl_phy_api.c	2023-02-24 19:07:30.754305868 +0100
@@ -0,0 +1,205 @@
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/kref.h>
+#include <linux/xdsl_phy_api.h>
+
+static DEFINE_MUTEX(phy_device_list_mutex);
+static LIST_HEAD(phy_device_list);
+
+/*
+ *
+ */
+/*
+ * Find a registered PHY, by OF node when @node is non-NULL, otherwise
+ * by numeric @id.  Caller must hold phy_device_list_mutex.
+ * (Also fixes the space-indented loop line to a tab.)
+ */
+static struct xdsl_phy *__phy_lookup(struct device_node *node,
+					unsigned int id)
+{
+	struct xdsl_phy *pd;
+
+	list_for_each_entry(pd, &phy_device_list, next) {
+		if (node) {
+			if (pd->of_node == node)
+				return pd;
+		} else {
+			if (pd->id == id)
+				return pd;
+		}
+	}
+	return NULL;
+}
+
+/*
+ *
+ */
+/*
+ * Attach a consumer to a PHY identified either by OF @node or by @id.
+ * Returns the PHY, or ERR_PTR: -EINVAL (no key), -EPROBE_DEFER (not
+ * registered yet), -EBUSY (already attached), -ENODEV (provider module
+ * going away).  @change_cb is invoked on link changes once started.
+ */
+struct xdsl_phy *xdsl_phy_attach(struct device_node *node,
+				 unsigned int id,
+				 void (*change_cb)(struct xdsl_phy *,
+						  void *),
+				 void *change_priv)
+{
+	struct xdsl_phy *phy_dev;
+
+	/* need at least one lookup key */
+	if (!node && !id)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&phy_device_list_mutex);
+
+	phy_dev = __phy_lookup(node, id);
+
+	/* provider not registered yet: let the consumer retry later */
+	if (!phy_dev) {
+		mutex_unlock(&phy_device_list_mutex);
+		return ERR_PTR(-EPROBE_DEFER);
+	}
+
+	/* only one consumer at a time */
+	if (phy_dev->in_use) {
+		mutex_unlock(&phy_device_list_mutex);
+		return ERR_PTR(-EBUSY);
+	}
+
+	/*
+	 * Pin the provider module; the result was previously ignored,
+	 * which allowed attaching to a module being unloaded.
+	 */
+	if (!try_module_get(phy_dev->owner)) {
+		mutex_unlock(&phy_device_list_mutex);
+		return ERR_PTR(-ENODEV);
+	}
+
+	phy_dev->in_use = true;
+	phy_dev->started = false;
+	phy_dev->change_cb = change_cb;
+	phy_dev->change_priv = change_priv;
+
+	mutex_unlock(&phy_device_list_mutex);
+
+	return phy_dev;
+}
+
+EXPORT_SYMBOL(xdsl_phy_attach);
+
+/*
+ *
+ */
+/*
+ * Deferred work scheduled by xdsl_phy_start(): delivers one initial
+ * change callback to the consumer from workqueue context.
+ *
+ * NOTE(review): this handler takes phy_dev->lock, so callers must not
+ * invoke cancel_work_sync() on this work while holding that lock.
+ */
+static void initial_change_work_func(struct work_struct *work)
+{
+	struct xdsl_phy *phy_dev = container_of(work,
+						struct xdsl_phy,
+						initial_change_work);
+
+	mutex_lock(&phy_dev->lock);
+	/* consumer detached or stopped in the meantime: do nothing */
+	if (!phy_dev->in_use || !phy_dev->change_cb || !phy_dev->started) {
+		mutex_unlock(&phy_dev->lock);
+		return;
+	}
+
+	phy_dev->initial_change_pending = false;
+	phy_dev->change_cb(phy_dev, phy_dev->change_priv);
+	mutex_unlock(&phy_dev->lock);
+}
+
+/*
+ *
+ */
+/*
+ * Mark the PHY as started and schedule delivery of the initial change
+ * callback from workqueue context.
+ */
+void xdsl_phy_start(struct xdsl_phy *phy_dev)
+{
+	mutex_lock(&phy_dev->lock);
+	phy_dev->started = true;
+	phy_dev->initial_change_pending = true;
+	schedule_work(&phy_dev->initial_change_work);
+	mutex_unlock(&phy_dev->lock);
+}
+
+EXPORT_SYMBOL(xdsl_phy_start);
+
+/*
+ *
+ */
+/*
+ * Mark the PHY as stopped; queued initial-change work will see
+ * started == false and bail out.
+ */
+void xdsl_phy_stop(struct xdsl_phy *phy_dev)
+{
+	mutex_lock(&phy_dev->lock);
+	phy_dev->started = false;
+	mutex_unlock(&phy_dev->lock);
+}
+
+EXPORT_SYMBOL(xdsl_phy_stop);
+
+/*
+ *
+ */
+/*
+ * Detach the current consumer from the PHY and clear its callback.
+ */
+void xdsl_phy_detach(struct xdsl_phy *phy_dev)
+{
+	WARN_ON(!phy_dev->in_use);
+
+	mutex_lock(&phy_dev->lock);
+	phy_dev->in_use = false;
+	phy_dev->started = false;
+	phy_dev->initial_change_pending = false;
+	phy_dev->change_cb = NULL;
+	phy_dev->change_priv = NULL;
+	mutex_unlock(&phy_dev->lock);
+
+	/*
+	 * Cancel only after dropping the lock: the work handler takes
+	 * phy_dev->lock, so calling cancel_work_sync() with it held
+	 * (as before) can deadlock against a handler already waiting
+	 * on the mutex.  If the handler runs now it sees
+	 * in_use == false and does nothing.
+	 *
+	 * NOTE(review): there is no module_put() here to pair with the
+	 * try_module_get() in xdsl_phy_attach() — verify refcounting.
+	 */
+	cancel_work_sync(&phy_dev->initial_change_work);
+}
+
+EXPORT_SYMBOL(xdsl_phy_detach);
+
+/*
+ *
+ */
+/*
+ * Called by the PHY provider on a link state change; forwards the
+ * event to the attached consumer's callback, if any.
+ */
+void xdsl_phy_device_notify_change(struct xdsl_phy *phy_dev)
+{
+	/*
+	 * Cancel pending initial-change work before taking the lock:
+	 * the work handler takes phy_dev->lock, so the previous
+	 * cancel_work_sync() under the lock could deadlock against a
+	 * handler already waiting on the mutex.
+	 */
+	cancel_work_sync(&phy_dev->initial_change_work);
+
+	mutex_lock(&phy_dev->lock);
+	if (!phy_dev->in_use || !phy_dev->change_cb || !phy_dev->started) {
+		mutex_unlock(&phy_dev->lock);
+		return;
+	}
+
+	phy_dev->initial_change_pending = false;
+	phy_dev->change_cb(phy_dev, phy_dev->change_priv);
+	mutex_unlock(&phy_dev->lock);
+}
+
+EXPORT_SYMBOL(xdsl_phy_device_notify_change);
+
+/*
+ *
+ */
+/*
+ * Register a PHY provider.  Validates the mandatory ops/owner fields,
+ * rejects a duplicate of_node/id, and initialises per-device state.
+ * Returns 0 or a negative errno.
+ */
+int xdsl_phy_device_register(struct xdsl_phy *phy_dev)
+{
+	if (!phy_dev->ops ||
+	    !phy_dev->ops->get_status ||
+	    !phy_dev->owner)
+		return -EINVAL;
+
+	mutex_lock(&phy_device_list_mutex);
+
+	/* a device with this node/id is already registered */
+	if (__phy_lookup(phy_dev->of_node, phy_dev->id)) {
+		mutex_unlock(&phy_device_list_mutex);
+		return -EEXIST;
+	}
+
+	mutex_init(&phy_dev->lock);
+	mutex_init(&phy_dev->ops_lock);
+	phy_dev->in_use = false;
+	phy_dev->started = false;
+	INIT_WORK(&phy_dev->initial_change_work, initial_change_work_func);
+	phy_dev->initial_change_pending = false;
+	phy_dev->change_cb = NULL;
+	phy_dev->change_priv = NULL;
+
+	list_add(&phy_dev->next, &phy_device_list);
+	mutex_unlock(&phy_device_list_mutex);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(xdsl_phy_device_register);
+
+/*
+ *
+ */
+/*
+ * Unregister a PHY provider.  Must not be called while a consumer is
+ * still attached; warns and bails out in that case.
+ */
+void xdsl_phy_device_unregister(struct xdsl_phy *phy_dev)
+{
+	if (WARN_ON(phy_dev->in_use))
+		return;
+
+	cancel_work_sync(&phy_dev->initial_change_work);
+	mutex_lock(&phy_device_list_mutex);
+	list_del(&phy_dev->next);
+	mutex_unlock(&phy_device_list_mutex);
+}
+
+EXPORT_SYMBOL(xdsl_phy_device_unregister);
+
+MODULE_LICENSE("GPL");
diff -Nruw linux-6.4-fbx/drivers/platform/fbxgw7r./Kconfig linux-6.4-fbx/drivers/platform/fbxgw7r/Kconfig
--- linux-6.4-fbx/drivers/platform/fbxgw7r./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/platform/fbxgw7r/Kconfig	2023-03-09 15:24:23.846931646 +0100
@@ -0,0 +1,6 @@
+config FBXGW7R_PLATFORM
+	bool "Freebox Gateway V7 specific drivers"
+
+config FBXGW7R_SWITCH
+	bool "Freebox Gateway V7 in kernel switch init code."
+	depends on FBXGW7R_PLATFORM
diff -Nruw linux-6.4-fbx/drivers/platform/fbxgw7r./Makefile linux-6.4-fbx/drivers/platform/fbxgw7r/Makefile
--- linux-6.4-fbx/drivers/platform/fbxgw7r./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/platform/fbxgw7r/Makefile	2023-03-09 15:24:23.846931646 +0100
@@ -0,0 +1 @@
+obj-$(CONFIG_FBXGW7R_SWITCH)	+= fbxgw7r-switch.o
diff -Nruw linux-6.4-fbx/drivers/platform/intelce./Kconfig linux-6.4-fbx/drivers/platform/intelce/Kconfig
--- linux-6.4-fbx/drivers/platform/intelce./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/platform/intelce/Kconfig	2023-03-09 15:06:11.376234546 +0100
@@ -0,0 +1,18 @@
+#
+# IntelCE devices configuration
+#
+
+menu "IntelCE devices"
+
+config INTELCE_GPIO
+	tristate "GPIO support"
+	select ARCH_REQUIRE_GPIOLIB
+	help
+	  IntelCE 3100/4100 GPIO support.
+
+config INTELCE_DFX
+	tristate "DFX reporting support"
+	help
+	  IntelCE 3100/4100 DFX fuse reporting support.
+
+endmenu
diff -Nruw linux-6.4-fbx/drivers/platform/intelce./Makefile linux-6.4-fbx/drivers/platform/intelce/Makefile
--- linux-6.4-fbx/drivers/platform/intelce./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/platform/intelce/Makefile	2023-03-09 15:06:11.376234546 +0100
@@ -0,0 +1,2 @@
+obj-$(CONFIG_INTELCE_GPIO)	+= gpio-intelce.o
+obj-$(CONFIG_INTELCE_DFX)	+= dfx.o
diff -Nruw linux-6.4-fbx/drivers/platform/ipq./Kconfig linux-6.4-fbx/drivers/platform/ipq/Kconfig
--- linux-6.4-fbx/drivers/platform/ipq./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/platform/ipq/Kconfig	2023-07-20 17:19:14.610365797 +0200
@@ -0,0 +1,19 @@
+
+menuconfig QCOM_IPQ_PLATFORM
+	bool "Qualcomm IPQ Platform Specific Device Drivers"
+	default y
+	depends on ARCH_QCOM || COMPILE_TEST
+	help
+	  Say Y here to get to see options for device drivers for
+	  various Qualcomm IPQ platforms.  This option alone does not
+	  add any kernel code.
+
+	  If you say N, all options in this submenu will be skipped
+	  and disabled.
+
+if QCOM_IPQ_PLATFORM
+
+config IPQ_SEC_UPGRADE
+	bool "Qualcomm IPQ sec-upgrade driver."
+
+endif
diff -Nruw linux-6.4-fbx/drivers/platform/ipq./Makefile linux-6.4-fbx/drivers/platform/ipq/Makefile
--- linux-6.4-fbx/drivers/platform/ipq./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/platform/ipq/Makefile	2023-07-20 17:19:14.610365797 +0200
@@ -0,0 +1 @@
+obj-$(CONFIG_IPQ_SEC_UPGRADE)	+= sec-upgrade.o
diff -Nruw linux-6.4-fbx/drivers/soc/bcm/bcm63xx/rdp./Makefile linux-6.4-fbx/drivers/soc/bcm/bcm63xx/rdp/Makefile
--- linux-6.4-fbx/drivers/soc/bcm/bcm63xx/rdp./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/soc/bcm/bcm63xx/rdp/Makefile	2023-03-09 15:06:11.376234546 +0100
@@ -0,0 +1,9 @@
+obj-y += rdp_drv.o
+
+rdp_drv-y += \
+	rdp.o \
+	rdp_api.o \
+	rdp_io.o \
+	rdp_ioctl.o
+
+rdp_drv-$(CONFIG_DEBUG_FS) += rdp_debug.o
diff -Nruw linux-6.4-fbx/drivers/soc/bcm/bcm63xx/xrdp./Makefile linux-6.4-fbx/drivers/soc/bcm/bcm63xx/xrdp/Makefile
--- linux-6.4-fbx/drivers/soc/bcm/bcm63xx/xrdp./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/drivers/soc/bcm/bcm63xx/xrdp/Makefile	2023-03-09 15:06:11.380234652 +0100
@@ -0,0 +1,8 @@
+obj-$(CONFIG_SOC_BCM63XX_XRDP) += xrdp_drv.o
+
+xrdp_drv-y += \
+	xrdp.o \
+	xrdp_api.o
+
+xrdp_drv-$(CONFIG_SOC_BCM63XX_XRDP_IOCTL) += xrdp_ioctl.o
+xrdp_drv-$(CONFIG_DEBUG_FS) += xrdp_debug.o
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/soc/qcom/qcom-imem-reset-reason.c	2024-03-25 17:41:17.024248710 +0100
@@ -0,0 +1,753 @@
+/*
+ * qcom-imem-reset-reason.c for imem-reset-reason
+ * Created by <nschichan@freebox.fr> on Fri Feb 16 15:42:29 2024
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/panic_notifier.h>
+#include <linux/io.h>
+#include <linux/arm-smccc.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/seq_buf.h>
+#include <linux/seq_file.h>
+
+#define EL3_PANIC_SMC		ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+						   ARM_SMCCC_SMC_64,	\
+						   ARM_SMCCC_OWNER_OEM, 1)
+
+#define EL3_GET_PANIC		ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+						   ARM_SMCCC_SMC_64,	\
+						   ARM_SMCCC_OWNER_OEM, 2)
+
+#define EL3_GET_XPU_STAT	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+						   ARM_SMCCC_SMC_64,	\
+						   ARM_SMCCC_OWNER_OEM, 3)
+
+#define EL3_GET_NOC_STAT	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+						   ARM_SMCCC_SMC_64,	\
+						   ARM_SMCCC_OWNER_OEM, 4)
+
+#define SEL1_GET_PANIC		ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+						   ARM_SMCCC_SMC_64,	\
+						   ARM_SMCCC_OWNER_OEM, 5)
+
+/*
+ * NOTE:
+ *
+ * Offsets are relative to the beginning of an IMEM area writeable and
+ * readable from S-EL1 known to be unused. on IPQ9574 offset 0x50 to
+ * 0xc8 are known to be unused. configure the device tree reg
+ * attribute accordingly.
+ *
+ * IMEM content is retained upon warm reset. Its content at PoR is
+ * random.
+ *
+ * It's recommended for this driver to be built-in. If the system
+ * crashes before the driver is loaded, the IMEM won't be updated.
+ */
+#define OFF_KRR_MAGIC		0x0
+#define OFF_KRR_VALUE		0x4
+#define OFF_KRR_WARM_BOOT_COUNT	0x8
+
+#define KRR_MAGIC	0x4e12b46d
+#define KRR_PANIC	0x0050414E
+#define KRR_WARM	0x0057524D
+#define KRR_MASK	0x00ffffff
+#define KRR_IN_NMI	(1 << 24)
+#define KRR_IN_IRQ	(1 << 25)
+
+enum {
+	NOC_MEMNOC,
+	NOC_SNOC,
+	NOC_PCNOC,
+
+	__NOC_COUNT,
+};
+
+struct noc_strings {
+	const char **strings;
+	size_t nr_strings;
+};
+
+struct xpu_id {
+	u32 id;
+	const char *name;
+};
+
+struct qcom_irr_noc_platform_data {
+	struct noc_strings initiators[__NOC_COUNT];
+	struct noc_strings targets[__NOC_COUNT];
+
+	const struct xpu_id *xpu_ids;
+	size_t nr_xpu_ids;
+};
+
+/*
+ * qcom imem reset reason context:
+ */
+struct qcom_irr {
+	void __iomem *imem;
+	struct resource *imem_res;
+	struct notifier_block panic_notifier;
+	struct platform_device *pdev;
+
+	u32 warm_boot_count;
+	u32 boot_krr_value;
+
+	bool el3_rr_supported;
+
+	const struct qcom_irr_noc_platform_data *noc_pdata;
+};
+
+/*
+ * write KRR magic & value, called from panic notifier.
+ */
+static void write_krr(struct qcom_irr *priv, u32 krr_value)
+{
+	/* record the reset reason in IMEM for the next boot to read */
+	writel(KRR_MAGIC, priv->imem + OFF_KRR_MAGIC);
+	writel(krr_value, priv->imem + OFF_KRR_VALUE);
+}
+
+/*
+ * read KRR data from previous boot, called from probe() callback, and
+ * only there, since we're setting things up for the next boot.
+ *
+ * in case the driver is compiled as a module, rmmoding and insmoding
+ * it again will behave as if the board was warm rebooted.
+ */
+static void read_krr(struct qcom_irr *priv)
+{
+	if (readl(priv->imem + OFF_KRR_MAGIC) != KRR_MAGIC) {
+		/*
+		 * no magic found: assume first boot. set magic, and
+		 * WARM value for the next boot. Warm boot count is
+		 * set to 0.
+		 *
+		 * boot_krr_value stays 0, which qcom_irr_show()
+		 * reports as "por-reset".
+		 */
+		writel(KRR_MAGIC, priv->imem + OFF_KRR_MAGIC);
+		writel(KRR_WARM, priv->imem + OFF_KRR_VALUE);
+		writel(0x0, priv->imem + OFF_KRR_WARM_BOOT_COUNT);
+		return ;
+	}
+
+	/*
+	 * read krr value and warm boot count.
+	 */
+	priv->boot_krr_value = readl(priv->imem + OFF_KRR_VALUE);
+	priv->warm_boot_count = readl(priv->imem +
+				      OFF_KRR_WARM_BOOT_COUNT);
+
+	/*
+	 * set WARM value for next boot
+	 */
+	writel(KRR_WARM, priv->imem + OFF_KRR_VALUE);
+
+	/*
+	 * increment warm boot count for next boot.
+	 */
+	writel(priv->warm_boot_count + 1, priv->imem + OFF_KRR_WARM_BOOT_COUNT);
+}
+
+#define nb_to_qcom_irr(nb) container_of(nb, struct qcom_irr, panic_notifier)
+
+/*
+ * panic handler.
+ */
+static int qcom_irr_on_panic(struct notifier_block *nb, unsigned long code,
+			     void *unused)
+{
+	/* NOTE(review): plain int guard, not atomic — assumed OK in
+	 * panic context; confirm no concurrent notifier invocation */
+	static int in_panic;
+	struct qcom_irr *priv;
+	u32 krr_val;
+
+	/*
+	 * terminate recursive calls.
+	 */
+	if (in_panic)
+		return NOTIFY_DONE;
+	in_panic = 1;
+
+	priv = nb_to_qcom_irr(nb);
+
+	/*
+	 * compute krr_val: indicate we are panicing, and set in_irq
+	 * and in_nmi according the state at panic() time.
+	 *
+	 * in_nmi() will generatly be true in case an xPU violation is
+	 * detected by TZ.
+	 */
+	krr_val = KRR_PANIC;
+	if (in_nmi())
+		krr_val |= KRR_IN_NMI;
+	if (in_hardirq())
+		krr_val |= KRR_IN_IRQ;
+
+	write_krr(priv, krr_val);
+	return NOTIFY_DONE;
+}
+
+/*
+ * get EL3 panic reason from EL3 firmware.
+ */
+/* returns true when the EL3 firmware reports a panic before the last
+ * warm reset; a0 != 0 means the SMC call itself failed */
+static bool qcom_irr_get_el3_panic(struct device *dev)
+{
+	struct arm_smccc_res smc_res;
+
+	arm_smccc_smc(EL3_GET_PANIC, 0,
+		      0, 0, 0, 0, 0, 0, &smc_res);
+
+	if (smc_res.a0)
+		return false;
+
+	return smc_res.a1;
+}
+
+/*
+ * Query the EL3 firmware for a secure-EL1 panic indication from
+ * before the last warm reset.
+ */
+static bool qcom_irr_get_sel1_panic(struct device *dev)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_smc(SEL1_GET_PANIC, 0,
+		      0, 0, 0, 0, 0, 0, &res);
+
+	/* a0 != 0 means the SMC failed; a1 carries the panic flag */
+	return res.a0 == 0 && res.a1 != 0;
+}
+
+/*
+ * display reset_reason as a set of ',' separated values.
+ */
+static ssize_t qcom_irr_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct qcom_irr *priv = dev_get_drvdata(dev);
+	size_t retsize;
+
+	/* build a ','-separated token list in the sysfs buffer */
+	*buf = '\0';
+
+	switch (priv->boot_krr_value & KRR_MASK) {
+	case 0:
+		/* no recorded value: IMEM magic was absent last boot */
+		strcat(buf, "por-reset,");
+		break;
+	case KRR_WARM:
+		strcat(buf, "warm-reset,");
+		break;
+	case KRR_PANIC:
+		strcat(buf, "panic,");
+		if (priv->boot_krr_value & KRR_IN_NMI)
+			strcat(buf, "panic-in-nmi,");
+		if (priv->boot_krr_value & KRR_IN_IRQ)
+			strcat(buf, "panic-in-irq,");
+		break;
+	default:
+		strcat(buf, "unknown,");
+	}
+
+	if (priv->el3_rr_supported && qcom_irr_get_el3_panic(dev))
+		strcat(buf, "el3-panic,");
+
+	if (priv->el3_rr_supported && qcom_irr_get_sel1_panic(dev))
+		strcat(buf, "sel1-panic,");
+
+	/*
+	 * '\n'-terminate the string.
+	 */
+	retsize = strlen(buf);
+	if (retsize)
+		buf[retsize - 1] = '\n';
+
+	return retsize;
+}
+
+static DEVICE_ATTR(reset_reason, 0400, qcom_irr_show, NULL);
+
+/*
+ * show warm boot count since last power cycle.
+ */
+/*
+ * Show the number of warm reboots since the last power cycle.
+ */
+static ssize_t qcom_irr_warm_boot_count_show(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	struct qcom_irr *priv = dev_get_drvdata(dev);
+
+	/* sysfs_emit() enforces the PAGE_SIZE sysfs buffer contract
+	 * instead of the previous hard-coded snprintf(buf, 4096, ...) */
+	return sysfs_emit(buf, "%u\n", priv->warm_boot_count);
+}
+
+/* copy @str into the sysfs buffer and return its length */
+static ssize_t ret_str(char *buf, const char *str)
+{
+	return sprintf(buf, "%s", str);
+}
+
+#define EL3_XPU_ID_VALID		BIT(31)
+#define EL3_NOC_INITIATOR_TARGET_VALID	BIT(31)
+
+/*
+ * get the XPU friendly name from the SoC specific conversion table.
+ */
+/*
+ * Map a raw EL3 XPU identifier to its friendly name via the SoC
+ * specific conversion table; "unknown" when no table or no match.
+ * (Also fixes the misspelled "unkown" label.)
+ */
+static const char *get_xpu_name(struct qcom_irr *priv, u32 xpu_id)
+{
+	size_t i;
+
+	if (!priv->noc_pdata || !priv->noc_pdata->xpu_ids)
+		goto unknown;
+
+	for (i = 0; i < priv->noc_pdata->nr_xpu_ids; ++i) {
+		const struct xpu_id *id = &priv->noc_pdata->xpu_ids[i];
+
+		if (xpu_id == id->id)
+			return id->name;
+	}
+
+unknown:
+	return "unknown";
+}
+
+/*
+ * display the information of the XPU state before the board last warm
+ * reset.
+ */
+static ssize_t qcom_irr_xpu_stat_show(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct qcom_irr *priv = dev_get_drvdata(dev);
+	u32 xpu_id, xpu_far;
+	struct arm_smccc_res smc_res;
+
+	if (!priv->el3_rr_supported)
+		return ret_str(buf, "unsupported\n");
+
+	arm_smccc_smc(EL3_GET_XPU_STAT, 0,
+		      0, 0, 0, 0, 0, 0, &smc_res);
+	if (smc_res.a0)
+		return ret_str(buf, "I/O error\n");
+
+	/* a1 layout: high 32 bits = valid flag + XPU id,
+	 * low 32 bits = fault address register */
+	xpu_id = (smc_res.a1 >> 32) & 0xffffffff;
+	xpu_far = smc_res.a1 & 0xffffffff;
+
+	if (!(xpu_id & EL3_XPU_ID_VALID))
+		return ret_str(buf, "XPU: no fault found.\n");
+
+	xpu_id &= ~EL3_XPU_ID_VALID;
+	snprintf(buf, SZ_4K, "XPU: fault on xpu%d (%s), address %08x\n",
+		 xpu_id, get_xpu_name(priv, xpu_id), xpu_far);
+
+	return strlen(buf);
+}
+
+static DEVICE_ATTR(xpu_stat, 0400, qcom_irr_xpu_stat_show, NULL);
+
+/*
+ * friendly NOC names.
+ */
+static const char *noc_names[3] = {
+	[NOC_MEMNOC] = "MEMNOC",
+	[NOC_SNOC] = "SNOC  ",	/* trailing spaces align the output columns */
+	[NOC_PCNOC] = "PCNOC ",
+};
+
+/*
+ * helper common to NOC intiator and target friendly name retrieval.
+ */
+static const char *__get_string(const struct noc_strings *ns, size_t off)
+{
+	/* out-of-range offsets yield NULL so callers can substitute a
+	 * fallback name */
+	return (off < ns->nr_strings) ? ns->strings[off] : NULL;
+}
+
+/*
+ * get the NOC initiator name from the SoC specific conversion table.
+ */
+static const char *get_initiator_name(struct qcom_irr *priv, int which,
+				      size_t initiator)
+{
+	const char *name = NULL;
+
+	/* missing platform data or NOC index out of range -> "unknown" */
+	if (priv->noc_pdata && which < __NOC_COUNT)
+		name = __get_string(&priv->noc_pdata->initiators[which],
+				    initiator);
+
+	return name ? name : "unknown";
+}
+
+/*
+ * get the NOC target name from the SoC specific conversion table.
+ */
+static const char *get_target_name(struct qcom_irr *priv, int which,
+				   size_t target)
+{
+	const char *name = NULL;
+
+	/* missing platform data or NOC index out of range -> "unknown" */
+	if (priv->noc_pdata && which < __NOC_COUNT)
+		name = __get_string(&priv->noc_pdata->targets[which],
+				    target);
+
+	return name ? name : "unknown";
+}
+
+/*
+ * display the information of the NOCs states before the board last
+ * warm reset.
+ */
+/*
+ * Show, for each NOC, the initiator/target pair and fault address
+ * captured by EL3 before the last warm reset.
+ * (Fixes the "aatr" parameter typo to the conventional "attr".)
+ */
+static ssize_t qcom_irr_noc_stat_show(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct qcom_irr *priv = dev_get_drvdata(dev);
+	struct seq_buf s;
+	struct arm_smccc_res smc_res;
+	u64 v[3];
+	int i;
+
+	if (!priv->el3_rr_supported)
+		return ret_str(buf, "unsupported\n");
+
+	arm_smccc_smc(EL3_GET_NOC_STAT, 0,
+		      0, 0, 0, 0, 0, 0, &smc_res);
+
+	if (smc_res.a0)
+		return ret_str(buf, "I/O error\n");
+
+	/* one 64-bit status word per NOC: low 32 bits = fault address,
+	 * high 32 bits = valid flag, target (15:8), initiator (7:0) */
+	v[0] = smc_res.a1;
+	v[1] = smc_res.a2;
+	v[2] = smc_res.a3;
+
+	seq_buf_init(&s, buf, PAGE_SIZE - 1);
+
+	for (i = 0; i < __NOC_COUNT; ++i) {
+		u32 lo = v[i] & 0xffffffff;
+		u32 hi = (v[i] >> 32) & 0xffffffff;
+		u32 target = (hi >> 8) & 0xff;
+		u32 initiator = hi & 0xff;
+		const char *noc_name = noc_names[i];
+
+		if (!(hi & EL3_NOC_INITIATOR_TARGET_VALID)) {
+			seq_buf_printf(&s, "%s: no fault found.\n", noc_name);
+			continue;
+		}
+
+		seq_buf_printf(&s, "%s: %u (%s) -> %u (%s) address %08x\n",
+		       noc_name,
+		       initiator, get_initiator_name(priv, i, initiator),
+		       target, get_target_name(priv, i, target),
+		       lo);
+	}
+
+	return s.len;
+}
+
+static DEVICE_ATTR(noc_stat, 0400, qcom_irr_noc_stat_show, NULL);
+
+// #define ENABLE_EL3_PANIC_TRIGGER
+#ifdef ENABLE_EL3_PANIC_TRIGGER
+/*
+ * for debug: request ATF to panic.  Compile-time gated; any write to
+ * the attribute triggers the SMC.
+ */
+static ssize_t qcom_irr_atf_panic_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	struct qcom_irr *priv = dev_get_drvdata(dev);
+	struct arm_smccc_res smc_res;
+
+	if (!priv->el3_rr_supported)
+		return -ENOTSUPP;
+
+	arm_smccc_smc(EL3_PANIC_SMC, 0,
+		      0, 0, 0, 0, 0, 0, &smc_res);
+
+	return size;
+}
+static DEVICE_ATTR(atf_panic, 0600, NULL, qcom_irr_atf_panic_store);
+#endif
+
+/* sysfs attributes exposed under the platform device */
+static struct attribute *qcom_irr_attrs[] = {
+	&dev_attr_reset_reason.attr,
+	&dev_attr_warm_boot_count.attr,
+	&dev_attr_xpu_stat.attr,
+	&dev_attr_noc_stat.attr,
+#ifdef ENABLE_EL3_PANIC_TRIGGER
+	&dev_attr_atf_panic.attr,
+#endif
+	NULL
+};
+
+static const struct attribute_group qcom_irr_attrs_group = {
+	.attrs = qcom_irr_attrs,
+};
+
+/*
+ * IPQ9574 specific NOCs initiator/target names & XPU  names
+ */
+static const char *ipq9574_memnoc_initiators[] = {
+	"memnoc_cfg",
+	"nss",
+	"sys0",
+	"sys1",
+	"wcssq6",
+	"app0",
+	"atcu0",
+};
+
+static const char *ipq9574_snoc_initiators[] = {
+	"pcie_ep",
+	"qdss_bam",
+	"snoc_cfg_0",
+	"tme",
+	"aggrnoc0",
+	"aggrnoc1",
+	"memnoc",
+	"pcnoc",
+	"qdss_etr",
+};
+
+static const char *ipq9574_pcnoc_initiators[] = {
+	"qhm0",
+	"pcnoc_cfg",
+	"rpm",
+	"tic",
+	"crypto",
+	"lpass",
+	"snoc",
+	"qdss_dap",
+	"sdcc",
+};
+
+static const char *ipq9574_memnoc_targets[] = {
+	"dbhn_sch0",
+	"dtb_reg",
+	"memnoc_mpu_sch0_cfg",
+	"memnoc_xpu_cfg",
+	"snoc",
+	"srvc_memnoc",
+};
+
+static const char *ipq9574_snoc_targets[] = {
+	"apss_cfg",
+	"group0",
+	"lpass_cfg",
+	"tme_cfg",
+	"usb_cfg",
+	"wcss_cfg",
+	"memnoc0",
+	"memnoc1",
+	"pcnoc",
+	"imem",
+	"nssnoc",
+	"srvc_snoc",
+	"pcie1_lane0",
+	"pcie1_lane1",
+	"pcie2_lane0",
+	"pcie2_lane1",
+	"pcie_ep_cfg",
+	"qdss_stm",
+};
+
+static const char *ipq9574_pcnoc_targets[] = {
+	"qhs0",
+	"qhs10",
+	"qhs1",
+	"qhs2",
+	"qhs3",
+	"qhs4",
+	"qhs5",
+	"qhs6",
+	"qhs7",
+	"qhs8",
+	"qhs9",
+	"dcc_cfg",
+	"snoc",
+	"pcnoc",
+	"tcu",
+};
+
+static struct xpu_id ipq9574_xpu_ids[] = {
+	{ .id = 4, .name = "DDR_MPU" },
+	{ .id = 3, .name = "DDRPHY_MPU" },
+	{ .id = 2, .name = "MEMNOC_APU" },
+	{ .id = 61, .name = "SEC_CTRL_APU" },
+	{ .id = 58, .name = "RPM_APU" },
+	{ .id = 40, .name = "MPM2_MPU" },
+	{ .id = 71, .name = "QPIC_APU" },
+	{ .id = 6, .name = "CRYPTO0_BAM" },
+	{ .id = 64, .name = "TCSR_REGS" },
+	{ .id = 129, .name = "SDC1_SDCC_ICE" },
+	{ .id = 46, .name = "BAM_BLSP1_DMA" },
+	{ .id = 70, .name = "SNOC_CFG" },
+	{ .id = 111, .name = "RPM_CFG" },
+	{ .id = 115, .name = "SMMU_TCU" },
+	{ .id = 43, .name = "IMEM_MPU" },
+	{ .id = 5, .name = "BOOT_ROM" },
+	{ .id = 9, .name = "CLK_CTL" },
+	{ .id = 72, .name = "QPIC_MPU" },
+	{ .id = 169, .name = "SNOC_BOOTIMEM" },
+	{ .id = 92, .name = "LPASS_CFG" },
+	{ .id = 65, .name = "TLMM" },
+	{ .id = 170, .name = "UNIPHY" },
+	{ .id = 142, .name = "NSS_IMEM" },
+	{ .id = 62, .name = "SPDM" },
+	{ .id = 110, .name = "PRNG" },
+	{ .id = 116, .name = "DCC" },
+	{ .id = 168, .name = "NOC" },
+	{ .id = 171, .name = "QDSS" },
+};
+
+static const struct qcom_irr_noc_platform_data ipq9574_platform_data = {
+	.initiators = {
+		[NOC_MEMNOC] = {
+			.strings = ipq9574_memnoc_initiators,
+			.nr_strings = ARRAY_SIZE(ipq9574_memnoc_initiators),
+		},
+		[NOC_PCNOC] = {
+			.strings = ipq9574_pcnoc_initiators,
+			.nr_strings = ARRAY_SIZE(ipq9574_pcnoc_initiators),
+		},
+		[NOC_SNOC] = {
+			.strings = ipq9574_snoc_initiators,
+			.nr_strings = ARRAY_SIZE(ipq9574_snoc_initiators),
+		},
+	},
+	.targets = {
+		[NOC_MEMNOC] = {
+			.strings = ipq9574_memnoc_targets,
+			.nr_strings = ARRAY_SIZE(ipq9574_memnoc_targets),
+		},
+		[NOC_PCNOC] = {
+			.strings = ipq9574_pcnoc_targets,
+			.nr_strings = ARRAY_SIZE(ipq9574_pcnoc_targets),
+		},
+		[NOC_SNOC] = {
+			.strings = ipq9574_snoc_targets,
+			.nr_strings = ARRAY_SIZE(ipq9574_snoc_targets),
+		},
+	},
+	.xpu_ids = ipq9574_xpu_ids,
+	.nr_xpu_ids = ARRAY_SIZE(ipq9574_xpu_ids),
+};
+
+/* the generic compatible has no decode tables; the ipq9574 variant
+ * carries SoC specific NOC/XPU name tables as driver data */
+static const struct of_device_id qcom_irr_match[] = {
+	{ .compatible = "qcom,imem-reset-reason", },
+	{ .compatible = "qcom,imem-reset-reason-ipq9574",
+	  .data = &ipq9574_platform_data },
+	{ },
+};
+
+/*
+ *
+ */
+/*
+ * Probe: map the reserved IMEM window, read last boot's reset reason,
+ * arm the area for the next boot, register the panic notifier and the
+ * sysfs attributes.
+ */
+static int qcom_irr_probe(struct platform_device *pdev)
+{
+	struct qcom_irr *priv;
+	int ret;
+	const struct of_device_id *match;
+
+	dev_dbg(&pdev->dev, "probe\n");
+
+	priv = devm_kzalloc(&pdev->dev, sizeof (*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(&pdev->dev, "unable to allocate priv");
+		return -ENOMEM;
+	}
+
+	/* SoC specific NOC/XPU decode tables, if the compatible has any */
+	match = of_match_device(qcom_irr_match, &pdev->dev);
+	if (match)
+		priv->noc_pdata = match->data;
+
+	priv->imem_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						      "imem");
+	if (!priv->imem_res) {
+		dev_err(&pdev->dev, "unable to get resource 'imem'\n");
+		return -ENXIO;
+	}
+
+	priv->imem = devm_ioremap_resource(&pdev->dev, priv->imem_res);
+	/* devm_ioremap_resource() returns an ERR_PTR(), never NULL;
+	 * the previous !priv->imem test could never catch a failure */
+	if (IS_ERR(priv->imem)) {
+		dev_err(&pdev->dev, "unable to ioremap 'imem'\n");
+		return PTR_ERR(priv->imem);
+	}
+
+	/* consume last boot's reason and arm IMEM for the next boot */
+	read_krr(priv);
+
+	priv->pdev = pdev;
+	priv->panic_notifier.notifier_call = qcom_irr_on_panic;
+
+	/* EL3/S-EL1 reason SMCs need a cooperating ATF build */
+	priv->el3_rr_supported = of_property_read_bool(pdev->dev.of_node,
+					       "qcom-fbx,scm-el3-reasons");
+
+	dev_set_drvdata(&pdev->dev, priv);
+
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &priv->panic_notifier);
+
+	ret = sysfs_create_group(&priv->pdev->dev.kobj, &qcom_irr_attrs_group);
+	if (ret) {
+		dev_err(&pdev->dev, "sysfs_create_group failed: %pe\n",
+			ERR_PTR(ret));
+		goto err_unregister_notifier;
+	}
+
+	return 0;
+
+err_unregister_notifier:
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &priv->panic_notifier);
+	return ret;
+}
+
+/*
+ *
+ */
+/*
+ * Remove: tear down sysfs attributes and the panic notifier; IMEM
+ * mapping and priv are devm-managed.
+ */
+static int qcom_irr_remove(struct platform_device *pdev)
+{
+	struct qcom_irr *priv =
+		dev_get_drvdata(&pdev->dev);
+
+	dev_dbg(&pdev->dev, "remove\n");
+
+	sysfs_remove_group(&priv->pdev->dev.kobj,
+			   &qcom_irr_attrs_group);
+
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &priv->panic_notifier);
+
+	return 0;
+}
+
+/* platform glue; matched via the OF table above */
+static struct platform_driver qcom_irr_platform_driver = {
+	.driver	= {
+		.name		= "qcom_irr_platform_driver",
+		.of_match_table	= qcom_irr_match,
+	},
+	.probe	= qcom_irr_probe,
+	.remove	= qcom_irr_remove,
+};
+
+module_platform_driver(qcom_irr_platform_driver);
+
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
+MODULE_LICENSE("GPL");
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/drivers/video/fbdev/ssd1320.c	2023-08-17 20:04:42.479011063 +0200
@@ -0,0 +1,1041 @@
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+#include <linux/fb.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/backlight.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/reset.h>
+
+#define SSD1320_MAX_BRIGHTNESS		0xff
+#define SSD1320_NOMINAL_BRIGHTNESS	0x9f
+
+/*
+ * common commands
+ */
+#define OPCODE_ADDRESSING_MODE		0x20
+#define OPCODE_SET_COLUMN		0x21
+#define OPCODE_SET_ROW			0x22
+#define OPCODE_CONTRAST			0x81
+#define OPCODE_SET_SEG_NORM_SCAN_DIR	0xa0
+#define OPCODE_SET_SEG_REV_SCAN_DIR	0xa1
+#define OPCODE_DISPLAY_START_LINE	0xa2
+#define OPCODE_DISPLAY_NO_FORCEON	0xa4
+#define OPCODE_DISPLAY_FORCEON		0xa5
+#define OPCODE_DISPLAY_NO_INVERSE	0xa6
+#define OPCODE_DISPLAY_INVERSE		0xa7
+#define OPCODE_MULTIPLEX_RATIO		0xa8
+#define OPCODE_IREF_SELECTION		0xad
+#define OPCODE_DISPLAY_OFF		0xae
+#define OPCODE_DISPLAY_ON		0xaf
+#define OPCODE_SET_PRECHARGE_VOLTAGE	0xbc
+#define OPCODE_SET_GRAYSCALE_TBL	0xbe
+#define OPCODE_DEF_GRAY			0xbf
+#define OPCODE_SET_COM_NORMAL_SCAN_DIR	0xc0
+#define OPCODE_SET_COM_REV_SCAN_DIR	0xc8
+#define OPCODE_SET_SEG_PINS_HWCONFIG	0xda
+#define OPCODE_DISPLAY_OFFSET		0xd3
+#define OPCODE_CLK_DIVIDE_RATIO		0xd5
+#define OPCODE_SET_DISPLAY_ENH_A	0xd8
+#define OPCODE_SET_PRECHARGE_PERIOD	0xd9
+#define OPCODE_SET_VCOM_DESELECT_LVL	0xdb
+#define OPCODE_SET_DISPLAY_ENH_B	0xf0
+
+/*
+ * ch1120 specific commands
+ */
+#define OPCODE_CH1120_SET_DISCHAGE_PERIOD	0x93
+#define OPCODE_CH1120_GRAYSCALE_MONO_MODE	0xac
+
+/*
+ * fbinfo
+ */
+static struct fb_fix_screeninfo ssd1320_fb_fix = {
+	.id		= "ssd1320",
+	.type		= FB_TYPE_PACKED_PIXELS,
+	.visual		= FB_VISUAL_STATIC_PSEUDOCOLOR,
+	.xpanstep	= 0,
+	.ypanstep	= 1,
+	.ywrapstep	= 0,
+	.accel		= FB_ACCEL_NONE,
+};
+
+static struct fb_var_screeninfo ssd1320_fb_var = {
+	.bits_per_pixel	= 8,
+	.grayscale	= 1,
+	.nonstd		= 1,
+	.red.length	= 8,
+	.green.length	= 8,
+	.blue.length	= 8,
+};
+
+/*
+ * private data
+ */
+#define SSD1320_SEGS		160
+#define SSD1320_COMS		160
+
+enum oled_type {
+	TYPE_SSD1320,
+	TYPE_CH1120,
+};
+
+struct ssd1320 {
+	struct mutex			mutex;
+
+	/* configuration from device tree */
+	enum oled_type			type;
+	u32				watchdog;
+	u32				max_brightness;
+	u32				default_brightness;
+	u32				com_range[2];
+	bool				com_reverse;
+	u32				seg_range[2];
+	bool				seg_reverse;
+	bool				seg_sequential;
+	bool				seg_first_odd;
+
+	bool				has_clk_divide_ratio;
+	u32				clk_divide_ratio;
+	bool				has_precharge_period;
+	u32				precharge_period;
+	bool				has_vcom_deselect_level;
+	u32				vcom_deselect_level;
+	bool				has_precharge_voltage;
+	u32				precharge_voltage;
+	bool				has_iref;
+	u32				iref;
+	bool				has_grayscale_table;
+	u32				grayscale_table[15];
+	bool				has_display_enh_a;
+	u32				display_enh_a;
+	bool				has_display_enh_b;
+	u32				display_enh_b;
+	bool				has_discharge_period;
+	u32				discharge_period;
+
+	/* image of display ram */
+	u32				width;
+	u32				height;
+	u8				*gddram;
+	unsigned int			gddram_size;
+
+	/* data ram, 8 bits per pixel */
+	u8				*vmem;
+	unsigned int			vmem_size;
+
+	struct fb_info			*fb;
+	struct gpio_desc		*vcc_gpio;
+	struct reset_control		*reset;
+	struct gpio_desc		*data_gpio;
+	struct gpio_desc		*reset_gpio;
+	struct spi_device		*spi;
+
+	struct backlight_device		*backlight;
+	unsigned int			brightness;
+
+	/* watchog timer */
+	struct delayed_work		wtd_work;
+	atomic_t			wtd_count;
+};
+
+/*
+ * send command to device
+ */
+static int send_cmd(struct ssd1320 *priv, u8 cmd)
+{
+	int ret;
+
+	mutex_lock(&priv->mutex);
+	/* D/C line low selects command mode on the controller */
+	gpiod_set_value(priv->data_gpio, 0);
+	ret = spi_write_then_read(priv->spi, &cmd, 1, NULL, 0);
+	mutex_unlock(&priv->mutex);
+	return ret;
+}
+
+/*
+ * send a two-byte command (opcode + argument) to the device; abort on
+ * the first SPI error instead of OR-ing error codes together and
+ * still sending the argument byte.
+ */
+static int send_cmd2(struct ssd1320 *priv, u8 cmd, u8 arg)
+{
+	int ret;
+
+	ret = send_cmd(priv, cmd);
+	if (ret)
+		return ret;
+	return send_cmd(priv, arg);
+}
+
+/*
+ * send a three-byte command (opcode + two arguments); abort on the
+ * first SPI error instead of OR-ing error codes together.
+ */
+static int send_cmd3(struct ssd1320 *priv, u8 cmd, u8 arg1, u8 arg2)
+{
+	int ret;
+
+	ret = send_cmd(priv, cmd);
+	if (ret)
+		return ret;
+	ret = send_cmd(priv, arg1);
+	if (ret)
+		return ret;
+	return send_cmd(priv, arg2);
+}
+
+/*
+ * send command list to device
+ */
+/*
+ * send a list of command bytes, stopping at the first SPI error.
+ */
+static int send_cmds(struct ssd1320 *priv, const u8 *cmd, unsigned int len)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < len; i++) {
+		ret = send_cmd(priv, cmd[i]);
+		if (ret < 0)
+			return ret;
+	}
+	return 0;
+}
+
+/*
+ * write given data into device gddram
+ */
+static int write_data(struct ssd1320 *priv, u8 *tx, unsigned int size)
+{
+	int ret;
+
+	mutex_lock(&priv->mutex);
+	/* D/C line high selects data mode (GDDRAM write) */
+	gpiod_set_value(priv->data_gpio, 1);
+	ret = spi_write(priv->spi, tx, size);
+	mutex_unlock(&priv->mutex);
+	return ret;
+}
+
+/*
+ * Hardware-reset and initialize the controller: program all optional
+ * tuning parameters present in the device tree, set up segment/COM
+ * geometry, clear GDDRAM, then turn the display on.
+ *
+ * Returns 0 on success or a negative error from the first failing
+ * command transfer.
+ */
+static int ssd1320_init(struct ssd1320 *priv)
+{
+	int ret;
+
+	if (priv->reset_gpio) {
+		/* power VCC down across reset when we control it */
+		if (priv->vcc_gpio)
+			gpiod_direction_output(priv->vcc_gpio, 0);
+		gpiod_set_value_cansleep(priv->reset_gpio, 1);
+		udelay(10);
+		gpiod_set_value_cansleep(priv->reset_gpio, 0);
+		udelay(10);
+	}
+
+	ret = send_cmd(priv, OPCODE_DISPLAY_OFF);
+	if (ret)
+		return ret;
+
+	if (priv->has_clk_divide_ratio) {
+		ret = send_cmd2(priv, OPCODE_CLK_DIVIDE_RATIO,
+				priv->clk_divide_ratio);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->has_precharge_period) {
+		ret = send_cmd2(priv, OPCODE_SET_PRECHARGE_PERIOD,
+				priv->precharge_period);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->has_precharge_voltage) {
+		ret = send_cmd2(priv, OPCODE_SET_PRECHARGE_VOLTAGE,
+				priv->precharge_voltage);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->has_vcom_deselect_level) {
+		ret = send_cmd2(priv, OPCODE_SET_VCOM_DESELECT_LVL,
+				priv->vcom_deselect_level);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->has_iref) {
+		ret = send_cmd2(priv, OPCODE_IREF_SELECTION, priv->iref);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->has_display_enh_a) {
+		ret = send_cmd2(priv, OPCODE_SET_DISPLAY_ENH_A,
+				priv->display_enh_a);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->has_display_enh_b) {
+		ret = send_cmd2(priv, OPCODE_SET_DISPLAY_ENH_B,
+				priv->display_enh_b);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->has_discharge_period) {
+		/* discharge period is a CH1120-only opcode */
+		if (priv->type != TYPE_CH1120)
+			return -EINVAL;
+		ret = send_cmd2(priv, OPCODE_CH1120_SET_DISCHAGE_PERIOD,
+				priv->discharge_period);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->has_grayscale_table) {
+		u8 cmds[16];
+		int i;
+
+		/* opcode followed by the 15 grayscale levels */
+		cmds[0] = OPCODE_SET_GRAYSCALE_TBL;
+		for (i = 0; i < 15; i++)
+			cmds[i + 1] = priv->grayscale_table[i];
+
+		ret = send_cmds(priv, cmds, sizeof (cmds));
+		if (ret)
+			return ret;
+	}
+
+	ret = send_cmd2(priv, OPCODE_CONTRAST,
+			priv->default_brightness);
+	if (ret)
+		return ret;
+
+	if (priv->seg_sequential || priv->seg_first_odd) {
+		u8 hw_config;
+
+		/* the two chips encode the same options differently */
+		hw_config = 0;
+		switch (priv->type) {
+		case TYPE_SSD1320:
+			hw_config = 2;
+			if (!priv->seg_sequential)
+				hw_config |= (1 << 4);
+			if (priv->seg_first_odd)
+				hw_config |= (1 << 5);
+			break;
+		case TYPE_CH1120:
+			if (priv->seg_first_odd)
+				hw_config |= (1 << 0);
+			if (priv->seg_sequential)
+				hw_config |= (1 << 1);
+			break;
+		}
+
+		ret = send_cmd2(priv, OPCODE_SET_SEG_PINS_HWCONFIG,
+				hw_config);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->com_reverse) {
+		ret = send_cmd(priv, OPCODE_SET_COM_REV_SCAN_DIR);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->seg_reverse) {
+		ret = send_cmd(priv, OPCODE_SET_SEG_REV_SCAN_DIR);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->height != SSD1320_COMS) {
+		u8 off;
+
+		if (priv->com_reverse)
+			off = priv->com_range[0];
+		else
+			off = SSD1320_COMS - priv->com_range[0];
+
+		/* hardware will skip this number of COM */
+		ret = send_cmd2(priv, OPCODE_DISPLAY_OFFSET, off);
+		if (ret)
+			return ret;
+
+		/* hardware will only use this number of COM + 1 */
+		ret = send_cmd2(priv, OPCODE_MULTIPLEX_RATIO,
+				priv->height - 1);
+		if (ret)
+			return ret;
+
+		/* set row boundaries so that hardware switches to
+		   back to first row when row pointer reaches the
+		   upper bound. This way after writing the whole
+		   visible pixels data the hardware goes back (0,0) */
+		ret = send_cmd3(priv, OPCODE_SET_ROW, 0, priv->height - 1);
+		if (ret)
+			return ret;
+	}
+
+	/* zero ram */
+	memset(priv->gddram, 0x00, priv->gddram_size);
+	ret = write_data(priv, priv->gddram, priv->gddram_size);
+	if (ret)
+		return ret;
+
+	if (priv->vcc_gpio) {
+		gpiod_direction_output(priv->vcc_gpio, 1);
+		msleep(10);
+	}
+
+	return send_cmd(priv, OPCODE_DISPLAY_ON);
+}
+
+/*
+ * Repack the 8bpp shadow framebuffer (priv->vmem, honouring the
+ * current yoffset pan and var.rotate) into the controller's 4bpp
+ * GDDRAM layout (two pixels per byte, SSD1320_SEGS columns wide) and
+ * push the whole buffer to the panel over SPI.
+ */
+static int ssd1320_fb_update(struct ssd1320 *priv)
+{
+	const unsigned char *vmem;
+	unsigned int row, w, h;
+	unsigned int rotate;
+
+	w = priv->width;
+	h = priv->height;
+
+	rotate = priv->fb->var.rotate;
+	vmem = priv->vmem + w * priv->fb->var.yoffset;
+	memset(priv->gddram, 0, priv->gddram_size);
+
+	for (row = 0; row < h; row++) {
+		unsigned int hw_col;
+
+		/* hardware columns come in pairs: one GDDRAM byte
+		 * holds two 4-bit pixels */
+		for (hw_col = 0; hw_col < SSD1320_SEGS; hw_col += 2) {
+			unsigned int col, nibble;
+			u8 val;
+
+			/* segments left of the visible window stay 0 */
+			if (hw_col < priv->seg_range[0])
+				continue;
+
+			col = hw_col - priv->seg_range[0];
+			val = 0;
+			for (nibble = 0; nibble < 2; nibble++) {
+				unsigned int off, x;
+				u8 vval;
+
+				x = col + nibble;
+				if (x >= w)
+					break;
+
+				switch (rotate) {
+				case 0:
+				default:
+					off = row * w + x;
+					break;
+
+				case 180:
+					off = w * h - (row * w + x) - 1;
+					break;
+
+				case 90:
+					/* NOTE(review): stride here is w;
+					 * for a 90° rotation of a w*h
+					 * buffer one would expect h -
+					 * confirm on non-square panels */
+					off = (w - x - 1) * w + row;
+					break;
+
+				case 270:
+					off = x * w + (h - row - 1);
+					break;
+				}
+
+				/* keep the 4 most significant bits of
+				 * the 8bpp source pixel */
+				vval = vmem[off] >> 4;
+				val |= vval << (nibble * 4);
+			}
+
+			priv->gddram[row * (SSD1320_SEGS / 2) +
+				     hw_col / 2] = val;
+		}
+	}
+
+	return write_data(priv, priv->gddram, priv->gddram_size);
+}
+
+/*
+ * frame buffer fill rect callback: draw into the shadow buffer, then
+ * re-arm the blanking watchdog and push the frame to the panel.
+ */
+static void ssd1320_fb_fillrect(struct fb_info *info,
+				const struct fb_fillrect *rect)
+{
+	struct ssd1320 *priv = info->par;
+	sys_fillrect(info, rect);
+	atomic_set(&priv->wtd_count, priv->watchdog);
+	ssd1320_fb_update(priv);
+}
+
+/*
+ * frame buffer copy area callback: same pattern as fillrect.
+ */
+static void ssd1320_fb_copyarea(struct fb_info *info,
+				const struct fb_copyarea *area)
+{
+	struct ssd1320 *priv = info->par;
+	sys_copyarea(info, area);
+	atomic_set(&priv->wtd_count, priv->watchdog);
+	ssd1320_fb_update(priv);
+}
+
+/*
+ * frame buffer image blit: same pattern as fillrect.
+ */
+static void ssd1320_fb_imageblit(struct fb_info *info,
+				 const struct fb_image *image)
+{
+	struct ssd1320 *priv = info->par;
+	sys_imageblit(info, image);
+	atomic_set(&priv->wtd_count, priv->watchdog);
+	ssd1320_fb_update(priv);
+}
+
+/*
+ * frame buffer pan callback: record the new pan offsets (the fb core
+ * has already validated them against the virtual resolution) and
+ * redraw from the newly visible region.
+ */
+static int ssd1320_fb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+	struct ssd1320 *priv = info->par;
+	priv->fb->var.xoffset = var->xoffset;
+	priv->fb->var.yoffset = var->yoffset;
+	atomic_set(&priv->wtd_count, priv->watchdog);
+	ssd1320_fb_update(priv);
+	return 0;
+}
+
+/*
+ * frame buffer set_par callback, set videomode.  Only rotation can
+ * actually change (see ssd1320_fb_check_var), so a full redraw is all
+ * that is needed here.
+ */
+static int ssd1320_fb_set_par(struct fb_info *info)
+{
+	struct ssd1320 *priv = info->par;
+	/* called after rotate update */
+	atomic_set(&priv->wtd_count, priv->watchdog);
+	ssd1320_fb_update(priv);
+	return 0;
+}
+
+/*
+ * frame buffer check_var callback: the hardware mode is fixed, so
+ * force every field back to the current settings; only var->rotate
+ * may change, and only to one of the four supported angles (anything
+ * else falls back to 0).
+ */
+static int ssd1320_fb_check_var(struct fb_var_screeninfo *var,
+				struct fb_info *info)
+{
+	unsigned int rotate;
+
+	rotate = var->rotate;
+	if (rotate != 0 && rotate != 90 && rotate != 180 && rotate != 270)
+		rotate = 0;
+	*var = info->var;
+	var->rotate = rotate;
+	return 0;
+}
+
+/*
+ * frame buffer blank callback: blanking is not implemented; report
+ * success so fbcon does not complain.
+ */
+static int ssd1320_fb_blank(int blank, struct fb_info *info)
+{
+	return 0;
+}
+
+/*
+ * frame buffer write from userspace.  Clipped copy into the shadow
+ * buffer (same bounds logic as the core fb_sys_write), followed by a
+ * watchdog re-arm and a panel refresh.  Returns the number of bytes
+ * written, or a negative error if nothing could be copied.
+ */
+static ssize_t ssd1320_fb_write(struct fb_info *info, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct ssd1320 *priv = info->par;
+	unsigned long p = *ppos;
+	void *dst;
+	int err = 0;
+	unsigned long total_size;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return -EPERM;
+
+	total_size = info->fix.smem_len;
+
+	if (p > total_size)
+		return -EFBIG;
+
+	/* clamp the write to the video memory; remember the reason in
+	 * err but still perform the partial copy */
+	if (count > total_size) {
+		err = -EFBIG;
+		count = total_size;
+	}
+
+	if (count + p > total_size) {
+		if (!err)
+			err = -ENOSPC;
+
+		count = total_size - p;
+	}
+
+	dst = (void __force *)(info->screen_base + p);
+
+	if (copy_from_user(dst, buf, count))
+		err = -EFAULT;
+
+	if  (!err)
+		*ppos += count;
+
+	/* refresh even on partial/failed writes: dst may have been
+	 * partially updated */
+	atomic_set(&priv->wtd_count, priv->watchdog);
+	ssd1320_fb_update(priv);
+
+	return (err) ? err : count;
+}
+
+/* framebuffer operations: all drawing goes through the sys_* helpers
+ * on the vmalloc'd shadow buffer, then mirrors to the panel */
+static struct fb_ops ssd1320_fb_ops = {
+	.owner		= THIS_MODULE,
+	.fb_write	= ssd1320_fb_write,
+	.fb_fillrect	= ssd1320_fb_fillrect,
+	.fb_copyarea	= ssd1320_fb_copyarea,
+	.fb_imageblit	= ssd1320_fb_imageblit,
+	.fb_pan_display	= ssd1320_fb_pan,
+	.fb_blank	= ssd1320_fb_blank,
+	.fb_check_var	= ssd1320_fb_check_var,
+	.fb_set_par	= ssd1320_fb_set_par,
+};
+
+/*
+ * watchdog timer: runs every second; every drawing path re-arms
+ * wtd_count, so when it counts down to zero no update happened for
+ * priv->watchdog seconds and the screen is cleared to avoid burn-in
+ * from a stale image.
+ */
+static void wtd_work_cb(struct work_struct *t)
+{
+	struct ssd1320 *priv;
+	struct delayed_work *dwork;
+
+	dwork = container_of(t, struct delayed_work, work);
+	priv = container_of(dwork, struct ssd1320, wtd_work);
+
+	if (atomic_dec_and_test(&priv->wtd_count)) {
+		dev_err(&priv->spi->dev, "watchdog triggered\n");
+		memset(priv->vmem, 0, priv->vmem_size);
+		ssd1320_fb_update(priv);
+	}
+
+	/* self-rearm: one tick per second */
+	schedule_delayed_work(&priv->wtd_work, HZ);
+}
+
+/*
+ * backlight control: the panel has no real backlight; brightness is
+ * mapped onto the OLED contrast command.  The requested level is
+ * clamped to the device-tree provided maximum before being sent.
+ *
+ * Returns 0 on success or a negative SPI transfer error.
+ */
+static int ssd1320_bl_update_status(struct backlight_device *bl)
+{
+	struct ssd1320 *priv;
+	unsigned int brightness = bl->props.brightness;
+	int ret;
+
+	priv = bl_get_data(bl);
+
+	if (brightness > priv->max_brightness)
+		brightness = priv->max_brightness;
+
+	ret = send_cmd2(priv, OPCODE_CONTRAST, brightness);
+	if (ret < 0)
+		return ret;
+
+	/* record the clamped value actually programmed, so that
+	 * get_brightness reports what the panel really uses */
+	priv->brightness = brightness;
+	return 0;
+}
+
+/* report the last brightness value programmed into the panel */
+static int ssd1320_bl_get_brightness(struct backlight_device *bl)
+{
+	struct ssd1320 *priv;
+	priv = bl_get_data(bl);
+	return priv->brightness;
+}
+
+static struct backlight_ops ssd1320_bl_ops = {
+	.update_status		= ssd1320_bl_update_status,
+	.get_brightness		= ssd1320_bl_get_brightness,
+};
+
+/* initial properties for the registered backlight device; the
+ * runtime limit from the device tree is enforced in update_status */
+static const struct backlight_properties ssd1320_bl_props = {
+	.power		= FB_BLANK_UNBLANK,
+	.fb_blank	= FB_BLANK_UNBLANK,
+	.max_brightness	= 0xff,
+	.type		= BACKLIGHT_RAW,
+};
+
+/*
+ * register the backlight device and seed it with the current
+ * brightness.  Returns 0 on success or the registration error.
+ */
+static int init_backlight(struct ssd1320 *priv)
+{
+	struct backlight_device *bl;
+
+	bl = backlight_device_register("ssd1320", &priv->spi->dev,
+				       priv, &ssd1320_bl_ops,
+				       &ssd1320_bl_props);
+	if (IS_ERR(bl)) {
+		dev_err(&priv->spi->dev, "error %ld on backlight register\n",
+			PTR_ERR(bl));
+		return PTR_ERR(bl);
+	}
+	priv->backlight = bl;
+	bl->props.brightness = priv->brightness;
+	return 0;
+}
+
+/*
+ * SPI device probe callback: parse device-tree properties, allocate
+ * the shadow and GDDRAM buffers, initialize the panel and register
+ * the framebuffer and backlight devices.
+ */
+static int ssd1320_probe(struct spi_device *spi)
+{
+	struct device_node *node = spi->dev.of_node;
+	struct ssd1320 *priv;
+	struct fb_info *fb;
+	int ret;
+
+	if (!node) {
+		dev_err(&spi->dev, "No device tree data found!\n");
+		return -EINVAL;
+	}
+
+	fb = framebuffer_alloc(sizeof (*priv), &spi->dev);
+	if (!fb)
+		return -ENOMEM;
+
+	priv = fb->par;
+	mutex_init(&priv->mutex);
+	priv->spi = spi;
+	priv->fb = fb;
+	priv->type = (enum oled_type)of_device_get_match_data(&spi->dev);
+
+	/* VCC control is optional; data-select and reset are required */
+	priv->vcc_gpio = devm_gpiod_get_optional(&spi->dev, "ssd1320,vcc",
+					GPIOD_ASIS);
+	if (IS_ERR(priv->vcc_gpio)) {
+		ret = PTR_ERR(priv->vcc_gpio);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&spi->dev,
+				"failed to get vcc gpio: %d\n", ret);
+		goto fail;
+	}
+
+	priv->data_gpio = devm_gpiod_get(&spi->dev,
+					 "ssd1320,data-select",
+					 GPIOD_OUT_LOW);
+	if (IS_ERR(priv->data_gpio)) {
+		ret = PTR_ERR(priv->data_gpio);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&spi->dev, "failed to get data gpio: %d\n",
+				ret);
+		goto fail;
+	}
+
+	priv->reset_gpio = devm_gpiod_get(&spi->dev,
+					  "ssd1320,reset",
+					 GPIOD_OUT_LOW);
+	if (IS_ERR(priv->reset_gpio)) {
+		ret = PTR_ERR(priv->reset_gpio);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&spi->dev, "failed to get reset gpio: %d\n",
+				ret);
+		goto fail;
+	}
+
+	/* mandatory properties */
+	ret = of_property_read_u32(node, "ssd1320,watchdog", &priv->watchdog);
+	if (ret) {
+		dev_err(&spi->dev, "failed to get watchdog\n");
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,max-brightness",
+				   &priv->max_brightness);
+	if (ret) {
+		dev_err(&spi->dev, "failed to get max-brightness\n");
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,default-brightness",
+				   &priv->default_brightness);
+	if (ret) {
+		dev_err(&spi->dev, "failed to get default-brightness\n");
+		goto fail;
+	}
+
+	ret = of_property_read_u32_array(node, "ssd1320,com-range",
+					 priv->com_range,
+					 ARRAY_SIZE(priv->com_range));
+	if (ret) {
+		dev_err(&spi->dev, "failed to get com-range\n");
+		goto fail;
+	}
+
+	ret = of_property_read_u32_array(node, "ssd1320,seg-range",
+					 priv->seg_range,
+					 ARRAY_SIZE(priv->seg_range));
+	if (ret) {
+		dev_err(&spi->dev, "failed to get seg-range\n");
+		goto fail;
+	}
+
+	/* sanity check on screen size */
+	if (priv->com_range[0] >= SSD1320_COMS ||
+	    priv->com_range[1] >= SSD1320_COMS ||
+	    priv->com_range[0] >= priv->com_range[1])  {
+		dev_err(&spi->dev, "unsupported com-range\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (priv->seg_range[0] >= SSD1320_SEGS ||
+	    priv->seg_range[1] >= SSD1320_SEGS ||
+	    priv->seg_range[0] >= priv->seg_range[1])  {
+		dev_err(&spi->dev, "unsupported seg-range\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	priv->height = priv->com_range[1] - priv->com_range[0] + 1;
+	priv->width = priv->seg_range[1] - priv->seg_range[0] + 1;
+
+	priv->com_reverse = of_property_read_bool(node,
+						  "ssd1320,com-reverse-dir");
+	priv->seg_reverse = of_property_read_bool(node,
+						  "ssd1320,seg-reverse-dir");
+	priv->seg_sequential = of_property_read_bool(node,
+						     "ssd1320,seg-sequential");
+	priv->seg_first_odd = of_property_read_bool(node,
+						    "ssd1320,seg-first-odd");
+
+	/* optional tuning parameters: -EINVAL means "absent", any
+	 * other error is fatal */
+	ret = of_property_read_u32(node, "ssd1320,clk-divide-ratio",
+				   &priv->clk_divide_ratio);
+	priv->has_clk_divide_ratio = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get clk-divide-ratio\n");
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,precharge-period",
+				   &priv->precharge_period);
+	priv->has_precharge_period = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get precharge-period\n");
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,vcom-deselect-level",
+				   &priv->vcom_deselect_level);
+	priv->has_vcom_deselect_level = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get vcom-deselect-level");
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,precharge-voltage",
+				   &priv->precharge_voltage);
+	priv->has_precharge_voltage = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get precharge-voltage");
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,iref", &priv->iref);
+	priv->has_iref = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get iref");
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,display-enh-a",
+				   &priv->display_enh_a);
+	priv->has_display_enh_a = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get ssd1320,display-enh-a");
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,display-enh-b",
+				   &priv->display_enh_b);
+	priv->has_display_enh_b = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get ssd1320,display-enh-b");
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,discharge-period",
+				   &priv->discharge_period);
+	priv->has_discharge_period = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get ssd1320,discharge-period");
+		goto fail;
+	}
+
+	ret = of_property_read_u32_array(node, "ssd1320,grayscale-table",
+					 priv->grayscale_table,
+					 ARRAY_SIZE(priv->grayscale_table));
+	priv->has_grayscale_table = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get grayscale-table\n");
+		goto fail;
+	}
+
+	priv->brightness = priv->default_brightness;
+
+	/* setup framebuffer */
+	fb->fbops = &ssd1320_fb_ops;
+	fb->flags = FBINFO_FLAG_DEFAULT | FBINFO_HWACCEL_YPAN;
+	fb->var = ssd1320_fb_var;
+	fb->fix = ssd1320_fb_fix;
+
+	fb->var.xres = priv->width;
+	fb->var.yres = priv->height;
+	fb->var.xres_virtual = priv->width;
+	fb->var.yres_virtual = priv->height * 2;
+
+	/* twice lcd size so we can pan in one direction */
+	fb->fix.smem_len = (priv->width * priv->height) * 2;
+	fb->fix.line_length = priv->width;
+	fb->var.rotate = 0;
+
+	/* allocate hardware video memory, no way to make the hardware
+	 * skip some segments so the full hw width is always used */
+	priv->gddram_size = SSD1320_SEGS * priv->height / 2;
+	priv->gddram = vmalloc(priv->gddram_size);
+	if (!priv->gddram) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	memset(priv->gddram, 0, priv->gddram_size);
+
+	/* allocate video memory */
+	priv->vmem_size = PAGE_ALIGN(fb->fix.smem_len);
+	priv->vmem = vmalloc(priv->vmem_size);
+	if (!priv->vmem) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	memset(priv->vmem, 0, priv->vmem_size);
+	fb->screen_base = (char __iomem *)priv->vmem;
+
+	ret = ssd1320_init(priv);
+	if (ret)
+		goto fail;
+
+	ret = init_backlight(priv);
+	if (ret)
+		goto fail;
+
+	/* register frame buffer */
+	ret = register_framebuffer(fb);
+	if (ret < 0)
+		goto fail;
+
+	INIT_DELAYED_WORK(&priv->wtd_work, wtd_work_cb);
+
+	if (priv->watchdog) {
+		atomic_set(&priv->wtd_count, priv->watchdog);
+		schedule_delayed_work(&priv->wtd_work, HZ);
+	}
+
+	dev_info(&spi->dev,
+		 "fb%d: SSD1320 frame buffer device (%ux%u screen)\n",
+		 fb->node, priv->width, priv->height);
+
+	dev_set_drvdata(&spi->dev, priv);
+	return 0;
+
+fail:
+	if (priv->vmem)
+		vfree(priv->vmem);
+	if (priv->gddram)
+		vfree(priv->gddram);
+	if (priv->backlight)
+		backlight_device_unregister(priv->backlight);
+	framebuffer_release(fb);
+	return ret;
+}
+
+/*
+ * SPI device remove callback: stop the watchdog, unregister the
+ * framebuffer and release all memory.
+ */
+static void ssd1320_remove(struct spi_device *spi)
+{
+	struct ssd1320 *priv;
+	unsigned int i;
+
+	priv = dev_get_drvdata(&spi->dev);
+	cancel_delayed_work_sync(&priv->wtd_work);
+	unregister_framebuffer(priv->fb);
+	/* clear the page mappings fb_mmap/fb_deferred may have set on
+	 * the vmalloc'd pages before freeing them */
+	for (i = 0; i < priv->vmem_size; i += PAGE_SIZE) {
+		struct page *page;
+		page = vmalloc_to_page(priv->vmem + i);
+		page->mapping = NULL;
+	}
+	vfree(priv->vmem);
+	vfree(priv->gddram);
+	backlight_device_unregister(priv->backlight);
+	framebuffer_release(priv->fb);
+}
+
+/* legacy (non-DT) SPI device ids */
+static const struct spi_device_id ssd1320_id_table[] = {
+	{ "ssd1320", 0 },
+	{ "ch1120", 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(spi, ssd1320_id_table);
+
+/* device-tree match table; .data carries the controller variant */
+static const struct of_device_id ssd1320_of_match[] = {
+	{
+		.compatible = "solomon,ssd1320",
+		.data = (void *)TYPE_SSD1320,
+	},
+	{
+		.compatible = "chipwealth,ch1120",
+		.data = (void *)TYPE_CH1120,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ssd1320_of_match);
+
+static struct spi_driver ssd1320_driver = {
+	.driver = {
+		.name		= "ssd1320",
+		.of_match_table	= ssd1320_of_match,
+	},
+	.probe		= ssd1320_probe,
+	.remove		= ssd1320_remove,
+	.id_table	= ssd1320_id_table,
+};
+
+module_spi_driver(ssd1320_driver);
+
+MODULE_DESCRIPTION("SSD1320 driver");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+MODULE_LICENSE("GPL");
diff -Nruw linux-6.4-fbx/fs/exfat-fbx./Kconfig linux-6.4-fbx/fs/exfat-fbx/Kconfig
--- linux-6.4-fbx/fs/exfat-fbx./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/fs/exfat-fbx/Kconfig	2023-02-24 19:09:23.405368085 +0100
@@ -0,0 +1,3 @@
+
+config EXFAT_FS_FBX
+	tristate "exFAT fs support (fbx)"
diff -Nruw linux-6.4-fbx/fs/exfat-fbx./Makefile linux-6.4-fbx/fs/exfat-fbx/Makefile
--- linux-6.4-fbx/fs/exfat-fbx./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/fs/exfat-fbx/Makefile	2023-02-24 19:09:23.405368085 +0100
@@ -0,0 +1,13 @@
+
+obj-$(CONFIG_EXFAT_FS_FBX)	+= exfat.o
+
+exfat-y	= super.o				\
+	inode.o					\
+	fat.o					\
+	read-write.o				\
+	upcase.o				\
+	bitmap.o				\
+	time.o					\
+	dir.o					\
+	namei.o					\
+	file.o
diff -Nruw linux-6.4-fbx/fs/exfat-fbx./bitmap.c linux-6.4-fbx/fs/exfat-fbx/bitmap.c
--- linux-6.4-fbx/fs/exfat-fbx./bitmap.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/fs/exfat-fbx/bitmap.c	2023-02-24 19:09:35.701702339 +0100
@@ -0,0 +1,606 @@
+/*
+ * bitmap.c for exfat
+ * Created by <nschichan@freebox.fr> on Thu Aug  8 19:21:05 2013
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+
+/* sector that holds the bitmap bit for the given (0-based) cluster */
+static inline sector_t exfat_bitmap_sector(struct exfat_sb_info *sbi,
+					   u32 cluster)
+{
+	return sbi->first_bitmap_sector + ((cluster / 8) >> sbi->sectorbits);
+}
+
+/* byte offset of that bit inside its sector */
+static inline u32 exfat_bitmap_off(struct exfat_sb_info *sbi,
+				   u32 cluster)
+{
+	return (cluster / 8) & sbi->sectormask;
+}
+
+/* bit position of the cluster inside its bitmap byte */
+static inline u32 exfat_bitmap_shift(u32 cluster)
+{
+	return cluster & 7;
+}
+
+/*
+ * Scan the allocation bitmap from sbi->cur_bitmap_cluster for the
+ * first free cluster, mark it used and return its 0-based number in
+ * *out_cluster.  Caller must hold sbi->bitmap_mutex.  Returns -ENOSPC
+ * when no free cluster is accounted, -EIO on read failure.
+ */
+static int __find_get_free_cluster(struct inode *inode, u32 *out_cluster)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+
+	while (1) {
+		sector_t sect = exfat_bitmap_sector(sbi,
+						    sbi->cur_bitmap_cluster);
+		u32 off = exfat_bitmap_off(sbi, sbi->cur_bitmap_cluster);
+		u32 shift = exfat_bitmap_shift(sbi->cur_bitmap_cluster);
+
+		/* disk is full */
+		if (!sbi->free_clusters)
+			break;
+
+		/* reuse the cached bh while the scan stays inside the
+		 * same bitmap sector */
+		if (!sbi->cur_bitmap_bh ||
+		    sect != sbi->cur_bitmap_sector) {
+			if (sbi->cur_bitmap_bh)
+				brelse(sbi->cur_bitmap_bh);
+			sbi->cur_bitmap_bh = sb_bread(inode->i_sb, sect);
+			sbi->cur_bitmap_sector = sect;
+			if (!sbi->cur_bitmap_bh) {
+				exfat_msg(inode->i_sb, KERN_ERR,
+					  "unable to read bitmap sector "
+					  "at %llu", (unsigned long long)sect);
+				return -EIO;
+			}
+		}
+
+		if (!(sbi->cur_bitmap_bh->b_data[off] & (1 << shift))) {
+			sbi->cur_bitmap_bh->b_data[off] |= (1 << shift);
+			*out_cluster = sbi->cur_bitmap_cluster;
+			goto found;
+		}
+
+		/* wrap around at the end of the cluster heap */
+		++sbi->cur_bitmap_cluster;
+		if (sbi->cur_bitmap_cluster == sbi->cluster_count)
+			sbi->cur_bitmap_cluster = 0;
+	}
+	return -ENOSPC;
+
+found:
+	sbi->prev_free_cluster = *out_cluster;
+	--sbi->free_clusters;
+	mark_buffer_dirty(sbi->cur_bitmap_bh);
+	return 0;
+}
+
+/*
+ * Clear the bitmap bit of the given (0-based) cluster.  Caller must
+ * hold sbi->bitmap_mutex.  Returns -EIO if the cluster was already
+ * free (filesystem inconsistency) or the bitmap sector is unreadable.
+ */
+static int __put_cluster(struct inode *inode, u32 cluster)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	sector_t sect = exfat_bitmap_sector(sbi, cluster);
+	u32 off = exfat_bitmap_off(sbi, cluster);
+	u32 shift = exfat_bitmap_shift(cluster);
+
+
+	if (!sbi->cur_bitmap_bh || sect != sbi->cur_bitmap_sector) {
+		if (sbi->cur_bitmap_bh)
+			brelse(sbi->cur_bitmap_bh);
+		sbi->cur_bitmap_bh = sb_bread(inode->i_sb, sect);
+		if (!sbi->cur_bitmap_bh) {
+			exfat_msg(inode->i_sb, KERN_ERR,
+				  "unable to read bitmap sector at %llu",
+				  (unsigned long long)sect);
+			return -EIO;
+		}
+		sbi->cur_bitmap_sector = sect;
+		sbi->cur_bitmap_cluster = cluster;
+	}
+	if ((sbi->cur_bitmap_bh->b_data[off] & (1 << shift)) == 0) {
+		exfat_fs_error(inode->i_sb, "put_cluster: cluster %u "
+			  "already free.", cluster);
+		return -EIO;
+	}
+
+	++sbi->free_clusters;
+	sbi->cur_bitmap_bh->b_data[off] &= ~(1 << shift);
+	sbi->prev_free_cluster = cluster;
+	mark_buffer_dirty(sbi->cur_bitmap_bh);
+	/* sync_dirty_buffer(sbi->cur_bitmap_bh); */
+	return 0;
+}
+
+/*
+ * setup search to start at given cluster (0-based).  Drops the cached
+ * bh when the new starting cluster lives in a different bitmap sector
+ * so the next scan re-reads the right one.
+ */
+static void __exfat_reset_bitmap(struct exfat_sb_info *sbi, u32 cluster)
+{
+	sector_t sect;
+
+	if (cluster >= sbi->cluster_count)
+		cluster = 0;
+
+	sect = exfat_bitmap_sector(sbi, cluster);
+	if (sbi->cur_bitmap_sector != sect) {
+		sbi->cur_bitmap_sector = sect;
+		if (sbi->cur_bitmap_bh) {
+			brelse(sbi->cur_bitmap_bh);
+			sbi->cur_bitmap_bh = NULL;
+		}
+	}
+	sbi->cur_bitmap_cluster = cluster;
+}
+
+/*
+ * Return true when clusters[0..nr-1] form a strictly consecutive
+ * run.  Vacuously true for nr <= 1.  (The previous "i < nr - 1"
+ * bound underflowed for nr == 0, walking far out of bounds.)
+ */
+static bool all_contiguous(u32 *clusters, u32 nr)
+{
+	u32 i;
+
+	for (i = 1; i < nr; ++i) {
+		if (clusters[i] != clusters[i - 1] + 1)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * Allocate nr clusters for the inode, storing their disk cluster
+ * numbers (2-based) in clusters[].
+ *
+ * hint must be the cluster immediately after the last allocated
+ * cluster of the inode.  On success the inode's FAT chain is created
+ * or extended as needed; inodes whose data is fully contiguous keep
+ * the EXFAT_I_FAT_INVALID fast path and no chain is written.
+ *
+ * Caller must call mark_inode_dirty and update the inode size.
+ * Returns 0 or a negative error; on allocation failure clusters
+ * already grabbed are NOT released here.
+ */
+int exfat_alloc_clusters(struct inode *inode, u32 hint, u32 *clusters, u32 nr)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	u32 i;
+
+	/* bitmap is 0-based while disk clusters start at 2 */
+	mutex_lock(&sbi->bitmap_mutex);
+	__exfat_reset_bitmap(sbi, hint - 2);
+	for (i = 0; i < nr; ++i) {
+		u32 new;
+		int error;
+
+		error = __find_get_free_cluster(inode, &new);
+		if (error) {
+			mutex_unlock(&sbi->bitmap_mutex);
+			return error;
+		}
+
+		clusters[i] = new + 2;
+	}
+	mutex_unlock(&sbi->bitmap_mutex);
+
+	/*
+	 * all clusters found: now see if we need to update/create a
+	 * fat chain.
+	 */
+	if (info->first_cluster == 0) {
+		info->first_cluster = clusters[0];
+		if (all_contiguous(clusters, nr)) {
+			/*
+			 * first cluster alloc on inode and all
+			 * clusters are contiguous.
+			 */
+			info->flags |= EXFAT_I_FAT_INVALID;
+		} else {
+			/*
+			 * first alloc and already fragmented.
+			 */
+			return exfat_write_fat(inode, 0, clusters, nr);
+		}
+	} else {
+		int error;
+		if ((info->flags & EXFAT_I_FAT_INVALID) &&
+		    (clusters[0] != hint || !all_contiguous(clusters, nr))) {
+			/*
+			 * must now use fat chain instead of bitmap.
+			 */
+			info->flags &= ~(EXFAT_I_FAT_INVALID);
+
+			/*
+			 * write the contiguous chain that would
+			 * previously be accessed without the FAT
+			 * chain.
+			 */
+			error = exfat_write_fat_contiguous(inode,
+						  info->first_cluster,
+						  hint - info->first_cluster);
+			if (error)
+				return error;
+		}
+
+		if ((info->flags & EXFAT_I_FAT_INVALID) == 0) {
+			/*
+			 * link the allocated clusters after hint.
+			 */
+			error = exfat_write_fat(inode, hint - 1, clusters, nr);
+			if (error)
+				return  error;
+		}
+
+	}
+
+	/*
+	 * update i_blocks.
+	 */
+	inode->i_blocks += nr << (sbi->clusterbits - 9);
+	info->allocated_clusters += nr;
+
+	/*
+	 * caller must call mark_inode_dirty so that inode
+	 * first_cluster and inode flags get written to the disk.
+	 * caller must update inode size (directory and regular file
+	 * have different rules).
+	 */
+	return 0;
+}
+
+
+/*
+ * Free nr clusters of a contiguous run starting at disk cluster
+ * 'start' (2-based).  Stops at the first bitmap error.
+ */
+static int exfat_free_clusters_contiguous(struct inode *inode,
+					  u32 start, u32 nr)
+{
+	u32 cluster;
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	int error = 0;
+
+	mutex_lock(&sbi->bitmap_mutex);
+	for (cluster = start; cluster < start + nr; ++cluster) {
+		/* bitmap indexing is 0-based */
+		error = __put_cluster(inode, cluster - 2);
+		if (error)
+			break;
+	}
+	mutex_unlock(&sbi->bitmap_mutex);
+	return error;
+}
+
+/*
+ * Free nr clusters of a FAT-chained file, starting at file cluster
+ * index fcluster_start, resolving each file cluster to its disk
+ * cluster through the FAT.
+ *
+ * Note: the old version re-declared 'error' inside the loop, so any
+ * failure was silently discarded and 0 was returned.
+ */
+static int exfat_free_clusters_fat(struct inode *inode,
+				   u32 fcluster_start, u32 nr)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	u32 fcluster;
+	int error = 0;
+
+	mutex_lock(&sbi->bitmap_mutex);
+	for (fcluster = fcluster_start; fcluster < fcluster_start + nr;
+	     ++fcluster) {
+		u32 dcluster;
+
+		error = exfat_get_fat_cluster(inode, fcluster, &dcluster);
+		if (error)
+			break;
+
+		error = __put_cluster(inode, dcluster - 2);
+		if (error)
+			break;
+	}
+	mutex_unlock(&sbi->bitmap_mutex);
+
+	/*
+	 * per-inode file cluster to disk cluster translation cache
+	 * mostly now holds entries to the zone we just truncated, so
+	 * they must not be kept (this could lead to FS corruption).
+	 */
+	exfat_inode_cache_drop(inode);
+
+	return error;
+}
+
+/*
+ * Truncate the inode's allocation down to fcluster_start file
+ * clusters, releasing everything after it through either the
+ * contiguous or the FAT-chain path.  Caller must call
+ * mark_inode_dirty and update inode->i_size.
+ */
+int exfat_free_clusters_inode(struct inode *inode, u32 fcluster_start)
+{
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	int error;
+	u32 nr_to_free = info->allocated_clusters - fcluster_start;
+
+	if (info->first_cluster == 0 || nr_to_free == 0)
+		/*
+		 * no clusters allocated, or nothing to do
+		 */
+		return 0;
+
+	if (info->flags & EXFAT_I_FAT_INVALID)
+		error = exfat_free_clusters_contiguous(inode,
+				       info->first_cluster + fcluster_start,
+				       nr_to_free);
+	else
+		error = exfat_free_clusters_fat(inode, fcluster_start,
+					nr_to_free);
+	if (error)
+		return error;
+
+	info->allocated_clusters -= nr_to_free;
+	inode->i_blocks = EXFAT_I(inode)->allocated_clusters <<
+		(EXFAT_SB(inode->i_sb)->clusterbits - 9);
+
+	/*
+	 * update inode info, caller must call mark_inode_dirty and
+	 * update inode->i_size.
+	 */
+	if (fcluster_start == 0) {
+		info->first_cluster = 0;
+		info->flags &= ~(EXFAT_I_FAT_INVALID);
+	}
+	return 0;
+}
+
+/*
+ * Count the number of FREE clusters (zero bits) among the first
+ * 'count' bits of the bitmap sector held in bh, popcounting in the
+ * widest possible word at each step.
+ */
+static u32 count_clusters_bh(struct buffer_head *bh, u32 count)
+{
+	u8 *ptr = bh->b_data;
+	u32 ret = 0;
+	u8 val;
+
+	while (count >= sizeof (u64) * 8) {
+		u64 val = *(u64*)ptr;
+
+		ret += hweight64(~val);
+		count -= sizeof (u64) * 8;
+		ptr += sizeof (u64);
+	}
+	if (count >= sizeof (u32) * 8) {
+		u32 val = *(u32*)ptr;
+
+		ret += hweight32(~val);
+		count -= sizeof (u32) * 8;
+		ptr += sizeof (u32);
+	}
+	if (count >= sizeof (u16) * 8) {
+		u16 val = *(u16*)ptr;
+
+		ret += hweight16(~val);
+		count -= sizeof (u16) * 8;
+		ptr += sizeof (u16);
+	}
+	if (count >= sizeof (u8) * 8) {
+		u8 val = *ptr;
+
+		ret += hweight8(~val);
+		count -= sizeof (u8) * 8;
+		ptr += sizeof (u8);
+	}
+
+	/* trailing partial byte: check the remaining bits one by one */
+	if (count) {
+		val = *ptr;
+		while (count) {
+			ret += (~val & 1);
+			val >>= 1;
+			--count;
+		}
+	}
+	return ret;
+}
+
+/*
+ * Count the free clusters over the whole allocation bitmap, one
+ * sector at a time.  Only called during mount, so taking
+ * sbi->bitmap_mutex should not be needed.
+ */
+static int exfat_get_free_cluster_count(struct super_block *sb, u32 *out_count)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	u32 clusters_per_sector = 8 * sbi->sectorsize;
+	u32 cluster;
+
+	*out_count = 0;
+	for (cluster = 0; cluster < sbi->cluster_count;
+	     cluster += clusters_per_sector) {
+		sector_t sect = exfat_bitmap_sector(sbi, cluster);
+		struct buffer_head *bh;
+		u32 count = clusters_per_sector;
+
+		/* last sector may cover only part of the heap */
+		if (cluster + clusters_per_sector > sbi->cluster_count)
+			count = sbi->cluster_count - cluster;
+
+		bh = sb_bread(sb, sect);
+		if (!bh) {
+			exfat_msg(sb, KERN_ERR,
+				  "unable to read bitmap sector at %llu",
+				  (unsigned long long)sect);
+			return -EIO;
+		}
+		*out_count += count_clusters_bh(bh, count);
+		brelse(bh);
+	}
+	return 0;
+}
+
+/*
+ * setup a bitmap context, preload a bh from the requested starting
+ * cluster (2-based; converted to the bitmap's 0-based numbering).
+ * Returns -ENOSPC when the cluster lies outside the heap, -EIO on
+ * read failure.
+ */
+int exfat_init_bitmap_context(struct super_block *sb,
+			      struct exfat_bitmap_ctx *ctx,
+			      u32 cluster)
+{
+	memset(ctx, 0, sizeof (*ctx));
+	ctx->sb = sb;
+
+	/* a cluster < 2 wraps around here and is caught by the range
+	 * check below */
+	cluster -= 2;
+	if (cluster >= EXFAT_SB(sb)->cluster_count)
+		return -ENOSPC;
+
+	ctx->cur_sector = exfat_bitmap_sector(EXFAT_SB(sb), cluster);
+	ctx->bh = sb_bread(ctx->sb, ctx->cur_sector);
+
+	if (!ctx->bh) {
+		exfat_msg(sb, KERN_ERR, "unable to read bitmap sector at %llu",
+			  (unsigned long long)ctx->cur_sector);
+		return -EIO;
+	}
+	return 0;
+}
+
+/*
+ * Tear down a bitmap context: drop the buffer head reference kept by
+ * exfat_init_bitmap_context / exfat_test_bitmap_cluster, if any.
+ */
+void exfat_exit_bitmap_context(struct exfat_bitmap_ctx *ctx)
+{
+	if (!ctx->bh)
+		return;
+	brelse(ctx->bh);
+}
+
+/*
+ * Test a specific (0-based) cluster usage in the bitmap.  Reuses the
+ * bh cached in the exfat_bitmap_ctx, or reads a new one when the
+ * cluster lives in a different sector - releasing the old bh first
+ * (the previous version leaked one buffer_head reference per sector
+ * crossing).
+ */
+static int exfat_test_bitmap_cluster(struct exfat_bitmap_ctx *ctx,
+				     uint32_t cluster, bool *cluster_in_use)
+{
+	sector_t sect;
+	uint32_t off = exfat_bitmap_off(EXFAT_SB(ctx->sb), cluster);
+	int shift = exfat_bitmap_shift(cluster);
+
+	sect = exfat_bitmap_sector(EXFAT_SB(ctx->sb), cluster);
+	if (sect != ctx->cur_sector) {
+		if (ctx->bh)
+			brelse(ctx->bh);
+		ctx->cur_sector = sect;
+		ctx->bh = sb_bread(ctx->sb, ctx->cur_sector);
+		if (!ctx->bh) {
+			exfat_msg(ctx->sb, KERN_ERR,
+				  "unable to read bitmap sector at %llu",
+				  (unsigned long long)sect);
+			return -EIO;
+		}
+	}
+
+	*cluster_in_use = !!(ctx->bh->b_data[off] & (1 << shift));
+	return 0;
+}
+
+/*
+ * update first_in_use and nr_in_use with the first zone of used
+ * clusters starting from start_cluster (2-based on input; the
+ * returned first_in_use is 2-based as well).  nr_in_use is 0 when no
+ * used cluster remains before the end of the heap.
+ */
+int exfat_test_bitmap(struct exfat_bitmap_ctx *ctx, uint32_t start_cluster,
+		      uint32_t *first_in_use, uint32_t *nr_in_use)
+{
+	bool in_use = false;
+	int error = 0;
+	struct exfat_sb_info *sbi = EXFAT_SB(ctx->sb);
+
+	start_cluster -= 2;
+
+	/*
+	 * scan bitmap until we find a cluster that is in use.
+	 */
+	while (1) {
+		if (start_cluster == sbi->cluster_count) {
+			/*
+			 * readched end of disk: no more in use
+			 * cluster found.
+			 */
+			*first_in_use = sbi->cluster_count;
+			*nr_in_use = 0;
+			return 0;
+		}
+		error = exfat_test_bitmap_cluster(ctx, start_cluster, &in_use);
+		if (error)
+			return error;
+		if (in_use)
+			break;
+		++start_cluster;
+	}
+
+
+	/*
+	 * update first_in_use, and scan until a free cluster is
+	 * found.
+	 */
+	*first_in_use = start_cluster + 2;
+	*nr_in_use = 0;
+	while (1) {
+		error = exfat_test_bitmap_cluster(ctx, start_cluster, &in_use);
+		if (error)
+			return error;
+		if (!in_use)
+			break;
+		++(*nr_in_use);
+		++start_cluster;
+	}
+	return 0;
+}
+
+/*
+ * Locate the allocation bitmap entry in the root directory, validate
+ * its placement and size, then compute the initial free cluster
+ * count.  Called at mount time.
+ *
+ * Fixes over the previous version: the two placement checks used &&
+ * where || is required (a cluster cannot be both < 2 and beyond the
+ * heap, so they never fired), and be->length - a 64-bit on-disk field
+ * already read with __le64_to_cpu above - was inconsistently
+ * converted with __le32_to_cpu in two places.
+ */
+int exfat_init_bitmap(struct inode *root)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(root->i_sb);
+	struct exfat_bitmap_entry *be;
+	struct exfat_dir_ctx dctx;
+	u32 first_bitmap_cluster;
+	u32 last_bitmap_cluster;
+
+	int error;
+
+	mutex_init(&sbi->bitmap_mutex);
+
+	error = exfat_init_dir_ctx(root, &dctx, 0);
+	if (error)
+		return error;
+
+try_bitmap:
+	error = -ENOENT;
+	be = __exfat_dentry_next(&dctx, E_EXFAT_BITMAP, 0xff, true, NULL);
+	if (!be) {
+		exfat_msg(root->i_sb, KERN_ERR, "root directory does not "
+			  "have a bitmap entry.");
+		goto fail;
+	}
+
+	if (exfat_bitmap_nr(be->flags) != 0)
+		/*
+		 * not expected to find a second bitmap entry here
+		 * since we checked during superblock fill that we
+		 * were not on a texFAT volume ...
+		 */
+		goto try_bitmap;
+
+
+	error = -EINVAL;
+	if (__le64_to_cpu(be->length) * 8 < sbi->cluster_count) {
+		exfat_msg(root->i_sb, KERN_INFO, "bitmap does not cover "
+			  "the whole cluster heap.");
+		goto fail;
+	}
+
+	first_bitmap_cluster = __le32_to_cpu(be->cluster_addr);
+	last_bitmap_cluster = first_bitmap_cluster +
+		(__le64_to_cpu(be->length) >> sbi->clusterbits);
+
+	/*
+	 * check that bitmap start and end clusters are inside the
+	 * disk.
+	 */
+	error = -ERANGE;
+	if (first_bitmap_cluster < 2 ||
+	    first_bitmap_cluster >= sbi->cluster_count) {
+		exfat_msg(root->i_sb, KERN_ERR, "bitmap start cluster is "
+			  "outside disk limits.");
+		goto fail;
+	}
+	if (last_bitmap_cluster < 2 ||
+	    last_bitmap_cluster >= sbi->cluster_count) {
+		exfat_msg(root->i_sb, KERN_ERR, "bitmap last cluster is "
+			  "outside disk limits.");
+		goto fail;
+	}
+
+	sbi->bitmap_length = __le64_to_cpu(be->length);
+	sbi->first_bitmap_sector = exfat_cluster_sector(sbi,
+					__le32_to_cpu(be->cluster_addr));
+	sbi->last_bitmap_sector = sbi->first_bitmap_sector +
+		DIV_ROUND_UP(sbi->bitmap_length, sbi->sectorsize);
+
+	error = exfat_get_free_cluster_count(root->i_sb, &sbi->free_clusters);
+	if (error)
+		goto fail;
+
+	sbi->prev_free_cluster = 0;
+
+	exfat_cleanup_dir_ctx(&dctx);
+	return 0;
+fail:
+	exfat_cleanup_dir_ctx(&dctx);
+	return error;
+}
+
+/*
+ * Release the bitmap bh cached in the superblock info, if any.
+ * Called at unmount time.
+ */
+void exfat_exit_bitmap(struct super_block *sb)
+{
+	struct buffer_head *bh = EXFAT_SB(sb)->cur_bitmap_bh;
+
+	if (bh)
+		brelse(bh);
+}
diff -Nruw linux-6.4-fbx/fs/exfat-fbx./dir.c linux-6.4-fbx/fs/exfat-fbx/dir.c
--- linux-6.4-fbx/fs/exfat-fbx./dir.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/fs/exfat-fbx/dir.c	2023-03-14 12:13:36.307603128 +0100
@@ -0,0 +1,402 @@
+/*
+ * dir.c for exfat
+ * Created by <nschichan@freebox.fr> on Tue Aug 20 11:42:46 2013
+ */
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/slab.h>
+#include <linux/nls.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+/*
+ * setup an exfat_dir_ctx structure so that __exfat_dentry_next can
+ * work with it.
+ */
+int exfat_init_dir_ctx(struct inode *inode, struct exfat_dir_ctx *ctx,
+		       off_t start)
+{
+	u32 cluster = EXFAT_I(inode)->first_cluster;
+
+	memset(ctx, 0, sizeof (*ctx));
+
+	if (cluster == 0) {
+		ctx->empty = true;
+		ctx->sb = inode->i_sb;
+		return 0;
+	}
+
+	if (cluster < EXFAT_CLUSTER_FIRSTVALID ||
+	    cluster > EXFAT_CLUSTER_LASTVALID) {
+		exfat_msg(inode->i_sb, KERN_ERR, "exfat_init_dir_ctx: invalid "
+			  "cluster %u", cluster);
+		return -EINVAL;
+	}
+
+	start &= ~(0x20 - 1);
+	if (start == 0)
+		ctx->off = -1;
+	else
+		ctx->off = start - 0x20;
+
+	ctx->sb = inode->i_sb;
+	ctx->inode = inode;
+
+	return 0;
+}
+
+void exfat_cleanup_dir_ctx(struct exfat_dir_ctx *dctx)
+{
+	if (dctx->bh)
+		brelse(dctx->bh);
+}
+
+/*
+ * calculate the checksum for the current direntry. fields containing
+ * the checksum for the first entry is not part of the checksum
+ * calculation.
+ */
+u16 exfat_direntry_checksum(void *data, u16 checksum, bool first)
+{
+	u8 *ptr = data;
+	int i;
+
+	for (i = 0; i < 0x20; ++i) {
+		if (first && (i == 2 || i == 3))
+			continue ;
+		checksum = ((checksum << 15) | (checksum >> 1)) + (u16)ptr[i];
+	}
+	return checksum;
+}
+
+u32 exfat_dctx_fpos(struct exfat_dir_ctx *dctx)
+{
+	return dctx->off;
+}
+
+u64 exfat_dctx_dpos(struct exfat_dir_ctx *dctx)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(dctx->sb);
+
+	return (dctx->sector << sbi->sectorbits) +
+		(dctx->off & sbi->sectormask);
+}
+
+static int exfat_get_dctx_disk_cluster(struct exfat_dir_ctx *dctx,
+				       u32 file_cluster, u32 *disk_cluster)
+{
+	struct exfat_inode_info *info = EXFAT_I(dctx->inode);
+
+	if (info->flags & EXFAT_I_FAT_INVALID) {
+		*disk_cluster = info->first_cluster + file_cluster;
+		return 0;
+	} else {
+		return exfat_get_fat_cluster(dctx->inode, file_cluster,
+					     disk_cluster);
+	}
+}
+
+/*
+ * get the next typed dentry in the exfat_dir_ctx structure. can_skip
+ * indicates whether the entry must be immediately there in the entry
+ * stream. *end indicates whether end of directory entry stream is
+ * reached or not.
+ *
+ * only one buffer_head is kept at a time. subsequent calls to
+ * __exfat_dentry_next can invalidate pointers from previous calls due
+ * to that.
+ */
+void *__exfat_dentry_next(struct exfat_dir_ctx *dctx, int type, int mask,
+			  bool can_skip, bool *end)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(dctx->sb);
+
+	if (dctx->empty) {
+		if (end)
+			*end = true;
+		return NULL;
+	}
+
+	if (end)
+		*end = false;
+
+	if (dctx->off == -1)
+		dctx->off = 0;
+	else
+		dctx->off += 0x20;
+
+	for (;;) {
+		sector_t wanted_sector;
+		u32 file_cluster = dctx->off >> sbi->clusterbits;
+		u32 disk_cluster;
+		int error;
+		int sector_offset;
+		sector_t sector_in_cluster;
+
+		if (dctx->off >= dctx->inode->i_size) {
+			if (end)
+				*end = true;
+			return NULL;
+		}
+
+		error = exfat_get_dctx_disk_cluster(dctx, file_cluster,
+						    &disk_cluster);
+		if (error)
+			return NULL;
+
+		sector_in_cluster = (dctx->off >> sbi->sectorbits) %
+			sbi->sectors_per_cluster;
+
+		wanted_sector = exfat_cluster_sector(sbi, disk_cluster) +
+			sector_in_cluster;
+		if (wanted_sector != dctx->sector || !dctx->bh) {
+			/*
+			 * need to fetch a new sector from the current
+			 * cluster.
+			 */
+			dctx->sector = wanted_sector;
+			if (dctx->bh)
+				brelse(dctx->bh);
+			dctx->bh = sb_bread(dctx->sb, dctx->sector);
+			if (!dctx->bh)
+				return NULL;
+		}
+
+		sector_offset = dctx->off & sbi->sectormask;
+		if ((dctx->bh->b_data[sector_offset] & mask) == (type & mask))
+			/*
+			 * return pointer to entry if type matches the
+			 * one given.
+			 */
+			return dctx->bh->b_data + sector_offset;
+
+		if (dctx->bh->b_data[sector_offset] == 0 && end)
+			/*
+			 * set end if no more entries in this directory.
+			 */
+			*end = true;
+
+		if (dctx->bh->b_data[sector_offset] == 0 || !can_skip)
+			/*
+			 * handle can_skip / end of directory.
+			 */
+			return NULL;
+
+		/*
+		 * move to next entry.
+		 */
+		dctx->off += 0x20;
+	}
+	return NULL;
+}
+
+/*
+ * helper around __exfat_dentry_next that copies the content of the
+ * found entry in a user supplied buffer.
+ */
+int exfat_dentry_next(void *out, struct exfat_dir_ctx *dctx,
+			     int type, bool can_skip)
+{
+	bool end;
+
+	void *ptr = __exfat_dentry_next(dctx, type, 0xff, can_skip, &end);
+
+	if (!ptr) {
+		if (end)
+			return -ENOENT;
+		else {
+			exfat_msg(dctx->sb, KERN_INFO, "no ptr and "
+				  "end not reached: "
+				  "type %02x, can_skip %s\n", type,
+				  can_skip ? "true" : "false");
+			return -EIO;
+		}
+	}
+	memcpy(out, ptr, 0x20);
+	return 0;
+}
+
+/*
+ * extract name by parsing consecutive E_EXFAT_FILENAME entries in a
+ * caller provided buffer. also update the checksum on the fly.
+ *
+ * no utf16 to utf8 conversion is performed.
+ */
+int __exfat_get_name(struct exfat_dir_ctx *dctx, u32 name_length,
+			    __le16 *name, u16 *calc_checksum,
+			    struct exfat_iloc *iloc)
+{
+	__le16 *ptr;
+	int error;
+	int nr;
+
+	ptr = name;
+
+	error = -EIO;
+	nr = 0;
+	while (name_length) {
+		struct exfat_filename_entry *e;
+		u32 len = 15;
+
+		e = __exfat_dentry_next(dctx, E_EXFAT_FILENAME, 0xff,
+					false, NULL);
+		if (!e)
+			goto fail;
+		*calc_checksum = exfat_direntry_checksum(e, *calc_checksum,
+							 false);
+
+		if (iloc)
+			iloc->disk_offs[nr + 2] = exfat_dctx_dpos(dctx);
+		if (name_length < 15)
+			len = name_length;
+
+		memcpy(ptr, e->name_frag, len * sizeof (__le16));
+		name_length -= len;
+		ptr += len;
+		nr++;
+	}
+	return 0;
+
+fail:
+	return error;
+}
+
+/*
+ * walk the directory and invoke filldir on all found entries.
+ */
+static int __exfat_iterate(struct exfat_dir_ctx *dctx, struct file *file,
+			   struct dir_context *ctx)
+{
+	int error;
+	char *name = __getname();
+	__le16 *utf16name = __getname();
+
+	if (!name)
+		return -ENOMEM;
+	if (!utf16name) {
+		__putname(name);
+		return -ENOMEM;
+	}
+
+	for (;;) {
+		struct exfat_filedir_entry *efd;
+		struct exfat_stream_extension_entry *esx;
+		int dtype = DT_REG;
+		int name_length;
+		bool end;
+		u16 calc_checksum;
+		u16 expect_checksum;
+
+		/*
+		 * get the next filedir entry, we are allowed to skip
+		 * entries for that.
+		 */
+		error = -EIO;
+		efd = __exfat_dentry_next(dctx, E_EXFAT_FILEDIR, 0xff,
+					  true, &end);
+		if (!efd) {
+			if (end)
+				break;
+			else
+				goto fail;
+		}
+		expect_checksum = __le16_to_cpu(efd->set_checksum);
+		calc_checksum = exfat_direntry_checksum(efd, 0, true);
+
+		if (__le16_to_cpu(efd->attributes) & E_EXFAT_ATTR_DIRECTORY)
+			dtype = DT_DIR;
+
+		/*
+		 * get immediate stream extension entry.
+		 */
+		esx = __exfat_dentry_next(dctx, E_EXFAT_STREAM_EXT, 0xff, false,
+					  NULL);
+		if (!esx)
+			goto fail;
+		calc_checksum = exfat_direntry_checksum(esx, calc_checksum,
+							false);
+
+		/*
+		 * get immediate name.
+		 */
+		error = __exfat_get_name(dctx, esx->name_length, utf16name,
+					 &calc_checksum, NULL);
+		if (error) {
+			exfat_msg(dctx->sb, KERN_INFO, "__exfat_get_name "
+				  "has failed with %i", error);
+			goto fail;
+		}
+
+		if (calc_checksum != expect_checksum) {
+			exfat_msg(dctx->sb, KERN_INFO, "checksum: "
+				  "calculated %04x, expect %04x",
+				  calc_checksum, expect_checksum);
+			error = -EIO;
+			goto fail;
+		}
+
+		/*
+		 * convert utf16 to utf8 for kernel filldir callback.
+		 */
+		name_length = utf16s_to_utf8s(utf16name, esx->name_length,
+						   UTF16_LITTLE_ENDIAN,
+						   name, NAME_MAX + 2);
+		if (name_length < 0) {
+			error = name_length;
+			goto fail;
+		}
+		if (name_length > 255) {
+			error = -ENAMETOOLONG;
+			goto fail;
+		}
+
+		/*
+		 * tell the kernel we have an entry by calling
+		 * dir_emit
+		 */
+		if (dir_emit(ctx, name, name_length, 1, dtype))
+			ctx->pos = 2 + exfat_dctx_fpos(dctx);
+		else
+			goto fail;
+	}
+	__putname(name);
+	__putname(utf16name);
+	ctx->pos = file_inode(file)->i_size + 2;
+	return 0;
+fail:
+	__putname(name);
+	__putname(utf16name);
+	return error;
+}
+
+/*
+ * readdir callback for VFS. fill "." and "..", then invoke
+ * __exfat_iterate.
+ */
+int exfat_iterate(struct file *file, struct dir_context *ctx)
+{
+	struct exfat_dir_ctx dctx;
+	int error;
+	struct inode *inode = file_inode(file);
+
+	switch (ctx->pos) {
+	case 0:
+		if (!dir_emit_dots(file, ctx))
+			return 0;
+		fallthrough;
+	default:
+		if (ctx->pos >= inode->i_size + 2)
+			return 0;
+		error = exfat_init_dir_ctx(inode, &dctx, ctx->pos - 2);
+		if (error)
+			return error;
+		exfat_lock_super(inode->i_sb);
+		error = __exfat_iterate(&dctx, file, ctx);
+		exfat_unlock_super(inode->i_sb);
+		exfat_cleanup_dir_ctx(&dctx);
+		return error;
+	}
+}
diff -Nruw linux-6.4-fbx/fs/exfat-fbx./exfat.h linux-6.4-fbx/fs/exfat-fbx/exfat.h
--- linux-6.4-fbx/fs/exfat-fbx./exfat.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/fs/exfat-fbx/exfat.h	2023-11-14 18:40:09.820119701 +0100
@@ -0,0 +1,325 @@
+/*
+ * exfat.h for exfat
+ * Created by <nschichan@freebox.fr> on Tue Jul 23 12:37:12 2013
+ */
+
+#ifndef __EXFAT_H
+# define __EXFAT_H
+
+#define EXFAT_HASH_BITS	(8)
+#define EXFAT_HASH_SIZE	(1 << EXFAT_HASH_BITS)
+
+/*
+ * special inode number for root directory.
+ */
+#define EXFAT_ROOT_INO	1
+
+enum {
+	EXFAT_ERROR_ACTION_CONTINUE,
+	EXFAT_ERROR_ACTION_REMOUNT_RO,
+	EXFAT_ERROR_ACTION_PANIC,
+};
+
+struct exfat_sb_options {
+	kuid_t	uid;
+	kgid_t	gid;
+	mode_t	dmask;
+	mode_t	fmask;
+	int	time_offset;
+	int	time_offset_set;
+	int	error_action;
+};
+
+struct exfat_sb_info {
+	struct exfat_sb_options options;
+
+	struct buffer_head *sb_bh;
+	struct exfat_vbr *vbr;
+	bool dirty;
+
+	u32 sectorsize; /* in bytes*/
+	u32 clustersize; /* in bytes */
+	u32 sectors_per_cluster;
+	int sectorbits;
+	int clusterbits;
+	u32 sectormask;
+	u32 clustermask;
+
+	u32 fat_offset;
+	u32 fat_length;
+
+	u32 root_dir_cluster;
+	u32 cluster_heap_offset;
+	u32 cluster_count;
+
+	__le16	*upcase_table;
+	u32	upcase_len;
+
+	/*
+	 * bitmap fields
+	 */
+	struct mutex		bitmap_mutex;
+	u32			bitmap_length;
+	sector_t		first_bitmap_sector;
+	sector_t		last_bitmap_sector;
+	sector_t		cur_bitmap_sector;
+	u32			cur_bitmap_cluster;
+	struct buffer_head	*cur_bitmap_bh;
+	u32			free_clusters;
+	u32			prev_free_cluster;
+
+	/*
+	 * inode hash fields
+	 */
+	spinlock_t		inode_hash_lock;
+	struct hlist_head	inode_hash[EXFAT_HASH_SIZE];
+
+	struct mutex		sb_mutex;
+};
+
+struct exfat_cache_entry {
+	struct list_head list;
+	u32 file_cluster;
+	u32 disk_cluster;
+	u32 nr_contig;
+};
+
+struct exfat_cache {
+	struct mutex		mutex;
+	struct list_head	entries;
+	u32			nr_entries;
+};
+
+struct exfat_iloc {
+	u8 nr_secondary;
+	u32 file_off;
+	u64 disk_offs[19];
+};
+
+struct exfat_inode_info {
+	u8			flags;
+	u16			attributes;
+	u32			first_cluster;
+	u32			allocated_clusters;
+	loff_t			mmu_private;
+	struct exfat_iloc	iloc;
+	struct hlist_node	hash_list;
+
+	struct exfat_cache	exfat_cache;
+	struct inode		vfs_inode;
+};
+
+static inline struct exfat_sb_info *EXFAT_SB(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+static inline struct exfat_inode_info *EXFAT_I(struct inode *inode)
+{
+	return container_of(inode, struct exfat_inode_info, vfs_inode);
+}
+
+loff_t exfat_dir_links(struct inode *inode);
+
+int exfat_write_fat_contiguous(struct inode *inode, u32 first_cluster,
+			       u32 nr_clusters);
+int exfat_write_fat(struct inode *inode, u32 prev_cluster, u32 *clusters,
+		    u32 nr_clusters);
+
+__printf(3, 4) void exfat_msg(struct super_block *sb, const char *level,
+			      const char *fmt, ...);
+__printf(2, 3) void exfat_fs_error(struct super_block *sb,
+				   const char *fmt, ...);
+int exfat_get_fat_cluster(struct inode *inode, u32 fcluster, u32 *dcluster);
+int __exfat_get_fat_cluster(struct inode *inode, u32 fcluster, u32 *dcluster,
+			    bool eof_is_fatal);
+
+void exfat_inode_cache_init(struct inode *inode);
+void exfat_inode_cache_drop(struct inode *inode);
+
+int exfat_init_fat(struct super_block *sb);
+
+int exfat_init_bitmap(struct inode *root);
+void exfat_exit_bitmap(struct super_block *sb);
+int exfat_alloc_clusters(struct inode *inode, u32 hint_cluster,
+			 u32 *cluster, u32 nr);
+int exfat_free_clusters_inode(struct inode *inode, u32 start);
+
+
+/*
+ * read only bitmap accessors: used by EXFAT_IOCGETBITMAP ioctl.
+ */
+struct exfat_bitmap_ctx {
+	struct super_block *sb;
+	struct buffer_head *bh;
+	sector_t cur_sector;
+};
+
+int exfat_init_bitmap_context(struct super_block *sb,
+			      struct exfat_bitmap_ctx *ctx, u32 cluster);
+void exfat_exit_bitmap_context(struct exfat_bitmap_ctx *ctx);
+int exfat_test_bitmap(struct exfat_bitmap_ctx *ctx, uint32_t start_cluster,
+		      uint32_t *first_in_use, uint32_t *nr_in_use);
+
+
+/*
+ * return the physical sector address for a given cluster.
+ */
+static inline sector_t exfat_cluster_sector(struct exfat_sb_info *sbi,
+					    u32 cluster)
+{
+	return (sector_t)sbi->cluster_heap_offset + (cluster - 2) *
+		(sector_t)sbi->sectors_per_cluster;
+}
+
+/*
+ * in dir.c
+ */
+struct exfat_dir_ctx {
+	struct super_block	*sb;
+	struct inode		*inode;
+	struct buffer_head	*bh;
+
+	off_t			off; /* from beginning of directory */
+	sector_t		sector;
+	bool empty;
+};
+
+int exfat_init_dir_ctx(struct inode *inode, struct exfat_dir_ctx *ctx,
+		       off_t off);
+void exfat_cleanup_dir_ctx(struct exfat_dir_ctx *dctx);
+int exfat_get_cluster_hint(struct inode *inode, u32 *out_hint);
+int exfat_dentry_next(void *, struct exfat_dir_ctx *, int, bool);
+void *__exfat_dentry_next(struct exfat_dir_ctx *dctx, int type, int mask,
+			  bool can_skip, bool *end);
+u16 exfat_direntry_checksum(void *data, u16 checksum, bool first);
+u32 exfat_dctx_fpos(struct exfat_dir_ctx *dctx);
+u64 exfat_dctx_dpos(struct exfat_dir_ctx *dctx);
+int __exfat_get_name(struct exfat_dir_ctx *dctx, u32 name_length, __le16 *name,
+		     u16 *calc_checksum, struct exfat_iloc *iloc);
+
+/*
+ * in namei.c
+ */
+
+/*
+ * hold a pointer to an exfat dir entry, with the corresponding bh.
+ */
+struct dir_entry_buffer {
+	struct buffer_head *bh;
+	u32 off; /* in bytes, inside the buffer_head b_data array */
+	void *start;
+};
+
+int exfat_get_dir_entry_buffers(struct inode *dir, struct exfat_iloc *iloc,
+				struct dir_entry_buffer *entries,
+				size_t nr_entries);
+u16 exfat_dir_entries_checksum(struct dir_entry_buffer *entries, u32 nr);
+void exfat_dirty_dir_entries(struct dir_entry_buffer *entries,
+			     size_t nr_entries, bool sync);
+void exfat_write_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
+		      __le32 *datetime, u8 *time_cs, u8 *tz_offset);
+
+/*
+ * in inode.c
+ */
+
+int exfat_init_inodes(void);
+void exfat_exit_inodes(void);
+
+struct inode *exfat_iget(struct super_block *sb, loff_t disk_pos);
+void exfat_insert_inode_hash(struct inode *inode);
+void exfat_remove_inode_hash(struct inode *inode);
+int __exfat_write_inode(struct inode *inode, bool sync);
+
+/*
+ * in upcase.c
+ */
+int exfat_upcase_init(struct inode *root);
+static inline __le16 exfat_upcase_convert(struct super_block *sb, __le16 _c)
+{
+	u16 c = __le16_to_cpu(_c);
+
+	if (c >= EXFAT_SB(sb)->upcase_len)
+		return _c;
+	return EXFAT_SB(sb)->upcase_table[c];
+}
+
+/*
+ * superblock operations
+ */
+struct inode *exfat_alloc_inode(struct super_block *sb);
+void exfat_destroy_inode(struct inode *_inode);
+int exfat_drop_inode(struct inode *inode);
+void exfat_evict_inode(struct inode *inode);
+
+/*
+ * file operations
+ */
+int exfat_iterate(struct file *f, struct dir_context *ctx);
+long exfat_ioctl(struct file *, unsigned int, unsigned long);
+int exfat_truncate_blocks(struct inode *inode, loff_t newsize);
+
+/*
+ * inode operations
+ */
+struct dentry *exfat_inode_lookup(struct inode *, struct dentry *,
+				  unsigned int);
+int exfat_inode_create(struct mnt_idmap *, struct inode *dir,
+		       struct dentry *dentry, umode_t mode, bool excl);
+int exfat_inode_mkdir(struct mnt_idmap *, struct inode *dir,
+		      struct dentry *dentry, umode_t mode);
+
+mode_t exfat_make_mode(struct exfat_sb_info *sbi, mode_t mode, u16 attrs);
+
+int exfat_write_inode(struct inode *inode, struct writeback_control *wbc);
+
+int exfat_inode_unlink(struct inode *inode, struct dentry *dentry);
+
+int exfat_inode_rmdir(struct inode *inode, struct dentry *dentry);
+
+int exfat_getattr(struct mnt_idmap *, const struct path *, struct kstat *,
+		  u32, unsigned int);
+int exfat_setattr(struct mnt_idmap *, struct dentry *, struct iattr *);
+int exfat_rename(struct mnt_idmap *, struct inode *, struct dentry *,
+		 struct inode *, struct dentry *, unsigned int);
+
+/*
+ * address space operations
+ */
+int exfat_read_folio(struct file *file, struct folio *folio);
+void exfat_readahead(struct readahead_control *rac);
+int exfat_write_begin(struct file *file, struct address_space *mapping,
+		      loff_t pos, unsigned len,
+		      struct page **pagep, void **fsdata);
+int exfat_write_end(struct file *file, struct address_space *mapping,
+		    loff_t pos, unsigned len, unsigned copied,
+		    struct page *page, void *fsdata);
+int exfat_writepages(struct address_space *, struct writeback_control *);
+
+
+extern const struct inode_operations exfat_dir_inode_operations;
+extern const struct inode_operations exfat_file_inode_operations;
+extern const struct file_operations exfat_dir_operations;
+extern const struct file_operations exfat_file_operations;
+extern const struct address_space_operations exfat_address_space_operations;
+
+/*
+ * time functions
+ */
+void exfat_time_2unix(struct timespec64 *ts, u32 datetime, u8 time_cs,
+		      s8 tz_offset);
+void exfat_time_2exfat(struct exfat_sb_info *sbi, struct timespec64 *ts,
+		       u32 *datetime, u8 *time_cs, s8 *tz_offset);
+
+static inline void exfat_lock_super(struct super_block *sb)
+{
+	mutex_lock(&EXFAT_SB(sb)->sb_mutex);
+}
+
+static inline void exfat_unlock_super(struct super_block *sb)
+{
+	mutex_unlock(&EXFAT_SB(sb)->sb_mutex);
+}
+
+#endif /*! __EXFAT_H */
diff -Nruw linux-6.4-fbx/fs/exfat-fbx./exfat_fs.h linux-6.4-fbx/fs/exfat-fbx/exfat_fs.h
--- linux-6.4-fbx/fs/exfat-fbx./exfat_fs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/fs/exfat-fbx/exfat_fs.h	2023-02-24 19:09:23.405368085 +0100
@@ -0,0 +1,200 @@
+/*
+ * exfat_fs.h for exfat
+ * Created by <nschichan@freebox.fr> on Mon Jul 29 15:06:38 2013
+ */
+
+#ifndef __EXFAT_FS_H
+# define __EXFAT_FS_H
+
+/*
+ * exfat on disk structures and constants
+ */
+
+#include <linux/types.h>
+
+struct exfat_vbr {
+	u8	jump[3];
+	u8	fsname[8];
+	u8	reserved1[53];
+
+	__le64	partition_offset;
+	__le64	volume_length;
+
+	__le32	fat_offset;
+	__le32	fat_length;
+
+	__le32	cluster_heap_offset;
+	__le32	cluster_count;
+	__le32	cluster_root_dir;
+
+	__le32	serial_number;
+
+	__le16	fs_rev;
+	__le16	volume_flags;
+
+	u8	bytes_per_sector;
+	u8	sectors_per_cluster;
+
+	u8	fat_num;
+	u8	drive_select;
+	u8	heap_use_percent;
+
+	u8	reserved2[7];
+	u8	boot_code[390];
+
+	u8	boot_sig[2];
+};
+
+enum {
+	EXFAT_CLUSTER_FIRSTVALID	= 0x00000002,
+	EXFAT_CLUSTER_LASTVALID		= 0xfffffff6,
+	EXFAT_CLUSTER_BADBLK		= 0xfffffff7,
+	EXFAT_CLUSTER_MEDIATYPE		= 0xfffffff8,
+	EXFAT_CLUSTER_EOF		= 0xffffffff,
+};
+
+enum {
+	EXFAT_ACTIVEFAT_MASK = (1 << 0),
+	EXFAT_FLAG_DIRTY = (1 << 1),
+	EXFAT_FLAG_MEDIA_FAILURE = (1 << 2),
+};
+
+static inline int exfat_active_fat(u16 flags)
+{
+	return flags & EXFAT_ACTIVEFAT_MASK;
+}
+
+#define EXFAT_CHECKSUM_SECTORS	11
+
+enum {
+	EXFAT_I_ALLOC_POSSIBLE = (1 << 0),
+	EXFAT_I_FAT_INVALID = (1 << 1),
+};
+
+/*
+ * directory cluster content
+ */
+
+/*
+ * entry types
+ */
+enum {
+	E_EXFAT_EOD		= 0x00,
+	E_EXFAT_VOLUME_LABEL	= 0x83,
+	E_EXFAT_BITMAP		= 0x81,
+	E_EXFAT_UPCASE_TABLE	= 0x82,
+	E_EXFAT_GUID		= 0xa0,
+	E_EXFAT_PADDING		= 0xa1,
+	E_EXFAT_ACL		= 0xe2,
+	E_EXFAT_FILEDIR		= 0x85,
+	E_EXFAT_STREAM_EXT	= 0xc0,
+	E_EXFAT_FILENAME	= 0xc1,
+};
+
+/*
+ * file attributes in exfat_filedir_entry
+ */
+enum {
+	E_EXFAT_ATTR_RO		= (1 << 0),
+	E_EXFAT_ATTR_HIDDEN	= (1 << 1),
+	E_EXFAT_ATTR_SYSTEM	= (1 << 2),
+	/* bit 3 reserved */
+	E_EXFAT_ATTR_DIRECTORY	= (1 << 4),
+	E_EXFAT_ATTR_ARCHIVE	= (1 << 5),
+	/* bits 6-15 reserved */
+};
+
+/* type 0x83 */
+struct exfat_volume_label_entry {
+	u8 type;
+	u8 charcount;
+	__u16 label[11];
+	u8 reserved1[8];
+};
+
+static inline int exfat_bitmap_nr(u8 flags)
+{
+	return flags & 1;
+}
+
+/* type 0x81 */
+struct exfat_bitmap_entry {
+	u8 type;
+	u8 flags;
+	u8 reserved1[18];
+	__le32 cluster_addr;
+	__le64 length;
+};
+
+/* type 0x82 */
+struct exfat_upcase_entry {
+	u8 type;
+	u8 reserved1[3];
+	__le32 checksum;
+	u8 reserved2[12];
+	__le32 cluster_addr;
+	__le64 length;
+};
+
+/* type 0xa0 */
+struct exfat_guid_entry {
+	u8 type;
+	u8 secondary_count;
+	__le16 set_checksum;
+	__le16 flags;
+	u8 guid[16];
+	u8 reserved1[10];
+};
+
+/* type 0xa1 */
+struct exfat_padding_entry {
+	u8 type;
+	u8 reserved1[31];
+};
+
+/* type 0xe2 */
+struct exfat_acl_entry {
+	u8 type;
+	u8 reserved1[31];
+};
+
+/* type 0x85 */
+struct exfat_filedir_entry {
+	u8 type;
+	u8 secondary_count;
+	__le16 set_checksum;
+	__le16 attributes;
+	u8 reserved1[2];
+	__le32 create;
+	__le32 modified;
+	__le32 accessed;
+	u8 create_10ms;
+	u8 modified_10ms;
+	s8 create_tz_offset;
+	s8 modified_tz_offset;
+	s8 accessed_tz_offset;
+	u8 reserved2[7];
+};
+
+/* 0xc0 */
+struct exfat_stream_extension_entry {
+	u8 type;
+	u8 flags;
+	u8 reserved1;
+	u8 name_length;
+	__le16 name_hash;
+	u8 reserved2[2];
+	__le64 valid_data_length;
+	u8 reserved3[4];
+	__le32 first_cluster;
+	__le64 data_length;
+};
+
+/* 0xc1 */
+struct exfat_filename_entry {
+	u8 type;
+	u8 flags;
+	__le16 name_frag[15];
+};
+
+#endif /*! __EXFAT_FS_H */
diff -Nruw linux-6.4-fbx/fs/exfat-fbx./fat.c linux-6.4-fbx/fs/exfat-fbx/fat.c
--- linux-6.4-fbx/fs/exfat-fbx./fat.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/fs/exfat-fbx/fat.c	2023-02-24 19:09:26.021439198 +0100
@@ -0,0 +1,424 @@
+/*
+ * fat.c for exfat
+ * Created by <nschichan@freebox.fr> on Mon Jul 29 19:43:38 2013
+ */
+
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/slab.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+#define MAX_CACHED_FAT	16
+
+/*
+ * helpers for exfat_next_fat_cluster.
+ */
+
+/*
+ * get the sector number in the fat where the next requested cluster
+ * number is to be found.
+ */
+static inline sector_t cluster_sector(struct exfat_sb_info *sbi, u32 cluster)
+{
+	return sbi->fat_offset + (((u64)cluster * sizeof (u32)) >> sbi->sectorbits);
+}
+
+/*
+ * get the offset in the fat sector where the next requested cluster
+ * number is to be found.
+ */
+static inline off_t cluster_offset(struct exfat_sb_info *sbi, u32 cluster)
+{
+	return (cluster * sizeof (u32)) & sbi->sectormask;
+}
+
+/*
+ * walk one step in the fat chain.
+ */
+static int exfat_next_fat_cluster(struct super_block *sb, u32 *cluster)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	sector_t sect = cluster_sector(sbi, *cluster);
+	off_t off = cluster_offset(sbi, *cluster);
+	struct buffer_head *bh;
+
+	bh = sb_bread(sb, sect);
+	if (!bh) {
+		exfat_msg(sb, KERN_ERR, "unable to read FAT sector at %llu",
+			  (unsigned long long)sect);
+		return -EIO;
+	}
+
+	*cluster = __le32_to_cpu(*(u32*)&bh->b_data[off]);
+	brelse(bh);
+	return 0;
+}
+
+/*
+ * setup inode cache
+ */
+void exfat_inode_cache_init(struct inode *inode)
+{
+	mutex_init(&EXFAT_I(inode)->exfat_cache.mutex);
+	EXFAT_I(inode)->exfat_cache.nr_entries = 0;
+	INIT_LIST_HEAD(&EXFAT_I(inode)->exfat_cache.entries);
+}
+
+/*
+ * drop inode cache content
+ */
+void exfat_inode_cache_drop(struct inode *inode)
+{
+	struct exfat_cache *cache = &EXFAT_I(inode)->exfat_cache;
+	struct exfat_cache_entry *e, *tmp;
+
+	mutex_lock(&cache->mutex);
+	list_for_each_entry_safe (e, tmp, &cache->entries, list) {
+		kfree(e);
+	}
+	INIT_LIST_HEAD(&cache->entries);
+	cache->nr_entries = 0;
+	mutex_unlock(&cache->mutex);
+}
+
+/*
+ * move the entry to the head of the list, this will make it less
+ * likely to be the victim in when caching new entries.
+ *
+ * caller must hold cache->mutex.
+ */
+static void __exfat_fat_lru(struct exfat_cache *cache,
+			  struct exfat_cache_entry *e)
+{
+	if (cache->entries.next != &e->list)
+		list_move(&e->list, &cache->entries);
+}
+
+/*
+ * find a cache entry that is close to the wanted fcluster (ideally
+ * spanning over the requested file cluster).
+ *
+ * caller must hold cache->mutex.
+ */
+static struct exfat_cache_entry *__exfat_cache_lookup(struct exfat_cache *cache,
+						      u32 fcluster)
+{
+	struct exfat_cache_entry *e;
+	struct exfat_cache_entry *best = NULL;
+
+	list_for_each_entry (e, &cache->entries, list) {
+		if (e->file_cluster <= fcluster &&
+		    e->file_cluster + e->nr_contig >= fcluster)
+			return e;
+
+		if (!best && e->file_cluster < fcluster)
+			best = e;
+		if (best && best->file_cluster < e->file_cluster &&
+		    e->file_cluster < fcluster)
+			best = e;
+	}
+	return best;
+}
+
+/*
+ * caller must hold cache->mutex.
+ */
+static int __exfat_cache_cluster(struct exfat_cache *cache,
+			       struct exfat_cache_entry *nearest,
+			       u32 fcluster, u32 dcluster)
+{
+	struct exfat_cache_entry *e;
+
+	/*
+	 * see if we can merge with the nearest entry. in the ideal
+	 * case, all cluster in the chain are contiguous, and only
+	 * one entry is needed for a single file.
+	 */
+	if (nearest &&
+	    nearest->file_cluster + nearest->nr_contig + 1 == fcluster &&
+	    nearest->disk_cluster + nearest->nr_contig + 1 == dcluster) {
+		list_move(&nearest->list, &cache->entries);
+		nearest->nr_contig++;
+		return 0;
+	}
+
+	/*
+	 * allocate a new entry or reuse an existing one if the number
+	 * of cached entries is too high.
+	 */
+	if (cache->nr_entries < MAX_CACHED_FAT) {
+		e = kmalloc(sizeof (*e), GFP_NOFS);
+		if (!e)
+			return -ENOMEM;
+		list_add(&e->list, &cache->entries);
+		++cache->nr_entries;
+	} else {
+		e = list_entry(cache->entries.prev, struct exfat_cache_entry,
+			       list);
+		list_move(&e->list, &cache->entries);
+	}
+
+
+	e->file_cluster = fcluster;
+	e->disk_cluster = dcluster;
+	e->nr_contig = 0;
+
+	return 0;
+}
+
+int __exfat_get_fat_cluster(struct inode *inode, u32 fcluster, u32 *dcluster,
+			    bool eof_is_fatal)
+{
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	struct exfat_cache *cache = &info->exfat_cache;
+	int error;
+	struct exfat_cache_entry *e;
+	u32 fcluster_start;
+
+	/*
+	 * initial translation: first file cluster is found in the
+	 * inode info.
+	 */
+	if (fcluster == 0) {
+		*dcluster = info->first_cluster;
+		return 0;
+	}
+
+	mutex_lock(&cache->mutex);
+	/*
+	 * try to find a cached entry either covering the file cluster
+	 * we want or at least close to the file cluster.
+	 */
+	e = __exfat_cache_lookup(cache, fcluster);
+	if (e && e->file_cluster <= fcluster &&
+	    e->file_cluster + e->nr_contig >= fcluster) {
+		/*
+		 * perfect match, entry zone covers the requested file
+		 * cluster.
+		 */
+		__exfat_fat_lru(cache, e);
+		*dcluster = e->disk_cluster + (fcluster - e->file_cluster);
+		mutex_unlock(&cache->mutex);
+		return 0;
+	}
+
+	if (e) {
+		/*
+		 * we have an entry, hopefully close enough, setup
+		 * cluster walk from there.
+		 */
+		*dcluster = e->disk_cluster + e->nr_contig;
+		fcluster_start = e->file_cluster + e->nr_contig;
+	} else {
+		/*
+		 * no entry, walk the FAT chain from the start of the
+		 * file.
+		 */
+		fcluster_start = 0;
+		*dcluster = info->first_cluster;
+	}
+
+	/*
+	 * walk the FAT chain the number of times required to get the
+	 * disk cluster corresponding to the file cluster.
+	 */
+	while (fcluster_start != fcluster) {
+		error = exfat_next_fat_cluster(inode->i_sb, dcluster);
+		if (error) {
+			mutex_unlock(&cache->mutex);
+			return error;
+		}
+		if (*dcluster == EXFAT_CLUSTER_EOF) {
+			if (eof_is_fatal)
+				/*
+				 * exfat_fill_root uses
+				 * __exfat_get_fat_cluster with
+				 * eof_is_fatal set to false, as the
+				 * root inode does not have a size
+				 * field and thus requires a complete
+				 * FAT walk to compute the size.
+				 */
+				exfat_fs_error(inode->i_sb, "premature EOF in FAT "
+					       "chain. file cluster %u out "
+					       "of %u\n", fcluster_start,
+					       fcluster);
+			mutex_unlock(&cache->mutex);
+			return -EIO;
+		}
+		if (*dcluster < EXFAT_CLUSTER_FIRSTVALID) {
+			exfat_fs_error(inode->i_sb, "invalid cluster %u found "
+				       "in fat chain.", *dcluster);
+			mutex_unlock(&cache->mutex);
+			return -EIO;
+		}
+		++fcluster_start;
+	}
+
+	/*
+	 * cache the result.
+	 */
+	__exfat_cache_cluster(cache, e, fcluster, *dcluster);
+	mutex_unlock(&cache->mutex);
+	return 0;
+}
+
+int exfat_get_fat_cluster(struct inode *inode, u32 fcluster, u32 *dcluster)
+{
+	return __exfat_get_fat_cluster(inode, fcluster, dcluster, true);
+}
+
+int exfat_init_fat(struct super_block *sb)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct buffer_head *bh;
+	int error = 0;
+	u32 first, second;
+
+	bh = sb_bread(sb, sbi->fat_offset);
+	if (!bh) {
+		exfat_msg(sb, KERN_ERR, "unable to read FAT sector at %u",
+			  sbi->fat_offset);
+		return -EIO;
+	}
+
+	first = __le32_to_cpu(*(__le32*)(bh->b_data + 0));
+	second = __le32_to_cpu(*(__le32*)(bh->b_data + sizeof (__le32)));
+
+	if (first != 0xfffffff8 || second != 0xffffffff) {
+		exfat_msg(sb, KERN_INFO, "invalid FAT start: %08x, %08x",
+			  first, second);
+		error = -ENXIO;
+	}
+
+	brelse(bh);
+	return error;
+}
+
+/*
+ * fat write context, store the current buffer_head and current
+ * cluster to avoid having sb_bread all the time when the clusters are
+ * contiguous or at least not too far apart.
+ */
+struct fat_write_ctx {
+	struct super_block *sb;
+	struct buffer_head *bh;
+	u32 cur_cluster;
+};
+
+static void fat_init_write_ctx(struct fat_write_ctx *fwctx,
+				struct super_block *sb)
+{
+	memset(fwctx, 0, sizeof (*fwctx));
+	fwctx->sb = sb;
+}
+
+static void fat_exit_write_ctx(struct fat_write_ctx *fwctx)
+{
+	if (fwctx->bh)
+		brelse(fwctx->bh);
+}
+
+static int __fat_write_entry(struct fat_write_ctx *fwctx,
+			       u32 cluster, u32 next)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(fwctx->sb);
+	sector_t current_sector = cluster_sector(sbi, fwctx->cur_cluster);
+	sector_t wanted_sector = cluster_sector(sbi, cluster);
+	off_t off = cluster_offset(sbi, cluster);
+
+	/*
+	 * first see if we need a different buffer head from the
+	 * current one in the fat_write_ctx.
+	 */
+	if (current_sector != wanted_sector || !fwctx->bh) {
+		if (fwctx->bh)
+			brelse(fwctx->bh);
+		fwctx->bh = sb_bread(fwctx->sb, wanted_sector);
+		if (!fwctx->bh) {
+			exfat_msg(fwctx->sb, KERN_ERR,
+				  "unable to read FAT sector at %llu",
+				  (unsigned long long)wanted_sector);
+			return -EIO;
+		}
+	}
+
+	/*
+	 * set fat cluster to point to the next cluster, and mark bh
+	 * dirty so that the change hits the storage device.
+	 */
+	fwctx->cur_cluster = cluster;
+	*(__le32*)(fwctx->bh->b_data + off) = __cpu_to_le32(next);
+	mark_buffer_dirty(fwctx->bh);
+	return 0;
+}
+
+/*
+ * write nr_clusters contiguous clusters starting at first_cluster.
+ */
+int exfat_write_fat_contiguous(struct inode *inode, u32 first_cluster,
+			       u32 nr_clusters)
+{
+	u32 cluster;
+	struct fat_write_ctx fwctx;
+	int error = 0;
+
+	fat_init_write_ctx(&fwctx, inode->i_sb);
+	for (cluster = first_cluster;
+	     cluster < first_cluster + nr_clusters - 1;
+	     ++cluster) {
+		error = __fat_write_entry(&fwctx, cluster, cluster + 1);
+		if (error)
+			goto end;
+	}
+
+	/*
+	 * set EOF
+	 */
+	error = __fat_write_entry(&fwctx, cluster, EXFAT_CLUSTER_EOF);
+end:
+	fat_exit_write_ctx(&fwctx);
+	return error;
+
+}
+
+/*
+ * write cluster nr_clusters stored in clusters array, link with prev_cluster.
+ */
+int exfat_write_fat(struct inode *inode, u32 prev_cluster, u32 *clusters,
+		    u32 nr_clusters)
+{
+	u32 i;
+	struct fat_write_ctx fwctx;
+	int error;
+
+	if (!nr_clusters)
+		/* ??! */
+		return 0;
+
+	fat_init_write_ctx(&fwctx, inode->i_sb);
+
+	if (prev_cluster) {
+		/*
+		 * link with previous cluster if applicable.
+		 */
+		error = __fat_write_entry(&fwctx, prev_cluster, clusters[0]);
+		if (error)
+			goto end;
+	}
+	for (i = 0; i < nr_clusters - 1; ++i) {
+		error = __fat_write_entry(&fwctx, clusters[i], clusters[i + 1]);
+		if (error)
+			goto end;
+	}
+
+	/*
+	 * set EOF.
+	 */
+	error = __fat_write_entry(&fwctx, clusters[i], EXFAT_CLUSTER_EOF);
+
+ end:
+	fat_exit_write_ctx(&fwctx);
+	return error;
+}
diff -Nruw linux-6.4-fbx/fs/exfat-fbx./file.c linux-6.4-fbx/fs/exfat-fbx/file.c
--- linux-6.4-fbx/fs/exfat-fbx./file.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/fs/exfat-fbx/file.c	2023-05-22 20:06:44.067849120 +0200
@@ -0,0 +1,428 @@
+/*
+ * file.c for exfat
+ * Created by <nschichan@freebox.fr> on Tue Aug 20 14:39:41 2013
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+#include <linux/exfat_user.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+/*
+ * copy one fragment descriptor to the user-supplied array slot.
+ * Returns 0 on success or -EFAULT.
+ */
+static int append_fragment(struct exfat_fragment __user *ufrag,
+			   struct exfat_fragment *kfrag)
+{
+	if (copy_to_user(ufrag, kfrag, sizeof (*kfrag)))
+		return -EFAULT;
+	return 0;
+}
+
+/*
+ * start a new fragment descriptor: file-relative cluster fcluster maps
+ * to disk cluster dcluster.  Length starts at one cluster; the caller
+ * grows nr_clusters while the on-disk run stays contiguous.
+ */
+static void setup_fragment(struct exfat_sb_info *sbi,
+			  struct exfat_fragment *fragment, uint32_t fcluster,
+			  uint32_t dcluster)
+{
+	fragment->fcluster_start = fcluster;
+	fragment->dcluster_start = dcluster;
+	fragment->sector_start = exfat_cluster_sector(sbi, dcluster);
+	fragment->nr_clusters = 1;
+}
+
+/*
+ * EXFAT_IOCGETFRAGMENTS: report the physical (cluster/sector) extents
+ * backing a file, starting at head.fcluster_start, into the user
+ * supplied array of head.nr_fragments entries.  On return,
+ * uhead->nr_fragments holds the number of fragments written and
+ * uhead->fcluster_start the next file cluster to resume from.
+ */
+static int exfat_ioctl_get_fragments(struct inode *inode,
+				     struct exfat_fragment_head __user *uhead)
+{
+	struct exfat_fragment_head head;
+	struct exfat_fragment fragment;
+	u32 fcluster;
+	u32 prev_dcluster;
+	u32 cur_fragment;
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	int error;
+
+	memset(&fragment, 0, sizeof (fragment));
+
+	if (copy_from_user(&head, uhead, sizeof (head)))
+		return -EFAULT;
+
+	/* always report geometry, even for an empty reply */
+	if (put_user(sbi->sectorsize, &uhead->sector_size) ||
+	    put_user(sbi->clustersize, &uhead->cluster_size))
+		return -EFAULT;
+
+	if (!head.nr_fragments) {
+		/*
+		 * user did not provide space for fragments after
+		 * header.
+		 */
+		return 0;
+	}
+
+	if (head.fcluster_start >= info->allocated_clusters) {
+		/*
+		 * requested start cluster is after file EOF
+		 */
+		if (put_user(0, &uhead->nr_fragments))
+			return -EFAULT;
+		return 0;
+	}
+
+	if (info->flags & EXFAT_I_FAT_INVALID) {
+		/*
+		 * no FAT chain, this file has only one contiguous
+		 * fragment.
+		 */
+		fragment.fcluster_start = head.fcluster_start;
+		fragment.dcluster_start =
+			info->first_cluster + head.fcluster_start;
+		fragment.nr_clusters = info->allocated_clusters -
+			head.fcluster_start;
+		fragment.sector_start =
+			exfat_cluster_sector(sbi, fragment.dcluster_start);
+
+		if (copy_to_user(&uhead->fragments[0], &fragment,
+				 sizeof (fragment)))
+			return -EFAULT;
+		if (put_user(1, &uhead->nr_fragments))
+			return -EFAULT;
+		if (put_user(info->first_cluster + info->allocated_clusters,
+			     &uhead->fcluster_start))
+			return -EFAULT;
+		return 0;
+	}
+
+	fcluster = head.fcluster_start;
+	cur_fragment = 0;
+
+	/*
+	 * initial fragment setup
+	 */
+	error = exfat_get_fat_cluster(inode, fcluster,
+				      &prev_dcluster);
+	if (error)
+		return error;
+	setup_fragment(sbi, &fragment, fcluster, prev_dcluster);
+	++fcluster;
+	while (fcluster < info->allocated_clusters) {
+		int error;
+		u32 dcluster;
+
+		/*
+		 * walk one step in the FAT.
+		 */
+		error = exfat_get_fat_cluster(inode, fcluster, &dcluster);
+		if (error)
+			return error;
+
+		if (prev_dcluster == dcluster - 1) {
+			/*
+			 * dcluster and prev_dcluster are contiguous.
+			 */
+			++fragment.nr_clusters;
+		} else {
+			/*
+			 * put this cluster in the user array
+			 */
+			error = append_fragment(&uhead->fragments[cur_fragment],
+						&fragment);
+			if (error)
+				return error;
+
+			++cur_fragment;
+			if (cur_fragment == head.nr_fragments)
+				break;
+
+			/*
+			 * setup a new fragment.
+			 */
+			setup_fragment(sbi, &fragment, fcluster, dcluster);
+		}
+		++fcluster;
+		prev_dcluster = dcluster;
+	}
+
+	if (cur_fragment < head.nr_fragments) {
+		/*
+		 * flush the fragment still being built.  The return
+		 * value used to be ignored here, silently dropping a
+		 * copy_to_user() fault: propagate it instead.
+		 */
+		error = append_fragment(&uhead->fragments[cur_fragment],
+					&fragment);
+		if (error)
+			return error;
+		++cur_fragment;
+	}
+
+	/*
+	 * update nr_fragments in user supplied head.
+	 */
+	if (cur_fragment != head.nr_fragments &&
+	    put_user(cur_fragment, &uhead->nr_fragments))
+		return -EFAULT;
+
+	/*
+	 * update fcluster_start in user supplied head so the caller
+	 * can resume from where this call stopped.
+	 */
+	if (put_user(fcluster, &uhead->fcluster_start))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * EXFAT_IOCGETBITMAP: report up to head.nr_entries runs of in-use
+ * clusters from the allocation bitmap, starting at head.start_cluster
+ * (must be >= 2, the first valid data cluster).  On return,
+ * uhead->nr_entries holds the number of runs written and
+ * uhead->start_cluster the cluster to resume scanning from.
+ */
+static int exfat_ioctl_get_bitmap(struct super_block *sb,
+				  struct exfat_bitmap_head __user *uhead)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct exfat_bitmap_head head;
+	uint32_t i;
+	int error;
+	struct exfat_bitmap_ctx ctx;
+	uint32_t start_cluster;
+
+	if (copy_from_user(&head, uhead, sizeof (head)))
+		return -EFAULT;
+
+	start_cluster = head.start_cluster;
+	if (start_cluster < 2)
+		return -EINVAL;
+
+	error = exfat_init_bitmap_context(sb, &ctx, head.start_cluster);
+	if (error)
+		return error;
+	for (i = 0; i < head.nr_entries; ++i) {
+		uint32_t first_in_use;
+		uint32_t nr_in_use;
+
+		/*
+		 * note: no local 'error' here.  An inner declaration
+		 * used to shadow the function-scope variable, so a
+		 * failure from exfat_test_bitmap() reached out_error
+		 * with the outer error still 0 and was silently lost.
+		 */
+		error = exfat_test_bitmap(&ctx, start_cluster, &first_in_use,
+					  &nr_in_use);
+		if (error)
+			goto out_error;
+
+		/* sentinel: no in-use cluster found up to end of volume */
+		if (first_in_use == sbi->cluster_count)
+			break;
+		if (put_user(first_in_use, &uhead->entries[i].start_cluster))
+			goto out_efault;
+		if (put_user(nr_in_use, &uhead->entries[i].nr_clusters))
+			goto out_efault;
+		if (put_user(exfat_cluster_sector(sbi, first_in_use),
+			     &uhead->entries[i].sector_start))
+			goto out_efault;
+		if (put_user((u64)nr_in_use * sbi->sectors_per_cluster,
+			     &uhead->entries[i].nr_sectors))
+			goto out_efault;
+		/* the cluster right after the run is known free: skip it */
+		start_cluster = first_in_use + nr_in_use + 1;
+	}
+
+	exfat_exit_bitmap_context(&ctx);
+	if (put_user(i, &uhead->nr_entries))
+		return -EFAULT;
+	if (put_user(start_cluster, &uhead->start_cluster))
+		return -EFAULT;
+
+	return 0;
+
+out_efault:
+	error = -EFAULT;
+out_error:
+	exfat_exit_bitmap_context(&ctx);
+	return error;
+}
+
+/*
+ * EXFAT_IOCGETDIRENTS: copy the raw type byte of up to head.nr_entries
+ * directory entries, starting at head.offset (rounded down to a 32-byte
+ * entry boundary), into the user array.  On return, uhead->nr_entries
+ * holds the count read and uhead->offset the next directory offset.
+ */
+static int exfat_ioctl_get_dirents(struct inode *inode,
+				   struct exfat_dirent_head __user *uhead)
+{
+	struct exfat_dir_ctx dctx;
+	struct exfat_dirent_head head;
+	int error;
+	uint32_t i;
+
+	if (!S_ISDIR(inode->i_mode))
+		return -ENOTDIR;
+
+	if (copy_from_user(&head, uhead, sizeof (head)))
+		return -EFAULT;
+
+	/* make sure we're aligned on an entry boundary */
+	head.offset &= ~0x1f;
+
+	error = exfat_init_dir_ctx(inode, &dctx, head.offset);
+	if (error < 0)
+		return error;
+
+	error = 0;
+	for (i = 0; i < head.nr_entries; ++i) {
+		bool end;
+		/* type 0x00 / mask 0x00: match any entry type */
+		u8 *entry = __exfat_dentry_next(&dctx, 0, 0, false, &end);
+		u8 type;
+
+		if (!entry && end)
+			/* genuine end of file */
+			break;
+		if (!entry) {
+			/* something went wrong */
+			error = -EIO;
+			goto out;
+		}
+		type = *entry;
+
+		if (put_user(type, &uhead->entries[i])) {
+			error = -EFAULT;
+			goto out;
+		}
+	}
+
+	/*
+	 * update head nr_entries and offset (0x20 bytes per entry).
+	 */
+	if (put_user(i, &uhead->nr_entries))  {
+		error = -EFAULT;
+		goto out;
+	}
+	if (put_user(head.offset + 0x20 * i, &uhead->offset)) {
+		error = -EFAULT;
+		goto out;
+	}
+
+ out:
+	exfat_cleanup_dir_ctx(&dctx);
+	return error;
+}
+
+/*
+ * ioctl dispatcher for exfat files and directories.
+ * Returns -ENOTTY for unknown commands, per ioctl convention.
+ */
+long exfat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case EXFAT_IOCGETFRAGMENTS:
+		return exfat_ioctl_get_fragments(file_inode(file),
+						 (void __user*)arg);
+	case EXFAT_IOCGETBITMAP:
+		return exfat_ioctl_get_bitmap(file_inode(file)->i_sb,
+					      (void __user*)arg);
+	case EXFAT_IOCGETDIRENTS:
+		return exfat_ioctl_get_dirents(file_inode(file),
+					       (void __user*)arg);
+	default:
+		return -ENOTTY;
+	}
+}
+
+/*
+ * grow a file to newsize by zero-filling through the page cache
+ * (generic_cont_expand_simple), then update mtime and mark the inode
+ * dirty.  Synchronous (O_SYNC) expansion is not implemented yet: the
+ * data is still only marked dirty, a message is logged instead.
+ */
+static int exfat_cont_expand(struct inode *inode, loff_t newsize)
+{
+	int error;
+
+	error = generic_cont_expand_simple(inode, newsize);
+	if (error)
+		return error;
+
+	inode->i_mtime = current_time(inode);
+	mark_inode_dirty(inode);
+
+	if (IS_SYNC(inode))
+		exfat_msg(inode->i_sb, KERN_ERR, "TODO: cont_expand with "
+			  "sync mode.");
+	return 0;
+}
+
+/*
+ * release the clusters of an inode beyond newsize.  fcluster is the
+ * count of clusters to keep (newsize rounded up to a whole cluster);
+ * everything past it is freed by exfat_free_clusters_inode().
+ */
+int exfat_truncate_blocks(struct inode *inode, loff_t newsize)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	u32 fcluster = (newsize + sbi->clustersize - 1) >> sbi->clusterbits;
+	int error;
+
+	/* keep the block-mapping high-water mark consistent with newsize */
+	if (EXFAT_I(inode)->mmu_private > newsize)
+		EXFAT_I(inode)->mmu_private = newsize;
+
+	error = exfat_free_clusters_inode(inode, fcluster);
+	if (error) {
+		exfat_msg(inode->i_sb, KERN_INFO, "exfat_free_clusters_inode: "
+			  "%i", error);
+		return error;
+	}
+
+	return 0;
+}
+
+/*
+ * ->getattr: generic attributes, with st_blksize overridden to the
+ * cluster size (the natural I/O unit for this filesystem).
+ */
+int exfat_getattr(struct mnt_idmap *ns, const struct path *path,
+		  struct kstat *stat, u32 request_mask, unsigned int flags)
+{
+	struct inode *inode = d_inode(path->dentry);
+	generic_fillattr(&nop_mnt_idmap, inode, stat);
+	stat->blksize = EXFAT_SB(inode->i_sb)->clustersize;
+	return 0;
+}
+
+#define EXFAT_VALID_MODE       (S_IFREG | S_IFDIR | S_IRWXUGO)
+
+/*
+ * check whether a requested chmod mode is representable under the
+ * mount's fmask/dmask (exfat only stores a read-only attribute bit,
+ * not full permissions).  On success the mode is masked in place and 0
+ * is returned; -EPERM means the request cannot be represented.
+ */
+static int exfat_mode_fixup(struct inode *inode, umode_t *mode)
+{
+	mode_t mask, perm;
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+
+	if (S_ISDIR(*mode))
+		mask = sbi->options.dmask;
+	else
+		mask = sbi->options.fmask;
+
+	perm = *mode & ~(S_IFMT | mask);
+
+	/*
+	 * we want 'r' and 'x' bits when mask allows for it.
+	 */
+	if ((perm & (S_IRUGO | S_IXUGO)) !=
+	    (inode->i_mode & ~mask & (S_IRUGO | S_IXUGO))) {
+		return -EPERM;
+	}
+
+	/*
+	 * we want all 'w' bits or none, depending on mask.
+	 */
+	if ((perm & S_IWUGO) && (perm & S_IWUGO) != (~mask & S_IWUGO))
+		return -EPERM;
+	*mode &= ~mask;
+	return 0;
+}
+
+/*
+ * ->setattr: exfat has no on-disk owner/group/permissions, so uid/gid
+ * changes are refused unless they are no-ops, mode changes are
+ * silently dropped when not representable (vfat behavior), and size
+ * changes are dispatched to expand/truncate helpers.
+ */
+int exfat_setattr(struct mnt_idmap *ns, struct dentry *dentry,
+		  struct iattr *attrs)
+{
+	struct inode *inode = dentry->d_inode;
+	int error;
+
+	/*
+	 * can set uid/gid, only if it the same as the current one in
+	 * the inode.
+	 */
+	if (attrs->ia_valid & ATTR_UID &&
+	    !uid_eq(inode->i_uid, attrs->ia_uid))
+		return -EPERM;
+
+	if (attrs->ia_valid & ATTR_GID &&
+	    !gid_eq(inode->i_gid, attrs->ia_gid))
+		return -EPERM;
+
+	if (attrs->ia_valid & ATTR_MODE &&
+	    (attrs->ia_mode & ~EXFAT_VALID_MODE ||
+	     exfat_mode_fixup(inode, &attrs->ia_mode) < 0)) {
+		/*
+		 * silently ignore mode change if we're not OK with
+		 * it (same behavior as vfat).
+		 */
+		attrs->ia_valid &= ~ATTR_MODE;
+	}
+
+	if (attrs->ia_valid & ATTR_SIZE) {
+		/* wait for in-flight direct I/O before changing size */
+		inode_dio_wait(inode);
+		if (attrs->ia_size > inode->i_size) {
+			/*
+			 * expand file
+			 */
+			error = exfat_cont_expand(inode, attrs->ia_size);
+			if (error)
+				return error;
+		} else {
+			/*
+			 * shrink file
+			 */
+			truncate_setsize(inode, attrs->ia_size);
+			exfat_truncate_blocks(inode, attrs->ia_size);
+		}
+	}
+
+	setattr_copy(&nop_mnt_idmap, inode, attrs);
+	mark_inode_dirty(inode);
+	return 0;
+}
diff -Nruw linux-6.4-fbx/fs/exfat-fbx./inode.c linux-6.4-fbx/fs/exfat-fbx/inode.c
--- linux-6.4-fbx/fs/exfat-fbx./inode.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/fs/exfat-fbx/inode.c	2023-02-24 19:09:23.405368085 +0100
@@ -0,0 +1,277 @@
+/*
+ * inode.c for exfat
+ * Created by <nschichan@freebox.fr> on Wed Jul 24 16:15:52 2013
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/writeback.h>
+#include <linux/hash.h>
+
+#include "exfat_fs.h"
+#include "exfat.h"
+
+static struct kmem_cache *exfat_inodes_cachep;
+
+/*
+ * inode callbacks.
+ */
+/* ->alloc_inode: allocate an exfat_inode_info from the slab cache. */
+struct inode *exfat_alloc_inode(struct super_block *sb)
+{
+	struct exfat_inode_info *ei = kmem_cache_alloc(exfat_inodes_cachep,
+						       GFP_NOFS);
+
+	if (!ei)
+		return NULL;
+
+	return &ei->vfs_inode;
+}
+
+/* RCU callback: actually free the inode after the grace period. */
+static void exfat_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+
+	kmem_cache_free(exfat_inodes_cachep, EXFAT_I(inode));
+}
+
+/* ->destroy_inode: defer the free until after an RCU grace period. */
+void exfat_destroy_inode(struct inode *_inode)
+{
+	struct exfat_inode_info *inode = EXFAT_I(_inode);
+
+	call_rcu(&inode->vfs_inode.i_rcu, exfat_i_callback);
+}
+
+/*
+ * slab constructor: runs once per object when the slab page is
+ * created, not on every allocation.
+ */
+static void exfat_inode_init_once(void *ptr)
+{
+	struct exfat_inode_info *info = ptr;
+
+	INIT_HLIST_NODE(&info->hash_list);
+	exfat_inode_cache_init(&info->vfs_inode);
+	inode_init_once(&info->vfs_inode);
+}
+
+/*
+ * inode cache create/destroy.
+ */
+/* module init: create the inode slab cache.  Returns 0 or -ENOMEM. */
+int exfat_init_inodes(void)
+{
+	exfat_inodes_cachep = kmem_cache_create("exfat-inodes",
+				       sizeof (struct exfat_inode_info), 0,
+				       SLAB_RECLAIM_ACCOUNT |SLAB_MEM_SPREAD,
+				       exfat_inode_init_once);
+	if (!exfat_inodes_cachep)
+		return -ENOMEM;
+	return 0;
+}
+
+/* module exit: tear down the inode slab cache. */
+void exfat_exit_inodes(void)
+{
+	kmem_cache_destroy(exfat_inodes_cachep);
+}
+
+/* ->drop_inode: default VFS policy, no special retention rules. */
+int exfat_drop_inode(struct inode *inode)
+{
+	return generic_drop_inode(inode);
+}
+
+/*
+ * ->evict_inode: drop page cache, free the clusters of an unlinked
+ * inode, then detach from the private disk-offset hash and the
+ * directory-entry cache.  Ordering follows the usual evict pattern:
+ * pages first, clear_inode() before touching private state.
+ */
+void exfat_evict_inode(struct inode *inode)
+{
+	truncate_inode_pages_final(&inode->i_data);
+	if (!inode->i_nlink) {
+		inode->i_size = 0;
+		/* 0 clusters to keep: release the whole chain */
+		exfat_free_clusters_inode(inode, 0);
+	}
+	invalidate_inode_buffers(inode);
+	clear_inode(inode);
+	exfat_remove_inode_hash(inode);
+	exfat_inode_cache_drop(inode);
+}
+
+/*
+ * hash an inode's on-disk directory-entry offset into the private
+ * inode hash table.  Note hash_32() only sees the low 32 bits of
+ * disk_pos; collisions are resolved by exact compare in exfat_iget().
+ */
+static u32 exfat_hash(loff_t disk_pos)
+{
+	return hash_32(disk_pos, EXFAT_HASH_BITS);
+}
+
+/*
+ * look up a live inode by the disk offset of its first directory
+ * entry.  Returns a referenced inode (via igrab) or NULL if none is
+ * cached.  exfat uses its own hash keyed on disk offset because inode
+ * numbers are generated with iunique() and are not stable.
+ */
+struct inode *exfat_iget(struct super_block *sb, loff_t disk_pos)
+{
+	struct exfat_inode_info *info;
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct hlist_head *head = sbi->inode_hash + exfat_hash(disk_pos);
+	struct inode *ret = NULL;
+
+	spin_lock(&sbi->inode_hash_lock);
+	hlist_for_each_entry (info, head, hash_list) {
+		if (info->iloc.disk_offs[0] != disk_pos)
+			continue ;
+		/* igrab can fail for an inode being evicted: keep looking */
+		ret = igrab(&info->vfs_inode);
+		if (ret)
+			break;
+	}
+	spin_unlock(&sbi->inode_hash_lock);
+	return ret;
+}
+
+/* add an inode to the private disk-offset hash (see exfat_iget). */
+void exfat_insert_inode_hash(struct inode *inode)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	struct hlist_head *head = sbi->inode_hash +
+		exfat_hash(info->iloc.disk_offs[0]);
+
+	spin_lock(&sbi->inode_hash_lock);
+	hlist_add_head(&info->hash_list, head);
+	spin_unlock(&sbi->inode_hash_lock);
+}
+
+/*
+ * remove an inode from the private disk-offset hash and zero its
+ * location so later write_inode() calls know it is gone.
+ */
+void exfat_remove_inode_hash(struct inode *inode)
+{
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+
+	spin_lock(&sbi->inode_hash_lock);
+	info->iloc.disk_offs[0] = 0;
+	hlist_del_init(&info->hash_list);
+	spin_unlock(&sbi->inode_hash_lock);
+}
+
+/*
+ * calculate the number of links in a directory. this is the number of
+ * EXFAT_FILEDIR_ENTRY typed elements in the directory stream. This
+ * does not include the '.' and '..' entries.
+ *
+ * Returns the count, or a negative errno on read failure.
+ */
+loff_t exfat_dir_links(struct inode *inode)
+{
+	size_t ret = 0;
+	struct exfat_dir_ctx dctx;
+	int error;
+	bool end;
+
+	error = exfat_init_dir_ctx(inode, &dctx, 0);
+	if (error)
+		return error;
+
+	error = -EIO;
+	for (;;) {
+		struct exfat_filedir_entry *e =
+			__exfat_dentry_next(&dctx, E_EXFAT_FILEDIR, 0xff,
+					    true, &end);
+		if (!e) {
+			/* NULL + end means clean end of directory */
+			if (end)
+				error = 0;
+			goto out;
+		}
+		++ret;
+	}
+out:
+	exfat_cleanup_dir_ctx(&dctx);
+	if (error)
+		return error;
+	return ret;
+}
+
+/*
+ * compute a starting-point hint for the cluster allocator: the cluster
+ * immediately after the file's current last physical cluster, or a
+ * likely-free cluster for an empty file.  Returns 0 on success or a
+ * negative errno from the FAT walk.
+ */
+int exfat_get_cluster_hint(struct inode *inode, u32 *out_hint)
+{
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	int error;
+	u32 first_cluster = info->first_cluster;
+
+	if (!first_cluster) {
+		/*
+		 * empty file, return a cluster likely to be free.
+		 * (+2 skips the two reserved cluster numbers)
+		 */
+		*out_hint = EXFAT_SB(inode->i_sb)->prev_free_cluster + 2;
+		return 0;
+	}
+
+	if (info->flags & EXFAT_I_FAT_INVALID) {
+		/*
+		 * no fat run, all clusters are contiguous, set hint
+		 * to next last file cluster.
+		 */
+		*out_hint = first_cluster + info->allocated_clusters;
+		return 0;
+	}
+
+	/*
+	 * fat run available, walk it to get the last physical cluster
+	 * address and set hint to the immediate next physical
+	 * cluster.
+	 */
+	error = exfat_get_fat_cluster(inode, info->allocated_clusters - 1,
+				      out_hint);
+	if (error)
+		return error;
+	(*out_hint)++;
+	return 0;
+}
+
+/*
+ * write an inode's metadata back into its on-disk directory entry set
+ * (filedir + stream extension entries), recomputing the entry-set
+ * checksum.  Caller must hold the superblock lock.  No-ops for the
+ * root inode and for inodes whose directory entry is gone (unlinked).
+ */
+int __exfat_write_inode(struct inode *inode, bool sync)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	struct dir_entry_buffer entries[info->iloc.nr_secondary];
+	int error;
+	struct exfat_filedir_entry *efd;
+	struct exfat_stream_extension_entry *esx;
+	u16 checksum;
+
+	if (inode->i_ino == EXFAT_ROOT_INO)
+		return 0;
+
+	if (info->iloc.disk_offs[0] == 0) {
+		/*
+		 * write_inode() to unlinked inode: don't corrupt
+		 * superblock.
+		 */
+		return 0;
+	}
+
+	error = exfat_get_dir_entry_buffers(inode, &info->iloc,
+					    entries, info->iloc.nr_secondary);
+	if (error)
+		return error;
+
+	/* mirror the unix write bits into the exfat read-only attribute */
+	if (inode->i_mode & S_IWUGO)
+		info->attributes &= ~E_EXFAT_ATTR_RO;
+	else
+		info->attributes |= E_EXFAT_ATTR_RO;
+
+	efd = entries[0].start;
+	esx = entries[1].start;
+
+	efd->attributes = __cpu_to_le16(info->attributes);
+	/*
+	 * single assignment for both lengths (a redundant duplicate
+	 * store of data_length was removed here).
+	 */
+	esx->valid_data_length = esx->data_length =
+		__cpu_to_le64(inode->i_size);
+	esx->flags = info->flags;
+	esx->first_cluster = __cpu_to_le32(info->first_cluster);
+
+	exfat_write_time(sbi, &inode->i_ctime, &efd->create, &efd->create_10ms,
+			 &efd->create_tz_offset);
+	exfat_write_time(sbi, &inode->i_mtime, &efd->modified,
+			 &efd->modified_10ms, &efd->modified_tz_offset);
+	exfat_write_time(sbi, &inode->i_atime, &efd->accessed, NULL,
+			 &efd->accessed_tz_offset);
+
+	/* checksum covers the whole entry set and must be updated last */
+	checksum = exfat_dir_entries_checksum(entries, info->iloc.nr_secondary);
+	efd->set_checksum = __cpu_to_le16(checksum);
+
+	exfat_dirty_dir_entries(entries, info->iloc.nr_secondary, sync);
+
+	return 0;
+}
+
+/*
+ * ->write_inode: take the superblock lock and flush the inode's
+ * directory entries, synchronously for WB_SYNC_ALL writeback.
+ */
+int exfat_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+	int ret;
+
+	exfat_lock_super(inode->i_sb);
+	ret = __exfat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+	exfat_unlock_super(inode->i_sb);
+	return ret;
+}
diff -Nruw linux-6.4-fbx/fs/exfat-fbx./namei.c linux-6.4-fbx/fs/exfat-fbx/namei.c
--- linux-6.4-fbx/fs/exfat-fbx./namei.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/fs/exfat-fbx/namei.c	2023-05-22 20:06:44.067849120 +0200
@@ -0,0 +1,933 @@
+/*
+ * namei.c for exfat
+ * Created by <nschichan@freebox.fr> on Tue Aug 20 12:00:27 2013
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/nls.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+static u16 exfat_filename_hash_cont(struct super_block *sb,
+				    const __le16 *name, u16 hash, size_t len);
+
+
+/*
+ * convert a kernel timespec to the on-disk exfat representation:
+ * packed little-endian date/time plus optional 10ms component and
+ * timezone offset byte.  time_cs may be NULL (e.g. access time has no
+ * 10ms field).
+ */
+void exfat_write_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
+		      __le32 *datetime, u8 *time_cs, u8 *tz_offset)
+{
+	u32 cpu_datetime;
+
+	exfat_time_2exfat(sbi, ts, &cpu_datetime, time_cs, tz_offset);
+	*datetime = __cpu_to_le32(cpu_datetime);
+}
+
+/* inverse of exfat_write_time: on-disk date/time to kernel timespec. */
+static void exfat_read_time(struct timespec64 *ts, __le32 datetime, u8 time_cs,
+			    u8 tz_offset)
+{
+	u32 cpu_datetime = __le32_to_cpu(datetime);
+	exfat_time_2unix(ts, cpu_datetime, time_cs, tz_offset);
+}
+
+/*
+ * fill every sector of a cluster with zeros, optionally syncing each
+ * buffer.  Used when extending a directory so stale data never appears
+ * as directory entries.
+ *
+ * NOTE(review): sb_bread() reads the sector from disk before we
+ * overwrite it entirely; sb_getblk() + set_buffer_uptodate() would
+ * avoid the read — left as-is to preserve behavior.
+ */
+static int exfat_zero_cluster(struct super_block *sb, u32 cluster, bool sync)
+{
+	sector_t start = exfat_cluster_sector(EXFAT_SB(sb), cluster);
+	sector_t end = start + EXFAT_SB(sb)->sectors_per_cluster;
+	sector_t sect;
+
+	for (sect = start; sect < end; ++sect) {
+		struct buffer_head *bh = sb_bread(sb, sect);
+		if (!bh) {
+			exfat_msg(sb, KERN_WARNING,
+				  "unable to read sector %llu for zeroing.",
+				  (unsigned long long)sect);
+			return -EIO;
+		}
+		memset(bh->b_data, 0, bh->b_size);
+		mark_buffer_dirty(bh);
+		if (sync)
+			sync_dirty_buffer(bh);
+		brelse(bh);
+	}
+	return 0;
+}
+
+/*
+ * use per superblock fmask or dmask, depending on provided entry
+ * attribute, to restrict the provided mode even more; the on-disk
+ * read-only attribute strips all write bits.
+ */
+mode_t exfat_make_mode(struct exfat_sb_info *sbi, mode_t mode, u16 attrs)
+{
+	if (attrs & E_EXFAT_ATTR_DIRECTORY)
+		mode = (mode & ~sbi->options.dmask) | S_IFDIR;
+	else
+		mode = (mode & ~sbi->options.fmask) | S_IFREG;
+	if (attrs & E_EXFAT_ATTR_RO)
+		mode &= ~S_IWUGO;
+	return mode;
+}
+
+/*
+ * populate inode fields from an on-disk filedir + stream extension
+ * entry pair.  Returns an existing cached inode when one is already
+ * hashed at this disk location, otherwise a freshly filled inode, or
+ * NULL on allocation/read failure.
+ */
+static struct inode *exfat_populate_inode(struct super_block *sb,
+			  const struct exfat_filedir_entry *efd,
+			  const struct exfat_stream_extension_entry *esx,
+			  const struct exfat_iloc *iloc)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct inode *inode;
+
+	/* already cached for this directory-entry location? */
+	inode = exfat_iget(sb, iloc->disk_offs[0]);
+	if (inode)
+		return inode;
+
+	inode = new_inode(sb);
+	if (!inode)
+		return NULL;
+
+	/* inode numbers are generated, not derived from disk position */
+	inode->i_ino = iunique(sb, EXFAT_ROOT_INO);
+	EXFAT_I(inode)->first_cluster = __le32_to_cpu(esx->first_cluster);
+	EXFAT_I(inode)->flags = esx->flags;
+	EXFAT_I(inode)->iloc = *iloc;
+	EXFAT_I(inode)->attributes = __le16_to_cpu(efd->attributes);
+
+	/* allocated cluster count = size rounded up to a whole cluster */
+	inode->i_size = __le64_to_cpu(esx->data_length);
+	EXFAT_I(inode)->allocated_clusters = inode->i_size >> sbi->clusterbits;
+	if (inode->i_size & sbi->clustermask)
+		EXFAT_I(inode)->allocated_clusters++;
+	inode->i_blocks = EXFAT_I(inode)->allocated_clusters <<
+		(sbi->clusterbits - 9);
+	EXFAT_I(inode)->mmu_private = inode->i_size;
+
+	inode->i_uid = sbi->options.uid;
+	inode->i_gid = sbi->options.gid;
+	inode->i_mode = exfat_make_mode(sbi, S_IRWXUGO,
+					EXFAT_I(inode)->attributes);
+
+	if (EXFAT_I(inode)->attributes & E_EXFAT_ATTR_DIRECTORY) {
+		/* nlink = entries + 2 for '.' and '..' */
+		loff_t nlinks = exfat_dir_links(inode);
+		if (nlinks < 0)
+			goto iput;
+		set_nlink(inode, nlinks + 2);
+	} else
+		set_nlink(inode, 1);
+
+	if (esx->data_length != esx->valid_data_length)
+		exfat_msg(sb, KERN_WARNING, "data length (%llu) != valid data "
+			  "length (%llu)", __le64_to_cpu(esx->data_length),
+			  __le64_to_cpu(esx->valid_data_length));
+
+	if (S_ISDIR(inode->i_mode)) {
+		inode->i_fop = &exfat_dir_operations;
+		inode->i_op = &exfat_dir_inode_operations;
+	} else {
+		/* until we support write */
+		inode->i_fop = &exfat_file_operations;
+		inode->i_op = &exfat_file_inode_operations;
+		inode->i_data.a_ops = &exfat_address_space_operations;
+	}
+
+	exfat_read_time(&inode->i_ctime, efd->create, efd->create_10ms,
+			efd->create_tz_offset);
+	exfat_read_time(&inode->i_mtime, efd->modified, efd->modified_10ms,
+			efd->modified_tz_offset);
+	exfat_read_time(&inode->i_atime, efd->accessed, 0,
+			efd->accessed_tz_offset);
+
+	/* hash both in our disk-offset table and the VFS icache */
+	exfat_insert_inode_hash(inode);
+	insert_inode_hash(inode);
+	return inode;
+iput:
+	iput(inode);
+	return NULL;
+}
+
+/*
+ * ->lookup: scan the parent directory for an entry set matching the
+ * dentry's name.  On a hit the inode is instantiated with d_add() and
+ * NULL is returned; a clean miss also returns NULL (negative dentry);
+ * other errors return ERR_PTR.  The per-entry name hash is compared
+ * first so most non-matching entry sets are skipped cheaply.
+ */
+struct dentry *exfat_inode_lookup(struct inode *parent, struct dentry *dentry,
+				  unsigned int flags)
+{
+	struct super_block *sb = dentry->d_sb;
+	struct exfat_dir_ctx dctx;
+	int error;
+	struct exfat_filedir_entry efd;
+	struct exfat_stream_extension_entry esx;
+	__le16 *name = __getname();
+	__le16 *utf16_name = __getname();
+	unsigned int utf16_name_length;
+	__le16 name_hash;
+
+	exfat_lock_super(parent->i_sb);
+
+	if (!name || !utf16_name) {
+		error = -ENOMEM;
+		goto putnames;
+	}
+
+	utf16_name_length = utf8s_to_utf16s(dentry->d_name.name,
+					    dentry->d_name.len,
+					    UTF16_LITTLE_ENDIAN,
+					    utf16_name, 255 + 2);
+	if (utf16_name_length > 255) {
+		error = -ENAMETOOLONG;
+		goto putnames;
+	}
+
+	/*
+	 * get the name hash of the wanted inode early so that we can
+	 * skip entries with only an efd and an esx entry.
+	 */
+	name_hash = __cpu_to_le16(exfat_filename_hash_cont(sb, utf16_name, 0,
+							   utf16_name_length));
+
+	/*
+	 * create a dir ctx from the parent so that we can iterate on
+	 * it.
+	 */
+	error = exfat_init_dir_ctx(parent, &dctx, 0);
+	if (error)
+		goto putnames;
+
+	for (;;) {
+		u32 name_length;
+		struct inode *inode;
+		u16 calc_checksum;
+		u16 expect_checksum;
+		struct exfat_iloc iloc;
+
+		memset(&iloc, 0, sizeof (iloc));
+		/*
+		 * get filedir and stream extension entries.
+		 */
+		error = exfat_dentry_next(&efd, &dctx, E_EXFAT_FILEDIR, true);
+		if (error < 0)
+			/* end of directory reached, or other error */
+			goto cleanup;
+
+		/* 18 secondaries max: 1 esx + 17 name entries */
+		error = -EINVAL;
+		if (efd.secondary_count > 18)
+			goto cleanup;
+
+		iloc.file_off = exfat_dctx_fpos(&dctx);
+		iloc.disk_offs[0] = exfat_dctx_dpos(&dctx);
+		iloc.nr_secondary = efd.secondary_count + 1;
+
+		error = exfat_dentry_next(&esx, &dctx, E_EXFAT_STREAM_EXT,
+					  false);
+		if (error)
+			goto cleanup;
+
+		if (esx.name_hash != name_hash)
+			/*
+			 * stored name hash is not the same as the
+			 * wanted hash: no point in processing the
+			 * remaining entries for the current efd/esx
+			 * any further.
+			 */
+			continue ;
+
+		/*
+		 * now that the hash matches it is ok to update the
+		 * checksum for the efd and esx entries.
+		 */
+		expect_checksum = __le16_to_cpu(efd.set_checksum);
+		calc_checksum = exfat_direntry_checksum(&efd, 0, true);
+
+		calc_checksum = exfat_direntry_checksum(&esx,
+							calc_checksum, false);
+		iloc.disk_offs[1] = exfat_dctx_dpos(&dctx);
+
+		/*
+		 * fetch name (also folds the name entries into the
+		 * running checksum and records their disk offsets).
+		 */
+		name_length = esx.name_length;
+		error = __exfat_get_name(&dctx, name_length, name,
+					 &calc_checksum, &iloc);
+		if (error)
+			goto cleanup;
+
+		if (calc_checksum != expect_checksum) {
+			exfat_msg(dctx.sb, KERN_INFO, "checksum: "
+				  "calculated %04x, expect %04x",
+				  calc_checksum, expect_checksum);
+			error = -EIO;
+			goto cleanup;
+		}
+
+		/* hash can collide: confirm with an exact name compare */
+		if (utf16_name_length != name_length)
+			continue ;
+
+		if (memcmp(utf16_name, name, name_length * sizeof (__le16)))
+			continue ;
+
+		inode = exfat_populate_inode(sb, &efd, &esx, &iloc);
+		if (inode) {
+			d_add(dentry, inode);
+			error = 0;
+		} else
+			error = -EIO;
+		goto cleanup;
+	}
+
+cleanup:
+	exfat_cleanup_dir_ctx(&dctx);
+putnames:
+	if (name)
+		__putname(name);
+	if (utf16_name)
+		__putname(utf16_name);
+	exfat_unlock_super(parent->i_sb);
+	if (error && error != -ENOENT)
+		return ERR_PTR(error);
+	return NULL;
+}
+
+/*
+ * find nr unused directory entries (type & 0x80 == 0), growing the
+ * directory by one or two zeroed clusters and retrying when no run of
+ * nr consecutive free entries exists yet.
+ */
+static int exfat_find_dir_iloc(struct inode *inode, int nr,
+			       struct exfat_iloc *iloc)
+{
+	struct exfat_dir_ctx dctx;
+	bool end = false;
+	int error;
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	u32 nr_new_clusters, i;
+	u32 new_clusters[2];
+	u32 hint_cluster;
+
+retry:
+	memset(iloc, 0, sizeof (*iloc));
+	iloc->nr_secondary = nr;
+
+	error = exfat_init_dir_ctx(inode, &dctx, 0);
+	if (error)
+		return error;
+
+	while (1) {
+		int nr_free;
+		void *ent;
+
+		/* seek to the next free entry (high type bit clear) */
+		ent = __exfat_dentry_next(&dctx, 0x00, 0x80, true, &end);
+		if (end)
+			break;
+		if (!ent) {
+			exfat_cleanup_dir_ctx(&dctx);
+			return -EIO;
+		}
+
+		/* count how many free entries follow consecutively */
+		nr_free = 1;
+		iloc->file_off = exfat_dctx_fpos(&dctx);
+		iloc->disk_offs[0] = exfat_dctx_dpos(&dctx);
+		while (__exfat_dentry_next(&dctx, 0x00, 0x80, false, &end)
+		       != NULL && nr_free < nr) {
+			iloc->disk_offs[nr_free] = exfat_dctx_dpos(&dctx);
+			++nr_free;
+		}
+		if (nr_free == nr) {
+			/*
+			 * we found enough consecutive free entries.
+			 */
+			exfat_cleanup_dir_ctx(&dctx);
+			return 0;
+		}
+
+	}
+
+	/*
+	 * not enough consecutive free entries found, kick the cluster
+	 * allocator and retry.
+	 */
+	exfat_cleanup_dir_ctx(&dctx);
+
+	/*
+	 * with the smallest cluster size, a file can take more than
+	 * two clusters. allocate two in that case regardless of what
+	 * is needed to make code simpler.
+	 */
+	switch (sbi->clustersize) {
+	case 512:
+		nr_new_clusters = 2;
+		break;
+	default:
+		nr_new_clusters = 1;
+		break;
+	}
+
+	/*
+	 * get a hint cluster for the cluster allocator.
+	 */
+	error = exfat_get_cluster_hint(inode, &hint_cluster);
+	if (error)
+		return error;
+
+	/*
+	 * perform the allocation.
+	 */
+	error = exfat_alloc_clusters(inode, hint_cluster, new_clusters,
+				     nr_new_clusters);
+	if (error)
+		return error;
+
+	/*
+	 * fill new cluster(s) with zero.
+	 */
+	for (i = 0; i < nr_new_clusters; ++i)
+		exfat_zero_cluster(inode->i_sb, new_clusters[i], false);
+
+	/*
+	 * update size and mark inode as dirty so that write_inode()
+	 * can update its size, and the other fields updated by
+	 * exfat_alloc_clusters.
+	 */
+	inode->i_size += nr_new_clusters << sbi->clusterbits;
+	mark_inode_dirty(inode);
+
+	/*
+	 * kick the whole place search again, this time with the newly
+	 * allocated clusters.
+	 */
+	goto retry;
+}
+
+/*
+ * map every directory entry referenced by iloc to an in-memory
+ * (buffer_head, offset, pointer) triple.  On failure all buffers
+ * grabbed so far are released.  The caller releases them via
+ * exfat_dirty_dir_entries() or exfat_cleanup_dir_entries().
+ */
+int exfat_get_dir_entry_buffers(struct inode *dir, struct exfat_iloc *iloc,
+				struct dir_entry_buffer *entries,
+				size_t nr_entries)
+{
+	size_t i;
+	int error;
+	struct exfat_sb_info *sbi = EXFAT_SB(dir->i_sb);
+
+	BUG_ON(iloc->nr_secondary != nr_entries);
+
+	/* zeroed so the error path can tell which bh's were read */
+	memset(entries, 0, sizeof (*entries) * nr_entries);
+	for (i = 0; i < nr_entries; ++i) {
+		sector_t sector = iloc->disk_offs[i] >> sbi->sectorbits;
+
+		entries[i].off = iloc->disk_offs[i] & sbi->sectormask;
+		entries[i].bh = sb_bread(dir->i_sb, sector);
+		if (!entries[i].bh) {
+			error = -EIO;
+			goto fail;
+		}
+		entries[i].start = entries[i].bh->b_data + entries[i].off;
+	}
+	return 0;
+
+fail:
+	for (i = 0; i < nr_entries; ++i)
+		if (entries[i].bh)
+			brelse(entries[i].bh);
+	return error;
+}
+
+/*
+ * continue the exfat name hash over len UTF-16 code units: each
+ * character is upcased, then both bytes are folded in with a
+ * rotate-right-by-one per byte.  Pass hash=0 to start a new hash.
+ */
+static u16 exfat_filename_hash_cont(struct super_block *sb,
+				    const __le16 *name, u16 hash, size_t len)
+{
+	while (len) {
+		u16 c = __le16_to_cpu(exfat_upcase_convert(sb, *name));
+
+		/* (hash << 15) | (hash >> 1) == 16-bit rotate right by 1 */
+		hash = ((hash << 15) | (hash >> 1)) + (c & 0xff);
+		hash = ((hash << 15) | (hash >> 1)) + (c >> 8);
+		--len;
+		++name;
+	}
+	return hash;
+}
+
+/*
+ * compute the entry-set checksum over nr directory entries.  The first
+ * entry is the filedir entry, whose set_checksum field is excluded
+ * from the sum (signalled by the 'true' argument).
+ */
+u16 exfat_dir_entries_checksum(struct dir_entry_buffer *entries, u32 nr)
+{
+	u32 checksum = 0;
+
+	if (nr) {
+		checksum = exfat_direntry_checksum(entries->start,
+						   checksum, true);
+		--nr;
+		++entries;
+	}
+	while (nr) {
+		checksum = exfat_direntry_checksum(entries->start,
+						   checksum, false);
+		--nr;
+		++entries;
+	}
+	return checksum;
+}
+
+/*
+ * setup exfat_filedir_entry and exfat_stream_extension_entry for a
+ * new empty entry, with attribute attrs, and named name.  entries[0]
+ * is the filedir entry, entries[1] the stream extension, and the rest
+ * hold the name, 15 UTF-16 units per entry.
+ */
+static void exfat_fill_dir_entries(struct super_block *sb,
+				  struct dir_entry_buffer *entries,
+				  size_t nr_entries, u8 attrs,
+				  __le16 *name, int name_length)
+{
+	struct exfat_filedir_entry *efd;
+	struct exfat_stream_extension_entry *esx;
+	int i;
+	u16 name_hash;
+	u16 checksum;
+	struct timespec64 ts;
+
+        ktime_get_coarse_real_ts64(&ts);
+
+	efd = entries[0].start;
+	esx = entries[1].start;
+
+	/*
+	 * fill exfat filedir entry
+	 */
+	memset(efd, 0, sizeof (*efd));
+	efd->type = E_EXFAT_FILEDIR;
+	efd->secondary_count = nr_entries - 1;
+	efd->set_checksum = 0;
+	efd->attributes = __cpu_to_le16(attrs);
+
+	/*
+	 * update file directory entry times: created, modified and
+	 * accessed all start equal to "now".
+	 */
+	efd = entries[0].start;
+	exfat_write_time(EXFAT_SB(sb), &ts, &efd->create, &efd->create_10ms,
+			 &efd->create_tz_offset);
+	efd->modified = efd->accessed = efd->create;
+	efd->modified_10ms = efd->create_10ms;
+	efd->accessed_tz_offset = efd->modified_tz_offset =
+		efd->create_tz_offset;
+
+	/*
+	 * fill exfat stream extension entry (no clusters yet).
+	 */
+	memset(esx, 0, sizeof (*esx));
+	esx->type = E_EXFAT_STREAM_EXT;
+	esx->flags = EXFAT_I_ALLOC_POSSIBLE;
+	esx->first_cluster = __cpu_to_le32(0);
+	esx->data_length = __cpu_to_le64(0);
+	esx->valid_data_length = __cpu_to_le64(0);
+	esx->name_length = name_length;
+
+	/*
+	 * fill name fragments, hashing the upcased name as we go.
+	 */
+	name_hash = 0;
+	for (i = 0; i < nr_entries - 2; ++i, name_length -= 15) {
+		struct exfat_filename_entry *efn = entries[i + 2].start;
+		int len = 15;
+
+		if (name_length < 15)
+			len = name_length;
+
+		memset(efn, 0, sizeof (*efn));
+		efn->type = E_EXFAT_FILENAME;
+		memcpy(efn->name_frag, name + i * 15, len * sizeof (__le16));
+		name_hash = exfat_filename_hash_cont(sb, efn->name_frag,
+						     name_hash, len);
+	}
+	esx->name_hash = __cpu_to_le16(name_hash);
+
+	/* checksum must be computed last, over the complete entry set */
+	checksum = exfat_dir_entries_checksum(entries, nr_entries);
+	efd->set_checksum = __cpu_to_le16(checksum);
+}
+
+/*
+ * mark all buffer heads in the entries array as dirty. optionally
+ * sync them if required.  Also drops the references taken by
+ * exfat_get_dir_entry_buffers().
+ */
+void exfat_dirty_dir_entries(struct dir_entry_buffer *entries,
+			     size_t nr_entries, bool sync)
+{
+	size_t i;
+
+	for (i = 0; i < nr_entries; ++i) {
+		mark_buffer_dirty(entries[i].bh);
+		if (sync)
+			sync_dirty_buffer(entries[i].bh);
+		brelse(entries[i].bh);
+	}
+}
+
+/*
+ * cleanup all buffer heads in entries without writing anything
+ * (error-path counterpart to exfat_dirty_dir_entries).
+ */
+static void exfat_cleanup_dir_entries(struct dir_entry_buffer *entries,
+				     size_t nr_entries)
+{
+	size_t i;
+
+	for (i = 0; i < nr_entries; ++i)
+		brelse(entries[i].bh);
+}
+
+/*
+ * create an inode: find room for the new entry set in the parent
+ * directory, fill the entries, build the in-memory inode and
+ * instantiate the dentry.  19 entries is the maximum entry set
+ * (filedir + stream extension + ceil(255/15) name entries).
+ */
+static int __exfat_inode_create(struct inode *dir, struct dentry *dentry,
+				umode_t mode, bool is_dir)
+{
+	int nr_entries;
+	struct dir_entry_buffer entries[19];
+	struct inode *new;
+	struct exfat_iloc iloc;
+	int error;
+	u8 attr = 0;
+	__le16 *utf16_name;
+	int utf16_name_length;
+
+	if (is_dir)
+		attr |= E_EXFAT_ATTR_DIRECTORY;
+
+	exfat_lock_super(dir->i_sb);
+
+	utf16_name = __getname();
+	if (!utf16_name) {
+		error = -ENOMEM;
+		goto unlock_super;
+	}
+
+	utf16_name_length = utf8s_to_utf16s(dentry->d_name.name,
+					    dentry->d_name.len,
+					    UTF16_LITTLE_ENDIAN, utf16_name,
+					    255 + 2);
+	if (utf16_name_length < 0) {
+		error = utf16_name_length;
+		goto putname;
+	}
+	if (utf16_name_length > 255) {
+		error = -ENAMETOOLONG;
+		goto putname;
+	}
+
+	/* filedir + stream extension + one name entry per 15 units */
+	nr_entries = 2 + DIV_ROUND_UP(utf16_name_length, 15);
+	if (nr_entries > 19) {
+		error = -ENAMETOOLONG;
+		goto putname;
+	}
+
+	error = exfat_find_dir_iloc(dir, nr_entries, &iloc);
+	if (error < 0)
+		goto putname;
+
+	error = exfat_get_dir_entry_buffers(dir, &iloc, entries, nr_entries);
+	if (error)
+		goto putname;
+	exfat_fill_dir_entries(dir->i_sb, entries, nr_entries, attr,
+				       utf16_name, utf16_name_length);
+
+	/*
+	 * create an inode with it.
+	 */
+	error = -ENOMEM;
+	new = exfat_populate_inode(dir->i_sb, entries[0].start,
+				   entries[1].start, &iloc);
+	if (!new)
+		goto cleanup;
+	inc_nlink(dir);
+	d_instantiate(dentry, new);
+
+	/*
+	 * update directory atime / ctime.
+	 */
+	dir->i_atime = dir->i_mtime = current_time(dir);
+	if (IS_DIRSYNC(dir))
+		__exfat_write_inode(dir, true);
+	else
+		mark_inode_dirty(dir);
+
+	/*
+	 * write to disk (also releases the entry buffers).
+	 */
+	exfat_dirty_dir_entries(entries, nr_entries, false);
+	__putname(utf16_name);
+	exfat_unlock_super(dir->i_sb);
+	return 0;
+
+cleanup:
+	exfat_cleanup_dir_entries(entries, nr_entries);
+putname:
+	__putname(utf16_name);
+unlock_super:
+	exfat_unlock_super(dir->i_sb);
+	return error;
+}
+
+/* .create inode operation: create a regular file. */
+int exfat_inode_create(struct mnt_idmap *ns, struct inode *dir,
+		       struct dentry *dentry, umode_t mode, bool excl)
+{
+	return __exfat_inode_create(dir, dentry, mode, false);
+}
+
+/* .mkdir inode operation: create a directory. */
+int exfat_inode_mkdir(struct mnt_idmap *ns, struct inode *dir,
+		      struct dentry *dentry, umode_t mode)
+{
+	return __exfat_inode_create(dir, dentry, mode, true);
+}
+
+/*
+ * inode unlink: find all direntry buffers and clear seventh bit of
+ * the entry type to mark them as unused.
+ *
+ * caller must hold the super lock. clusters are not freed here; the
+ * inode data is disposed of once the in-core inode is evicted.
+ */
+static int __exfat_inode_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	struct dir_entry_buffer entries[info->iloc.nr_secondary];
+	int error;
+	u32 i;
+
+	error = exfat_get_dir_entry_buffers(inode, &info->iloc,
+					    entries, info->iloc.nr_secondary);
+	if (error)
+		return error;
+
+	/* clearing bit 7 of the type byte marks an entry as not in use */
+	for (i = 0; i < info->iloc.nr_secondary; ++i) {
+		u8 *type = entries[i].start;
+
+		*type &= 0x7f;
+	}
+
+	drop_nlink(dir);
+	clear_nlink(inode);
+	inode->i_mtime = inode->i_atime = current_time(inode);
+
+	/*
+	 * update atime & mtime for parent directory.
+	 */
+	dir->i_mtime = dir->i_atime = current_time(dir);
+	if (IS_DIRSYNC(dir))
+		__exfat_write_inode(dir, true);
+	else
+		mark_inode_dirty(dir);
+
+	exfat_dirty_dir_entries(entries, info->iloc.nr_secondary, false);
+	exfat_remove_inode_hash(inode);
+	return 0;
+}
+
+/* .unlink inode operation: take the super lock and unlink. */
+int exfat_inode_unlink(struct inode *dir, struct dentry *dentry)
+{
+	int ret;
+
+	exfat_lock_super(dir->i_sb);
+	ret = __exfat_inode_unlink(dir, dentry);
+	exfat_unlock_super(dir->i_sb);
+	return ret;
+}
+
+/*
+ * inode rmdir: check that links is not greater than 2 (meaning that
+ * the directory is empty) and invoke unlink.
+ *
+ * the link count of a directory is its entry count plus 2 for '.' and
+ * '..' (see exfat_fill_root), hence nlink > 2 means "not empty".
+ */
+static int __exfat_inode_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+
+	if (inode->i_nlink > 2)
+		return -ENOTEMPTY;
+
+	return __exfat_inode_unlink(dir, dentry);
+}
+
+/* .rmdir inode operation: take the super lock and remove the dir. */
+int exfat_inode_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	int ret;
+
+	exfat_lock_super(dir->i_sb);
+	ret = __exfat_inode_rmdir(dir, dentry);
+	exfat_unlock_super(dir->i_sb);
+	return ret;
+}
+
+/*
+ * .rename inode operation.
+ *
+ * allocates a fresh directory entry set under new_dir, copies the
+ * filedir/stream entries from the old location, rewrites the name
+ * fragments/hash, then marks the old entry set unused. only
+ * RENAME_NOREPLACE among the rename flags is accepted (the VFS itself
+ * enforces the no-replace semantics before calling us).
+ */
+int exfat_rename(struct mnt_idmap *ns,
+		 struct inode *old_dir, struct dentry *old_dentry,
+		 struct inode *new_dir, struct dentry *new_dentry,
+		 unsigned int flags)
+{
+	struct inode *old_inode = old_dentry->d_inode;
+	struct inode *new_inode = new_dentry->d_inode;
+	int new_nr_entries;
+	int error = 0;
+	struct exfat_iloc new_iloc;
+	struct exfat_inode_info *old_info = EXFAT_I(old_inode);
+	struct dir_entry_buffer old_buffers[old_info->iloc.nr_secondary];
+	struct dir_entry_buffer new_buffers[19];
+	struct exfat_filedir_entry *efd;
+	struct exfat_stream_extension_entry *esx;
+	int name_length;
+	__le16 *name;
+	u16 name_hash;
+	int i;
+
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
+	exfat_lock_super(new_dir->i_sb);
+
+	/*
+	 * convert new name to utf16
+	 */
+	name = __getname();
+	if (!name) {
+		error = -ENOMEM;
+		goto unlock_super;
+	}
+	name_length = utf8s_to_utf16s(new_dentry->d_name.name,
+				      new_dentry->d_name.len,
+				      UTF16_LITTLE_ENDIAN, name, 255 + 2);
+
+	if (name_length > 255) {
+		error = -ENAMETOOLONG;
+		goto err_putname;
+	}
+	if (name_length < 0) {
+		error = name_length;
+		goto err_putname;
+	}
+
+	/* 2 fixed entries + one filename entry per 15 utf16 code units */
+	new_nr_entries = 2 + DIV_ROUND_UP(name_length, 15);
+
+	/*
+	 * find space for new entry
+	 */
+	error = exfat_find_dir_iloc(new_dir, new_nr_entries, &new_iloc);
+	if (error < 0)
+		goto err_putname;
+
+	/*
+	 * get buffers for old and new entries.
+	 */
+	error = exfat_get_dir_entry_buffers(old_dir, &old_info->iloc,
+				    old_buffers, old_info->iloc.nr_secondary);
+	if (error < 0)
+		goto err_putname;
+
+	error = exfat_get_dir_entry_buffers(new_dir, &new_iloc, new_buffers,
+					    new_nr_entries);
+	if (error < 0)
+		goto err_cleanup_old_buffers;
+
+
+	/*
+	 * remove new inode, if it exists.
+	 */
+	if (new_inode) {
+		if (S_ISDIR(new_inode->i_mode))
+			error = __exfat_inode_rmdir(new_dir, new_dentry);
+		else
+			error = __exfat_inode_unlink(new_dir, new_dentry);
+		if (error < 0)
+			goto err_cleanup_new_buffers;
+	}
+
+	/*
+	 * move old esd to new esd (and ditto for esx).
+	 */
+	efd = new_buffers[0].start;
+	esx = new_buffers[1].start;
+	memcpy(efd, old_buffers[0].start, sizeof (*efd));
+	memcpy(esx, old_buffers[1].start, sizeof (*esx));
+
+	efd->secondary_count = new_nr_entries - 1;
+
+	/*
+	 * patch new name after that.
+	 *
+	 * NOTE(review): __exfat_inode_create assigns name_length to
+	 * esx->name_length without a __cpu_to_le16 conversion — one of
+	 * the two paths is likely wrong depending on the declared field
+	 * width; verify against struct exfat_stream_extension_entry.
+	 */
+	esx->name_length = __cpu_to_le16(name_length);
+
+	/*
+	 * fill name fragments.
+	 */
+	name_hash = 0;
+	for (i = 0; i < new_nr_entries - 2; ++i, name_length -= 15) {
+		struct exfat_filename_entry *efn = new_buffers[i + 2].start;
+		int len = 15;
+
+		if (name_length < 15)
+			len = name_length;
+
+		memset(efn, 0, sizeof (*efn));
+		efn->type = E_EXFAT_FILENAME;
+		memcpy(efn->name_frag, name + i * 15, len * sizeof (__le16));
+		name_hash = exfat_filename_hash_cont(new_dir->i_sb,
+						     efn->name_frag,
+						     name_hash, len);
+	}
+	__putname(name);
+	esx->name_hash = __cpu_to_le16(name_hash);
+	efd->set_checksum = exfat_dir_entries_checksum(new_buffers,
+						       new_nr_entries);
+	efd->set_checksum = __cpu_to_le16(efd->set_checksum);
+
+	/*
+	 * mark old buffer entries as unused.
+	 */
+	for (i = 0; i < old_info->iloc.nr_secondary; ++i)
+		*((u8*)old_buffers[i].start) &= 0x7f;
+
+	/*
+	 * dirty old & new entries buffers.
+	 */
+	exfat_dirty_dir_entries(new_buffers, new_nr_entries, false);
+	exfat_dirty_dir_entries(old_buffers, old_info->iloc.nr_secondary,
+				false);
+
+	/*
+	 * update links if new_dir and old_dir are differents.
+	 */
+	if (new_dir != old_dir) {
+		drop_nlink(old_dir);
+		inc_nlink(new_dir);
+	}
+
+	/*
+	 * make old inode use the new iloc, and update sb inode hash.
+	 */
+	exfat_remove_inode_hash(old_inode);
+	old_info->iloc = new_iloc;
+	exfat_insert_inode_hash(old_inode);
+
+	/*
+	 * update new dir & old dir mtime/atime
+	 */
+	if (new_dir == old_dir) {
+		new_dir->i_mtime = new_dir->i_atime = current_time(new_dir);
+		if (IS_DIRSYNC(new_dir))
+			__exfat_write_inode(new_dir, true);
+		else
+			mark_inode_dirty(new_dir);
+	} else {
+		new_dir->i_mtime = new_dir->i_atime =
+			old_dir->i_mtime = old_dir->i_atime =
+				current_time(old_dir);
+		if (IS_DIRSYNC(new_dir)) {
+			__exfat_write_inode(new_dir, true);
+			__exfat_write_inode(old_dir, true);
+		} else {
+			mark_inode_dirty(new_dir);
+			mark_inode_dirty(old_dir);
+		}
+	}
+
+	exfat_unlock_super(new_dir->i_sb);
+	return 0;
+
+err_cleanup_new_buffers:
+	exfat_cleanup_dir_entries(new_buffers, new_nr_entries);
+err_cleanup_old_buffers:
+	exfat_cleanup_dir_entries(old_buffers, old_info->iloc.nr_secondary);
+err_putname:
+	__putname(name);
+unlock_super:
+	exfat_unlock_super(new_dir->i_sb);
+	return error;
+}
diff -Nruw linux-6.4-fbx/fs/exfat-fbx./read-write.c linux-6.4-fbx/fs/exfat-fbx/read-write.c
--- linux-6.4-fbx/fs/exfat-fbx./read-write.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/fs/exfat-fbx/read-write.c	2023-11-14 18:40:09.820119701 +0100
@@ -0,0 +1,144 @@
+/*
+ * read-write.c for exfat
+ * Created by <nschichan@freebox.fr> on Wed Jul 31 16:37:51 2013
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mpage.h>
+#include <linux/buffer_head.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+/*
+ * map file sector to disk sector.
+ *
+ * when the inode has EXFAT_I_FAT_INVALID set, its clusters are
+ * contiguous and the mapping is pure arithmetic; otherwise the FAT
+ * chain must be walked.
+ */
+static int exfat_bmap(struct inode *inode, sector_t fsect, sector_t *dsect)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	u32 cluster_nr = fsect >> (sbi->clusterbits - sbi->sectorbits);
+	u32 cluster;
+	unsigned int offset = fsect & (sbi->sectors_per_cluster - 1);
+
+	if (info->flags & EXFAT_I_FAT_INVALID)
+		cluster = info->first_cluster + cluster_nr;
+	else {
+		int error;
+
+		error = exfat_get_fat_cluster(inode, cluster_nr, &cluster);
+		if (error)
+			return error;
+	}
+
+	*dsect = exfat_cluster_sector(sbi, cluster) + offset;
+	return 0;
+}
+
+/*
+ * get_block callback for the mpage/cont_write helpers: map logical
+ * file sector @block to a device sector in @bh, allocating a new
+ * cluster when @create is set and the write goes past EOF.
+ */
+static int exfat_get_block(struct inode *inode, sector_t block,
+			   struct buffer_head *bh, int create)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	sector_t last_block;
+	unsigned int offset;
+	sector_t dblock;
+	int error;
+
+	/* number of sectors currently covered by i_size (rounded up) */
+	last_block = (i_size_read(inode) + sbi->sectorsize - 1) >>
+		sbi->sectorbits;
+	offset = block & (sbi->sectors_per_cluster - 1);
+
+	/* reads past EOF map nothing (hole) */
+	if (!create && block >= last_block)
+		return 0;
+
+	if (create && block >= last_block && offset == 0) {
+		u32 hint, cluster;
+
+		/*
+		 * request for first sector in a cluster immediate to
+		 * the last allocated cluster of the file: must
+		 * allocate a new cluster.
+		 */
+		error = exfat_get_cluster_hint(inode, &hint);
+		if (error)
+			return error;
+
+		error = exfat_alloc_clusters(inode, hint, &cluster, 1);
+		if (error)
+			return error;
+	}
+
+	/*
+	 * NOTE(review): if exfat_bmap() fails after the allocation
+	 * above, the freshly appended cluster is not rolled back —
+	 * confirm whether that is acceptable for this on-disk format.
+	 */
+	error = exfat_bmap(inode, block, &dblock);
+	if (error)
+		return error;
+
+	if (create && block >= last_block) {
+		/*
+		 * currently in create mode: we need to update
+		 * mmu_private.
+		 */
+		info->mmu_private += sbi->sectorsize;
+		set_buffer_new(bh);
+	}
+	map_bh(bh, inode->i_sb, dblock);
+	return 0;
+}
+
+/* .read_folio address-space operation, backed by mpage. */
+int exfat_read_folio(struct file *file, struct folio *folio)
+{
+	return mpage_read_folio(folio, exfat_get_block);
+}
+
+/* .readahead address-space operation, backed by mpage. */
+void exfat_readahead(struct readahead_control *rac)
+{
+	mpage_readahead(rac, exfat_get_block);
+}
+
+/*
+ * undo the effects of a failed or short write: drop the page cache
+ * beyond i_size and release the blocks instantiated past it.
+ */
+static int exfat_write_error(struct inode *inode, loff_t to)
+{
+	if (to > inode->i_size) {
+		truncate_pagecache(inode, to);
+		exfat_truncate_blocks(inode, inode->i_size);
+	}
+	return 0;
+}
+
+/*
+ * .write_begin address-space operation. cont_write_begin() zero-fills
+ * the gap between mmu_private and the write position, then maps the
+ * target page via exfat_get_block. on error, blocks instantiated past
+ * EOF are released.
+ */
+int exfat_write_begin(struct file *file, struct address_space *mapping,
+		      loff_t pos, unsigned len,
+		      struct page **pagep, void **fsdata)
+{
+	struct inode *inode = mapping->host;
+	int error;
+
+	*pagep = NULL;
+	error = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
+				 exfat_get_block, &EXFAT_I(inode)->mmu_private);
+
+	if (error)
+		exfat_write_error(inode, pos + len);
+	return error;
+}
+
+/*
+ * .write_end address-space operation. generic_write_end() returns the
+ * number of bytes copied (or a negative error); anything short of @len
+ * means blocks may have been instantiated past EOF and must be undone.
+ */
+int exfat_write_end(struct file *file, struct address_space *mapping,
+		    loff_t pos, unsigned len, unsigned copied,
+		    struct page *page, void *fsdata)
+{
+	struct inode *inode = mapping->host;
+	int error;
+
+	error = generic_write_end(file, mapping, pos, len, copied, page,
+				  fsdata);
+
+	if (error < len)
+		exfat_write_error(inode, pos + len);
+	return error;
+}
+
+/* .writepages address-space operation, backed by mpage. */
+int exfat_writepages(struct address_space *mapping,
+		     struct writeback_control *wbc)
+{
+	return mpage_writepages(mapping, wbc, exfat_get_block);
+}
diff -Nruw linux-6.4-fbx/fs/exfat-fbx./super.c linux-6.4-fbx/fs/exfat-fbx/super.c
--- linux-6.4-fbx/fs/exfat-fbx./super.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/fs/exfat-fbx/super.c	2023-11-14 18:40:09.820119701 +0100
@@ -0,0 +1,746 @@
+/*
+ * super.c for exfat
+ * Created by <nschichan@freebox.fr> on Tue Jul 23 12:33:53 2013
+ */
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/statfs.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/iversion.h>
+#include <linux/blk_types.h>
+
+#include "exfat_fs.h"
+#include "exfat.h"
+
+
+#define PFX	"exFAT: "
+
+/* forward declarations for the super_operations table below */
+static void exfat_put_super(struct super_block *sb);
+static int exfat_statfs(struct dentry *dentry, struct kstatfs *kstat);
+static int exfat_show_options(struct seq_file *m, struct dentry *root);
+static int exfat_remount(struct super_block *sb, int *flags, char *opts);
+
+/* superblock-level callbacks */
+static const struct super_operations exfat_super_ops = {
+	.alloc_inode	= exfat_alloc_inode,
+	.destroy_inode	= exfat_destroy_inode,
+	.drop_inode	= exfat_drop_inode,
+	.evict_inode	= exfat_evict_inode,
+	.write_inode	= exfat_write_inode,
+	.statfs         = exfat_statfs,
+	.put_super      = exfat_put_super,
+	.show_options	= exfat_show_options,
+	.remount_fs	= exfat_remount,
+};
+
+/* file operations for directories (open/readdir/ioctl) */
+const struct file_operations exfat_dir_operations = {
+	.llseek = generic_file_llseek,
+	.read = generic_read_dir,
+	.iterate = exfat_iterate,
+	.unlocked_ioctl	= exfat_ioctl,
+};
+
+/* file operations for regular files, all built on generic helpers */
+const struct file_operations exfat_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read_iter	= generic_file_read_iter,
+	.write_iter	= generic_file_write_iter,
+	.mmap		= generic_file_mmap,
+	.splice_read	= generic_file_splice_read,
+	.splice_write	= iter_file_splice_write,
+	.unlocked_ioctl	= exfat_ioctl,
+	.fsync		= generic_file_fsync,
+};
+
+/* inode operations for directories (namei entry points) */
+const struct inode_operations exfat_dir_inode_operations =
+{
+	.create = exfat_inode_create,
+	.mkdir	= exfat_inode_mkdir,
+	.lookup = exfat_inode_lookup,
+	.rmdir	= exfat_inode_rmdir,
+	.unlink	= exfat_inode_unlink,
+	.rename	= exfat_rename,
+	.setattr = exfat_setattr,
+	.getattr = exfat_getattr,
+};
+
+/* inode operations for regular files */
+const struct inode_operations exfat_file_inode_operations = {
+	.setattr = exfat_setattr,
+	.getattr = exfat_getattr,
+};
+
+/* page-cache callbacks; see read-write.c for the exfat_* helpers */
+const struct address_space_operations exfat_address_space_operations = {
+	.dirty_folio	= block_dirty_folio,
+	.invalidate_folio = block_invalidate_folio,
+	.read_folio	= exfat_read_folio,
+	.readahead	= exfat_readahead,
+	.write_begin	= exfat_write_begin,
+	.write_end	= exfat_write_end,
+	.writepages	= exfat_writepages,
+	.migrate_folio  = buffer_migrate_folio,
+};
+
+/*
+ * printk-style logging helper: @prefix is a KERN_* level, the message
+ * is tagged with the filesystem name and the block device id.
+ */
+void exfat_msg(struct super_block *sb, const char *prefix,
+		const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk("%sexFAT-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
+	va_end(args);
+}
+
+/*
+ * report a filesystem error and apply the mount-time errors= policy:
+ * continue (default), remount read-only, or panic.
+ */
+void exfat_fs_error(struct super_block *sb, const char *fmt, ...)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	exfat_msg(sb, KERN_ERR, "error: %pV", &vaf);
+	va_end(args);
+
+	if (sbi->options.error_action == EXFAT_ERROR_ACTION_REMOUNT_RO &&
+	    !(sb->s_flags & SB_RDONLY)) {
+		sb->s_flags |= SB_RDONLY;
+		exfat_msg(sb, KERN_ERR, "remounted read-only due to fs error.");
+	} else if (sbi->options.error_action == EXFAT_ERROR_ACTION_PANIC)
+		panic("exFAT-fs (%s): panic due fs error.\n", sb->s_id);
+}
+
+/*
+ * process checksum on buffer head. first indicates if the special
+ * treatment of the first sector needs to be done or not.
+ *
+ * first sector can be changed (volume flags, and heap use percent),
+ * those fields are excluded from the checksum to allow updating
+ * without recalculating the checksum.
+ *
+ * the fold is a rotate-right-by-one of the accumulator plus the next
+ * byte; offsets 106/107 (volume flags) and 112 (percent in use) are
+ * skipped on the first sector only.
+ */
+static u32 exfat_sb_checksum_process(struct buffer_head *bh, u32 checksum,
+				     unsigned int size,
+				     bool first)
+{
+	unsigned int i;
+
+	for (i = 0; i < size; ++i) {
+		if (first && (i == 106 || i == 107 || i == 112))
+			continue ;
+		checksum = ((checksum << 31) | (checksum >> 1)) +
+			(unsigned char)bh->b_data[i];
+	}
+	return checksum;
+}
+
+/*
+ * verify the boot region checksum: sectors 0..EXFAT_CHECKSUM_SECTORS-1
+ * are folded into a 32-bit checksum that must match every 32-bit word
+ * of the checksum sector (sector EXFAT_CHECKSUM_SECTORS).
+ *
+ * returns 0 on success, -ENOMEM if a sector cannot be read, -EINVAL
+ * on checksum mismatch.
+ */
+static int exfat_check_sb_checksum(struct super_block *sb)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	u32 checksum;
+	int i;
+	int err;
+	struct buffer_head *bh[EXFAT_CHECKSUM_SECTORS + 1];
+
+	/*
+	 * fetch needed sectors, reuse first sector from sbi.
+	 */
+	err = -ENOMEM;
+	memset(bh, 0, sizeof (struct buffer_head*) *
+	       (EXFAT_CHECKSUM_SECTORS + 1));
+	bh[0] = sbi->sb_bh;
+	for (i = 1; i < EXFAT_CHECKSUM_SECTORS + 1; ++i) {
+		bh[i] = sb_bread(sb, i);
+		if (!bh[i])
+			goto out;
+	}
+
+	/*
+	 * calculate checksum. only the first sector excludes the
+	 * mutable fields (volume flags, percent in use).
+	 */
+	checksum = exfat_sb_checksum_process(bh[0], 0, sbi->sectorsize, true);
+	for (i = 1; i < EXFAT_CHECKSUM_SECTORS; ++i) {
+		checksum = exfat_sb_checksum_process(bh[i], checksum,
+						     sbi->sectorsize, false);
+	}
+
+	/*
+	 * compare with the checksum sector: every 32-bit word must
+	 * hold the computed checksum.
+	 */
+	err = -EINVAL;
+	for (i = 0; i < sbi->sectorsize; i += sizeof (u32)) {
+		__le32 val = *(__le32 *)(bh[EXFAT_CHECKSUM_SECTORS]->b_data + i);
+
+		if (__le32_to_cpu(val) != checksum) {
+			exfat_msg(sb, KERN_INFO, "at offset %i, checksum "
+				  "%08x != %08x", i, __le32_to_cpu(val), checksum);
+			goto out;
+		}
+	}
+	err = 0;
+
+out:
+	/*
+	 * release every sector read above, including the checksum
+	 * sector itself: the previous bound of EXFAT_CHECKSUM_SECTORS
+	 * leaked bh[EXFAT_CHECKSUM_SECTORS]. bh[0] belongs to sbi and
+	 * must not be released here.
+	 */
+	for (i = 1; i < EXFAT_CHECKSUM_SECTORS + 1; ++i)
+		if (bh[i])
+			brelse(bh[i]);
+	return err;
+}
+
+/*
+ * sanity-check the volume boot record: jump/fsname/boot signatures,
+ * filesystem revision 1.00, geometry bounds, and the features this
+ * driver does not support (active fat != 0, TexFAT / multiple FATs).
+ * returns 0 if the vbr looks usable, -EINVAL otherwise.
+ */
+static int exfat_check_sb(struct super_block *sb)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct exfat_vbr *vbr = sbi->vbr;
+	u16 fs_rev;
+	u16 flags;
+	int active_fat;
+	u16 num_fats;
+
+	if (memcmp(vbr->jump, "\xeb\x76\x90", sizeof (vbr->jump))) {
+		exfat_msg(sb, KERN_INFO, "invalid jump field in vbr.");
+		return -EINVAL;
+	}
+
+	if (memcmp(vbr->fsname, "EXFAT   ", 8)) {
+		exfat_msg(sb, KERN_INFO, "invalid fsname field in vbr: %s.",
+			  vbr->fsname);
+		return -EINVAL;
+	}
+
+	fs_rev = __le16_to_cpu(vbr->fs_rev);
+	if (fs_rev != 0x0100) {
+		exfat_msg(sb, KERN_INFO, "filesystem version invalid: "
+			  "have 0x%04x, need 0x0100", fs_rev);
+		return -EINVAL;
+	}
+
+	flags = __le16_to_cpu(vbr->volume_flags);
+	active_fat = exfat_active_fat(flags);
+	if (active_fat != 0) {
+		exfat_msg(sb, KERN_INFO, "filesystems with active fat > 0 are "
+			  "not supported.");
+		return -EINVAL;
+	}
+
+	if (flags & EXFAT_FLAG_MEDIA_FAILURE)
+		exfat_msg(sb, KERN_WARNING, "filesystem had media failure(s)");
+
+	/*
+	 * bytes per sectors are on the range 2^9 - 2^12 (512 - 4096)
+	 */
+	if (vbr->bytes_per_sector < 9 || vbr->bytes_per_sector > 12) {
+		exfat_msg(sb, KERN_ERR, "invalid byte per sectors: %u",
+			  (1 << vbr->bytes_per_sector));
+		return -EINVAL;
+	}
+
+	/*
+	 * sectors per cluster can be as low as 0, and must not result
+	 * in a cluster size higher than 32MB (byte_per_sector +
+	 * sectors_per_cluster must not be greater than 25)
+	 */
+	if (vbr->bytes_per_sector + vbr->sectors_per_cluster > 25) {
+		exfat_msg(sb, KERN_ERR, "invalid cluster size: %u",
+		  1 << (vbr->bytes_per_sector + vbr->sectors_per_cluster));
+		return -EINVAL;
+	}
+
+	num_fats = __le16_to_cpu(vbr->fat_num);
+	if (num_fats == 0) {
+		exfat_msg(sb, KERN_ERR, "superblock reports no FAT.");
+		return -EINVAL;
+	}
+	if (num_fats > 1) {
+		exfat_msg(sb, KERN_ERR, "TexFAT is not supported.");
+		return -EINVAL;
+	}
+
+	if (memcmp(vbr->boot_sig, "\x55\xaa", 2)) {
+		exfat_msg(sb, KERN_ERR, "invalid end boot signature: %02x%02x.",
+			  vbr->boot_sig[0], vbr->boot_sig[1]);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * initialize the root inode: the root directory has no directory
+ * entry of its own, so everything (cluster chain length, link count,
+ * times, mode) is derived here instead of exfat_populate_inode().
+ */
+static int exfat_fill_root(struct super_block *sb, struct inode *root)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	u32 nclust;
+	u32 dummy;
+	loff_t links;
+
+	root->i_ino = EXFAT_ROOT_INO;
+	inode_set_iversion(root, 1);
+	EXFAT_I(root)->first_cluster =
+		__le32_to_cpu(sbi->root_dir_cluster);
+	EXFAT_I(root)->attributes = E_EXFAT_ATTR_DIRECTORY;
+
+	root->i_uid = sbi->options.uid;
+	root->i_gid = sbi->options.gid;
+
+	root->i_mode = exfat_make_mode(sbi, S_IRWXUGO, E_EXFAT_ATTR_DIRECTORY);
+	inode_inc_iversion(root);
+	root->i_generation = 0;
+
+	root->i_op = &exfat_dir_inode_operations;
+	root->i_fop = &exfat_dir_operations;
+
+	/*
+	 * root inode cannot use bitmap.
+	 */
+	EXFAT_I(root)->flags = EXFAT_I_ALLOC_POSSIBLE;
+
+	/*
+	 * set i_size: walk the FAT chain until it ends to count the
+	 * clusters of the root directory.
+	 */
+	nclust = 0;
+	while (__exfat_get_fat_cluster(root, nclust, &dummy, false) == 0)
+		++nclust;
+	root->i_size = nclust << sbi->clusterbits;
+	root->i_blocks = nclust << (sbi->clusterbits - 9);
+	EXFAT_I(root)->allocated_clusters = nclust;
+
+	/*
+	 * +2 to account for '.' and '..'
+	 */
+	links = exfat_dir_links(root);
+	if (links < 0)
+		return links;
+	set_nlink(root, links + 2);
+
+	root->i_mtime = root->i_atime = root->i_ctime = current_time(root);
+
+	return 0;
+}
+
+/*
+ * largest possible file size on this volume: every addressable
+ * cluster times the cluster size. used to set sb->s_maxbytes.
+ */
+static loff_t exfat_file_max_byte(struct exfat_sb_info *sbi)
+{
+	u32 max_clusters = EXFAT_CLUSTER_LASTVALID -
+		EXFAT_CLUSTER_FIRSTVALID + 1;
+
+	return (loff_t)max_clusters << sbi->clusterbits;
+}
+
+/*
+ * .show_options callback: emit the mount options in /proc/mounts
+ * form. uid/gid are shown only when they differ from root.
+ */
+static int exfat_show_options(struct seq_file *m, struct dentry *root)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(root->d_inode->i_sb);
+
+	if (!uid_eq(sbi->options.uid, GLOBAL_ROOT_UID))
+		seq_printf(m, ",uid=%u",
+			   from_kuid_munged(&init_user_ns, sbi->options.uid));
+	if (!gid_eq(sbi->options.gid, GLOBAL_ROOT_GID))
+		seq_printf(m, ",gid=%u",
+			   from_kgid_munged(&init_user_ns, sbi->options.gid));
+
+	seq_printf(m, ",fmask=%04o", sbi->options.fmask);
+	seq_printf(m, ",dmask=%04o", sbi->options.dmask);
+
+	if (sbi->options.time_offset_set)
+		seq_printf(m, ",time_offset=%d", sbi->options.time_offset);
+
+	switch (sbi->options.error_action) {
+	case EXFAT_ERROR_ACTION_PANIC:
+		seq_printf(m, ",errors=panic");
+		break;
+	case EXFAT_ERROR_ACTION_REMOUNT_RO:
+		seq_printf(m, ",errors=remount-ro");
+		break;
+	default:
+		seq_printf(m, ",errors=continue");
+		break;
+	}
+
+	return 0;
+}
+
+/* mount option token ids, one per entry of exfat_tokens below */
+enum {
+	Opt_exfat_uid,
+	Opt_exfat_gid,
+	Opt_exfat_dmask,
+	Opt_exfat_fmask,
+	Opt_exfat_time_offset,
+	Opt_exfat_error_continue,
+	Opt_exfat_error_remount_ro,
+	Opt_exfat_error_panic,
+	Opt_exfat_err,
+};
+
+/* match_token() patterns; Opt_exfat_err is the catch-all terminator */
+static const match_table_t exfat_tokens = {
+	{ Opt_exfat_uid, "uid=%u", },
+	{ Opt_exfat_gid, "gid=%u", },
+	{ Opt_exfat_dmask, "dmask=%04o", },
+	{ Opt_exfat_fmask, "fmask=%04o", },
+	{ Opt_exfat_time_offset, "time_offset=%d", },
+	{ Opt_exfat_error_continue, "errors=continue", },
+	{ Opt_exfat_error_remount_ro, "errors=remount-ro", },
+	{ Opt_exfat_error_panic, "errors=panic", },
+	{ Opt_exfat_err, NULL },
+};
+
+/*
+ * parse the mount option string into sbi->options.
+ *
+ * defaults: uid/gid of the mounting task, current umask for both
+ * fmask and dmask, no explicit time offset, errors=continue.
+ * returns 0 on success, -EINVAL on a malformed or unknown option
+ * (reported via exfat_msg unless @silent).
+ */
+static int exfat_parse_options(struct super_block *sb, char *opts, int silent)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	char *p;
+
+	sbi->options.uid = current_uid();
+	sbi->options.gid = current_gid();
+
+	sbi->options.dmask = current_umask();
+	sbi->options.fmask = current_umask();
+	sbi->options.time_offset_set = 0;
+	sbi->options.error_action = EXFAT_ERROR_ACTION_CONTINUE;
+
+	while (1) {
+		int token;
+		substring_t args[MAX_OPT_ARGS];
+		/*
+		 * must be signed: match_int() takes an int pointer and
+		 * time_offset accepts negative values.
+		 */
+		int optval;
+
+		p = strsep(&opts, ",");
+		if (!p)
+			break;
+		token = match_token(p, exfat_tokens, args);
+
+		switch (token) {
+		case Opt_exfat_uid:
+			if (match_int(&args[0], &optval))
+				return -EINVAL;
+			sbi->options.uid = make_kuid(current_user_ns(), optval);
+			break;
+
+		case Opt_exfat_gid:
+			if (match_int(&args[0], &optval))
+				return -EINVAL;
+			sbi->options.gid = make_kgid(current_user_ns(), optval);
+			break;
+
+		case Opt_exfat_dmask:
+			if (match_octal(&args[0], &optval))
+				return -EINVAL;
+			sbi->options.dmask = optval;
+			break;
+
+		case Opt_exfat_fmask:
+			if (match_octal(&args[0], &optval))
+				return -EINVAL;
+			sbi->options.fmask = optval;
+			break;
+
+		case Opt_exfat_time_offset:
+			if (match_int(&args[0], &optval))
+				return -EINVAL;
+			/*
+			 * reject offsets outside [-12h, +12h] (in
+			 * minutes). the original test combined the two
+			 * bounds with && and could never be true.
+			 */
+			if (optval < -12 * 60 || optval > 12 * 60) {
+				if (!silent)
+					exfat_msg(sb, KERN_INFO, "invalid "
+						  "time_offset value %d: "
+						  "should be between %d and %d",
+						  optval, -12 * 60, 12 * 60);
+				return -EINVAL;
+			}
+			sbi->options.time_offset = optval;
+			sbi->options.time_offset_set = 1;
+			break;
+
+		case Opt_exfat_error_continue:
+			sbi->options.error_action = EXFAT_ERROR_ACTION_CONTINUE;
+			break;
+
+		case Opt_exfat_error_remount_ro:
+			sbi->options.error_action =
+				EXFAT_ERROR_ACTION_REMOUNT_RO;
+			break;
+
+		case Opt_exfat_error_panic:
+			sbi->options.error_action = EXFAT_ERROR_ACTION_PANIC;
+			break;
+
+		default:
+			if (!silent)
+				exfat_msg(sb, KERN_INFO, "Unrecognized mount "
+					  "option %s or missing parameter.\n",
+					  p);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * set or clear the on-disk dirty flag in the vbr and sync it.
+ *
+ * @force overrides the read-only check (used on rw remount where
+ * sb->s_flags still carries SB_RDONLY). if the volume was already
+ * dirty when we mounted it, the flag is left untouched and a warning
+ * is printed on the first attempt to set it.
+ */
+static void exfat_set_sb_dirty(struct super_block *sb, bool set, bool force)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	u16 flags;
+
+	/*
+	 * do not change anything if mounted read only and not
+	 * forced. the force case would happen during remount.
+	 */
+	if ((sb->s_flags & SB_RDONLY) && !force)
+		return ;
+
+	if (sbi->dirty) {
+		if (set)
+			exfat_msg(sb, KERN_WARNING, "Volume was not cleanly "
+				  "umounted. fsck should probably be needed.");
+		return ;
+	}
+
+	flags = __le16_to_cpu(sbi->vbr->volume_flags);
+	if (set)
+		flags |= EXFAT_FLAG_DIRTY;
+	else
+		flags &= ~EXFAT_FLAG_DIRTY;
+	sbi->vbr->volume_flags = __cpu_to_le16(flags);
+
+	mark_buffer_dirty(sbi->sb_bh);
+	sync_dirty_buffer(sbi->sb_bh);
+}
+
+/*
+ * .remount_fs callback: only the ro/rw transition matters here — the
+ * on-disk dirty flag is cleared when going read-only and set again
+ * when going read-write.
+ */
+static int exfat_remount(struct super_block *sb, int *flags, char *opts)
+{
+	int new_rdonly = *flags & SB_RDONLY;
+
+	if (new_rdonly != (sb->s_flags & SB_RDONLY)) {
+		if (new_rdonly)
+			exfat_set_sb_dirty(sb, false, false);
+		else
+			/*
+			 * sb->s_flag still has SB_RDONLY, so we need
+			 * to force the dirty state
+			 */
+			exfat_set_sb_dirty(sb, true, true);
+	}
+	return 0;
+}
+
+/*
+ * fill a super block: read and validate the vbr, derive the volume
+ * geometry, verify the boot region checksum, initialize the FAT
+ * layer and build the root inode/dentry.
+ *
+ * on failure everything allocated here is released and a negative
+ * errno is returned (the original code leaked sbi when option parsing
+ * failed, and returned 0 when new_inode()/d_make_root() failed).
+ */
+static int exfat_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct exfat_sb_info *sbi = NULL;
+	int ret = -ENOMEM;
+	struct inode *root = NULL;
+	int i;
+
+	sbi = kzalloc(sizeof (*sbi), GFP_KERNEL);
+	if (!sbi)
+		return -ENOMEM;
+
+	sb->s_fs_info = sbi;
+	if (exfat_parse_options(sb, data, silent) < 0) {
+		/* free sbi through the common error path */
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	mutex_init(&sbi->sb_mutex);
+	spin_lock_init(&sbi->inode_hash_lock);
+
+	/*
+	 * first block, before we know sector size.
+	 */
+	sbi->sb_bh = sb_bread(sb, 0);
+	if (!sbi->sb_bh)
+		goto fail;
+
+	sbi->vbr = (struct exfat_vbr*)sbi->sb_bh->b_data;
+	sb->s_op = &exfat_super_ops;
+
+
+	ret = exfat_check_sb(sb);
+	if (ret)
+		goto fail;
+
+	/*
+	 * time granularity of FS for use by current_time(inode): in
+	 * nsec so 1000000000 for 1 sec granularity.
+	 */
+	sb->s_time_gran = 1000 * 1000 * 1000;
+
+	/*
+	 * vbr seems sane, fill sbi.
+	 */
+	sbi->sectorsize = (1 << sbi->vbr->bytes_per_sector);
+	sbi->clustersize = sbi->sectorsize *
+		(1 << sbi->vbr->sectors_per_cluster);
+
+	sbi->sectors_per_cluster = sbi->clustersize / sbi->sectorsize;
+
+	sbi->sectorbits = sbi->vbr->bytes_per_sector;
+	sbi->clusterbits = sbi->vbr->sectors_per_cluster + sbi->sectorbits;
+	sbi->sectormask = sbi->sectorsize - 1;
+	sbi->clustermask = sbi->clustersize - 1;
+
+
+	sbi->fat_offset = __le32_to_cpu(sbi->vbr->fat_offset);
+	sbi->fat_length = __le32_to_cpu(sbi->vbr->fat_length);
+
+	sbi->root_dir_cluster = __le32_to_cpu(sbi->vbr->cluster_root_dir);
+
+	sbi->cluster_heap_offset = __le32_to_cpu(sbi->vbr->cluster_heap_offset);
+	sbi->cluster_count = __le32_to_cpu(sbi->vbr->cluster_count);
+
+	sbi->dirty = !!(__le16_to_cpu(sbi->vbr->volume_flags) &
+			EXFAT_FLAG_DIRTY);
+
+	/*
+	 * now that we know sector size, reread superblock with
+	 * correct sector size.
+	 */
+	ret = -EIO;
+	if (sb->s_blocksize != sbi->sectorsize) {
+		if (!sb_set_blocksize(sb, sbi->sectorsize)) {
+			exfat_msg(sb, KERN_INFO, "bad block size %d.",
+				  sbi->sectorsize);
+			goto fail;
+		}
+
+		brelse(sbi->sb_bh);
+		sbi->vbr = NULL;
+
+		sbi->sb_bh = sb_bread(sb, 0);
+		if (!sbi->sb_bh)
+			goto fail;
+		sbi->vbr = (struct exfat_vbr*)sbi->sb_bh->b_data;
+	}
+
+	ret = exfat_check_sb_checksum(sb);
+	if (ret)
+		goto fail;
+
+	sb->s_maxbytes = exfat_file_max_byte(sbi);
+
+	ret = exfat_init_fat(sb);
+	if (ret)
+		goto fail;
+
+	for (i = 0 ; i < EXFAT_HASH_SIZE; ++i) {
+		INIT_HLIST_HEAD(&sbi->inode_hash[i]);
+	}
+
+	/*
+	 * create root inode.
+	 */
+	root = new_inode(sb);
+	if (!root) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = exfat_fill_root(sb, root);
+	if (ret)
+		goto fail_iput;
+
+	ret = exfat_upcase_init(root);
+	if (ret)
+		goto fail_iput;
+
+	ret = exfat_init_bitmap(root);
+	if (ret)
+		goto fail_iput;
+
+
+	sb->s_root = d_make_root(root);
+	if (!sb->s_root) {
+		/*
+		 * d_make_root() already drops the inode on failure, so
+		 * do not iput() it again here.
+		 */
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	exfat_set_sb_dirty(sb, true, false);
+	return 0;
+
+fail_iput:
+	iput(root);
+
+fail:
+	if (sbi->sb_bh)
+		brelse(sbi->sb_bh);
+	kfree(sbi);
+	sb->s_fs_info = NULL;
+	return ret;
+}
+
+/* .mount callback: standard block-device mount via exfat_fill_super. */
+static struct dentry *exfat_mount(struct file_system_type *fstype,
+				  int flags, const char *dev_name, void *data)
+{
+	return mount_bdev(fstype, flags, dev_name, data, exfat_fill_super);
+}
+
+/*
+ * .put_super callback: clear the on-disk dirty flag and release
+ * everything exfat_fill_super allocated.
+ */
+static void exfat_put_super(struct super_block *sb)
+{
+	struct exfat_sb_info *sbi;
+
+	sbi = EXFAT_SB(sb);
+	if (sbi) {
+		exfat_set_sb_dirty(sb, false, false);
+		exfat_exit_bitmap(sb);
+		brelse(sbi->sb_bh);
+		kfree(sbi->upcase_table);
+		kfree(sbi);
+	}
+}
+
+/*
+ * .statfs callback: report sizes in cluster units and a device-based
+ * filesystem id. bavail == bfree since there is no reserved space.
+ */
+static int exfat_statfs(struct dentry *dentry, struct kstatfs *kstat)
+{
+	struct super_block *sb = dentry->d_inode->i_sb;
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+
+	memset(kstat, 0, sizeof (*kstat));
+
+
+	kstat->f_bsize = sbi->clustersize;
+	kstat->f_blocks = sbi->cluster_count;
+	kstat->f_bfree = sbi->free_clusters;
+	kstat->f_bavail = sbi->free_clusters;
+	kstat->f_namelen = 255;
+	kstat->f_fsid.val[0] = (u32)id;
+	kstat->f_fsid.val[1] = (u32)(id >> 32);
+
+	return 0;
+}
+
+/* filesystem type registered with the VFS as "exfat" */
+static struct file_system_type exfat_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "exfat",
+	.mount		= exfat_mount,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+/*
+ * module init: verify the on-disk structure sizes at compile time
+ * (every directory entry is 32 bytes, the vbr one sector), set up the
+ * inode cache and register the filesystem.
+ */
+static int __init exfat_init(void)
+{
+	int error;
+
+	/* some sanity check on internal structure sizes */
+	BUILD_BUG_ON(sizeof (struct exfat_vbr) != 512);
+
+	BUILD_BUG_ON(sizeof (struct exfat_volume_label_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_bitmap_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_upcase_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_guid_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_padding_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_acl_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_filedir_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_stream_extension_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_filename_entry) != 0x20);
+
+	error = exfat_init_inodes();
+	if (error)
+		return error;
+
+
+	/* undo the inode cache setup if registration fails */
+	error = register_filesystem(&exfat_fs_type);
+	if (error)
+		exfat_exit_inodes();
+	return error;
+}
+
+/* module exit: unregister the filesystem and tear down inode cache. */
+static void __exit exfat_exit(void)
+{
+	unregister_filesystem(&exfat_fs_type);
+	exfat_exit_inodes();
+}
+
+module_init(exfat_init);
+module_exit(exfat_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
diff -Nruw linux-6.4-fbx/fs/exfat-fbx./time.c linux-6.4-fbx/fs/exfat-fbx/time.c
--- linux-6.4-fbx/fs/exfat-fbx./time.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/fs/exfat-fbx/time.c	2023-02-24 19:09:39.461804551 +0100
@@ -0,0 +1,126 @@
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+
+
+extern struct timezone sys_tz;
+
+/*
+ * The epoch of FAT timestamp is 1980.
+ *     :  bits :     value
+ * date:  0 -  4: day	(1 -  31)
+ * date:  5 -  8: month	(1 -  12)
+ * date:  9 - 15: year	(0 - 127) from 1980
+ * time:  0 -  4: sec	(0 -  29) 2sec counts
+ * time:  5 - 10: min	(0 -  59)
+ * time: 11 - 15: hour	(0 -  23)
+ */
+#define SECS_PER_MIN	60
+#define SECS_PER_HOUR	(60 * 60)
+#define SECS_PER_DAY	(SECS_PER_HOUR * 24)
+/* days between 1.1.70 and 1.1.80 (2 leap days) */
+#define DAYS_DELTA	(365 * 10 + 2)
+/* 120 (2100 - 1980) isn't leap year */
+#define YEAR_2100	120
+#define IS_LEAP_YEAR(y)	(!((y) & 3) && (y) != YEAR_2100)
+
+/* Linear day numbers of the respective 1sts in non-leap years. */
+static u32 days_in_year[] = {
+	/* Jan  Feb  Mar  Apr  May  Jun  Jul  Aug  Sep  Oct  Nov  Dec */
+	0,   0,  31,  59,  90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0,
+};
+
+/* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */
+void exfat_time_2unix(struct timespec64 *ts, u32 datetime, u8 time_cs,
+		      s8 tz_offset)
+{
+	u16 date = (datetime >> 16);
+	u16 time = (datetime & 0xffff);
+	time64_t second, day, leap_day, month, year;
+
+	year  = date >> 9;
+	month = max(1, (date >> 5) & 0xf);
+	day   = max(1, date & 0x1f) - 1;
+
+	if (((tz_offset & (1 << 6)) == 0))
+		tz_offset &= ~(1 << 7);
+
+	leap_day = (year + 3) / 4;
+	if (year > YEAR_2100)		/* 2100 isn't leap year */
+		leap_day--;
+	if (IS_LEAP_YEAR(year) && month > 2)
+		leap_day++;
+
+	second =  (time & 0x1f) << 1;
+	second += ((time >> 5) & 0x3f) * SECS_PER_MIN;
+	second += (time >> 11) * SECS_PER_HOUR;
+	second += (year * 365 + leap_day
+		   + days_in_year[month] + day
+		   + DAYS_DELTA) * SECS_PER_DAY;
+
+	second -= tz_offset * 15 * SECS_PER_MIN;
+
+	if (time_cs) {
+		ts->tv_sec = second + (time_cs / 100);
+		ts->tv_nsec = (time_cs % 100) * 10000000;
+	} else {
+		ts->tv_sec = second;
+		ts->tv_nsec = 0;
+	}
+}
+
+/* Convert linear UNIX date to a FAT time/date pair. */
+void exfat_time_2exfat(struct exfat_sb_info *sbi, struct timespec64 *ts,
+		       u32 *datetime, u8 *time_cs, s8 *tz_offset)
+{
+	struct tm tm;
+	u16 time;
+	u16 date;
+	int offset;
+
+	if (sbi->options.time_offset_set) {
+		offset = -sbi->options.time_offset;
+	} else
+		offset = sys_tz.tz_minuteswest;
+
+	time64_to_tm(ts->tv_sec, -offset * SECS_PER_MIN, &tm);
+
+	/*  FAT can only support year between 1980 to 2107 */
+	if (tm.tm_year < 1980 - 1900) {
+		time = 0;
+		date = cpu_to_le16((0 << 9) | (1 << 5) | 1);
+		if (time_cs)
+			*time_cs = 0;
+		*tz_offset = 0;
+		return;
+	}
+	if (tm.tm_year > 2107 - 1900) {
+		time = cpu_to_le16((23 << 11) | (59 << 5) | 29);
+		date = cpu_to_le16((127 << 9) | (12 << 5) | 31);
+		if (time_cs)
+			*time_cs = 199;
+		*tz_offset = 0;
+		return;
+	}
+
+	/* from 1900 -> from 1980 */
+	tm.tm_year -= 80;
+	/* 0~11 -> 1~12 */
+	tm.tm_mon++;
+	/* 0~59 -> 0~29(2sec counts) */
+	tm.tm_sec >>= 1;
+
+	time = cpu_to_le16(tm.tm_hour << 11 | tm.tm_min << 5 | tm.tm_sec);
+	date = cpu_to_le16(tm.tm_year << 9 | tm.tm_mon << 5 | tm.tm_mday);
+
+	*datetime = (date << 16) | time;
+
+	if (time_cs)
+		*time_cs = (ts->tv_sec & 1) * 100 + ts->tv_nsec / 10000000;
+	*tz_offset = -offset / 15;
+	*tz_offset |= (1 << 7);
+}
diff -Nruw linux-6.4/fs/exfat-fbx/upcase.c linux-6.4-fbx/fs/exfat-fbx/upcase.c
--- linux-6.4/fs/exfat-fbx/upcase.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/fs/exfat-fbx/upcase.c	2023-02-24 19:09:26.021439198 +0100
@@ -0,0 +1,137 @@
+/*
+ * upcase.c for exfat
+ * Created by <nschichan@freebox.fr> on Wed Aug  7 11:51:37 2013
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+static u32 exfat_calc_upcase_checksum(const u8 *data, u32 checksum,
+				      size_t count)
+{
+	while (count) {
+		checksum = ((checksum << 31) | (checksum >> 1)) + *data;
+		--count;
+		++data;
+	}
+	return checksum;
+}
+
+static int exfat_load_upcase_table(struct super_block *sb, u32 disk_cluster,
+				   u32 *out_checksum)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct buffer_head *bh;
+	sector_t start, sect, end;
+	u32 off = 0;
+	u32 byte_len = sbi->upcase_len * sizeof (__le16);
+	u32 checksum = 0;
+
+	/*
+	 * up-case table are not fragmented, so sequential cluster
+	 * read will do here.
+	 */
+	start = exfat_cluster_sector(sbi, disk_cluster);
+	end = start + DIV_ROUND_UP(byte_len,
+			   sbi->sectorsize);
+	for (sect = start; sect < end; ++sect) {
+		u32 len = sbi->sectorsize;
+
+		if (sect == end - 1)
+			len = byte_len & sbi->sectormask;
+
+		bh = sb_bread(sb, sect);
+		if (!bh) {
+			exfat_msg(sb, KERN_ERR,
+				  "unable to read upcase sector %llu",
+				  (unsigned long long)sect);
+			return -EIO;
+		}
+		memcpy((u8*)sbi->upcase_table + off, bh->b_data,
+		       len);
+
+		checksum = exfat_calc_upcase_checksum(bh->b_data, checksum,
+						      len);
+
+		off += len;
+		brelse(bh);
+	}
+
+	BUG_ON(off != byte_len);
+	*out_checksum = checksum;
+	return 0;
+}
+
+int exfat_upcase_init(struct inode *root)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(root->i_sb);
+	struct exfat_upcase_entry *upcase;
+	struct exfat_dir_ctx dctx;
+	int error;
+	u64 upcase_length;
+	u32 checksum;
+
+	/*
+	 * configure directory context and look for an upcase table
+	 * entry.
+	 */
+	if (exfat_init_dir_ctx(root, &dctx, 0) < 0)
+		return -EIO;
+
+	error = -EIO;
+	upcase = __exfat_dentry_next(&dctx, E_EXFAT_UPCASE_TABLE, 0xff,
+				     true, NULL);
+	if (!upcase)
+		goto fail;
+
+	/*
+	 * check upcase table length. we need it to be non-zero,
+	 * ending on a __le16 boundary and provide at most a
+	 * conversion for the whole __le16 space.
+	 */
+	upcase_length = __le64_to_cpu(upcase->length);
+	if (upcase_length == 0 ||
+	    upcase_length & (sizeof (__le16) - 1) ||
+	    upcase_length > 0xffff * sizeof (__le16)) {
+		exfat_msg(root->i_sb, KERN_ERR, "invalid upcase length %llu",
+			  (unsigned long long)upcase_length);
+		goto fail;
+	}
+
+	/*
+	 * load complete upcase table in memory.
+	 */
+	error = -ENOMEM;
+	sbi->upcase_len = upcase_length / sizeof (__le16);
+	sbi->upcase_table = kmalloc(upcase_length, GFP_NOFS);
+	if (!sbi->upcase_table)
+		goto fail;
+
+	error = exfat_load_upcase_table(root->i_sb,
+					__le32_to_cpu(upcase->cluster_addr),
+					&checksum);
+	if (error)
+		goto fail;
+
+	if (checksum != __le32_to_cpu(upcase->checksum)) {
+		exfat_msg(root->i_sb, KERN_INFO,
+			  "upcase table checksum mismatch: have %08x, "
+			  "expect %08x", checksum,
+			  __le32_to_cpu(upcase->checksum));
+		error = -EINVAL;
+		goto fail;
+	}
+
+	exfat_cleanup_dir_ctx(&dctx);
+	return 0;
+
+fail:
+	if (sbi->upcase_table)
+		kfree(sbi->upcase_table);
+	exfat_cleanup_dir_ctx(&dctx);
+	return error;
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/fs/smb/server/netmisc.c	2023-11-07 13:38:44.042256145 +0100
@@ -0,0 +1,606 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (c) International Business Machines  Corp., 2002,2008
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   Error mapping routines from Samba libsmb/errormap.c
+ *   Copyright (C) Andrew Tridgell 2001
+ */
+
+#include "glob.h"
+#include "smberr.h"
+#include "nterr.h"
+#include "smb_common.h"
+
+/*****************************************************************************
+ * convert a NT status code to a dos class/code
+ *****************************************************************************/
+/* NT status -> dos error map */
+static const struct {
+	__u8 dos_class;
+	__u16 dos_code;
+	__u32 ntstatus;
+} ntstatus_to_dos_map[] = {
+	{
+	ERRDOS, ERRgeneral, NT_STATUS_UNSUCCESSFUL}, {
+	ERRDOS, ERRbadfunc, NT_STATUS_NOT_IMPLEMENTED}, {
+	ERRDOS, ERRinvlevel, NT_STATUS_INVALID_INFO_CLASS}, {
+	ERRDOS, 24, NT_STATUS_INFO_LENGTH_MISMATCH}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ACCESS_VIOLATION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_IN_PAGE_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_QUOTA}, {
+	ERRDOS, ERRbadfid, NT_STATUS_INVALID_HANDLE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_INITIAL_STACK}, {
+	ERRDOS, 193, NT_STATUS_BAD_INITIAL_PC}, {
+	ERRDOS, 87, NT_STATUS_INVALID_CID}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TIMER_NOT_CANCELED}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER}, {
+	ERRDOS, ERRbadfile, NT_STATUS_NO_SUCH_DEVICE}, {
+	ERRDOS, ERRbadfile, NT_STATUS_NO_SUCH_FILE}, {
+	ERRDOS, ERRbadfunc, NT_STATUS_INVALID_DEVICE_REQUEST}, {
+	ERRDOS, 38, NT_STATUS_END_OF_FILE}, {
+	ERRDOS, 34, NT_STATUS_WRONG_VOLUME}, {
+	ERRDOS, 21, NT_STATUS_NO_MEDIA_IN_DEVICE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNRECOGNIZED_MEDIA}, {
+	ERRDOS, 27, NT_STATUS_NONEXISTENT_SECTOR},
+/*	{ This NT error code was 'sqashed'
+ *	 from NT_STATUS_MORE_PROCESSING_REQUIRED to NT_STATUS_OK
+ *	 during the session setup }
+ */
+	{
+	ERRDOS, ERRnomem, NT_STATUS_NO_MEMORY}, {
+	ERRDOS, 487, NT_STATUS_CONFLICTING_ADDRESSES}, {
+	ERRDOS, 487, NT_STATUS_NOT_MAPPED_VIEW}, {
+	ERRDOS, 87, NT_STATUS_UNABLE_TO_FREE_VM}, {
+	ERRDOS, 87, NT_STATUS_UNABLE_TO_DELETE_SECTION}, {
+	ERRDOS, 2142, NT_STATUS_INVALID_SYSTEM_SERVICE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_INSTRUCTION}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_INVALID_LOCK_SEQUENCE}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_INVALID_VIEW_SIZE}, {
+	ERRDOS, 193, NT_STATUS_INVALID_FILE_FOR_SECTION}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_ALREADY_COMMITTED},
+/*	{ This NT error code was 'sqashed'
+ *	 from NT_STATUS_ACCESS_DENIED to NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE
+ *	 during the session setup }
+ */
+	{
+	ERRDOS, ERRnoaccess, NT_STATUS_ACCESS_DENIED}, {
+	ERRDOS, 111, NT_STATUS_BUFFER_TOO_SMALL}, {
+	ERRDOS, ERRbadfid, NT_STATUS_OBJECT_TYPE_MISMATCH}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NONCONTINUABLE_EXCEPTION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_DISPOSITION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNWIND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_STACK}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_UNWIND_TARGET}, {
+	ERRDOS, 158, NT_STATUS_NOT_LOCKED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PARITY_ERROR}, {
+	ERRDOS, 487, NT_STATUS_UNABLE_TO_DECOMMIT_VM}, {
+	ERRDOS, 487, NT_STATUS_NOT_COMMITTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_PORT_ATTRIBUTES}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PORT_MESSAGE_TOO_LONG}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_MIX}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_QUOTA_LOWER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DISK_CORRUPT_ERROR}, {
+	/* mapping changed since shell does lookup on * expects FileNotFound */
+	ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_INVALID}, {
+	ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_NOT_FOUND}, {
+	ERRDOS, ERRalreadyexists, NT_STATUS_OBJECT_NAME_COLLISION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_HANDLE_NOT_WAITABLE}, {
+	ERRDOS, ERRbadfid, NT_STATUS_PORT_DISCONNECTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DEVICE_ALREADY_ATTACHED}, {
+	ERRDOS, 161, NT_STATUS_OBJECT_PATH_INVALID}, {
+	ERRDOS, ERRbadpath, NT_STATUS_OBJECT_PATH_NOT_FOUND}, {
+	ERRDOS, 161, NT_STATUS_OBJECT_PATH_SYNTAX_BAD}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DATA_OVERRUN}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DATA_LATE_ERROR}, {
+	ERRDOS, 23, NT_STATUS_DATA_ERROR}, {
+	ERRDOS, 23, NT_STATUS_CRC_ERROR}, {
+	ERRDOS, ERRnomem, NT_STATUS_SECTION_TOO_BIG}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_PORT_CONNECTION_REFUSED}, {
+	ERRDOS, ERRbadfid, NT_STATUS_INVALID_PORT_HANDLE}, {
+	ERRDOS, ERRbadshare, NT_STATUS_SHARING_VIOLATION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_QUOTA_EXCEEDED}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PAGE_PROTECTION}, {
+	ERRDOS, 288, NT_STATUS_MUTANT_NOT_OWNED}, {
+	ERRDOS, 298, NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED}, {
+	ERRDOS, 87, NT_STATUS_PORT_ALREADY_SET}, {
+	ERRDOS, 87, NT_STATUS_SECTION_NOT_IMAGE}, {
+	ERRDOS, 156, NT_STATUS_SUSPEND_COUNT_EXCEEDED}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_THREAD_IS_TERMINATING}, {
+	ERRDOS, 87, NT_STATUS_BAD_WORKING_SET_LIMIT}, {
+	ERRDOS, 87, NT_STATUS_INCOMPATIBLE_FILE_MAP}, {
+	ERRDOS, 87, NT_STATUS_SECTION_PROTECTION}, {
+	ERRDOS, ERReasnotsupported, NT_STATUS_EAS_NOT_SUPPORTED}, {
+	ERRDOS, 255, NT_STATUS_EA_TOO_LARGE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NONEXISTENT_EA_ENTRY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_EAS_ON_FILE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_EA_CORRUPT_ERROR}, {
+	ERRDOS, ERRlock, NT_STATUS_FILE_LOCK_CONFLICT}, {
+	ERRDOS, ERRlock, NT_STATUS_LOCK_NOT_GRANTED}, {
+	ERRDOS, ERRbadfile, NT_STATUS_DELETE_PENDING}, {
+	ERRDOS, ERRunsup, NT_STATUS_CTL_FILE_NOT_SUPPORTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNKNOWN_REVISION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_REVISION_MISMATCH}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_OWNER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_PRIMARY_GROUP}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_IMPERSONATION_TOKEN}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CANT_DISABLE_MANDATORY}, {
+	ERRDOS, 2215, NT_STATUS_NO_LOGON_SERVERS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_LOGON_SESSION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_PRIVILEGE}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_PRIVILEGE_NOT_HELD}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ACCOUNT_NAME}, {
+	ERRHRD, ERRgeneral, NT_STATUS_USER_EXISTS},
+/*	{ This NT error code was 'sqashed'
+ *	 from NT_STATUS_NO_SUCH_USER to NT_STATUS_LOGON_FAILURE
+ *	 during the session setup }
+ */
+	{
+	ERRDOS, ERRnoaccess, NT_STATUS_NO_SUCH_USER}, { /* could map to 2238 */
+	ERRHRD, ERRgeneral, NT_STATUS_GROUP_EXISTS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_GROUP}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MEMBER_IN_GROUP}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MEMBER_NOT_IN_GROUP}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LAST_ADMIN},
+/*	{ This NT error code was 'sqashed'
+ *	 from NT_STATUS_WRONG_PASSWORD to NT_STATUS_LOGON_FAILURE
+ *	 during the session setup }
+ */
+	{
+	ERRSRV, ERRbadpw, NT_STATUS_WRONG_PASSWORD}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ILL_FORMED_PASSWORD}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PASSWORD_RESTRICTION}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_LOGON_FAILURE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ACCOUNT_RESTRICTION}, {
+	ERRSRV, ERRbadLogonTime, NT_STATUS_INVALID_LOGON_HOURS}, {
+	ERRSRV, ERRbadclient, NT_STATUS_INVALID_WORKSTATION}, {
+	ERRSRV, ERRpasswordExpired, NT_STATUS_PASSWORD_EXPIRED}, {
+	ERRSRV, ERRaccountexpired, NT_STATUS_ACCOUNT_DISABLED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NONE_MAPPED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_LUIDS_REQUESTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LUIDS_EXHAUSTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_SUB_AUTHORITY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ACL}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_SID}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_SECURITY_DESCR}, {
+	ERRDOS, 127, NT_STATUS_PROCEDURE_NOT_FOUND}, {
+	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_FORMAT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_TOKEN}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_INHERITANCE_ACL}, {
+	ERRDOS, 158, NT_STATUS_RANGE_NOT_LOCKED}, {
+	ERRDOS, 112, NT_STATUS_DISK_FULL}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SERVER_DISABLED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SERVER_NOT_DISABLED}, {
+	ERRDOS, 68, NT_STATUS_TOO_MANY_GUIDS_REQUESTED}, {
+	ERRDOS, 259, NT_STATUS_GUIDS_EXHAUSTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ID_AUTHORITY}, {
+	ERRDOS, 259, NT_STATUS_AGENTS_EXHAUSTED}, {
+	ERRDOS, 154, NT_STATUS_INVALID_VOLUME_LABEL}, {
+	ERRDOS, 14, NT_STATUS_SECTION_NOT_EXTENDED}, {
+	ERRDOS, 487, NT_STATUS_NOT_MAPPED_DATA}, {
+	ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_DATA_NOT_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_TYPE_NOT_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_NAME_NOT_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ARRAY_BOUNDS_EXCEEDED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_DENORMAL_OPERAND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_DIVIDE_BY_ZERO}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_INEXACT_RESULT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_INVALID_OPERATION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_OVERFLOW}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_STACK_CHECK}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_UNDERFLOW}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INTEGER_DIVIDE_BY_ZERO}, {
+	ERRDOS, 534, NT_STATUS_INTEGER_OVERFLOW}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PRIVILEGED_INSTRUCTION}, {
+	ERRDOS, ERRnomem, NT_STATUS_TOO_MANY_PAGING_FILES}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FILE_INVALID}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ALLOTTED_SPACE_EXCEEDED},
+/*	{ This NT error code was 'sqashed'
+ *	 from NT_STATUS_INSUFFICIENT_RESOURCES to
+ *	 NT_STATUS_INSUFF_SERVER_RESOURCES during the session setup }
+ */
+	{
+	ERRDOS, ERRnoresource, NT_STATUS_INSUFFICIENT_RESOURCES}, {
+	ERRDOS, ERRbadpath, NT_STATUS_DFS_EXIT_PATH_FOUND}, {
+	ERRDOS, 23, NT_STATUS_DEVICE_DATA_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DEVICE_NOT_CONNECTED}, {
+	ERRDOS, 21, NT_STATUS_DEVICE_POWER_FAILURE}, {
+	ERRDOS, 487, NT_STATUS_FREE_VM_NOT_AT_BASE}, {
+	ERRDOS, 487, NT_STATUS_MEMORY_NOT_ALLOCATED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_WORKING_SET_QUOTA}, {
+	ERRDOS, 19, NT_STATUS_MEDIA_WRITE_PROTECTED}, {
+	ERRDOS, 21, NT_STATUS_DEVICE_NOT_READY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_GROUP_ATTRIBUTES}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_IMPERSONATION_LEVEL}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CANT_OPEN_ANONYMOUS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_VALIDATION_CLASS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_TOKEN_TYPE}, {
+	ERRDOS, 87, NT_STATUS_BAD_MASTER_BOOT_RECORD}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INSTRUCTION_MISALIGNMENT}, {
+	ERRDOS, ERRpipebusy, NT_STATUS_INSTANCE_NOT_AVAILABLE}, {
+	ERRDOS, ERRpipebusy, NT_STATUS_PIPE_NOT_AVAILABLE}, {
+	ERRDOS, ERRbadpipe, NT_STATUS_INVALID_PIPE_STATE}, {
+	ERRDOS, ERRpipebusy, NT_STATUS_PIPE_BUSY}, {
+	ERRDOS, ERRbadfunc, NT_STATUS_ILLEGAL_FUNCTION}, {
+	ERRDOS, ERRnotconnected, NT_STATUS_PIPE_DISCONNECTED}, {
+	ERRDOS, ERRpipeclosing, NT_STATUS_PIPE_CLOSING}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PIPE_CONNECTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PIPE_LISTENING}, {
+	ERRDOS, ERRbadpipe, NT_STATUS_INVALID_READ_MODE}, {
+	ERRDOS, 121, NT_STATUS_IO_TIMEOUT}, {
+	ERRDOS, 38, NT_STATUS_FILE_FORCED_CLOSED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PROFILING_NOT_STARTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PROFILING_NOT_STOPPED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_COULD_NOT_INTERPRET}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_FILE_IS_A_DIRECTORY}, {
+	ERRDOS, ERRunsup, NT_STATUS_NOT_SUPPORTED}, {
+	ERRDOS, 51, NT_STATUS_REMOTE_NOT_LISTENING}, {
+	ERRDOS, 52, NT_STATUS_DUPLICATE_NAME}, {
+	ERRDOS, 53, NT_STATUS_BAD_NETWORK_PATH}, {
+	ERRDOS, 54, NT_STATUS_NETWORK_BUSY}, {
+	ERRDOS, 55, NT_STATUS_DEVICE_DOES_NOT_EXIST}, {
+	ERRDOS, 56, NT_STATUS_TOO_MANY_COMMANDS}, {
+	ERRDOS, 57, NT_STATUS_ADAPTER_HARDWARE_ERROR}, {
+	ERRDOS, 58, NT_STATUS_INVALID_NETWORK_RESPONSE}, {
+	ERRDOS, 59, NT_STATUS_UNEXPECTED_NETWORK_ERROR}, {
+	ERRDOS, 60, NT_STATUS_BAD_REMOTE_ADAPTER}, {
+	ERRDOS, 61, NT_STATUS_PRINT_QUEUE_FULL}, {
+	ERRDOS, 62, NT_STATUS_NO_SPOOL_SPACE}, {
+	ERRDOS, 63, NT_STATUS_PRINT_CANCELLED}, {
+	ERRDOS, 64, NT_STATUS_NETWORK_NAME_DELETED}, {
+	ERRDOS, 65, NT_STATUS_NETWORK_ACCESS_DENIED}, {
+	ERRDOS, 66, NT_STATUS_BAD_DEVICE_TYPE}, {
+	ERRDOS, ERRnosuchshare, NT_STATUS_BAD_NETWORK_NAME}, {
+	ERRDOS, 68, NT_STATUS_TOO_MANY_NAMES}, {
+	ERRDOS, 69, NT_STATUS_TOO_MANY_SESSIONS}, {
+	ERRDOS, 70, NT_STATUS_SHARING_PAUSED}, {
+	ERRDOS, 71, NT_STATUS_REQUEST_NOT_ACCEPTED}, {
+	ERRDOS, 72, NT_STATUS_REDIRECTOR_PAUSED}, {
+	ERRDOS, 88, NT_STATUS_NET_WRITE_FAULT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PROFILING_AT_LIMIT}, {
+	ERRDOS, ERRdiffdevice, NT_STATUS_NOT_SAME_DEVICE}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_FILE_RENAMED}, {
+	ERRDOS, 240, NT_STATUS_VIRTUAL_CIRCUIT_CLOSED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_SECURITY_ON_OBJECT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CANT_WAIT}, {
+	ERRDOS, ERRpipeclosing, NT_STATUS_PIPE_EMPTY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CANT_ACCESS_DOMAIN_INFO}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CANT_TERMINATE_SELF}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_SERVER_STATE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_DOMAIN_STATE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_DOMAIN_ROLE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_DOMAIN}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_EXISTS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_LIMIT_EXCEEDED}, {
+	ERRDOS, 300, NT_STATUS_OPLOCK_NOT_GRANTED}, {
+	ERRDOS, 301, NT_STATUS_INVALID_OPLOCK_PROTOCOL}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_DB_CORRUPTION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_GENERIC_NOT_MAPPED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_DESCRIPTOR_FORMAT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_USER_BUFFER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_IO_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_CREATE_ERR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_MAP_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_EXTEND_ERR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NOT_LOGON_PROCESS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LOGON_SESSION_EXISTS}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_1}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_2}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_3}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_4}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_5}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_6}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_7}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_8}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_9}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_10}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_11}, {
+	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_12}, {
+	ERRDOS, ERRbadpath, NT_STATUS_REDIRECTOR_NOT_STARTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_REDIRECTOR_STARTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_STACK_OVERFLOW}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_PACKAGE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_FUNCTION_TABLE}, {
+	ERRDOS, 203, 0xc0000100}, {
+	ERRDOS, 145, NT_STATUS_DIRECTORY_NOT_EMPTY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FILE_CORRUPT_ERROR}, {
+	ERRDOS, 267, NT_STATUS_NOT_A_DIRECTORY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_LOGON_SESSION_STATE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LOGON_SESSION_COLLISION}, {
+	ERRDOS, 206, NT_STATUS_NAME_TOO_LONG}, {
+	ERRDOS, 2401, NT_STATUS_FILES_OPEN}, {
+	ERRDOS, 2404, NT_STATUS_CONNECTION_IN_USE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MESSAGE_NOT_FOUND}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_PROCESS_IS_TERMINATING}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_LOGON_TYPE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_GUID_TRANSLATION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CANNOT_IMPERSONATE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_IMAGE_ALREADY_LOADED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_NOT_PRESENT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_LID_NOT_EXIST}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_LID_ALREADY_OWNED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_NOT_LID_OWNER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_COMMAND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_LID}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_SELECTOR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_LDT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_SIZE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_OFFSET}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_DESCRIPTOR}, {
+	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_NE_FORMAT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_RXACT_INVALID_STATE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_RXACT_COMMIT_FAILURE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MAPPED_FILE_SIZE_ZERO}, {
+	ERRDOS, ERRnofids, NT_STATUS_TOO_MANY_OPENED_FILES}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CANCELLED}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_CANNOT_DELETE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_COMPUTER_NAME}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_FILE_DELETED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_ACCOUNT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_GROUP}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_USER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MEMBERS_PRIMARY_GROUP}, {
+	ERRDOS, ERRbadfid, NT_STATUS_FILE_CLOSED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_THREADS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_THREAD_NOT_IN_PROCESS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TOKEN_ALREADY_IN_USE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_QUOTA_EXCEEDED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_COMMITMENT_LIMIT}, {
+	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_LE_FORMAT}, {
+	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_NOT_MZ}, {
+	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_PROTECT}, {
+	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_WIN_16}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LOGON_SERVER_CONFLICT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TIME_DIFFERENCE_AT_DC}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SYNCHRONIZATION_REQUIRED}, {
+	ERRDOS, 126, NT_STATUS_DLL_NOT_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_OPEN_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_IO_PRIVILEGE_FAILED}, {
+	ERRDOS, 182, NT_STATUS_ORDINAL_NOT_FOUND}, {
+	ERRDOS, 127, NT_STATUS_ENTRYPOINT_NOT_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CONTROL_C_EXIT}, {
+	ERRDOS, 64, NT_STATUS_LOCAL_DISCONNECT}, {
+	ERRDOS, 64, NT_STATUS_REMOTE_DISCONNECT}, {
+	ERRDOS, 51, NT_STATUS_REMOTE_RESOURCES}, {
+	ERRDOS, 59, NT_STATUS_LINK_FAILED}, {
+	ERRDOS, 59, NT_STATUS_LINK_TIMEOUT}, {
+	ERRDOS, 59, NT_STATUS_INVALID_CONNECTION}, {
+	ERRDOS, 59, NT_STATUS_INVALID_ADDRESS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DLL_INIT_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MISSING_SYSTEMFILE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNHANDLED_EXCEPTION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_APP_INIT_FAILURE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_CREATE_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_PAGEFILE}, {
+	ERRDOS, 124, NT_STATUS_INVALID_LEVEL}, {
+	ERRDOS, 86, NT_STATUS_WRONG_PASSWORD_CORE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_FLOAT_CONTEXT}, {
+	ERRDOS, 109, NT_STATUS_PIPE_BROKEN}, {
+	ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_CORRUPT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_IO_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_EVENT_PAIR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNRECOGNIZED_VOLUME}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SERIAL_NO_DEVICE_INITED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_ALIAS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MEMBER_NOT_IN_ALIAS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MEMBER_IN_ALIAS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ALIAS_EXISTS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LOGON_NOT_GRANTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_SECRETS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SECRET_TOO_LONG}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_DB_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FULLSCREEN_MODE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_CONTEXT_IDS}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_LOGON_TYPE_NOT_GRANTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NOT_REGISTRY_FILE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FT_MISSING_MEMBER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ILL_FORMED_SERVICE_ENTRY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_CHARACTER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNMAPPABLE_CHARACTER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNDEFINED_CHARACTER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_VOLUME}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_WRONG_CYLINDER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_UNKNOWN_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_BAD_REGISTERS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DISK_RECALIBRATE_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DISK_OPERATION_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DISK_RESET_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SHARED_IRQ_BUSY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FT_ORPHANING}, {
+	ERRHRD, ERRgeneral, 0xc000016e}, {
+	ERRHRD, ERRgeneral, 0xc000016f}, {
+	ERRHRD, ERRgeneral, 0xc0000170}, {
+	ERRHRD, ERRgeneral, 0xc0000171}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PARTITION_FAILURE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_BLOCK_LENGTH}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DEVICE_NOT_PARTITIONED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNABLE_TO_LOCK_MEDIA}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNABLE_TO_UNLOAD_MEDIA}, {
+	ERRHRD, ERRgeneral, NT_STATUS_EOM_OVERFLOW}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_MEDIA}, {
+	ERRHRD, ERRgeneral, 0xc0000179}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_MEMBER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_MEMBER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_KEY_DELETED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_LOG_SPACE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_SIDS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_KEY_HAS_CHILDREN}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CHILD_MUST_BE_VOLATILE}, {
+	ERRDOS, 87, NT_STATUS_DEVICE_CONFIGURATION_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DRIVER_INTERNAL_ERROR}, {
+	ERRDOS, 22, NT_STATUS_INVALID_DEVICE_STATE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_IO_DEVICE_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DEVICE_PROTOCOL_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BACKUP_CONTROLLER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LOG_FILE_FULL}, {
+	ERRDOS, 19, NT_STATUS_TOO_LATE}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_LSA_SECRET},
+/*	{ This NT error code was 'sqashed'
+ *	 from NT_STATUS_NO_TRUST_SAM_ACCOUNT to
+ *	 NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE during the session setup }
+ */
+	{
+	ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_SAM_ACCOUNT}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_TRUSTED_DOMAIN_FAILURE}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_FILE_CORRUPT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_CANT_START}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_TRUST_FAILURE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MUTANT_LIMIT_EXCEEDED}, {
+	ERRDOS, ERRnetlogonNotStarted, NT_STATUS_NETLOGON_NOT_STARTED}, {
+	ERRSRV, ERRaccountexpired, NT_STATUS_ACCOUNT_EXPIRED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_POSSIBLE_DEADLOCK}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NETWORK_CREDENTIAL_CONFLICT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_REMOTE_SESSION_LIMIT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_FILE_CHANGED}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT},
+/*	{ This NT error code was 'sqashed'
+ *	 from NT_STATUS_DOMAIN_TRUST_INCONSISTENT to NT_STATUS_LOGON_FAILURE
+ *	 during the session setup }
+ */
+	{
+	ERRDOS, ERRnoaccess, NT_STATUS_DOMAIN_TRUST_INCONSISTENT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FS_DRIVER_REQUIRED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_USER_SESSION_KEY}, {
+	ERRDOS, 59, NT_STATUS_USER_SESSION_DELETED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_LANG_NOT_FOUND}, {
+	ERRDOS, ERRnoresource, NT_STATUS_INSUFF_SERVER_RESOURCES}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_BUFFER_SIZE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ADDRESS_COMPONENT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ADDRESS_WILDCARD}, {
+	ERRDOS, 68, NT_STATUS_TOO_MANY_ADDRESSES}, {
+	ERRDOS, 52, NT_STATUS_ADDRESS_ALREADY_EXISTS}, {
+	ERRDOS, 64, NT_STATUS_ADDRESS_CLOSED}, {
+	ERRDOS, 64, NT_STATUS_CONNECTION_DISCONNECTED}, {
+	ERRDOS, 64, NT_STATUS_CONNECTION_RESET}, {
+	ERRDOS, 68, NT_STATUS_TOO_MANY_NODES}, {
+	ERRDOS, 59, NT_STATUS_TRANSACTION_ABORTED}, {
+	ERRDOS, 59, NT_STATUS_TRANSACTION_TIMED_OUT}, {
+	ERRDOS, 59, NT_STATUS_TRANSACTION_NO_RELEASE}, {
+	ERRDOS, 59, NT_STATUS_TRANSACTION_NO_MATCH}, {
+	ERRDOS, 59, NT_STATUS_TRANSACTION_RESPONDED}, {
+	ERRDOS, 59, NT_STATUS_TRANSACTION_INVALID_ID}, {
+	ERRDOS, 59, NT_STATUS_TRANSACTION_INVALID_TYPE}, {
+	ERRDOS, ERRunsup, NT_STATUS_NOT_SERVER_SESSION}, {
+	ERRDOS, ERRunsup, NT_STATUS_NOT_CLIENT_SESSION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CANNOT_LOAD_REGISTRY_FILE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DEBUG_ATTACH_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_SYSTEM_PROCESS_TERMINATED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DATA_NOT_ACCEPTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_BROWSER_SERVERS_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_VDM_HARD_ERROR}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DRIVER_CANCEL_TIMEOUT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_REPLY_MESSAGE_MISMATCH}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MAPPED_ALIGNMENT}, {
+	ERRDOS, 193, NT_STATUS_IMAGE_CHECKSUM_MISMATCH}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LOST_WRITEBEHIND_DATA}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID}, {
+	ERRSRV, ERRpasswordExpired, NT_STATUS_PASSWORD_MUST_CHANGE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NOT_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NOT_TINY_STREAM}, {
+	ERRHRD, ERRgeneral, NT_STATUS_RECOVERY_FAILURE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_STACK_OVERFLOW_READ}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FAIL_CHECK}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DUPLICATE_OBJECTID}, {
+	ERRHRD, ERRgeneral, NT_STATUS_OBJECTID_EXISTS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CONVERT_TO_LARGE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_RETRY}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FOUND_OUT_OF_SCOPE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ALLOCATE_BUCKET}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PROPSET_NOT_FOUND}, {
+	ERRHRD, ERRgeneral, NT_STATUS_MARSHALL_OVERFLOW}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_VARIANT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND}, {
+	ERRDOS, ERRnoaccess, NT_STATUS_ACCOUNT_LOCKED_OUT}, {
+	ERRDOS, ERRbadfid, NT_STATUS_HANDLE_NOT_CLOSABLE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_REFUSED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_GRACEFUL_DISCONNECT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ADDRESS_ALREADY_ASSOCIATED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_ADDRESS_NOT_ASSOCIATED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_INVALID}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_ACTIVE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NETWORK_UNREACHABLE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_HOST_UNREACHABLE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PROTOCOL_UNREACHABLE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PORT_UNREACHABLE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_REQUEST_ABORTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_ABORTED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_COMPRESSION_BUFFER}, {
+	ERRHRD, ERRgeneral, NT_STATUS_USER_MAPPED_FILE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_AUDIT_FAILED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_TIMER_RESOLUTION_NOT_SET}, {
+	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_COUNT_LIMIT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LOGIN_TIME_RESTRICTION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LOGIN_WKSTA_RESTRICTION}, {
+	ERRDOS, 193, NT_STATUS_IMAGE_MP_UP_MISMATCH}, {
+	ERRHRD, ERRgeneral, 0xc000024a}, {
+	ERRHRD, ERRgeneral, 0xc000024b}, {
+	ERRHRD, ERRgeneral, 0xc000024c}, {
+	ERRHRD, ERRgeneral, 0xc000024d}, {
+	ERRHRD, ERRgeneral, 0xc000024e}, {
+	ERRHRD, ERRgeneral, 0xc000024f}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INSUFFICIENT_LOGON_INFO}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_DLL_ENTRYPOINT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_BAD_SERVICE_ENTRYPOINT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LPC_REPLY_LOST}, {
+	ERRHRD, ERRgeneral, NT_STATUS_IP_ADDRESS_CONFLICT1}, {
+	ERRHRD, ERRgeneral, NT_STATUS_IP_ADDRESS_CONFLICT2}, {
+	ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_QUOTA_LIMIT}, {
+	ERRSRV, 3, NT_STATUS_PATH_NOT_COVERED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_NO_CALLBACK_ACTIVE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_LICENSE_QUOTA_EXCEEDED}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PWD_TOO_SHORT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PWD_TOO_RECENT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PWD_HISTORY_CONFLICT}, {
+	ERRHRD, ERRgeneral, 0xc000025d}, {
+	ERRHRD, ERRgeneral, NT_STATUS_PLUGPLAY_NO_DEVICE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_UNSUPPORTED_COMPRESSION}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_HW_PROFILE}, {
+	ERRHRD, ERRgeneral, NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH}, {
+	ERRDOS, 182, NT_STATUS_DRIVER_ORDINAL_NOT_FOUND}, {
+	ERRDOS, 127, NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND}, {
+	ERRDOS, 288, NT_STATUS_RESOURCE_NOT_OWNED}, {
+	ERRDOS, ErrTooManyLinks, NT_STATUS_TOO_MANY_LINKS}, {
+	ERRHRD, ERRgeneral, NT_STATUS_QUOTA_LIST_INCONSISTENT}, {
+	ERRHRD, ERRgeneral, NT_STATUS_FILE_IS_OFFLINE}, {
+	ERRDOS, 21, 0xc000026e}, {
+	ERRDOS, 161, 0xc0000281}, {
+	ERRDOS, ERRnoaccess, 0xc000028a}, {
+	ERRDOS, ERRnoaccess, 0xc000028b}, {
+	ERRHRD, ERRgeneral, 0xc000028c}, {
+	ERRDOS, ERRnoaccess, 0xc000028d}, {
+	ERRDOS, ERRnoaccess, 0xc000028e}, {
+	ERRDOS, ERRnoaccess, 0xc000028f}, {
+	ERRDOS, ERRnoaccess, 0xc0000290}, {
+	ERRDOS, ERRbadfunc, 0xc000029c}, {
+	ERRDOS, ERRsymlink, NT_STATUS_STOPPED_ON_SYMLINK}, {
+	ERRDOS, ERRinvlevel, 0x007c0001}, };
+
+void
+ntstatus_to_dos(__le32 ntstatus, __u8 *eclass, __le16 *ecode)
+{
+	int i;
+
+	if (ntstatus == 0) {
+		*eclass = 0;
+		*ecode = 0;
+		return;
+	}
+	for (i = 0; ntstatus_to_dos_map[i].ntstatus; i++) {
+		if (le32_to_cpu(ntstatus) == ntstatus_to_dos_map[i].ntstatus) {
+			*eclass = ntstatus_to_dos_map[i].dos_class;
+			*ecode = cpu_to_le16(ntstatus_to_dos_map[i].dos_code);
+			return;
+		}
+	}
+	*eclass = ERRHRD;
+	*ecode = cpu_to_le16(ERRgeneral);
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/fs/smb/server/smb1misc.c	2023-11-07 13:38:44.042256145 +0100
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include "glob.h"
+#include "asn1.h"
+#include "nterr.h"
+#include "ksmbd_work.h"
+#include "smb_common.h"
+#include "smb1pdu.h"
+#include "mgmt/user_session.h"
+
+/**
+ * check_smb1_hdr() - check for valid smb request header
+ * @smb:        smb header to be checked
+ *
+ * check for valid smb signature and packet direction(request/response)
+ * TODO: properly check client authentication and tree authentication
+ *
+ * Return:      0 on success, otherwise 1
+ */
+static int check_smb1_hdr(struct smb_hdr *smb)
+{
+	/* does it have the right SMB "signature" ? */
+	if (*(__le32 *) smb->Protocol != SMB1_PROTO_NUMBER) {
+		ksmbd_debug(SMB, "Bad protocol string signature header 0x%x\n",
+				*(unsigned int *)smb->Protocol);
+		return 1;
+	}
+	ksmbd_debug(SMB, "got SMB\n");
+
+	/* if it's not a response then accept */
+	/* TODO : check for oplock break */
+	if (!(smb->Flags & SMBFLG_RESPONSE))
+		return 0;
+
+	ksmbd_debug(SMB, "Server sent request, not response\n");
+	return 1;
+}
+
+
+static int smb1_req_struct_size(struct smb_hdr *hdr)
+{
+	int wc = hdr->WordCount;
+
+	switch (hdr->Command) {
+	case SMB_COM_CREATE_DIRECTORY:
+	case SMB_COM_DELETE_DIRECTORY:
+	case SMB_COM_QUERY_INFORMATION:
+	case SMB_COM_TREE_DISCONNECT:
+	case SMB_COM_NEGOTIATE:
+	case SMB_COM_NT_CANCEL:
+	case SMB_COM_CHECK_DIRECTORY:
+	case SMB_COM_PROCESS_EXIT:
+	case SMB_COM_QUERY_INFORMATION_DISK:
+		if (wc != 0x0)
+			return -EINVAL;
+		break;
+	case SMB_COM_FLUSH:
+	case SMB_COM_DELETE:
+	case SMB_COM_RENAME:
+	case SMB_COM_ECHO:
+	case SMB_COM_FIND_CLOSE2:
+		if (wc != 0x1)
+			return -EINVAL;
+		break;
+	case SMB_COM_LOGOFF_ANDX:
+		if (wc != 0x2)
+			return -EINVAL;
+		break;
+	case SMB_COM_CLOSE:
+		if (wc != 0x3)
+			return -EINVAL;
+		break;
+	case SMB_COM_TREE_CONNECT_ANDX:
+	case SMB_COM_NT_RENAME:
+		if (wc != 0x4)
+			return -EINVAL;
+		break;
+	case SMB_COM_WRITE:
+		if (wc != 0x5)
+			return -EINVAL;
+		break;
+	case SMB_COM_SETATTR:
+	case SMB_COM_LOCKING_ANDX:
+		if (wc != 0x8)
+			return -EINVAL;
+		break;
+	case SMB_COM_TRANSACTION:
+		if (wc < 0xe)
+			return -EINVAL;
+		break;
+	case SMB_COM_SESSION_SETUP_ANDX:
+		if (wc != 0xc && wc != 0xd)
+			return -EINVAL;
+		break;
+	case SMB_COM_OPEN_ANDX:
+	case SMB_COM_TRANSACTION2:
+		if (wc != 0xf)
+			return -EINVAL;
+		break;
+	case SMB_COM_NT_CREATE_ANDX:
+		if (wc != 0x18)
+			return -EINVAL;
+		break;
+	case SMB_COM_READ_ANDX:
+		if (wc != 0xa && wc != 0xc)
+			return -EINVAL;
+		break;
+	case SMB_COM_WRITE_ANDX:
+		if (wc != 0xc && wc != 0xe)
+			return -EINVAL;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return wc;
+}
+
+static int smb1_get_byte_count(struct smb_hdr *hdr)
+{
+	int bc;
+
+	bc = le16_to_cpu(*(__le16 *)((char *)hdr +
+		sizeof(struct smb_hdr) + hdr->WordCount * 2));
+
+	switch (hdr->Command) {
+	case SMB_COM_CLOSE:
+	case SMB_COM_FLUSH:
+	case SMB_COM_READ_ANDX:
+	case SMB_COM_TREE_DISCONNECT:
+	case SMB_COM_LOGOFF_ANDX:
+	case SMB_COM_NT_CANCEL:
+	case SMB_COM_PROCESS_EXIT:
+	case SMB_COM_FIND_CLOSE2:
+		if (bc != 0x0)
+			return -EINVAL;
+		break;
+	case SMB_COM_LOCKING_ANDX:
+	case SMB_COM_TRANSACTION:
+	case SMB_COM_TRANSACTION2:
+	case SMB_COM_ECHO:
+	case SMB_COM_SESSION_SETUP_ANDX:
+		if (bc < 0x0)
+			return -EINVAL;
+		break;
+	case SMB_COM_WRITE_ANDX:
+		if (bc < 0x1)
+			return -EINVAL;
+		break;
+	case SMB_COM_CREATE_DIRECTORY:
+	case SMB_COM_DELETE_DIRECTORY:
+	case SMB_COM_DELETE:
+	case SMB_COM_RENAME:
+	case SMB_COM_QUERY_INFORMATION:
+	case SMB_COM_SETATTR:
+	case SMB_COM_OPEN_ANDX:
+	case SMB_COM_NEGOTIATE:
+	case SMB_COM_CHECK_DIRECTORY:
+		if (bc < 0x2)
+			return -EINVAL;
+		break;
+	case SMB_COM_TREE_CONNECT_ANDX:
+	case SMB_COM_WRITE:
+		if (bc < 0x3)
+			return -EINVAL;
+		break;
+	case SMB_COM_NT_RENAME:
+		if (bc < 0x4)
+			return -EINVAL;
+		break;
+	case SMB_COM_NT_CREATE_ANDX:
+		if (hdr->Flags2 & SMBFLG2_UNICODE) {
+			if (bc < 3)
+				return -EINVAL;
+		} else if (bc < 2)
+			return -EINVAL;
+		break;
+	}
+
+	return bc;
+}
+
+static unsigned int smb1_calc_size(struct smb_hdr *hdr)
+{
+	int len = sizeof(struct smb_hdr) - 4 + 2;
+	int bc, struct_size = hdr->WordCount * 2;
+
+	len += struct_size;
+	bc = smb1_get_byte_count(hdr);
+	if (bc < 0)
+		return bc;
+	ksmbd_debug(SMB, "SMB2 byte count %d, struct size : %d\n", bc,
+		struct_size);
+	len += bc;
+
+	ksmbd_debug(SMB, "SMB1 len %d\n", len);
+	return len;
+}
+
+static int smb1_get_data_len(struct smb_hdr *hdr)
+{
+	int data_len = 0;
+
+	/* data offset check */
+	switch (hdr->Command) {
+	case SMB_COM_WRITE_ANDX:
+	{
+		struct smb_com_write_req *req = (struct smb_com_write_req *)hdr;
+
+		data_len = le16_to_cpu(req->DataLengthLow);
+		data_len |= (le16_to_cpu(req->DataLengthHigh) << 16);
+		data_len += le16_to_cpu(req->DataOffset);
+		break;
+	}
+	case SMB_COM_TRANSACTION:
+	{
+		struct smb_com_trans_req *req = (struct smb_com_trans_req *)hdr;
+
+		data_len = le16_to_cpu(req->DataOffset) +
+			le16_to_cpu(req->DataCount);
+		break;
+	}
+	case SMB_COM_TRANSACTION2:
+	{
+		struct smb_com_trans2_req *req =
+				(struct smb_com_trans2_req *)hdr;
+
+		data_len = le16_to_cpu(req->DataOffset) +
+			le16_to_cpu(req->DataCount);
+		break;
+	}
+	}
+
+	return data_len;
+}
+
+int ksmbd_smb1_check_message(struct ksmbd_work *work)
+{
+	struct smb_hdr *hdr = (struct smb_hdr *)work->request_buf;
+	char *buf = work->request_buf;
+	int command = hdr->Command;
+	__u32 clc_len;  /* calculated length */
+	__u32 len = get_rfc1002_len(buf);
+	int wc, data_len;
+
+	if (check_smb1_hdr(hdr))
+		return 1;
+
+	wc = smb1_req_struct_size(hdr);
+	if (wc == -EOPNOTSUPP) {
+		ksmbd_debug(SMB, "Not support cmd %x\n", command);
+		return 1;
+	} else if (hdr->WordCount != wc) {
+		pr_err("Invalid word count, %d not %d. cmd %x\n",
+		       hdr->WordCount, wc, command);
+		return 1;
+	}
+
+	data_len = smb1_get_data_len(hdr);
+	if (len < data_len) {
+		pr_err("Invalid data area length %u not %u. cmd : %x\n",
+		       len, data_len, command);
+		return 1;
+	}
+
+	clc_len = smb1_calc_size(hdr);
+	if (len != clc_len) {
+		/*
+		 * smbclient may return wrong byte count in smb header.
+		 * But allow it to avoid write failure with smbclient.
+		 */
+		if (command == SMB_COM_WRITE_ANDX)
+			return 0;
+
+		if (len > clc_len) {
+			ksmbd_debug(SMB,
+				"cli req too long, len %d not %d. cmd:%x\n",
+				len, clc_len, command);
+			return 0;
+		}
+
+		pr_err("cli req too short, len %d not %d. cmd:%x\n",
+		       len, clc_len, command);
+
+		return 1;
+	}
+
+	return 0;
+}
+
+int smb_negotiate_request(struct ksmbd_work *work)
+{
+	return ksmbd_smb_negotiate_common(work, SMB_COM_NEGOTIATE);
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/fs/smb/server/smb1ops.c	2023-11-07 13:38:44.042256145 +0100
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/slab.h>
+
+#include "glob.h"
+#include "connection.h"
+#include "smb_common.h"
+#include "smb1pdu.h"
+
+static struct smb_version_values smb1_server_values = {
+	.version_string = SMB1_VERSION_STRING,
+	.protocol_id = SMB10_PROT_ID,
+	.capabilities = SMB1_SERVER_CAPS,
+	.max_read_size = CIFS_DEFAULT_IOSIZE,
+	.max_write_size = MAX_STREAM_PROT_LEN,
+	.max_trans_size = CIFS_DEFAULT_IOSIZE,
+	.large_lock_type = LOCKING_ANDX_LARGE_FILES,
+	.exclusive_lock_type = 0,
+	.shared_lock_type = LOCKING_ANDX_SHARED_LOCK,
+	.unlock_lock_type = 0,
+	.header_size = sizeof(struct smb_hdr),
+	.max_header_size = MAX_CIFS_HDR_SIZE,
+	.read_rsp_size = sizeof(struct smb_com_read_rsp),
+	.lock_cmd = cpu_to_le16(SMB_COM_LOCKING_ANDX),
+	.cap_unix = CAP_UNIX,
+	.cap_nt_find = CAP_NT_SMBS | CAP_NT_FIND,
+	.cap_large_files = CAP_LARGE_FILES,
+	.signing_enabled = SECMODE_SIGN_ENABLED,
+	.signing_required = SECMODE_SIGN_REQUIRED,
+};
+
+static struct smb_version_ops smb1_server_ops = {
+	.get_cmd_val = get_smb_cmd_val,
+	.init_rsp_hdr = init_smb_rsp_hdr,
+	.set_rsp_status = set_smb_rsp_status,
+	.allocate_rsp_buf = smb_allocate_rsp_buf,
+	.check_user_session = smb_check_user_session,
+	.is_sign_req = smb1_is_sign_req,
+	.check_sign_req = smb1_check_sign_req,
+	.set_sign_rsp = smb1_set_sign_rsp,
+	.get_ksmbd_tcon = smb_get_ksmbd_tcon,
+};
+
+static struct smb_version_cmds smb1_server_cmds[256] = {
+	[SMB_COM_CREATE_DIRECTORY]	= { .proc = smb_mkdir, },
+	[SMB_COM_DELETE_DIRECTORY]	= { .proc = smb_rmdir, },
+	[SMB_COM_CLOSE]			= { .proc = smb_close, },
+	[SMB_COM_FLUSH]			= { .proc = smb_flush, },
+	[SMB_COM_DELETE]		= { .proc = smb_unlink, },
+	[SMB_COM_RENAME]		= { .proc = smb_rename, },
+	[SMB_COM_QUERY_INFORMATION]	= { .proc = smb_query_info, },
+	[SMB_COM_SETATTR]		= { .proc = smb_setattr, },
+	[SMB_COM_LOCKING_ANDX]		= { .proc = smb_locking_andx, },
+	[SMB_COM_TRANSACTION]		= { .proc = smb_trans, },
+	[SMB_COM_ECHO]			= { .proc = smb_echo, },
+	[SMB_COM_OPEN_ANDX]		= { .proc = smb_open_andx, },
+	[SMB_COM_READ_ANDX]		= { .proc = smb_read_andx, },
+	[SMB_COM_WRITE_ANDX]		= { .proc = smb_write_andx, },
+	[SMB_COM_TRANSACTION2]		= { .proc = smb_trans2, },
+	[SMB_COM_FIND_CLOSE2]		= { .proc = smb_closedir, },
+	[SMB_COM_TREE_DISCONNECT]	= { .proc = smb_tree_disconnect, },
+	[SMB_COM_NEGOTIATE]		= { .proc = smb_negotiate_request, },
+	[SMB_COM_SESSION_SETUP_ANDX]	= { .proc = smb_session_setup_andx, },
+	[SMB_COM_LOGOFF_ANDX]           = { .proc = smb_session_disconnect, },
+	[SMB_COM_TREE_CONNECT_ANDX]	= { .proc = smb_tree_connect_andx, },
+	[SMB_COM_QUERY_INFORMATION_DISK] = { .proc = smb_query_information_disk, },
+	[SMB_COM_NT_CREATE_ANDX]	= { .proc = smb_nt_create_andx, },
+	[SMB_COM_NT_CANCEL]		= { .proc = smb_nt_cancel, },
+	[SMB_COM_NT_RENAME]		= { .proc = smb_nt_rename, },
+	[SMB_COM_WRITE]			= { .proc = smb_write, },
+	[SMB_COM_CHECK_DIRECTORY]	= { .proc = smb_checkdir, },
+	[SMB_COM_PROCESS_EXIT]		= { .proc = smb_process_exit, },
+};
+
+/**
+ * init_smb1_server() - initialize a smb server connection with smb1
+ *			command dispatcher
+ * @conn:	connection instance
+ */
+int init_smb1_server(struct ksmbd_conn *conn)
+{
+	conn->vals = &smb1_server_values;
+	conn->ops = &smb1_server_ops;
+	conn->cmds = smb1_server_cmds;
+	conn->max_cmds = ARRAY_SIZE(smb1_server_cmds);
+	return 0;
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/fs/smb/server/smb1pdu.c	2024-04-02 14:58:40.996786045 +0200
@@ -0,0 +1,9228 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+#include <linux/math64.h>
+#include <linux/fs.h>
+#include <linux/posix_acl_xattr.h>
+#include <linux/namei.h>
+#include <linux/statfs.h>
+#include <linux/vmalloc.h>
+#include <linux/filelock.h>
+#include <linux/version.h>
+
+#include "glob.h"
+#include "oplock.h"
+#include "connection.h"
+#include "transport_ipc.h"
+#include "vfs.h"
+#include "misc.h"
+
+#include "auth.h"
+#include "asn1.h"
+#include "server.h"
+#include "smb_common.h"
+#include "smb1pdu.h"
+#include "smbstatus.h"
+#include "mgmt/user_config.h"
+#include "mgmt/share_config.h"
+#include "mgmt/tree_connect.h"
+#include "mgmt/user_session.h"
+#include "ndr.h"
+#include "smberr.h"
+
+#define MAX_HEADER_SIZE(conn)		((conn)->vals->max_header_size)
+
+static const char *smb_cmd_str[] = {
+	[SMB_COM_CREATE_DIRECTORY] = "SMB_COM_CREATE_DIRECTORY",
+	[SMB_COM_DELETE_DIRECTORY] = "SMB_COM_DELETE_DIRECTORY",
+	[SMB_COM_CLOSE] = "SMB_COM_CLOSE",
+	[SMB_COM_FLUSH] = "SMB_COM_FLUSH",
+	[SMB_COM_DELETE] = "SMB_COM_DELETE",
+	[SMB_COM_RENAME] = "SMB_COM_RENAME",
+	[SMB_COM_QUERY_INFORMATION] = "SMB_COM_QUERY_INFORMATION",
+	[SMB_COM_SETATTR] = "SMB_COM_SETATTR",
+	[SMB_COM_WRITE] = "SMB_COM_WRITE",
+	[SMB_COM_CHECK_DIRECTORY] = "SMB_COM_CHECK_DIRECTORY",
+	[SMB_COM_PROCESS_EXIT] = "SMB_COM_PROCESS_EXIT",
+	[SMB_COM_LOCKING_ANDX] = "SMB_COM_LOCKING_ANDX",
+	[SMB_COM_TRANSACTION] = "SMB_COM_TRANSACTION",
+	[SMB_COM_COPY] = "SMB_COM_COPY",
+	[SMB_COM_ECHO] = "SMB_COM_ECHO",
+	[SMB_COM_OPEN_ANDX] = "SMB_COM_OPEN_ANDX",
+	[SMB_COM_READ_ANDX] = "SMB_COM_READ_ANDX",
+	[SMB_COM_WRITE_ANDX] = "SMB_COM_WRITE_ANDX",
+	[SMB_COM_TRANSACTION2] = "SMB_COM_TRANSACTION2",
+	[SMB_COM_TRANSACTION2_SECONDARY] = "SMB_COM_TRANSACTION2_SECONDARY",
+	[SMB_COM_FIND_CLOSE2] = "SMB_COM_FIND_CLOSE2",
+	[SMB_COM_TREE_DISCONNECT] = "SMB_COM_TREE_DISCONNECT",
+	[SMB_COM_NEGOTIATE] = "SMB_COM_NEGOTIATE",
+	[SMB_COM_SESSION_SETUP_ANDX] = "SMB_COM_SESSION_SETUP_ANDX",
+	[SMB_COM_LOGOFF_ANDX] = "SMB_COM_LOGOFF_ANDX",
+	[SMB_COM_TREE_CONNECT_ANDX] = "SMB_COM_TREE_CONNECT_ANDX",
+	[SMB_COM_QUERY_INFORMATION_DISK] = "SMB_COM_QUERY_INFORMATION_DISK",
+	[SMB_COM_NT_TRANSACT] = "SMB_COM_NT_TRANSACT",
+	[SMB_COM_NT_TRANSACT_SECONDARY] = "SMB_COM_NT_TRANSACT_SECONDARY",
+	[SMB_COM_NT_CREATE_ANDX] = "SMB_COM_NT_CREATE_ANDX",
+	[SMB_COM_NT_CANCEL] = "SMB_COM_NT_CANCEL",
+	[SMB_COM_NT_RENAME] = "SMB_COM_NT_RENAME",
+};
+
+static const char *smb_cmd_to_str(u16 cmd)
+{
+	if (cmd < ARRAY_SIZE(smb_cmd_str))
+		return smb_cmd_str[cmd];
+
+	return "unknown_cmd";
+}
+
+static const char *smb_trans2_cmd_str[] = {
+	[TRANS2_OPEN] = "TRANS2_OPEN",
+	[TRANS2_FIND_FIRST] = "TRANS2_FIND_FIRST",
+	[TRANS2_FIND_NEXT] = "TRANS2_FIND_NEXT",
+	[TRANS2_QUERY_FS_INFORMATION] = "TRANS2_QUERY_FS_INFORMATION",
+	[TRANS2_SET_FS_INFORMATION] = "TRANS2_SET_FS_INFORMATION",
+	[TRANS2_QUERY_PATH_INFORMATION] = "TRANS2_QUERY_PATH_INFORMATION",
+	[TRANS2_SET_PATH_INFORMATION] = "TRANS2_SET_PATH_INFORMATION",
+	[TRANS2_QUERY_FILE_INFORMATION] = "TRANS2_QUERY_FILE_INFORMATION",
+	[TRANS2_SET_FILE_INFORMATION] = "TRANS2_SET_FILE_INFORMATION",
+	[TRANS2_CREATE_DIRECTORY] = "TRANS2_CREATE_DIRECTORY",
+	[TRANS2_GET_DFS_REFERRAL] = "TRANS2_GET_DFS_REFERRAL",
+	[TRANS2_REPORT_DFS_INCOSISTENCY] = "TRANS2_REPORT_DFS_INCOSISTENCY",
+};
+
+static const char *smb_trans2_cmd_to_str(u16 cmd)
+{
+	if (cmd < ARRAY_SIZE(smb_trans2_cmd_str))
+		return smb_trans2_cmd_str[cmd];
+
+	return "unknown_trans2_cmd";
+}
+
+static int smb1_oplock_enable = false;
+
+/* Default: allocation roundup size = 1048576 */
+static unsigned int alloc_roundup_size = 1048576;
+
+struct ksmbd_dirent {
+	unsigned long long	ino;
+	unsigned long long	offset;
+	unsigned int		namelen;
+	unsigned int		d_type;
+	char			name[];
+};
+
+static inline void inc_resp_size(struct ksmbd_work *work, size_t len)
+{
+	work->response_offset += len;
+}
+
+static inline unsigned int get_req_len(void *buf)
+{
+	return 4 + get_rfc1002_len(buf);
+}
+
+/**
+ * smb_NTtimeToUnix() - convert NTFS time to unix style time format
+ * @ntutc:	NTFS style time
+ *
+ * Convert the NT UTC (based 1601-01-01, in hundred nanosecond units)
+ * into Unix UTC (based 1970-01-01, in seconds).
+ *
+ * Return:      timespec containing unix style time
+ */
+static struct timespec64 smb_NTtimeToUnix(__le64 ntutc)
+{
+	struct timespec64 ts;
+
+	/* BB what about the timezone? BB */
+
+	/* Subtract the NTFS time offset, then convert to 1s intervals. */
+	/* this has been taken from cifs, ntfs code */
+	u64 t;
+
+	t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
+	ts.tv_nsec = do_div(t, 10000000) * 100;
+	ts.tv_sec = t;
+	return ts;
+}
+
+/**
+ * get_smb_cmd_val() - get smb command value from smb header
+ * @work:	smb work containing smb header
+ *
+ * Return:      smb command value
+ */
+u16 get_smb_cmd_val(struct ksmbd_work *work)
+{
+	struct smb_hdr *rcv_hdr = (struct smb_hdr *)work->request_buf;
+
+	return (u16)rcv_hdr->Command;
+}
+
+/**
+ * is_smbreq_unicode() - check if the smb request is unicode or not
+ * @hdr:	pointer to smb_hdr in the request part
+ *
+ * Return: check flags and return true if request is unicode, else false
+ */
+static inline int is_smbreq_unicode(struct smb_hdr *hdr)
+{
+	return hdr->Flags2 & SMBFLG2_UNICODE ? 1 : 0;
+}
+
+/**
+ * set_smb_rsp_status() - set error type in smb response header
+ * @work:	smb work containing smb response header
+ * @err:	error code to set in response
+ */
+void set_smb_rsp_status(struct ksmbd_work *work, __le32 err)
+{
+	struct smb_hdr *rsp_hdr = (struct smb_hdr *) work->response_buf;
+
+	rsp_hdr->Status.CifsError = err;
+}
+
+/**
+ * init_smb_rsp_hdr() - initialize smb response header
+ * @work:	smb work containing smb request
+ *
+ * Return:      0 on success, otherwise -EINVAL
+ */
+int init_smb_rsp_hdr(struct ksmbd_work *work)
+{
+	struct smb_hdr *rsp_hdr;
+	struct smb_hdr *rcv_hdr = (struct smb_hdr *)work->request_buf;
+
+	rsp_hdr = (struct smb_hdr *) work->response_buf;
+	memset(rsp_hdr, 0, sizeof(struct smb_hdr) + 2);
+
+	/* smallest response is: smb_hdr, 1 byte wc and 2 bytes bcc */
+	work->response_offset = SMB_HEADER_SIZE + 2 + 1;
+	memcpy(rsp_hdr->Protocol, rcv_hdr->Protocol, 4);
+	rsp_hdr->Command = rcv_hdr->Command;
+
+	/*
+	 * Message is response. Other bits are obsolete.
+	 */
+	rsp_hdr->Flags = (SMBFLG_RESPONSE);
+
+	/*
+	 * Lets assume error code are NTLM. True for CIFS and windows 7
+	 */
+	rsp_hdr->Flags2 = rcv_hdr->Flags2;
+	rsp_hdr->PidHigh = rcv_hdr->PidHigh;
+	rsp_hdr->Pid = rcv_hdr->Pid;
+	rsp_hdr->Mid = rcv_hdr->Mid;
+	rsp_hdr->WordCount = 0;
+
+	/* We can do the above test because we have set maxVCN as 1 */
+	rsp_hdr->Uid = rcv_hdr->Uid;
+	rsp_hdr->Tid = rcv_hdr->Tid;
+	return 0;
+}
+
+/**
+ * smb_allocate_rsp_buf() - allocate response buffer for a command
+ * @work:	smb work containing smb request
+ *
+ * Return:      0 on success, otherwise -ENOMEM
+ */
+int smb_allocate_rsp_buf(struct ksmbd_work *work)
+{
+	struct smb_hdr *hdr = (struct smb_hdr *)work->request_buf;
+	unsigned char cmd = hdr->Command;
+	size_t large_sz = work->conn->vals->max_read_size + MAX_CIFS_HDR_SIZE;
+	size_t sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+
+	if (cmd == SMB_COM_TRANSACTION2) {
+		struct smb_com_trans2_qpi_req *req = work->request_buf;
+		u16 sub_cmd = le16_to_cpu(req->SubCommand);
+		u16 infolevel = le16_to_cpu(req->InformationLevel);
+
+		if ((sub_cmd == TRANS2_FIND_FIRST) ||
+		    (sub_cmd == TRANS2_FIND_NEXT) ||
+		    (sub_cmd == TRANS2_QUERY_PATH_INFORMATION &&
+		     (infolevel == SMB_QUERY_FILE_UNIX_LINK ||
+		      infolevel == SMB_QUERY_POSIX_ACL ||
+		      infolevel == SMB_INFO_QUERY_ALL_EAS)))
+			sz = large_sz;
+	}
+
+	if (cmd == SMB_COM_TRANSACTION)
+		sz = large_sz;
+
+	if (cmd == SMB_COM_ECHO) {
+		int resp_size;
+		struct smb_com_echo_req *req = work->request_buf;
+
+		/*
+		 * size of struct smb_com_echo_rsp + Bytecount - Size of Data
+		 * in struct smb_com_echo_rsp
+		 */
+		resp_size = sizeof(struct smb_com_echo_rsp) +
+			    le16_to_cpu(req->ByteCount) - 1;
+		if (resp_size > MAX_CIFS_SMALL_BUFFER_SIZE)
+			sz = large_sz;
+	}
+
+	work->response_buf = kvmalloc(sz, GFP_KERNEL | __GFP_ZERO);
+	work->response_sz = sz;
+
+	if (!work->response_buf) {
+		pr_err("Failed to allocate %zu bytes buffer\n", sz);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * andx_request_buffer() - return pointer to matching andx command
+ * @buf:	buffer containing smb request
+ * @len:	buffer length
+ * @command:	match next command with this command
+ *
+ * Return:      pointer to matching command buffer on success, otherwise NULL
+ */
+static char *andx_request_buffer(char *buf, unsigned int len, int command)
+{
+	struct andx_block *andx_ptr =
+		(struct andx_block *)(buf + sizeof(struct smb_hdr) - 1);
+	struct andx_block *next;
+
+	/* AndXOffset does not include 4 byte RFC1002 header */
+	len -= 4;
+
+	while (andx_ptr->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		unsigned int offset = le16_to_cpu(andx_ptr->AndXOffset);
+
+		if (offset > len)
+			return NULL;
+
+		next = (struct andx_block *)(buf + 4 + offset);
+		if (andx_ptr->AndXCommand == command)
+			return (char *)next;
+		andx_ptr = next;
+	}
+	return NULL;
+}
+
+/**
+ * andx_response_buffer() - return pointer to andx response buffer
+ * @buf:	buffer containing smb request
+ *
+ * Return:      pointer to andx command response on success, otherwise NULL
+ */
+static char *andx_response_buffer(char *buf)
+{
+	int pdu_length = get_rfc1002_len(buf);
+
+	return buf + 4 + pdu_length;
+}
+
+/**
+ * smb_check_user_session() - check for valid session for a user
+ * @work:	smb work containing smb request buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb_check_user_session(struct ksmbd_work *work)
+{
+	struct smb_hdr *req_hdr = (struct smb_hdr *)work->request_buf;
+	struct ksmbd_conn *conn = work->conn;
+	unsigned int cmd = conn->ops->get_cmd_val(work);
+
+	ksmbd_debug(SMB, "received SMB command: %s\n",
+		    smb_cmd_to_str(req_hdr->Command));
+
+	work->sess = NULL;
+	if (cmd == SMB_COM_NEGOTIATE || cmd == SMB_COM_SESSION_SETUP_ANDX ||
+	    cmd == SMB_COM_ECHO)
+		return 0;
+
+	if (!ksmbd_conn_good(conn))
+		return -EINVAL;
+
+	if (xa_empty(&conn->sessions)) {
+		ksmbd_debug(SMB, "NO sessions registered\n");
+		return -EINVAL;
+	}
+
+	work->sess = ksmbd_session_lookup(conn, le16_to_cpu(req_hdr->Uid));
+	if (work->sess)
+		return 1;
+	ksmbd_debug(SMB, "Invalid user session, Uid %u\n",
+		    le16_to_cpu(req_hdr->Uid));
+	return -EINVAL;
+}
+
+/**
+ * smb_get_ksmbd_tcon() - get tree connection information for a tree id
+ * @work:	smb work containing the request header whose tree id (Tid)
+ *		is used to look up the matching tree connection
+ *
+ * Return:      matching tree connection on success, otherwise error
+ */
+int smb_get_ksmbd_tcon(struct ksmbd_work *work)
+{
+	struct smb_hdr *req_hdr = (struct smb_hdr *)work->request_buf;
+	u8 cmd = req_hdr->Command;
+	int tree_id;
+
+	work->tcon = NULL;
+	if (cmd == SMB_COM_TREE_CONNECT_ANDX ||
+	    cmd == SMB_COM_NT_CANCEL ||
+	    cmd == SMB_COM_PROCESS_EXIT ||
+	    cmd == SMB_COM_LOGOFF_ANDX) {
+		ksmbd_debug(SMB, "skip to check tree connect request\n");
+		return 0;
+	}
+
+	if (xa_empty(&work->sess->tree_conns)) {
+		ksmbd_debug(SMB, "NO tree connected\n");
+		return -ENOENT;
+	}
+
+	tree_id = le16_to_cpu(req_hdr->Tid);
+	work->tcon = ksmbd_tree_conn_lookup(work->sess, tree_id);
+	if (!work->tcon) {
+		pr_err("Invalid tid %d\n", tree_id);
+		return -EINVAL;
+	}
+
+	return 1;
+}
+
+/**
+ * smb_session_disconnect() - LOGOFF request handler
+ * @work:	smb work containing log off request buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb_session_disconnect(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct ksmbd_session *sess = work->sess;
+
+	/* setting CifsExiting here may race with start_tcp_sess */
+	ksmbd_conn_set_need_reconnect(conn);
+
+	ksmbd_conn_wait_idle(conn, sess->id);
+
+	ksmbd_tree_conn_session_logoff(sess);
+
+	/* let start_tcp_sess free conn info now */
+	ksmbd_conn_set_exiting(conn);
+	return 0;
+}
+
+/**
+ * smb_tree_disconnect() - tree disconnect request handler
+ * @work:	smb work containing tree disconnect request buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb_tree_disconnect(struct ksmbd_work *work)
+{
+	struct smb_hdr *req_hdr = (struct smb_hdr *)work->request_buf;
+	struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
+	struct ksmbd_tree_connect *tcon = work->tcon;
+	struct ksmbd_session *sess = work->sess;
+	int err;
+
+	if (!tcon) {
+		pr_err("Invalid tid %d\n", req_hdr->Tid);
+		rsp_hdr->Status.CifsError = STATUS_NO_SUCH_USER;
+		return -EINVAL;
+	}
+
+	ksmbd_close_tree_conn_fds(work);
+
+	write_lock(&sess->tree_conns_lock);
+	if (tcon->t_state == TREE_DISCONNECTED) {
+		write_unlock(&sess->tree_conns_lock);
+		rsp_hdr->Status.CifsError = STATUS_NETWORK_NAME_DELETED;
+		return -ENOENT;
+	}
+
+	WARN_ON_ONCE(atomic_dec_and_test(&tcon->refcount));
+	tcon->t_state = TREE_DISCONNECTED;
+	write_unlock(&sess->tree_conns_lock);
+
+	err = ksmbd_tree_conn_disconnect(sess, tcon);
+	if (err) {
+		rsp_hdr->Status.CifsError = STATUS_NETWORK_NAME_DELETED;
+		return -ENOENT;
+	}
+
+	work->tcon = NULL;
+
+	return 0;
+}
+
+static void set_service_type(struct ksmbd_conn *conn,
+			     struct ksmbd_share_config *share,
+			     struct smb_com_tconx_rsp_ext *rsp)
+{
+	int length;
+	char *buf = rsp->Service;
+
+	if (test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
+		length = strlen(SERVICE_IPC_SHARE);
+		memcpy(buf, SERVICE_IPC_SHARE, length);
+		rsp->ByteCount = cpu_to_le16(length + 1);
+		buf += length;
+		*buf = '\0';
+	} else {
+		int uni_len = 0;
+
+		length = strlen(SERVICE_DISK_SHARE);
+		memcpy(buf, SERVICE_DISK_SHARE, length);
+		buf[length] = '\0';
+		length += 1;
+		uni_len = smbConvertToUTF16((__le16 *)(buf + length),
+					    NATIVE_FILE_SYSTEM,
+					    strlen(NATIVE_FILE_SYSTEM),
+					    conn->local_nls, 0);
+		uni_len++;
+		uni_len *= 2;
+		length += uni_len;
+		rsp->ByteCount = cpu_to_le16(length);
+	}
+}
+
+/**
+ * smb_tree_connect_andx() - tree connect request handler
+ * @work:	smb work containing tree connect request buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb_tree_connect_andx(struct ksmbd_work *work)
+{
+	struct smb_hdr *req_hdr = (struct smb_hdr *)work->request_buf;
+	struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
+	struct ksmbd_conn *conn = work->conn;
+	struct smb_com_tconx_req *req;
+	struct smb_com_tconx_rsp_ext *rsp;
+	int extra_byte = 0;
+	char *treename = NULL, *name = NULL, *dev_type = NULL;
+	struct ksmbd_share_config *share;
+	struct ksmbd_session *sess = work->sess;
+	int dev_flags = 0;
+	struct ksmbd_tree_conn_status status;
+	unsigned int maxlen, offset;
+
+	maxlen = get_req_len(req_hdr);
+
+	/* Is this an ANDX command ? */
+	if (req_hdr->Command != SMB_COM_TREE_CONNECT_ANDX) {
+		ksmbd_debug(SMB, "SMB_COM_TREE_CONNECT_ANDX is part of ANDX");
+		req = (struct smb_com_tconx_req *)
+			andx_request_buffer(work->request_buf, maxlen,
+					    SMB_COM_TREE_CONNECT_ANDX);
+		rsp = (struct smb_com_tconx_rsp_ext *)
+			andx_response_buffer(work->response_buf);
+		extra_byte = 3;
+		if (!req) {
+			status.ret = -EINVAL;
+			goto out_err;
+		}
+	} else {
+		req = (struct smb_com_tconx_req *)(&req_hdr->WordCount);
+		rsp = (struct smb_com_tconx_rsp_ext *)(&rsp_hdr->WordCount);
+	}
+
+	/* the tree name follows the variable-length password field */
+	offset = (char *)req - (char *)work->request_buf;
+	offset += offsetof(struct smb_com_tconx_req, Password)
+		  + le16_to_cpu(req->PasswordLength);
+
+	if (offset >= maxlen) {
+		status.ret = -EINVAL;
+		goto out_err;
+	}
+
+	treename = smb_strndup_from_utf16((char *)work->request_buf + offset,
+					  maxlen - offset,
+					  true,
+					  conn->local_nls);
+	if (IS_ERR(treename)) {
+		pr_err("Unable to strdup() treename for uid %d\n",
+		       rsp_hdr->Uid);
+		status.ret = KSMBD_TREE_CONN_STATUS_ERROR;
+		goto out_err;
+	}
+
+	/* adjust tree name length */
+	offset += (strlen(treename) + 1) * 2;
+	if (offset > maxlen) {
+		status.ret = -EINVAL;
+		goto out_err;
+	}
+
+	dev_type = kstrndup((char *)work->request_buf + offset,
+			    maxlen - offset, GFP_KERNEL);
+	/* kstrndup() returns NULL on failure, not an ERR_PTR */
+	if (!dev_type) {
+		pr_err("Unable to strdup() devtype for uid %d\n",
+		       rsp_hdr->Uid);
+		status.ret = KSMBD_TREE_CONN_STATUS_NOMEM;
+		goto out_err;
+	}
+
+	name = ksmbd_extract_sharename(conn->um, treename);
+	if (IS_ERR(name)) {
+		status.ret = KSMBD_TREE_CONN_STATUS_ERROR;
+		goto out_err;
+	}
+
+	ksmbd_debug(SMB, "tree connect request for tree %s, dev_type : %s\n",
+		    name, dev_type);
+
+	/* classify the requested service type string */
+	if (!strcmp(dev_type, "A:"))
+		dev_flags = 1;
+	else if (!strncmp(dev_type, "LPT", 3))
+		dev_flags = 2;
+	else if (!strcmp(dev_type, "IPC"))
+		dev_flags = 3;
+	else if (!strcmp(dev_type, "COMM"))
+		dev_flags = 4;
+	else if (!strcmp(dev_type, "?????"))
+		dev_flags = 5;
+
+	/* IPC$ needs an IPC-capable device type; other shares a disk type */
+	if (!strcmp(name, "IPC$")) {
+		if (dev_flags < 3) {
+			status.ret = -ENODEV;
+			goto out_err;
+		}
+	} else if (!dev_flags || (dev_flags > 1 && dev_flags < 5)) {
+		status.ret = -ENODEV;
+		goto out_err;
+	}
+
+	status = ksmbd_tree_conn_connect(conn, sess, name);
+	if (status.ret == KSMBD_TREE_CONN_STATUS_OK)
+		rsp_hdr->Tid = cpu_to_le16(status.tree_conn->id);
+	else
+		goto out_err;
+
+	status.ret = 0;
+	share = status.tree_conn->share_conf;
+	rsp->WordCount = 7;
+	rsp->OptionalSupport = cpu_to_le16(SMB_SUPPORT_SEARCH_BITS |
+					   SMB_CSC_NO_CACHING |
+					   SMB_UNIQUE_FILE_NAME);
+
+	rsp->MaximalShareAccessRights = cpu_to_le32(FILE_READ_RIGHTS |
+						    FILE_EXEC_RIGHTS);
+	if (test_tree_conn_flag(status.tree_conn,
+				KSMBD_TREE_CONN_FLAG_WRITABLE))
+		rsp->MaximalShareAccessRights |= cpu_to_le32(FILE_WRITE_RIGHTS);
+	rsp->GuestMaximalShareAccessRights = 0;
+
+	set_service_type(conn, share, rsp);
+
+	/* For each extra andx response, we have to add 1 byte,
+	 * for wc and 2 bytes for byte count
+	 */
+	inc_resp_size(work, 7 * 2 + le16_to_cpu(rsp->ByteCount) + extra_byte);
+
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(rsp_hdr));
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		/* More processing required */
+		status.ret = rsp->AndXCommand;
+	} else {
+		rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+	}
+
+	kfree(treename);
+	kfree(dev_type);
+	kfree(name);
+
+	write_lock(&sess->tree_conns_lock);
+	status.tree_conn->t_state = TREE_CONNECTED;
+	write_unlock(&sess->tree_conns_lock);
+
+	return status.ret;
+
+out_err:
+	/* treename/name may hold ERR_PTR values; dev_type is NULL or valid */
+	if (!IS_ERR(treename))
+		kfree(treename);
+	if (!IS_ERR(dev_type))
+		kfree(dev_type);
+	if (!IS_ERR(name))
+		kfree(name);
+
+	rsp->WordCount = 7;
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(rsp_hdr));
+	rsp->OptionalSupport = 0;
+	rsp->MaximalShareAccessRights = 0;
+	rsp->GuestMaximalShareAccessRights = 0;
+	rsp->ByteCount = 0;
+	ksmbd_debug(SMB, "error while tree connect\n");
+	switch (status.ret) {
+	case KSMBD_TREE_CONN_STATUS_NO_SHARE:
+		rsp_hdr->Status.CifsError = STATUS_BAD_NETWORK_PATH;
+		break;
+	case -ENOMEM:
+	case KSMBD_TREE_CONN_STATUS_NOMEM:
+		rsp_hdr->Status.CifsError = STATUS_NO_MEMORY;
+		break;
+	case KSMBD_TREE_CONN_STATUS_TOO_MANY_CONNS:
+	case KSMBD_TREE_CONN_STATUS_TOO_MANY_SESSIONS:
+		rsp_hdr->Status.CifsError = STATUS_ACCESS_DENIED;
+		break;
+	case -ENODEV:
+		rsp_hdr->Status.CifsError = STATUS_BAD_DEVICE_TYPE;
+		break;
+	case KSMBD_TREE_CONN_STATUS_ERROR:
+		rsp_hdr->Status.CifsError = STATUS_BAD_NETWORK_NAME;
+		break;
+	case -EINVAL:
+		rsp_hdr->Status.CifsError = STATUS_INVALID_PARAMETER;
+		break;
+	default:
+		rsp_hdr->Status.CifsError = STATUS_ACCESS_DENIED;
+	}
+
+	inc_resp_size(work, 7 * 2 + le16_to_cpu(rsp->ByteCount) + extra_byte);
+	return -EINVAL;
+}
+
+/**
+ * smb_get_name() - convert a filename from an smb packet to a local string
+ * @share:	share config, used for the veto-file check
+ * @src:	source filename, mostly in unicode format
+ * @maxlen:	maxlen of src string to be used for parsing
+ * @work:	smb work containing smb header flag
+ * @converted:	src string already converted to local characterset
+ *
+ * The last '*' wildcard and everything after it is stripped.  Unless
+ * @converted is true, the caller owns the returned string and must
+ * kfree() it.
+ *
+ * Return:	pointer to filename string on success, otherwise error ptr
+ */
+static char *smb_get_name(struct ksmbd_share_config *share, const char *src,
+			  const int maxlen, struct ksmbd_work *work,
+			  bool converted)
+{
+	struct smb_hdr *req_hdr = (struct smb_hdr *)work->request_buf;
+	bool is_unicode = is_smbreq_unicode(req_hdr);
+	char *name, *wild_card_pos;
+
+	if (converted)
+		name = (char *)src;
+	else {
+		name = smb_strndup_from_utf16(src, maxlen, is_unicode,
+					      work->conn->local_nls);
+		if (IS_ERR(name)) {
+			ksmbd_debug(SMB, "failed to get name %ld\n",
+				    PTR_ERR(name));
+			return name;
+		}
+	}
+
+	/* normalize separators and drop any trailing slash */
+	ksmbd_conv_path_to_unix(name);
+	ksmbd_strip_last_slash(name);
+
+	/* Handling of dir path in FIND_FIRST2 having '*' at end of path */
+	wild_card_pos = strrchr(name, '*');
+
+	if (wild_card_pos != NULL)
+		*wild_card_pos = '\0';
+
+
+	if (ksmbd_validate_filename(name) < 0) {
+		if (!converted)
+			kfree(name);
+		return ERR_PTR(-ENOENT);
+	}
+
+	if (ksmbd_share_veto_filename(share, name)) {
+		ksmbd_debug(SMB,
+			"file(%s) open is not allowed by setting as veto file\n",
+			name);
+		if (!converted)
+			kfree(name);
+		return ERR_PTR(-ENOENT);
+	}
+
+	ksmbd_debug(SMB, "file name = %s\n", name);
+
+	return name;
+}
+
+/**
+ * smb_get_dir_name() - split an smb path into directory name and pattern
+ * @share:	share config, used for the veto-file check
+ * @src:	source dir name, mostly in unicode format
+ * @maxlen:	maxlen of src string to be used for parsing
+ * @work:	smb work containing smb header flag
+ * @srch_ptr:	on success set to the search pattern (last path component);
+ *		the caller must kfree() it
+ *
+ * On failure the response header status has already been set here.
+ *
+ * Return:	pointer to dir name string on success, otherwise error ptr
+ */
+static char *smb_get_dir_name(struct ksmbd_share_config *share,
+			      const char *src, const int maxlen,
+			      struct ksmbd_work *work, char **srch_ptr)
+{
+	struct smb_hdr *req_hdr = (struct smb_hdr *)work->request_buf;
+	struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
+	bool is_unicode = is_smbreq_unicode(req_hdr);
+	char *name, *pattern_pos, *pattern = NULL;
+	int pattern_len, rc;
+
+	name = smb_strndup_from_utf16(src, maxlen, is_unicode,
+				      work->conn->local_nls);
+	if (IS_ERR(name)) {
+		pr_err("failed to allocate memory\n");
+		rsp_hdr->Status.CifsError = STATUS_NO_MEMORY;
+		return name;
+	}
+
+	ksmbd_conv_path_to_unix(name);
+	ksmbd_strip_last_slash(name);
+
+	/* the component after the last '/' is the search pattern */
+	pattern_pos = strrchr(name, '/');
+
+	if (!pattern_pos)
+		pattern_pos = name;
+	else
+		pattern_pos += 1;
+
+	pattern_len = strlen(pattern_pos);
+	if (pattern_len == 0) {
+		rc = -EINVAL;
+		goto err_name;
+	}
+	ksmbd_debug(SMB, "pattern searched = %s pattern_len = %d\n",
+		    pattern_pos, pattern_len);
+	pattern = kmalloc(pattern_len + 1, GFP_KERNEL);
+	if (!pattern) {
+		rc = -ENOMEM;
+		goto err_name;
+	}
+	memcpy(pattern, pattern_pos, pattern_len);
+	*(pattern + pattern_len) = '\0';
+	/* truncate name down to its directory part */
+	*pattern_pos = '\0';
+	*srch_ptr = pattern;
+
+	if (ksmbd_validate_filename(name) < 0) {
+		rc = -ENOENT;
+		goto err_pattern;
+	}
+
+	if (ksmbd_share_veto_filename(share, name)) {
+		ksmbd_debug(SMB,
+			"file(%s) open is not allowed by setting as veto file\n",
+			name);
+		rc = -ENOENT;
+		goto err_pattern;
+	}
+
+	ksmbd_debug(SMB, "dir name = %s\n", name);
+	return name;
+
+err_pattern:
+	kfree(pattern);
+	*srch_ptr = NULL;
+err_name:
+	kfree(name);
+
+	/* map the local error to an NT status in the response header */
+	if (rc == -EINVAL)
+		rsp_hdr->Status.CifsError = STATUS_INVALID_PARAMETER;
+	else if (rc == -ENOMEM)
+		rsp_hdr->Status.CifsError = STATUS_NO_MEMORY;
+	else if (rc == -ENOENT)
+		rsp_hdr->Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+
+	return ERR_PTR(rc);
+}
+
+/* Rename @fp to @newname once the share is confirmed writable. */
+static int smb_common_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
+			     char *newname, int flags)
+{
+	bool writable;
+
+	writable = test_tree_conn_flag(work->tcon,
+				       KSMBD_TREE_CONN_FLAG_WRITABLE);
+	if (writable)
+		return ksmbd_vfs_rename(work, &fp->filp->f_path, newname,
+					flags);
+
+	ksmbd_debug(SMB, "user does not have permission to write\n");
+	return -EACCES;
+}
+
+/**
+ * smb_rename() - rename request handler
+ * @work:	smb work containing rename request buffer
+ *
+ * Parses the old and new names out of the request (bounds-checked
+ * against the PDU length), opens the source and renames it without
+ * replacing an existing target.
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb_rename(struct ksmbd_work *work)
+{
+	struct smb_com_rename_req *req = work->request_buf;
+	struct smb_com_rename_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	bool is_unicode = is_smbreq_unicode(&req->hdr);
+	char *oldname, *newname = NULL;
+	struct ksmbd_file *fp = NULL;
+	int oldname_len;
+	struct path path, parent_path;
+	int rc = 0;
+	unsigned int maxlen, offset;
+
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_rename_req, OldFileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	oldname = smb_get_name(share, (char *)req + offset,
+			       maxlen - offset, work, false);
+	if (IS_ERR(oldname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(oldname);
+	}
+
+	if (is_unicode)
+		oldname_len = smb1_utf16_name_length((__le16 *)req->OldFileName,
+						     maxlen - offset);
+	else {
+		oldname_len = strlen(oldname);
+		oldname_len++;
+	}
+
+	/* 2 bytes for BufferFormat field and padding byte */
+	offset += oldname_len + 2;
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		/* was falling through with rc == 0, reporting success */
+		rc = -EINVAL;
+		goto out_free_name;
+	}
+
+	newname = smb_get_name(share, (char *)req + offset,
+			       maxlen - offset, work, false);
+	if (IS_ERR(newname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		rc = PTR_ERR(newname);
+		newname = NULL;
+		goto out_free_name;
+	}
+
+	ksmbd_debug(SMB, "rename %s -> %s\n", oldname, newname);
+	rc = ksmbd_vfs_kern_path_locked(work, oldname, LOOKUP_NO_SYMLINKS,
+					&parent_path, &path, 1);
+	if (rc)
+		goto out_free_name;
+
+	fp = ksmbd_vfs_dentry_open(work, &path, O_RDONLY, 0, false);
+
+	/* release parent lock to avoid deadlock in smb_common_rename */
+	inode_unlock(d_inode(parent_path.dentry));
+
+	if (IS_ERR(fp)) {
+		rc = PTR_ERR(fp);
+		fp = NULL;
+		goto out_kern_path;
+	}
+
+	rc = smb_common_rename(work, fp, newname, RENAME_NOREPLACE);
+	if (rc)
+		goto out_kern_path;
+
+	rsp->hdr.WordCount = 0;
+	rsp->ByteCount = 0;
+
+out_kern_path:
+	if (fp)
+		ksmbd_close_fd(work, fp->volatile_id);
+	path_put(&path);
+	path_put(&parent_path);
+
+out_free_name:
+	kfree(oldname);
+	kfree(newname);
+
+	if (rc) {
+		switch (rc) {
+		case -EEXIST:
+			rsp->hdr.Status.CifsError =
+				STATUS_OBJECT_NAME_COLLISION;
+			break;
+		case -ENOENT:
+			rsp->hdr.Status.CifsError =
+				STATUS_OBJECT_NAME_NOT_FOUND;
+			break;
+		case -ENOMEM:
+			rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+			break;
+		case -EACCES:
+			rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+			break;
+		case -EINVAL:
+			/* status was already set at the jump site */
+			break;
+		default:
+			/* never leave a failed request with a success status */
+			rsp->hdr.Status.CifsError =
+				STATUS_UNEXPECTED_IO_ERROR;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * smb_handle_negotiate() - negotiate request handler
+ * @work:	smb work containing negotiate request buffer
+ *
+ * Builds the negotiate response: security mode, buffer limits, server
+ * time, and either a raw server challenge or an SPNEGO blob depending
+ * on whether the client requested extended security.
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb_handle_negotiate(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct smb_negotiate_rsp *neg_rsp = work->response_buf;
+	__u64 time;
+	int rc = 0;
+
+	WARN_ON(ksmbd_conn_good(conn));
+
+	/* dialect negotiation failed earlier: reject the request */
+	if (conn->dialect == BAD_PROT_ID) {
+		neg_rsp->hdr.Status.CifsError = STATUS_INVALID_LOGON_TYPE;
+		rc = -EINVAL;
+		goto err_out;
+	}
+
+	conn->connection_type = 0;
+
+	/* wct 17 for NTLM */
+	neg_rsp->hdr.WordCount = 17;
+	neg_rsp->DialectIndex = cpu_to_le16(conn->dialect);
+
+	neg_rsp->SecurityMode = SMB1_SERVER_SECU;
+	if (server_conf.signing == KSMBD_CONFIG_OPT_AUTO ||
+	    server_conf.signing == KSMBD_CONFIG_OPT_MANDATORY) {
+		conn->sign = true;
+		neg_rsp->SecurityMode |= SECMODE_SIGN_ENABLED;
+		if (server_conf.signing == KSMBD_CONFIG_OPT_MANDATORY)
+			neg_rsp->SecurityMode |= SECMODE_SIGN_REQUIRED;
+	}
+	neg_rsp->MaxMpxCount = cpu_to_le16(SMB1_MAX_MPX_COUNT);
+	neg_rsp->MaxNumberVcs = cpu_to_le16(SMB1_MAX_VCS);
+	neg_rsp->MaxBufferSize = cpu_to_le32(conn->vals->max_read_size);
+	neg_rsp->MaxRawSize = cpu_to_le32(SMB1_MAX_RAW_SIZE);
+	neg_rsp->SessionKey = 0;
+	neg_rsp->Capabilities = cpu_to_le32(SMB1_SERVER_CAPS);
+
+	/* 64-bit server time split into two little-endian 32-bit halves */
+	time = ksmbd_systime();
+	neg_rsp->SystemTimeLow = cpu_to_le32(time & 0x00000000FFFFFFFF);
+	neg_rsp->SystemTimeHigh =
+		cpu_to_le32((time & 0xFFFFFFFF00000000) >> 32);
+	neg_rsp->ServerTimeZone = 0;
+
+	if (((struct smb_hdr *)work->request_buf)->Flags2 & SMBFLG2_EXT_SEC)
+		conn->use_spnego = true;
+
+	ksmbd_debug(SMB, "spnego is %s\n", conn->use_spnego ? "on" : "off");
+
+	if (conn->use_spnego == false) {
+		/* classic path: send the raw server challenge */
+		neg_rsp->EncryptionKeyLength = CIFS_CRYPTO_KEY_SIZE;
+		neg_rsp->Capabilities &= ~cpu_to_le32(CAP_EXTENDED_SECURITY);
+		neg_rsp->ByteCount = cpu_to_le16(CIFS_CRYPTO_KEY_SIZE);
+		/* initialize random server challenge */
+		get_random_bytes(conn->ntlmssp.cryptkey, sizeof(__u64));
+		memcpy((neg_rsp->u.EncryptionKey), conn->ntlmssp.cryptkey,
+		       CIFS_CRYPTO_KEY_SIZE);
+		/* Adjust pdu length, 17 words and 8 bytes added */
+		inc_resp_size(work, 17 * 2 + 8);
+	} else {
+		/* extended security: random server GUID plus SPNEGO token */
+		neg_rsp->EncryptionKeyLength = 0;
+		neg_rsp->ByteCount = cpu_to_le16(SMB1_CLIENT_GUID_SIZE +
+			AUTH_GSS_LENGTH);
+		get_random_bytes(neg_rsp->u.extended_response.GUID,
+				 SMB1_CLIENT_GUID_SIZE);
+		ksmbd_copy_gss_neg_header(
+				neg_rsp->u.extended_response.SecurityBlob);
+		inc_resp_size(work, 17 * 2 + 16 + AUTH_GSS_LENGTH);
+	}
+
+	/* Null terminated domain name in unicode */
+
+	ksmbd_conn_set_need_negotiate(conn);
+	/* Domain name and PC name are ignored by clients, so no need to send.
+	 * We can try sending them later
+	 */
+err_out:
+	return rc;
+}
+
+/*
+ * build_sess_rsp_noextsec() - build a session setup response without
+ * extended security.
+ *
+ * Validates the OEM (case-insensitive) and unicode (case-sensitive)
+ * password blobs against the request length, looks up and
+ * authenticates the user (NTLM or NTLMv2), then fills @rsp including
+ * the trailing native OS/LanMan/domain strings.
+ *
+ * Return: 0 on success, the chained ANDX command if any, otherwise a
+ * negative error code.
+ */
+static int build_sess_rsp_noextsec(struct ksmbd_work *work,
+				   struct ksmbd_session *sess,
+				   struct smb_com_session_setup_req_no_secext *req,
+				   struct smb_com_session_setup_old_resp *rsp)
+{
+	struct ksmbd_conn *conn = work->conn;
+	u16 oem_passwd_len, unicode_passwd_len;
+	u8 *oem_passwd, *unicode_passwd;
+	int offset, err = 0, len;
+	unsigned int maxlen;
+	char *name;
+	__le16 str[32];
+
+	/* Build response. We don't use extended security (yet), so wct is 3 */
+	rsp->hdr.WordCount = 3;
+	rsp->Action = 0;
+	/* The names should be unicode */
+	rsp->ByteCount = 0;
+	/* adjust pdu length */
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+
+	maxlen = get_req_len(req);
+
+	/* start of variable data */
+	offset = offsetof(struct smb_com_session_setup_req_no_secext,
+			  CaseInsensitivePassword);
+
+	/* verify password length fields and save pointers to data */
+	oem_passwd_len = le16_to_cpu(req->CaseInsensitivePasswordLength);
+	if (offset + oem_passwd_len > maxlen) {
+		err = -EINVAL;
+		goto out_err;
+	}
+	oem_passwd = (char *)req + offset;
+	offset += oem_passwd_len;
+
+	unicode_passwd_len = le16_to_cpu(req->CaseSensitivePasswordLength);
+	if (offset + unicode_passwd_len > maxlen) {
+		err = -EINVAL;
+		goto out_err;
+	}
+	unicode_passwd = (char *)req + offset;
+	offset += unicode_passwd_len;
+
+	/* 1 byte for padding */
+	offset++;
+	if (offset > maxlen) {
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	/* check if valid user name is present in request or not */
+	name = smb_strndup_from_utf16((char *)req + offset, maxlen - offset,
+				      true,  conn->local_nls);
+	if (IS_ERR(name)) {
+		pr_err("cannot allocate memory\n");
+		err = PTR_ERR(name);
+		goto out_err;
+	}
+	offset += (strlen(name) + 1) * 2;
+
+	WARN_ON(sess->user);
+
+	ksmbd_debug(SMB, "session setup request for user %s\n", name);
+	sess->user = ksmbd_login_user(name);
+	kfree(name);
+	if (!sess->user) {
+		pr_err("user not present in database\n");
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	/* guest logins skip password verification entirely */
+	if (user_guest(sess->user)) {
+		rsp->Action = cpu_to_le16(GUEST_LOGIN);
+		goto no_password_check;
+	}
+
+	/*
+	 * NOTE(review): the branch is selected on the unicode blob length,
+	 * yet the NTLM path hands the OEM blob to ksmbd_auth_ntlm() —
+	 * confirm this pairing is intended.
+	 */
+	if (unicode_passwd_len == CIFS_AUTH_RESP_SIZE) {
+		err = ksmbd_auth_ntlm(sess, oem_passwd,
+				      conn->ntlmssp.cryptkey);
+		if (err) {
+			pr_err("ntlm authentication failed for user %s\n",
+			       user_name(sess->user));
+			goto out_err;
+		}
+	} else {
+		char *ntdomain;
+
+		ntdomain = smb_strndup_from_utf16((char *)req + offset,
+						  maxlen - offset, true,
+						  conn->local_nls);
+		if (IS_ERR(ntdomain)) {
+			pr_err("cannot allocate memory\n");
+			err = PTR_ERR(ntdomain);
+			goto out_err;
+		}
+
+		err = ksmbd_auth_ntlmv2(conn, sess,
+					(struct ntlmv2_resp *)unicode_passwd,
+					unicode_passwd_len - CIFS_ENCPWD_SIZE,
+					ntdomain, conn->ntlmssp.cryptkey);
+		kfree(ntdomain);
+		if (err) {
+			pr_err("authentication failed for user %s\n",
+			       user_name(sess->user));
+			goto out_err;
+		}
+	}
+
+no_password_check:
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+
+	/* 1 byte padding for word alignment */
+	offset = 1;
+
+	memset(str, 0, sizeof(str));
+
+	/*
+	 * Append "Unix", "ksmbd" and "WORKGROUP"; all three strings are
+	 * written relative to NativeOS using a running offset.
+	 */
+	len = smb_strtoUTF16(str, "Unix", 4, conn->local_nls);
+	len = UNICODE_LEN(len + 1);
+	memcpy(rsp->NativeOS + offset, str, len);
+	offset += len;
+
+	len = smb_strtoUTF16(str, "ksmbd", 5, conn->local_nls);
+	len = UNICODE_LEN(len + 1);
+	memcpy(rsp->NativeOS + offset, str, len);
+	offset += len;
+
+	len = smb_strtoUTF16(str, "WORKGROUP", 9, conn->local_nls);
+	len = UNICODE_LEN(len + 1);
+	memcpy(rsp->NativeOS + offset, str, len);
+	offset += len;
+
+	rsp->ByteCount = cpu_to_le16(offset);
+	inc_resp_size(work, offset);
+
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+
+out_err:
+	return err;
+}
+
+/*
+ * build_sess_rsp_extsec() - build a session setup response using
+ * extended security (SPNEGO/NTLMSSP).
+ *
+ * Handles the two NTLMSSP phases: NEGOTIATE (reply with a challenge
+ * blob and STATUS_MORE_PROCESSING_REQUIRED) and AUTHENTICATE (verify
+ * the client's response and log the user in).
+ *
+ * Return: 0 on success, the chained ANDX command if any, otherwise a
+ * negative error code.
+ */
+static int build_sess_rsp_extsec(struct ksmbd_work *work,
+				 struct ksmbd_session *sess,
+				 struct smb_com_session_setup_req *req,
+				 struct smb_com_session_setup_resp *rsp)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct negotiate_message *negblob;
+	char *neg_blob;
+	int err = 0, neg_blob_len;
+	unsigned char *spnego_blob;
+	u16 spnego_blob_len;
+	int sz;
+
+	rsp->hdr.WordCount = 4;
+	rsp->Action = 0;
+
+	/* The names should be unicode */
+	rsp->ByteCount = 0;
+	/* adjust pdu length */
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+
+	negblob = (struct negotiate_message *)req->SecurityBlob;
+	sz = le16_to_cpu(req->SecurityBlobLength);
+
+	/* the security blob must fit inside the request PDU */
+	if (offsetof(struct smb_com_session_setup_req, SecurityBlob) + sz >
+		get_req_len(req)) {
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	/* try both SPNEGO token forms; fall back to raw NTLMSSP */
+	if (ksmbd_decode_negTokenInit((char *)negblob, sz, conn)) {
+		if (ksmbd_decode_negTokenTarg((char *)negblob, sz, conn)) {
+			conn->use_spnego = false;
+		}
+	}
+
+	if (conn->mechToken)
+		negblob = (struct negotiate_message *)conn->mechToken;
+
+	if (negblob->MessageType == NtLmNegotiate) {
+		struct challenge_message *chgblob;
+
+		ksmbd_debug(SMB, "negotiate phase\n");
+		err = ksmbd_decode_ntlmssp_neg_blob(negblob, sz, conn);
+		if (err)
+			goto out_err;
+
+		chgblob = (struct challenge_message *)rsp->SecurityBlob;
+		memset(chgblob, 0, sizeof(struct challenge_message));
+
+		if (conn->use_spnego) {
+			/* shadows the outer 'sz' */
+			int sz;
+
+			sz = sizeof(struct negotiate_message) +
+				(strlen(ksmbd_netbios_name()) * 2 + 1 + 4) * 6;
+			neg_blob = kmalloc(sz, GFP_KERNEL);
+			if (!neg_blob) {
+				err = -ENOMEM;
+				goto out_err;
+			}
+			chgblob = (struct challenge_message *)neg_blob;
+			neg_blob_len = ksmbd_build_ntlmssp_challenge_blob(
+					chgblob,
+					conn);
+			if (neg_blob_len < 0) {
+				kfree(neg_blob);
+				err = -ENOMEM;
+				goto out_err;
+			}
+
+			/* wrap the raw challenge in an SPNEGO container */
+			if (build_spnego_ntlmssp_neg_blob(&spnego_blob,
+							  &spnego_blob_len,
+							  neg_blob,
+							  neg_blob_len)) {
+				kfree(neg_blob);
+				err = -ENOMEM;
+				goto out_err;
+			}
+
+			memcpy((char *)rsp->SecurityBlob, spnego_blob,
+			       spnego_blob_len);
+			rsp->SecurityBlobLength = cpu_to_le16(spnego_blob_len);
+			kfree(spnego_blob);
+			kfree(neg_blob);
+		} else {
+			neg_blob_len = ksmbd_build_ntlmssp_challenge_blob(
+					chgblob,
+					conn);
+			if (neg_blob_len < 0) {
+				err = -ENOMEM;
+				goto out_err;
+			}
+
+			rsp->SecurityBlobLength = cpu_to_le16(neg_blob_len);
+		}
+
+		rsp->hdr.Status.CifsError = STATUS_MORE_PROCESSING_REQUIRED;
+		/*
+		 * Note: here total size -1 is done as an adjustment
+		 * for 0 size blob.
+		 */
+		inc_resp_size(work, le16_to_cpu(rsp->SecurityBlobLength));
+		rsp->ByteCount = rsp->SecurityBlobLength;
+	} else if (negblob->MessageType == NtLmAuthenticate) {
+		struct authenticate_message *authblob;
+		unsigned int blob_len, un_off, un_len;
+		char *username;
+
+		ksmbd_debug(SMB, "authenticate phase\n");
+		if (conn->use_spnego && conn->mechToken)
+			authblob =
+				(struct authenticate_message *)conn->mechToken;
+		else
+			authblob = (struct authenticate_message *)
+						req->SecurityBlob;
+
+		un_off = le32_to_cpu(authblob->UserName.BufferOffset);
+		un_len = le16_to_cpu(authblob->UserName.Length);
+		blob_len = le16_to_cpu(req->SecurityBlobLength);
+		/*
+		 * NOTE(review): bounds are checked against the request's
+		 * SecurityBlobLength even when authblob points into
+		 * conn->mechToken — confirm the mechToken buffer is at
+		 * least that large.
+		 */
+		if (blob_len < (u64)un_off + un_len) {
+			err = -EINVAL;
+			goto out_err;
+		}
+
+		username =
+			smb_strndup_from_utf16((const char *)authblob + un_off,
+					       un_len, true, conn->local_nls);
+
+		if (IS_ERR(username)) {
+			pr_err("cannot allocate memory\n");
+			err = PTR_ERR(username);
+			goto out_err;
+		}
+
+		ksmbd_debug(SMB, "session setup request for user %s\n",
+			    username);
+		sess->user = ksmbd_login_user(username);
+		kfree(username);
+
+		if (!sess->user) {
+			ksmbd_debug(SMB, "Unknown user name or an error\n");
+			err = -EINVAL;
+			goto out_err;
+		}
+
+		/* guest logins skip the NTLMSSP verification */
+		if (user_guest(sess->user)) {
+			rsp->Action = cpu_to_le16(GUEST_LOGIN);
+			goto no_password_check;
+		}
+
+		err = ksmbd_decode_ntlmssp_auth_blob(authblob, blob_len,
+						     conn, sess);
+		if (err) {
+			ksmbd_debug(SMB, "authentication failed\n");
+			err = -EINVAL;
+			goto out_err;
+		}
+
+no_password_check:
+		if (conn->use_spnego) {
+			if (build_spnego_ntlmssp_auth_blob(&spnego_blob,
+						&spnego_blob_len, 0)) {
+				err = -ENOMEM;
+				goto out_err;
+			}
+
+			memcpy((char *)rsp->SecurityBlob, spnego_blob,
+			       spnego_blob_len);
+			rsp->SecurityBlobLength = cpu_to_le16(spnego_blob_len);
+			kfree(spnego_blob);
+			inc_resp_size(work, spnego_blob_len);
+			rsp->ByteCount = rsp->SecurityBlobLength;
+		}
+	} else {
+		pr_err("Invalid phase %d\n", negblob->MessageType);
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+
+out_err:
+	if (conn->use_spnego && conn->mechToken) {
+		kfree(conn->mechToken);
+		conn->mechToken = NULL;
+	}
+
+	return err;
+}
+
+/**
+ * smb_session_setup_andx() - session setup request handler
+ * @work:   smb work containing session setup request buffer
+ *
+ * A WordCount of 12 selects the extended-security request layout and
+ * 13 the pre-extended-security one; anything else is treated as a
+ * malformed packet and dropped without a response.
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb_session_setup_andx(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct ksmbd_session *sess = NULL;
+	int rc = 0, cap;
+	unsigned short uid;
+
+	union smb_com_session_setup_andx *pSMB = work->request_buf;
+	union smb_com_session_setup_andx *rsp = work->response_buf;
+
+	if (pSMB->req.hdr.WordCount == 12)
+		cap = le32_to_cpu(pSMB->req.Capabilities);
+	else if (pSMB->req.hdr.WordCount == 13)
+		cap = le32_to_cpu(pSMB->req_no_secext.Capabilities);
+	else {
+		pr_err("malformed packet\n");
+		work->send_no_response = 1;
+		return 0;
+	}
+
+	ksmbd_conn_lock(conn);
+	/* non-zero Uid: the client continues an already-created session */
+	uid = le16_to_cpu(pSMB->req.hdr.Uid);
+	if (uid != 0) {
+		sess = ksmbd_session_lookup(conn, uid);
+		if (!sess) {
+			rc = -ENOENT;
+			goto out_err;
+		}
+		ksmbd_debug(SMB, "Reuse session ID: %llu, Uid: %u\n",
+			    sess->id, uid);
+	} else {
+		sess = ksmbd_smb1_session_create();
+		if (!sess) {
+			rc = -ENOMEM;
+			goto out_err;
+		}
+
+		rc = ksmbd_session_register(conn, sess);
+		if (rc)
+			goto out_err;
+		rsp->resp.hdr.Uid = cpu_to_le16(sess->id);
+		ksmbd_debug(SMB, "New session ID: %llu, Uid: %u\n", sess->id,
+			    uid);
+	}
+
+	if (cap & CAP_EXTENDED_SECURITY) {
+		ksmbd_debug(SMB, "build response with extend_security\n");
+		rc = build_sess_rsp_extsec(work, sess, &pSMB->req, &rsp->resp);
+
+	} else {
+		ksmbd_debug(SMB, "build response without extend_security\n");
+		rc = build_sess_rsp_noextsec(work, sess, &pSMB->req_no_secext,
+					     &rsp->old_resp);
+	}
+	if (rc < 0)
+		goto out_err;
+
+	work->sess = sess;
+	ksmbd_conn_set_good(conn);
+	ksmbd_conn_unlock(conn);
+	return 0;
+
+out_err:
+	rsp->resp.hdr.Status.CifsError = STATUS_LOGON_FAILURE;
+	rsp->resp.hdr.WordCount = 0;
+	rsp->resp.ByteCount = 0;
+	/*
+	 * NOTE(review): this also tears down a session that was merely
+	 * looked up via a non-zero Uid, not created by this request —
+	 * confirm destroying a pre-existing session here is intended.
+	 */
+	if (rc < 0 && sess) {
+		xa_erase(&conn->sessions, sess->id);
+		hash_del(&sess->hlist);
+		ksmbd_session_destroy(sess);
+		work->sess = NULL;
+	}
+	ksmbd_conn_unlock(conn);
+	return rc;
+}
+
+/**
+ * file_create_dispostion_flags() - map an SMB create disposition to
+ *				file open flags
+ * @dispostion:		file disposition contained in open request
+ * @file_present:	file already present or not
+ *
+ * Return:	O_CREAT/O_TRUNC bits to merge into the open flags, or a
+ *		negative error when the disposition forbids this case
+ */
+static int file_create_dispostion_flags(int dispostion, bool file_present)
+{
+	int disp_flags = 0;
+
+	if (dispostion == FILE_SUPERSEDE) {
+		/* overwrite when present, create when absent */
+		disp_flags |= file_present ? O_TRUNC : O_CREAT;
+	} else if (dispostion == FILE_OPEN) {
+		/* open an existing file only */
+		if (!file_present)
+			return -ENOENT;
+	} else if (dispostion == FILE_CREATE) {
+		/* create a new file only */
+		if (file_present)
+			return -EEXIST;
+		disp_flags |= O_CREAT;
+	} else if (dispostion == FILE_OPEN_IF) {
+		/* open, creating when absent */
+		if (!file_present)
+			disp_flags |= O_CREAT;
+	} else if (dispostion == FILE_OVERWRITE) {
+		/* truncate an existing file only */
+		if (!file_present)
+			return -ENOENT;
+		disp_flags |= O_TRUNC;
+	} else if (dispostion == FILE_OVERWRITE_IF) {
+		/* truncate when present, create when absent */
+		disp_flags |= file_present ? O_TRUNC : O_CREAT;
+	} else {
+		return -EINVAL;
+	}
+
+	return disp_flags;
+}
+
+/* Map an O_ACCMODE access mode onto the matching MAY_* permission mask. */
+static inline int ksmbd_openflags_to_mayflags(int open_flags)
+{
+	switch (open_flags & O_ACCMODE) {
+	case O_WRONLY:
+		return MAY_OPEN | MAY_WRITE;
+	case O_RDWR:
+		return MAY_OPEN | MAY_READ | MAY_WRITE;
+	default:
+		return MAY_OPEN | MAY_READ;
+	}
+}
+
+/**
+ * convert_generic_access_flags() - expand GENERIC_* access bits into
+ *				specific rights and open flags
+ * @access_flag:	file access flags contained in open request
+ * @open_flags:		file open flags are updated as per access flags
+ * @may_flags:		file may flags are updated from the open flags
+ * @attrib:		attribute flag indicating posix semantics or not
+ *
+ * Return:		access flags with GENERIC_* bits expanded
+ */
+static int convert_generic_access_flags(int access_flag, int *open_flags,
+					int *may_flags, int attrib)
+{
+	int aflags = access_flag;
+
+	/* expand each GENERIC_* shorthand into its specific rights */
+	if (aflags & GENERIC_READ)
+		aflags = (aflags & ~GENERIC_READ) | GENERIC_READ_FLAGS;
+
+	if (aflags & GENERIC_WRITE)
+		aflags = (aflags & ~GENERIC_WRITE) | GENERIC_WRITE_FLAGS;
+
+	if (aflags & GENERIC_EXECUTE)
+		aflags = (aflags & ~GENERIC_EXECUTE) | GENERIC_EXECUTE_FLAGS;
+
+	if (aflags & GENERIC_ALL)
+		aflags = (aflags & ~GENERIC_ALL) | GENERIC_ALL_FLAGS;
+
+	/* truncation implies write access */
+	if (*open_flags & O_TRUNC)
+		aflags |= FILE_WRITE_DATA;
+
+	/* derive the POSIX access mode from the requested rights */
+	if (!(aflags & (FILE_WRITE_DATA | FILE_APPEND_DATA)))
+		*open_flags |= O_RDONLY;
+	else if (aflags & (FILE_READ_ATTRIBUTES | FILE_READ_DATA |
+			   FILE_READ_EA | FILE_EXECUTE))
+		*open_flags |= O_RDWR;
+	else
+		*open_flags |= O_WRONLY;
+
+	if ((attrib & ATTR_POSIX_SEMANTICS) && (aflags & FILE_APPEND_DATA))
+		*open_flags |= O_APPEND;
+
+	*may_flags = ksmbd_openflags_to_mayflags(*open_flags);
+
+	return aflags;
+}
+
+/**
+ * smb_get_dos_attr() - derive DOS attribute bits from unix stat info
+ * @stat:	stat to be converted to dos attr
+ *
+ * Maps the sticky bit to hidden/system, a mode with no write bits to
+ * read-only, directories to ATTR_DIRECTORY, and files whose size
+ * exceeds their allocated blocks to ATTR_SPARSE.  ATTR_NORMAL is
+ * returned when nothing else applies.
+ *
+ * Return:	dos style attribute
+ */
+static __u32 smb_get_dos_attr(struct kstat *stat)
+{
+	__u32 attr = 0;
+
+	if (S_ISDIR(stat->mode))
+		attr |= ATTR_DIRECTORY;
+
+	if (stat->mode & S_ISVTX) /* sticky bit doubles as "hidden" */
+		attr |= ATTR_HIDDEN | ATTR_SYSTEM;
+
+	if ((stat->mode & 0222) == 0) /* no write permission anywhere */
+		attr |= ATTR_READONLY;
+
+	if (stat->size > (stat->blksize * stat->blocks))
+		attr |= ATTR_SPARSE;
+
+	return attr ? attr : ATTR_NORMAL;
+}
+
+/*
+ * lock_oplock_release() - apply a client's oplock break acknowledgement
+ * @fp:		file whose oplock state is being updated
+ * @type:	lock type field from the request (unused in this function)
+ * @oplock_level:	level the client acknowledges breaking to
+ *
+ * Downgrades the oplock according to @oplock_level, then marks the
+ * break complete and wakes any waiter on the oplock queue.
+ *
+ * Return:	0 on success, otherwise -EINVAL
+ */
+static int lock_oplock_release(struct ksmbd_file *fp, int type,
+			       int oplock_level)
+{
+	struct oplock_info *opinfo;
+	int ret;
+
+	ksmbd_debug(SMB, "got oplock brk for level OplockLevel = %d\n",
+		    oplock_level);
+
+	opinfo = fp->f_opinfo;
+	/* a break ack is only valid while a break is in flight */
+	if (opinfo->op_state == OPLOCK_STATE_NONE) {
+		pr_err("unexpected oplock state 0x%x\n", opinfo->op_state);
+		return -EINVAL;
+	}
+
+	/* exclusive/batch ack: drop straight to no oplock */
+	if (oplock_level == OPLOCK_EXCLUSIVE || oplock_level == OPLOCK_BATCH) {
+		if (opinfo_write_to_none(opinfo) < 0) {
+			opinfo->op_state = OPLOCK_STATE_NONE;
+			return -EINVAL;
+		}
+	} else if (((opinfo->level == OPLOCK_EXCLUSIVE) ||
+		    (opinfo->level == OPLOCK_BATCH)) &&
+		   (oplock_level == OPLOCK_READ)) {
+		/* write holder acknowledged a downgrade to read */
+		ret = opinfo_write_to_read(opinfo);
+		if (ret) {
+			opinfo->op_state = OPLOCK_STATE_NONE;
+			return -EINVAL;
+		}
+	} else if ((opinfo->level == OPLOCK_READ) &&
+		   (oplock_level == OPLOCK_NONE)) {
+		/* read holder acknowledged losing the oplock entirely */
+		ret = opinfo_read_to_none(opinfo);
+		if (ret) {
+			opinfo->op_state = OPLOCK_STATE_NONE;
+			return -EINVAL;
+		}
+	}
+
+	/* break complete: clear state and wake whoever waits on the break */
+	opinfo->op_state = OPLOCK_STATE_NONE;
+	wake_up_interruptible(&opinfo->oplock_q);
+
+	return 0;
+}
+
+/* Allocate a ksmbd_lock describing @flock's range and queue it on @lock_list. */
+static struct ksmbd_lock *smb_lock_init(struct file_lock *flock,
+					unsigned int cmd, int mode,
+					unsigned long long offset,
+					unsigned long long length,
+					struct list_head *lock_list)
+{
+	struct ksmbd_lock *lock;
+
+	lock = kzalloc(sizeof(*lock), GFP_KERNEL);
+	if (!lock)
+		return NULL;
+
+	lock->fl = flock;
+	lock->cmd = cmd;
+	lock->flags = mode;
+	lock->start = offset;
+	lock->end = offset + length;
+	/* flag zero-length ranges so they can be treated specially */
+	if (lock->end == lock->start)
+		lock->zero_len = 1;
+
+	INIT_LIST_HEAD(&lock->llist);
+	INIT_LIST_HEAD(&lock->clist);
+	INIT_LIST_HEAD(&lock->flist);
+	list_add_tail(&lock->llist, lock_list);
+
+	return lock;
+}
+
+/**
+ * smb_locking_andx() - handle SMB_COM_LOCKING_ANDX: oplock break responses
+ *			and byte-range lock/unlock requests
+ * @work:	smb work containing locking_andx command
+ *
+ * Requested locks are first checked for conflicts against locks held on
+ * every connection, then applied through vfs_lock_file().  On failure all
+ * locks taken by this request are rolled back.
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_locking_andx(struct ksmbd_work *work)
+{
+	struct smb_com_lock_req *req = work->request_buf;
+	struct smb_com_lock_rsp *rsp = work->response_buf;
+	struct ksmbd_file *fp;
+	int err = 0;
+	struct locking_andx_range32 *lock_ele32 = NULL, *unlock_ele32 = NULL;
+	struct locking_andx_range64 *lock_ele64 = NULL, *unlock_ele64 = NULL;
+	struct file *filp = NULL;
+	struct ksmbd_lock *smb_lock = NULL, *cmp_lock, *tmp, *tmp2;
+	int i, lock_count, unlock_count;
+	unsigned long long offset, length;
+	struct file_lock *flock = NULL;
+	unsigned int cmd = 0, leftlen;
+	LIST_HEAD(lock_list);
+	LIST_HEAD(rollback_list);
+	int locked, timeout;
+	const unsigned long long loff_max = ~0;
+	struct ksmbd_conn *conn;
+
+	timeout = le32_to_cpu(req->Timeout);
+	ksmbd_debug(SMB, "got oplock brk for fid %d lock type = 0x%x, timeout: %d\n",
+		    req->Fid, req->LockType, timeout);
+
+	/* find fid */
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		pr_err("cannot obtain fid for %d\n", req->Fid);
+		return -EINVAL;
+	}
+
+	if (req->LockType & LOCKING_ANDX_OPLOCK_RELEASE) {
+		pr_err("lock type is oplock release\n");
+		err = lock_oplock_release(fp, req->LockType, req->OplockLevel);
+	}
+
+	filp = fp->filp;
+	lock_count = le16_to_cpu(req->NumberOfLocks);
+	unlock_count = le16_to_cpu(req->NumberOfUnlocks);
+
+	ksmbd_debug(SMB, "lock count is %d, unlock_count : %d\n", lock_count,
+		    unlock_count);
+
+	/* validate that all lock/unlock ranges fit inside the request */
+	leftlen = get_req_len(req) - offsetof(struct smb_com_lock_req, Locks);
+
+	if (req->LockType & LOCKING_ANDX_LARGE_FILES) {
+		if ((unlock_count + lock_count) * sizeof(*lock_ele64) > leftlen) {
+			err = -EINVAL;
+			goto out;
+		}
+		lock_ele64 = (struct locking_andx_range64 *)req->Locks;
+	} else {
+		if ((unlock_count + lock_count) * sizeof(*lock_ele32) > leftlen) {
+			err = -EINVAL;
+			goto out;
+		}
+		lock_ele32 = (struct locking_andx_range32 *)req->Locks;
+	}
+
+	if (req->LockType & LOCKING_ANDX_CHANGE_LOCKTYPE) {
+		pr_err("lock type: LOCKING_ANDX_CHANGE_LOCKTYPE\n");
+		rsp->hdr.Status.DosError.ErrorClass = ERRDOS;
+		rsp->hdr.Status.DosError.Error = cpu_to_le16(ERRnoatomiclocks);
+		rsp->hdr.Flags2 &= ~SMBFLG2_ERR_STATUS;
+		goto out;
+	}
+
+	if (req->LockType & LOCKING_ANDX_CANCEL_LOCK)
+		pr_err("lock type: LOCKING_ANDX_CANCEL_LOCK\n");
+
+	/* stage 1: build a ksmbd_lock entry for every requested lock range */
+	for (i = 0; i < lock_count; i++) {
+		flock = smb_flock_init(filp);
+		if (!flock) {
+			/* fix: was "goto out" with err still 0, which made
+			 * the handler return success on allocation failure
+			 */
+			err = -ENOMEM;
+			goto out;
+		}
+
+		if (req->LockType & LOCKING_ANDX_SHARED_LOCK) {
+			pr_err("received shared request\n");
+			if (!(filp->f_mode & FMODE_READ)) {
+				rsp->hdr.Status.CifsError =
+					STATUS_ACCESS_DENIED;
+				locks_free_lock(flock);
+				goto out;
+			}
+			cmd = F_SETLKW;
+			flock->fl_type = F_RDLCK;
+		} else {
+			pr_err("received exclusive request\n");
+			if (!(filp->f_mode & FMODE_WRITE)) {
+				rsp->hdr.Status.CifsError =
+					STATUS_ACCESS_DENIED;
+				locks_free_lock(flock);
+				goto out;
+			}
+			cmd = F_SETLKW;
+			flock->fl_type = F_WRLCK;
+			flock->fl_flags |= FL_SLEEP;
+		}
+
+		if (req->LockType & LOCKING_ANDX_LARGE_FILES) {
+			offset = (unsigned long long)le32_to_cpu(
+					lock_ele64[i].OffsetLow);
+			length = (unsigned long long)le32_to_cpu(
+					lock_ele64[i].LengthLow);
+			offset |= (unsigned long long)le32_to_cpu(
+					lock_ele64[i].OffsetHigh) << 32;
+			length |= (unsigned long long)le32_to_cpu(
+					lock_ele64[i].LengthHigh) << 32;
+		} else {
+			offset = (unsigned long long)le32_to_cpu(
+				lock_ele32[i].Offset);
+			length = (unsigned long long)le32_to_cpu(
+				lock_ele32[i].Length);
+		}
+
+		if (offset > loff_max) {
+			pr_err("Invalid lock range requested\n");
+			rsp->hdr.Status.CifsError = STATUS_INVALID_LOCK_RANGE;
+			locks_free_lock(flock);
+			goto out;
+		}
+
+		/* reject ranges whose offset + length would wrap past 2^64 */
+		if (offset > 0 && length > (loff_max - offset) + 1) {
+			pr_err("Invalid lock range requested\n");
+			rsp->hdr.Status.CifsError = STATUS_INVALID_LOCK_RANGE;
+			locks_free_lock(flock);
+			goto out;
+		}
+
+		ksmbd_debug(SMB, "locking offset : %llx, length : %llu\n",
+			    offset, length);
+
+		/* clamp 64-bit SMB ranges to the VFS loff_t limit */
+		if (offset > OFFSET_MAX)
+			flock->fl_start = OFFSET_MAX;
+		else
+			flock->fl_start = offset;
+		if (offset + length > OFFSET_MAX)
+			flock->fl_end = OFFSET_MAX;
+		else
+			flock->fl_end = offset + length;
+
+		smb_lock = smb_lock_init(flock, cmd, req->LockType, offset,
+					 length, &lock_list);
+		if (!smb_lock) {
+			/* fix: propagate the allocation failure */
+			err = -ENOMEM;
+			locks_free_lock(flock);
+			goto out;
+		}
+	}
+
+	/* stage 2: conflict-check each entry, then take the VFS lock */
+	list_for_each_entry_safe(smb_lock, tmp, &lock_list, llist) {
+		int same_zero_lock = 0;
+
+		list_del(&smb_lock->llist);
+		/* check locks in connections */
+		down_read(&conn_list_lock);
+		list_for_each_entry(conn, &conn_list, conns_list) {
+			spin_lock(&conn->llist_lock);
+			list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) {
+				if (file_inode(cmp_lock->fl->fl_file) !=
+					file_inode(smb_lock->fl->fl_file))
+					continue;
+
+				if (smb_lock->zero_len &&
+					cmp_lock->start == smb_lock->start &&
+					cmp_lock->end == smb_lock->end) {
+					same_zero_lock = 1;
+					spin_unlock(&conn->llist_lock);
+					up_read(&conn_list_lock);
+					goto out_check_cl;
+				}
+
+				/* check zero byte lock range */
+				if (cmp_lock->zero_len && !smb_lock->zero_len &&
+						cmp_lock->start > smb_lock->start &&
+						cmp_lock->start < smb_lock->end) {
+					pr_err("previous lock conflict with zero byte lock range\n");
+					err = -EPERM;
+				} else if (smb_lock->zero_len && !cmp_lock->zero_len &&
+					smb_lock->start > cmp_lock->start &&
+					smb_lock->start < cmp_lock->end) {
+					pr_err("current lock conflict with zero byte lock range\n");
+					err = -EPERM;
+				} else if (((cmp_lock->start <= smb_lock->start &&
+					cmp_lock->end > smb_lock->start) ||
+					(cmp_lock->start < smb_lock->end &&
+					 cmp_lock->end >= smb_lock->end)) &&
+					!cmp_lock->zero_len && !smb_lock->zero_len) {
+					pr_err("Not allow lock operation on exclusive lock range\n");
+					err = -EPERM;
+				}
+
+				if (err) {
+					/* Clean error cache */
+					if ((smb_lock->zero_len &&
+							fp->cflock_cnt > 1) ||
+						(timeout && (fp->llock_fstart ==
+								smb_lock->start))) {
+						ksmbd_debug(SMB, "clean error cache\n");
+						fp->cflock_cnt = 0;
+					}
+
+					if (timeout > 0 ||
+						(fp->cflock_cnt > 0 &&
+						fp->llock_fstart == smb_lock->start) ||
+						((smb_lock->start >> 63) == 0 &&
+						smb_lock->start >= 0xEF000000)) {
+						if (timeout) {
+							spin_unlock(&conn->llist_lock);
+							up_read(&conn_list_lock);
+							ksmbd_debug(SMB, "waiting error response for timeout : %d\n",
+								timeout);
+							msleep(timeout);
+						}
+						rsp->hdr.Status.CifsError =
+							STATUS_FILE_LOCK_CONFLICT;
+					} else
+						rsp->hdr.Status.CifsError =
+							STATUS_LOCK_NOT_GRANTED;
+					fp->cflock_cnt++;
+					fp->llock_fstart = smb_lock->start;
+
+					if (timeout <= 0) {
+						spin_unlock(&conn->llist_lock);
+						up_read(&conn_list_lock);
+					}
+					goto out;
+				}
+			}
+			spin_unlock(&conn->llist_lock);
+		}
+		up_read(&conn_list_lock);
+
+out_check_cl:
+		if (same_zero_lock)
+			continue;
+		if (smb_lock->zero_len) {
+			err = 0;
+			goto skip;
+		}
+
+		flock = smb_lock->fl;
+retry:
+		err = vfs_lock_file(filp, smb_lock->cmd, flock, NULL);
+		if (err == FILE_LOCK_DEFERRED) {
+			/* blocking lock: register it, then poll until the
+			 * VFS grants it (wait_timeout != 0 -> retry path)
+			 */
+			pr_err("would have to wait for getting lock\n");
+			spin_lock(&work->conn->llist_lock);
+			list_add_tail(&smb_lock->clist, &work->conn->lock_list);
+			spin_unlock(&work->conn->llist_lock);
+			list_add(&smb_lock->llist, &rollback_list);
+wait:
+			err = ksmbd_vfs_posix_lock_wait_timeout(flock,
+							msecs_to_jiffies(10));
+			if (err) {
+				list_del(&smb_lock->llist);
+				spin_lock(&work->conn->llist_lock);
+				list_del(&smb_lock->clist);
+				spin_unlock(&work->conn->llist_lock);
+				goto retry;
+			} else
+				goto wait;
+		} else if (!err) {
+skip:
+			spin_lock(&work->conn->llist_lock);
+			list_add_tail(&smb_lock->clist, &work->conn->lock_list);
+			list_add_tail(&smb_lock->flist, &fp->lock_list);
+			spin_unlock(&work->conn->llist_lock);
+			list_add(&smb_lock->llist, &rollback_list);
+			pr_err("successful in taking lock\n");
+		} else if (err < 0) {
+			rsp->hdr.Status.CifsError = STATUS_LOCK_NOT_GRANTED;
+			goto out;
+		}
+	}
+
+	/* stage 3: process the unlock ranges that follow the lock ranges */
+	if (req->LockType & LOCKING_ANDX_LARGE_FILES)
+		unlock_ele64 = (struct locking_andx_range64 *)(req->Locks +
+				(sizeof(struct locking_andx_range64) *
+				 lock_count));
+	else
+		unlock_ele32 = (struct locking_andx_range32 *)(req->Locks +
+				(sizeof(struct locking_andx_range32) *
+				 lock_count));
+
+	for (i = 0; i < unlock_count; i++) {
+		flock = smb_flock_init(filp);
+		if (!flock) {
+			/* fix: propagate the allocation failure */
+			err = -ENOMEM;
+			goto out;
+		}
+
+		flock->fl_type = F_UNLCK;
+		cmd = 0;
+
+		if (req->LockType & LOCKING_ANDX_LARGE_FILES) {
+			offset = (unsigned long long)le32_to_cpu(
+					unlock_ele64[i].OffsetLow);
+			length = (unsigned long long)le32_to_cpu(
+					unlock_ele64[i].LengthLow);
+			offset |= (unsigned long long)le32_to_cpu(
+					unlock_ele64[i].OffsetHigh) << 32;
+			length |= (unsigned long long)le32_to_cpu(
+					unlock_ele64[i].LengthHigh) << 32;
+		} else {
+			offset = (unsigned long long)le32_to_cpu(
+				unlock_ele32[i].Offset);
+			length = (unsigned long long)le32_to_cpu(
+				unlock_ele32[i].Length);
+		}
+
+		ksmbd_debug(SMB, "unlock offset : %llx, length : %llu\n",
+			    offset, length);
+
+		if (offset > OFFSET_MAX)
+			flock->fl_start = OFFSET_MAX;
+		else
+			flock->fl_start = offset;
+		if (offset + length > OFFSET_MAX)
+			flock->fl_end = OFFSET_MAX;
+		else
+			flock->fl_end = offset + length;
+
+		locked = 0;
+		/* fix: was up_read(), releasing a rwsem that is not held;
+		 * the reader lock must be taken before walking conn_list,
+		 * matching the lock loop above (both exits drop it)
+		 */
+		down_read(&conn_list_lock);
+		list_for_each_entry(conn, &conn_list, conns_list) {
+			spin_lock(&conn->llist_lock);
+			list_for_each_entry(cmp_lock, &conn->lock_list, clist) {
+				if (file_inode(cmp_lock->fl->fl_file) !=
+					file_inode(flock->fl_file))
+					continue;
+
+				if ((cmp_lock->start == offset &&
+					 cmp_lock->end == offset + length)) {
+					locked = 1;
+					spin_unlock(&conn->llist_lock);
+					up_read(&conn_list_lock);
+					goto out_check_cl_unlck;
+				}
+			}
+			spin_unlock(&conn->llist_lock);
+		}
+		up_read(&conn_list_lock);
+
+out_check_cl_unlck:
+		if (!locked) {
+			locks_free_lock(flock);
+			rsp->hdr.Status.CifsError = STATUS_RANGE_NOT_LOCKED;
+			goto out;
+		}
+
+		err = vfs_lock_file(filp, cmd, flock, NULL);
+		if (!err) {
+			ksmbd_debug(SMB, "File unlocked\n");
+			spin_lock(&conn->llist_lock);
+			if (!list_empty(&cmp_lock->flist))
+				list_del(&cmp_lock->flist);
+			list_del(&cmp_lock->clist);
+			spin_unlock(&conn->llist_lock);
+
+			locks_free_lock(cmp_lock->fl);
+			kfree(cmp_lock);
+			fp->cflock_cnt = 0;
+		} else if (err == -ENOENT) {
+			rsp->hdr.Status.CifsError = STATUS_RANGE_NOT_LOCKED;
+			locks_free_lock(flock);
+			goto out;
+		}
+		locks_free_lock(flock);
+	}
+
+	rsp->hdr.WordCount = 2;
+	rsp->ByteCount = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+
+	/* fix: drop the fd reference before the AndX-chaining return too;
+	 * it was previously leaked when AndXCommand requested chaining
+	 */
+	ksmbd_fd_put(work, fp);
+
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+	return err;
+
+out:
+	/* free entries that never reached the VFS */
+	list_for_each_entry_safe(smb_lock, tmp, &lock_list, llist) {
+		locks_free_lock(smb_lock->fl);
+		list_del(&smb_lock->llist);
+		kfree(smb_lock);
+	}
+
+	/* undo every lock this request managed to take */
+	list_for_each_entry_safe(smb_lock, tmp, &rollback_list, llist) {
+		struct file_lock *rlock;
+
+		rlock = smb_flock_init(filp);
+		if (rlock) {
+			/* fix: rlock was dereferenced without a NULL check */
+			rlock->fl_type = F_UNLCK;
+			rlock->fl_start = smb_lock->start;
+			rlock->fl_end = smb_lock->end;
+
+			err = vfs_lock_file(filp, 0, rlock, NULL);
+			if (err)
+				pr_err("rollback unlock fail : %d\n", err);
+			locks_free_lock(rlock);
+		}
+
+		list_del(&smb_lock->llist);
+		spin_lock(&work->conn->llist_lock);
+		if (!list_empty(&smb_lock->flist))
+			list_del(&smb_lock->flist);
+		list_del(&smb_lock->clist);
+		spin_unlock(&work->conn->llist_lock);
+
+		locks_free_lock(smb_lock->fl);
+		kfree(smb_lock);
+	}
+
+	ksmbd_fd_put(work, fp);
+	pr_err("failed in taking lock\n");
+	return err;
+}
+
+/**
+ * smb_trans() - SMB_COM_TRANSACTION dispatcher for named-pipe requests
+ *		 (LANMAN RAP calls and DCE/RPC over "\PIPE\...")
+ * @work:	smb work containing trans command
+ *
+ * Only "\PIPE"-prefixed transaction names are accepted; anything else
+ * gets STATUS_NOT_SUPPORTED.
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_trans(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct smb_com_trans_req *req = work->request_buf;
+	struct smb_com_trans_rsp *rsp = work->response_buf;
+	struct smb_com_trans_pipe_req *pipe_req = work->request_buf;
+	struct ksmbd_rpc_command *rpc_resp;
+	unsigned int maxlen, offset;
+	__u16 subcommand;
+	char *name, *pipe;
+	char *pipedata;
+	int setup_bytes_count = 0;
+	int pipe_name_offset = 0;
+	int str_len_uni;
+	int ret = 0, nbytes = 0;
+	int param_len = 0;
+	int id;
+	int padding;
+
+	/* each setup word in the request is 2 bytes */
+	if (req->SetupCount)
+		setup_bytes_count = 2 * req->SetupCount;
+
+	/* the transaction name starts after the fixed header + setup words;
+	 * bound every later read against the real request length
+	 */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans_req, Data) + setup_bytes_count;
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return 0;
+	}
+
+	subcommand = le16_to_cpu(req->SubCommand);
+	name = smb_strndup_from_utf16((char *)req + offset,
+				      maxlen - offset, 1,
+				      conn->local_nls);
+
+	if (IS_ERR(name)) {
+		pr_err("failed to allocate memory\n");
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		return PTR_ERR(name);
+	}
+
+	ksmbd_debug(SMB, "Obtained string name = %s setupcount = %d\n",
+			name, setup_bytes_count);
+
+	/* only "\PIPE..." transaction names are handled here */
+	pipe_name_offset = strlen("\\PIPE");
+	if (strncmp("\\PIPE", name, pipe_name_offset) != 0) {
+		ksmbd_debug(SMB, "Not Pipe request\n");
+		rsp->hdr.Status.CifsError = STATUS_NOT_SUPPORTED;
+		kfree(name);
+		return 0;
+	}
+
+	/* skip the separator in "\PIPE\<name>" */
+	if (name[pipe_name_offset] == '\\')
+		pipe_name_offset++;
+
+	pipe = name + pipe_name_offset;
+
+	/* empty name ("\PIPE") means a DCE/RPC transact on an open fid;
+	 * otherwise only the LANMAN RAP pipe is supported
+	 */
+	if (*pipe != '\0' && strncmp(pipe, "LANMAN", sizeof("LANMAN")) != 0) {
+		ksmbd_debug(SMB, "Pipe %s not supported request\n", pipe);
+		rsp->hdr.Status.CifsError = STATUS_NOT_SUPPORTED;
+		kfree(name);
+		return 0;
+	}
+
+	/* Incoming pipe name unicode len */
+	str_len_uni = 2 * (strlen(name) + 1);
+
+	ksmbd_debug(SMB, "Pipe name unicode len = %d\n", str_len_uni);
+
+	/* Some clients like Windows may have additional padding. */
+	padding = le16_to_cpu(req->ParameterOffset) -
+		offsetof(struct smb_com_trans_req, Data)
+		- str_len_uni;
+	pipedata = req->Data + str_len_uni + setup_bytes_count + padding;
+	offset += str_len_uni + padding;
+
+	if (!strncmp(pipe, "LANMAN", sizeof("LANMAN"))) {
+		u16 param_count = le16_to_cpu(req->ParameterCount);
+
+		/* reject parameter blocks that run past the request buffer */
+		if (offset + param_count > maxlen) {
+			rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+			goto out;
+		}
+
+		rpc_resp = ksmbd_rpc_rap(work->sess, pipedata, param_count);
+
+		if (rpc_resp) {
+			if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) {
+				rsp->hdr.Status.CifsError =
+					STATUS_NOT_SUPPORTED;
+				kvfree(rpc_resp);
+				goto out;
+			} else if (rpc_resp->flags != KSMBD_RPC_OK) {
+				rsp->hdr.Status.CifsError =
+					STATUS_INVALID_PARAMETER;
+				kvfree(rpc_resp);
+				goto out;
+			}
+
+			/* copy the RAP payload straight after the response
+			 * header; sizing is done below at resp_out
+			 */
+			nbytes = rpc_resp->payload_sz;
+			memcpy((char *)rsp + sizeof(struct smb_com_trans_rsp),
+			       rpc_resp->payload, nbytes);
+
+			kvfree(rpc_resp);
+			ret = 0;
+			goto resp_out;
+		} else {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	id = pipe_req->fid;
+	switch (subcommand) {
+	case TRANSACT_DCERPCCMD:
+	{
+		u16 data_count = le16_to_cpu(req->DataCount);
+
+		/* reject data blocks that run past the request buffer */
+		if (offset + data_count > maxlen) {
+			rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+			goto out;
+		}
+
+		ksmbd_debug(SMB, "GOT TRANSACT_DCERPCCMD\n");
+		ret = -EINVAL;
+		rpc_resp = ksmbd_rpc_ioctl(work->sess, id, pipedata,
+					   data_count);
+		if (rpc_resp) {
+			if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) {
+				rsp->hdr.Status.CifsError =
+					STATUS_NOT_SUPPORTED;
+				kvfree(rpc_resp);
+				goto out;
+			} else if (rpc_resp->flags != KSMBD_RPC_OK) {
+				rsp->hdr.Status.CifsError =
+					STATUS_INVALID_PARAMETER;
+				kvfree(rpc_resp);
+				goto out;
+			}
+
+			nbytes = rpc_resp->payload_sz;
+			memcpy((char *)rsp + sizeof(struct smb_com_trans_rsp),
+			       rpc_resp->payload, nbytes);
+			kvfree(rpc_resp);
+			ret = 0;
+		}
+		break;
+	}
+	default:
+		ksmbd_debug(SMB, "SMB TRANS subcommand not supported %u\n",
+			    subcommand);
+		ret = -EOPNOTSUPP;
+		rsp->hdr.Status.CifsError = STATUS_NOT_SUPPORTED;
+		goto out;
+	}
+
+resp_out:
+
+	/* build the 10-word transaction response; param_len stays 0 here,
+	 * so the response carries only a data section
+	 */
+	rsp->hdr.WordCount = 10;
+	rsp->TotalParameterCount = cpu_to_le16(param_len);
+	rsp->TotalDataCount = cpu_to_le16(nbytes);
+	rsp->Reserved = 0;
+	rsp->ParameterCount = cpu_to_le16(param_len);
+	rsp->ParameterOffset = cpu_to_le16(56);
+	rsp->ParameterDisplacement = 0;
+	rsp->DataCount = cpu_to_le16(nbytes);
+	rsp->DataOffset = cpu_to_le16(56 + param_len);
+	rsp->DataDisplacement = 0;
+	rsp->SetupCount = 0;
+	rsp->Reserved1 = 0;
+	/* Adding 1 for Pad */
+	rsp->ByteCount = cpu_to_le16(nbytes + 1 + param_len);
+	rsp->Pad = 0;
+	inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+
+out:
+	kfree(name);
+	return ret;
+}
+
+/**
+ * create_andx_pipe() - create ipc pipe request handler
+ * @work:	smb work containing create command
+ *
+ * Extracts the pipe name from the request, opens an RPC pipe session for
+ * it and builds an extended OPEN_ANDX-style response describing the pipe.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int create_andx_pipe(struct ksmbd_work *work)
+{
+	struct smb_com_open_req *req = work->request_buf;
+	struct smb_com_open_ext_rsp *rsp = work->response_buf;
+	unsigned int maxlen, offset;
+	char *name;
+	int rc = 0;
+	__u16 fid;
+
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_open_req, fileName);
+
+	/* unicode strings are preceded by one pad byte */
+	if (is_smbreq_unicode(&req->hdr))
+		offset += 1;
+
+	if (offset >= maxlen) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	name = smb_strndup_from_utf16((char *)req + offset,
+				      maxlen - offset, 1,
+				      work->conn->local_nls);
+	if (IS_ERR(name)) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* on success the returned value is the pipe fid */
+	rc = ksmbd_session_rpc_open(work->sess, name);
+	if (rc < 0)
+		goto out;
+	fid = rc;
+
+	rsp->hdr.WordCount = 42;
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+	rsp->AndXReserved = 0;
+	rsp->OplockLevel = 0;
+	rsp->Fid = fid;
+	rsp->CreateAction = cpu_to_le32(1);
+	rsp->CreationTime = 0;
+	rsp->LastAccessTime = 0;
+	rsp->LastWriteTime = 0;
+	rsp->ChangeTime = 0;
+	rsp->FileAttributes = cpu_to_le32(ATTR_NORMAL);
+	rsp->AllocationSize = cpu_to_le64(0);
+	rsp->EndOfFile = 0;
+	/* FileType 2 = message-mode named pipe */
+	rsp->FileType = cpu_to_le16(2);
+	rsp->DeviceState = cpu_to_le16(0x05ff);
+	rsp->DirectoryFlag = 0;
+	rsp->fid = 0;
+	rsp->MaxAccess = cpu_to_le32(FILE_GENERIC_ALL);
+	rsp->GuestAccess = cpu_to_le32(FILE_GENERIC_READ);
+	rsp->ByteCount = 0;
+	inc_resp_size(work, 100);
+
+out:
+	/* map the return code onto the NT status in the response header */
+	switch (rc) {
+	case 0:
+		rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+		break;
+	case -EINVAL:
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		break;
+	case -ENOSPC:
+	case -ENOMEM:
+	default:
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		break;
+	}
+
+	kfree(name);
+	return rc;
+}
+
+/*
+ * helper to create a file/directory with a given mode
+ *
+ * The requested posix_mode is filtered through the share's configured
+ * create/directory mode mask before the object is created.  On success,
+ * @parent_path and @path are filled in by a locked path lookup of the
+ * newly created object; the caller owns the unlock/put.
+ *
+ * Returns 0 on success, -EBADF when O_CREAT was not requested, or a
+ * negative errno from create/lookup.
+ */
+static int smb_common_create(struct ksmbd_work *work, struct path *parent_path,
+			     struct path *path, char *name, int open_flags,
+			     umode_t posix_mode, bool is_dir)
+{
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	umode_t mode;
+	int err;
+
+	if (!(open_flags & O_CREAT))
+		return -EBADF;
+
+	if (is_dir) {
+		mode = share_config_directory_mode(share, posix_mode);
+		ksmbd_debug(SMB, "creating directory (mode=%04o)\n", mode);
+		err = ksmbd_vfs_mkdir(work, name, mode);
+		if (err)
+			return err;
+	} else {
+		mode = share_config_create_mode(share, posix_mode);
+		ksmbd_debug(SMB, "creating regular file (mode=%04o)\n", mode);
+		err = ksmbd_vfs_create(work, name, mode);
+		if (err)
+			return err;
+	}
+
+	/* re-resolve what we just created so the caller gets a locked path */
+	err = ksmbd_vfs_kern_path_locked(work, name, 0, parent_path, path, 0);
+	if (err) {
+		pr_err("cannot get linux path (%s), err = %d\n", name, err);
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * smb_nt_create_andx() - file open request handler
+ * @work:	smb work containing nt open command
+ *
+ * Handles SMB_COM_NT_CREATE_ANDX: resolves the (possibly RootDirectoryFid-
+ * relative) name, applies the create disposition, creates or opens the
+ * object, grants an oplock when allowed, and fills either the 34-word or
+ * the 50-word (extended) response.
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_nt_create_andx(struct ksmbd_work *work)
+{
+	struct smb_com_open_req *req = work->request_buf;
+	struct smb_com_open_rsp *rsp = work->response_buf;
+	struct smb_com_open_ext_rsp *ext_rsp = work->response_buf;
+	struct ksmbd_conn *conn = work->conn;
+	struct ksmbd_tree_connect *tcon = work->tcon;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct path path, parent_path;
+	struct kstat stat;
+	int oplock_flags, file_info, open_flags, may_flags, access_flags;
+	char *name;
+	char *conv_name = NULL;
+	bool file_present = true, extended_reply;
+	__u64 alloc_size = 0, time;
+	umode_t mode = 0;
+	int err = 0;
+	int create_directory = 0;
+	char *root = NULL;
+	bool is_unicode;
+	bool is_relative_root = false;
+	struct ksmbd_file *fp = NULL;
+	int oplock_rsp = OPLOCK_NONE;
+	int share_ret;
+	unsigned int maxlen, offset;
+
+	rsp->hdr.Status.CifsError = STATUS_UNSUCCESSFUL;
+	/* creates on the IPC share are pipe opens, not filesystem opens */
+	if (test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_PIPE)) {
+		ksmbd_debug(SMB, "create pipe on IPC\n");
+		return create_andx_pipe(work);
+	}
+
+	if (req->CreateOptions & FILE_OPEN_BY_FILE_ID_LE) {
+		ksmbd_debug(SMB, "file open with FID is not supported\n");
+		rsp->hdr.Status.CifsError = STATUS_NOT_SUPPORTED;
+		return -EINVAL;
+	}
+
+	/* delete-on-close needs DELETE access and a non-readonly target */
+	if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) {
+		if (req->DesiredAccess &&
+		    !(le32_to_cpu(req->DesiredAccess) & DELETE)) {
+			rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+			return -EPERM;
+		}
+
+		if (le32_to_cpu(req->FileAttributes) & ATTR_READONLY) {
+			rsp->hdr.Status.CifsError = STATUS_CANNOT_DELETE;
+			return -EPERM;
+		}
+	}
+
+	if (req->CreateOptions & FILE_DIRECTORY_FILE_LE) {
+		ksmbd_debug(SMB, "GOT Create Directory via CREATE ANDX\n");
+		create_directory = 1;
+	}
+
+	/*
+	 * Filename is relative to this root directory FID, instead of
+	 * tree connect point. Find root dir name from this FID and
+	 * prepend root dir name in filename.
+	 */
+	if (req->RootDirectoryFid) {
+		ksmbd_debug(SMB, "path lookup relative to RootDirectoryFid\n");
+
+		is_relative_root = true;
+		fp = ksmbd_lookup_fd_fast(work, req->RootDirectoryFid);
+		if (fp)
+			root = (char *)fp->filp->f_path.dentry->d_name.name;
+		else {
+			rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+			/* clear WordCount (1 byte) + ByteCount (2 bytes) */
+			memset(&rsp->hdr.WordCount, 0, 3);
+			return -EINVAL;
+		}
+		ksmbd_fd_put(work, fp);
+	}
+
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_open_req, fileName);
+	is_unicode = is_smbreq_unicode(&req->hdr);
+
+	/* unicode filename is preceded by one pad byte */
+	if (is_unicode)
+		offset++;
+
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_strndup_from_utf16((char *)req + offset, maxlen - offset,
+				      is_unicode, conn->local_nls);
+	if (IS_ERR(name)) {
+		if (PTR_ERR(name) == -ENOMEM) {
+			pr_err("failed to allocate memory\n");
+			rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		} else
+			rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+
+		return PTR_ERR(name);
+	}
+
+	/* prepend "\<rootdir>\" when the name is RootDirectoryFid-relative */
+	if (is_relative_root) {
+		char *full_name;
+
+		full_name = kasprintf(GFP_KERNEL, "\\%s\\%s", root, name);
+		if (!full_name) {
+			kfree(name);
+			rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+			return -ENOMEM;
+		}
+
+		kfree(name);
+		name = full_name;
+	}
+
+	/* reject last components "*" and "/" as invalid names */
+	root = strrchr(name, '\\');
+	if (root) {
+		root++;
+		if ((root[0] == '*' || root[0] == '/') && (root[1] == '\0')) {
+			rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+			kfree(name);
+			return -EINVAL;
+		}
+	}
+
+	conv_name = smb_get_name(share, name, strlen(name), work, true);
+	if (IS_ERR(conv_name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		kfree(name);
+		return PTR_ERR(conv_name);
+	}
+
+	if (ksmbd_override_fsids(work)) {
+		err = -ENOMEM;
+		goto out1;
+	}
+
+	/* locked lookup; failure other than EACCES/EXDEV just means the
+	 * target does not exist yet (file_present = false)
+	 */
+	err = ksmbd_vfs_kern_path_locked(work, conv_name, LOOKUP_NO_SYMLINKS,
+					 &parent_path, &path,
+					 (req->hdr.Flags & SMBFLG_CASELESS) &&
+						!create_directory);
+	if (err) {
+		if (err == -EACCES || err == -EXDEV)
+			goto out;
+		file_present = false;
+		ksmbd_debug(SMB, "can not get linux path for %s, err = %d\n",
+			    conv_name, err);
+	} else {
+		if (d_is_symlink(path.dentry)) {
+			err = -EACCES;
+			goto free_path;
+		}
+
+		err = vfs_getattr(&path, &stat, STATX_BASIC_STATS,
+				  AT_STATX_SYNC_AS_STAT);
+		if (err) {
+			pr_err("can not stat %s, err = %d\n", conv_name, err);
+			goto free_path;
+		}
+	}
+
+	/* request asked for a file but the existing object is a directory */
+	if (file_present &&
+	    (req->CreateOptions & FILE_NON_DIRECTORY_FILE_LE) &&
+	    S_ISDIR(stat.mode)) {
+		ksmbd_debug(SMB, "Can't open dir %s, request is to open file\n",
+			    conv_name);
+		if (!(((struct smb_hdr *)work->request_buf)->Flags2 &
+					SMBFLG2_ERR_STATUS)) {
+			rsp->hdr.Status.DosError.ErrorClass = ERRDOS;
+			rsp->hdr.Status.DosError.Error =
+				cpu_to_le16(ERRfilexists);
+		} else
+			rsp->hdr.Status.CifsError =
+				STATUS_OBJECT_NAME_COLLISION;
+
+		memset(&rsp->hdr.WordCount, 0, 3);
+
+		goto free_path;
+	}
+
+	/* request asked for a directory but the existing object is a file */
+	if (file_present && create_directory && !S_ISDIR(stat.mode)) {
+		ksmbd_debug(SMB, "Can't open file %s, request is to open dir\n",
+			    conv_name);
+		if (!(((struct smb_hdr *)work->request_buf)->Flags2 &
+		      SMBFLG2_ERR_STATUS)) {
+			ntstatus_to_dos(STATUS_NOT_A_DIRECTORY,
+					&rsp->hdr.Status.DosError.ErrorClass,
+					&rsp->hdr.Status.DosError.Error);
+		} else
+			rsp->hdr.Status.CifsError = STATUS_NOT_A_DIRECTORY;
+
+		memset(&rsp->hdr.WordCount, 0, 3);
+
+		goto free_path;
+	}
+
+	oplock_flags = le32_to_cpu(req->OpenFlags) &
+		(REQ_OPLOCK | REQ_BATCHOPLOCK);
+	extended_reply = le32_to_cpu(req->OpenFlags) & REQ_EXTENDED_INFO;
+	/* map CreateDisposition + existence onto O_* open flags;
+	 * negative means the disposition cannot be satisfied
+	 */
+	open_flags = file_create_dispostion_flags(
+			le32_to_cpu(req->CreateDisposition), file_present);
+
+	if (open_flags < 0) {
+		ksmbd_debug(SMB, "create_dispostion returned %d\n", open_flags);
+		if (file_present) {
+			if (!(((struct smb_hdr *)work->request_buf)->Flags2 &
+						SMBFLG2_ERR_STATUS)) {
+				rsp->hdr.Status.DosError.ErrorClass = ERRDOS;
+				rsp->hdr.Status.DosError.Error =
+					cpu_to_le16(ERRfilexists);
+			} else if (open_flags == -EINVAL)
+				rsp->hdr.Status.CifsError =
+					STATUS_INVALID_PARAMETER;
+			else
+				rsp->hdr.Status.CifsError =
+					STATUS_OBJECT_NAME_COLLISION;
+			memset(&rsp->hdr.WordCount, 0, 3);
+			goto free_path;
+		} else {
+			err = -ENOENT;
+			goto out;
+		}
+	} else {
+		if (file_present) {
+			/* never block on opening a FIFO */
+			if (S_ISFIFO(stat.mode))
+				open_flags |= O_NONBLOCK;
+		}
+
+		if (req->CreateOptions & FILE_WRITE_THROUGH_LE)
+			open_flags |= O_SYNC;
+	}
+
+	access_flags =
+		convert_generic_access_flags(le32_to_cpu(req->DesiredAccess),
+					     &open_flags, &may_flags,
+					     le32_to_cpu(req->FileAttributes));
+
+	/* start permissive; ATTR_READONLY strips write bits, and the share
+	 * config applies its create-mode mask inside smb_common_create()
+	 */
+	mode |= 0777;
+	if (le32_to_cpu(req->FileAttributes) & ATTR_READONLY)
+		mode &= ~0222;
+
+	/* TODO:
+	 * - check req->ShareAccess for sharing file among different process
+	 * - check req->FileAttributes for special/readonly file attrib
+	 * - check req->SecurityFlags for client security context tracking
+	 * - check req->ImpersonationLevel
+	 */
+
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		if (open_flags & O_CREAT) {
+			ksmbd_debug(SMB,
+				"returning as user does not have permission to write\n");
+			err = -EACCES;
+			if (file_present)
+				goto free_path;
+			else
+				goto out;
+		}
+	}
+
+	ksmbd_debug(SMB, "filename : %s, open_flags = 0x%x\n", conv_name,
+		    open_flags);
+	if (!file_present && (open_flags & O_CREAT)) {
+		if (!create_directory)
+			mode |= S_IFREG;
+
+		err = smb_common_create(work, &parent_path, &path, conv_name,
+					open_flags, mode, create_directory);
+		if (err) {
+			ksmbd_debug(SMB, "smb_common_create err: %d\n", err);
+			goto out;
+		}
+	} else {
+		err = inode_permission(mnt_idmap(path.mnt),
+				       d_inode(path.dentry),
+				       may_flags);
+		if (err)
+			goto free_path;
+	}
+
+	/* refuse opens below a directory pending deletion */
+	err = ksmbd_query_inode_status(path.dentry->d_parent);
+	if (err == KSMBD_INODE_STATUS_PENDING_DELETE) {
+		err = -EBUSY;
+		goto free_path;
+	}
+
+	err = 0;
+	/* open file and get FID */
+	fp = ksmbd_vfs_dentry_open(work,
+				   &path,
+				   open_flags,
+				   req->CreateOptions,
+				   file_present);
+	if (IS_ERR(fp)) {
+		err = PTR_ERR(fp);
+		fp = NULL;
+		goto free_path;
+	}
+	fp->daccess = req->DesiredAccess;
+	fp->saccess = req->ShareAccess;
+	fp->pid = le16_to_cpu(req->hdr.Pid);
+
+	write_lock(&fp->f_ci->m_lock);
+	list_add(&fp->node, &fp->f_ci->m_fp_list);
+	write_unlock(&fp->f_ci->m_lock);
+
+	/* negative share_ret means a sharing-mode conflict */
+	share_ret = ksmbd_smb_check_shared_mode(fp->filp, fp);
+	if (smb1_oplock_enable &&
+	    test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_OPLOCKS) &&
+	    !S_ISDIR(file_inode(fp->filp)->i_mode) && oplock_flags) {
+		/* Client cannot request levelII oplock directly */
+		err = smb_grant_oplock(work, oplock_flags, fp->volatile_id, fp,
+				       le16_to_cpu(req->hdr.Tid), NULL,
+				       share_ret);
+		if (err)
+			goto free_path;
+	} else {
+		if (ksmbd_inode_pending_delete(fp)) {
+			err = -EBUSY;
+			goto free_path;
+		}
+
+		if (share_ret < 0) {
+			err = -EPERM;
+			goto free_path;
+		}
+	}
+
+	oplock_rsp = fp->f_opinfo != NULL ? fp->f_opinfo->level : 0;
+
+	if (file_present) {
+		if (!(open_flags & O_TRUNC))
+			file_info = F_OPENED;
+		else
+			file_info = F_OVERWRITTEN;
+	} else
+		file_info = F_CREATED;
+
+	if (le32_to_cpu(req->DesiredAccess) & (DELETE | GENERIC_ALL))
+		fp->is_nt_open = 1;
+	if ((le32_to_cpu(req->DesiredAccess) & DELETE) &&
+	    (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE))
+		ksmbd_fd_set_delete_on_close(fp, file_info);
+
+	/* open success, send back response */
+	err = vfs_getattr(&path, &stat, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+	if (err) {
+		pr_err("cannot get stat information\n");
+		goto free_path;
+	}
+
+	/* honour the requested allocation size by growing the file */
+	alloc_size = le64_to_cpu(req->AllocationSize);
+	if (alloc_size &&
+	    (file_info == F_CREATED || file_info == F_OVERWRITTEN)) {
+		if (alloc_size > stat.size) {
+			err = ksmbd_vfs_truncate(work, fp, alloc_size);
+			if (err) {
+				pr_err("failed to expand file, err = %d\n",
+				       err);
+				goto free_path;
+			}
+		}
+	}
+
+	/* prepare response buffer */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+
+	rsp->OplockLevel = oplock_rsp;
+	rsp->Fid = fp->volatile_id;
+
+	if ((le32_to_cpu(req->CreateDisposition) == FILE_SUPERSEDE) &&
+	    (file_info == F_OVERWRITTEN))
+		rsp->CreateAction = cpu_to_le32(F_SUPERSEDED);
+	else
+		rsp->CreateAction = cpu_to_le32(file_info);
+
+	/* prefer the real birth time when the filesystem reports one */
+	if (stat.result_mask & STATX_BTIME)
+		fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
+	else
+		fp->create_time = ksmbd_UnixTimeToNT(stat.ctime);
+	if (file_present) {
+		if (test_share_config_flag(tcon->share_conf,
+					   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
+			struct xattr_dos_attrib da;
+
+			err = ksmbd_vfs_get_dos_attrib_xattr(mnt_idmap(path.mnt),
+							     path.dentry, &da);
+			if (err > 0)
+				fp->create_time = da.create_time;
+			err = 0;
+		}
+	} else {
+		if (test_share_config_flag(tcon->share_conf,
+					   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
+			struct xattr_dos_attrib da = {0};
+
+			da.version = 4;
+			da.attr = smb_get_dos_attr(&stat);
+			da.create_time = fp->create_time;
+
+			err = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path.mnt),
+							     &path, &da, false);
+			if (err)
+				ksmbd_debug(SMB, "failed to store creation time in xattr\n");
+			err = 0;
+		}
+	}
+
+	rsp->CreationTime = cpu_to_le64(fp->create_time);
+	time = ksmbd_UnixTimeToNT(stat.atime);
+	rsp->LastAccessTime = cpu_to_le64(time);
+	time = ksmbd_UnixTimeToNT(stat.mtime);
+	rsp->LastWriteTime = cpu_to_le64(time);
+	time = ksmbd_UnixTimeToNT(stat.ctime);
+	rsp->ChangeTime = cpu_to_le64(time);
+
+	rsp->FileAttributes = cpu_to_le32(smb_get_dos_attr(&stat));
+	/* stat.blocks is in 512-byte units */
+	rsp->AllocationSize = cpu_to_le64(stat.blocks << 9);
+	rsp->EndOfFile = cpu_to_le64(stat.size);
+	/* TODO: is it normal file, named pipe, printer, modem etc*/
+	rsp->FileType = 0;
+	/* status of named pipe*/
+	rsp->DeviceState = 0;
+	rsp->DirectoryFlag = S_ISDIR(stat.mode) ? 1 : 0;
+	if (extended_reply) {
+		struct inode *inode;
+
+		/* 50-word extended response adds VolId/fid/MaxAccess */
+		rsp->hdr.WordCount = 50;
+		memset(&ext_rsp->VolId, 0, 16);
+		if (fp) {
+			inode = file_inode(fp->filp);
+			ext_rsp->fid = inode->i_ino;
+			if (S_ISDIR(inode->i_mode) ||
+			    (fp->filp->f_mode & FMODE_WRITE))
+				ext_rsp->MaxAccess = FILE_GENERIC_ALL_LE;
+			else
+				ext_rsp->MaxAccess = FILE_GENERIC_READ_LE |
+						     FILE_EXECUTE_LE;
+		} else {
+			ext_rsp->MaxAccess = FILE_GENERIC_ALL_LE;
+			ext_rsp->fid = 0;
+		}
+
+		ext_rsp->ByteCount = 0;
+
+	} else {
+		rsp->hdr.WordCount = 34;
+		rsp->ByteCount = 0;
+	}
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 0);
+
+free_path:
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+out:
+	ksmbd_revert_fsids(work);
+out1:
+	/* translate the final errno into the NT status for the client */
+	switch (err) {
+	case 0:
+		ksmbd_update_fstate(&work->sess->file_table, fp, FP_INITED);
+		break;
+	case -ENOSPC:
+		rsp->hdr.Status.CifsError = STATUS_DISK_FULL;
+		break;
+	case -EMFILE:
+		rsp->hdr.Status.CifsError = STATUS_TOO_MANY_OPENED_FILES;
+		break;
+	case -EINVAL:
+		rsp->hdr.Status.CifsError = STATUS_NO_SUCH_USER;
+		break;
+	case -EACCES:
+		rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		break;
+	case -EPERM:
+		rsp->hdr.Status.CifsError = STATUS_SHARING_VIOLATION;
+		break;
+	case -ENOENT:
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_NOT_FOUND;
+		break;
+	case -EBUSY:
+		rsp->hdr.Status.CifsError = STATUS_DELETE_PENDING;
+		break;
+	case -EOPNOTSUPP:
+		rsp->hdr.Status.CifsError = STATUS_NOT_SUPPORTED;
+		break;
+	case -ENOMEM:
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		break;
+	default:
+		rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+	}
+
+	if (err && fp)
+		ksmbd_close_fd(work, fp->volatile_id);
+
+	kfree(conv_name);
+
+	if (!rsp->hdr.WordCount)
+		return err;
+
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+
+	return err;
+}
+
+/**
+ * smb_close_pipe() - ipc pipe close request handler
+ * @work:	smb work containing close command
+ *
+ * Forwards the close of the pipe fid to the RPC layer.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_close_pipe(struct ksmbd_work *work)
+{
+	struct smb_com_close_req *req = work->request_buf;
+	unsigned int fid = req->FileID;
+
+	ksmbd_session_rpc_close(work->sess, fid);
+	return 0;
+}
+
+/**
+ * smb_close() - file close request handler (also routes IPC pipe closes)
+ * @work:	smb work containing close command
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_close(struct ksmbd_work *work)
+{
+	struct smb_com_close_req *req = work->request_buf;
+	struct smb_com_close_rsp *rsp = work->response_buf;
+	int err = 0;
+
+	ksmbd_debug(SMB, "SMB_COM_CLOSE called for fid %u\n", req->FileID);
+
+	/* on an IPC$ share the fid names an RPC pipe, not a real file */
+	if (test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_PIPE)) {
+		err = smb_close_pipe(work);
+		if (err < 0)
+			goto out;
+		goto IPC_out;
+	}
+
+	/*
+	 * TODO: linux cifs client does not send LastWriteTime,
+	 * need to check if windows client use this field
+	 */
+	if (le32_to_cpu(req->LastWriteTime) > 0 &&
+	    le32_to_cpu(req->LastWriteTime) < 0xFFFFFFFF)
+		pr_info("need to set last modified time before close\n");
+
+	/* falls through to IPC_out; a nonzero err is overridden below */
+	err = ksmbd_close_fd(work, req->FileID);
+
+IPC_out:
+	/* file close success, return response to server */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 0;
+	rsp->ByteCount = 0;
+
+out:
+	/* any failure (pipe or fd close) is reported as INVALID_HANDLE */
+	if (err)
+		rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+	return err;
+}
+
+/**
+ * smb_read_andx_pipe() - read from ipc pipe request handler
+ * @work:	smb work containing read command
+ *
+ * Issues an RPC read for the pipe fid, copies the payload into a
+ * separately allocated buffer and pins it into the response iov.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_read_andx_pipe(struct ksmbd_work *work)
+{
+	struct smb_com_read_req *req = work->request_buf;
+	struct smb_com_read_rsp *rsp = work->response_buf;
+	struct ksmbd_rpc_command *rpc_resp;
+	int ret = 0, nbytes = 0;
+
+	rpc_resp = ksmbd_rpc_read(work->sess, req->Fid);
+	if (rpc_resp) {
+		void *aux_buf;
+
+		if (rpc_resp->flags != KSMBD_RPC_OK || !rpc_resp->payload_sz) {
+			rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+			kvfree(rpc_resp);
+			return -EINVAL;
+		}
+
+		/* copy the payload out so it can outlive rpc_resp */
+		nbytes = rpc_resp->payload_sz;
+		aux_buf = kvmalloc(nbytes, GFP_KERNEL);
+		if (!aux_buf) {
+			kvfree(rpc_resp);
+			return -ENOMEM;
+		}
+		memcpy(aux_buf, rpc_resp->payload, nbytes);
+
+		kvfree(rpc_resp);
+		/* on success the response iov takes ownership of aux_buf */
+		ret = ksmbd_iov_pin_rsp_read(work, (char *)rsp + 4,
+					     sizeof(struct smb_com_read_rsp) - 4,
+					     aux_buf, nbytes);
+		if (ret) {
+			kvfree(aux_buf);
+			rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+			return -EINVAL;
+		}
+	} else {
+		rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+		return -EINVAL;
+	}
+
+	/* read success, prepare response (data length split low/high 16 bits) */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 12;
+	rsp->Remaining = 0;
+	rsp->DataCompactionMode = 0;
+	rsp->Reserved = 0;
+	rsp->DataLength = cpu_to_le16(nbytes & 0xFFFF);
+	rsp->DataOffset = cpu_to_le16(sizeof(struct smb_com_read_rsp) -
+			sizeof(rsp->hdr.smb_buf_length));
+	rsp->DataLengthHigh = cpu_to_le16(nbytes >> 16);
+	rsp->Reserved2 = 0;
+
+	rsp->ByteCount = cpu_to_le16(nbytes);
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + nbytes);
+
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+
+	return ret;
+}
+
+/**
+ * smb_read_andx() - read request handler
+ * @work:	smb work containing read command
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_read_andx(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct smb_com_read_req *req = work->request_buf;
+	struct smb_com_read_rsp *rsp = work->response_buf;
+	struct ksmbd_file *fp;
+	void *aux_payload_buf = NULL;
+	loff_t pos;
+	size_t count;
+	ssize_t nbytes;
+	int err = 0;
+
+	if (test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_PIPE))
+		return smb_read_andx_pipe(work);
+
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		pr_err("failed to get filp for fid %d\n", req->Fid);
+		rsp->hdr.Status.CifsError = STATUS_FILE_CLOSED;
+		return -ENOENT;
+	}
+
+	/* WordCount 12 means the request carries a 64-bit offset */
+	pos = le32_to_cpu(req->OffsetLow);
+	if (req->hdr.WordCount == 12)
+		pos |= ((loff_t)le32_to_cpu(req->OffsetHigh) << 32);
+
+	count = le16_to_cpu(req->MaxCount);
+	/*
+	 * It probably seems to be set to 0 or 0xFFFF if MaxCountHigh is
+	 * not supported. If it is 0xFFFF, it is set to a too large value
+	 * and a read fail occurs. If it is 0xFFFF, limit it to not set
+	 * the value.
+	 *
+	 * [MS-SMB] 3.2.4.4.1:
+	 * If the CAP_LARGE_READX bit is set in
+	 * Client.Connection.ServerCapabilities, then the client is allowed to
+	 * issue a read of a size larger than Client.Connection.MaxBufferSize
+	 * using an SMB_COM_READ_ANDX request.
+	 */
+	if (conn->vals->capabilities & CAP_LARGE_READ_X &&
+	    le32_to_cpu(req->MaxCountHigh) < 0xFFFF)
+		count |= le32_to_cpu(req->MaxCountHigh) << 16;
+	else if (count > CIFS_DEFAULT_IOSIZE) {
+		ksmbd_debug(SMB, "read size(%zu) exceeds max size(%u)\n", count,
+			    CIFS_DEFAULT_IOSIZE);
+		ksmbd_debug(SMB, "limiting read size to max size(%u)\n",
+			    CIFS_DEFAULT_IOSIZE);
+		count = CIFS_DEFAULT_IOSIZE;
+	}
+
+	ksmbd_debug(SMB, "filename %pd, offset %lld, count %zu\n",
+		    fp->filp->f_path.dentry, pos, count);
+
+	aux_payload_buf = kvmalloc(count, GFP_KERNEL | __GFP_ZERO);
+	if (!aux_payload_buf) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	nbytes = ksmbd_vfs_read(work, fp, count, &pos, aux_payload_buf);
+	if (nbytes < 0) {
+		err = nbytes;
+		goto out;
+	}
+
+	/*
+	 * Pin the data into the response iov before any return path so the
+	 * buffer is owned by the response in the AndX-chained case as well
+	 * (previously it was leaked when AndXCommand was chained, and also
+	 * leaked when ksmbd_vfs_read() failed).
+	 */
+	err = ksmbd_iov_pin_rsp_read(work, (char *)rsp + 4,
+				     sizeof(struct smb_com_read_rsp) - 4,
+				     aux_payload_buf, nbytes);
+	if (err)
+		goto out;
+	aux_payload_buf = NULL;	/* ownership transferred to response iov */
+
+	/* read success, prepare response (data length split low/high 16 bits) */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 12;
+	rsp->Remaining = 0;
+	rsp->DataCompactionMode = 0;
+	rsp->Reserved = 0;
+	rsp->DataLength = cpu_to_le16(nbytes & 0xFFFF);
+	rsp->DataOffset = cpu_to_le16(sizeof(struct smb_com_read_rsp) -
+			sizeof(rsp->hdr.smb_buf_length));
+	rsp->DataLengthHigh = cpu_to_le16(nbytes >> 16);
+	rsp->Reserved2 = 0;
+
+	rsp->ByteCount = cpu_to_le16(nbytes);
+
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		ksmbd_fd_put(work, fp);
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+	ksmbd_fd_put(work, fp);
+	return 0;
+
+out:
+	kvfree(aux_payload_buf);	/* kvfree(NULL) is a no-op */
+	ksmbd_fd_put(work, fp);
+	if (err)
+		rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+	return err;
+}
+
+/**
+ * smb_write() - write request handler
+ * @work:	smb work containing write command
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_write(struct ksmbd_work *work)
+{
+	struct smb_com_write_req_32bit *req = work->request_buf;
+	struct smb_com_write_rsp_32bit *rsp = work->response_buf;
+	struct ksmbd_file *fp = NULL;
+	loff_t pos;
+	size_t count;
+	char *data_buf;
+	ssize_t nbytes = 0;
+	int err = 0;
+	unsigned int maxlen, offset;
+
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		ksmbd_debug(SMB,
+			"returning as user does not have permission to write\n");
+		rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		return -EACCES;
+	}
+
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		pr_err("failed to get filp for fid %u\n", req->Fid);
+		rsp->hdr.Status.CifsError = STATUS_FILE_CLOSED;
+		return -ENOENT;
+	}
+
+	pos = le32_to_cpu(req->Offset);
+	count = le16_to_cpu(req->Length);
+	data_buf = req->Data;
+	/* make sure the inline data lies fully within the received PDU */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_write_req_32bit, Data);
+	if (offset + count > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		ksmbd_fd_put(work, fp);
+		return -EINVAL;
+	}
+
+	ksmbd_debug(SMB, "filename %pd, offset %lld, count %zu\n",
+		    fp->filp->f_path.dentry, pos, count);
+	/* a zero-length SMB_COM_WRITE truncates the file at the given offset */
+	if (!count) {
+		err = ksmbd_vfs_truncate(work, fp, pos);
+		nbytes = 0;
+	} else
+		err = ksmbd_vfs_write(work, fp, data_buf,
+				      count, &pos, 0, &nbytes);
+
+	/* Written is 16-bit on the wire; count was <= 0xFFFF above */
+	rsp->hdr.WordCount = 1;
+	rsp->Written = cpu_to_le16(nbytes & 0xFFFF);
+	rsp->ByteCount = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+
+	ksmbd_fd_put(work, fp);
+	if (!err) {
+		rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+		return 0;
+	}
+
+	if (err == -ENOSPC || err == -EFBIG)
+		rsp->hdr.Status.CifsError = STATUS_DISK_FULL;
+	else
+		rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+	return err;
+}
+
+/**
+ * smb_write_andx_pipe() - write on pipe request handler
+ * @work:	smb work containing write command
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_write_andx_pipe(struct ksmbd_work *work)
+{
+	struct smb_com_write_req *req = work->request_buf;
+	struct smb_com_write_rsp *rsp = work->response_buf;
+	struct ksmbd_rpc_command *rpc_resp;
+	int ret = 0;
+	size_t count = 0;
+	unsigned int maxlen;
+
+	count = le16_to_cpu(req->DataLengthLow);
+	if (work->conn->vals->capabilities & CAP_LARGE_WRITE_X)
+		count |= (le16_to_cpu(req->DataLengthHigh) << 16);
+
+	/*
+	 * Reject a PDU shorter than the fixed request header before
+	 * subtracting, so the unsigned maxlen cannot underflow and
+	 * defeat the bounds check below.
+	 */
+	maxlen = get_req_len(req);
+	if (maxlen < sizeof(struct smb_com_write_req)) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+	maxlen -= sizeof(struct smb_com_write_req);
+	if (count > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	rpc_resp = ksmbd_rpc_write(work->sess, req->Fid, req->Data, count);
+	if (rpc_resp) {
+		if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) {
+			rsp->hdr.Status.CifsError = STATUS_NOT_SUPPORTED;
+			kvfree(rpc_resp);
+			return -EOPNOTSUPP;
+		}
+		if (rpc_resp->flags != KSMBD_RPC_OK) {
+			rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+			kvfree(rpc_resp);
+			return -EINVAL;
+		}
+		/* report back how much the RPC layer actually consumed */
+		count = rpc_resp->payload_sz;
+		kvfree(rpc_resp);
+	} else {
+		ret = -EINVAL;
+	}
+
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 6;
+	rsp->Count = cpu_to_le16(count & 0xFFFF);
+	rsp->Remaining = 0;
+	rsp->CountHigh = cpu_to_le16(count >> 16);
+	rsp->Reserved = 0;
+	rsp->ByteCount = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+
+	return ret;
+}
+
+/**
+ * smb_write_andx() - andx write request handler
+ * @work:	smb work containing write command
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_write_andx(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct smb_com_write_req *req = work->request_buf;
+	struct smb_com_write_rsp *rsp = work->response_buf;
+	struct ksmbd_file *fp;
+	bool writethrough = false;
+	loff_t pos;
+	size_t count;
+	ssize_t nbytes = 0;
+	char *data_buf;
+	int err = 0;
+	unsigned int maxlen, offset;
+
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		ksmbd_debug(SMB,
+			"returning as user does not have permission to write\n");
+		rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		return -EACCES;
+	}
+
+	if (test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_PIPE)) {
+		ksmbd_debug(SMB, "Write ANDX called for IPC$");
+		return smb_write_andx_pipe(work);
+	}
+
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		pr_err("failed to get filp for fid %u\n", req->Fid);
+		rsp->hdr.Status.CifsError = STATUS_FILE_CLOSED;
+		return -ENOENT;
+	}
+
+	/* WordCount 14 means the request carries a 64-bit offset */
+	pos = le32_to_cpu(req->OffsetLow);
+	if (req->hdr.WordCount == 14)
+		pos |= ((loff_t)le32_to_cpu(req->OffsetHigh) << 32);
+
+	/* WriteMode 1 requests write-through; forwarded to the VFS layer */
+	writethrough = (le16_to_cpu(req->WriteMode) == 1);
+
+	/*
+	 * [MS-SMB] 3.3.5.8:
+	 * If CAP_LARGE_WRITEX is set in Server.Connection.ClientCapabilities,
+	 * then it is possible that the count of bytes to be written is larger
+	 * than the server's MaxBufferSize
+	 */
+	count = le16_to_cpu(req->DataLengthLow);
+	if (conn->vals->capabilities & CAP_LARGE_WRITE_X)
+		count |= (le16_to_cpu(req->DataLengthHigh) << 16);
+	else if (count > CIFS_DEFAULT_IOSIZE) {
+		ksmbd_debug(SMB, "write size(%zu) exceeds max size(%u)\n",
+				count, CIFS_DEFAULT_IOSIZE);
+		ksmbd_debug(SMB, "limiting write size to max size(%u)\n",
+				CIFS_DEFAULT_IOSIZE);
+		count = CIFS_DEFAULT_IOSIZE;
+	}
+
+	/*
+	 * DataOffset is relative to the SMB header, which sits 4 bytes
+	 * (the RFC1002 length field) into the PDU; validate that the
+	 * data region lies entirely within the received request.
+	 */
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if ((offset > maxlen) ||
+	    (offset + count > maxlen)) {
+		pr_err("invalid write data offset %u, smb_len %u\n",
+		       le16_to_cpu(req->DataOffset),
+		       get_rfc1002_len(req));
+		err = -EINVAL;
+		goto out;
+	}
+
+	data_buf = (char *)req + offset;
+
+	ksmbd_debug(SMB, "filename %pd, offset %lld, count %zu\n",
+		    fp->filp->f_path.dentry, pos, count);
+	err = ksmbd_vfs_write(work, fp, data_buf, count, &pos,
+			      writethrough, &nbytes);
+	if (err < 0)
+		goto out;
+
+	/* write success, prepare response (count split low/high 16 bits) */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 6;
+	rsp->Count = cpu_to_le16(nbytes & 0xFFFF);
+	rsp->Remaining = 0;
+	rsp->CountHigh = cpu_to_le16(nbytes >> 16);
+	rsp->Reserved = 0;
+	rsp->ByteCount = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+
+	ksmbd_fd_put(work, fp);
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+
+	return 0;
+
+out:
+	ksmbd_fd_put(work, fp);
+	/* XXX */
+	if (err == -ENOSPC || err == -EFBIG)
+		rsp->hdr.Status.CifsError = STATUS_DISK_FULL;
+	else
+		rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+	return err;
+}
+
+/**
+ * smb_echo() - echo(ping) request handler
+ * @work:	smb work containing echo command
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_echo(struct ksmbd_work *work)
+{
+	struct smb_com_echo_req *req = work->request_buf;
+	struct smb_com_echo_rsp *rsp = work->response_buf;
+	__u16 data_count, echo_count;
+	int i;
+
+	echo_count = le16_to_cpu(req->EchoCount);
+
+	ksmbd_debug(SMB, "SMB_COM_ECHO called with echo count %u\n",
+		    echo_count);
+
+	/* EchoCount of 0 means "do not reply at all" */
+	if (!echo_count) {
+		work->send_no_response = true;
+		return 0;
+	}
+
+	/* don't let a client make us work too much */
+	if (echo_count > 10)
+		echo_count = 10;
+
+	data_count = le16_to_cpu(req->ByteCount);
+	/*
+	 * Validate the client-supplied ByteCount against the received
+	 * PDU length before echoing it back, so the memcpy below cannot
+	 * read past the end of the request buffer.
+	 */
+	if (offsetof(struct smb_com_echo_req, Data) + data_count >
+	    get_req_len(req)) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	/* send echo response to server */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 1;
+	rsp->ByteCount = cpu_to_le16(data_count);
+
+	memcpy(rsp->Data, req->Data, data_count);
+	inc_resp_size(work, (rsp->hdr.WordCount * 2) + data_count);
+
+	/* Send req->EchoCount - 1 number of ECHO response now &
+	 * if SMB CANCEL for Echo comes don't send response
+	 */
+	for (i = 1; i < echo_count && !work->send_no_response; i++) {
+		rsp->SequenceNumber = cpu_to_le16(i);
+		ksmbd_conn_write(work);
+	}
+
+	/* Last echo response */
+	rsp->SequenceNumber = cpu_to_le16(i);
+
+	return 0;
+}
+
+/**
+ * smb_flush() - file sync - flush request handler
+ * @work:	smb work containing flush command
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_flush(struct ksmbd_work *work)
+{
+	struct smb_com_flush_req *req = work->request_buf;
+	struct smb_com_flush_rsp *rsp = work->response_buf;
+	int err;
+
+	ksmbd_debug(SMB, "SMB_COM_FLUSH called for fid %u\n", req->FileID);
+
+	/* fid 0xFFFF asks the server to flush every open file */
+	if (req->FileID == 0xFFFF)
+		err = ksmbd_file_table_flush(work);
+	else
+		err = ksmbd_vfs_fsync(work, req->FileID, KSMBD_NO_FID);
+
+	if (err) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+		return err;
+	}
+
+	/* file fsync success, return response to server */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 0;
+	rsp->ByteCount = 0;
+	return 0;
+}
+
+/*****************************************************************************
+ * TRANS2 command implementation functions
+ *****************************************************************************/
+
+/**
+ * get_filetype() - convert file mode to smb file type
+ * @mode:	file mode to be converted
+ *
+ * Maps the S_IF* bits of a unix mode to the corresponding UNIX_*
+ * wire constant; UNIX_UNKNOWN is the fallback.
+ *
+ * Return:	converted file type
+ */
+static __u32 get_filetype(mode_t mode)
+{
+	if (S_ISREG(mode))
+		return UNIX_FILE;
+	if (S_ISDIR(mode))
+		return UNIX_DIR;
+	if (S_ISLNK(mode))
+		return UNIX_SYMLINK;
+	if (S_ISCHR(mode))
+		return UNIX_CHARDEV;
+	if (S_ISBLK(mode))
+		return UNIX_BLOCKDEV;
+	if (S_ISFIFO(mode))
+		return UNIX_FIFO;
+	if (S_ISSOCK(mode))
+		return UNIX_SOCKET;
+	return UNIX_UNKNOWN;
+}
+
+/**
+ * init_unix_info() - convert file stat information to smb file info format
+ * @unix_info:	smb file information format
+ * @user_ns:	user namespace used to map kuid/kgid to wire ids
+ * @stat:	unix file/dir stat information
+ */
+static void init_unix_info(struct file_unix_basic_info *unix_info,
+			   struct user_namespace *user_ns, struct kstat *stat)
+{
+	u64 time;
+
+	unix_info->EndOfFile = cpu_to_le64(stat->size);
+	/* NumOfBytes is on-disk allocation: blocks are 512-byte units */
+	unix_info->NumOfBytes = cpu_to_le64(512 * stat->blocks);
+	time = ksmbd_UnixTimeToNT(stat->ctime);
+	unix_info->LastStatusChange = cpu_to_le64(time);
+	time = ksmbd_UnixTimeToNT(stat->atime);
+	unix_info->LastAccessTime = cpu_to_le64(time);
+	time = ksmbd_UnixTimeToNT(stat->mtime);
+	unix_info->LastModificationTime = cpu_to_le64(time);
+	unix_info->Uid = cpu_to_le64(from_kuid(user_ns, stat->uid));
+	unix_info->Gid = cpu_to_le64(from_kgid(user_ns, stat->gid));
+	unix_info->Type = cpu_to_le32(get_filetype(stat->mode));
+	/* device numbers are only meaningful for char/block special files */
+	unix_info->DevMajor = cpu_to_le64(MAJOR(stat->rdev));
+	unix_info->DevMinor = cpu_to_le64(MINOR(stat->rdev));
+	unix_info->UniqueId = cpu_to_le64(stat->ino);
+	unix_info->Permissions = cpu_to_le64(stat->mode);
+	unix_info->Nlinks = cpu_to_le64(stat->nlink);
+}
+
+/**
+ * unix_info_to_attr() - convert smb file info format to unix attr format
+ * @unix_info:	smb file information format
+ * @user_ns:	user namespace used to map wire ids to kuid/kgid
+ * @attrs:	unix file/dir stat information
+ *
+ * Each field equal to NO_CHANGE_64 is a sentinel meaning "leave this
+ * attribute untouched"; only changed fields get their ATTR_* bit set
+ * in attrs->ia_valid.
+ *
+ * Return:	0
+ */
+static int unix_info_to_attr(struct file_unix_basic_info *unix_info,
+			     struct user_namespace *user_ns,
+			     struct iattr *attrs)
+{
+	struct timespec64 ts;
+
+	if (le64_to_cpu(unix_info->EndOfFile) != NO_CHANGE_64) {
+		attrs->ia_size = le64_to_cpu(unix_info->EndOfFile);
+		attrs->ia_valid |= ATTR_SIZE;
+	}
+
+	if (le64_to_cpu(unix_info->LastStatusChange) != NO_CHANGE_64) {
+		ts = smb_NTtimeToUnix(unix_info->LastStatusChange);
+		attrs->ia_ctime = ts;
+		attrs->ia_valid |= ATTR_CTIME;
+	}
+
+	if (le64_to_cpu(unix_info->LastAccessTime) != NO_CHANGE_64) {
+		ts = smb_NTtimeToUnix(unix_info->LastAccessTime);
+		attrs->ia_atime = ts;
+		attrs->ia_valid |= ATTR_ATIME;
+	}
+
+	if (le64_to_cpu(unix_info->LastModificationTime) != NO_CHANGE_64) {
+		ts = smb_NTtimeToUnix(unix_info->LastModificationTime);
+		attrs->ia_mtime = ts;
+		attrs->ia_valid |= ATTR_MTIME;
+	}
+
+	if (le64_to_cpu(unix_info->Uid) != NO_CHANGE_64) {
+		attrs->ia_uid = make_kuid(user_ns, le64_to_cpu(unix_info->Uid));
+		attrs->ia_valid |= ATTR_UID;
+	}
+
+	if (le64_to_cpu(unix_info->Gid) != NO_CHANGE_64) {
+		attrs->ia_gid = make_kgid(user_ns, le64_to_cpu(unix_info->Gid));
+		attrs->ia_valid |= ATTR_GID;
+	}
+
+	if (le64_to_cpu(unix_info->Permissions) != NO_CHANGE_64) {
+		attrs->ia_mode = le64_to_cpu(unix_info->Permissions);
+		attrs->ia_valid |= ATTR_MODE;
+	}
+
+	/*
+	 * NOTE(review): the file-type bits are ORed into ia_mode even when
+	 * Permissions was NO_CHANGE_64 (ATTR_MODE not set) — confirm that
+	 * callers ignore ia_mode when ATTR_MODE is clear.
+	 */
+	switch (le32_to_cpu(unix_info->Type)) {
+	case UNIX_FILE:
+		attrs->ia_mode |= S_IFREG;
+		break;
+	case UNIX_DIR:
+		attrs->ia_mode |= S_IFDIR;
+		break;
+	case UNIX_SYMLINK:
+		attrs->ia_mode |= S_IFLNK;
+		break;
+	case UNIX_CHARDEV:
+		attrs->ia_mode |= S_IFCHR;
+		break;
+	case UNIX_BLOCKDEV:
+		attrs->ia_mode |= S_IFBLK;
+		break;
+	case UNIX_FIFO:
+		attrs->ia_mode |= S_IFIFO;
+		break;
+	case UNIX_SOCKET:
+		attrs->ia_mode |= S_IFSOCK;
+		break;
+	default:
+		/* unknown type: log and continue with unchanged type bits */
+		pr_err("unknown file type 0x%x\n",
+		       le32_to_cpu(unix_info->Type));
+	}
+
+	return 0;
+}
+
+/**
+ * unix_to_dos_time() - convert unix time to dos format
+ * @ts:		unix style time
+ * @time:	store dos style time
+ * @date:	store dos style date
+ *
+ * DOS date packs day in bits 0-4, month in bits 5-8 and year-1980 in
+ * bits 9-15; DOS time packs seconds/2 in bits 0-4, minutes in bits
+ * 5-10 and hours in bits 11-15.  Each 16-bit value is assembled here
+ * high-byte-first and then stored little-endian.
+ */
+static void unix_to_dos_time(struct timespec64 ts, __le16 *time, __le16 *date)
+{
+	struct tm t;
+	__u16 val;
+
+	/* shift to local time using the system timezone offset */
+	time64_to_tm(ts.tv_sec, (-sys_tz.tz_minuteswest) * 60, &t);
+	val = (((unsigned int)(t.tm_mon + 1)) >> 3) | ((t.tm_year - 80) << 1);
+	val = ((val & 0xFF) << 8) | (t.tm_mday | (((t.tm_mon + 1) & 0x7) << 5));
+	*date = cpu_to_le16(val);
+
+	val = ((((unsigned int)t.tm_min >> 3) & 0x7) |
+	       (((unsigned int)t.tm_hour) << 3));
+	val = ((val & 0xFF) << 8) | ((t.tm_sec / 2) | ((t.tm_min & 0x7) << 5));
+	*time = cpu_to_le16(val);
+}
+
+/**
+ * cifs_convert_ace() - helper function for convert an Access Control Entry
+ *		from cifs wire format to local POSIX xattr format
+ * @ace:	local - unix style Access Control Entry format
+ * @cifs_ace:	cifs wire Access Control Entry format
+ */
+static void cifs_convert_ace(struct posix_acl_xattr_entry *ace,
+			     struct cifs_posix_ace *cifs_ace)
+{
+	/* tag/perm are u8 on the wire, so no le conversion is needed there */
+	ace->e_tag = cpu_to_le16(cifs_ace->cifs_e_tag);
+	ace->e_perm = cpu_to_le16(cifs_ace->cifs_e_perm);
+	ace->e_id = cpu_to_le32(le64_to_cpu(cifs_ace->cifs_uid));
+}
+
+/**
+ * cifs_copy_posix_acl() - Convert ACL from CIFS POSIX wire format to local
+ *		Linux POSIX ACL xattr
+ * @trgt:	target buffer for storing in local ace format
+ * @src:	source buffer in cifs ace format
+ * @buflen:	target buffer length
+ * @acl_type:	ace type
+ * @size_of_data_area:	max buffer size to store ace xattr
+ *
+ * NOTE(review): trgt is dereferenced unconditionally below; callers must
+ * pass a valid buffer (the NULL check on local_acl only guards the
+ * buflen test) — confirm no caller probes for size with trgt == NULL.
+ *
+ * Return:	size of convert ace xattr on success, otherwise error
+ */
+static int cifs_copy_posix_acl(char *trgt, char *src, const int buflen,
+			       const int acl_type, const int size_of_data_area)
+{
+	int size = 0;
+	int i;
+	__u16 count;
+	struct cifs_posix_ace *pACE;
+	struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)src;
+	struct posix_acl_xattr_entry *ace;
+	struct posix_acl_xattr_header *local_acl = (void *)trgt;
+
+	if (le16_to_cpu(cifs_acl->version) != CIFS_ACL_VERSION)
+		return -EOPNOTSUPP;
+
+	/* pick the entry count for the requested ACL type and bound-check it */
+	if (acl_type & ACL_TYPE_ACCESS) {
+		count = le16_to_cpu(cifs_acl->access_entry_count);
+		pACE = &cifs_acl->ace_array[0];
+		size = sizeof(struct cifs_posix_acl);
+		size += sizeof(struct cifs_posix_ace) * count;
+		/* check if we would go beyond end of SMB */
+		if (size_of_data_area < size) {
+			ksmbd_debug(SMB, "bad CIFS POSIX ACL size %d vs. %d\n",
+				    size_of_data_area, size);
+			return -EINVAL;
+		}
+	} else if (acl_type & ACL_TYPE_DEFAULT) {
+		count = le16_to_cpu(cifs_acl->default_entry_count);
+		pACE = &cifs_acl->ace_array[0];
+		size = sizeof(struct cifs_posix_acl);
+		size += sizeof(struct cifs_posix_ace) * count;
+		/* check if we would go beyond end of SMB */
+		if (size_of_data_area < size)
+			return -EINVAL;
+	} else {
+		/* illegal type */
+		return -EINVAL;
+	}
+
+	/* size is reused: now it is the local xattr size for count entries */
+	size = posix_acl_xattr_size(count);
+	if ((buflen != 0) && local_acl && size > buflen)
+		return -ERANGE;
+
+	/* buffer big enough */
+	ace = (void *)(local_acl + 1);
+	local_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
+	for (i = 0; i < count; i++) {
+		cifs_convert_ace(&ace[i], pACE);
+		pACE++;
+	}
+
+	return size;
+}
+
+/**
+ * convert_ace_to_cifs_ace() - helper function to convert ACL from local
+ * Linux POSIX ACL xattr to CIFS POSIX wire format to local
+ * @cifs_ace:	target buffer for storing in cifs ace format
+ * @local_ace:	source buffer in Linux POSIX ACL xattr format
+ *
+ * Return:	0 (ACL converted ok)
+ */
+static __u16 convert_ace_to_cifs_ace(struct cifs_posix_ace *cifs_ace,
+		const struct posix_acl_xattr_entry *local_ace)
+{
+	cifs_ace->cifs_e_perm = le16_to_cpu(local_ace->e_perm);
+	cifs_ace->cifs_e_tag = le16_to_cpu(local_ace->e_tag);
+	/* widen the 32-bit xattr id to the 64-bit wire uid field;
+	 * the -1 "no id" sentinel is propagated explicitly
+	 */
+	if (local_ace->e_id == cpu_to_le32(-1))
+		cifs_ace->cifs_uid = cpu_to_le64(-1);
+	else
+		cifs_ace->cifs_uid = cpu_to_le64(le32_to_cpu(local_ace->e_id));
+	return 0;
+}
+
+/**
+ * ACL_to_cifs_posix() - ACL from local Linux POSIX xattr to CIFS POSIX ACL
+ *		wire format
+ * @parm_data:	target buffer for storing in cifs ace format
+ * @pACL:	source buffer in cifs ace format
+ * @buflen:	target buffer length
+ * @acl_type:	ace type
+ *
+ * Return:	on success, the byte size of the converted ace entries
+ *		(excluding the cifs_posix_acl header); 0 on any failure
+ */
+static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
+			       const int buflen, const int acl_type)
+{
+	__u16 rc = 0;
+	struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data;
+	struct posix_acl_xattr_header *local_acl = (void *)pACL;
+	struct posix_acl_xattr_entry *ace = (void *)(local_acl + 1);
+	int count;
+	int i, j = 0;
+
+	if ((buflen == 0) || !pACL || !cifs_acl)
+		return 0;
+
+	count = posix_acl_xattr_count((size_t)buflen);
+	ksmbd_debug(SMB, "setting acl with %d entries from buf of length %d and version of %d\n",
+		 count, buflen, le32_to_cpu(local_acl->a_version));
+	if (le32_to_cpu(local_acl->a_version) != 2) {
+		ksmbd_debug(SMB, "unknown POSIX ACL version %d\n",
+			    le32_to_cpu(local_acl->a_version));
+		return 0;
+	}
+	/*
+	 * j indexes into ace_array: default entries are appended after any
+	 * access entries already recorded in the header by a previous call.
+	 * NOTE(review): j is advanced in the loop but entries are written at
+	 * index i, not j — confirm whether default entries are meant to be
+	 * stored after the access entries.
+	 */
+	if (acl_type == ACL_TYPE_ACCESS) {
+		cifs_acl->access_entry_count = cpu_to_le16(count);
+		j = 0;
+	} else if (acl_type == ACL_TYPE_DEFAULT) {
+		cifs_acl->default_entry_count = cpu_to_le16(count);
+		if (cifs_acl->access_entry_count)
+			j = le16_to_cpu(cifs_acl->access_entry_count);
+	} else {
+		ksmbd_debug(SMB, "unknown ACL type %d\n", acl_type);
+		return 0;
+	}
+	for (i = 0; i < count; i++, j++) {
+		rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i], &ace[i]);
+		if (rc != 0) {
+			/* ACE not converted */
+			break;
+		}
+	}
+	if (rc == 0) {
+		rc = (__u16)(count * sizeof(struct cifs_posix_ace));
+		/* BB add check to make sure ACL does not overflow SMB */
+	}
+	return rc;
+}
+
+/**
+ * smb_get_acl() - handler for query posix acl information
+ * @work:	smb work containing posix acl query command
+ * @path:	path of file/dir to query acl
+ *
+ * Converts the POSIX_ACL_ACCESS and POSIX_ACL_DEFAULT xattrs (when
+ * present) into a single CIFS POSIX ACL wire blob placed directly in
+ * the trans2 response data area.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_get_acl(struct ksmbd_work *work, struct path *path)
+{
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	char *buf = NULL;
+	int rc = 0, value_len;
+	struct cifs_posix_acl *aclbuf;
+	__u16 rsp_data_cnt = 0;
+
+	/* ACL blob is built in place in the response data area */
+	aclbuf = (struct cifs_posix_acl *)(work->response_buf +
+			sizeof(struct smb_com_trans2_rsp) + 4);
+
+	aclbuf->version = cpu_to_le16(CIFS_ACL_VERSION);
+	aclbuf->default_entry_count = 0;
+	aclbuf->access_entry_count = 0;
+
+	/* check if POSIX_ACL_XATTR_ACCESS exists */
+	value_len = ksmbd_vfs_getxattr(mnt_idmap(path->mnt), path->dentry,
+				       XATTR_NAME_POSIX_ACL_ACCESS,
+				       &buf);
+	if (value_len > 0) {
+		rsp_data_cnt += ACL_to_cifs_posix((char *)aclbuf, buf,
+				value_len, ACL_TYPE_ACCESS);
+		kfree(buf);
+		buf = NULL;
+	}
+
+	/* check if POSIX_ACL_XATTR_DEFAULT exists */
+	value_len = ksmbd_vfs_getxattr(mnt_idmap(path->mnt), path->dentry,
+				       XATTR_NAME_POSIX_ACL_DEFAULT,
+				       &buf);
+	if (value_len > 0) {
+		rsp_data_cnt += ACL_to_cifs_posix((char *)aclbuf, buf,
+						  value_len, ACL_TYPE_DEFAULT);
+		kfree(buf);
+		buf = NULL;
+	}
+
+	/* ACL_to_cifs_posix() counts entries only; add the header once */
+	if (rsp_data_cnt)
+		rsp_data_cnt += sizeof(struct cifs_posix_acl);
+
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = cpu_to_le16(rsp_data_cnt);
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = cpu_to_le16(2);
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = cpu_to_le16(60);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	rsp->ByteCount = cpu_to_le16(rsp_data_cnt + 5);
+	inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+
+	/* buf is NULL on every path here; kfree(NULL) is a no-op anyway */
+	kfree(buf);
+	return rc;
+}
+
+/**
+ * smb_set_acl() - handler for setting posix acl information
+ * @work:	smb work containing posix acl set command
+ *
+ * Validates the request bounds, converts the CIFS POSIX ACL wire blob
+ * into a local POSIX ACL xattr and stores it on the named file.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_acl(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct cifs_posix_acl *wire_acl_data;
+	char *fname, *buf = NULL;
+	int rc = 0, acl_type = 0, value_len;
+	unsigned int maxlen, offset;
+
+	/* the file name must start inside the received PDU */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	fname = smb_get_name(share, req->FileName, maxlen - offset,
+			     work, false);
+	if (IS_ERR(fname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(fname);
+	}
+
+	/* DataOffset is relative to the SMB header (4 bytes into the PDU) */
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		kfree(fname);
+		return -EINVAL;
+	}
+
+	buf = vmalloc(XATTR_SIZE_MAX);
+	if (!buf) {
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* a nonzero entry count below 0xFFFF selects the ACL type */
+	wire_acl_data = (struct cifs_posix_acl *)((char *)req + offset);
+	if (le16_to_cpu(wire_acl_data->access_entry_count) > 0 &&
+	    le16_to_cpu(wire_acl_data->access_entry_count) < 0xFFFF) {
+		acl_type = ACL_TYPE_ACCESS;
+	} else if (le16_to_cpu(wire_acl_data->default_entry_count) > 0 &&
+		   le16_to_cpu(wire_acl_data->default_entry_count) < 0xFFFF) {
+		acl_type = ACL_TYPE_DEFAULT;
+	} else {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = cifs_copy_posix_acl(buf, (char *)wire_acl_data, XATTR_SIZE_MAX,
+				 acl_type, maxlen - offset);
+	if (rc < 0) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		goto out;
+	}
+
+	value_len = rc;
+	if (acl_type == ACL_TYPE_ACCESS) {
+		rc = ksmbd_vfs_fsetxattr(work,
+					 fname,
+					 XATTR_NAME_POSIX_ACL_ACCESS,
+					 buf, value_len, 0);
+	} else if (acl_type == ACL_TYPE_DEFAULT) {
+		rc = ksmbd_vfs_fsetxattr(work,
+					 fname,
+					 XATTR_NAME_POSIX_ACL_DEFAULT,
+					 buf, value_len, 0);
+	}
+
+	if (rc < 0) {
+		rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+		goto out;
+	}
+
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = cpu_to_le16(0);
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = cpu_to_le16(0);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 2 for parameter count + 1 pad1*/
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Pad = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+out:
+	vfree(buf);	/* vfree(NULL) is a no-op */
+	kfree(fname);
+	return rc;
+}
+
+/**
+ * ksmbd_realloc_response() - resize a response buffer, preserving contents
+ * @ptr:	current response buffer
+ * @old_sz:	size of @ptr in bytes
+ * @new_sz:	requested new size in bytes
+ *
+ * Allocates a zeroed buffer of @new_sz bytes, copies over as much of the
+ * old contents as fits, and frees the old buffer.
+ *
+ * Return: the new buffer on success.  On allocation failure the original
+ * @ptr is returned unchanged, so callers detect failure by comparing the
+ * result against the pointer they passed in.
+ */
+static void *ksmbd_realloc_response(void *ptr, size_t old_sz, size_t new_sz)
+{
+	void *new_buf;
+
+	new_buf = kvmalloc(new_sz, GFP_KERNEL | __GFP_ZERO);
+	if (!new_buf)
+		return ptr;
+
+	/* preserve whatever fits in the smaller of the two buffers */
+	memcpy(new_buf, ptr, min(old_sz, new_sz));
+	kvfree(ptr);
+	return new_buf;
+}
+
+/**
+ * smb_readlink() - handler for reading symlink source path
+ * @work:	smb work containing query link information
+ * @path:	path of the symlink whose target is being queried
+ *
+ * Reads the symlink target into a temporary buffer, enlarges the response
+ * buffer when the (possibly UTF-16) target plus SMB header would not fit,
+ * then fills in the trans2 response with the target name.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_readlink(struct ksmbd_work *work, struct path *path)
+{
+	struct smb_com_trans2_qpi_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	int err, name_len, link_len;
+	char *buf, *ptr;
+
+	buf = kzalloc((CIFS_MF_SYMLINK_LINK_MAXLEN), GFP_KERNEL);
+	if (!buf) {
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		return -ENOMEM;
+	}
+
+	/* on success err is the number of bytes of target read into buf */
+	err = ksmbd_vfs_readlink(path, buf, CIFS_MF_SYMLINK_LINK_MAXLEN);
+	if (err < 0) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+		goto out;
+	}
+
+	/*
+	 * check if this namelen(unicode) and smb header can fit in small rsp
+	 * buf. If not, switch to large rsp buffer.
+	 */
+	/* worst-case UTF-16 size: (bytes + trailing null) * 2 */
+	err++;
+	err *= 2;
+	if (err + MAX_HEADER_SIZE(work->conn) > work->response_sz) {
+		void *nptr;
+		size_t nsz = err + MAX_HEADER_SIZE(work->conn);
+
+		nptr = ksmbd_realloc_response(work->response_buf,
+					      work->response_sz,
+					      nsz);
+		/* the realloc helper hands back the old pointer on failure */
+		if (nptr == work->response_buf) {
+			rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+			err = -ENOMEM;
+			goto out;
+		}
+
+		work->response_buf = nptr;
+		rsp = (struct smb_com_trans2_rsp *)work->response_buf;
+	}
+	link_len = err;
+	err = 0;
+
+	/* zero the 4 bytes between parameters and data (pad + params) */
+	ptr = (char *)&rsp->Buffer[0];
+	memset(ptr, 0, 4);
+	ptr += 4;
+
+	if (is_smbreq_unicode(&req->hdr)) {
+		name_len = smb_strtoUTF16((__le16 *)ptr,
+					  buf,
+					  link_len,
+					  work->conn->local_nls);
+		name_len++;     /* trailing null */
+		name_len *= 2;
+	} else { /* BB add path length overrun check */
+		/* link_len is the doubled size, so the limit is generous here */
+		name_len = strscpy(ptr, buf, link_len);
+		if (name_len == -E2BIG) {
+			err = -ENOMEM;
+			rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+			goto out;
+		}
+		name_len++;     /* trailing null */
+	}
+
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = cpu_to_le16(name_len);
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = cpu_to_le16(2);
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = cpu_to_le16(60);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	/* 2 param bytes + name + 3 pad bytes */
+	rsp->ByteCount = cpu_to_le16(name_len + 5);
+	inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+
+out:
+	kfree(buf);
+	return err;
+}
+
+/**
+ * smb_get_ea() - handler for extended attribute query
+ * @work:	smb work containing query xattr command
+ * @path:	path of file/dir to query xattr command
+ *
+ * Builds a FEALIST of all user.* xattrs of @path directly into the trans2
+ * response buffer.  The list is truncated (loop is exited early) if the
+ * remaining response space would be overrun.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_get_ea(struct ksmbd_work *work, struct path *path)
+{
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	char *name, *ptr, *xattr_list = NULL, *buf;
+	int rc, name_len, value_len, xattr_list_len;
+	struct fealist *eabuf = (struct fealist *)(work->response_buf +
+			sizeof(struct smb_com_trans2_rsp) + 4);
+	struct fea *temp_fea;
+	ssize_t buf_free_len;
+	__u16 rsp_data_cnt = 4;
+
+	eabuf->list_len = cpu_to_le32(rsp_data_cnt);
+	/* space left in the response buffer for the FEA list */
+	buf_free_len = work->response_sz - (get_rfc1002_len(rsp) + 4) -
+		sizeof(struct smb_com_trans2_rsp);
+	rc = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
+	if (rc < 0) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+		goto out;
+	} else if (!rc) { /* there is no EA in the file */
+		eabuf->list_len = cpu_to_le32(rsp_data_cnt);
+		goto done;
+	}
+
+	xattr_list_len = rc;
+	rc = 0;
+
+	ptr = (char *)eabuf->list;
+	temp_fea = (struct fea *)ptr;
+	for (name = xattr_list; name - xattr_list < xattr_list_len;
+	     name += strlen(name) + 1) {
+		ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
+		/*
+		 * CIFS does not support EA other name user.* namespace,
+		 * still keep the framework generic, to list other attrs
+		 * in future.
+		 */
+		if (strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+			continue;
+
+		/* only user.* reaches here, so the prefix is always stripped */
+		name_len = strlen(name) - XATTR_USER_PREFIX_LEN;
+
+		/* bail out before the entry header + name would overflow */
+		buf_free_len -= (offsetof(struct fea, name) + name_len + 1);
+		if (buf_free_len < 0)
+			break;
+
+		ptr = (char *)(&temp_fea->name + name_len + 1);
+
+		value_len = ksmbd_vfs_getxattr(mnt_idmap(path->mnt),
+					       path->dentry, name, &buf);
+		if (value_len <= 0) {
+			rc = -ENOENT;
+			rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+			goto out;
+		}
+
+		/* bail out before the value would overflow the response */
+		buf_free_len -= value_len;
+		if (buf_free_len < 0) {
+			kfree(buf);
+			break;
+		}
+
+		memcpy(ptr, buf, value_len);
+		kfree(buf);
+
+		temp_fea->EA_flags = 0;
+		temp_fea->name_len = name_len;
+		memcpy(temp_fea->name, &name[XATTR_USER_PREFIX_LEN], name_len);
+		temp_fea->value_len = cpu_to_le16(value_len);
+		rsp_data_cnt += offsetof(struct fea, name) + name_len + 1 +
+			value_len;
+		/* le32_add_cpu keeps list_len correct on big-endian hosts */
+		le32_add_cpu(&eabuf->list_len,
+			     offsetof(struct fea, name) + name_len + 1 +
+			     value_len);
+		ptr += value_len;
+		temp_fea = (struct fea *)ptr;
+	}
+
+done:
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = cpu_to_le16(rsp_data_cnt);
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = cpu_to_le16(2);
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = cpu_to_le16(60);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	/* 2 param bytes + data + 3 pad bytes */
+	rsp->ByteCount = cpu_to_le16(rsp_data_cnt + 5);
+	inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+out:
+	kvfree(xattr_list);
+	return rc;
+}
+
+/**
+ * query_path_info() - handler for query path info
+ * @work:	smb work containing query path info command
+ *
+ * Looks up the client-supplied path and fills the trans2 response for the
+ * requested InformationLevel (DOS/standard/basic/EA/name/all/alt-name/unix
+ * variants, plus symlink, all-EAs and POSIX ACL sub-handlers).
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int query_path_info(struct ksmbd_work *work)
+{
+	struct smb_hdr *rsp_hdr = work->response_buf;
+	struct smb_com_trans2_req *req = work->request_buf;
+	struct ksmbd_conn *conn = work->conn;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct trans2_qpi_req_params *req_params;
+	char *name = NULL;
+	struct path path, parent_path;
+	struct kstat st;
+	int rc;
+	char *ptr;
+	__u64 create_time = 0, time;
+	unsigned int maxlen, offset;
+
+	if (test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_PIPE)) {
+		rsp_hdr->Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+		return 0;
+	}
+
+	/* validate client-controlled offsets against the request length */
+	maxlen = get_req_len(work->request_buf);
+	offset = le16_to_cpu(req->ParameterOffset) + 4;
+	if (offset > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+	req_params = (struct trans2_qpi_req_params *)(work->request_buf + offset);
+
+	offset += offsetof(struct trans2_qpi_req_params, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req_params->FileName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	/* impersonate the connected user for the VFS operations below */
+	if (ksmbd_override_fsids(work)) {
+		kfree(name);
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		return -ENOMEM;
+	}
+
+	/* on success the parent dir is locked; released at err_out */
+	rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					&parent_path, &path, 0);
+	if (rc) {
+		if (rc == -EACCES || rc == -EXDEV)
+			rsp_hdr->Status.CifsError = STATUS_ACCESS_DENIED;
+		else
+			rsp_hdr->Status.CifsError =
+					STATUS_OBJECT_NAME_NOT_FOUND;
+		ksmbd_debug(SMB, "cannot get linux path for %s, err %d\n",
+				name, rc);
+		goto out;
+	}
+
+	if (d_is_symlink(path.dentry)) {
+		/*
+		 * NOTE(review): rc is still 0 here, so the function returns
+		 * success even though an error status is set — confirm this
+		 * is intentional.
+		 */
+		rsp_hdr->Status.CifsError = STATUS_ACCESS_DENIED;
+		goto err_out;
+	}
+
+	rc = vfs_getattr(&path, &st, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
+	if (rc) {
+		pr_err("cannot get stat information\n");
+		goto err_out;
+	}
+
+	if (test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
+		struct xattr_dos_attrib da;
+
+		/* rc > 0 means the DOS-attribute xattr was read; failures
+		 * are ignored and create_time stays 0
+		 */
+		rc = ksmbd_vfs_get_dos_attrib_xattr(mnt_idmap(path.mnt),
+						    path.dentry, &da);
+		if (rc > 0)
+			create_time = da.create_time;
+		rc = 0;
+	}
+
+	switch (le16_to_cpu(req_params->InformationLevel)) {
+	case SMB_INFO_STANDARD:
+	{
+		struct file_info_standard *infos;
+
+		ksmbd_debug(SMB, "SMB_INFO_STANDARD\n");
+		rc = ksmbd_query_inode_status(path.dentry);
+		if (rc == KSMBD_INODE_STATUS_PENDING_DELETE) {
+			rc = -EBUSY;
+			goto err_out;
+		}
+
+		rc = 0;
+		/* zero the pad + 2 param bytes before the data area */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		infos = (struct file_info_standard *)(ptr + 4);
+		unix_to_dos_time(ksmbd_NTtimeToUnix(cpu_to_le64(create_time)),
+				 &infos->CreationDate, &infos->CreationTime);
+		unix_to_dos_time(st.atime, &infos->LastAccessDate,
+				 &infos->LastAccessTime);
+		unix_to_dos_time(st.mtime, &infos->LastWriteDate,
+				 &infos->LastWriteTime);
+		infos->DataSize = cpu_to_le32(st.size);
+		/* st.blocks is in 512-byte units, hence << 9 */
+		infos->AllocationSize = cpu_to_le32(st.blocks << 9);
+		infos->Attributes = cpu_to_le16(S_ISDIR(st.mode) ?
+					ATTR_DIRECTORY : ATTR_ARCHIVE);
+		infos->EASize = 0;
+
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount = cpu_to_le16(22);
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(22);
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/* 2 param + 22 data + 3 pad = 27 */
+		rsp->ByteCount = cpu_to_le16(27);
+		rsp->Pad = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_STANDARD_INFO:
+	{
+		struct file_standard_info *standard_info;
+		unsigned int del_pending;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_STANDARD_INFO\n");
+		del_pending = ksmbd_query_inode_status(path.dentry);
+		if (del_pending == KSMBD_INODE_STATUS_PENDING_DELETE)
+			del_pending = 1;
+		else
+			del_pending = 0;
+
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct file_standard_info));
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount =
+			cpu_to_le16(sizeof(struct file_standard_info));
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount =
+			cpu_to_le16(2 + sizeof(struct file_standard_info) + 3);
+		rsp->Pad = 0;
+		/* lets set EA info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		standard_info = (struct file_standard_info *)(ptr + 4);
+		standard_info->AllocationSize = cpu_to_le64(st.blocks << 9);
+		standard_info->EndOfFile = cpu_to_le64(st.size);
+		/* don't count a link that is about to go away */
+		standard_info->NumberOfLinks = cpu_to_le32(get_nlink(&st) -
+			del_pending);
+		standard_info->DeletePending = del_pending;
+		standard_info->Directory = S_ISDIR(st.mode) ? 1 : 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_BASIC_INFO:
+	{
+		struct file_basic_info *basic_info;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_BASIC_INFO\n");
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct file_basic_info));
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(sizeof(struct file_basic_info));
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount =
+			cpu_to_le16(2 + sizeof(struct file_basic_info) + 3);
+		rsp->Pad = 0;
+		/* lets set EA info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		basic_info = (struct file_basic_info *)(ptr + 4);
+		basic_info->CreationTime = cpu_to_le64(create_time);
+		time = ksmbd_UnixTimeToNT(st.atime);
+		basic_info->LastAccessTime = cpu_to_le64(time);
+		time = ksmbd_UnixTimeToNT(st.mtime);
+		basic_info->LastWriteTime = cpu_to_le64(time);
+		time = ksmbd_UnixTimeToNT(st.ctime);
+		basic_info->ChangeTime = cpu_to_le64(time);
+		basic_info->Attributes = S_ISDIR(st.mode) ?
+					 ATTR_DIRECTORY_LE : ATTR_ARCHIVE_LE;
+		basic_info->Pad = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_EA_INFO:
+	{
+		struct file_ea_info *ea_info;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_EA_INFO\n");
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct file_ea_info));
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(sizeof(struct file_ea_info));
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount =
+			cpu_to_le16(2 + sizeof(struct file_ea_info) + 3);
+		rsp->Pad = 0;
+		/* lets set EA info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		ea_info = (struct file_ea_info *)(ptr + 4);
+		/* EAs are always reported as absent at this level */
+		ea_info->EaSize = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_NAME_INFO:
+	{
+		struct file_name_info *name_info;
+		size_t len, rsp_offset;
+		int uni_filename_len;
+		char *filename;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_NAME_INFO\n");
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		name_info = (struct file_name_info *)(ptr + 4);
+
+		filename = convert_to_nt_pathname(work->tcon->share_conf, &path);
+		if (!filename) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		len = strlen(filename);
+		/* ensure the converted name fits in the response buffer */
+		rsp_offset = offsetof(struct smb_com_trans2_rsp, Buffer) +
+			     offsetof(struct file_name_info, FileName) + len;
+		if (rsp_offset > work->response_sz) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		uni_filename_len = smbConvertToUTF16(
+				(__le16 *)name_info->FileName,
+				filename, len,
+				conn->local_nls, 0);
+		kfree(filename);
+		uni_filename_len *= 2;
+		name_info->FileNameLength = cpu_to_le32(uni_filename_len);
+
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount = cpu_to_le16(uni_filename_len + 4);
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(uni_filename_len + 4);
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount = cpu_to_le16(2 + uni_filename_len + 4 + 3);
+		rsp->Pad = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_ALL_INFO:
+	{
+		struct file_all_info *ainfo;
+		size_t len, rsp_offset;
+		unsigned int del_pending;
+		char *filename;
+		int uni_filename_len, total_count = 72;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_ALL_INFO\n");
+
+		del_pending = ksmbd_query_inode_status(path.dentry);
+		if (del_pending == KSMBD_INODE_STATUS_PENDING_DELETE)
+			del_pending = 1;
+		else
+			del_pending = 0;
+
+		filename = convert_to_nt_pathname(work->tcon->share_conf, &path);
+		if (!filename) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+
+		/*
+		 * Observation: sizeof smb_hdr is 33 bytes(including word count)
+		 * After that: trans2 response 22 bytes when stepcount 0 and
+		 * including ByteCount storage.
+		 */
+		/* lets set EA info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		ainfo = (struct file_all_info *) (ptr + 4);
+
+		ainfo->CreationTime = cpu_to_le64(create_time);
+		time = ksmbd_UnixTimeToNT(st.atime);
+		ainfo->LastAccessTime = cpu_to_le64(time);
+		time = ksmbd_UnixTimeToNT(st.mtime);
+		ainfo->LastWriteTime = cpu_to_le64(time);
+		time = ksmbd_UnixTimeToNT(st.ctime);
+		ainfo->ChangeTime = cpu_to_le64(time);
+		ainfo->Attributes = S_ISDIR(st.mode) ?
+					ATTR_DIRECTORY_LE : ATTR_ARCHIVE_LE;
+		ainfo->Pad1 = 0;
+		ainfo->AllocationSize = cpu_to_le64(st.blocks << 9);
+		ainfo->EndOfFile = cpu_to_le64(st.size);
+		ainfo->NumberOfLinks = cpu_to_le32(get_nlink(&st) -
+			del_pending);
+		ainfo->DeletePending = del_pending;
+		ainfo->Directory = S_ISDIR(st.mode) ? 1 : 0;
+		ainfo->Pad2 = 0;
+		ainfo->EASize = 0;
+		len = strlen(filename);
+		/* ensure the converted name fits in the response buffer */
+		rsp_offset = offsetof(struct smb_com_trans2_rsp, Buffer) +
+			     offsetof(struct file_all_info, FileName) + len;
+		if (rsp_offset > work->response_sz) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		uni_filename_len = smbConvertToUTF16((__le16 *)ainfo->FileName,
+						     filename, len,
+						     conn->local_nls, 0);
+		kfree(filename);
+		uni_filename_len *= 2;
+		ainfo->FileNameLength = cpu_to_le32(uni_filename_len);
+		total_count += uni_filename_len;
+
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		/* add unicode name length of name */
+		rsp->t2.TotalDataCount = cpu_to_le16(total_count);
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(total_count);
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/* 2 for parameter count + 72 data count +
+		 * filename length + 3 pad (1pad1 + 2 pad2)
+		 */
+		rsp->ByteCount = cpu_to_le16(5 + total_count);
+		rsp->Pad = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_ALT_NAME_INFO:
+	{
+		struct alt_name_info *alt_name_info;
+		char *base;
+		int filename_len;
+
+		ksmbd_debug(SMB, "SMB_QUERY_ALT_NAME_INFO\n");
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount = cpu_to_le16(25);
+		rsp->Pad = 0;
+		/* lets set EA info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		alt_name_info = (struct alt_name_info *)(ptr + 4);
+
+		/* the 8.3 short name is generated from the last component */
+		base = strrchr(name, '/');
+		if (!base)
+			base = name;
+		else
+			base += 1;
+
+		filename_len = ksmbd_extract_shortname(conn, base,
+						       alt_name_info->FileName);
+		alt_name_info->FileNameLength = cpu_to_le32(filename_len);
+		rsp->t2.TotalDataCount = cpu_to_le16(4 + filename_len);
+		rsp->t2.DataCount = cpu_to_le16(4 + filename_len);
+
+		inc_resp_size(work, 4 + filename_len + 25);
+		break;
+	}
+	case SMB_QUERY_FILE_UNIX_BASIC:
+	{
+		struct file_unix_basic_info *unix_info;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_UNIX_BASIC\n");
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = 0;
+		rsp->t2.TotalDataCount = cpu_to_le16(100);
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = 0;
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(100);
+		rsp->t2.DataOffset = cpu_to_le16(56);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		rsp->ByteCount = cpu_to_le16(101); /* 100 data count + 1pad */
+		rsp->Pad = 0;
+		unix_info = (struct file_unix_basic_info *)(&rsp->Pad + 1);
+		init_unix_info(unix_info, &init_user_ns, &st);
+		inc_resp_size(work, 10 * 2 + 101);
+		break;
+	}
+	case SMB_QUERY_FILE_INTERNAL_INFO:
+	{
+		struct file_internal_info *iinfo;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_INTERNAL_INFO\n");
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount = cpu_to_le16(8);
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(8);
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		rsp->ByteCount = cpu_to_le16(13);
+		rsp->Pad = 0;
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		iinfo = (struct file_internal_info *) (ptr + 4);
+		/* the inode number serves as the unique file id */
+		iinfo->UniqueId = cpu_to_le64(st.ino);
+		inc_resp_size(work, 10 * 2 + 13);
+		break;
+	}
+	case SMB_QUERY_FILE_UNIX_LINK:
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_UNIX_LINK\n");
+		rc = smb_readlink(work, &path);
+		if (rc < 0)
+			goto err_out;
+		break;
+	case SMB_INFO_QUERY_ALL_EAS:
+		ksmbd_debug(SMB, "SMB_INFO_QUERY_ALL_EAS\n");
+		rc = smb_get_ea(work, &path);
+		if (rc < 0)
+			goto err_out;
+		break;
+	case SMB_QUERY_POSIX_ACL:
+		ksmbd_debug(SMB, "SMB_QUERY_POSIX_ACL\n");
+		rc = smb_get_acl(work, &path);
+		if (rc < 0)
+			goto err_out;
+		break;
+	default:
+		pr_err("query path info not implemnted for %x\n",
+		       le16_to_cpu(req_params->InformationLevel));
+		rc = -EINVAL;
+		goto err_out;
+	}
+
+err_out:
+	/* drop the lock and references taken by ksmbd_vfs_kern_path_locked() */
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+out:
+	ksmbd_revert_fsids(work);
+	kfree(name);
+	return rc;
+}
+
+/**
+ * create_trans2_reply() - create response for trans2 request
+ * @work:	smb work containing smb response buffer
+ * @count:	trans2 response data size in bytes
+ *
+ * Fills in a parameterless trans2 response header whose data block of
+ * @count bytes immediately follows the fixed header at offset 56.
+ */
+static void create_trans2_reply(struct ksmbd_work *work, __u16 count)
+{
+	struct smb_hdr *hdr = work->response_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	__le16 data_count = cpu_to_le16(count);
+
+	hdr->WordCount = 0x0A;
+	rsp->t2.TotalParameterCount = 0;
+	rsp->t2.TotalDataCount = data_count;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = 0;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = data_count;
+	rsp->t2.DataOffset = cpu_to_le16(56);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* data bytes plus the single pad byte */
+	rsp->ByteCount = cpu_to_le16(count + 1);
+	rsp->Pad = 0;
+	inc_resp_size(work, 10 * 2 + (count + 1));
+}
+
+/**
+ * set_fs_info() - handler for set fs info commands
+ * @work:	smb work containing set fs info command buffer
+ *
+ * Currently only SMB_SET_CIFS_UNIX_INFO is supported: the client's unix
+ * extension version is validated and its capability mask is logged.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int set_fs_info(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_setfsi_req *req = work->request_buf;
+	struct smb_com_trans2_setfsi_rsp *rsp = work->response_buf;
+	struct smb_com_trans2_setfsi_req_params *params;
+	int info_level;
+	unsigned int maxlen, offset;
+
+	/*
+	 * Validate the client-controlled parameter offset against the
+	 * request length before dereferencing it, like the other trans2
+	 * handlers (query_fs_info(), query_path_info()) do.
+	 */
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->ParameterOffset) + 4;
+	if (offset > maxlen ||
+	    maxlen - offset < sizeof(struct smb_com_trans2_setfsi_req_params)) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	params = (struct smb_com_trans2_setfsi_req_params *)
+		(work->request_buf + offset);
+
+	info_level = le16_to_cpu(params->InformationLevel);
+
+	switch (info_level) {
+	case SMB_SET_CIFS_UNIX_INFO:
+	{
+		u64 client_cap;
+
+		ksmbd_debug(SMB, "SMB_SET_CIFS_UNIX_INFO\n");
+		if (le16_to_cpu(params->ClientUnixMajor) !=
+			CIFS_UNIX_MAJOR_VERSION) {
+			pr_err("Non compatible unix major info\n");
+			return -EINVAL;
+		}
+
+		if (le16_to_cpu(params->ClientUnixMinor) !=
+			CIFS_UNIX_MINOR_VERSION) {
+			pr_err("Non compatible unix minor info\n");
+			return -EINVAL;
+		}
+
+		client_cap = le64_to_cpu(params->ClientUnixCap);
+		ksmbd_debug(SMB, "clients unix cap = %llx\n", client_cap);
+		/* TODO: process caps */
+		rsp->hdr.WordCount = 0x0A;
+		rsp->t2.TotalDataCount = 0;
+		break;
+	}
+	default:
+		ksmbd_debug(SMB, "info level %x  not supported\n", info_level);
+		return -EINVAL;
+	}
+
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+	return 0;
+}
+
+/**
+ * query_fs_info() - handler for query fs info commands
+ * @work:	smb work containing query fs info command buffer
+ *
+ * Runs statfs on the share root and fills the trans2 data block for the
+ * requested InformationLevel; the common header is then completed by
+ * create_trans2_reply().
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int query_fs_info(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct smb_com_trans2_qfsi_req_params *req_params;
+	struct ksmbd_conn *conn = work->conn;
+	struct kstatfs stfs;
+	struct ksmbd_share_config *share;
+	int rc;
+	struct path path;
+	bool incomplete = false;
+	int info_level, len = 0;
+	struct ksmbd_tree_connect *tree_conn;
+	unsigned int maxlen, offset;
+
+	/* validate the client-controlled parameter offset */
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->ParameterOffset) + 4;
+	if (offset > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	req_params =
+		(struct smb_com_trans2_qfsi_req_params *)(work->request_buf +
+							  offset);
+
+	/* check if more data is coming */
+	if (le16_to_cpu(req->TotalParameterCount) !=
+	    le16_to_cpu(req->ParameterCount)) {
+		ksmbd_debug(SMB, "total param = %d, received = %d\n",
+			    le16_to_cpu(req->TotalParameterCount),
+			    le16_to_cpu(req->ParameterCount));
+		incomplete = true;
+	}
+
+	if (le16_to_cpu(req->TotalDataCount) != le16_to_cpu(req->DataCount)) {
+		ksmbd_debug(SMB, "total data = %d, received = %d\n",
+			    le16_to_cpu(req->TotalDataCount),
+			    le16_to_cpu(req->DataCount));
+		incomplete = true;
+	}
+
+	/* TODO: multi-part transactions are detected but not yet handled */
+	if (incomplete) {
+		/* create 1 trans_state structure
+		 * and add to connection list
+		 */
+	}
+
+	info_level = le16_to_cpu(req_params->InformationLevel);
+
+	tree_conn = work->tcon;
+	if (!tree_conn)
+		return -ENOENT;
+	share = tree_conn->share_conf;
+
+	if (test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE))
+		return -ENOENT;
+
+	/* impersonate the connected user for the VFS operations below */
+	if (ksmbd_override_fsids(work))
+		return -ENOMEM;
+
+	rc = kern_path(share->path, LOOKUP_NO_SYMLINKS, &path);
+	if (rc) {
+		ksmbd_revert_fsids(work);
+		pr_err("cannot create vfs path\n");
+		return rc;
+	}
+
+	rc = vfs_statfs(&path, &stfs);
+	if (rc) {
+		pr_err("cannot do stat of path %s\n", share->path);
+		goto err_out;
+	}
+
+	/* each case sets rsp->t2.TotalDataCount, which create_trans2_reply()
+	 * below reads back to build the common response header
+	 */
+	switch (info_level) {
+	case SMB_INFO_ALLOCATION:
+	{
+		struct filesystem_alloc_info *ainfo;
+
+		ksmbd_debug(SMB, "GOT SMB_INFO_ALLOCATION\n");
+		/* 4 * le32 + le16 = 18 bytes of data */
+		rsp->t2.TotalDataCount = cpu_to_le16(18);
+		ainfo = (struct filesystem_alloc_info *)(&rsp->Pad + 1);
+		ainfo->fsid = 0;
+		ainfo->BytesPerSector = cpu_to_le16(512);
+		ainfo->SectorsPerAllocationUnit =
+		cpu_to_le32(stfs.f_bsize/le16_to_cpu(ainfo->BytesPerSector));
+		ainfo->TotalAllocationUnits = cpu_to_le32(stfs.f_blocks);
+		ainfo->FreeAllocationUnits = cpu_to_le32(stfs.f_bfree);
+		break;
+	}
+	case SMB_QUERY_FS_VOLUME_INFO:
+	{
+		struct filesystem_vol_info *vinfo;
+		size_t share_len, rsp_offset;
+
+		ksmbd_debug(SMB, "GOT SMB_QUERY_FS_VOLUME_INFO\n");
+		vinfo = (struct filesystem_vol_info *)(&rsp->Pad + 1);
+		vinfo->VolumeCreationTime = 0;
+		/* Taking dummy value of serial number*/
+		vinfo->SerialNumber = cpu_to_le32(0xbc3ac512);
+		share_len = strlen(share->name);
+		/* ensure the converted label fits in the response buffer */
+		rsp_offset = offsetof(struct smb_com_trans2_rsp, Buffer) +
+			     offsetof(struct filesystem_vol_info, VolumeLabel) +
+			     share_len;
+		if (rsp_offset > work->response_sz) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		len = smbConvertToUTF16((__le16 *)vinfo->VolumeLabel,
+					share->name, share_len,
+					conn->local_nls, 0);
+		vinfo->VolumeLabelSize = cpu_to_le32(len);
+		vinfo->Reserved = 0;
+		/* NOTE(review): the "- 2" presumably compensates for a
+		 * placeholder in the VolumeLabel field of the struct —
+		 * confirm against the struct definition.
+		 */
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct filesystem_vol_info) +
+				    len - 2);
+		break;
+	}
+	case SMB_QUERY_FS_SIZE_INFO:
+	{
+		struct filesystem_info *sinfo;
+
+		ksmbd_debug(SMB, "GOT SMB_QUERY_FS_SIZE_INFO\n");
+		rsp->t2.TotalDataCount = cpu_to_le16(24);
+		sinfo = (struct filesystem_info *)(&rsp->Pad + 1);
+		sinfo->BytesPerSector = cpu_to_le32(512);
+		sinfo->SectorsPerAllocationUnit =
+			cpu_to_le32(stfs.f_bsize / sinfo->BytesPerSector);
+		sinfo->TotalAllocationUnits = cpu_to_le64(stfs.f_blocks);
+		sinfo->FreeAllocationUnits = cpu_to_le64(stfs.f_bfree);
+		break;
+	}
+	case SMB_QUERY_FS_FULL_SIZE_INFO:
+	{
+		struct filesystem_full_info *sinfo;
+
+		ksmbd_debug(SMB, "GOT SMB_QUERY_FS_FULL_SIZE_INFO\n");
+		rsp->t2.TotalDataCount = cpu_to_le16(32);
+		sinfo = (struct filesystem_full_info *)(&rsp->Pad + 1);
+		sinfo->BytesPerSector = cpu_to_le32(stfs.f_bsize);
+		sinfo->SectorsPerAllocationUnit =
+			cpu_to_le32(stfs.f_bsize/sinfo->BytesPerSector);
+		sinfo->TotalAllocationUnits = cpu_to_le64(stfs.f_blocks);
+		sinfo->FreeAllocationUnits = cpu_to_le64(stfs.f_bfree);
+		sinfo->ActualAvailableUnits = cpu_to_le64(stfs.f_bavail);
+		break;
+	}
+	case SMB_QUERY_FS_DEVICE_INFO:
+	{
+		struct filesystem_device_info *fdi;
+
+		/* query fs info device info response is 0 word and 8 bytes */
+		ksmbd_debug(SMB, "GOT SMB_QUERY_FS_DEVICE_INFO\n");
+		if (le16_to_cpu(req->MaxDataCount) < 8) {
+			pr_err("Insufficient bytes, cannot response()\n");
+			rc = -EINVAL;
+			goto err_out;
+		}
+
+		/* NOTE(review): TotalDataCount is 18 while only the two le32
+		 * fields (8 bytes) are filled in — confirm whether 8 was
+		 * intended here.
+		 */
+		rsp->t2.TotalDataCount = cpu_to_le16(18);
+		fdi = (struct filesystem_device_info *)(&rsp->Pad + 1);
+		fdi->DeviceType = cpu_to_le32(FILE_DEVICE_DISK);
+		fdi->DeviceCharacteristics = cpu_to_le32(0x20);
+		break;
+	}
+	case SMB_QUERY_FS_ATTRIBUTE_INFO:
+	{
+		struct filesystem_attribute_info *info;
+
+		ksmbd_debug(SMB, "GOT SMB_QUERY_FS_ATTRIBUTE_INFO\n");
+		/* constant 12 bytes + variable filesystem name */
+		info = (struct filesystem_attribute_info *)(&rsp->Pad + 1);
+
+		if (le16_to_cpu(req->MaxDataCount) < 12) {
+			pr_err("Insufficient bytes, cannot response()\n");
+			rc = -EINVAL;
+			goto err_out;
+		}
+
+		info->Attributes = cpu_to_le32(FILE_CASE_PRESERVED_NAMES |
+					       FILE_CASE_SENSITIVE_SEARCH |
+					       FILE_VOLUME_QUOTAS);
+		info->MaxPathNameComponentLength = cpu_to_le32(stfs.f_namelen);
+		/* no filesystem name string is returned */
+		info->FileSystemNameLen = 0;
+		rsp->t2.TotalDataCount = cpu_to_le16(12);
+		break;
+	}
+	case SMB_QUERY_CIFS_UNIX_INFO:
+	{
+		struct filesystem_unix_info *uinfo;
+
+		ksmbd_debug(SMB, "GOT SMB_QUERY_CIFS_UNIX_INFO\n");
+		/* constant 12 bytes + variable filesystem name */
+		uinfo = (struct filesystem_unix_info *)(&rsp->Pad + 1);
+
+		if (le16_to_cpu(req->MaxDataCount) < 12) {
+			pr_err("Insufficient bytes, cannot response()\n");
+			rc = -EINVAL;
+			goto err_out;
+		}
+		uinfo->MajorVersionNumber =
+			cpu_to_le16(CIFS_UNIX_MAJOR_VERSION);
+		uinfo->MinorVersionNumber =
+			cpu_to_le16(CIFS_UNIX_MINOR_VERSION);
+		uinfo->Capability = cpu_to_le64(SMB_UNIX_CAPS);
+		rsp->t2.TotalDataCount = cpu_to_le16(12);
+		break;
+	}
+	case SMB_QUERY_POSIX_FS_INFO:
+	{
+		struct filesystem_posix_info *pinfo;
+
+		ksmbd_debug(SMB, "GOT SMB_QUERY_POSIX_FS_INFO\n");
+		rsp->t2.TotalDataCount = cpu_to_le16(56);
+		pinfo = (struct filesystem_posix_info *)(&rsp->Pad + 1);
+		pinfo->BlockSize = cpu_to_le32(stfs.f_bsize);
+		pinfo->OptimalTransferSize = cpu_to_le32(stfs.f_blocks);
+		pinfo->TotalBlocks = cpu_to_le64(stfs.f_blocks);
+		pinfo->BlocksAvail = cpu_to_le64(stfs.f_bfree);
+		pinfo->UserBlocksAvail = cpu_to_le64(stfs.f_bavail);
+		pinfo->TotalFileNodes = cpu_to_le64(stfs.f_files);
+		pinfo->FreeFileNodes = cpu_to_le64(stfs.f_ffree);
+		pinfo->FileSysIdentifier = 0;
+		break;
+	}
+	default:
+		ksmbd_debug(SMB, "info level %x not implemented\n", info_level);
+		rc = -EINVAL;
+		goto err_out;
+	}
+
+	create_trans2_reply(work, le16_to_cpu(rsp->t2.TotalDataCount));
+
+err_out:
+	path_put(&path);
+	ksmbd_revert_fsids(work);
+	return rc;
+}
+
+/**
+ * smb_posix_convert_flags() - convert smb posix access flags to open flags
+ * @flags:	smb posix access flags
+ * @may_flags:	output; MAY_* permission mask derived from the open flags
+ *
+ * Return:	file open flags
+ */
+static __u32 smb_posix_convert_flags(__u32 flags, int *may_flags)
+{
+	static const struct {
+		__u32 smb_flag;
+		__u32 o_flag;
+	} flag_map[] = {
+		{ SMB_O_CREAT,		O_CREAT },
+		{ SMB_O_SYNC,		O_DSYNC },
+		{ SMB_O_DIRECTORY,	O_DIRECTORY },
+		{ SMB_O_NOFOLLOW,	O_NOFOLLOW },
+		{ SMB_O_APPEND,		O_APPEND },
+	};
+	__u32 posix_flags;
+	int i;
+
+	/* access mode first: write-only, read-write, else read-only */
+	switch (flags & SMB_ACCMODE) {
+	case SMB_O_WRONLY:
+		posix_flags = O_WRONLY;
+		break;
+	case SMB_O_RDWR:
+		posix_flags = O_RDWR;
+		break;
+	default:
+		posix_flags = O_RDONLY;
+		break;
+	}
+
+	/* then OR in each independently mapped flag */
+	for (i = 0; i < sizeof(flag_map) / sizeof(flag_map[0]); i++)
+		if (flags & flag_map[i].smb_flag)
+			posix_flags |= flag_map[i].o_flag;
+
+	*may_flags = ksmbd_openflags_to_mayflags(posix_flags);
+
+	return posix_flags;
+}
+
+/**
+ * smb_get_disposition() - convert smb disposition flags to open flags
+ * @flags:		smb file disposition flags
+ * @file_present:	file already present or not
+ * @stat:		file stat information (currently unused here)
+ * @open_flags:		open flags should be stored here
+ *
+ * Return:		file disposition flags
+ */
+static int smb_get_disposition(unsigned int flags, bool file_present,
+			       struct kstat *stat, unsigned int *open_flags)
+{
+	int disposition, disp_flags;
+
+	/*
+	 * Only the CREAT/EXCL/TRUNC combination selects the disposition,
+	 * mirroring the POSIX open() semantics of those three bits.
+	 */
+	switch (flags & (SMB_O_CREAT | SMB_O_EXCL | SMB_O_TRUNC)) {
+	case SMB_O_CREAT | SMB_O_EXCL:
+	case SMB_O_CREAT | SMB_O_EXCL | SMB_O_TRUNC:
+		disposition = FILE_CREATE;
+		break;
+	case SMB_O_CREAT | SMB_O_TRUNC:
+		disposition = FILE_OVERWRITE_IF;
+		break;
+	case SMB_O_CREAT:
+		disposition = FILE_OPEN_IF;
+		break;
+	case SMB_O_TRUNC:
+	case SMB_O_EXCL | SMB_O_TRUNC:
+		disposition = FILE_OVERWRITE;
+		break;
+	case 0:
+		disposition = FILE_OPEN;
+		break;
+	default:	/* SMB_O_EXCL alone */
+		disposition = FILE_SUPERSEDE;
+		break;
+	}
+
+	disp_flags = file_create_dispostion_flags(disposition, file_present);
+	if (disp_flags < 0)
+		return disp_flags;
+
+	*open_flags |= disp_flags;
+	return disp_flags;
+}
+
+/**
+ * smb_posix_open() - handler for smb posix open
+ * @work:	smb work containing posix open command
+ *
+ * Opens (or creates, depending on the requested disposition) the file
+ * named in the request, optionally grants an oplock, and builds the
+ * OPEN_PSX response.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_posix_open(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *pSMB_req = work->request_buf;
+	struct smb_com_trans2_spi_rsp *pSMB_rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct open_psx_req *psx_req;
+	struct open_psx_rsp *psx_rsp;
+	struct path path, parent_path;
+	struct kstat stat;
+	__u16 data_offset, rsp_info_level, file_info = 0;
+	__u32 oplock_flags, posix_open_flags, may_flags;
+	umode_t mode;
+	char *name;
+	unsigned int maxlen, offset;
+	bool file_present = true, create_directory;
+	int err;
+	struct ksmbd_file *fp = NULL;
+	int oplock_rsp = OPLOCK_NONE;
+
+	/* bound the filename read against the total request length */
+	maxlen = get_req_len(pSMB_req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		pSMB_rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, pSMB_req->FileName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		pSMB_rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	if (ksmbd_override_fsids(work)) {
+		pSMB_rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		kfree(name);
+		return -ENOMEM;
+	}
+
+	/*
+	 * On success the parent directory inode is left locked; the
+	 * free_path label below is responsible for unlocking it.
+	 */
+	err = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					 &parent_path, &path, 0);
+	if (err) {
+		file_present = false;
+		ksmbd_debug(SMB, "cannot get linux path for %s, err = %d\n",
+			    name, err);
+		if (err == -EACCES || err == -EXDEV)
+			goto out;
+	} else {
+		if (d_is_symlink(path.dentry)) {
+			err = -EACCES;
+			goto free_path;
+		}
+		err = vfs_getattr(&path, &stat, STATX_BASIC_STATS,
+				  AT_STATX_SYNC_AS_STAT);
+		if (err) {
+			pr_err("can not stat %s, err = %d\n", name, err);
+			goto free_path;
+		}
+	}
+
+	/* DataOffset is relative to the 4-byte SMB length header */
+	data_offset = le16_to_cpu(pSMB_req->DataOffset) + 4;
+	if (data_offset > maxlen) {
+		err = -EINVAL;
+		goto free_path;
+	}
+	psx_req = (struct open_psx_req *)((char *)pSMB_req + data_offset);
+	oplock_flags = le32_to_cpu(psx_req->OpenFlags);
+
+	posix_open_flags = smb_posix_convert_flags(
+			le32_to_cpu(psx_req->PosixOpenFlags),
+			&may_flags);
+	/* exactly O_DIRECTORY|O_CREAT (and nothing else) means mkdir */
+	create_directory = !!(posix_open_flags == (O_DIRECTORY | O_CREAT));
+
+	err = smb_get_disposition(le32_to_cpu(psx_req->PosixOpenFlags),
+				  file_present, &stat, &posix_open_flags);
+	if (err < 0) {
+		ksmbd_debug(SMB, "create_dispostion returned %d\n", err);
+		if (file_present)
+			goto free_path;
+		else
+			goto out;
+	}
+
+	ksmbd_debug(SMB, "filename : %s, posix_open_flags : %x\n", name,
+		    posix_open_flags);
+	mode = (umode_t)le64_to_cpu(psx_req->Permissions);
+	rsp_info_level = le16_to_cpu(psx_req->Level);
+
+	/* creation on a read-only share is refused up front */
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		if (posix_open_flags & O_CREAT) {
+			err = -EACCES;
+			ksmbd_debug(SMB,
+				"returning as user does not have permission to write\n");
+			if (file_present)
+				goto free_path;
+			else
+				goto out;
+		}
+	}
+
+	if (file_present && create_directory) {
+		err = -EEXIST;
+		goto free_path;
+	}
+
+	if (!file_present && (posix_open_flags & O_CREAT)) {
+		err = smb_common_create(work, &parent_path, &path, name,
+					posix_open_flags, mode,
+					create_directory);
+		if (err) {
+			ksmbd_debug(SMB, "smb_common_create err: %d\n", err);
+			goto out;
+		}
+
+		/* mkdir does not open a file handle; skip straight to reply */
+		if (create_directory)
+			goto prepare_rsp;
+	} else {
+		err = inode_permission(mnt_idmap(path.mnt),
+				       d_inode(path.dentry),
+				       may_flags);
+		if (err)
+			goto free_path;
+	}
+
+	fp = ksmbd_vfs_dentry_open(work, &path, posix_open_flags, 0,
+				   file_present);
+	if (IS_ERR(fp)) {
+		err = PTR_ERR(fp);
+		fp = NULL;
+		goto free_path;
+	}
+	fp->pid = le16_to_cpu(pSMB_req->hdr.Pid);
+
+	write_lock(&fp->f_ci->m_lock);
+	list_add(&fp->node, &fp->f_ci->m_fp_list);
+	write_unlock(&fp->f_ci->m_lock);
+
+	if (smb1_oplock_enable &&
+	    test_share_config_flag(work->tcon->share_conf,
+		    		   KSMBD_SHARE_FLAG_OPLOCKS) &&
+	    !S_ISDIR(file_inode(fp->filp)->i_mode)) {
+		/* Client cannot request levelII oplock directly */
+		err = smb_grant_oplock(work, oplock_flags &
+			(REQ_OPLOCK | REQ_BATCHOPLOCK), fp->volatile_id, fp,
+			le16_to_cpu(pSMB_req->hdr.Tid), NULL, 0);
+		if (err)
+			goto free_path;
+	}
+
+	/* report the granted oplock level (0 when none was granted) */
+	oplock_rsp = fp->f_opinfo != NULL ? fp->f_opinfo->level : 0;
+
+prepare_rsp:
+	/* open/mkdir success, send back response */
+	data_offset = sizeof(struct smb_com_trans2_spi_rsp) -
+		      sizeof(pSMB_rsp->hdr.smb_buf_length) + 3 /*alignment*/;
+	psx_rsp = (struct open_psx_rsp *)(((char *)&pSMB_rsp->hdr.Protocol) +
+			data_offset);
+	if (data_offset + sizeof(struct open_psx_rsp) > work->response_sz) {
+		err = -EIO;
+		goto free_path;
+	}
+
+	psx_rsp->OplockFlags = cpu_to_le16(oplock_rsp);
+	/* fp is NULL on the mkdir path, which returns no file handle */
+	psx_rsp->Fid = fp != NULL ? fp->volatile_id : 0;
+
+	if (file_present) {
+		if (!(posix_open_flags & O_TRUNC))
+			file_info = F_OPENED;
+		else
+			file_info = F_OVERWRITTEN;
+	} else
+		file_info = F_CREATED;
+	psx_rsp->CreateAction = cpu_to_le32(file_info);
+
+	if (rsp_info_level != SMB_QUERY_FILE_UNIX_BASIC) {
+		ksmbd_debug(SMB, "returning null information level response");
+		rsp_info_level = SMB_NO_INFO_LEVEL_RESPONSE;
+	}
+	psx_rsp->ReturnedLevel = cpu_to_le16(rsp_info_level);
+
+	err = vfs_getattr(&path, &stat, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+	if (err) {
+		pr_err("cannot get stat information\n");
+		goto free_path;
+	}
+
+	pSMB_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	pSMB_rsp->hdr.WordCount = 10;
+	pSMB_rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	pSMB_rsp->t2.TotalDataCount = cpu_to_le16(sizeof(struct open_psx_rsp));
+	pSMB_rsp->t2.ParameterCount = pSMB_rsp->t2.TotalParameterCount;
+	pSMB_rsp->t2.Reserved = 0;
+	/* NOTE(review): ParameterCount was already set (same value) above */
+	pSMB_rsp->t2.ParameterCount = cpu_to_le16(2);
+	pSMB_rsp->t2.ParameterOffset = cpu_to_le16(56);
+	pSMB_rsp->t2.ParameterDisplacement = 0;
+	pSMB_rsp->t2.DataCount = pSMB_rsp->t2.TotalDataCount;
+	pSMB_rsp->t2.DataOffset = cpu_to_le16(data_offset);
+	pSMB_rsp->t2.DataDisplacement = 0;
+	pSMB_rsp->t2.SetupCount = 0;
+	pSMB_rsp->t2.Reserved1 = 0;
+
+	/* 2 for parameter count + 12 data count + 3 pad (1 pad1 + 2 pad2)*/
+	pSMB_rsp->ByteCount = cpu_to_le16(sizeof(struct open_psx_rsp) + 2 + 3);
+	pSMB_rsp->Reserved2 = 0;
+	/* NOTE(review): 117 looks hand-computed; confirm it matches ByteCount */
+	inc_resp_size(work, pSMB_rsp->hdr.WordCount * 2 + 117);
+
+free_path:
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+out:
+	/* map the unix errno to an NT status for the client */
+	switch (err) {
+	case 0:
+		break;
+	case -ENOSPC:
+		pSMB_rsp->hdr.Status.CifsError = STATUS_DISK_FULL;
+		break;
+	case -EINVAL:
+		pSMB_rsp->hdr.Status.CifsError = STATUS_NO_SUCH_USER;
+		break;
+	case -EACCES:
+		pSMB_rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		break;
+	case -ENOENT:
+		pSMB_rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_NOT_FOUND;
+		break;
+	case -EBUSY:
+		pSMB_rsp->hdr.Status.CifsError = STATUS_DELETE_PENDING;
+		break;
+	case -EEXIST:
+		pSMB_rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_COLLISION;
+		break;
+	default:
+		pSMB_rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+	}
+
+	/* drop the half-opened handle if anything failed after open */
+	if (err && fp)
+		ksmbd_close_fd(work, fp->volatile_id);
+	kfree(name);
+	ksmbd_revert_fsids(work);
+	return err;
+}
+
+/**
+ * smb_posix_unlink() - handler for posix file delete
+ * @work:	smb work containing trans2 posix delete command
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_posix_unlink(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct unlink_psx_rsp *psx_rsp = NULL;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct path path, parent_path;
+	unsigned int maxlen, offset;
+	char *name;
+	int rc = 0;
+
+	/* deletes are refused outright on read-only shares */
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		ksmbd_debug(SMB,
+			"returning as user does not have permission to write\n");
+		rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		return -EACCES;
+	}
+
+	/* bound the filename read against the total request length */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req->FileName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	/* on success the parent dir inode is left locked; unlocked below */
+	rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					&parent_path, &path, 0);
+	if (rc < 0)
+		goto out;
+
+	rc = ksmbd_vfs_remove_file(work, &path);
+
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+
+	if (rc < 0)
+		goto out;
+
+	psx_rsp = (struct unlink_psx_rsp *)((char *)rsp +
+			sizeof(struct smb_com_trans2_rsp));
+	psx_rsp->EAErrorOffset = cpu_to_le16(0);
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = cpu_to_le16(0);
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.Reserved = 0;
+	/* NOTE(review): ParameterCount was already set (same value) above */
+	rsp->t2.ParameterCount = cpu_to_le16(2);
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = cpu_to_le16(0);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 2 for parameter count + 1 pad1*/
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Pad = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+out:
+	/* NOTE(review): every failure (incl. -ENOENT) maps to IO_ERROR here */
+	if (rc)
+		rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+
+	kfree(name);
+	return rc;
+}
+
+/**
+ * smb_set_time_pathinfo() - handler for setting time using set path info
+ * @work:	smb work containing set path info command
+ *
+ * Applies any non-zero atime/ctime/mtime from the request's
+ * file_basic_info block to the named file; zero timestamps are
+ * left unchanged.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_time_pathinfo(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct file_basic_info *info;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct iattr attrs;
+	unsigned int maxlen, offset;
+	char *name;
+	int err = 0;
+
+	/* bound the filename read against the total request length */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req->FileName, maxlen - offset, work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	/* DataOffset is relative to the 4-byte SMB length header */
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		kfree(name);
+		return -EINVAL;
+	}
+
+	info = (struct file_basic_info *)((char *)req + offset);
+
+	attrs.ia_valid = 0;
+	/* a zero timestamp means "do not change this field" */
+	if (le64_to_cpu(info->LastAccessTime)) {
+		attrs.ia_atime = smb_NTtimeToUnix(info->LastAccessTime);
+		attrs.ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
+	}
+
+	if (le64_to_cpu(info->ChangeTime)) {
+		attrs.ia_ctime = smb_NTtimeToUnix(info->ChangeTime);
+		attrs.ia_valid |= ATTR_CTIME;
+	}
+
+	if (le64_to_cpu(info->LastWriteTime)) {
+		attrs.ia_mtime = smb_NTtimeToUnix(info->LastWriteTime);
+		attrs.ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
+	}
+	/* TODO: check dos mode and acl bits if req->Attributes nonzero */
+
+	if (!attrs.ia_valid)
+		goto done;
+
+	err = ksmbd_vfs_setattr(work, name, 0, &attrs);
+	if (err) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		/* fix: 'name' was leaked on this error path */
+		kfree(name);
+		return err;
+	}
+
+done:
+	ksmbd_debug(SMB, "%s setattr done\n", name);
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 3 pad (1 pad1 + 2 pad2)*/
+	rsp->ByteCount = cpu_to_le16(3);
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+	kfree(name);
+	return 0;
+}
+
+/**
+ * smb_set_unix_pathinfo() - handler for setting unix path info(setattr)
+ * @work:	smb work containing set path info command
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_unix_pathinfo(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct file_unix_basic_info *unix_info;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct path path;
+	struct iattr attrs;
+	char *name;
+	int err = 0;
+	unsigned int maxlen, offset;
+
+	/* bound the filename read against the total request length */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req->FileName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	if (ksmbd_override_fsids(work)) {
+		/* fix: 'name' was leaked and no status was set on this path */
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		kfree(name);
+		return -ENOMEM;
+	}
+	/* existence check only; the path itself is not used further */
+	err = kern_path(name, 0, &path);
+	if (err) {
+		/* consistency fix: report the failure to the client too */
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_NOT_FOUND;
+		ksmbd_revert_fsids(work);
+		kfree(name);
+		return -ENOENT;
+	}
+
+	/* DataOffset is relative to the 4-byte SMB length header */
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset > maxlen) {
+		path_put(&path);
+		ksmbd_revert_fsids(work);
+		kfree(name);
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	unix_info = (struct file_unix_basic_info *)((char *)req + offset);
+	attrs.ia_valid = 0;
+	attrs.ia_mode = 0;
+	err = unix_info_to_attr(unix_info, &init_user_ns, &attrs);
+	path_put(&path);
+	ksmbd_revert_fsids(work);
+	if (err)
+		goto out;
+
+	err = ksmbd_vfs_setattr(work, name, 0, &attrs);
+	if (err)
+		goto out;
+	/* setattr success, prepare response */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 3 pad (1 pad1 + 2 pad2)*/
+	rsp->ByteCount = cpu_to_le16(3);
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+out:
+	kfree(name);
+	if (err) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return err;
+	}
+	return 0;
+}
+
+/**
+ * smb_set_ea() - handler for setting extended attributes using set path
+ *		info command
+ * @work:	smb work containing set path info command
+ *
+ * Walks the FEA list in the request and stores each entry as a
+ * user.* xattr on the named file.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_ea(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct fealist *eabuf;
+	struct fea *ea;
+	char *fname, *attr_name = NULL, *value;
+	int rc = 0, list_len, i, next = 0;
+	unsigned int maxlen, offset;
+
+	/* bound the filename read against the total request length */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	fname = smb_get_name(share, req->FileName, maxlen - offset,
+			     work, false);
+	if (IS_ERR(fname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(fname);
+	}
+
+	/* DataOffset is relative to the 4-byte SMB length header */
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		kfree(fname);
+		return -EINVAL;
+	}
+
+	eabuf = (struct fealist *)((char *)req + offset);
+
+	/* list_len counts the entries only, minus the 4-byte length field */
+	list_len = le32_to_cpu(eabuf->list_len) - 4;
+	if (offset + list_len > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		kfree(fname);
+		return -EINVAL;
+	}
+
+	ea = (struct fea *)eabuf->list;
+
+	/*
+	 * Require a full 4-byte entry header before dereferencing it
+	 * (was 'list_len >= 0', which read one header past a list that
+	 * lacks a zero-length-name terminator).
+	 */
+	for (i = 0; list_len >= 4 && ea->name_len != 0; i++, list_len -= next) {
+		if (ea->name_len > (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN)) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+		/* bytes consumed by this entry: 4-byte header + name + value */
+		next = ea->name_len + le16_to_cpu(ea->value_len) + 4;
+		offset += next;
+		if (offset > maxlen) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+		attr_name = kmalloc(XATTR_NAME_MAX + 1, GFP_KERNEL);
+		if (!attr_name) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		memcpy(attr_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
+		memcpy(&attr_name[XATTR_USER_PREFIX_LEN], ea->name,
+		       ea->name_len);
+		attr_name[XATTR_USER_PREFIX_LEN + ea->name_len] = '\0';
+		value = (char *)&ea->name + ea->name_len + 1;
+		ksmbd_debug(SMB, "name: <%s>, name_len %u, value_len %u\n",
+			    ea->name, ea->name_len, le16_to_cpu(ea->value_len));
+
+		rc = ksmbd_vfs_fsetxattr(work, fname, attr_name, value,
+					 le16_to_cpu(ea->value_len), 0);
+		if (rc < 0) {
+			kfree(attr_name);
+			rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+			goto out;
+		}
+		kfree(attr_name);
+		/*
+		 * Fix: advance by 'next' BYTES.  'ea += next' advanced by
+		 * next * sizeof(struct fea) and walked far past the entry.
+		 */
+		ea = (struct fea *)((char *)ea + next);
+	}
+
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = cpu_to_le16(0);
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = cpu_to_le16(0);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 2 for parameter count + 1 pad1*/
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Pad = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+out:
+	kfree(fname);
+	return rc;
+}
+
+/**
+ * smb_set_file_size_pinfo() - handler for setting eof or truncate using
+ *		trans2 set path info command
+ * @work:	smb work containing set path info command
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_file_size_pinfo(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct file_end_of_file_info *eofinfo;
+	struct iattr attr;
+	char *name = NULL;
+	loff_t newsize;
+	int rc = 0;
+	unsigned int maxlen, offset;
+
+	/* bound the filename read against the total request length */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req->FileName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	/*
+	 * Fix: the data offset is DataOffset + 4 (SMB length header),
+	 * as in every sibling handler.  The previous '+=' also added the
+	 * FileName field offset still held in 'offset', so eofinfo was
+	 * read from the wrong position in the request.
+	 */
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		kfree(name);
+		return -EINVAL;
+	}
+
+	eofinfo = (struct file_end_of_file_info *)((char *)req + offset);
+	newsize = le64_to_cpu(eofinfo->FileSize);
+	attr.ia_valid = ATTR_SIZE;
+	attr.ia_size = newsize;
+	rc = ksmbd_vfs_setattr(work, name, 0, &attr);
+	if (rc) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		goto out;
+	}
+	ksmbd_debug(SMB, "%s truncated to newsize %lld\n", name, newsize);
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 2 for parameter count + 1 pad1*/
+	rsp->ByteCount = cpu_to_le16(3);
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+out:
+	kfree(name);
+	return rc;
+}
+
+/**
+ * smb_creat_hardlink() - handler for creating hardlink
+ * @work:	smb work containing set path info command buffer
+ *
+ * The new link name comes from FileName; the existing target name is
+ * carried in the data area at DataOffset.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_creat_hardlink(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	unsigned int maxlen, offset;
+	char *oldname, *newname;
+	int err;
+
+	/* bound the filename read against the total request length */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	newname = smb_get_name(share, req->FileName, maxlen - offset,
+			       work, false);
+	if (IS_ERR(newname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(newname);
+	}
+
+	/*
+	 * DataOffset is relative to the 4-byte SMB length header; '>=' here
+	 * because at least one byte of the old name must follow.
+	 */
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		kfree(newname);
+		return -EINVAL;
+	}
+
+	oldname = smb_get_name(share, (char *)req + offset, maxlen - offset,
+			       work, false);
+	if (IS_ERR(oldname)) {
+		/* consistency fix: the newname path sets a status; do so too */
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		err = PTR_ERR(oldname);
+		oldname = NULL;
+		goto out;
+	}
+	ksmbd_debug(SMB, "oldname %s, newname %s\n", oldname, newname);
+
+	err = ksmbd_vfs_link(work, oldname, newname);
+	if (err < 0) {
+		if (err == -EACCES)
+			rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		else
+			rsp->hdr.Status.CifsError = STATUS_NOT_SAME_DEVICE;
+		goto out;
+	}
+
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = 0;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	rsp->ByteCount = cpu_to_le16(3);
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+out:
+	kfree(newname);
+	kfree(oldname);
+	return err;
+}
+
+/**
+ * smb_creat_symlink() - handler for creating symlink
+ * @work:	smb work containing set path info command buffer
+ *
+ * FileName carries the symlink path to create; the link target string
+ * is in the data area at DataOffset (UTF-16 when the request is
+ * unicode).
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_creat_symlink(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *req = work->request_buf;
+	struct smb_com_trans2_spi_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	unsigned int maxlen, offset;
+	char *name, *symname;
+	bool is_unicode = is_smbreq_unicode(&req->hdr);
+	int err;
+
+	/* bound the filename read against the total request length */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	symname = smb_get_name(share, req->FileName, maxlen - offset,
+			       work, false);
+	if (IS_ERR(symname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(symname);
+	}
+
+	/*
+	 * DataOffset is relative to the 4-byte SMB length header; '>='
+	 * because at least one byte of the target string must follow.
+	 */
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset >= maxlen) {
+		kfree(symname);
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_strndup_from_utf16((char *)req + offset, maxlen - offset,
+				      is_unicode, work->conn->local_nls);
+	if (IS_ERR(name)) {
+		kfree(symname);
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		return PTR_ERR(name);
+	}
+	ksmbd_debug(SMB, "name %s, symname %s\n", name, symname);
+
+	err = ksmbd_vfs_symlink(work, name, symname);
+	if (err < 0) {
+		if (err == -ENOSPC)
+			rsp->hdr.Status.CifsError = STATUS_DISK_FULL;
+		else if (err == -EEXIST)
+			rsp->hdr.Status.CifsError =
+				STATUS_OBJECT_NAME_COLLISION;
+		else
+			rsp->hdr.Status.CifsError = STATUS_NOT_SAME_DEVICE;
+	} else
+		rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+
+	/* the response frame is emitted even on failure (status set above) */
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = 0;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	rsp->ByteCount = cpu_to_le16(3);
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+	kfree(name);
+	kfree(symname);
+	return err;
+}
+
+/**
+ * set_path_info() - handler for trans2 set path info sub commands
+ * @work:	smb work containing set path info command
+ *
+ * Validates the parameter count and dispatches on InformationLevel to
+ * the matching sub-handler; each sub-handler fills in the response
+ * (including the NT status) itself.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int set_path_info(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *pSMB_req = work->request_buf;
+	struct smb_com_trans2_spi_rsp *pSMB_rsp = work->response_buf;
+	__u16 info_level, total_param;
+	int err = 0;
+
+	info_level = le16_to_cpu(pSMB_req->InformationLevel);
+	total_param = le16_to_cpu(pSMB_req->TotalParameterCount);
+	/* minimum SPI parameter bytes — presumably per spec; TODO confirm */
+	if (total_param < 7) {
+		pSMB_rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		pr_err("invalid total parameter for info_level 0x%x\n",
+		       total_param);
+		return -EINVAL;
+	}
+
+	switch (info_level) {
+	case SMB_POSIX_OPEN:
+		err = smb_posix_open(work);
+		break;
+	case SMB_POSIX_UNLINK:
+		err = smb_posix_unlink(work);
+		break;
+	case SMB_SET_FILE_UNIX_HLINK:
+		err = smb_creat_hardlink(work);
+		break;
+	case SMB_SET_FILE_UNIX_LINK:
+		err = smb_creat_symlink(work);
+		break;
+	case SMB_SET_FILE_BASIC_INFO:
+		/* fall through */
+	case SMB_SET_FILE_BASIC_INFO2:
+		err = smb_set_time_pathinfo(work);
+		break;
+	case SMB_SET_FILE_UNIX_BASIC:
+		err = smb_set_unix_pathinfo(work);
+		break;
+	case SMB_SET_FILE_EA:
+		err = smb_set_ea(work);
+		break;
+	case SMB_SET_POSIX_ACL:
+		err = smb_set_acl(work);
+		break;
+	case SMB_SET_FILE_END_OF_FILE_INFO2:
+		/* fall through */
+	case SMB_SET_FILE_END_OF_FILE_INFO:
+		err = smb_set_file_size_pinfo(work);
+		break;
+	default:
+		ksmbd_debug(SMB, "info level = %x not implemented yet\n",
+			    info_level);
+		pSMB_rsp->hdr.Status.CifsError = STATUS_NOT_IMPLEMENTED;
+		return -EOPNOTSUPP;
+	}
+
+	if (err < 0)
+		ksmbd_debug(SMB, "info_level 0x%x failed, err %d\n", info_level,
+			    err);
+	return err;
+}
+/**
+ * readdir_info_level_struct_sz() - fixed size of one readdir response entry
+ * @info_level:	smb FIND information level
+ *
+ * Return:	size in bytes of the fixed part of the entry struct for
+ *		@info_level, or -EOPNOTSUPP for an unsupported level
+ */
+static int readdir_info_level_struct_sz(int info_level)
+{
+	switch (info_level) {
+	case SMB_FIND_FILE_INFO_STANDARD:
+		return sizeof(struct find_info_standard);
+	case SMB_FIND_FILE_QUERY_EA_SIZE:
+		return sizeof(struct find_info_query_ea_size);
+	case SMB_FIND_FILE_DIRECTORY_INFO:
+		return sizeof(struct file_directory_info);
+	case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
+		return sizeof(struct file_full_directory_info);
+	case SMB_FIND_FILE_NAMES_INFO:
+		return sizeof(struct file_names_info);
+	case SMB_FIND_FILE_BOTH_DIRECTORY_INFO:
+		return sizeof(struct file_both_directory_info);
+	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
+		return sizeof(struct file_id_full_dir_info);
+	case SMB_FIND_FILE_ID_BOTH_DIR_INFO:
+		return sizeof(struct file_id_both_directory_info);
+	case SMB_FIND_FILE_UNIX:
+		return sizeof(struct file_unix_info);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/**
+ * smb_populate_readdir_entry() - encode directory entry in smb response buffer
+ * @conn:	connection instance
+ * @info_level:	smb information level
+ * @d_info: structure included variables for query dir
+ * @ksmbd_kstat: ksmbd wrapper of dirent stat information
+ *
+ * if directory has many entries, find first can't read it fully.
+ * find next might be called multiple times to read remaining dir entries
+ *
+ * Return:	0 on success, otherwise error
+ */
+/**
+ * smb_populate_readdir_entry() - encode one directory entry, at the
+ *	requested information level, into the TRANS2 FIND response buffer
+ * @conn:	connection; supplies the local NLS table for name conversion
+ * @info_level:	SMB information level requested by the client
+ * @d_info:	readdir bookkeeping: write pointer, remaining space, counters
+ * @ksmbd_kstat:	stat data of the entry being emitted
+ *
+ * Return: 0 on success, -EOPNOTSUPP for an unsupported level, -ENOMEM if
+ * the name cannot be converted, -ENOSPC when the entry does not fit in the
+ * remaining output buffer (out_buf_len is set to -1 in that case).
+ */
+static int smb_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
+				      struct ksmbd_dir_info *d_info,
+				      struct ksmbd_kstat *ksmbd_kstat)
+{
+	int next_entry_offset;
+	char *conv_name;
+	int conv_len;
+	int struct_sz;
+
+	struct_sz = readdir_info_level_struct_sz(info_level);
+	if (struct_sz == -EOPNOTSUPP)
+		return -EOPNOTSUPP;
+
+	conv_name = ksmbd_convert_dir_info_name(d_info,
+						conn->local_nls,
+						&conv_len);
+	if (!conv_name)
+		return -ENOMEM;
+
+	/* struct_sz counts a one-byte FileName[1] placeholder, hence "- 1" */
+	next_entry_offset = ALIGN(struct_sz - 1 + conv_len,
+				  KSMBD_DIR_INFO_ALIGNMENT);
+
+	if (next_entry_offset > d_info->out_buf_len) {
+		kfree(conv_name);
+		d_info->out_buf_len = -1;
+		return -ENOSPC;
+	}
+
+	switch (info_level) {
+	case SMB_FIND_FILE_INFO_STANDARD:
+	{
+		struct find_info_standard *fsinfo;
+
+		fsinfo = (struct find_info_standard *)(d_info->wptr);
+		unix_to_dos_time(
+			ksmbd_NTtimeToUnix(
+				cpu_to_le64(ksmbd_kstat->create_time)),
+			&fsinfo->CreationTime,
+			&fsinfo->CreationDate);
+		unix_to_dos_time(ksmbd_kstat->kstat->atime,
+				 &fsinfo->LastAccessTime,
+				 &fsinfo->LastAccessDate);
+		unix_to_dos_time(ksmbd_kstat->kstat->mtime,
+				 &fsinfo->LastWriteTime,
+				 &fsinfo->LastWriteDate);
+		fsinfo->DataSize = cpu_to_le32(ksmbd_kstat->kstat->size);
+		fsinfo->AllocationSize =
+			cpu_to_le32(ksmbd_kstat->kstat->blocks << 9);
+		fsinfo->Attributes =
+			cpu_to_le16(S_ISDIR(ksmbd_kstat->kstat->mode) ?
+				ATTR_DIRECTORY : ATTR_ARCHIVE);
+		fsinfo->FileNameLength = cpu_to_le16(conv_len);
+		memcpy(fsinfo->FileName, conv_name, conv_len);
+
+		break;
+	}
+	case SMB_FIND_FILE_QUERY_EA_SIZE:
+	{
+		struct find_info_query_ea_size *fesize;
+
+		fesize = (struct find_info_query_ea_size *)(d_info->wptr);
+		unix_to_dos_time(
+			ksmbd_NTtimeToUnix(
+				cpu_to_le64(ksmbd_kstat->create_time)),
+			&fesize->CreationTime,
+			&fesize->CreationDate);
+		unix_to_dos_time(ksmbd_kstat->kstat->atime,
+				 &fesize->LastAccessTime,
+				 &fesize->LastAccessDate);
+		unix_to_dos_time(ksmbd_kstat->kstat->mtime,
+				 &fesize->LastWriteTime,
+				 &fesize->LastWriteDate);
+
+		fesize->DataSize = cpu_to_le32(ksmbd_kstat->kstat->size);
+		fesize->AllocationSize =
+			cpu_to_le32(ksmbd_kstat->kstat->blocks << 9);
+		fesize->Attributes =
+			cpu_to_le16(S_ISDIR(ksmbd_kstat->kstat->mode) ?
+				ATTR_DIRECTORY : ATTR_ARCHIVE);
+		fesize->EASize = 0;
+		fesize->FileNameLength = (__u8)(conv_len);
+		memcpy(fesize->FileName, conv_name, conv_len);
+
+		break;
+	}
+	case SMB_FIND_FILE_DIRECTORY_INFO:
+	{
+		struct file_directory_info *fdinfo = NULL;
+
+		fdinfo = (struct file_directory_info *)
+			ksmbd_vfs_init_kstat(&d_info->wptr, ksmbd_kstat);
+		fdinfo->FileNameLength = cpu_to_le32(conv_len);
+		memcpy(fdinfo->FileName, conv_name, conv_len);
+		fdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+		/*
+		 * Zero only the alignment padding between the end of the
+		 * name and the next entry.  The previous unparenthesized
+		 * form "next_entry_offset - struct_sz - 1 + conv_len"
+		 * overshot by 2 * (conv_len - 1) bytes, writing past the
+		 * space validated by the out_buf_len check above.
+		 */
+		memset((char *)fdinfo + struct_sz - 1 + conv_len,
+			'\0',
+			next_entry_offset - (struct_sz - 1 + conv_len));
+		break;
+	}
+	case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
+	{
+		struct file_full_directory_info *ffdinfo = NULL;
+
+		ffdinfo = (struct file_full_directory_info *)
+			ksmbd_vfs_init_kstat(&d_info->wptr, ksmbd_kstat);
+		ffdinfo->FileNameLength = cpu_to_le32(conv_len);
+		ffdinfo->EaSize = 0;
+		memcpy(ffdinfo->FileName, conv_name, conv_len);
+		ffdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+		/* zero only the alignment padding after the name */
+		memset((char *)ffdinfo + struct_sz - 1 + conv_len,
+			'\0',
+			next_entry_offset - (struct_sz - 1 + conv_len));
+		break;
+	}
+	case SMB_FIND_FILE_NAMES_INFO:
+	{
+		struct file_names_info *fninfo = NULL;
+
+		fninfo = (struct file_names_info *)(d_info->wptr);
+		fninfo->FileNameLength = cpu_to_le32(conv_len);
+		memcpy(fninfo->FileName, conv_name, conv_len);
+		fninfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+		/* zero only the alignment padding after the name */
+		memset((char *)fninfo + struct_sz - 1 + conv_len,
+			'\0',
+			next_entry_offset - (struct_sz - 1 + conv_len));
+
+		break;
+	}
+	case SMB_FIND_FILE_BOTH_DIRECTORY_INFO:
+	{
+		struct file_both_directory_info *fbdinfo = NULL;
+
+		fbdinfo = (struct file_both_directory_info *)
+			ksmbd_vfs_init_kstat(&d_info->wptr, ksmbd_kstat);
+		fbdinfo->FileNameLength = cpu_to_le32(conv_len);
+		fbdinfo->EaSize = 0;
+		fbdinfo->ShortNameLength = 0;
+		fbdinfo->Reserved = 0;
+		memcpy(fbdinfo->FileName, conv_name, conv_len);
+		fbdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+		/* zero only the alignment padding after the name */
+		memset((char *)fbdinfo + struct_sz - 1 + conv_len,
+			'\0',
+			next_entry_offset - (struct_sz - 1 + conv_len));
+		break;
+	}
+	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
+	{
+		struct file_id_full_dir_info *dinfo = NULL;
+
+		dinfo = (struct file_id_full_dir_info *)
+			ksmbd_vfs_init_kstat(&d_info->wptr, ksmbd_kstat);
+		dinfo->FileNameLength = cpu_to_le32(conv_len);
+		dinfo->EaSize = 0;
+		dinfo->Reserved = 0;
+		dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
+		memcpy(dinfo->FileName, conv_name, conv_len);
+		dinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+		/* zero only the alignment padding after the name */
+		memset((char *)dinfo + struct_sz - 1 + conv_len,
+			'\0',
+			next_entry_offset - (struct_sz - 1 + conv_len));
+		break;
+	}
+	case SMB_FIND_FILE_ID_BOTH_DIR_INFO:
+	{
+		struct file_id_both_directory_info *fibdinfo = NULL;
+
+		fibdinfo = (struct file_id_both_directory_info *)
+			ksmbd_vfs_init_kstat(&d_info->wptr, ksmbd_kstat);
+		fibdinfo->FileNameLength = cpu_to_le32(conv_len);
+		fibdinfo->EaSize = 0;
+		fibdinfo->ShortNameLength = 0;
+		fibdinfo->Reserved = 0;
+		fibdinfo->Reserved2 = 0;
+		fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
+		memcpy(fibdinfo->FileName, conv_name, conv_len);
+		fibdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+		/* zero only the alignment padding after the name */
+		memset((char *)fibdinfo + struct_sz - 1 + conv_len,
+			'\0',
+			next_entry_offset - (struct_sz - 1 + conv_len));
+
+		break;
+	}
+	case SMB_FIND_FILE_UNIX:
+	{
+		struct file_unix_info *finfo = NULL;
+		struct file_unix_basic_info *unix_info;
+
+		finfo = (struct file_unix_info *)(d_info->wptr);
+		finfo->ResumeKey = 0;
+		unix_info = (struct file_unix_basic_info *)((char *)finfo + 8);
+		init_unix_info(unix_info, &init_user_ns, ksmbd_kstat->kstat);
+		/* include null terminator */
+		memcpy(finfo->FileName, conv_name, conv_len + 2);
+		next_entry_offset += 2;
+		finfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+		/*
+		 * Padding starts at the UTF-16 NUL terminator, which is
+		 * already zero, so re-zeroing it is harmless.
+		 * NOTE(review): the 2 extra terminator bytes are not covered
+		 * by the out_buf_len check above — verify.
+		 */
+		memset((char *)finfo + struct_sz - 1 + conv_len,
+			'\0',
+			next_entry_offset - (struct_sz - 1 + conv_len));
+		break;
+	}
+	}
+
+	d_info->num_entry++;
+	d_info->last_entry_offset = d_info->data_count;
+	d_info->data_count += next_entry_offset;
+	d_info->out_buf_len -= next_entry_offset;
+	d_info->wptr = (char *)(d_info->wptr) + next_entry_offset;
+	kfree(conv_name);
+
+	ksmbd_debug(SMB, "info_level : %d, buf_len :%d, next_offset : %d, data_count : %d\n",
+		    info_level, d_info->out_buf_len, next_entry_offset,
+		    d_info->data_count);
+	return 0;
+}
+
+/**
+ * ksmbd_fill_dirent() - dir_context actor that copies one dirent into the
+ *	page-sized staging buffer embedded in struct ksmbd_readdir_data
+ * @ctx:	dir_context embedded in ksmbd_readdir_data
+ * @name:	dirent name (not NUL-terminated)
+ * @namlen:	dirent name length
+ * @offset:	dirent offset in directory
+ * @ino:	dirent inode number
+ * @d_type:	dirent type
+ *
+ * Return:	true to continue iteration, false when the PAGE_SIZE staging
+ *		buffer is full and iterate_dir() must stop
+ */
+static bool ksmbd_fill_dirent(struct dir_context *ctx, const char *name, int namlen,
+			      loff_t offset, u64 ino, unsigned int d_type)
+{
+	struct ksmbd_readdir_data *buf =
+		container_of(ctx, struct ksmbd_readdir_data, ctx);
+	struct ksmbd_dirent *de = (void *)(buf->dirent + buf->used);
+	unsigned int reclen;
+
+	/* records are u64-aligned so the next record starts aligned */
+	reclen = ALIGN(sizeof(struct ksmbd_dirent) + namlen, sizeof(u64));
+	if (buf->used + reclen > PAGE_SIZE)
+		return false;
+
+	de->namelen = namlen;
+	de->offset = offset;
+	de->ino = ino;
+	de->d_type = d_type;
+	memcpy(de->name, name, namlen);
+	buf->used += reclen;
+
+	return true;
+}
+
+/**
+ * find_first() - smb readdir command (TRANS2_FIND_FIRST2)
+ * @work:	smb work containing find first request params
+ *
+ * Opens the requested directory, emits the entries matching the search
+ * pattern into the response buffer and leaves the directory handle open
+ * so that find_next() can resume where this request stopped.
+ *
+ * Return:	0 on success, otherwise error.  Note that protocol-level
+ *		failures are reported through rsp->hdr.Status and the
+ *		function may still return 0 (see err_out).
+ */
+static int find_first(struct ksmbd_work *work)
+{
+	struct smb_hdr *rsp_hdr = work->response_buf;
+	struct ksmbd_conn *conn = work->conn;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct smb_com_trans2_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct smb_com_trans2_ffirst_req_params *req_params;
+	struct smb_com_trans2_ffirst_rsp_parms *params = NULL;
+	struct path path, parent_path;
+	struct ksmbd_dirent *de;
+	struct ksmbd_file *dir_fp = NULL;
+	struct kstat kstat;
+	struct ksmbd_kstat ksmbd_kstat;
+	struct ksmbd_dir_info d_info;
+	int params_count = sizeof(struct smb_com_trans2_ffirst_rsp_parms);
+	int data_alignment_offset = 0;
+	int rc = 0, reclen = 0;
+	int srch_cnt = 0;
+	char *dirpath = NULL;
+	char *srch_ptr = NULL;
+	int header_size;
+	int struct_sz;
+	unsigned int maxlen, offset;
+
+	memset(&d_info, 0, sizeof(struct ksmbd_dir_info));
+
+	if (ksmbd_override_fsids(work)) {
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		return -ENOMEM;
+	}
+
+	/* validate the client-supplied parameter offset against request size */
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->ParameterOffset) + 4;
+	if (offset > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		ksmbd_revert_fsids(work);
+		return -EINVAL;
+	}
+
+	req_params = (struct smb_com_trans2_ffirst_req_params *)
+		     (work->request_buf + offset);
+
+	offset += offsetof(struct smb_com_trans2_ffirst_req_params, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		ksmbd_revert_fsids(work);
+		return -EINVAL;
+	}
+
+	/* splits the request name into directory path and search pattern */
+	dirpath = smb_get_dir_name(share, req_params->FileName,
+				   maxlen - offset, work, &srch_ptr);
+	if (IS_ERR(dirpath)) {
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		rc = PTR_ERR(dirpath);
+		goto err_out;
+	}
+
+	/* "/" means the share root, i.e. an empty relative path */
+	if (strlen(dirpath) == 1 && dirpath[0] == '/')
+		dirpath[0] = '\0';
+
+	ksmbd_debug(SMB, "complete dir path = %s\n", dirpath);
+	rc = ksmbd_vfs_kern_path_locked(work, dirpath,
+					LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,
+					&parent_path, &path, 0);
+	if (rc < 0) {
+		ksmbd_debug(SMB, "cannot create vfs root path <%s> %d\n",
+			    dirpath, rc);
+		goto err_free_dirpath;
+	} else {
+		if (inode_permission(mnt_idmap(path.mnt),
+				     d_inode(path.dentry),
+				     MAY_READ | MAY_EXEC)) {
+			rc = -EACCES;
+			goto err_free_kernpath;
+		}
+	}
+
+	if (d_is_symlink(path.dentry)) {
+		rc = -EACCES;
+		goto err_free_kernpath;
+	}
+
+	dir_fp = ksmbd_vfs_dentry_open(work, &path, O_RDONLY, 0, 1);
+	if (IS_ERR(dir_fp)) {
+		ksmbd_debug(SMB, "dir dentry open failed with rc=%d\n", rc);
+		rc = -EINVAL;
+		dir_fp = NULL;
+		goto err_free_kernpath;
+	}
+
+	/* lookup done: drop the parent lock/ref taken by kern_path_locked */
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&parent_path);
+
+	write_lock(&dir_fp->f_ci->m_lock);
+	list_add(&dir_fp->node, &dir_fp->f_ci->m_fp_list);
+	write_unlock(&dir_fp->f_ci->m_lock);
+
+	/* page-sized staging buffer filled by iterate_dir()/ksmbd_fill_dirent() */
+	set_ctx_actor(&dir_fp->readdir_data.ctx, ksmbd_fill_dirent);
+	dir_fp->readdir_data.dirent = (void *)__get_free_page(GFP_KERNEL);
+	/*
+	 * NOTE(review): from here on failures jump to err_out, which does
+	 * not free dirpath; ownership only moves to dir_fp->filename a few
+	 * lines below — possible leak on this error path, verify.
+	 */
+	if (!dir_fp->readdir_data.dirent) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	dir_fp->filename = dirpath;
+	dir_fp->readdir_data.used = 0;
+	dir_fp->dirent_offset = 0;
+	dir_fp->readdir_data.file_attr =
+		le16_to_cpu(req_params->SearchAttributes);
+	ksmbd_update_fstate(&work->sess->file_table, dir_fp, FP_INITED);
+
+	/* trans2 data section is 4-byte aligned after the parameter block */
+	if (params_count % 4)
+		data_alignment_offset = 4 - params_count % 4;
+
+	d_info.smb1_name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
+	if (!d_info.smb1_name) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+	d_info.wptr = (char *)((char *)rsp + sizeof(struct smb_com_trans2_rsp) +
+			params_count + data_alignment_offset);
+
+	header_size = sizeof(struct smb_com_trans2_rsp) + params_count +
+		data_alignment_offset;
+
+
+	struct_sz = readdir_info_level_struct_sz(le16_to_cpu(req_params->InformationLevel));
+
+	if (struct_sz < 0) {
+		rc = -EFAULT;
+		goto err_out;
+	}
+
+	/* When search count is zero, respond only 1 entry. */
+	srch_cnt = le16_to_cpu(req_params->SearchCount);
+	if (!srch_cnt)
+		d_info.out_buf_len = struct_sz + header_size;
+	else
+		d_info.out_buf_len = min_t(int, srch_cnt * struct_sz + header_size,
+				MAX_CIFS_LOOKUP_BUFFER_SIZE - header_size);
+
+
+	/* reserve dot and dotdot entries in head of buffer in first response */
+	if (!*srch_ptr || is_asterisk(srch_ptr)) {
+		rc = ksmbd_populate_dot_dotdot_entries(work,
+				le16_to_cpu(req_params->InformationLevel),
+				dir_fp,
+				&d_info,
+				srch_ptr,
+				smb_populate_readdir_entry);
+		if (rc)
+			goto err_out;
+	}
+
+	/* walk cached dirents, refilling the staging page via iterate_dir() */
+	do {
+		if (dir_fp->dirent_offset >= dir_fp->readdir_data.used) {
+			dir_fp->dirent_offset = 0;
+			dir_fp->readdir_data.used = 0;
+			rc = iterate_dir(dir_fp->filp,
+					 &dir_fp->readdir_data.ctx);
+			if (rc < 0) {
+				ksmbd_debug(SMB, "err : %d\n", rc);
+				goto err_out;
+			}
+
+			/* nothing refilled: directory fully read */
+			if (!dir_fp->readdir_data.used) {
+				free_page((unsigned long)
+						(dir_fp->readdir_data.dirent));
+				dir_fp->readdir_data.dirent = NULL;
+				break;
+			}
+
+			de = (struct ksmbd_dirent *)
+				((char *)dir_fp->readdir_data.dirent);
+		} else {
+			de = (struct ksmbd_dirent *)
+				((char *)dir_fp->readdir_data.dirent +
+				 dir_fp->dirent_offset);
+		}
+
+		reclen = ALIGN(sizeof(struct ksmbd_dirent) + de->namelen,
+			       sizeof(__le64));
+		dir_fp->dirent_offset += reclen;
+
+		if (dir_fp->readdir_data.file_attr &
+			SMB_SEARCH_ATTRIBUTE_DIRECTORY && de->d_type != DT_DIR)
+			continue;
+
+		ksmbd_kstat.kstat = &kstat;
+
+		if (de->namelen > NAME_MAX) {
+			pr_err("filename length exceeds 255 bytes.\n");
+			continue;
+		}
+
+		/* dot/dotdot were already emitted above */
+		if (!strncmp(de->name, ".", de->namelen) ||
+		    !strncmp(de->name, "..", de->namelen))
+			continue;
+
+		memcpy(d_info.smb1_name, de->name, de->namelen);
+		d_info.smb1_name[de->namelen] = '\0';
+		d_info.name = (const char *)d_info.smb1_name;
+		d_info.name_len = de->namelen;
+		rc = ksmbd_vfs_readdir_name(work,
+					    file_mnt_idmap(dir_fp->filp),
+					    &ksmbd_kstat,
+					    de->name,
+					    de->namelen,
+					    dirpath);
+		if (rc) {
+			ksmbd_debug(SMB, "Cannot read dirent: %d\n", rc);
+			continue;
+		}
+
+		if (ksmbd_share_veto_filename(share, d_info.name)) {
+			ksmbd_debug(SMB, "Veto filename %s\n", d_info.name);
+			continue;
+		}
+
+		if (match_pattern(d_info.name, d_info.name_len, srch_ptr)) {
+			rc = smb_populate_readdir_entry(conn,
+				le16_to_cpu(req_params->InformationLevel),
+				&d_info,
+				&ksmbd_kstat);
+			if (rc == -ENOSPC)
+				break;
+			else if (rc)
+				goto err_out;
+		}
+	} while (d_info.out_buf_len >= 0);
+
+	if (!d_info.data_count && *srch_ptr) {
+		ksmbd_debug(SMB, "There is no entry matched with the search pattern\n");
+		rc = -ENOENT;
+		goto err_out;
+	}
+
+	/* last entry did not fit: rewind so find_next() re-reads it */
+	if (d_info.out_buf_len < 0)
+		dir_fp->dirent_offset -= reclen;
+
+	params = (struct smb_com_trans2_ffirst_rsp_parms *)((char *)rsp +
+			sizeof(struct smb_com_trans2_rsp));
+	params->SearchHandle = dir_fp->volatile_id;
+	params->SearchCount = cpu_to_le16(d_info.num_entry);
+	params->LastNameOffset = cpu_to_le16(d_info.last_entry_offset);
+
+	if (d_info.out_buf_len < 0) {
+		ksmbd_debug(SMB, "continue search\n");
+		params->EndofSearch = cpu_to_le16(0);
+	} else {
+		ksmbd_debug(SMB, "end of search\n");
+		params->EndofSearch = cpu_to_le16(1);
+		path_put(&(dir_fp->filp->f_path));
+		if (le16_to_cpu(req_params->SearchFlags) &
+				CIFS_SEARCH_CLOSE_AT_END)
+			ksmbd_close_fd(work, dir_fp->volatile_id);
+	}
+	params->EAErrorOffset = cpu_to_le16(0);
+
+	rsp_hdr->WordCount = 0x0A;
+	rsp->t2.TotalParameterCount = cpu_to_le16(params_count);
+	rsp->t2.TotalDataCount = cpu_to_le16(d_info.data_count);
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = cpu_to_le16(params_count);
+	rsp->t2.ParameterOffset =
+		cpu_to_le16(sizeof(struct smb_com_trans2_rsp) - 4);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = cpu_to_le16(d_info.data_count);
+	rsp->t2.DataOffset = cpu_to_le16(sizeof(struct smb_com_trans2_rsp) +
+		params_count + data_alignment_offset - 4);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	rsp->Pad = 0;
+	rsp->ByteCount = cpu_to_le16(d_info.data_count +
+		params_count + 1 /*pad*/ + data_alignment_offset);
+	memset((char *)rsp + sizeof(struct smb_com_trans2_rsp) + params_count,
+			'\0', 2);
+	inc_resp_size(work, (10 * 2 + d_info.data_count +
+				params_count + 1 + data_alignment_offset));
+	kfree(srch_ptr);
+	kfree(d_info.smb1_name);
+	ksmbd_revert_fsids(work);
+	return 0;
+
+err_free_kernpath:
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+err_free_dirpath:
+	kfree(dirpath);
+	/*
+	 * Map errno to an SMB status; the handler still returns 0 so the
+	 * error reaches the client through the response header instead.
+	 */
+err_out:
+	if (rc == -EINVAL)
+		rsp_hdr->Status.CifsError = STATUS_INVALID_PARAMETER;
+	else if (rc == -EACCES || rc == -EXDEV)
+		rsp_hdr->Status.CifsError = STATUS_ACCESS_DENIED;
+	else if (rc == -ENOENT)
+		rsp_hdr->Status.CifsError = STATUS_NO_SUCH_FILE;
+	else if (rc == -EBADF)
+		rsp_hdr->Status.CifsError = STATUS_FILE_CLOSED;
+	else if (rc == -ENOMEM)
+		rsp_hdr->Status.CifsError = STATUS_NO_MEMORY;
+	else if (rc == -EFAULT)
+		rsp_hdr->Status.CifsError = STATUS_INVALID_LEVEL;
+	if (!rsp->hdr.Status.CifsError)
+		rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+
+	if (dir_fp) {
+		if (dir_fp->readdir_data.dirent) {
+			free_page((unsigned long)(dir_fp->readdir_data.dirent));
+			dir_fp->readdir_data.dirent = NULL;
+		}
+		path_put(&(dir_fp->filp->f_path));
+		ksmbd_close_fd(work, dir_fp->volatile_id);
+	}
+
+	kfree(srch_ptr);
+	kfree(d_info.smb1_name);
+	ksmbd_revert_fsids(work);
+	return 0;
+}
+
+/**
+ * find_next() - smb next readdir command (TRANS2_FIND_NEXT2)
+ * @work:	smb work containing find next request params
+ *
+ * if directory has many entries, find first can't read it fully.
+ * find next might be called multiple times to read remaining dir entries
+ * from the directory handle left open by find_first().
+ *
+ * Return:	0 on success, otherwise error.  Protocol-level failures are
+ *		reported through rsp_hdr->Status while the function may
+ *		still return 0 (see err_out).
+ */
+static int find_next(struct ksmbd_work *work)
+{
+	struct smb_hdr *rsp_hdr = work->response_buf;
+	struct ksmbd_conn *conn = work->conn;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct smb_com_trans2_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct smb_com_trans2_fnext_req_params *req_params;
+	struct smb_com_trans2_fnext_rsp_params *params = NULL;
+	struct ksmbd_dirent *de;
+	struct ksmbd_file *dir_fp;
+	struct kstat kstat;
+	struct ksmbd_kstat ksmbd_kstat;
+	struct ksmbd_dir_info d_info;
+	int params_count = sizeof(struct smb_com_trans2_fnext_rsp_params);
+	int data_alignment_offset = 0;
+	int rc = 0, reclen = 0;
+	__u16 sid;
+	int header_size, srch_cnt, struct_sz;
+	unsigned int maxlen, offset;
+
+	memset(&d_info, 0, sizeof(struct ksmbd_dir_info));
+
+	/* validate the client-supplied parameter offset against request size */
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->ParameterOffset) + 4;
+
+	if (offset > maxlen) {
+		rsp_hdr->Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	req_params = (struct smb_com_trans2_fnext_req_params *)
+		     (work->request_buf + offset);
+	sid = req_params->SearchHandle;
+
+	/* resume from the directory handle opened by find_first() */
+	dir_fp = ksmbd_lookup_fd_fast(work, sid);
+	if (!dir_fp) {
+		ksmbd_debug(SMB, "error invalid sid\n");
+		rc = -EINVAL;
+		goto err_out;
+	}
+
+	set_ctx_actor(&dir_fp->readdir_data.ctx, ksmbd_fill_dirent);
+
+	/* trans2 data section is 4-byte aligned after the parameter block */
+	if (params_count % 4)
+		data_alignment_offset = 4 - params_count % 4;
+
+	d_info.smb1_name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
+	if (!d_info.smb1_name) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+	d_info.wptr = (char *)((char *)rsp + sizeof(struct smb_com_trans2_rsp) +
+			params_count + data_alignment_offset);
+
+	header_size = sizeof(struct smb_com_trans2_rsp) + params_count +
+		data_alignment_offset;
+
+	srch_cnt = le16_to_cpu(req_params->SearchCount);
+	struct_sz = readdir_info_level_struct_sz(le16_to_cpu(req_params->InformationLevel));
+
+	if (struct_sz < 0) {
+		rc = -EFAULT;
+		goto err_out;
+	}
+
+	d_info.out_buf_len = min_t(int, srch_cnt * struct_sz + header_size,
+				   MAX_CIFS_LOOKUP_BUFFER_SIZE - header_size);
+	/* walk cached dirents, refilling the staging page via iterate_dir() */
+	do {
+		if (dir_fp->dirent_offset >= dir_fp->readdir_data.used) {
+			dir_fp->dirent_offset = 0;
+			dir_fp->readdir_data.used = 0;
+			rc = iterate_dir(dir_fp->filp,
+					 &dir_fp->readdir_data.ctx);
+			if (rc < 0) {
+				ksmbd_debug(SMB, "err : %d\n", rc);
+				goto err_out;
+			}
+
+			/* nothing refilled: directory fully read */
+			if (!dir_fp->readdir_data.used) {
+				free_page((unsigned long)
+						(dir_fp->readdir_data.dirent));
+				dir_fp->readdir_data.dirent = NULL;
+				break;
+			}
+
+			de = (struct ksmbd_dirent *)
+				((char *)dir_fp->readdir_data.dirent);
+		} else {
+			de = (struct ksmbd_dirent *)
+				((char *)dir_fp->readdir_data.dirent +
+				 dir_fp->dirent_offset);
+		}
+
+		reclen = ALIGN(sizeof(struct ksmbd_dirent) + de->namelen,
+			       sizeof(__le64));
+		dir_fp->dirent_offset += reclen;
+
+		if (dir_fp->readdir_data.file_attr &
+			SMB_SEARCH_ATTRIBUTE_DIRECTORY && de->d_type != DT_DIR)
+			continue;
+
+		if (dir_fp->readdir_data.file_attr &
+			SMB_SEARCH_ATTRIBUTE_ARCHIVE && (de->d_type == DT_DIR ||
+			(!strcmp(de->name, ".") || !strcmp(de->name, ".."))))
+			continue;
+
+		ksmbd_kstat.kstat = &kstat;
+
+		if (de->namelen > NAME_MAX) {
+			pr_err("filename length exceeds 255 bytes.\n");
+			continue;
+		}
+		memcpy(d_info.smb1_name, de->name, de->namelen);
+		d_info.smb1_name[de->namelen] = '\0';
+		d_info.name = (const char *)d_info.smb1_name;
+		d_info.name_len = de->namelen;
+		rc = ksmbd_vfs_readdir_name(work,
+					    file_mnt_idmap(dir_fp->filp),
+					    &ksmbd_kstat,
+					    de->name,
+					    de->namelen,
+					    dir_fp->filename);
+		if (rc) {
+			ksmbd_debug(SMB, "Err while dirent read rc = %d\n", rc);
+			rc = 0;
+			continue;
+		}
+
+		if (ksmbd_share_veto_filename(share, d_info.name)) {
+			ksmbd_debug(SMB, "file(%s) is invisible by setting as veto file\n",
+				    d_info.name);
+			continue;
+		}
+
+		/* the same message was previously logged twice; log it once */
+		ksmbd_debug(SMB, "filename string = %.*s\n", d_info.name_len,
+			    d_info.name);
+		rc = smb_populate_readdir_entry(conn,
+						le16_to_cpu(req_params->InformationLevel),
+						&d_info, &ksmbd_kstat);
+		if (rc == -ENOSPC)
+			break;
+		else if (rc)
+			goto err_out;
+
+	} while (d_info.out_buf_len >= 0);
+
+	/* last entry did not fit: rewind so the next call re-reads it */
+	if (d_info.out_buf_len < 0)
+		dir_fp->dirent_offset -= reclen;
+
+	params = (struct smb_com_trans2_fnext_rsp_params *)
+		((char *)rsp + sizeof(struct smb_com_trans_rsp));
+	params->SearchCount = cpu_to_le16(d_info.num_entry);
+
+	if (d_info.out_buf_len < 0) {
+		ksmbd_debug(SMB, "continue search\n");
+		params->EndofSearch = cpu_to_le16(0);
+		params->LastNameOffset = cpu_to_le16(d_info.last_entry_offset);
+	} else {
+		ksmbd_debug(SMB, "end of search\n");
+		params->EndofSearch = cpu_to_le16(1);
+		params->LastNameOffset = cpu_to_le16(0);
+		path_put(&(dir_fp->filp->f_path));
+		if (le16_to_cpu(req_params->SearchFlags) &
+				CIFS_SEARCH_CLOSE_AT_END)
+			ksmbd_close_fd(work, sid);
+	}
+	params->EAErrorOffset = cpu_to_le16(0);
+
+	rsp_hdr->WordCount = 0x0A;
+	rsp->t2.TotalParameterCount = cpu_to_le16(params_count);
+	rsp->t2.TotalDataCount = cpu_to_le16(d_info.data_count);
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = cpu_to_le16(params_count);
+	rsp->t2.ParameterOffset =
+		cpu_to_le16(sizeof(struct smb_com_trans_rsp) - 4);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = cpu_to_le16(d_info.data_count);
+	rsp->t2.DataOffset = cpu_to_le16(sizeof(struct smb_com_trans_rsp) +
+		params_count + data_alignment_offset - 4);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	rsp->Pad = 0;
+	rsp->ByteCount = cpu_to_le16(d_info.data_count + params_count + 1 +
+		data_alignment_offset);
+	memset((char *)rsp + sizeof(struct smb_com_trans_rsp) +
+		params_count, '\0', data_alignment_offset);
+	inc_resp_size(work, (10 * 2 + d_info.data_count +
+		params_count + 1 + data_alignment_offset));
+	kfree(d_info.smb1_name);
+	ksmbd_fd_put(work, dir_fp);
+	return 0;
+
+err_out:
+	if (rc == -EINVAL)
+		rsp_hdr->Status.CifsError = STATUS_INVALID_PARAMETER;
+	else if (rc == -EACCES || rc == -EXDEV)
+		rsp_hdr->Status.CifsError = STATUS_ACCESS_DENIED;
+	else if (rc == -ENOENT)
+		rsp_hdr->Status.CifsError = STATUS_NO_SUCH_FILE;
+	else if (rc == -EBADF)
+		rsp_hdr->Status.CifsError = STATUS_FILE_CLOSED;
+	else if (rc == -ENOMEM)
+		rsp_hdr->Status.CifsError = STATUS_NO_MEMORY;
+	else if (rc == -EFAULT)
+		rsp_hdr->Status.CifsError = STATUS_INVALID_LEVEL;
+	if (!rsp->hdr.Status.CifsError)
+		rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+
+	if (dir_fp) {
+		if (dir_fp->readdir_data.dirent) {
+			free_page((unsigned long)(dir_fp->readdir_data.dirent));
+			dir_fp->readdir_data.dirent = NULL;
+		}
+		path_put(&(dir_fp->filp->f_path));
+		ksmbd_close_fd(work, sid);
+	}
+
+	kfree(d_info.smb1_name);
+	return 0;
+}
+
+/**
+ * smb_set_alloc_size() - TRANS2_SET_FILE_INFORMATION handler for the
+ *		file-allocation-info level: resize a file by truncation
+ * @work:	smb work containing set file info command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_alloc_size(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_sfi_req *req = work->request_buf;
+	struct smb_com_trans2_sfi_rsp *rsp = work->response_buf;
+	struct file_allocation_info *alloc_info;
+	struct ksmbd_file *fp;
+	struct kstat stat;
+	loff_t length;
+	int rc;
+
+	/* the allocation info lives at DataOffset past the SMB header */
+	alloc_info = (struct file_allocation_info *)
+		(((char *) &req->hdr.Protocol) + le16_to_cpu(req->DataOffset));
+	length = le64_to_cpu(alloc_info->AllocationSize);
+
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		pr_err("failed to get filp for fid %u\n", req->Fid);
+		rsp->hdr.Status.CifsError = STATUS_FILE_CLOSED;
+		return -ENOENT;
+	}
+
+	rc = ksmbd_vfs_getattr(&fp->filp->f_path, &stat);
+	if (rc) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		ksmbd_fd_put(work, fp);
+		return rc;
+	}
+
+	/* only touch the file when the size actually changes */
+	if (length != stat.size) {
+		/* round the requested allocation up to the configured unit */
+		if (alloc_roundup_size) {
+			length = div64_u64(length + alloc_roundup_size - 1,
+					   alloc_roundup_size);
+			length *= alloc_roundup_size;
+		}
+
+		rc = ksmbd_vfs_truncate(work, fp, length);
+		if (rc) {
+			rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+			ksmbd_fd_put(work, fp);
+			return rc;
+		}
+	}
+
+	ksmbd_debug(SMB, "fid %u, truncated to newsize %llu\n", req->Fid,
+		    length);
+
+	/* success response: two parameter bytes, no data, three pad bytes */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	/* 3 pad (1 pad1 + 2 pad2) */
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Reserved2 = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+	ksmbd_fd_put(work, fp);
+
+	return 0;
+}
+
+/**
+ * smb_set_file_size_finfo() - TRANS2_SET_FILE_INFORMATION handler for the
+ *		end-of-file info level: truncate the file to the given size
+ * @work:	smb work containing set file info command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_file_size_finfo(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_sfi_req *req = work->request_buf;
+	struct smb_com_trans2_sfi_rsp *rsp = work->response_buf;
+	struct file_end_of_file_info *eof_info;
+	struct ksmbd_file *fp;
+	loff_t new_eof;
+	int rc;
+
+	/* the end-of-file info lives at DataOffset past the SMB header */
+	eof_info = (struct file_end_of_file_info *)
+		(((char *) &req->hdr.Protocol) + le16_to_cpu(req->DataOffset));
+
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		pr_err("failed to get filp for fid %u\n", req->Fid);
+		rsp->hdr.Status.CifsError = STATUS_FILE_CLOSED;
+		return -ENOENT;
+	}
+
+	new_eof = le64_to_cpu(eof_info->FileSize);
+	rc = ksmbd_vfs_truncate(work, fp, new_eof);
+	if (rc) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		ksmbd_fd_put(work, fp);
+		return rc;
+	}
+
+	ksmbd_debug(SMB, "fid %u, truncated to newsize %lld\n", req->Fid,
+		    new_eof);
+	/* success response: two parameter bytes, no data, three pad bytes */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	/* 3 pad (1 pad1 + 2 pad2) */
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Reserved2 = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+	ksmbd_fd_put(work, fp);
+
+	return 0;
+}
+
+/**
+ * query_file_info_pipe() - query file info of IPC pipe
+ *		using query file info command
+ * @work:	smb work containing query file info command buffer
+ * @req_params:	buffer containing Trans2 Query File Information parameters
+ *
+ * Only SMB_QUERY_FILE_STANDARD_INFO is supported for pipes; any other
+ * level is rejected with STATUS_NOT_SUPPORTED.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int query_file_info_pipe(struct ksmbd_work *work,
+				struct smb_trans2_qfi_req_params *req_params)
+{
+	struct smb_hdr *rsp_hdr = work->response_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct file_standard_info *standard_info;
+	char *ptr;
+
+	if (le16_to_cpu(req_params->InformationLevel) !=
+	    SMB_QUERY_FILE_STANDARD_INFO) {
+		ksmbd_debug(SMB, "query file info for info %u not supported\n",
+				le16_to_cpu(req_params->InformationLevel));
+		rsp_hdr->Status.CifsError = STATUS_NOT_SUPPORTED;
+		return -EOPNOTSUPP;
+	}
+
+	ksmbd_debug(SMB, "SMB_QUERY_FILE_STANDARD_INFO\n");
+	rsp_hdr->WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = cpu_to_le16(sizeof(struct file_standard_info));
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = cpu_to_le16(2);
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = cpu_to_le16(sizeof(struct file_standard_info));
+	rsp->t2.DataOffset = cpu_to_le16(60);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+	rsp->ByteCount = cpu_to_le16(2 + sizeof(struct file_standard_info) + 3);
+	rsp->Pad = 0;
+	/* lets set EA info */
+	ptr = (char *)&rsp->Pad + 1;
+	memset(ptr, 0, 4);
+	standard_info = (struct file_standard_info *)(ptr + 4);
+	standard_info->AllocationSize = cpu_to_le64(4096);
+	standard_info->EndOfFile = 0;
+	standard_info->NumberOfLinks = cpu_to_le32(1);
+	/*
+	 * The previous code stored 0 here and immediately overwrote it
+	 * with 1; keep the effective value and drop the dead store.
+	 */
+	standard_info->DeletePending = 1;
+	standard_info->Directory = 0;
+	inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+
+	return 0;
+}
+
+/**
+ * query_file_info() - query file info of file/dir
+ *		using query file info command
+ * @work:	smb work containing query file info command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int query_file_info(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct smb_hdr *rsp_hdr = work->response_buf;
+	struct smb_com_trans2_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct smb_trans2_qfi_req_params *req_params;
+	unsigned int maxlen, offset;
+	struct ksmbd_file *fp;
+	struct kstat st;
+	char *ptr;
+	int rc = 0;
+	u64 time;
+
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->ParameterOffset) + 4;
+	if (offset > maxlen) {
+		rsp_hdr->Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+	req_params = (struct smb_trans2_qfi_req_params *)
+		     (work->request_buf + offset);
+
+	if (test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_PIPE)) {
+		ksmbd_debug(SMB, "query file info for IPC srvsvc\n");
+		return query_file_info_pipe(work, req_params);
+	}
+
+	fp = ksmbd_lookup_fd_fast(work, req_params->Fid);
+	if (!fp) {
+		pr_err("failed to get filp for fid %u\n", req_params->Fid);
+		rsp_hdr->Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+		rc = -EIO;
+		goto err_out;
+	}
+
+	rc = vfs_getattr(&fp->filp->f_path, &st, STATX_BASIC_STATS,
+			 AT_STATX_SYNC_AS_STAT);
+	if (rc)
+		goto err_out;
+
+	switch (le16_to_cpu(req_params->InformationLevel)) {
+
+	case SMB_QUERY_FILE_STANDARD_INFO:
+	{
+		struct file_standard_info *standard_info;
+		unsigned int delete_pending;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_STANDARD_INFO\n");
+		delete_pending = ksmbd_inode_pending_delete(fp);
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct file_standard_info));
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount =
+			cpu_to_le16(sizeof(struct file_standard_info));
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount =
+			cpu_to_le16(2 + sizeof(struct file_standard_info) + 3);
+		rsp->Pad = 0;
+		/* lets set EA info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		standard_info = (struct file_standard_info *)(ptr + 4);
+		standard_info->AllocationSize = cpu_to_le64(st.blocks << 9);
+		standard_info->EndOfFile = cpu_to_le64(st.size);
+		standard_info->NumberOfLinks = cpu_to_le32(get_nlink(&st) -
+			delete_pending);
+		standard_info->DeletePending = delete_pending;
+		standard_info->Directory = S_ISDIR(st.mode) ? 1 : 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_BASIC_INFO:
+	{
+		struct file_basic_info *basic_info;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_BASIC_INFO\n");
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct file_basic_info));
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(sizeof(struct file_basic_info));
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount =
+			cpu_to_le16(2 + sizeof(struct file_basic_info) + 3);
+		rsp->Pad = 0;
+		/* lets set EA info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		basic_info = (struct file_basic_info *)(ptr + 4);
+		basic_info->CreationTime = cpu_to_le64(fp->create_time);
+		time = ksmbd_UnixTimeToNT(st.atime);
+		basic_info->LastAccessTime = cpu_to_le64(time);
+		time = ksmbd_UnixTimeToNT(st.mtime);
+		basic_info->LastWriteTime = cpu_to_le64(time);
+		time = ksmbd_UnixTimeToNT(st.ctime);
+		basic_info->ChangeTime = cpu_to_le64(time);
+		basic_info->Attributes = S_ISDIR(st.mode) ?
+			ATTR_DIRECTORY_LE : ATTR_ARCHIVE_LE;
+		basic_info->Pad = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_EA_INFO:
+	{
+		struct file_ea_info *ea_info;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_EA_INFO\n");
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct file_ea_info));
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(sizeof(struct file_ea_info));
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount =
+			cpu_to_le16(2 + sizeof(struct file_ea_info) + 3);
+		rsp->Pad = 0;
+		/* lets set EA info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		ea_info = (struct file_ea_info *)(ptr + 4);
+		ea_info->EaSize = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_UNIX_BASIC:
+	{
+		struct file_unix_basic_info *uinfo;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_UNIX_BASIC\n");
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct file_unix_basic_info));
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount =
+			cpu_to_le16(sizeof(struct file_unix_basic_info));
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount =
+			cpu_to_le16(2 + sizeof(struct file_unix_basic_info)
+				+ 3);
+		rsp->Pad = 0;
+		/* lets set unix info info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		uinfo = (struct file_unix_basic_info *)(ptr + 4);
+		init_unix_info(uinfo, &init_user_ns, &st);
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_NAME_INFO:
+	{
+		struct file_name_info *name_info;
+		size_t len, rsp_offset;
+		int uni_filename_len;
+		char *filename;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_NAME_INFO\n");
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		name_info = (struct file_name_info *)(ptr + 4);
+
+		filename = convert_to_nt_pathname(work->tcon->share_conf,
+						  &fp->filp->f_path);
+		if (!filename) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		len = strlen(filename);
+		rsp_offset = offsetof(struct smb_com_trans2_rsp, Buffer) +
+			     offsetof(struct file_name_info, FileName) + len;
+		if (rsp_offset > work->response_sz) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		uni_filename_len = smbConvertToUTF16(
+				(__le16 *)name_info->FileName,
+				filename, len, conn->local_nls, 0);
+		kfree(filename);
+		uni_filename_len *= 2;
+		name_info->FileNameLength = cpu_to_le32(uni_filename_len);
+
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount = cpu_to_le16(uni_filename_len + 4);
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(uni_filename_len + 4);
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount = cpu_to_le16(2 + uni_filename_len + 4 + 3);
+		rsp->Pad = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_ALL_INFO:
+	{
+		struct file_all_info *ainfo;
+		unsigned int delete_pending;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_ALL_INFO\n");
+		delete_pending = ksmbd_inode_pending_delete(fp);
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct file_all_info));
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(sizeof(struct file_all_info));
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount =
+			cpu_to_le16(2 + sizeof(struct file_all_info) + 3);
+		rsp->Pad = 0;
+		/* lets set all info info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		ainfo = (struct file_all_info *)(ptr + 4);
+		ainfo->CreationTime = cpu_to_le64(fp->create_time);
+		time = ksmbd_UnixTimeToNT(st.atime);
+		ainfo->LastAccessTime = cpu_to_le64(time);
+		time = ksmbd_UnixTimeToNT(st.mtime);
+		ainfo->LastWriteTime = cpu_to_le64(time);
+		time = ksmbd_UnixTimeToNT(st.ctime);
+		ainfo->ChangeTime = cpu_to_le64(time);
+		ainfo->Attributes = cpu_to_le32(S_ISDIR(st.mode) ?
+				ATTR_DIRECTORY : ATTR_ARCHIVE);
+		ainfo->Pad1 = 0;
+		ainfo->AllocationSize = cpu_to_le64(st.blocks << 9);
+		ainfo->EndOfFile = cpu_to_le64(st.size);
+		ainfo->NumberOfLinks = cpu_to_le32(get_nlink(&st) -
+			delete_pending);
+		ainfo->DeletePending = delete_pending;
+		ainfo->Directory = S_ISDIR(st.mode) ? 1 : 0;
+		ainfo->Pad2 = 0;
+		ainfo->EASize = 0;
+		ainfo->FileNameLength = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	default:
+		pr_err("query path info not implemnted for %x\n",
+		       le16_to_cpu(req_params->InformationLevel));
+		rsp_hdr->Status.CifsError = STATUS_NOT_SUPPORTED;
+		rc = -EINVAL;
+		goto err_out;
+
+	}
+
+err_out:
+	ksmbd_fd_put(work, fp);
+	return rc;
+}
+
+/**
+ * smb_set_unix_fileinfo() - set smb unix file info(setattr)
+ * @work:	smb work containing unix basic info buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_unix_fileinfo(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_sfi_req *req = work->request_buf;
+	struct smb_com_trans2_sfi_rsp *rsp = work->response_buf;
+	struct file_unix_basic_info *unix_info;
+	struct ksmbd_file *fp;
+	struct iattr attrs;
+	int err = 0;
+	unsigned int maxlen, offset;
+
+	if (ksmbd_override_fsids(work))
+		return -ENOMEM;
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		ksmbd_revert_fsids(work);
+		return -ENOENT;
+	}
+
+	/*
+	 * The whole file_unix_basic_info payload must lie within the
+	 * request buffer, otherwise unix_info_to_attr() would read past
+	 * its end.  The fp reference taken above must also be dropped on
+	 * this error path.
+	 */
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset > maxlen ||
+	    maxlen - offset < sizeof(struct file_unix_basic_info)) {
+		ksmbd_fd_put(work, fp);
+		ksmbd_revert_fsids(work);
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	unix_info = (struct file_unix_basic_info *)((char *)req + offset);
+
+	attrs.ia_valid = 0;
+	attrs.ia_mode = 0;
+	err = unix_info_to_attr(unix_info, &init_user_ns, &attrs);
+	ksmbd_fd_put(work, fp);
+	ksmbd_revert_fsids(work);
+	if (err)
+		goto out;
+
+	err = ksmbd_vfs_setattr(work, NULL, (u64)req->Fid, &attrs);
+	if (err)
+		goto out;
+
+	/* setattr success, prepare response */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 3 pad (1 pad1 + 2 pad2)*/
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Reserved2 = 0;
+	inc_resp_size(work,
+		      rsp->hdr.WordCount * 2 + le16_to_cpu(rsp->ByteCount));
+
+out:
+	if (err) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return err;
+	}
+	return 0;
+}
+
+/**
+ * smb_set_disposition() - set file dispostion method using trans2
+ *		using set file info command
+ * @work:	smb work containing set file info command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_disposition(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_sfi_req *req = work->request_buf;
+	struct smb_com_trans2_sfi_rsp *rsp = work->response_buf;
+	char *disp_info;
+	struct ksmbd_file *fp;
+	unsigned int maxlen, offset;
+	int ret = 0;
+
+	/*
+	 * Validate the client-supplied DataOffset before dereferencing
+	 * it, like the other trans2 set-file-info handlers do: the
+	 * disposition byte must lie inside the request buffer.
+	 */
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	disp_info =  (char *) (((char *) &req->hdr.Protocol)
+			+ le16_to_cpu(req->DataOffset));
+
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		ksmbd_debug(SMB, "Invalid id for close: %d\n", req->Fid);
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	if (*disp_info) {
+		/* only files opened through the NT path may be deleted */
+		if (!fp->is_nt_open) {
+			rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+			ret = -EPERM;
+			goto err_out;
+		}
+
+		/* refuse to mark a read-only inode for deletion */
+		if (!(file_inode(fp->filp)->i_mode & 0222)) {
+			rsp->hdr.Status.CifsError = STATUS_CANNOT_DELETE;
+			ret = -EPERM;
+			goto err_out;
+		}
+
+		if (S_ISDIR(file_inode(fp->filp)->i_mode) &&
+		    ksmbd_vfs_empty_dir(fp) == -ENOTEMPTY) {
+			rsp->hdr.Status.CifsError = STATUS_DIRECTORY_NOT_EMPTY;
+			ret = -ENOTEMPTY;
+			goto err_out;
+		}
+
+		ksmbd_set_inode_pending_delete(fp);
+	} else {
+		ksmbd_clear_inode_pending_delete(fp);
+	}
+
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 3 pad (1 pad1 + 2 pad2)*/
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Reserved2 = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+err_out:
+	ksmbd_fd_put(work, fp);
+	return ret;
+}
+
+/**
+ * smb_set_time_fileinfo() - set file time method using trans2
+ *		using set file info command
+ * @work:	smb work containing set file info command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_time_fileinfo(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_sfi_req *req;
+	struct smb_com_trans2_sfi_rsp *rsp;
+	struct file_basic_info *info;
+	struct iattr attrs;
+	unsigned int maxlen, offset;
+	int err = 0;
+
+	req = (struct smb_com_trans2_sfi_req *)work->request_buf;
+	rsp = (struct smb_com_trans2_sfi_rsp *)work->response_buf;
+
+	/*
+	 * Bound-check the client-supplied DataOffset so that the whole
+	 * file_basic_info payload lies inside the request buffer (the
+	 * other set-file-info handlers validate the same way).
+	 */
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset > maxlen ||
+	    maxlen - offset < sizeof(struct file_basic_info)) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	info = (struct file_basic_info *)(((char *) &req->hdr.Protocol) +
+			le16_to_cpu(req->DataOffset));
+
+	attrs.ia_valid = 0;
+	/* a zero timestamp on the wire means "leave unchanged" */
+	if (le64_to_cpu(info->LastAccessTime)) {
+		attrs.ia_atime = smb_NTtimeToUnix(info->LastAccessTime);
+		attrs.ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
+	}
+
+	if (le64_to_cpu(info->ChangeTime)) {
+		attrs.ia_ctime = smb_NTtimeToUnix(info->ChangeTime);
+		attrs.ia_valid |= ATTR_CTIME;
+	}
+
+	if (le64_to_cpu(info->LastWriteTime)) {
+		attrs.ia_mtime = smb_NTtimeToUnix(info->LastWriteTime);
+		attrs.ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
+	}
+	/* TODO: check dos mode and acl bits if req->Attributes nonzero */
+
+	if (!attrs.ia_valid)
+		goto done;
+
+	err = ksmbd_vfs_setattr(work, NULL, (u64)req->Fid, &attrs);
+	if (err) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return err;
+	}
+
+done:
+	ksmbd_debug(SMB, "fid %u, setattr done\n", req->Fid);
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 3 pad (1 pad1 + 2 pad2)*/
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Reserved2 = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+	return 0;
+}
+
+/**
+ * smb_fileinfo_rename() - rename method using trans2 set file info command
+ * @work:	smb work containing set file info command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_fileinfo_rename(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_sfi_req *req;
+	struct smb_com_trans2_sfi_rsp *rsp;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct set_file_rename *info;
+	struct ksmbd_file *fp;
+	unsigned int maxlen, offset;
+	char *newname = NULL;
+	int rc = 0, flags;
+
+	req = (struct smb_com_trans2_sfi_req *)work->request_buf;
+	rsp = (struct smb_com_trans2_sfi_rsp *)work->response_buf;
+
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+	info = (struct set_file_rename *)(work->request_buf + offset);
+
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		pr_err("failed to get filp for fid %u\n", req->Fid);
+		rsp->hdr.Status.CifsError = STATUS_FILE_CLOSED;
+		return -ENOENT;
+	}
+
+	/*
+	 * Make sure the whole set_file_rename header lies inside the
+	 * request before reading any of its fields, and go through
+	 * 'out' on failure so the fp reference taken above is dropped.
+	 */
+	offset += sizeof(struct set_file_rename);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		rc = -EINVAL;
+		goto out;
+	}
+
+	flags = info->overwrite ? 0 : RENAME_NOREPLACE;
+
+	newname = smb_get_name(share, info->target_name, maxlen - offset,
+			       work, 0);
+	if (IS_ERR(newname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		rc = PTR_ERR(newname);
+		newname = NULL;
+		goto out;
+	}
+
+	ksmbd_debug(SMB, "new name(%s)\n", newname);
+	rc = smb_common_rename(work, fp, newname, flags);
+	if (rc) {
+		rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+		goto out;
+	}
+
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 3 pad (1 pad1 + 2 pad2)*/
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Reserved2 = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+out:
+	ksmbd_fd_put(work, fp);
+	kfree(newname);
+	return rc;
+}
+
+/**
+ * set_file_info() - trans2 set file info command dispatcher
+ * @work:	smb work containing set file info command buffer
+ *
+ * Dispatches TRANS2_SET_FILE_INFORMATION to the per-information-level
+ * handler; each handler builds its own trans2 response and NT status.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int set_file_info(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_sfi_req *req;
+	struct smb_com_trans2_sfi_rsp *rsp;
+	__u16 info_level, total_param;
+	int err = 0;
+
+	req = (struct smb_com_trans2_sfi_req *)work->request_buf;
+	rsp = (struct smb_com_trans2_sfi_rsp *)work->response_buf;
+	info_level = le16_to_cpu(req->InformationLevel);
+	total_param = le16_to_cpu(req->TotalParameterCount);
+	/* parameter block must hold at least 4 bytes (per MS-CIFS:
+	 * Fid + InformationLevel) */
+	if (total_param < 4) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		pr_err("invalid total parameter for info_level 0x%x\n",
+		       total_param);
+		return -EINVAL;
+	}
+
+	switch (info_level) {
+	case SMB_SET_FILE_EA:
+		err = smb_set_ea(work);
+		break;
+	case SMB_SET_FILE_ALLOCATION_INFO2:
+		/* fall through */
+	case SMB_SET_FILE_ALLOCATION_INFO:
+		err = smb_set_alloc_size(work);
+		break;
+	case SMB_SET_FILE_END_OF_FILE_INFO2:
+		/* fall through */
+	case SMB_SET_FILE_END_OF_FILE_INFO:
+		err = smb_set_file_size_finfo(work);
+		break;
+	case SMB_SET_FILE_UNIX_BASIC:
+		err = smb_set_unix_fileinfo(work);
+		break;
+	case SMB_SET_FILE_DISPOSITION_INFO:
+	case SMB_SET_FILE_DISPOSITION_INFORMATION:
+		err = smb_set_disposition(work);
+		break;
+	case SMB_SET_FILE_BASIC_INFO2:
+		/* fall through */
+	case SMB_SET_FILE_BASIC_INFO:
+		err = smb_set_time_fileinfo(work);
+		break;
+	case SMB_SET_FILE_RENAME_INFORMATION:
+		err = smb_fileinfo_rename(work);
+		break;
+	default:
+		/* unsupported level: report NOT_IMPLEMENTED to the client */
+		ksmbd_debug(SMB, "info level = %x not implemented yet\n",
+			    info_level);
+		rsp->hdr.Status.CifsError = STATUS_NOT_IMPLEMENTED;
+		return -EOPNOTSUPP;
+	}
+
+	if (err < 0)
+		ksmbd_debug(SMB, "info_level 0x%x failed, err %d\n", info_level,
+			    err);
+	return err;
+}
+
+/**
+ * smb_common_mkdir() - create a directory and store DOS attributes
+ * @work:	smb work; the response header is updated with the result
+ * @name:	pathname of the directory to create
+ * @mode:	permission bits for the new directory
+ *
+ * Shared by the SMB_COM_CREATE_DIRECTORY and TRANS2_CREATE_DIRECTORY
+ * handlers.  When the share stores DOS attributes, the creation time is
+ * recorded in a DOS-attribute xattr on a best-effort basis.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_common_mkdir(struct ksmbd_work *work, char *name, mode_t mode)
+{
+	struct smb_hdr *rsp = work->response_buf;
+	int err;
+
+	/* reject the request on a tree connection without write access */
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		ksmbd_debug(SMB,
+			"returning as user does not have permission to write\n");
+		rsp->Status.CifsError = STATUS_ACCESS_DENIED;
+		return -EACCES;
+	}
+
+	/* perform the VFS operations with the connecting user's fsids */
+	if (ksmbd_override_fsids(work)) {
+		rsp->Status.CifsError = STATUS_NO_MEMORY;
+		return -ENOMEM;
+	}
+
+	err = ksmbd_vfs_mkdir(work, name, mode);
+	if (err) {
+		if (err == -EEXIST) {
+			/* clients without NT-status support get a DOS
+			 * error class/code pair instead */
+			if (!(rsp->Flags2 & SMBFLG2_ERR_STATUS)) {
+				rsp->Status.DosError.ErrorClass = ERRDOS;
+				rsp->Status.DosError.Error =
+					cpu_to_le16(ERRnoaccess);
+			} else
+				rsp->Status.CifsError =
+					STATUS_OBJECT_NAME_COLLISION;
+		} else
+			rsp->Status.CifsError = STATUS_DATA_ERROR;
+		goto out;
+	}
+
+	if (test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
+		__u64 ctime;
+		struct path path, parent_path;
+		struct xattr_dos_attrib da = {0};
+
+		/* re-resolve the new directory (parent locked) so the
+		 * creation time can be stashed in the DOS-attrib xattr */
+		err = ksmbd_vfs_kern_path_locked(work, name, 0,
+						 &parent_path, &path, 1);
+		if (!err) {
+			ctime = ksmbd_UnixTimeToNT(current_time(d_inode(path.dentry)));
+
+			da.version = 4;
+			da.attr = ATTR_DIRECTORY;
+			da.itime = da.create_time = ctime;
+			da.flags = XATTR_DOSINFO_ATTRIB |
+				   XATTR_DOSINFO_CREATE_TIME |
+				   XATTR_DOSINFO_ITIME;
+
+			err = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path.mnt),
+							     &path, &da, false);
+			if (err)
+				ksmbd_debug(SMB, "failed to store creation time in xattr\n");
+			inode_unlock(d_inode(parent_path.dentry));
+			path_put(&path);
+			path_put(&parent_path);
+		}
+		/* xattr storage is best effort: the mkdir itself succeeded */
+		err = 0;
+	}
+
+	rsp->Status.CifsError = STATUS_SUCCESS;
+	rsp->WordCount = 0;
+
+out:
+	ksmbd_revert_fsids(work);
+
+	return err;
+}
+
+/**
+ * create_dir() - trans2 create directory dispatcher
+ * @work:   smb work containing set file info command buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+static int create_dir(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	unsigned int req_len, name_off;
+	char *dirname;
+	int rc;
+
+	/* the directory name starts at ParameterOffset (+4, matching
+	 * the other trans2 handlers) and must lie inside the request */
+	req_len = get_req_len(req);
+	name_off = le16_to_cpu(req->ParameterOffset) + 4;
+	if (name_off >= req_len) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	dirname = smb_get_name(share, (char *)req + name_off,
+			       req_len - name_off, work, false);
+	if (IS_ERR(dirname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(dirname);
+	}
+
+	/* create with all permission bits set (S_IALLUGO) */
+	rc = smb_common_mkdir(work, dirname, S_IALLUGO);
+
+	rsp->ByteCount = 0;
+	kfree(dirname);
+
+	return rc;
+}
+
+/**
+ * smb_trans2() - handler for trans2 commands
+ * @work:	smb work containing trans2 command buffer
+ *
+ * Dispatches the TRANS2 subcommand to its handler, then maps the
+ * handler's errno to an NT status in the response header.
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_trans2(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_req *req = work->request_buf;
+	struct smb_hdr *rsp_hdr = work->response_buf;
+	int err = 0;
+	u16 sub_command = le16_to_cpu(req->SubCommand);
+
+	/* at least one setup word for TRANS2 command
+	 *		MS-CIFS, SMB COM TRANSACTION
+	 */
+	if (req->SetupCount < 1) {
+		pr_err("Wrong setup count in SMB_TRANS2 - indicates wrong request\n");
+		rsp_hdr->Status.CifsError = STATUS_UNSUCCESSFUL;
+		return -EINVAL;
+	}
+
+	ksmbd_debug(SMB, "processing trans2 subcommand: %s\n",
+		    smb_trans2_cmd_to_str(sub_command));
+	switch (sub_command) {
+	case TRANS2_FIND_FIRST:
+		err = find_first(work);
+		break;
+	case TRANS2_FIND_NEXT:
+		err = find_next(work);
+		break;
+	case TRANS2_QUERY_FS_INFORMATION:
+		err = query_fs_info(work);
+		break;
+	case TRANS2_QUERY_PATH_INFORMATION:
+		err = query_path_info(work);
+		break;
+	case TRANS2_SET_PATH_INFORMATION:
+		err = set_path_info(work);
+		break;
+	case TRANS2_SET_FS_INFORMATION:
+		err = set_fs_info(work);
+		break;
+	case TRANS2_QUERY_FILE_INFORMATION:
+		err = query_file_info(work);
+		break;
+	case TRANS2_SET_FILE_INFORMATION:
+		err = set_file_info(work);
+		break;
+	case TRANS2_CREATE_DIRECTORY:
+		err = create_dir(work);
+		break;
+	case TRANS2_GET_DFS_REFERRAL:
+	default:
+		/* DFS referrals are deliberately unsupported */
+		ksmbd_debug(SMB, "sub command 0x%x not implemented yet\n",
+			    sub_command);
+		err = -EINVAL;
+	}
+
+	/*
+	 * Map the errno to an NT status.  NOTE(review): this overwrites
+	 * any more specific status a sub-handler already placed in the
+	 * response (e.g. -EINVAL becomes STATUS_NOT_SUPPORTED even when
+	 * the handler set STATUS_INVALID_PARAMETER) — confirm intended.
+	 * Errnos not listed below keep the handler-set status.
+	 */
+	if (err) {
+		switch (err) {
+		case -EINVAL:
+			rsp_hdr->Status.CifsError = STATUS_NOT_SUPPORTED;
+			break;
+		case -ENOMEM:
+			rsp_hdr->Status.CifsError = STATUS_NO_MEMORY;
+			break;
+		case -ENOENT:
+			rsp_hdr->Status.CifsError = STATUS_NO_SUCH_FILE;
+			break;
+		case -EBUSY:
+			rsp_hdr->Status.CifsError = STATUS_DELETE_PENDING;
+			break;
+		case -EACCES:
+		case -EXDEV:
+			rsp_hdr->Status.CifsError = STATUS_ACCESS_DENIED;
+			break;
+		case -EBADF:
+			rsp_hdr->Status.CifsError = STATUS_FILE_CLOSED;
+			break;
+		case -EFAULT:
+			rsp_hdr->Status.CifsError = STATUS_INVALID_LEVEL;
+			break;
+		case -EOPNOTSUPP:
+			rsp_hdr->Status.CifsError = STATUS_NOT_IMPLEMENTED;
+			break;
+		case -EIO:
+			rsp_hdr->Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+			break;
+		}
+
+		ksmbd_debug(SMB, "%s failed with error %d\n", __func__, err);
+	}
+
+	return err;
+}
+
+/**
+ * smb_mkdir() - handler for smb mkdir
+ * @work:	smb work containing creat directory command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_mkdir(struct ksmbd_work *work)
+{
+	struct smb_com_create_directory_req *req = work->request_buf;
+	struct smb_com_create_directory_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	unsigned int req_len, name_off;
+	char *dirname;
+	int rc;
+
+	/* DirName must start inside the request buffer */
+	req_len = get_req_len(req);
+	name_off = offsetof(struct smb_com_create_directory_req, DirName);
+	if (name_off >= req_len) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	dirname = smb_get_name(share, req->DirName, req_len - name_off,
+			       work, false);
+	if (IS_ERR(dirname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(dirname);
+	}
+
+	/* create with all permission bits set (S_IALLUGO) */
+	rc = smb_common_mkdir(work, dirname, S_IALLUGO);
+
+	rsp->ByteCount = 0;
+	kfree(dirname);
+
+	return rc;
+}
+
+/**
+ * smb_checkdir() - handler to verify whether a specified
+ * path resolves to a valid directory or not
+ *
+ * @work:   smb work containing creat directory command buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb_checkdir(struct ksmbd_work *work)
+{
+	struct smb_com_check_directory_req *req = work->request_buf;
+	struct smb_com_check_directory_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct path path, parent_path;
+	struct kstat stat;
+	char *name, *last;
+	int err;
+	bool caseless_lookup = req->hdr.Flags & SMBFLG_CASELESS;
+	unsigned int maxlen, offset;
+
+	/* DirName must start inside the request buffer */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_check_directory_req, DirName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req->DirName, maxlen - offset, work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	err = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					 &parent_path, &path, caseless_lookup);
+	if (err) {
+		if (err == -ENOENT) {
+			/*
+			 * If the parent directory is valid but not the
+			 * last component - then returns
+			 * STATUS_OBJECT_NAME_NOT_FOUND
+			 * for that case and STATUS_OBJECT_PATH_NOT_FOUND
+			 * if the path is invalid.
+			 */
+			last = strrchr(name, '/');
+			if (last && last[1] != '\0') {
+				/* truncate to the parent and retry the
+				 * lookup on the parent directory only */
+				*last = '\0';
+				last++;
+
+				err = ksmbd_vfs_kern_path_locked(work, name,
+						LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
+						&parent_path, &path,
+						caseless_lookup);
+			} else {
+				ksmbd_debug(SMB, "can't lookup parent %s\n",
+					name);
+				err = -ENOENT;
+			}
+		}
+		if (err) {
+			/* both lookups failed: map the errno to an NT status
+			 * and bail out before any path/lock was taken */
+			ksmbd_debug(SMB, "look up failed err %d\n", err);
+			switch (err) {
+			case -ENOENT:
+				rsp->hdr.Status.CifsError =
+					STATUS_OBJECT_NAME_NOT_FOUND;
+				break;
+			case -ENOMEM:
+				rsp->hdr.Status.CifsError =
+					STATUS_INSUFFICIENT_RESOURCES;
+				break;
+			case -EACCES:
+				rsp->hdr.Status.CifsError =
+					STATUS_ACCESS_DENIED;
+				break;
+			case -EIO:
+				rsp->hdr.Status.CifsError = STATUS_DATA_ERROR;
+				break;
+			default:
+				rsp->hdr.Status.CifsError =
+					STATUS_OBJECT_PATH_SYNTAX_BAD;
+				break;
+			}
+			kfree(name);
+			return err;
+		}
+	}
+
+	/* NOTE(review): no NT status is set when vfs_getattr() fails
+	 * below - confirm the caller maps the returned errno */
+	err = vfs_getattr(&path, &stat, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+	if (err)
+		goto out;
+
+	if (!S_ISDIR(stat.mode)) {
+		rsp->hdr.Status.CifsError = STATUS_NOT_A_DIRECTORY;
+	} else {
+		/* checkdir success, return response to server */
+		rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+		rsp->hdr.WordCount = 0;
+		rsp->ByteCount = 0;
+	}
+
+out:
+	/* lookup succeeded, so the parent lock and both path refs are held */
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+	kfree(name);
+	return err;
+}
+
+/**
+ * smb_process_exit() - handler for smb process exit
+ * @work:	smb work containing process exit command buffer
+ *
+ * Return:	0 on success always
+ * This command is obsolete now. Starting with the LAN Manager 1.0 dialect,
+ * FIDs are no longer associated with PIDs.CIFS clients SHOULD NOT send
+ * SMB_COM_PROCESS_EXIT requests. Instead, CIFS clients SHOULD perform all
+ * process cleanup operations, sending individual file close operations
+ * as needed.Here it is implemented very minimally for sake
+ * of passing smbtorture testcases.
+ */
+int smb_process_exit(struct ksmbd_work *work)
+{
+	struct smb_com_process_exit_rsp *rsp = work->response_buf;
+
+	/* no per-PID cleanup is performed: just return an empty
+	 * success response */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 0;
+	rsp->ByteCount = 0;
+	return 0;
+}
+
+/**
+ * smb_rmdir() - handler for smb rmdir
+ * @work:	smb work containing delete directory command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_rmdir(struct ksmbd_work *work)
+{
+	struct smb_com_delete_directory_req *req = work->request_buf;
+	struct smb_com_delete_directory_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct path path, parent_path;
+	unsigned int maxlen, offset;
+	char *name;
+	int err;
+
+	/* reject the request on a tree connection without write access */
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		ksmbd_debug(SMB,
+			"returning as user does not have permission to write\n");
+		rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		return -EACCES;
+	}
+
+	/* DirName must start inside the request buffer */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_delete_directory_req, DirName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req->DirName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	/* NOTE(review): on lookup failure no NT status is set on the
+	 * response before returning - confirm the caller maps the errno */
+	err = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					 &parent_path, &path, 0);
+	if (err < 0)
+		goto out;
+
+	err = ksmbd_vfs_remove_file(work, &path);
+	/* kern_path_locked() left the parent locked and both refs held */
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+	if (err) {
+		if (err == -ENOTEMPTY)
+			rsp->hdr.Status.CifsError = STATUS_DIRECTORY_NOT_EMPTY;
+		else if (err == -ENOENT)
+			rsp->hdr.Status.CifsError =
+				STATUS_OBJECT_NAME_NOT_FOUND;
+		else
+			rsp->hdr.Status.CifsError = STATUS_DATA_ERROR;
+	} else {
+		/* rmdir success, return response to server */
+		rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+		rsp->hdr.WordCount = 0;
+		rsp->ByteCount = 0;
+	}
+
+out:
+	kfree(name);
+	return err;
+}
+
+/**
+ * smb_unlink() - handler for smb delete file
+ * @work:	smb work containing delete file command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_unlink(struct ksmbd_work *work)
+{
+	struct smb_com_delete_file_req *req = work->request_buf;
+	struct smb_com_delete_file_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	unsigned int maxlen, offset;
+	char *name;
+	int err;
+	struct ksmbd_file *fp;
+
+
+	/* reject the request on a tree connection without write access */
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		ksmbd_debug(SMB,
+			"returning as user does not have permission to write\n");
+		rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		return -EACCES;
+	}
+
+	/* fileName must start inside the request buffer */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_delete_file_req, fileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req->fileName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	/* a file this connection still has open cannot be unlinked:
+	 * report a sharing violation instead */
+	fp = ksmbd_lookup_fd_filename(work, name);
+	if (fp)
+		err = -ESHARE;
+	else {
+		struct path path, parent_path;
+
+		err = ksmbd_vfs_kern_path_locked(work, name,
+						 LOOKUP_NO_SYMLINKS,
+						 &parent_path, &path, 0);
+		if (!err) {
+			err = ksmbd_vfs_remove_file(work, &path);
+			/* drop the parent lock and both path refs taken
+			 * by the locked lookup */
+			inode_unlock(d_inode(parent_path.dentry));
+			path_put(&path);
+			path_put(&parent_path);
+		}
+	}
+
+	/* map the errno to an NT status for the response */
+	if (err) {
+		if (err == -EISDIR)
+			rsp->hdr.Status.CifsError = STATUS_FILE_IS_A_DIRECTORY;
+		else if (err == -ESHARE)
+			rsp->hdr.Status.CifsError = STATUS_SHARING_VIOLATION;
+		else if (err == -EACCES || err == -EXDEV)
+			rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		else
+			rsp->hdr.Status.CifsError =
+				STATUS_OBJECT_NAME_NOT_FOUND;
+	} else {
+		rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+		rsp->hdr.WordCount = 0;
+		rsp->ByteCount = 0;
+	}
+
+	/* safe when fp is NULL */
+	ksmbd_fd_put(work, fp);
+	kfree(name);
+	return err;
+}
+
+/**
+ * smb_nt_cancel() - handler for smb cancel command
+ * @work:	smb work containing cancel command buffer
+ *
+ * Scans the connection's in-flight request list for the request with
+ * the same Mid and marks it so that no response will be sent for it.
+ *
+ * Return:	0
+ */
+int smb_nt_cancel(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct smb_hdr *hdr = (struct smb_hdr *)work->request_buf;
+	struct smb_hdr *work_hdr;
+	struct ksmbd_work *new_work;
+
+	ksmbd_debug(SMB, "smb cancel called on mid %u\n", hdr->Mid);
+
+	spin_lock(&conn->request_lock);
+	list_for_each_entry(new_work, &conn->requests, request_entry) {
+		work_hdr = (struct smb_hdr *)new_work->request_buf;
+		/* both Mid fields are raw wire values, so comparing them
+		 * without byte-order conversion is safe */
+		if (work_hdr->Mid == hdr->Mid) {
+			ksmbd_debug(SMB, "smb with mid %u cancelled command = 0x%x\n",
+			       hdr->Mid, work_hdr->Command);
+			new_work->send_no_response = 1;
+			list_del_init(&new_work->request_entry);
+			/* NOTE(review): decrement presumably keeps the SMB
+			 * signing sequence in sync since no reply will be
+			 * sent - confirm */
+			new_work->sess->sequence_number--;
+			break;
+		}
+	}
+	spin_unlock(&conn->request_lock);
+
+	/* For SMB_COM_NT_CANCEL command itself send no response */
+	work->send_no_response = 1;
+	return 0;
+}
+
+/**
+ * smb_nt_rename() - handler for smb nt rename (hard link) command
+ * @work:	smb work containing nt rename command buffer
+ *
+ * Only the CREATE_HARD_LINK flavour of SMB_COM_NT_RENAME is supported:
+ * the old and new names are parsed out of the request (bounds-checked
+ * against the PDU length) and a hard link is created.
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_nt_rename(struct ksmbd_work *work)
+{
+	struct smb_com_nt_rename_req *req = work->request_buf;
+	struct smb_com_rename_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	char *oldname, *newname;
+	int oldname_len, err;
+	unsigned int maxlen, offset;
+
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		ksmbd_debug(SMB,
+			"returning as user does not have permission to write\n");
+		rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		return -EACCES;
+	}
+
+	/* only hard-link creation is implemented for this command */
+	if (le16_to_cpu(req->Flags) != CREATE_HARD_LINK) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	maxlen = get_req_len(req);
+	/*
+	 * Use the layout of the actual request type: smb_com_nt_rename_req
+	 * carries extra fields (Flags, cluster count) that the plain
+	 * smb_com_rename_req does not, so taking offsetof() on the wrong
+	 * struct would misplace the bounds check and the new-name offset.
+	 */
+	offset = offsetof(struct smb_com_nt_rename_req, OldFileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	oldname = smb_get_name(share, req->OldFileName, maxlen - offset,
+			       work, false);
+	if (IS_ERR(oldname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(oldname);
+	}
+
+	/* measure the on-wire length of the first name, incl. terminator */
+	if (is_smbreq_unicode(&req->hdr))
+		oldname_len = smb1_utf16_name_length((__le16 *)req->OldFileName,
+						     maxlen - offset);
+	else {
+		oldname_len = strlen(oldname);
+		oldname_len++;
+	}
+
+	/* 2 bytes for BufferFormat field and padding byte */
+	offset += oldname_len + 2;
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		kfree(oldname);
+		return -EINVAL;
+	}
+
+	newname = smb_get_name(share, (char *)req + offset, maxlen - offset,
+			       work, false);
+	if (IS_ERR(newname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		kfree(oldname);
+		return PTR_ERR(newname);
+	}
+	ksmbd_debug(SMB, "oldname %s, newname %s, oldname_len %d, unicode %d\n",
+		    oldname, newname, oldname_len,
+		    is_smbreq_unicode(&req->hdr));
+
+	err = ksmbd_vfs_link(work, oldname, newname);
+	if (err == -EACCES)
+		rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+	else if (err < 0)
+		rsp->hdr.Status.CifsError = STATUS_NOT_SAME_DEVICE;
+
+	kfree(newname);
+	kfree(oldname);
+	return err;
+}
+
+/* Pipe shares have no backing inode: report them as plain directories. */
+static __le32 smb_query_info_pipe(struct ksmbd_share_config *share,
+				  struct kstat *st)
+{
+	st->mode = S_IFDIR;
+
+	return 0;
+}
+
+/*
+ * smb_query_info_path() - stat the file named in a QUERY_INFORMATION
+ * request, impersonating the session user via ksmbd_override_fsids().
+ *
+ * Return: 0 with *st filled in on success, otherwise an NT status code.
+ */
+static __le32 smb_query_info_path(struct ksmbd_work *work, struct kstat *st)
+{
+	struct smb_com_query_information_req *req = work->request_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct path path, parent_path;
+	unsigned int maxlen, offset;
+	char *name;
+	__le32 err = 0;
+	int ret;
+
+	/* bounds-check the variable-length file name against the PDU */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_query_information_req, FileName);
+	if (offset >= maxlen)
+		return STATUS_INVALID_PARAMETER;
+
+	name = smb_get_name(share, req->FileName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name))
+		return STATUS_OBJECT_NAME_INVALID;
+
+	if (ksmbd_override_fsids(work)) {
+		kfree(name);
+		return STATUS_NO_MEMORY;
+	}
+
+	ret = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					 &parent_path, &path, 0);
+	if (ret) {
+		pr_err("look up failed err %d\n", ret);
+
+		/*
+		 * On failure 'path' is not initialized and must not be
+		 * inspected.  With LOOKUP_NO_SYMLINKS a symlink in the
+		 * path makes the lookup fail with -ELOOP, so use the
+		 * error code to detect the symlink case instead of the
+		 * former (invalid) d_is_symlink(path.dentry) check.
+		 */
+		if (ret == -ELOOP)
+			err = STATUS_ACCESS_DENIED;
+		else
+			err = STATUS_OBJECT_NAME_NOT_FOUND;
+		goto out;
+	}
+
+	/* NOTE(review): a vfs_getattr() errno ends up returned as an NT
+	 * status; pre-existing behaviour kept as-is */
+	err = vfs_getattr(&path, st, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+out:
+	ksmbd_revert_fsids(work);
+	kfree(name);
+	return err;
+}
+
+/**
+ * smb_query_info() - handler for query information command
+ * @work:	smb work containing query info command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_query_info(struct ksmbd_work *work)
+{
+	struct smb_com_query_information_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct kstat st = {0,};
+	__le32 status;
+	__u16 attr = 0;
+	int idx;
+
+	/* pipe shares are synthesized; everything else is stat'ed on disk */
+	if (test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_PIPE))
+		status = smb_query_info_pipe(share, &st);
+	else
+		status = smb_query_info_path(work, &st);
+
+	if (le32_to_cpu(status) != 0) {
+		rsp->hdr.Status.CifsError = status;
+		return -EINVAL;
+	}
+
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+
+	/* map POSIX mode bits onto DOS attribute flags */
+	if (st.mode & S_ISVTX)
+		attr |= (ATTR_HIDDEN | ATTR_SYSTEM);
+	if (!(st.mode & 0222))
+		attr |= ATTR_READONLY;
+	if (S_ISDIR(st.mode))
+		attr |= ATTR_DIRECTORY;
+	rsp->attr = cpu_to_le16(attr);
+
+	rsp->last_write_time = cpu_to_le32(st.mtime.tv_sec);
+	rsp->size = cpu_to_le32((u32)st.size);
+	for (idx = 0; idx < 5; idx++)
+		rsp->reserved[idx] = 0;
+
+	rsp->ByteCount = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+	return 0;
+}
+
+/**
+ * smb_closedir() - handler closing dir handle, opened for readdir
+ * @work:	smb work containing find close command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_closedir(struct ksmbd_work *work)
+{
+	struct smb_com_findclose_req *req = work->request_buf;
+	struct smb_com_close_rsp *rsp = work->response_buf;
+	int ret;
+
+	ksmbd_debug(SMB, "SMB_COM_FIND_CLOSE2 called for fid %u\n",
+		    req->FileID);
+
+	/* the response carries no parameters or data */
+	rsp->hdr.WordCount = 0;
+	rsp->ByteCount = 0;
+
+	ret = ksmbd_close_fd(work, req->FileID);
+	rsp->hdr.Status.CifsError =
+		ret ? STATUS_INVALID_HANDLE : STATUS_SUCCESS;
+	return ret;
+}
+
+/**
+ * convert_open_flags() - convert smb open flags to file open flags
+ * @file_present:	is file already present
+ * @mode:		smb file open mode
+ * @disposition:	smb file disposition information
+ * @may_flags:		output: MAY_* mask matching the chosen open flags
+ *
+ * Return:	converted file open flags, or a negative errno when the
+ *		requested disposition cannot be honoured
+ */
+static int convert_open_flags(bool file_present,
+			      __u16 mode, __u16 disposition,
+			      int *may_flags)
+{
+	int oflags;
+
+	/* the low three mode bits select the access mode */
+	switch (mode & 0x0007) {
+	case SMBOPEN_WRITE:
+		oflags = O_WRONLY;
+		break;
+	case SMBOPEN_READWRITE:
+		oflags = O_RDWR;
+		break;
+	case SMBOPEN_READ:
+	default:
+		oflags = O_RDONLY;
+		break;
+	}
+
+	if (mode & SMBOPEN_WRITE_THROUGH)
+		oflags |= O_SYNC;
+
+	if (file_present) {
+		/* file exists: the "open" half of the disposition applies */
+		switch (disposition & 0x0003) {
+		case SMBOPEN_DISPOSITION_NONE:
+			return -EEXIST;
+		case SMBOPEN_OAPPEND:
+			oflags |= O_APPEND;
+			break;
+		case SMBOPEN_OTRUNC:
+			oflags |= O_TRUNC;
+			break;
+		default:
+			break;
+		}
+	} else {
+		/* file absent: only the "create" half is meaningful */
+		switch (disposition & 0x0010) {
+		case SMBOPEN_DISPOSITION_NONE:
+			return -EINVAL;
+		case SMBOPEN_OCREATE:
+			oflags |= O_CREAT;
+			break;
+		default:
+			break;
+		}
+	}
+
+	*may_flags = ksmbd_openflags_to_mayflags(oflags);
+
+	return oflags;
+}
+
+/**
+ * smb_open_andx() - smb andx open method handler
+ * @work:	smb work containing buffer for andx open command buffer
+ *
+ * Looks up (or creates) the named file, opens it with the requested
+ * access/disposition, optionally grants an oplock, and fills in the
+ * OPEN_ANDX response.
+ *
+ * Return:	error if there is error while processing current command,
+ *		otherwise pointer to next andx command in the chain
+ */
+int smb_open_andx(struct ksmbd_work *work)
+{
+	struct smb_com_openx_req *req = work->request_buf;
+	struct smb_com_openx_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct path path, parent_path;
+	struct kstat stat;
+	int oplock_flags, file_info, open_flags, may_flags;
+	char *name;
+	bool file_present = true;
+	umode_t mode = 0;
+	int err;
+	struct ksmbd_file *fp = NULL;
+	int oplock_rsp = OPLOCK_NONE, share_ret;
+	unsigned int maxlen, offset;
+
+	/* default status; every exit path below overwrites it */
+	rsp->hdr.Status.CifsError = STATUS_UNSUCCESSFUL;
+
+	/* check for sharing mode flag */
+	if ((le16_to_cpu(req->Mode) & SMBOPEN_SHARING_MODE) >
+			SMBOPEN_DENY_NONE) {
+		/* report this one as an old-style DOS error */
+		rsp->hdr.Status.DosError.ErrorClass = ERRDOS;
+		rsp->hdr.Status.DosError.Error = cpu_to_le16(ERRbadaccess);
+		rsp->hdr.Flags2 &= ~SMBFLG2_ERR_STATUS;
+
+		/* clear WordCount (1 byte) and ByteCount (2 bytes) */
+		memset(&rsp->hdr.WordCount, 0, 3);
+		return -EINVAL;
+	}
+
+	/* bounds-check the variable-length file name against the PDU */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_openx_req, fileName);
+
+	/* a unicode name is preceded by one pad byte */
+	if (is_smbreq_unicode(&req->hdr))
+		offset++;
+
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, (char *)req + offset, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	/* act with the credentials of the session user from here on */
+	if (ksmbd_override_fsids(work)) {
+		kfree(name);
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		return -ENOMEM;
+	}
+
+	err = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					 &parent_path, &path,
+					 req->hdr.Flags & SMBFLG_CASELESS);
+	if (err) {
+		if (err == -EACCES || err == -EXDEV)
+			goto out;
+		/* other lookup failures mean "may have to create it" */
+		file_present = false;
+	} else {
+		err = vfs_getattr(&path, &stat, STATX_BASIC_STATS,
+				  AT_STATX_SYNC_AS_STAT);
+		if (err)
+			goto free_path;
+	}
+
+	oplock_flags =
+		le16_to_cpu(req->OpenFlags) & (REQ_OPLOCK | REQ_BATCHOPLOCK);
+
+	open_flags = convert_open_flags(file_present,
+					le16_to_cpu(req->Mode),
+					le16_to_cpu(req->OpenFunction),
+					&may_flags);
+	if (open_flags < 0) {
+		ksmbd_debug(SMB, "create_dispostion returned %d\n", open_flags);
+		if (file_present)
+			goto free_path;
+		else {
+			err = -ENOENT;
+			goto out;
+		}
+	}
+
+	/* refuse write-mode opens of files without any write bits */
+	if (file_present && !(stat.mode & 0222)) {
+		if ((open_flags & O_ACCMODE) == O_WRONLY ||
+		    (open_flags & O_ACCMODE) == O_RDWR) {
+			ksmbd_debug(SMB, "readonly file(%s)\n", name);
+			rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+			memset(&rsp->hdr.WordCount, 0, 3);
+			goto free_path;
+		}
+	}
+
+	if (!file_present && (open_flags & O_CREAT)) {
+		mode |= 0777;
+		if (le16_to_cpu(req->FileAttributes) & ATTR_READONLY)
+			mode &= ~0222;
+
+		mode |= S_IFREG;
+		/* creates the file and fills in path/parent_path */
+		err = smb_common_create(work, &parent_path, &path, name,
+					open_flags, mode, false);
+		if (err) {
+			ksmbd_debug(SMB, "smb_common_create err: %d\n", err);
+			goto out;
+		}
+
+		err = vfs_getattr(&path, &stat, STATX_BASIC_STATS,
+				  AT_STATX_SYNC_AS_STAT);
+		if (err)
+			goto free_path;
+	} else if (file_present) {
+		err = inode_permission(mnt_idmap(path.mnt),
+				       d_inode(path.dentry),
+				       may_flags);
+		if (err)
+			goto free_path;
+	}
+
+	/* do not open below a directory that is pending deletion */
+	err = ksmbd_query_inode_status(path.dentry->d_parent);
+	if (err == KSMBD_INODE_STATUS_PENDING_DELETE) {
+		err = -EBUSY;
+		goto free_path;
+	}
+
+	err = 0;
+	ksmbd_debug(SMB, "(%s) open_flags = 0x%x, oplock_flags 0x%x\n", name,
+		    open_flags, oplock_flags);
+	/* open  file and get FID */
+	fp = ksmbd_vfs_dentry_open(work, &path, open_flags, 0, file_present);
+	if (IS_ERR(fp)) {
+		err = PTR_ERR(fp);
+		fp = NULL;
+		goto free_path;
+	}
+	fp->pid = le16_to_cpu(req->hdr.Pid);
+
+	/* publish this open on the inode's open-file list */
+	write_lock(&fp->f_ci->m_lock);
+	list_add(&fp->node, &fp->f_ci->m_fp_list);
+	write_unlock(&fp->f_ci->m_lock);
+
+	share_ret = ksmbd_smb_check_shared_mode(fp->filp, fp);
+	if (smb1_oplock_enable &&
+	    test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_OPLOCKS) &&
+	    !S_ISDIR(file_inode(fp->filp)->i_mode) && oplock_flags) {
+		/* Client cannot request levelII oplock directly */
+		err = smb_grant_oplock(work, oplock_flags, fp->volatile_id, fp,
+				       le16_to_cpu(req->hdr.Tid), NULL, 0);
+		if (err)
+			goto free_path;
+	} else {
+		/* no oplock path: enforce delete-pending and share modes */
+		if (ksmbd_inode_pending_delete(fp)) {
+			err = -EBUSY;
+			goto free_path;
+		}
+
+		if (share_ret < 0) {
+			err = -EPERM;
+			goto free_path;
+		}
+	}
+
+	oplock_rsp = fp->f_opinfo != NULL ? fp->f_opinfo->level : 0;
+
+	/* open success, send back response */
+	if (file_present) {
+		if (!(open_flags & O_TRUNC))
+			file_info = F_OPENED;
+		else
+			file_info = F_OVERWRITTEN;
+	} else
+		file_info = F_CREATED;
+
+	if (oplock_rsp)
+		file_info |= SMBOPEN_LOCK_GRANTED;
+
+	/* prefer the real birth time when the filesystem provides one */
+	if (stat.result_mask & STATX_BTIME)
+		fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
+	else
+		fp->create_time = ksmbd_UnixTimeToNT(stat.ctime);
+	if (file_present) {
+		if (test_share_config_flag(work->tcon->share_conf,
+					   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
+			struct xattr_dos_attrib da;
+
+			/* stored DOS attributes override the stat times */
+			err = ksmbd_vfs_get_dos_attrib_xattr(mnt_idmap(path.mnt),
+							     path.dentry, &da);
+			if (err > 0) {
+				fp->create_time = da.create_time;
+				fp->itime = da.itime;
+			}
+			err = 0;
+		}
+	} else {
+		if (test_share_config_flag(work->tcon->share_conf,
+					   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
+			struct xattr_dos_attrib da = {0};
+
+			da.version = 4;
+			da.attr = ATTR_NORMAL;
+			da.itime = da.create_time = fp->create_time;
+			da.flags = XATTR_DOSINFO_ATTRIB |
+				   XATTR_DOSINFO_CREATE_TIME |
+				   XATTR_DOSINFO_ITIME;
+
+			/* best effort: failure only loses the create time */
+			err = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path.mnt),
+							     &path, &da, false);
+			if (err)
+				ksmbd_debug(SMB, "failed to store creation time in xattr\n");
+			err = 0;
+		}
+	}
+
+	/* prepare response buffer */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 0x0F;
+	rsp->Fid = fp->volatile_id;
+	rsp->FileAttributes = cpu_to_le16(ATTR_NORMAL);
+	rsp->LastWriteTime = cpu_to_le32(stat.mtime.tv_sec);
+	rsp->EndOfFile = cpu_to_le32(stat.size);
+	switch (open_flags & O_ACCMODE) {
+	case O_RDONLY:
+		rsp->Access = cpu_to_le16(SMB_DA_ACCESS_READ);
+		break;
+	case O_WRONLY:
+		rsp->Access = cpu_to_le16(SMB_DA_ACCESS_WRITE);
+		break;
+	case O_RDWR:
+		rsp->Access = cpu_to_le16(SMB_DA_ACCESS_READ_WRITE);
+		break;
+	default:
+		rsp->Access = cpu_to_le16(SMB_DA_ACCESS_READ);
+		break;
+	}
+
+	rsp->FileType = 0;
+	rsp->IPCState = 0;
+	rsp->Action = cpu_to_le16(file_info);
+	rsp->Reserved = 0;
+	rsp->ByteCount = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+
+free_path:
+	/* NOTE(review): every jump here follows a successful lookup or
+	 * create, so the parent lock and path refs are held — keep it
+	 * that way when adding new error paths */
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+out:
+	ksmbd_revert_fsids(work);
+	if (err) {
+		/* map errno to the matching NT status */
+		if (err == -ENOSPC)
+			rsp->hdr.Status.CifsError = STATUS_DISK_FULL;
+		else if (err == -EMFILE)
+			rsp->hdr.Status.CifsError =
+				STATUS_TOO_MANY_OPENED_FILES;
+		else if (err == -EBUSY)
+			rsp->hdr.Status.CifsError = STATUS_DELETE_PENDING;
+		else if (err == -ENOENT)
+			rsp->hdr.Status.CifsError =
+				STATUS_OBJECT_NAME_NOT_FOUND;
+		else if (err == -EACCES || err == -EXDEV)
+			rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		else
+			rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+		if (fp)
+			ksmbd_close_fd(work, fp->volatile_id);
+	} else
+		ksmbd_update_fstate(&work->sess->file_table, fp, FP_INITED);
+
+	kfree(name);
+	if (!rsp->hdr.WordCount)
+		return err;
+
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+
+	return err;
+}
+
+/**
+ * smb_setattr() - set file attributes
+ * @work:	smb work containing setattr command
+ *
+ * Maps the request's DOS attributes onto mode bits (ATTR_READONLY clears
+ * the write bits, an empty attribute field restores owner write) and
+ * applies the requested last-write time.
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_setattr(struct ksmbd_work *work)
+{
+	struct smb_com_setattr_req *req = work->request_buf;
+	struct smb_com_setattr_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct path path, parent_path;
+	struct kstat stat;
+	struct iattr attrs;
+	int err = 0;
+	char *name;
+	unsigned int maxlen, offset;
+	__u16 dos_attr;
+
+	/* bounds-check the variable-length file name against the PDU */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_setattr_req, fileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req->fileName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	err = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					 &parent_path, &path,
+					 req->hdr.Flags & SMBFLG_CASELESS);
+	if (err) {
+		ksmbd_debug(SMB, "look up failed err %d\n", err);
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_NOT_FOUND;
+		err = 0;
+		goto out;
+	}
+
+	err = vfs_getattr(&path, &stat, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+
+	/*
+	 * The locked lookup holds the parent inode lock plus references
+	 * on both paths.  Release them unconditionally here so that a
+	 * vfs_getattr() failure no longer leaks the lock and references
+	 * (the old code jumped to 'out' with everything still held).
+	 */
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+
+	if (err)
+		goto out;
+
+	attrs.ia_valid = 0;
+	attrs.ia_mode = 0;
+
+	/* an empty DOS attribute field means "make it writable again" */
+	dos_attr = le16_to_cpu(req->attr);
+	if (!dos_attr)
+		attrs.ia_mode = stat.mode | 0200;
+
+	if (dos_attr & ATTR_READONLY)
+		attrs.ia_mode = stat.mode & ~0222;
+
+	if (attrs.ia_mode)
+		attrs.ia_valid |= ATTR_MODE;
+
+	attrs.ia_mtime.tv_sec = le32_to_cpu(req->LastWriteTime);
+	attrs.ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
+
+	err = ksmbd_vfs_setattr(work, name, 0, &attrs);
+	if (err)
+		goto out;
+
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 0;
+	rsp->ByteCount = 0;
+
+out:
+	kfree(name);
+	if (err) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * smb_query_information_disk() - determine capacity and remaining free space
+ * @work:	smb work containing command
+ *
+ * Builds the legacy SMB_COM_QUERY_INFORMATION_DISK response, scaling the
+ * filesystem statistics down so they fit the 16-bit wire fields.
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_query_information_disk(struct ksmbd_work *work)
+{
+	struct smb_hdr *req = work->request_buf;
+	struct smb_com_query_information_disk_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct ksmbd_tree_connect *tree_conn;
+	struct kstatfs stfs;
+	struct path path;
+	int err = 0;
+
+	u16 blocks_per_unit, bytes_per_block, total_units, free_units;
+	u64 total_blocks, free_blocks;
+	u32 block_size, unit_size;
+
+	/* the request carries no parameter words */
+	if (req->WordCount) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	tree_conn = work->tcon;
+	if (!tree_conn) {
+		err = -ENOENT;
+		goto out;
+	}
+
+	share = tree_conn->share_conf;
+
+	/* pipe shares have no backing filesystem to report on */
+	if (test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
+		err = -ENOENT;
+		goto out;
+	}
+
+	if (ksmbd_override_fsids(work)) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = kern_path(share->path, LOOKUP_NO_SYMLINKS, &path);
+	if (err)
+		goto out_fsids;
+
+	err = vfs_statfs(&path, &stfs);
+	if (err) {
+		pr_err("cannot do stat of path %s\n", share->path);
+		goto out_path;
+	}
+
+	/* guard against filesystems reporting a zero fragment size */
+	unit_size = stfs.f_frsize ? stfs.f_bsize / stfs.f_frsize : 1;
+	block_size = stfs.f_bsize;
+	total_blocks = stfs.f_blocks;
+	free_blocks = stfs.f_bavail;
+
+	/*
+	 * clamp block size to at most 512 bytes for compatibility with
+	 * older clients
+	 */
+	while (block_size > 512) {
+		block_size >>= 1;
+		unit_size <<= 1;
+	}
+
+	/* adjust blocks and sizes until they fit into a u16 */
+	while (total_blocks >= 0xFFFF) {
+		total_blocks >>= 1;
+		free_blocks >>= 1;
+		if ((unit_size <<= 1) > 0xFFFF) {
+			unit_size >>= 1;
+			total_blocks = 0xFFFF;
+			free_blocks <<= 1;
+			break;
+		}
+	}
+
+	total_units = (total_blocks >= 0xFFFF) ? 0xFFFF : (u16)total_blocks;
+	free_units = (free_blocks >= 0xFFFF) ? 0xFFFF : (u16)free_blocks;
+	bytes_per_block = (u16)block_size;
+	blocks_per_unit = (u16)unit_size;
+
+	rsp->hdr.WordCount = 5;
+
+	/* SMB is little-endian on the wire: convert like the other handlers */
+	rsp->TotalUnits = cpu_to_le16(total_units);
+	rsp->BlocksPerUnit = cpu_to_le16(blocks_per_unit);
+	rsp->BlockSize = cpu_to_le16(bytes_per_block);
+	rsp->FreeUnits = cpu_to_le16(free_units);
+	rsp->Pad = 0;
+	rsp->ByteCount = 0;
+
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+
+out_path:
+	path_put(&path);
+out_fsids:
+	ksmbd_revert_fsids(work);
+out:
+	if (err) {
+		switch (err) {
+		case -EINVAL:
+			rsp->hdr.Status.CifsError = STATUS_NOT_SUPPORTED;
+			break;
+		case -ENOMEM:
+			rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+			break;
+		case -ENOENT:
+			rsp->hdr.Status.CifsError = STATUS_NO_SUCH_FILE;
+			break;
+		case -EBUSY:
+			rsp->hdr.Status.CifsError = STATUS_DELETE_PENDING;
+			break;
+		case -EACCES:
+		case -EXDEV:
+			rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+			break;
+		case -EBADF:
+			rsp->hdr.Status.CifsError = STATUS_FILE_CLOSED;
+			break;
+		case -EFAULT:
+			rsp->hdr.Status.CifsError = STATUS_INVALID_LEVEL;
+			break;
+		case -EOPNOTSUPP:
+			rsp->hdr.Status.CifsError = STATUS_NOT_IMPLEMENTED;
+			break;
+		case -EIO:
+			rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+			break;
+		}
+		ksmbd_debug(SMB, "%s failed with error %d\n", __func__, err);
+	}
+
+	return err;
+}
+
+/**
+ * smb1_is_sign_req() - handler for checking packet signing status
+ * @work:	smb work containing the received request buffer
+ * @command:	SMB command code of the request
+ *
+ * Return:	true if packed is signed, false otherwise
+ */
+bool smb1_is_sign_req(struct ksmbd_work *work, unsigned int command)
+{
+#if 0
+	struct smb_hdr *rcv_hdr1 = (struct smb_hdr *)work->request_buf;
+
+	/*
+	 * FIXME: signed tree connect failed by signing error
+	 * with windows XP client. For now, Force to turn off
+	 * signing feature in SMB1.
+	 */
+	if ((rcv_hdr1->Flags2 & SMBFLG2_SECURITY_SIGNATURE) &&
+			command != SMB_COM_SESSION_SETUP_ANDX)
+		return true;
+	return false;
+#else
+	/* SMB1 signing is disabled (see the FIXME above): never signed */
+	return false;
+#endif
+}
+
+/**
+ * smb1_check_sign_req() - handler for req packet sign processing
+ * @work:	smb work containing the request to verify
+ *
+ * Recomputes the SMB1 signature over the received PDU using the next
+ * signing sequence number and compares it against the signature the
+ * client placed in the header.
+ *
+ * Return:	1 on success, 0 otherwise
+ */
+int smb1_check_sign_req(struct ksmbd_work *work)
+{
+	struct smb_hdr *rcv_hdr1 = (struct smb_hdr *)work->request_buf;
+	char signature_req[CIFS_SMB1_SIGNATURE_SIZE];
+	char signature[20];	/* output buffer for ksmbd_sign_smb1_pdu() */
+	struct kvec iov[1];
+
+	/* save the client's signature, then overwrite the field with the
+	 * expected sequence number before re-hashing the PDU */
+	memcpy(signature_req, rcv_hdr1->Signature.SecuritySignature,
+	       CIFS_SMB1_SIGNATURE_SIZE);
+	rcv_hdr1->Signature.Sequence.SequenceNumber =
+		cpu_to_le32(++work->sess->sequence_number);
+	rcv_hdr1->Signature.Sequence.Reserved = 0;
+
+	iov[0].iov_base = rcv_hdr1->Protocol;
+	iov[0].iov_len = be32_to_cpu(rcv_hdr1->smb_buf_length);
+
+	if (ksmbd_sign_smb1_pdu(work->sess, iov, 1, signature))
+		return 0;
+
+	/* only the first CIFS_SMB1_SIGNATURE_SIZE bytes go on the wire */
+	if (memcmp(signature, signature_req, CIFS_SMB1_SIGNATURE_SIZE)) {
+		ksmbd_debug(SMB, "bad smb1 sign\n");
+		return 0;
+	}
+
+	return 1;
+}
+
+/**
+ * smb1_set_sign_rsp() - handler for rsp packet sign processing
+ * @work:	smb work containing the response to sign
+ *
+ * Stamps the next signing sequence number into the response header and
+ * fills in the signature field; on signing failure the field is zeroed.
+ */
+void smb1_set_sign_rsp(struct ksmbd_work *work)
+{
+	struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
+	char signature[20];	/* output buffer for ksmbd_sign_smb1_pdu() */
+	struct kvec iov[2];
+	int n_vec = 1;
+
+	rsp_hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+	rsp_hdr->Signature.Sequence.SequenceNumber =
+		cpu_to_le32(++work->sess->sequence_number);
+	rsp_hdr->Signature.Sequence.Reserved = 0;
+
+	iov[0].iov_base = rsp_hdr->Protocol;
+	iov[0].iov_len = be32_to_cpu(rsp_hdr->smb_buf_length);
+
+#if 0
+	/* XXX smb1 signing is broken iirc */
+	if (work->aux_payload_sz) {
+		iov[0].iov_len -= work->aux_payload_sz;
+
+		iov[1].iov_base = work->aux_payload_buf;
+		iov[1].iov_len = work->aux_payload_sz;
+		n_vec++;
+	}
+#endif
+
+	/* on signing failure, send an all-zero signature rather than none */
+	if (ksmbd_sign_smb1_pdu(work->sess, iov, n_vec, signature))
+		memset(rsp_hdr->Signature.SecuritySignature, 0,
+		       CIFS_SMB1_SIGNATURE_SIZE);
+	else
+		memcpy(rsp_hdr->Signature.SecuritySignature, signature,
+		       CIFS_SMB1_SIGNATURE_SIZE);
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/fs/smb/server/smb1pdu.h	2023-11-07 13:38:44.042256145 +0100
@@ -0,0 +1,1653 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __SMB1PDU_H
+#define __SMB1PDU_H
+
+#define MAX_CIFS_HDR_SIZE 0x58
+
+#define SMB_HEADER_SIZE			32
+
+#define SMB1_CLIENT_GUID_SIZE		(16)
+#define SMB1_MAX_MPX_COUNT		10
+#define SMB1_MAX_VCS			1
+#define SMB1_MAX_RAW_SIZE		65536
+#define MAX_CIFS_LOOKUP_BUFFER_SIZE	(16*1024)
+
+/*
+ * Size of the ntlm client response
+ */
+#define CIFS_AUTH_RESP_SIZE		24
+#define CIFS_SMB1_SIGNATURE_SIZE	8
+#define CIFS_SMB1_SESSKEY_SIZE		16
+
+/* capability bits this server advertises in its NEGOTIATE response */
+#define SMB1_SERVER_CAPS					\
+	(CAP_UNICODE | CAP_LARGE_FILES | CAP_EXTENDED_SECURITY |\
+	 CAP_NT_SMBS | CAP_STATUS32 | CAP_LOCK_AND_READ |	\
+	 CAP_NT_FIND | CAP_UNIX | CAP_LARGE_READ_X |		\
+	 CAP_LARGE_WRITE_X | CAP_LEVEL_II_OPLOCKS)
+
+#define SMB1_SERVER_SECU  (SECMODE_USER | SECMODE_PW_ENCRYPT)
+
+/* Service Type of TreeConnect*/
+#define SERVICE_DISK_SHARE	"A:"
+#define SERVICE_IPC_SHARE	"IPC"
+#define SERVICE_PRINTER_SHARE	"LPT1:"
+#define SERVICE_COMM		"COMM"
+
+#define NATIVE_FILE_SYSTEM	"NTFS"
+
+#define SMB_NO_MORE_ANDX_COMMAND 0xFF
+/* appears as 0xFF 'S' 'M' 'B' on the little-endian wire */
+#define SMB1_PROTO_NUMBER cpu_to_le32(0x424d53ff)
+
+/* Transact2 subcommand codes */
+#define TRANS2_OPEN                   0x00
+#define TRANS2_FIND_FIRST             0x01
+#define TRANS2_FIND_NEXT              0x02
+#define TRANS2_QUERY_FS_INFORMATION   0x03
+#define TRANS2_SET_FS_INFORMATION     0x04
+#define TRANS2_QUERY_PATH_INFORMATION 0x05
+#define TRANS2_SET_PATH_INFORMATION   0x06
+#define TRANS2_QUERY_FILE_INFORMATION 0x07
+#define TRANS2_SET_FILE_INFORMATION   0x08
+#define TRANS2_CREATE_DIRECTORY       0x0d
+#define TRANS2_GET_DFS_REFERRAL       0x10
+#define TRANS2_REPORT_DFS_INCOSISTENCY 0x11
+
+/* SMB Transact (Named Pipe) subcommand codes */
+#define TRANS_SET_NMPIPE_STATE      0x0001
+#define TRANS_RAW_READ_NMPIPE       0x0011
+#define TRANS_QUERY_NMPIPE_STATE    0x0021
+#define TRANS_QUERY_NMPIPE_INFO     0x0022
+#define TRANS_PEEK_NMPIPE           0x0023
+#define TRANS_TRANSACT_NMPIPE       0x0026
+#define TRANS_RAW_WRITE_NMPIPE      0x0031
+#define TRANS_READ_NMPIPE           0x0036
+#define TRANS_WRITE_NMPIPE          0x0037
+#define TRANS_WAIT_NMPIPE           0x0053
+#define TRANS_CALL_NMPIPE           0x0054
+
+/* NT Transact subcommand codes */
+#define NT_TRANSACT_CREATE            0x01
+#define NT_TRANSACT_IOCTL             0x02
+#define NT_TRANSACT_SET_SECURITY_DESC 0x03
+#define NT_TRANSACT_NOTIFY_CHANGE     0x04
+#define NT_TRANSACT_RENAME            0x05
+#define NT_TRANSACT_QUERY_SECURITY_DESC 0x06
+#define NT_TRANSACT_GET_USER_QUOTA    0x07
+#define NT_TRANSACT_SET_USER_QUOTA    0x08
+
+/*
+ * SMB flag definitions
+ */
+#define SMBFLG_EXTD_LOCK 0x01   /* server supports lock-read write-unlock smb */
+#define SMBFLG_RCV_POSTED 0x02  /* obsolete */
+#define SMBFLG_RSVD 0x04
+#define SMBFLG_CASELESS 0x08    /*
+				 * all pathnames treated as caseless (off
+				 * implies case sensitive file handling
+				 * request)
+				 */
+#define SMBFLG_CANONICAL_PATH_FORMAT 0x10       /* obsolete */
+#define SMBFLG_OLD_OPLOCK 0x20  /* obsolete */
+#define SMBFLG_OLD_OPLOCK_NOTIFY 0x40   /* obsolete */
+#define SMBFLG_RESPONSE 0x80    /* this PDU is a response from server */
+
+/*
+ * SMB flag2 definitions
+ */
+#define SMBFLG2_KNOWS_LONG_NAMES cpu_to_le16(1) /*
+						 * can send long (non-8.3)
+						 * path names in response
+						 */
+#define SMBFLG2_KNOWS_EAS cpu_to_le16(2)
+#define SMBFLG2_SECURITY_SIGNATURE cpu_to_le16(4)
+#define SMBFLG2_COMPRESSED (8)
+#define SMBFLG2_SECURITY_SIGNATURE_REQUIRED (0x10)
+#define SMBFLG2_IS_LONG_NAME cpu_to_le16(0x40)
+#define SMBFLG2_REPARSE_PATH (0x400)
+#define SMBFLG2_EXT_SEC cpu_to_le16(0x800)
+#define SMBFLG2_DFS cpu_to_le16(0x1000)
+#define SMBFLG2_PAGING_IO cpu_to_le16(0x2000)
+#define SMBFLG2_ERR_STATUS cpu_to_le16(0x4000)
+#define SMBFLG2_UNICODE cpu_to_le16(0x8000)
+
+/* SMB1 command codes */
+#define SMB_COM_CREATE_DIRECTORY      0x00 /* trivial response */
+#define SMB_COM_DELETE_DIRECTORY      0x01 /* trivial response */
+#define SMB_COM_CLOSE                 0x04 /* triv req/rsp, timestamp ignored */
+#define SMB_COM_FLUSH                 0x05 /* triv req/rsp */
+#define SMB_COM_DELETE                0x06 /* trivial response */
+#define SMB_COM_RENAME                0x07 /* trivial response */
+#define SMB_COM_QUERY_INFORMATION     0x08 /* aka getattr */
+#define SMB_COM_SETATTR               0x09 /* trivial response */
+#define SMB_COM_WRITE                 0x0b
+#define SMB_COM_CHECK_DIRECTORY       0x10 /* trivial response */
+#define SMB_COM_PROCESS_EXIT          0x11 /* trivial response */
+#define SMB_COM_LOCKING_ANDX          0x24 /* trivial response */
+#define SMB_COM_TRANSACTION	      0x25
+#define SMB_COM_COPY                  0x29 /* trivial rsp, fail filename ignrd*/
+#define SMB_COM_ECHO                  0x2B /* echo request */
+#define SMB_COM_OPEN_ANDX             0x2D /* Legacy open for old servers */
+#define SMB_COM_READ_ANDX             0x2E
+#define SMB_COM_WRITE_ANDX            0x2F
+#define SMB_COM_TRANSACTION2          0x32
+#define SMB_COM_TRANSACTION2_SECONDARY 0x33
+#define SMB_COM_FIND_CLOSE2           0x34 /* trivial response */
+#define SMB_COM_TREE_DISCONNECT       0x71 /* trivial response */
+#define SMB_COM_NEGOTIATE             0x72
+#define SMB_COM_SESSION_SETUP_ANDX    0x73
+#define SMB_COM_LOGOFF_ANDX           0x74 /* trivial response */
+#define SMB_COM_TREE_CONNECT_ANDX     0x75
+#define SMB_COM_QUERY_INFORMATION_DISK 0x80
+#define SMB_COM_NT_TRANSACT           0xA0
+#define SMB_COM_NT_TRANSACT_SECONDARY 0xA1
+#define SMB_COM_NT_CREATE_ANDX        0xA2
+#define SMB_COM_NT_CANCEL             0xA4 /* no response */
+#define SMB_COM_NT_RENAME             0xA5 /* trivial response */
+
+/* Negotiate response Capabilities */
+#define CAP_RAW_MODE           0x00000001
+#define CAP_MPX_MODE           0x00000002
+#define CAP_UNICODE            0x00000004
+#define CAP_LARGE_FILES        0x00000008
+#define CAP_NT_SMBS            0x00000010       /* implies CAP_NT_FIND */
+#define CAP_RPC_REMOTE_APIS    0x00000020
+#define CAP_STATUS32           0x00000040
+#define CAP_LEVEL_II_OPLOCKS   0x00000080
+#define CAP_LOCK_AND_READ      0x00000100
+#define CAP_NT_FIND            0x00000200
+#define CAP_DFS                0x00001000
+#define CAP_INFOLEVEL_PASSTHRU 0x00002000
+#define CAP_LARGE_READ_X       0x00004000
+#define CAP_LARGE_WRITE_X      0x00008000
+#define CAP_LWIO               0x00010000 /* support fctl_srv_req_resume_key */
+#define CAP_UNIX               0x00800000
+#define CAP_COMPRESSED_DATA    0x02000000
+#define CAP_DYNAMIC_REAUTH     0x20000000
+#define CAP_PERSISTENT_HANDLES 0x40000000
+#define CAP_EXTENDED_SECURITY  0x80000000
+
+/* RFC 1002 session packet types */
+#define RFC1002_SESSION_MESSAGE 0x00
+#define RFC1002_SESSION_REQUEST  0x81
+#define RFC1002_POSITIVE_SESSION_RESPONSE 0x82
+#define RFC1002_NEGATIVE_SESSION_RESPONSE 0x83
+#define RFC1002_RETARGET_SESSION_RESPONSE 0x84
+#define RFC1002_SESSION_KEEP_ALIVE 0x85
+
+/* Action bits */
+#define GUEST_LOGIN 1
+
+struct smb_negotiate_rsp {
+	struct smb_hdr hdr;     /* wct = 17 */
+	__le16 DialectIndex; /* 0xFFFF = no dialect acceptable */
+	__u8 SecurityMode;
+	__le16 MaxMpxCount;
+	__le16 MaxNumberVcs;
+	__le32 MaxBufferSize;
+	__le32 MaxRawSize;
+	__le32 SessionKey;
+	__le32 Capabilities;    /* see below */
+	__le32 SystemTimeLow;
+	__le32 SystemTimeHigh;
+	__le16 ServerTimeZone;
+	__u8 EncryptionKeyLength;
+	__le16 ByteCount;
+	union {
+		unsigned char EncryptionKey[8]; /* cap extended security off */
+		/* followed by Domain name - if extended security is off */
+		/* followed by 16 bytes of server GUID */
+		/* then security blob if cap_extended_security negotiated */
+		struct {
+			unsigned char GUID[SMB1_CLIENT_GUID_SIZE];
+			unsigned char SecurityBlob[1];
+		} __packed extended_response;
+	} __packed u;
+} __packed;
+
+struct smb_com_read_req {
+	struct smb_hdr hdr;     /* wct = 12 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u16 Fid;
+	__le32 OffsetLow;
+	__le16 MaxCount;
+	__le16 MinCount;                /* obsolete */
+	__le32 MaxCountHigh;
+	__le16 Remaining;
+	__le32 OffsetHigh;
+	__le16 ByteCount;
+} __packed;
+
+struct smb_com_read_rsp {
+	struct smb_hdr hdr;     /* wct = 12 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 Remaining;
+	__le16 DataCompactionMode;
+	__le16 Reserved;
+	__le16 DataLength;
+	__le16 DataOffset;
+	__le16 DataLengthHigh;
+	__u64 Reserved2;
+	__le16 ByteCount;
+	/* read response data immediately follows */
+} __packed;
+
+struct smb_com_write_req {	/* SMB_COM_WRITE_ANDX request */
+	struct smb_hdr hdr;	/* wct = 14 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u16 Fid;
+	__le32 OffsetLow;
+	__u32 Reserved;
+	__le16 WriteMode;
+	__le16 Remaining;
+	__le16 DataLengthHigh;
+	__le16 DataLengthLow;
+	__le16 DataOffset;	/* offset of Data from start of SMB header */
+	__le32 OffsetHigh;	/* combined with OffsetLow for a 64-bit offset */
+	__le16 ByteCount;
+	__u8 Pad;		/*
+				 * BB check for whether padded to DWORD
+				 * boundary and optimum performance here
+				 */
+	char Data[0];		/* write payload follows; [0] = old-style flexible array */
+} __packed;
+
+struct smb_com_write_req_32bit {	/* legacy core SMB_COM_WRITE (32-bit offset only) */
+	struct smb_hdr hdr;	/* wct = 5 */
+	__u16 Fid;
+	__le16 Length;
+	__le32 Offset;
+	__u16 Estimate;
+	__le16 ByteCount;	/* must be greater than 2 */
+	__u8 BufferFormat;	/* 1 = data block */
+	__u16 DataLength;
+	char Data[0];
+} __packed;
+
+struct smb_com_write_rsp_32bit {	/* legacy core SMB_COM_WRITE response */
+	struct smb_hdr hdr;	/* wct = 1 */
+	__le16 Written;
+	__le16 ByteCount;	/* must be 0 */
+} __packed;
+
+struct smb_com_write_rsp {	/* SMB_COM_WRITE_ANDX response */
+	struct smb_hdr hdr;	/* wct = 6 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 Count;		/* low 16 bits of bytes written */
+	__le16 Remaining;
+	__le16 CountHigh;	/* high 16 bits of bytes written */
+	__u16  Reserved;
+	__le16 ByteCount;
+} __packed;
+
+struct smb_com_rename_req {	/* SMB_COM_RENAME request */
+	struct smb_hdr hdr;     /* wct = 1 */
+	__le16 SearchAttributes;        /* target file attributes */
+	__le16 ByteCount;
+	__u8 BufferFormat;      /* 4 = ASCII or Unicode */
+	unsigned char OldFileName[1];	/* variable-length; trailing [1] placeholder */
+	/* followed by __u8 BufferFormat2 */
+	/* followed by NewFileName */
+} __packed;
+
+struct smb_com_rename_rsp {	/* SMB_COM_RENAME response */
+	struct smb_hdr hdr;     /* wct = 0 */
+	__le16 ByteCount;        /* bct = 0 */
+} __packed;
+
+/* SecurityMode bits (negotiate response SecurityMode field above) */
+#define SECMODE_USER          0x01      /* off indicates share level security */
+#define SECMODE_PW_ENCRYPT    0x02
+#define SECMODE_SIGN_ENABLED  0x04      /* SMB security signatures enabled */
+#define SECMODE_SIGN_REQUIRED 0x08      /* SMB security signatures required */
+
+struct smb_com_session_setup_req {	/* request format (extended security) */
+	struct smb_hdr hdr;	/* wct = 12 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 MaxBufferSize;
+	__le16 MaxMpxCount;
+	__le16 VcNumber;
+	__u32 SessionKey;
+	__le16 SecurityBlobLength;
+	__u32 Reserved;
+	__le32 Capabilities;	/* see below */
+	__le16 ByteCount;
+	unsigned char SecurityBlob[1];	/* followed by */
+	/* STRING NativeOS */
+	/* STRING NativeLanMan */
+} __packed;	/* NTLM request format (with extended security) */
+
+struct smb_com_session_setup_req_no_secext {	/* request format */
+	struct smb_hdr hdr;	/* we will handle this :: wct = 13 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 MaxBufferSize;
+	__le16 MaxMpxCount;
+	__le16 VcNumber;
+	__u32 SessionKey;
+	__le16 CaseInsensitivePasswordLength;	/* ASCII password len */
+	__le16 CaseSensitivePasswordLength;	/* Unicode password length*/
+	__u32 Reserved;	/* see below */
+	__le32 Capabilities;
+	__le16 ByteCount;
+	unsigned char CaseInsensitivePassword[0];	/* followed by: */
+	/* unsigned char * CaseSensitivePassword; */
+	/* STRING AccountName */
+	/* STRING PrimaryDomain */
+	/* STRING NativeOS */
+	/* STRING NativeLanMan */
+} __packed;	/* NTLM request format (without extended security) */
+
+struct smb_com_session_setup_resp {	/* default (NTLM) response format */
+	struct smb_hdr hdr;	/* wct = 4 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 Action;	/* see below */
+	__le16 SecurityBlobLength;
+	__le16 ByteCount;
+	unsigned char SecurityBlob[1];	/* followed by */
+	/*      unsigned char  * NativeOS;      */
+	/*      unsigned char  * NativeLanMan;  */
+	/*      unsigned char  * PrimaryDomain; */
+} __packed;	/* NTLM response (with or without extended sec) */
+
+struct smb_com_session_setup_old_resp { /* default (NTLM) response format */
+	struct smb_hdr hdr;	/* wct = 3 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 Action;	/* see below */
+	__le16 ByteCount;
+	unsigned char NativeOS[1];	/* followed by */
+	/*      unsigned char * NativeLanMan; */
+	/*      unsigned char * PrimaryDomain; */
+} __packed;	/* pre-NTLM (LANMAN2.1) response */
+
+union smb_com_session_setup_andx {	/* one overlay for all SESSION_SETUP_ANDX variants */
+	struct smb_com_session_setup_req req;
+	struct smb_com_session_setup_req_no_secext req_no_secext;
+	struct smb_com_session_setup_resp resp;
+	struct smb_com_session_setup_old_resp old_resp;
+} __packed;
+
+struct smb_com_tconx_req {	/* tree connect; no smb_hdr — starts at WordCount */
+	__u8 WordCount;  /* wct = 4, it could be ANDX */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 Flags;           /* see below */
+	__le16 PasswordLength;
+	__le16 ByteCount;
+	unsigned char Password[1];      /* followed by */
+	/* STRING Path    *//* \\server\share name */
+	/* STRING Service */
+} __packed;
+
+struct smb_com_tconx_rsp {	/* tree connect response (non-extended) */
+	__u8 WordCount;     /* wct = 3 , not extended response */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 OptionalSupport; /* see below */
+	__le16 ByteCount;
+	unsigned char Service[1];       /* always ASCII, not Unicode */
+	/* STRING NativeFileSystem */
+} __packed;
+
+struct smb_com_tconx_rsp_ext {	/* tree connect response (extended) */
+	__u8 WordCount;	/* wct = 7, extended response */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 OptionalSupport; /* see below */
+	__le32 MaximalShareAccessRights;
+	__le32 GuestMaximalShareAccessRights;
+	__le16 ByteCount;
+	unsigned char Service[1];       /* always ASCII, not Unicode */
+	/* STRING NativeFileSystem */
+} __packed;
+
+struct andx_block {	/* generic ANDX chaining prefix (wct + next-command fields) */
+	__u8 WordCount;
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+} __packed;
+
+struct locking_andx_range64 {	/* LOCKING_ANDX lock range, 64-bit (LARGE_FILES) form */
+	__le16 Pid;
+	__le16 Pad;
+	__le32 OffsetHigh;
+	__le32 OffsetLow;
+	__le32 LengthHigh;
+	__le32 LengthLow;
+} __packed;
+
+struct locking_andx_range32 {	/* LOCKING_ANDX lock range, 32-bit form */
+	__le16 Pid;
+	__le32 Offset;
+	__le32 Length;
+} __packed;
+
+/* LockType bits for smb_com_lock_req below */
+#define LOCKING_ANDX_SHARED_LOCK     0x01
+#define LOCKING_ANDX_OPLOCK_RELEASE  0x02
+#define LOCKING_ANDX_CHANGE_LOCKTYPE 0x04
+#define LOCKING_ANDX_CANCEL_LOCK     0x08
+#define LOCKING_ANDX_LARGE_FILES     0x10       /* always on for us */
+
+struct smb_com_lock_req {	/* SMB_COM_LOCKING_ANDX request */
+	struct smb_hdr hdr;	/* wct = 8 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u16 Fid;
+	__u8 LockType;
+	__u8 OplockLevel;
+	__le32 Timeout;
+	__le16 NumberOfUnlocks;
+	__le16 NumberOfLocks;
+	__le16 ByteCount;
+	char *Locks[1];	/* NOTE(review): pointer array in a __packed wire struct looks suspect — on-wire data is locking_andx_range{32,64} entries; confirm against users */
+} __packed;
+
+struct smb_com_lock_rsp {	/* SMB_COM_LOCKING_ANDX response */
+	struct smb_hdr hdr;     /* wct = 2 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 ByteCount;
+} __packed;
+
+struct smb_com_query_information_disk_rsp {	/* SMB_COM_QUERY_INFORMATION_DISK response */
+	struct smb_hdr hdr;     /* wct = 5 */
+	__le16 TotalUnits;
+	__le16 BlocksPerUnit;
+	__le16 BlockSize;
+	__le16 FreeUnits;
+	__le16 Pad;
+	__le16 ByteCount;
+} __packed;
+
+/* tree connect Flags (smb_com_tconx_req.Flags) */
+#define DISCONNECT_TID          0x0001
+#define TCON_EXTENDED_SIGNATURES 0x0004
+#define TCON_EXTENDED_SECINFO   0x0008
+
+/* OptionalSupport bits (smb_com_tconx_rsp*.OptionalSupport) */
+#define SMB_SUPPORT_SEARCH_BITS 0x0001  /*
+					 * "must have" directory search bits
+					 * (exclusive searches supported)
+					 */
+#define SMB_SHARE_IS_IN_DFS     0x0002
+#define SMB_CSC_MASK               0x000C
+/* CSC (client-side caching) flags defined as follows */
+#define SMB_CSC_CACHE_MANUAL_REINT 0x0000
+#define SMB_CSC_CACHE_AUTO_REINT   0x0004
+#define SMB_CSC_CACHE_VDO          0x0008
+#define SMB_CSC_NO_CACHING         0x000C
+#define SMB_UNIQUE_FILE_NAME    0x0010
+#define SMB_EXTENDED_SIGNATURES 0x0020
+
+/* OpenFlags (smb_com_open_req.OpenFlags) */
+#define REQ_MORE_INFO      0x00000001  /* legacy (OPEN_AND_X) only */
+#define REQ_OPLOCK         0x00000002
+#define REQ_BATCHOPLOCK    0x00000004
+#define REQ_OPENDIRONLY    0x00000008
+#define REQ_EXTENDED_INFO  0x00000010
+
+/* File type (smb_com_open_rsp.FileType) */
+#define DISK_TYPE               0x0000
+#define BYTE_PIPE_TYPE          0x0001
+#define MESSAGE_PIPE_TYPE       0x0002
+#define PRINTER_TYPE            0x0003
+#define COMM_DEV_TYPE           0x0004
+#define UNKNOWN_TYPE            0xFFFF
+
+/* Device Type or File Status Flags (smb_com_open_rsp.DeviceState) */
+#define NO_EAS                  0x0001
+#define NO_SUBSTREAMS           0x0002
+#define NO_REPARSETAG           0x0004
+/* following flags can apply if pipe */
+#define ICOUNT_MASK             0x00FF
+#define PIPE_READ_MODE          0x0100
+#define NAMED_PIPE_TYPE         0x0400
+#define PIPE_END_POINT          0x4000
+#define BLOCKING_NAMED_PIPE     0x8000
+
+/* ShareAccess flags (smb_com_open_req.ShareAccess) */
+#define FILE_NO_SHARE     0x00000000
+#define FILE_SHARE_READ   0x00000001
+#define FILE_SHARE_WRITE  0x00000002
+#define FILE_SHARE_DELETE 0x00000004
+#define FILE_SHARE_ALL    0x00000007
+
+/* CreateDisposition flags, similar to CreateAction as well */
+#define FILE_SUPERSEDE    0x00000000
+#define FILE_OPEN         0x00000001
+#define FILE_CREATE       0x00000002
+#define FILE_OPEN_IF      0x00000003
+#define FILE_OVERWRITE    0x00000004
+#define FILE_OVERWRITE_IF 0x00000005
+
+/* ImpersonationLevel flags */
+#define SECURITY_ANONYMOUS      0
+#define SECURITY_IDENTIFICATION 1
+#define SECURITY_IMPERSONATION  2
+#define SECURITY_DELEGATION     3
+
+/* SecurityFlags (smb_com_open_req.SecurityFlags) */
+#define SECURITY_CONTEXT_TRACKING 0x01
+#define SECURITY_EFFECTIVE_ONLY   0x02
+
+struct smb_com_open_req {       /* also handles create */
+	struct smb_hdr hdr;     /* wct = 24 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u8 Reserved;          /* Must Be Zero */
+	__le16 NameLength;
+	__le32 OpenFlags;
+	__u32  RootDirectoryFid;
+	__le32 DesiredAccess;
+	__le64 AllocationSize;
+	__le32 FileAttributes;
+	__le32 ShareAccess;
+	__le32 CreateDisposition;
+	__le32 CreateOptions;
+	__le32 ImpersonationLevel;
+	__u8 SecurityFlags;
+	__le16 ByteCount;
+	char fileName[1];	/* variable-length path follows */
+} __packed;
+
+/* open response for CreateAction shifted left */
+#define CIFS_CREATE_ACTION 0x20000 /* file created */
+
+/* Basic file attributes (dos-style attribute/search bits) */
+#define SMB_FILE_ATTRIBUTE_NORMAL	0x0000
+#define SMB_FILE_ATTRIBUTE_READONLY	0x0001
+#define SMB_FILE_ATTRIBUTE_HIDDEN	0x0002
+#define SMB_FILE_ATTRIBUTE_SYSTEM	0x0004
+#define SMB_FILE_ATTRIBUTE_VOLUME	0x0008
+#define SMB_FILE_ATTRIBUTE_DIRECTORY	0x0010
+#define SMB_FILE_ATTRIBUTE_ARCHIVE	0x0020
+#define SMB_SEARCH_ATTRIBUTE_READONLY	0x0100
+#define SMB_SEARCH_ATTRIBUTE_HIDDEN	0x0200
+#define SMB_SEARCH_ATTRIBUTE_SYSTEM	0x0400
+#define SMB_SEARCH_ATTRIBUTE_DIRECTORY	0x1000
+#define SMB_SEARCH_ATTRIBUTE_ARCHIVE	0x2000
+
+struct smb_com_open_rsp {	/* open/create response (non-extended) */
+	struct smb_hdr hdr;     /* wct = 34 BB */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u8 OplockLevel;
+	__u16 Fid;
+	__le32 CreateAction;
+	__le64 CreationTime;
+	__le64 LastAccessTime;
+	__le64 LastWriteTime;
+	__le64 ChangeTime;
+	__le32 FileAttributes;
+	__le64 AllocationSize;
+	__le64 EndOfFile;
+	__le16 FileType;
+	__le16 DeviceState;
+	__u8 DirectoryFlag;
+	__le16 ByteCount;        /* bct = 0 */
+} __packed;
+
+struct smb_com_open_ext_rsp {	/* open/create response, extended (adds GUID/fid/access) */
+	struct smb_hdr hdr;     /* wct = 42 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u8 OplockLevel;
+	__u16 Fid;
+	__le32 CreateAction;
+	__le64 CreationTime;
+	__le64 LastAccessTime;
+	__le64 LastWriteTime;
+	__le64 ChangeTime;
+	__le32 FileAttributes;
+	__le64 AllocationSize;
+	__le64 EndOfFile;
+	__le16 FileType;
+	__le16 DeviceState;
+	__u8 DirectoryFlag;
+	__u8 VolId[16];
+	__u64 fid;
+	__le32 MaxAccess;
+	__le32 GuestAccess;
+	__le16 ByteCount;        /* bct = 0 */
+} __packed;
+
+struct smb_com_close_req {	/* SMB_COM_CLOSE request */
+	struct smb_hdr hdr;     /* wct = 3 */
+	__u16 FileID;
+	__le32 LastWriteTime;    /* should be zero or -1 */
+	__le16  ByteCount;        /* 0 */
+} __packed;
+
+struct smb_com_close_rsp {	/* SMB_COM_CLOSE response */
+	struct smb_hdr hdr;     /* wct = 0 */
+	__le16 ByteCount;        /* bct = 0 */
+} __packed;
+
+struct smb_com_echo_req {	/* SMB_COM_ECHO request */
+	struct  smb_hdr hdr;
+	__le16  EchoCount;	/* number of echo responses requested */
+	__le16  ByteCount;
+	char    Data[1];	/* echoed back verbatim */
+} __packed;
+
+struct smb_com_echo_rsp {	/* SMB_COM_ECHO response */
+	struct  smb_hdr hdr;
+	__le16  SequenceNumber;
+	__le16  ByteCount;
+	char    Data[1];
+} __packed;
+
+struct smb_com_flush_req {	/* SMB_COM_FLUSH request */
+	struct smb_hdr hdr;     /* wct = 1 */
+	__u16 FileID;
+	__le16 ByteCount;        /* 0 */
+} __packed;
+
+struct smb_com_flush_rsp {	/* SMB_COM_FLUSH response */
+	struct smb_hdr hdr;     /* wct = 0 */
+	__le16 ByteCount;        /* bct = 0 */
+} __packed;
+
+/* SMB_COM_TRANSACTION */
+struct smb_com_trans_req {	/* generic transaction request */
+	struct smb_hdr hdr;
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand;	/* single setup word */
+	__u8  Pad;
+	__u8 Data[1];
+} __packed;
+
+struct smb_com_trans_pipe_req {	/* transaction request with pipe fid (two setup words) */
+	struct smb_hdr hdr;
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__u16 SubCommand;
+	__u16 fid;
+	__le16 ByteCount;
+	__u8  Pad;
+	__u8 Data[1];
+} __packed;
+
+struct smb_com_trans_rsp {	/* generic transaction response */
+	struct smb_hdr hdr;     /* wct = 10+ */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__u16 Reserved;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 ParameterDisplacement;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__le16 DataDisplacement;
+	__u8 SetupCount;
+	__u8 Reserved1;
+	__le16 ByteCount;
+	__u8 Pad;
+} __packed;
+
+/* SMB_COM_TRANSACTION subcommands */
+
+#define TRANSACT_DCERPCCMD	0x26
+
+/*****************************************************************************
+ * TRANS2 command implementation functions
+ *****************************************************************************/
+#define NO_CHANGE_64          0xFFFFFFFFFFFFFFFFULL	/* "leave field unchanged" sentinel */
+
+/* QFSInfo Levels (TRANS2_QUERY_FS_INFORMATION InformationLevel values) */
+#define SMB_INFO_ALLOCATION         1
+#define SMB_INFO_VOLUME             2
+#define SMB_QUERY_FS_VOLUME_INFO    0x102
+#define SMB_QUERY_FS_SIZE_INFO      0x103
+#define SMB_QUERY_FS_DEVICE_INFO    0x104
+#define SMB_QUERY_FS_ATTRIBUTE_INFO 0x105
+#define SMB_QUERY_CIFS_UNIX_INFO    0x200
+#define SMB_QUERY_POSIX_FS_INFO     0x201
+#define SMB_QUERY_POSIX_WHO_AM_I    0x202
+#define SMB_REQUEST_TRANSPORT_ENCRYPTION 0x203
+#define SMB_QUERY_FS_PROXY          0x204 /*
+					   * WAFS enabled. Returns structure
+					   * FILE_SYSTEM__UNIX_INFO to tell
+					   * whether new NTIOCTL available
+					   * (0xACE) for WAN friendly SMB
+					   * operations to be carried
+					   */
+#define SMB_QUERY_LABEL_INFO        0x3ea
+#define SMB_QUERY_FS_QUOTA_INFO     0x3ee
+#define SMB_QUERY_FS_FULL_SIZE_INFO 0x3ef
+#define SMB_QUERY_OBJECTID_INFO     0x3f0
+
+struct trans2_resp {	/* common TRANS2 response parameter block */
+	/* struct smb_hdr hdr precedes. Note wct = 10 + setup count */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__u16 Reserved;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 ParameterDisplacement;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__le16 DataDisplacement;
+	__u8 SetupCount;
+	__u8 Reserved1;
+	/*
+	 * SetupWords[SetupCount];
+	 * __u16 ByteCount;
+	 * __u16 Reserved2;
+	 */
+	/* data area follows */
+} __packed;
+
+struct smb_com_trans2_req {	/* common TRANS2 request header (one setup word) */
+	struct smb_hdr hdr;
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand;      /* one setup word */
+} __packed;
+
+struct smb_com_trans2_qfsi_req {	/* TRANS2_QUERY_FS_INFORMATION request */
+	struct smb_hdr hdr;     /* wct = 14+ */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand;      /* one setup word */
+	__le16 ByteCount;
+	__u8 Pad;
+	__le16 InformationLevel;	/* one of the QFSInfo levels above */
+} __packed;
+
+struct smb_com_trans2_qfsi_req_params {	/* parameter block only (level) */
+	__le16 InformationLevel;
+} __packed;
+
+/* SearchFlags bits for FIND_FIRST2/FIND_NEXT2 */
+#define CIFS_SEARCH_CLOSE_ALWAYS	0x0001
+#define CIFS_SEARCH_CLOSE_AT_END	0x0002
+#define CIFS_SEARCH_RETURN_RESUME	0x0004
+#define CIFS_SEARCH_CONTINUE_FROM_LAST	0x0008
+#define CIFS_SEARCH_BACKUP_SEARCH	0x0010
+
+struct smb_com_trans2_ffirst_req_params {	/* TRANS2_FIND_FIRST2 parameter block */
+	__le16 SearchAttributes;
+	__le16 SearchCount;	/* max entries to return */
+	__le16 SearchFlags;
+	__le16 InformationLevel;
+	__le32 SearchStorageType;
+	char FileName[1];	/* search pattern follows */
+} __packed;
+
+struct smb_com_trans2_ffirst_rsp_parms {	/* TRANS2_FIND_FIRST2 response params */
+	__u16 SearchHandle;
+	__le16 SearchCount;
+	__le16 EndofSearch;
+	__le16 EAErrorOffset;
+	__le16 LastNameOffset;
+} __packed;
+
+struct smb_com_trans2_fnext_req_params {	/* TRANS2_FIND_NEXT2 parameter block */
+	__u16 SearchHandle;
+	__le16 SearchCount;
+	__le16 InformationLevel;
+	__u32 ResumeKey;
+	__le16 SearchFlags;
+	char ResumeFileName[1];
+} __packed;
+
+struct smb_com_trans2_fnext_rsp_params {	/* TRANS2_FIND_NEXT2 response params */
+	__le16 SearchCount;
+	__le16 EndofSearch;
+	__le16 EAErrorOffset;
+	__le16 LastNameOffset;
+} __packed;
+
+struct smb_com_trans2_rsp {	/* full TRANS2 response wrapper */
+	struct smb_hdr hdr;     /* wct = 10 + SetupCount */
+	struct trans2_resp t2;
+	__le16 ByteCount;
+	__u8 Pad;       /* may be three bytes? *//* followed by data area */
+	__u8 Buffer[0];
+} __packed;
+
+struct file_internal_info {	/* SMB_QUERY_FILE_INTERNAL_INFO data */
+	__le64  UniqueId; /* inode number */
+} __packed;      /* level 0x3ee */
+
+/* DeviceType Flags (QFSInfo FS_DEVICE_INFO) */
+#define FILE_DEVICE_CD_ROM              0x00000002
+#define FILE_DEVICE_CD_ROM_FILE_SYSTEM  0x00000003
+#define FILE_DEVICE_DFS                 0x00000006
+#define FILE_DEVICE_DISK                0x00000007
+#define FILE_DEVICE_DISK_FILE_SYSTEM    0x00000008
+#define FILE_DEVICE_FILE_SYSTEM         0x00000009
+#define FILE_DEVICE_NAMED_PIPE          0x00000011
+#define FILE_DEVICE_NETWORK             0x00000012
+#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
+#define FILE_DEVICE_NULL                0x00000015
+#define FILE_DEVICE_PARALLEL_PORT       0x00000016
+#define FILE_DEVICE_PRINTER             0x00000018
+#define FILE_DEVICE_SERIAL_PORT         0x0000001b
+#define FILE_DEVICE_STREAMS             0x0000001e
+#define FILE_DEVICE_TAPE                0x0000001f
+#define FILE_DEVICE_TAPE_FILE_SYSTEM    0x00000020
+#define FILE_DEVICE_VIRTUAL_DISK        0x00000024
+#define FILE_DEVICE_NETWORK_REDIRECTOR  0x00000028
+
+/* Filesystem Attributes. */
+#define FILE_CASE_SENSITIVE_SEARCH      0x00000001
+#define FILE_CASE_PRESERVED_NAMES       0x00000002
+#define FILE_UNICODE_ON_DISK            0x00000004
+/* According to cifs9f, this is 4, not 8 */
+/* According to testing, this actually sets the security attribute! */
+#define FILE_PERSISTENT_ACLS            0x00000008
+#define FILE_FILE_COMPRESSION           0x00000010
+#define FILE_VOLUME_QUOTAS              0x00000020
+#define FILE_SUPPORTS_SPARSE_FILES      0x00000040
+#define FILE_SUPPORTS_REPARSE_POINTS    0x00000080
+#define FILE_SUPPORTS_REMOTE_STORAGE    0x00000100
+#define FS_LFN_APIS                     0x00004000
+#define FILE_VOLUME_IS_COMPRESSED       0x00008000
+#define FILE_SUPPORTS_OBJECT_IDS        0x00010000
+#define FILE_SUPPORTS_ENCRYPTION        0x00020000
+#define FILE_NAMED_STREAMS              0x00040000
+#define FILE_READ_ONLY_VOLUME           0x00080000
+
+/* PathInfo/FileInfo infolevels (QUERY_PATH/FILE_INFORMATION) */
+#define SMB_INFO_STANDARD                   1
+#define SMB_SET_FILE_EA                     2
+#define SMB_QUERY_FILE_EA_SIZE              2
+#define SMB_INFO_QUERY_EAS_FROM_LIST        3
+#define SMB_INFO_QUERY_ALL_EAS              4
+#define SMB_INFO_IS_NAME_VALID              6
+#define SMB_QUERY_FILE_BASIC_INFO       0x101
+#define SMB_QUERY_FILE_STANDARD_INFO    0x102
+#define SMB_QUERY_FILE_EA_INFO          0x103
+#define SMB_QUERY_FILE_NAME_INFO        0x104
+#define SMB_QUERY_FILE_ALLOCATION_INFO  0x105
+#define SMB_QUERY_FILE_END_OF_FILEINFO  0x106
+#define SMB_QUERY_FILE_ALL_INFO         0x107
+#define SMB_QUERY_ALT_NAME_INFO         0x108
+#define SMB_QUERY_FILE_STREAM_INFO      0x109
+#define SMB_QUERY_FILE_COMPRESSION_INFO 0x10B
+#define SMB_QUERY_FILE_UNIX_BASIC       0x200
+#define SMB_QUERY_FILE_UNIX_LINK        0x201
+#define SMB_QUERY_POSIX_ACL             0x204
+#define SMB_QUERY_XATTR                 0x205  /* e.g. system EA name space */
+#define SMB_QUERY_ATTR_FLAGS            0x206  /* append,immutable etc. */
+#define SMB_QUERY_POSIX_PERMISSION      0x207
+#define SMB_QUERY_POSIX_LOCK            0x208
+/* #define SMB_POSIX_OPEN               0x209 */
+/* #define SMB_POSIX_UNLINK             0x20a */
+#define SMB_QUERY_FILE__UNIX_INFO2      0x20b
+#define SMB_QUERY_FILE_INTERNAL_INFO    0x3ee
+#define SMB_QUERY_FILE_ACCESS_INFO      0x3f0
+#define SMB_QUERY_FILE_NAME_INFO2       0x3f1 /* 0x30 bytes */
+#define SMB_QUERY_FILE_POSITION_INFO    0x3f6
+#define SMB_QUERY_FILE_MODE_INFO        0x3f8
+#define SMB_QUERY_FILE_ALGN_INFO        0x3f9
+
+
+/* SetPathInfo/SetFileInfo infolevels */
+#define SMB_SET_FILE_BASIC_INFO         0x101
+#define SMB_SET_FILE_DISPOSITION_INFO   0x102
+#define SMB_SET_FILE_ALLOCATION_INFO    0x103
+#define SMB_SET_FILE_END_OF_FILE_INFO   0x104
+#define SMB_SET_FILE_UNIX_BASIC         0x200
+#define SMB_SET_FILE_UNIX_LINK          0x201
+#define SMB_SET_FILE_UNIX_HLINK         0x203
+#define SMB_SET_POSIX_ACL               0x204
+#define SMB_SET_XATTR                   0x205
+#define SMB_SET_ATTR_FLAGS              0x206  /* append, immutable etc. */
+#define SMB_SET_POSIX_LOCK              0x208
+#define SMB_POSIX_OPEN                  0x209
+#define SMB_POSIX_UNLINK                0x20a
+#define SMB_SET_FILE_UNIX_INFO2         0x20b
+#define SMB_SET_FILE_BASIC_INFO2        0x3ec
+#define SMB_SET_FILE_RENAME_INFORMATION 0x3f2 /* BB check if qpathinfo too */
+#define SMB_SET_FILE_DISPOSITION_INFORMATION   0x3f5   /* alias for 0x102 */
+#define SMB_FILE_ALL_INFO2              0x3fa
+#define SMB_SET_FILE_ALLOCATION_INFO2   0x3fb
+#define SMB_SET_FILE_END_OF_FILE_INFO2  0x3fc
+#define SMB_FILE_MOVE_CLUSTER_INFO      0x407
+#define SMB_FILE_QUOTA_INFO             0x408
+#define SMB_FILE_REPARSEPOINT_INFO      0x409
+#define SMB_FILE_MAXIMUM_INFO           0x40d
+
+/* Find File infolevels */
+#define SMB_FIND_FILE_INFO_STANDARD       0x001
+#define SMB_FIND_FILE_QUERY_EA_SIZE       0x002
+#define SMB_FIND_FILE_QUERY_EAS_FROM_LIST 0x003
+#define SMB_FIND_FILE_DIRECTORY_INFO      0x101
+#define SMB_FIND_FILE_FULL_DIRECTORY_INFO 0x102
+#define SMB_FIND_FILE_NAMES_INFO          0x103
+#define SMB_FIND_FILE_BOTH_DIRECTORY_INFO 0x104
+#define SMB_FIND_FILE_ID_FULL_DIR_INFO    0x105
+#define SMB_FIND_FILE_ID_BOTH_DIR_INFO    0x106
+#define SMB_FIND_FILE_UNIX                0x202
+
+struct smb_com_trans2_qpi_req {	/* TRANS2_QUERY_PATH_INFORMATION request */
+	struct smb_hdr hdr;     /* wct = 14+ */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand;      /* one setup word */
+	__le16 ByteCount;
+	__u8 Pad;
+	__le16 InformationLevel;
+	__u32 Reserved4;
+	char FileName[1];	/* path follows */
+} __packed;
+
+struct trans2_qpi_req_params {	/* parameter block only (level + path) */
+	__le16 InformationLevel;
+	__u32 Reserved4;
+	char FileName[1];
+} __packed;
+
+/******************************************************************************/
+/* QueryFileInfo/QueryPathinfo (also for SetPath/SetFile) data buffer formats */
+/******************************************************************************/
+struct file_basic_info {	/* level 0x101 data */
+	__le64 CreationTime;
+	__le64 LastAccessTime;
+	__le64 LastWriteTime;
+	__le64 ChangeTime;
+	__le32 Attributes;
+	__u32 Pad;
+} __packed;      /* size info, level 0x101 */
+
+struct file_standard_info {	/* level 0x102 data */
+	__le64 AllocationSize;
+	__le64 EndOfFile;
+	__le32 NumberOfLinks;
+	__u8 DeletePending;
+	__u8 Directory;
+	__le16 Reserved;
+} __packed;
+
+struct file_ea_info {	/* level 0x103 data */
+	__le32 EaSize;
+} __packed;
+
+struct alt_name_info {	/* level 0x108 data (8.3 short name) */
+	__le32 FileNameLength;
+	char FileName[1];
+} __packed;
+
+struct file_name_info {	/* level 0x104 data */
+	__le32 FileNameLength;
+	char FileName[1];
+} __packed;
+
+/* data block encoding of response to level 263 QPathInfo */
+struct file_all_info {	/* level 0x107 (= 263) data */
+	__le64 CreationTime;
+	__le64 LastAccessTime;
+	__le64 LastWriteTime;
+	__le64 ChangeTime;
+	__le32 Attributes;
+	__u32 Pad1;
+	__le64 AllocationSize;
+	__le64 EndOfFile;       /* size ie offset to first free byte in file */
+	__le32 NumberOfLinks;   /* hard links */
+	__u8 DeletePending;
+	__u8 Directory;
+	__u16 Pad2;
+	__le32 EASize;
+	__le32 FileNameLength;
+	char FileName[1];
+} __packed; /* level 0x107 QPathInfo */
+
+/* set path info/open file */
+/* defines for enumerating possible values of the Unix type field below */
+#define UNIX_FILE      0
+#define UNIX_DIR       1
+#define UNIX_SYMLINK   2
+#define UNIX_CHARDEV   3
+#define UNIX_BLOCKDEV  4
+#define UNIX_FIFO      5
+#define UNIX_SOCKET    6
+#define UNIX_UNKNOWN   0xFFFFFFFF
+
+struct file_unix_basic_info {	/* CIFS UNIX extensions stat-like record */
+	__le64 EndOfFile;
+	__le64 NumOfBytes;
+	__le64 LastStatusChange; /*SNIA specs DCE time for the 3 time fields */
+	__le64 LastAccessTime;
+	__le64 LastModificationTime;
+	__le64 Uid;
+	__le64 Gid;
+	__le32 Type;	/* one of the UNIX_* values above */
+	__le64 DevMajor;
+	__le64 DevMinor;
+	__le64 UniqueId;
+	__le64 Permissions;
+	__le64 Nlinks;
+} __packed; /* level 0x200 QPathInfo */
+
+struct smb_com_trans2_spi_req {	/* TRANS2_SET_PATH_INFORMATION request */
+	struct smb_hdr hdr;     /* wct = 15 */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand;      /* one setup word */
+	__le16 ByteCount;
+	__u8 Pad;
+	__u16 Pad1;
+	__le16 InformationLevel;
+	__u32 Reserved4;
+	char FileName[1];	/* path follows */
+} __packed;
+
+struct smb_com_trans2_spi_rsp {	/* TRANS2_SET_PATH_INFORMATION response */
+	struct smb_hdr hdr;     /* wct = 10 + SetupCount */
+	struct trans2_resp t2;
+	__le16 ByteCount;
+	__u16 Reserved2; /* parameter word is present for infolevels > 100 */
+} __packed;
+
+/* POSIX Open Flags (open_psx_req.PosixOpenFlags; mirror O_* semantics) */
+#define SMB_O_RDONLY     0x1
+#define SMB_O_WRONLY    0x2
+#define SMB_O_RDWR      0x4
+#define SMB_O_CREAT     0x10
+#define SMB_O_EXCL      0x20
+#define SMB_O_TRUNC     0x40
+#define SMB_O_APPEND    0x80
+#define SMB_O_SYNC      0x100
+#define SMB_O_DIRECTORY 0x200
+#define SMB_O_NOFOLLOW  0x400
+#define SMB_O_DIRECT    0x800
+#define SMB_ACCMODE	0x7	/* mask over RDONLY|WRONLY|RDWR */
+
+/* info level response for SMB_POSIX_PATH_OPEN */
+#define SMB_NO_INFO_LEVEL_RESPONSE 0xFFFF
+
+struct open_psx_req {	/* SMB_POSIX_OPEN SetPathInfo payload */
+	__le32 OpenFlags; /* same as NT CreateX */
+	__le32 PosixOpenFlags;	/* SMB_O_* bits above */
+	__le64 Permissions;
+	__le16 Level; /* reply level requested (see QPathInfo levels) */
+} __packed; /* level 0x209 SetPathInfo data */
+
+struct open_psx_rsp {	/* SMB_POSIX_OPEN response payload */
+	__le16 OplockFlags;
+	__u16 Fid;
+	__le32 CreateAction;
+	__le16 ReturnedLevel;
+	__le16 Pad;
+	/* struct following varies based on requested level */
+} __packed; /* level 0x209 SetPathInfo data */
+
+struct unlink_psx_rsp {	/* SMB_POSIX_UNLINK response payload */
+	__le16 EAErrorOffset;
+} __packed; /* level 0x209 SetPathInfo data*/
+
+/* Version numbers for CIFS UNIX major and minor. */
+#define CIFS_UNIX_MAJOR_VERSION 1
+#define CIFS_UNIX_MINOR_VERSION 0
+
+struct filesystem_unix_info {	/* QFSInfo level 0x200 data */
+	__le16 MajorVersionNumber;
+	__le16 MinorVersionNumber;
+	__le64 Capability;	/* CIFS_UNIX_*_CAP bits below */
+} __packed; /* Unix extension level 0x200*/
+
+/* Linux/Unix extensions capability flags */
+#define CIFS_UNIX_FCNTL_CAP             0x00000001 /* support for fcntl locks */
+#define CIFS_UNIX_POSIX_ACL_CAP         0x00000002 /* support getfacl/setfacl */
+#define CIFS_UNIX_XATTR_CAP             0x00000004 /* support new namespace   */
+#define CIFS_UNIX_EXTATTR_CAP           0x00000008 /* support chattr/chflag   */
+#define CIFS_UNIX_POSIX_PATHNAMES_CAP   0x00000010 /* Allow POSIX path chars  */
+#define CIFS_UNIX_POSIX_PATH_OPS_CAP    0x00000020 /*
+						    * Allow new POSIX path based
+						    * calls including posix open
+						    * and posix unlink
+						    */
+#define CIFS_UNIX_LARGE_READ_CAP        0x00000040 /*
+						    * support reads >128K (up
+						    * to 0xFFFF00
+						    */
+#define CIFS_UNIX_LARGE_WRITE_CAP       0x00000080
+#define CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP 0x00000100 /* can do SPNEGO crypt */
+#define CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP  0x00000200 /* must do  */
+#define CIFS_UNIX_PROXY_CAP             0x00000400 /*
+						    * Proxy cap: 0xACE ioctl and
+						    * QFS PROXY call
+						    */
+#ifdef CONFIG_CIFS_POSIX
+/* presumably don't need the 0x20 POSIX_PATH_OPS_CAP since we never send
+ * LockingX instead of posix locking call on unix sess (and we do not expect
+ * LockingX to use different (ie Windows) semantics than posix locking on
+ * the same session (if WINE needs to do this later, we can add this cap
+ * back in later
+ */
+
+/* #define CIFS_UNIX_CAP_MASK              0x000000fb */
+#define CIFS_UNIX_CAP_MASK              0x000003db
+#else
+#define CIFS_UNIX_CAP_MASK              0x00000013
+#endif /* CONFIG_CIFS_POSIX */
+
+
+#define CIFS_POSIX_EXTENSIONS           0x00000010 /* support for new QFSInfo */
+
+/* Our server caps */
+
+#define SMB_UNIX_CAPS	(CIFS_UNIX_FCNTL_CAP | CIFS_UNIX_POSIX_ACL_CAP | \
+		CIFS_UNIX_XATTR_CAP | CIFS_UNIX_POSIX_PATHNAMES_CAP| \
+		CIFS_UNIX_POSIX_PATH_OPS_CAP | CIFS_UNIX_LARGE_READ_CAP | \
+		CIFS_UNIX_LARGE_WRITE_CAP)
+
+#define SMB_SET_CIFS_UNIX_INFO    0x200
+/* Level 0x200 request structure follows */
+struct smb_com_trans2_setfsi_req {	/* TRANS2_SET_FS_INFORMATION request (level 0x200) */
+	struct smb_hdr hdr;     /* wct = 15 */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;  /* 4 */
+	__le16 ParameterOffset;
+	__le16 DataCount;       /* 12 */
+	__le16 DataOffset;
+	__u8 SetupCount;        /* one */
+	__u8 Reserved3;
+	__le16 SubCommand;      /* TRANS2_SET_FS_INFORMATION */
+	__le16 ByteCount;
+	__u8 Pad;
+	__u16 FileNum;          /* Parameters start. */
+	__le16 InformationLevel;/* Parameters end. */
+	__le16 ClientUnixMajor; /* Data start. */
+	__le16 ClientUnixMinor;
+	__le64 ClientUnixCap;   /* Data end */
+} __packed;
+
+/* response for setfsinfo levels 0x200 and 0x203 */
+struct smb_com_trans2_setfsi_rsp {
+	struct smb_hdr hdr;     /* wct = 10 */
+	struct trans2_resp t2;
+	__le16 ByteCount;
+} __packed;
+
+struct smb_com_trans2_setfsi_req_params {	/* parameter+data block only */
+	__u16 FileNum;
+	__le16 InformationLevel;
+	__le16 ClientUnixMajor; /* Data start. */
+	__le16 ClientUnixMinor;
+	__le64 ClientUnixCap;   /* Data end */
+} __packed;
+
+struct smb_trans2_qfi_req_params {	/* TRANS2_QUERY_FILE_INFORMATION parameter block */
+	__u16   Fid;
+	__le16  InformationLevel;
+} __packed;
+
+/* FIND FIRST2 and FIND NEXT2 INFORMATION Level Codes*/
+
+struct find_info_standard {	/* SMB_FIND_FILE_INFO_STANDARD entry */
+	__le16 CreationDate; /* SMB Date see above */
+	__le16 CreationTime; /* SMB Time */
+	__le16 LastAccessDate;
+	__le16 LastAccessTime;
+	__le16 LastWriteDate;
+	__le16 LastWriteTime;
+	__le32 DataSize; /* File Size (EOF) */
+	__le32 AllocationSize;
+	__le16 Attributes; /* verify not u32 */
+	__le16 FileNameLength;
+	char FileName[1];
+} __packed;
+
+struct find_info_query_ea_size {	/* SMB_FIND_FILE_QUERY_EA_SIZE entry */
+	__le16 CreationDate; /* SMB Date see above */
+	__le16 CreationTime; /* SMB Time */
+	__le16 LastAccessDate;
+	__le16 LastAccessTime;
+	__le16 LastWriteDate;
+	__le16 LastWriteTime;
+	__le32 DataSize; /* File Size (EOF) */
+	__le32 AllocationSize;
+	__le16 Attributes; /* verify not u32 */
+	__le32 EASize;
+	__u8 FileNameLength;	/* note: one byte here, unlike find_info_standard */
+	char FileName[1];
+} __packed;
+
+struct file_unix_info {	/* SMB_FIND_FILE_UNIX entry */
+	__le32 NextEntryOffset;
+	__u32 ResumeKey; /* as with FileIndex - no need to convert */
+	struct file_unix_basic_info basic;
+	char FileName[1];
+} __packed; /* level 0x202 */
+
+struct smb_com_trans2_sfi_req {	/* TRANS2_SET_FILE_INFORMATION request */
+	struct smb_hdr hdr;     /* wct = 15 */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand;      /* one setup word */
+	__le16 ByteCount;
+	__u8 Pad;
+	__u16 Pad1;
+	__u16 Fid;
+	__le16 InformationLevel;
+	__u16 Reserved4;
+} __packed;
+
+struct smb_com_trans2_sfi_rsp {	/* TRANS2_SET_FILE_INFORMATION response */
+	struct smb_hdr hdr;     /* wct = 10 + SetupCount */
+	struct trans2_resp t2;
+	__le16 ByteCount;
+	__u16 Reserved2;        /*
+				 * parameter word reserved -
+				 * present for infolevels > 100
+				 */
+} __packed;
+
+struct file_end_of_file_info {	/* levels 0x104 (set) / 0x106 (query) data */
+	__le64 FileSize;                /* offset to end of file */
+} __packed; /* size info, level 0x104 for set, 0x106 for query */
+
+struct smb_com_create_directory_req {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__le16 ByteCount;
+	__u8 BufferFormat;	/* 4 = ASCII */
+	unsigned char DirName[1];
+} __packed;
+
+struct smb_com_create_directory_rsp {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__le16 ByteCount;	/* bct = 0 */
+} __packed;
+
+struct smb_com_check_directory_req {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__le16 ByteCount;
+	__u8 BufferFormat;	/* 4 = ASCII */
+	unsigned char DirName[1];
+} __packed;
+
+struct smb_com_check_directory_rsp {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__le16 ByteCount;	/* bct = 0 */
+} __packed;
+
+struct smb_com_process_exit_rsp {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__le16 ByteCount;	/* bct = 0 */
+} __packed;
+
+struct smb_com_delete_directory_req {
+	struct smb_hdr hdr;     /* wct = 0 */
+	__le16 ByteCount;
+	__u8 BufferFormat;      /* 4 = ASCII */
+	unsigned char DirName[1];
+} __packed;
+
+struct smb_com_delete_directory_rsp {
+	struct smb_hdr hdr;     /* wct = 0 */
+	__le16 ByteCount;        /* bct = 0 */
+} __packed;
+
+struct smb_com_delete_file_req {
+	struct smb_hdr hdr;     /* wct = 1 */
+	__le16 SearchAttributes;
+	__le16 ByteCount;
+	__u8 BufferFormat;      /* 4 = ASCII */
+	unsigned char fileName[1];
+} __packed;
+
+struct smb_com_delete_file_rsp {
+	struct smb_hdr hdr;     /* wct = 0 */
+	__le16 ByteCount;        /* bct = 0 */
+} __packed;
+
+#define CREATE_HARD_LINK         0x103
+
+struct smb_com_nt_rename_req {  /* A5 - also used for create hardlink */
+	struct smb_hdr hdr;     /* wct = 4 */
+	__le16 SearchAttributes;        /* target file attributes */
+	__le16 Flags;           /* spec says Information Level */
+	__le32 ClusterCount;
+	__le16 ByteCount;
+	__u8 BufferFormat;      /* 4 = ASCII or Unicode */
+	unsigned char OldFileName[1];
+	/* followed by __u8 BufferFormat2 */
+	/* followed by NewFileName */
+} __packed;
+
+struct smb_com_query_information_req {
+	struct smb_hdr hdr;     /* wct = 0 */
+	__le16 ByteCount;       /* 1 + namelen + 1 */
+	__u8 BufferFormat;      /* 4 = ASCII */
+	unsigned char FileName[1];
+} __packed;
+
+struct smb_com_query_information_rsp {
+	struct smb_hdr hdr;     /* wct = 10 */
+	__le16 attr;
+	__le32  last_write_time;
+	__le32 size;
+	__u16  reserved[5];
+	__le16 ByteCount;       /* bcc = 0 */
+} __packed;
+
+struct smb_com_findclose_req {
+	struct smb_hdr hdr; /* wct = 1 */
+	__u16 FileID;
+	__le16 ByteCount;    /* 0 */
+} __packed;
+
+#define SMBOPEN_DISPOSITION_NONE        0
+#define SMBOPEN_LOCK_GRANTED            0x8000
+
+#define SMB_DA_ACCESS_READ              0
+#define SMB_DA_ACCESS_WRITE             0x0001
+#define SMB_DA_ACCESS_READ_WRITE        0x0002
+
+/*
+ * Flags on SMB open
+ */
+#define SMBOPEN_WRITE_THROUGH 0x4000
+#define SMBOPEN_DENY_ALL      0x0010
+#define SMBOPEN_DENY_WRITE    0x0020
+#define SMBOPEN_DENY_READ     0x0030
+#define SMBOPEN_DENY_NONE     0x0040
+#define SMBOPEN_SHARING_MODE  (SMBOPEN_DENY_ALL |	\
+				SMBOPEN_DENY_WRITE |	\
+				SMBOPEN_DENY_READ |	\
+				SMBOPEN_DENY_NONE)
+#define SMBOPEN_READ          0x0000
+#define SMBOPEN_WRITE         0x0001
+#define SMBOPEN_READWRITE     0x0002
+#define SMBOPEN_EXECUTE       0x0003
+
+#define SMBOPEN_OCREATE       0x0010
+#define SMBOPEN_OTRUNC        0x0002
+#define SMBOPEN_OAPPEND       0x0001
+
+/* format of legacy open request */
+struct smb_com_openx_req {
+	struct smb_hdr  hdr;    /* wct = 15 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 OpenFlags;
+	__le16 Mode;
+	__le16 Sattr; /* search attributes */
+	__le16 FileAttributes;  /* dos attrs */
+	__le32 CreateTime; /* os2 format */
+	__le16 OpenFunction;
+	__le32 EndOfFile;
+	__le32 Timeout;
+	__le32 Reserved;
+	__le16  ByteCount;  /* file name follows */
+	char   fileName[1];
+} __packed;
+
+struct smb_com_openx_rsp {
+	struct smb_hdr  hdr;    /* wct = 15 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u16  Fid;
+	__le16 FileAttributes;
+	__le32 LastWriteTime; /* os2 format */
+	__le32 EndOfFile;
+	__le16 Access;
+	__le16 FileType;
+	__le16 IPCState;
+	__le16 Action;
+	__u32  FileId;
+	__u16  Reserved;
+	__le16 ByteCount;
+} __packed;
+
+struct filesystem_alloc_info {
+	__le32 fsid;
+	__le32 SectorsPerAllocationUnit;
+	__le32 TotalAllocationUnits;
+	__le32 FreeAllocationUnits;
+	__le16  BytesPerSector;
+} __packed;
+
+struct file_allocation_info {
+	__le64 AllocationSize; /* Note old Samba srvr rounds this up too much */
+} __packed;      /* size used on disk: 0x103 for set, 0x105 for query */
+
+struct file_info_standard {
+	__le16 CreationDate; /* SMB Date see above */
+	__le16 CreationTime; /* SMB Time */
+	__le16 LastAccessDate;
+	__le16 LastAccessTime;
+	__le16 LastWriteDate;
+	__le16 LastWriteTime;
+	__le32 DataSize; /* File Size (EOF) */
+	__le32 AllocationSize;
+	__le16 Attributes; /* verify not u32 */
+	__le32 EASize;
+} __packed;  /* level 1 SetPath/FileInfo */
+
+#define CIFS_MF_SYMLINK_LINK_MAXLEN (1024)
+
+struct set_file_rename {
+	__le32 overwrite;   /* 1 = overwrite dest */
+	__u32 root_fid;   /* zero */
+	__le32 target_name_len;
+	char  target_name[0];  /* Must be unicode */
+} __packed;
+
+struct fea {
+	unsigned char EA_flags;
+	__u8 name_len;
+	__le16 value_len;
+	char name[1];
+	/* optionally followed by value */
+} __packed;
+
+struct fealist {
+	__le32 list_len;
+	__u8 list[1];
+} __packed;
+
+/* POSIX ACL set/query path info structures */
+#define CIFS_ACL_VERSION 1
+struct cifs_posix_ace { /* access control entry (ACE) */
+	__u8  cifs_e_tag;
+	__u8  cifs_e_perm;
+	__le64 cifs_uid; /* or gid */
+} __packed;
+
+struct cifs_posix_acl { /* access control list  (ACL) */
+	__le16  version;
+	__le16  access_entry_count;  /* access ACL - count of entries */
+	__le16  default_entry_count; /* default ACL - count of entries */
+	struct cifs_posix_ace ace_array[0];
+	/*
+	 * followed by
+	 * struct cifs_posix_ace default_ace_array[]
+	 */
+} __packed;  /* level 0x204 */
+
+struct smb_com_setattr_req {
+	struct smb_hdr hdr; /* wct = 8 */
+	__le16 attr;
+	__le32 LastWriteTime;
+	__le16 reserved[5]; /* must be zero */
+	__le16 ByteCount;
+	__u8   BufferFormat; /* 4 = ASCII */
+	unsigned char fileName[1];
+} __packed;
+
+struct smb_com_setattr_rsp {
+	struct smb_hdr hdr;     /* wct = 0 */
+	__le16 ByteCount;        /* bct = 0 */
+} __packed;
+
+#ifdef CONFIG_SMB_INSECURE_SERVER
+extern int init_smb1_server(struct ksmbd_conn *conn);
+#endif
+
+/* function prototypes */
+extern int init_smb_rsp_hdr(struct ksmbd_work *work);
+extern u16 get_smb_cmd_val(struct ksmbd_work *work);
+extern void set_smb_rsp_status(struct ksmbd_work *work, __le32 err);
+extern int smb_allocate_rsp_buf(struct ksmbd_work *work);
+extern bool smb1_is_sign_req(struct ksmbd_work *work, unsigned int command);
+extern int smb1_check_sign_req(struct ksmbd_work *work);
+extern void smb1_set_sign_rsp(struct ksmbd_work *work);
+extern int smb_check_user_session(struct ksmbd_work *work);
+extern int smb_get_ksmbd_tcon(struct ksmbd_work *work);
+extern int ksmbd_smb1_check_message(struct ksmbd_work *work);
+
+/* smb1 command handlers */
+extern int smb_rename(struct ksmbd_work *work);
+extern int smb_negotiate_request(struct ksmbd_work *work);
+#ifdef CONFIG_SMB_INSECURE_SERVER
+extern int smb_handle_negotiate(struct ksmbd_work *work);
+#endif
+extern int smb_session_setup_andx(struct ksmbd_work *work);
+extern int smb_tree_connect_andx(struct ksmbd_work *work);
+extern int smb_trans2(struct ksmbd_work *work);
+extern int smb_nt_create_andx(struct ksmbd_work *work);
+extern int smb_trans(struct ksmbd_work *work);
+extern int smb_locking_andx(struct ksmbd_work *work);
+extern int smb_close(struct ksmbd_work *work);
+extern int smb_read_andx(struct ksmbd_work *work);
+extern int smb_tree_disconnect(struct ksmbd_work *work);
+extern int smb_session_disconnect(struct ksmbd_work *work);
+extern int smb_write_andx(struct ksmbd_work *work);
+extern int smb_echo(struct ksmbd_work *work);
+extern int smb_flush(struct ksmbd_work *work);
+extern int smb_mkdir(struct ksmbd_work *work);
+extern int smb_rmdir(struct ksmbd_work *work);
+extern int smb_unlink(struct ksmbd_work *work);
+extern int smb_nt_cancel(struct ksmbd_work *work);
+extern int smb_nt_rename(struct ksmbd_work *work);
+extern int smb_query_info(struct ksmbd_work *work);
+extern int smb_closedir(struct ksmbd_work *work);
+extern int smb_open_andx(struct ksmbd_work *work);
+extern int smb_write(struct ksmbd_work *work);
+extern int smb_setattr(struct ksmbd_work *work);
+extern int smb_query_information_disk(struct ksmbd_work *work);
+extern int smb_checkdir(struct ksmbd_work *work);
+extern int smb_process_exit(struct ksmbd_work *work);
+#endif /* __SMB1PDU_H */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/fs/smb/server/smberr.h	2023-11-07 13:38:44.046256254 +0100
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/*
+ *   Copyright (c) International Business Machines  Corp., 2002,2004
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   See Error Codes section of the SNIA CIFS Specification
+ *   for more information
+ */
+#ifndef __KSMBD_SMBERR_H
+#define __KSMBD_SMBERR_H
+
+#define SUCCESS	0x00	/* The request was successful. */
+#define ERRDOS	0x01	/* Error is from the core DOS operating system set */
+#define ERRSRV	0x02	/* Error is generated by the file server daemon */
+#define ERRHRD	0x03	/* Error is a hardware error. */
+#define ERRCMD	0xFF	/* Command was not in the "SMB" format. */
+
+/* The following error codes may be generated with the SUCCESS error class.*/
+
+/*#define SUCCESS	0	The request was successful. */
+
+/* The following error codes may be generated with the ERRDOS error class.*/
+
+#define ERRbadfunc		1	/*
+					 * Invalid function. The server did not
+					 * recognize or could not perform a
+					 * system call generated by the server,
+					 * e.g., set the DIRECTORY attribute on
+					 * a data file, invalid seek mode.
+					 */
+#define ERRbadfile		2	/*
+					 * File not found. The last component
+					 * of a file's pathname could not be
+					 * found.
+					 */
+#define ERRbadpath		3	/*
+					 * Directory invalid. A directory
+					 * component in a pathname could not be
+					 * found.
+					 */
+#define ERRnofids		4	/*
+					 * Too many open files. The server has
+					 * no file handles available.
+					 */
+#define ERRnoaccess		5	/*
+					 * Access denied, the client's context
+					 * does not permit the requested
+					 * function. This includes the
+					 * following conditions: invalid rename
+					 * command, write to Fid open for read
+					 * only, read on Fid open for write
+					 * only, attempt to delete a non-empty
+					 * directory
+					 */
+#define ERRbadfid		6	/*
+					 * Invalid file handle. The file handle
+					 * specified was not recognized by the
+					 * server.
+					 */
+#define ERRbadmcb		7	/* Memory control blocks destroyed. */
+#define ERRnomem		8	/*
+					 * Insufficient server memory to
+					 * perform the requested function.
+					 */
+#define ERRbadmem		9	/* Invalid memory block address. */
+#define ERRbadenv		10	/* Invalid environment. */
+#define ERRbadformat		11	/* Invalid format. */
+#define ERRbadaccess		12	/* Invalid open mode. */
+#define ERRbaddata		13	/*
+					 * Invalid data (generated only by
+					 * IOCTL calls within the server).
+					 */
+#define ERRbaddrive		15	/* Invalid drive specified. */
+#define ERRremcd		16	/*
+					 * A Delete Directory request attempted
+					 * to remove the server's current
+					 * directory.
+					 */
+#define ERRdiffdevice		17	/*
+					 * Not same device (e.g., a cross
+					 * volume rename was attempted)
+					 */
+#define ERRnofiles		18	/*
+					 * A File Search command can find no
+					 * more files matching the specified
+					 * criteria.
+					 */
+#define ERRwriteprot		19	/* media is write protected */
+#define ERRgeneral		31
+#define ERRbadshare		32	/*
+					 * The sharing mode specified for an
+					 * Open conflicts with existing FIDs on
+					 * the file.
+					 */
+#define ERRlock			33	/*
+					 * A Lock request conflicted with an
+					 * existing lock or specified an
+					 * invalid mode, or an Unlock request
+					 * attempted to remove a lock held by
+					 * another process.
+					 */
+#define ERRunsup		50
+#define ERRnosuchshare		67
+#define ERRfilexists		80	/*
+					 * The file named in the request
+					 * already exists.
+					 */
+#define ERRinvparm		87
+#define ERRdiskfull		112
+#define ERRinvname		123
+#define ERRinvlevel		124
+#define ERRdirnotempty		145
+#define ERRnotlocked		158
+#define ERRcancelviolation	173
+#define ERRnoatomiclocks	174
+#define ERRalreadyexists	183
+#define ERRbadpipe		230
+#define ERRpipebusy		231
+#define ERRpipeclosing		232
+#define ERRnotconnected		233
+#define ERRmoredata		234
+#define ERReasnotsupported	282
+#define ErrQuota		0x200	/*
+					 * The operation would cause a quota
+					 * limit to be exceeded.
+					 */
+#define ErrNotALink		0x201	/*
+					 * A link operation was performed on a
+					 * pathname that was not a link.
+					 */
+
+/*
+ * Below errors are used internally (do not come over the wire) for passthrough
+ * from STATUS codes to POSIX only
+ */
+#define ERRsymlink              0xFFFD
+#define ErrTooManyLinks         0xFFFE
+
+/* Following error codes may be generated with the ERRSRV error class.*/
+
+#define ERRerror		1	/*
+					 * Non-specific error code. It is
+					 * returned under the following
+					 * conditions: resource other than disk
+					 * space exhausted (e.g. TIDs), first
+					 * SMB command was not negotiate,
+					 * multiple negotiates attempted, and
+					 * internal server error.
+					 */
+#define ERRbadpw		2	/*
+					 * Bad password - name/password pair in
+					 * a TreeConnect or Session Setup are
+					 * invalid.
+					 */
+#define ERRbadtype		3	/*
+					 * used for indicating DFS referral
+					 * needed
+					 */
+#define ERRaccess		4	/*
+					 * The client does not have the
+					 * necessary access rights within the
+					 * specified context for requested
+					 * function.
+					 */
+#define ERRinvtid		5	/*
+					 * The Tid specified in a command was
+					 * invalid.
+					 */
+#define ERRinvnetname		6	/*
+					 * Invalid network name in tree
+					 * connect.
+					 */
+#define ERRinvdevice		7	/*
+					 * Invalid device - printer request
+					 * made to non-printer connection or
+					 * non-printer request made to printer
+					 * connection.
+					 */
+#define ERRqfull		49	/*
+					 * Print queue full (files) -- returned
+					 * by open print file.
+					 */
+#define ERRqtoobig		50	/* Print queue full -- no space. */
+#define ERRqeof			51	/* EOF on print queue dump */
+#define ERRinvpfid		52	/* Invalid print file FID. */
+#define ERRsmbcmd		64	/*
+					 * The server did not recognize the
+					 * command received.
+					 */
+#define ERRsrverror		65	/*
+					 * The server encountered an internal
+					 * error, e.g., system file
+					 * unavailable.
+					 */
+#define ERRbadBID		66	/* (obsolete) */
+#define ERRfilespecs		67	/*
+					 * The Fid and pathname parameters
+					 * contained an invalid combination of
+					 * values.
+					 */
+#define ERRbadLink		68	/* (obsolete) */
+#define ERRbadpermits		69	/*
+					 * The access permissions specified for
+					 * a file or directory are not a valid
+					 * combination.
+					 */
+#define ERRbadPID		70
+#define ERRsetattrmode		71	/* attribute (mode) is invalid */
+#define ERRpaused		81	/* Server is paused */
+#define ERRmsgoff		82	/* reserved - messaging off */
+#define ERRnoroom		83	/* reserved - no room for message */
+#define ERRrmuns		87	/* reserved - too many remote names */
+#define ERRtimeout		88	/* operation timed out */
+#define ERRnoresource		89	/* No resources available for request */
+#define ERRtoomanyuids		90	/*
+					 * Too many UIDs active on this session
+					 */
+#define ERRbaduid		91	/*
+					 * The UID is not known as a valid user
+					 */
+#define ERRusempx		250	/* temporarily unable to use raw */
+#define ERRusestd		251	/*
+					 * temporarily unable to use either raw
+					 * or mpx
+					 */
+#define ERR_NOTIFY_ENUM_DIR	1024
+#define ERRnoSuchUser		2238	/* user account does not exist */
+#define ERRaccountexpired	2239
+#define ERRbadclient		2240	/* can not logon from this client */
+#define ERRbadLogonTime		2241	/* logon hours do not allow this */
+#define ERRpasswordExpired	2242
+#define ERRnetlogonNotStarted	2455
+#define ERRnosupport		0xFFFF
+
+#endif /* __KSMBD_SMBERR_H */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/dt-bindings/clock/qcom,nsscc-ipq9574.h	2023-05-22 20:30:14.549854255 +0200
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_IPQ_NSSCC_9048_H
+#define _DT_BINDINGS_CLOCK_IPQ_NSSCC_9048_H
+
+#define NSS_CC_CE_APB_CLK					0
+#define NSS_CC_CE_AXI_CLK					1
+#define NSS_CC_CE_CLK_SRC					2
+#define NSS_CC_CFG_CLK_SRC					3
+#define NSS_CC_CLC_AXI_CLK					4
+#define NSS_CC_CLC_CLK_SRC					5
+#define NSS_CC_CRYPTO_CLK					6
+#define NSS_CC_CRYPTO_CLK_SRC					7
+#define NSS_CC_CRYPTO_PPE_CLK					8
+#define NSS_CC_HAQ_AHB_CLK					9
+#define NSS_CC_HAQ_AXI_CLK					10
+#define NSS_CC_HAQ_CLK_SRC					11
+#define NSS_CC_IMEM_AHB_CLK					12
+#define NSS_CC_IMEM_CLK_SRC					13
+#define NSS_CC_IMEM_QSB_CLK					14
+#define NSS_CC_INT_CFG_CLK_SRC					15
+#define NSS_CC_NSS_CSR_CLK					16
+#define NSS_CC_NSSNOC_CE_APB_CLK				17
+#define NSS_CC_NSSNOC_CE_AXI_CLK				18
+#define NSS_CC_NSSNOC_CLC_AXI_CLK				19
+#define NSS_CC_NSSNOC_CRYPTO_CLK				20
+#define NSS_CC_NSSNOC_HAQ_AHB_CLK				21
+#define NSS_CC_NSSNOC_HAQ_AXI_CLK				22
+#define NSS_CC_NSSNOC_IMEM_AHB_CLK				23
+#define NSS_CC_NSSNOC_IMEM_QSB_CLK				24
+#define NSS_CC_NSSNOC_NSS_CSR_CLK				25
+#define NSS_CC_NSSNOC_PPE_CFG_CLK				26
+#define NSS_CC_NSSNOC_PPE_CLK					27
+#define NSS_CC_NSSNOC_UBI32_AHB0_CLK				28
+#define NSS_CC_NSSNOC_UBI32_AXI0_CLK				29
+#define NSS_CC_NSSNOC_UBI32_INT0_AHB_CLK			30
+#define NSS_CC_NSSNOC_UBI32_NC_AXI0_1_CLK			31
+#define NSS_CC_NSSNOC_UBI32_NC_AXI0_CLK				32
+#define NSS_CC_PORT1_MAC_CLK					33
+#define NSS_CC_PORT1_RX_CLK					34
+#define NSS_CC_PORT1_RX_CLK_SRC					35
+#define NSS_CC_PORT1_RX_DIV_CLK_SRC				36
+#define NSS_CC_PORT1_TX_CLK					37
+#define NSS_CC_PORT1_TX_CLK_SRC					38
+#define NSS_CC_PORT1_TX_DIV_CLK_SRC				39
+#define NSS_CC_PORT2_MAC_CLK					40
+#define NSS_CC_PORT2_RX_CLK					41
+#define NSS_CC_PORT2_RX_CLK_SRC					42
+#define NSS_CC_PORT2_RX_DIV_CLK_SRC				43
+#define NSS_CC_PORT2_TX_CLK					44
+#define NSS_CC_PORT2_TX_CLK_SRC					45
+#define NSS_CC_PORT2_TX_DIV_CLK_SRC				46
+#define NSS_CC_PORT3_MAC_CLK					47
+#define NSS_CC_PORT3_RX_CLK					48
+#define NSS_CC_PORT3_RX_CLK_SRC					49
+#define NSS_CC_PORT3_RX_DIV_CLK_SRC				50
+#define NSS_CC_PORT3_TX_CLK					51
+#define NSS_CC_PORT3_TX_CLK_SRC					52
+#define NSS_CC_PORT3_TX_DIV_CLK_SRC				53
+#define NSS_CC_PORT4_MAC_CLK					54
+#define NSS_CC_PORT4_RX_CLK					55
+#define NSS_CC_PORT4_RX_CLK_SRC					56
+#define NSS_CC_PORT4_RX_DIV_CLK_SRC				57
+#define NSS_CC_PORT4_TX_CLK					58
+#define NSS_CC_PORT4_TX_CLK_SRC					59
+#define NSS_CC_PORT4_TX_DIV_CLK_SRC				60
+#define NSS_CC_PORT5_MAC_CLK					61
+#define NSS_CC_PORT5_RX_CLK					62
+#define NSS_CC_PORT5_RX_CLK_SRC					63
+#define NSS_CC_PORT5_RX_DIV_CLK_SRC				64
+#define NSS_CC_PORT5_TX_CLK					65
+#define NSS_CC_PORT5_TX_CLK_SRC					66
+#define NSS_CC_PORT5_TX_DIV_CLK_SRC				67
+#define NSS_CC_PORT6_MAC_CLK					68
+#define NSS_CC_PORT6_RX_CLK					69
+#define NSS_CC_PORT6_RX_CLK_SRC					70
+#define NSS_CC_PORT6_RX_DIV_CLK_SRC				71
+#define NSS_CC_PORT6_TX_CLK					72
+#define NSS_CC_PORT6_TX_CLK_SRC					73
+#define NSS_CC_PORT6_TX_DIV_CLK_SRC				74
+#define NSS_CC_PPE_CLK_SRC					75
+#define NSS_CC_PPE_EDMA_CFG_CLK					76
+#define NSS_CC_PPE_EDMA_CLK					77
+#define NSS_CC_PPE_SWITCH_BTQ_CLK				78
+#define NSS_CC_PPE_SWITCH_CFG_CLK				79
+#define NSS_CC_PPE_SWITCH_CLK					80
+#define NSS_CC_PPE_SWITCH_IPE_CLK				81
+#define NSS_CC_UBI0_CLK_SRC					82
+#define NSS_CC_UBI0_DIV_CLK_SRC					83
+#define NSS_CC_UBI1_CLK_SRC					84
+#define NSS_CC_UBI1_DIV_CLK_SRC					85
+#define NSS_CC_UBI2_CLK_SRC					86
+#define NSS_CC_UBI2_DIV_CLK_SRC					87
+#define NSS_CC_UBI32_AHB0_CLK					88
+#define NSS_CC_UBI32_AHB1_CLK					89
+#define NSS_CC_UBI32_AHB2_CLK					90
+#define NSS_CC_UBI32_AHB3_CLK					91
+#define NSS_CC_UBI32_AXI0_CLK					92
+#define NSS_CC_UBI32_AXI1_CLK					93
+#define NSS_CC_UBI32_AXI2_CLK					94
+#define NSS_CC_UBI32_AXI3_CLK					95
+#define NSS_CC_UBI32_CORE0_CLK					96
+#define NSS_CC_UBI32_CORE1_CLK					97
+#define NSS_CC_UBI32_CORE2_CLK					98
+#define NSS_CC_UBI32_CORE3_CLK					99
+#define NSS_CC_UBI32_INTR0_AHB_CLK				100
+#define NSS_CC_UBI32_INTR1_AHB_CLK				101
+#define NSS_CC_UBI32_INTR2_AHB_CLK				102
+#define NSS_CC_UBI32_INTR3_AHB_CLK				103
+#define NSS_CC_UBI32_NC_AXI0_CLK				104
+#define NSS_CC_UBI32_NC_AXI1_CLK				105
+#define NSS_CC_UBI32_NC_AXI2_CLK				106
+#define NSS_CC_UBI32_NC_AXI3_CLK				107
+#define NSS_CC_UBI32_UTCM0_CLK					108
+#define NSS_CC_UBI32_UTCM1_CLK					109
+#define NSS_CC_UBI32_UTCM2_CLK					110
+#define NSS_CC_UBI32_UTCM3_CLK					111
+#define NSS_CC_UBI3_CLK_SRC					112
+#define NSS_CC_UBI3_DIV_CLK_SRC					113
+#define NSS_CC_UBI_AXI_CLK_SRC					114
+#define NSS_CC_UBI_NC_AXI_BFDCD_CLK_SRC				115
+#define NSS_CC_UNIPHY_PORT1_RX_CLK				116
+#define NSS_CC_UNIPHY_PORT1_TX_CLK				117
+#define NSS_CC_UNIPHY_PORT2_RX_CLK				118
+#define NSS_CC_UNIPHY_PORT2_TX_CLK				119
+#define NSS_CC_UNIPHY_PORT3_RX_CLK				120
+#define NSS_CC_UNIPHY_PORT3_TX_CLK				121
+#define NSS_CC_UNIPHY_PORT4_RX_CLK				122
+#define NSS_CC_UNIPHY_PORT4_TX_CLK				123
+#define NSS_CC_UNIPHY_PORT5_RX_CLK				124
+#define NSS_CC_UNIPHY_PORT5_TX_CLK				125
+#define NSS_CC_UNIPHY_PORT6_RX_CLK				126
+#define NSS_CC_UNIPHY_PORT6_TX_CLK				127
+#define NSS_CC_XGMAC0_PTP_REF_CLK				128
+#define NSS_CC_XGMAC0_PTP_REF_DIV_CLK_SRC			129
+#define NSS_CC_XGMAC1_PTP_REF_CLK				130
+#define NSS_CC_XGMAC1_PTP_REF_DIV_CLK_SRC			131
+#define NSS_CC_XGMAC2_PTP_REF_CLK				132
+#define NSS_CC_XGMAC2_PTP_REF_DIV_CLK_SRC			133
+#define NSS_CC_XGMAC3_PTP_REF_CLK				134
+#define NSS_CC_XGMAC3_PTP_REF_DIV_CLK_SRC			135
+#define NSS_CC_XGMAC4_PTP_REF_CLK				136
+#define NSS_CC_XGMAC4_PTP_REF_DIV_CLK_SRC			137
+#define NSS_CC_XGMAC5_PTP_REF_CLK				138
+#define NSS_CC_XGMAC5_PTP_REF_DIV_CLK_SRC			139
+#define UBI32_PLL						140
+#define UBI32_PLL_MAIN						141
+
+#endif
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/dt-bindings/clock/qcom,uniphycc-ipq9574.h	2023-05-22 20:30:14.549854255 +0200
@@ -0,0 +1,11 @@
+#ifndef _DT_BINDINGS_CLOCK_IPQ_UNIPHYCC_95XX_H
+#define _DT_BINDINGS_CLOCK_IPQ_UNIPHYCC_95XX_H
+
+#define UNIPHY0_GCC_RX_CLK				0
+#define UNIPHY0_GCC_TX_CLK				1
+#define UNIPHY1_GCC_RX_CLK				2
+#define UNIPHY1_GCC_TX_CLK				3
+#define UNIPHY2_GCC_RX_CLK				4
+#define UNIPHY2_GCC_TX_CLK				5
+
+#endif
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/dt-bindings/net/realtek-phy-rtl8211f.h	2023-03-09 19:50:18.846938506 +0100
@@ -0,0 +1,19 @@
+/*
+ * Device Tree constants for Realtek rtl8211f PHY
+ *
+ * Author: Remi Pommarel
+ *
+ * License: GPL
+ * Copyright (c) 2017 Remi Pommarel
+ */
+
+#ifndef _DT_BINDINGS_RTL_8211F_H
+#define _DT_BINDINGS_RTL_8211F_H
+
+#define RTL8211F_LED_MODE_10M			0x1
+#define RTL8211F_LED_MODE_100M			0x2
+#define RTL8211F_LED_MODE_1000M			0x8
+#define RTL8211F_LED_MODE_ACT			0x10
+
+#endif
+
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/dt-bindings/reset/qcom,nsscc-ipq9574.h	2023-05-22 20:30:14.549854255 +0200
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_NSSCC_9048_H
+#define _DT_BINDINGS_RESET_IPQ_NSSCC_9048_H
+
+#define NSS_CC_CE_BCR			1
+#define NSS_CC_CLC_BCR			2
+#define NSS_CC_EIP197_BCR		3
+#define NSS_CC_HAQ_BCR			4
+#define NSS_CC_IMEM_BCR			5
+#define NSS_CC_MAC_BCR			6
+#define NSS_CC_PPE_BCR			7
+#define NSS_CC_UBI_BCR			8
+#define NSS_CC_UNIPHY_BCR		9
+#define UBI3_CLKRST_CLAMP_ENABLE	10
+#define UBI3_CORE_CLAMP_ENABLE		11
+#define UBI2_CLKRST_CLAMP_ENABLE	12
+#define UBI2_CORE_CLAMP_ENABLE		13
+#define UBI1_CLKRST_CLAMP_ENABLE	14
+#define UBI1_CORE_CLAMP_ENABLE		15
+#define UBI0_CLKRST_CLAMP_ENABLE	16
+#define UBI0_CORE_CLAMP_ENABLE		17
+#define NSSNOC_NSS_CSR_ARES		18
+#define NSS_CSR_ARES			19
+#define PPE_BTQ_ARES			20
+#define PPE_IPE_ARES			21
+#define PPE_ARES			22
+#define PPE_CFG_ARES			23
+#define PPE_EDMA_ARES			24
+#define PPE_EDMA_CFG_ARES		25
+#define CRY_PPE_ARES			26
+#define NSSNOC_PPE_ARES			27
+#define NSSNOC_PPE_CFG_ARES		28
+#define PORT1_MAC_ARES			29
+#define PORT2_MAC_ARES			30
+#define PORT3_MAC_ARES			31
+#define PORT4_MAC_ARES			32
+#define PORT5_MAC_ARES			33
+#define PORT6_MAC_ARES			34
+#define XGMAC0_PTP_REF_ARES		35
+#define XGMAC1_PTP_REF_ARES		36
+#define XGMAC2_PTP_REF_ARES		37
+#define XGMAC3_PTP_REF_ARES		38
+#define XGMAC4_PTP_REF_ARES		39
+#define XGMAC5_PTP_REF_ARES		40
+#define HAQ_AHB_ARES			41
+#define HAQ_AXI_ARES			42
+#define NSSNOC_HAQ_AHB_ARES		43
+#define NSSNOC_HAQ_AXI_ARES		44
+#define CE_APB_ARES			45
+#define CE_AXI_ARES			46
+#define NSSNOC_CE_APB_ARES		47
+#define NSSNOC_CE_AXI_ARES		48
+#define CRYPTO_ARES			49
+#define NSSNOC_CRYPTO_ARES		50
+#define NSSNOC_NC_AXI0_1_ARES		51
+#define UBI0_CORE_ARES			52
+#define UBI1_CORE_ARES			53
+#define UBI2_CORE_ARES			54
+#define UBI3_CORE_ARES			55
+#define NC_AXI0_ARES			56
+#define UTCM0_ARES			57
+#define NC_AXI1_ARES			58
+#define UTCM1_ARES			59
+#define NC_AXI2_ARES			60
+#define UTCM2_ARES			61
+#define NC_AXI3_ARES			62
+#define UTCM3_ARES			63
+#define NSSNOC_NC_AXI0_ARES		64
+#define AHB0_ARES			65
+#define INTR0_AHB_ARES			66
+#define AHB1_ARES			67
+#define INTR1_AHB_ARES			68
+#define AHB2_ARES			69
+#define INTR2_AHB_ARES			70
+#define AHB3_ARES			71
+#define INTR3_AHB_ARES			72
+#define NSSNOC_AHB0_ARES		73
+#define NSSNOC_INT0_AHB_ARES		74
+#define AXI0_ARES			75
+#define AXI1_ARES			76
+#define AXI2_ARES			77
+#define AXI3_ARES			78
+#define NSSNOC_AXI0_ARES		79
+#define IMEM_QSB_ARES			80
+#define NSSNOC_IMEM_QSB_ARES		81
+#define IMEM_AHB_ARES			82
+#define NSSNOC_IMEM_AHB_ARES		83
+#define UNIPHY_PORT1_RX_ARES		84
+#define UNIPHY_PORT1_TX_ARES		85
+#define UNIPHY_PORT2_RX_ARES		86
+#define UNIPHY_PORT2_TX_ARES		87
+#define UNIPHY_PORT3_RX_ARES		88
+#define UNIPHY_PORT3_TX_ARES		89
+#define UNIPHY_PORT4_RX_ARES		90
+#define UNIPHY_PORT4_TX_ARES		91
+#define UNIPHY_PORT5_RX_ARES		92
+#define UNIPHY_PORT5_TX_ARES		93
+#define UNIPHY_PORT6_RX_ARES		94
+#define UNIPHY_PORT6_TX_ARES		95
+#define PORT1_RX_ARES			96
+#define PORT1_TX_ARES			97
+#define PORT2_RX_ARES			98
+#define PORT2_TX_ARES			99
+#define PORT3_RX_ARES			100
+#define PORT3_TX_ARES			101
+#define PORT4_RX_ARES			102
+#define PORT4_TX_ARES			103
+#define PORT5_RX_ARES			104
+#define PORT5_TX_ARES			105
+#define PORT6_RX_ARES			106
+#define PORT6_TX_ARES			107
+#define PPE_FULL_RESET			108
+#define UNIPHY0_SOFT_RESET		109
+#define UNIPHY1_SOFT_RESET		110
+#define UNIPHY2_SOFT_RESET		111
+#define UNIPHY_PORT1_ARES		112
+#define UNIPHY_PORT2_ARES		113
+#define UNIPHY_PORT3_ARES		114
+#define UNIPHY_PORT4_ARES		115
+#define UNIPHY_PORT5_ARES		116
+#define UNIPHY_PORT6_ARES		117
+#define NSSPORT1_RESET			118
+#define NSSPORT2_RESET 			119
+#define NSSPORT3_RESET			120
+#define NSSPORT4_RESET			121
+#define NSSPORT5_RESET			122
+#define NSSPORT6_RESET			123
+#define EDMA_HW_RESET			124
+
+#endif
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/linux/diagchar.h	2023-03-15 19:52:23.541979840 +0100
@@ -0,0 +1,880 @@
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGCHAR_SHARED
+#define DIAGCHAR_SHARED
+
+#define MSG_MASKS_TYPE		0x00000001
+#define LOG_MASKS_TYPE		0x00000002
+#define EVENT_MASKS_TYPE	0x00000004
+#define PKT_TYPE		0x00000008
+#define DEINIT_TYPE		0x00000010
+#define USER_SPACE_DATA_TYPE	0x00000020
+#define DCI_DATA_TYPE		0x00000040
+#define USER_SPACE_RAW_DATA_TYPE	0x00000080
+#define DCI_LOG_MASKS_TYPE	0x00000100
+#define DCI_EVENT_MASKS_TYPE	0x00000200
+#define DCI_PKT_TYPE		0x00000400
+#define HDLC_SUPPORT_TYPE	0x00001000
+
+#define USB_MODE			1
+#define MEMORY_DEVICE_MODE		2
+#define NO_LOGGING_MODE			3
+#define UART_MODE			4
+#define SOCKET_MODE			5
+#define CALLBACK_MODE			6
+
+/* different values that go in for diag_data_type */
+
+#define DATA_TYPE_EVENT         	0
+#define DATA_TYPE_F3            	1
+#define DATA_TYPE_LOG           	2
+#define DATA_TYPE_RESPONSE      	3
+#define DATA_TYPE_DELAYED_RESPONSE	4
+#define DATA_TYPE_DCI_LOG		0x00000100
+#define DATA_TYPE_DCI_EVENT		0x00000200
+
+/* Different IOCTL values */
+#define DIAG_IOCTL_COMMAND_REG  	0
+#define DIAG_IOCTL_COMMAND_DEREG	1
+#define DIAG_IOCTL_SWITCH_LOGGING	7
+#define DIAG_IOCTL_GET_DELAYED_RSP_ID 	8
+#define DIAG_IOCTL_LSM_DEINIT		9
+#define DIAG_IOCTL_DCI_INIT		20
+#define DIAG_IOCTL_DCI_DEINIT		21
+#define DIAG_IOCTL_DCI_SUPPORT		22
+#define DIAG_IOCTL_DCI_REG		23
+#define DIAG_IOCTL_DCI_STREAM_INIT	24
+#define DIAG_IOCTL_DCI_HEALTH_STATS	25
+#define DIAG_IOCTL_DCI_LOG_STATUS	26
+#define DIAG_IOCTL_DCI_EVENT_STATUS	27
+#define DIAG_IOCTL_DCI_CLEAR_LOGS	28
+#define DIAG_IOCTL_DCI_CLEAR_EVENTS	29
+#define DIAG_IOCTL_REMOTE_DEV		32
+#define DIAG_IOCTL_VOTE_REAL_TIME	33
+#define DIAG_IOCTL_GET_REAL_TIME	34
+#define DIAG_IOCTL_PERIPHERAL_BUF_CONFIG	35
+#define DIAG_IOCTL_PERIPHERAL_BUF_DRAIN		36
+#define DIAG_IOCTL_REGISTER_CALLBACK	37
+#define DIAG_IOCTL_HDLC_TOGGLE	38
+
+/* PC Tools IDs */
+#define APQ8060_TOOLS_ID	4062
+#define AO8960_TOOLS_ID		4064
+#define APQ8064_TOOLS_ID	4072
+#define MSM8625_TOOLS_ID	4075
+#define MSM8930_TOOLS_ID	4076
+#define MSM8630_TOOLS_ID	4077
+#define MSM8230_TOOLS_ID	4078
+#define APQ8030_TOOLS_ID	4079
+#define MSM8627_TOOLS_ID	4080
+#define MSM8227_TOOLS_ID	4081
+#define MSM8974_TOOLS_ID	4083
+#define APQ8074_TOOLS_ID	4090
+#define MSM8916_TOOLS_ID	4094
+#define APQ8084_TOOLS_ID	4095
+#define MSM8994_TOOLS_ID	4097
+#define MSM8939_TOOLS_ID	4103
+#define APQ8026_TOOLS_ID	4104
+#define MSM8909_TOOLS_ID	4108
+#define MSM8992_TOOLS_ID	4111
+#define MSM8952_TOOLS_ID	4110
+#define MSM_8996_TOOLS_ID	4112
+
+#define MSG_MASK_0			(0x00000001)
+#define MSG_MASK_1			(0x00000002)
+#define MSG_MASK_2			(0x00000004)
+#define MSG_MASK_3			(0x00000008)
+#define MSG_MASK_4			(0x00000010)
+#define MSG_MASK_5			(0x00000020)
+#define MSG_MASK_6			(0x00000040)
+#define MSG_MASK_7			(0x00000080)
+#define MSG_MASK_8			(0x00000100)
+#define MSG_MASK_9			(0x00000200)
+#define MSG_MASK_10			(0x00000400)
+#define MSG_MASK_11			(0x00000800)
+#define MSG_MASK_12			(0x00001000)
+#define MSG_MASK_13			(0x00002000)
+#define MSG_MASK_14			(0x00004000)
+#define MSG_MASK_15			(0x00008000)
+#define MSG_MASK_16			(0x00010000)
+#define MSG_MASK_17			(0x00020000)
+#define MSG_MASK_18			(0x00040000)
+#define MSG_MASK_19			(0x00080000)
+#define MSG_MASK_20			(0x00100000)
+#define MSG_MASK_21			(0x00200000)
+#define MSG_MASK_22			(0x00400000)
+#define MSG_MASK_23			(0x00800000)
+#define MSG_MASK_24			(0x01000000)
+#define MSG_MASK_25			(0x02000000)
+#define MSG_MASK_26			(0x04000000)
+#define MSG_MASK_27			(0x08000000)
+#define MSG_MASK_28			(0x10000000)
+#define MSG_MASK_29			(0x20000000)
+#define MSG_MASK_30			(0x40000000)
+#define MSG_MASK_31			(0x80000000)
+
+/*  These masks are to be used for support of all legacy messages in the sw.
+The user does not need to remember the names as they will be embedded in
+the appropriate macros. */
+#define MSG_LEGACY_LOW			MSG_MASK_0
+#define MSG_LEGACY_MED			MSG_MASK_1
+#define MSG_LEGACY_HIGH			MSG_MASK_2
+#define MSG_LEGACY_ERROR		MSG_MASK_3
+#define MSG_LEGACY_FATAL		MSG_MASK_4
+
+/* Legacy Message Priorities */
+#define MSG_LVL_FATAL			(MSG_LEGACY_FATAL)
+#define MSG_LVL_ERROR			(MSG_LEGACY_ERROR | MSG_LVL_FATAL)
+#define MSG_LVL_HIGH			(MSG_LEGACY_HIGH | MSG_LVL_ERROR)
+#define MSG_LVL_MED			(MSG_LEGACY_MED | MSG_LVL_HIGH)
+#define MSG_LVL_LOW			(MSG_LEGACY_LOW | MSG_LVL_MED)
+
+#define MSG_LVL_NONE			0
+
+/* This needs to be modified manually now, when we add
+ a new RANGE of SSIDs to the msg_mask_tbl */
+#define MSG_MASK_TBL_CNT		25
+#define APPS_EVENT_LAST_ID		0x0B14
+
+#define MSG_SSID_0			0
+#define MSG_SSID_0_LAST			118
+#define MSG_SSID_1			500
+#define MSG_SSID_1_LAST			506
+#define MSG_SSID_2			1000
+#define MSG_SSID_2_LAST			1007
+#define MSG_SSID_3			2000
+#define MSG_SSID_3_LAST			2008
+#define MSG_SSID_4			3000
+#define MSG_SSID_4_LAST			3014
+#define MSG_SSID_5			4000
+#define MSG_SSID_5_LAST			4010
+#define MSG_SSID_6			4500
+#define MSG_SSID_6_LAST			4573
+#define MSG_SSID_7			4600
+#define MSG_SSID_7_LAST			4615
+#define MSG_SSID_8			5000
+#define MSG_SSID_8_LAST			5032
+#define MSG_SSID_9			5500
+#define MSG_SSID_9_LAST			5516
+#define MSG_SSID_10			6000
+#define MSG_SSID_10_LAST		6081
+#define MSG_SSID_11			6500
+#define MSG_SSID_11_LAST		6521
+#define MSG_SSID_12			7000
+#define MSG_SSID_12_LAST		7003
+#define MSG_SSID_13			7100
+#define MSG_SSID_13_LAST		7111
+#define MSG_SSID_14			7200
+#define MSG_SSID_14_LAST		7201
+#define MSG_SSID_15			8000
+#define MSG_SSID_15_LAST		8000
+#define MSG_SSID_16			8500
+#define MSG_SSID_16_LAST		8529
+#define MSG_SSID_17			9000
+#define MSG_SSID_17_LAST		9008
+#define MSG_SSID_18			9500
+#define MSG_SSID_18_LAST		9510
+#define MSG_SSID_19			10200
+#define MSG_SSID_19_LAST		10210
+#define MSG_SSID_20			10251
+#define MSG_SSID_20_LAST		10255
+#define MSG_SSID_21			10300
+#define MSG_SSID_21_LAST		10300
+#define MSG_SSID_22			10350
+#define MSG_SSID_22_LAST		10377
+#define MSG_SSID_23			10400
+#define MSG_SSID_23_LAST		10415
+#define MSG_SSID_24			0xC000
+#define MSG_SSID_24_LAST		0xC063
+
+static const uint32_t msg_bld_masks_0[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_ERROR,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_ERROR,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_ERROR,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW|MSG_MASK_5|MSG_MASK_6|MSG_MASK_7|MSG_MASK_8,
+	MSG_LVL_LOW,
+	MSG_LVL_ERROR,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED|MSG_MASK_7 | \
+		MSG_MASK_8|MSG_MASK_9|MSG_MASK_10|MSG_MASK_11|MSG_MASK_12 | \
+		MSG_MASK_13|MSG_MASK_14|MSG_MASK_15|MSG_MASK_16 | \
+		MSG_MASK_17|MSG_MASK_18|MSG_MASK_19|MSG_MASK_20|MSG_MASK_21,
+	MSG_LVL_MED|MSG_MASK_5 | \
+		MSG_MASK_6|MSG_MASK_7|MSG_MASK_8|MSG_MASK_9|MSG_MASK_10| \
+		MSG_MASK_11|MSG_MASK_12|MSG_MASK_13|MSG_MASK_14| \
+		MSG_MASK_15|MSG_MASK_16|MSG_MASK_17,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED|MSG_MASK_5 | \
+		MSG_MASK_6|MSG_MASK_7|MSG_MASK_8,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_MED,
+	MSG_LVL_MED|MSG_MASK_5 | \
+		MSG_MASK_6|MSG_MASK_7|MSG_MASK_8|MSG_MASK_9|MSG_MASK_10| \
+		MSG_MASK_11|MSG_MASK_12|MSG_MASK_13|MSG_MASK_14|MSG_MASK_15| \
+		MSG_MASK_16|MSG_MASK_17|MSG_MASK_18|MSG_MASK_19|MSG_MASK_20| \
+		MSG_MASK_21|MSG_MASK_22|MSG_MASK_23|MSG_MASK_24|MSG_MASK_25,
+	MSG_LVL_MED|MSG_MASK_5 | \
+		MSG_MASK_6|MSG_MASK_7|MSG_MASK_8|MSG_MASK_9|MSG_MASK_10,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_MASK_5 | \
+		MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8,
+	MSG_LVL_LOW | MSG_MASK_5 | \
+		MSG_MASK_6,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_MED | MSG_MASK_5 | \
+		MSG_MASK_6|MSG_MASK_7|MSG_MASK_8|MSG_MASK_9|MSG_MASK_10| \
+		MSG_MASK_11|MSG_MASK_12|MSG_MASK_13|MSG_MASK_14|MSG_MASK_15 | \
+		MSG_MASK_16|MSG_MASK_17|MSG_MASK_18|MSG_MASK_19|MSG_MASK_20,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH | MSG_MASK_21,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR,
+	MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR,
+	MSG_LVL_MED|MSG_LVL_HIGH,
+	MSG_LVL_MED|MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_MED,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH
+};
+
+static const uint32_t msg_bld_masks_1[] = {
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH
+};
+
+static const uint32_t msg_bld_masks_2[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED,
+	MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_3[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED|MSG_MASK_5|MSG_MASK_6|MSG_MASK_7|
+			MSG_MASK_8|MSG_MASK_9|MSG_MASK_10,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_4[] = {
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_5[] = {
+	MSG_LVL_HIGH,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED|MSG_MASK_5|MSG_MASK_6|MSG_MASK_7| \
+		MSG_MASK_8|MSG_MASK_9,
+	MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_6[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_7[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL
+};
+
+static const uint32_t msg_bld_masks_8[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_9[] = {
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5,
+	MSG_LVL_MED|MSG_MASK_5
+};
+
+static const uint32_t msg_bld_masks_10[] =  {
+	MSG_LVL_MED,
+	MSG_LVL_ERROR,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_MASK_5 | \
+		MSG_MASK_6|MSG_MASK_7|MSG_MASK_8|MSG_MASK_9|MSG_MASK_10| \
+		MSG_MASK_11|MSG_MASK_12|MSG_MASK_13|MSG_MASK_14|MSG_MASK_15| \
+		MSG_MASK_16|MSG_MASK_17|MSG_MASK_18|MSG_MASK_19|MSG_MASK_20| \
+		MSG_MASK_21|MSG_MASK_22,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_0 | MSG_MASK_1 | MSG_MASK_2 | \
+		MSG_MASK_3 | MSG_MASK_4 | MSG_MASK_5 | MSG_MASK_6,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_11[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+};
+
+static const uint32_t msg_bld_masks_12[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+};
+
+static const uint32_t msg_bld_masks_13[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+};
+
+static const uint32_t msg_bld_masks_14[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+};
+
+static const uint32_t msg_bld_masks_15[] = {
+	MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_16[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL
+};
+
+static const uint32_t msg_bld_masks_17[] =  {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED | MSG_MASK_6 | \
+		MSG_MASK_7 | MSG_MASK_8 | MSG_MASK_9,
+	MSG_LVL_MED | MSG_MASK_5 | \
+		MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 | MSG_MASK_9 | \
+		MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 | MSG_MASK_13 | \
+		MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 | MSG_MASK_17,
+	MSG_LVL_MED,
+	MSG_LVL_MED | MSG_MASK_5 | \
+		MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 | MSG_MASK_9 | \
+		MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 | MSG_MASK_13 | \
+		MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 | MSG_MASK_17 | \
+		MSG_MASK_18 | MSG_MASK_19 | MSG_MASK_20 | MSG_MASK_21 | \
+		MSG_MASK_22,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+};
+
+static const uint32_t msg_bld_masks_18[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_MASK_8 | MSG_MASK_9 | MSG_MASK_10 | \
+		MSG_MASK_11|MSG_MASK_12|MSG_MASK_13|MSG_MASK_14|MSG_MASK_15 | \
+		MSG_MASK_16|MSG_MASK_17|MSG_MASK_18|MSG_MASK_19|MSG_MASK_20,
+	MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_MASK_5 | \
+		MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 | MSG_MASK_9,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_19[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_20[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_21[] = {
+	MSG_LVL_HIGH
+};
+
+static const uint32_t msg_bld_masks_22[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_23[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+/* LOG CODES */
+static const uint32_t log_code_last_tbl[] = {
+	0x0,	/* EQUIP ID 0 */
+	0x1966,	/* EQUIP ID 1 */
+	0x0,	/* EQUIP ID 2 */
+	0x0,	/* EQUIP ID 3 */
+	0x4910,	/* EQUIP ID 4 */
+	0x5420,	/* EQUIP ID 5 */
+	0x0,	/* EQUIP ID 6 */
+	0x74FF,	/* EQUIP ID 7 */
+	0x0,	/* EQUIP ID 8 */
+	0x0,	/* EQUIP ID 9 */
+	0xA38A,	/* EQUIP ID 10 */
+	0xB201,	/* EQUIP ID 11 */
+	0x0,	/* EQUIP ID 12 */
+	0xD1FF,	/* EQUIP ID 13 */
+	0x0,	/* EQUIP ID 14 */
+	0x0,	/* EQUIP ID 15 */
+};
+
+#define LOG_GET_ITEM_NUM(xx_code)	((xx_code) & 0x0FFF)
+#define LOG_GET_EQUIP_ID(xx_code)	(((xx_code) & 0xF000) >> 12)
+#define LOG_ITEMS_TO_SIZE(num_items)	(((num_items) + 7) / 8)
+#define LOG_SIZE_TO_ITEMS(size)		((8 * (size)) - 7)
+#define EVENT_COUNT_TO_BYTES(count)	(((count) / 8) + 1)
+
+#endif
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/linux/fbxatm_dev.h	2023-02-27 17:17:36.496529019 +0100
@@ -0,0 +1,436 @@
+#ifndef LINUX_FBXATM_DEV_H_
+#define LINUX_FBXATM_DEV_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/mutex.h>
+#include <linux/fbxatm.h>
+#include <linux/device.h>
+#include <asm/atomic.h>
+#include <linux/if_vlan.h>
+#include <linux/fbxatm_remote.h>
+
+/*
+ * atm cell helper
+ */
+#define ATM_CELL_HDR_SIZE	5
+
+#define ATM_GET_GFC(h)		(((h)[0] & 0xf0) >> 4)
+#define ATM_SET_GFC(h,v)	do {					\
+					(h)[0] &= ~0xf0;		\
+					(h)[0] |= (v) << 4;		\
+				} while (0)
+
+#define ATM_GET_VPI(h)		((((h)[0] & 0x0f) << 4) |		\
+				 (((h)[1] & 0xf0) >> 4))
+#define ATM_SET_VPI(h,v)	do {					\
+					(h)[0] &= ~0xf;			\
+					(h)[1] &= ~0xf0;		\
+					(h)[0] |= (v) >> 4;		\
+					(h)[1] |= ((v) & 0xf) << 4;	\
+				} while (0)
+
+#define ATM_GET_VCI(h)		((((h)[1] & 0x0f) << 12) |		\
+				 ((h)[2] << 4) |			\
+				 ((((h)[3] & 0xf0) >> 4)))
+#define ATM_SET_VCI(h,v)	do {					\
+					(h)[1] &= ~0xf;			\
+					(h)[3] &= ~0xf0;		\
+					(h)[1] |= (v) >> 12;		\
+					(h)[2] = ((v) & 0xff0) >> 4;	\
+					(h)[3] |= ((v) & 0xf) << 4;	\
+				} while (0)
+
+
+#define ATM_GET_PT(h)		(((h)[3] & 0x0e) >> 1)
+#define ATM_SET_PT(h,v)		do {					\
+					(h)[3] &= ~0xe;			\
+					(h)[3] |= (v) << 1;		\
+				} while (0)
+
+#define ATM_GET_CLP(h)		(((h)[3] & 0x01))
+#define ATM_SET_CLP(h,v)	do {					\
+					(h)[3] &= ~1;			\
+					(h)[3] |= (v);			\
+				} while (0)
+
+#define ATM_GET_HEC(h)		((h)[4])
+#define ATM_SET_HEC(h,v)	do {					\
+					(h)[4] = (v);			\
+				} while (0)
+
+
+/*
+ * OAM definition
+ */
+#define OAM_VCI_SEG_F4			3
+#define OAM_VCI_END2END_F4		4
+
+#define OAM_PTI_SEG_F5			0x4
+#define OAM_PTI_END2END_F5		0x5
+
+#define OAM_TYPE_SHIFT			4
+#define OAM_TYPE_MASK			(0xf << OAM_TYPE_SHIFT)
+#define OAM_TYPE_FAULT_MANAGEMENT	0x1
+#define OAM_TYPE_PERF_MANAGEMENT	0x2
+#define OAM_TYPE_ACTIVATION		0x8
+
+#define FUNC_TYPE_SHIFT			0
+#define FUNC_TYPE_MASK			(0xf << FUNC_TYPE_SHIFT)
+#define FUNC_TYPE_AIS			0x0
+#define FUNC_TYPE_FERF			0x1
+#define FUNC_TYPE_CONT_CHECK		0x4
+#define FUNC_TYPE_OAM_LOOPBACK		0x8
+
+struct fbxatm_oam_cell_payload {
+	u8			cell_hdr[5];
+	u8			cell_type;
+	u8			loopback_indication;
+	u8			correlation_tag[4];
+	u8			loopback_id[16];
+	u8			source_id[16];
+	u8			reserved[8];
+	u8			crc10[2];
+};
+
+struct fbxatm_oam_cell {
+	struct fbxatm_oam_cell_payload	payload;
+	struct list_head		next;
+};
+
+struct fbxatm_oam_ping {
+	struct fbxatm_oam_ping_req	req;
+	u32				correlation_id;
+	int				replied;
+	wait_queue_head_t		wq;
+	struct list_head		next;
+};
+
+/*
+ * vcc/device stats
+ */
+struct fbxatm_vcc_stats {
+	u64			rx_bytes;
+	u64			tx_bytes;
+	u32			rx_aal5;
+	u32			tx_aal5;
+};
+
+struct fbxatm_dev_stats {
+	u64			rx_bytes;
+	u64			tx_bytes;
+	u32			rx_aal5;
+	u32			tx_aal5;
+	u32			rx_f4_oam;
+	u32			tx_f4_oam;
+	u32			rx_f5_oam;
+	u32			tx_f5_oam;
+	u32			rx_bad_oam;
+	u32			rx_bad_llid_oam;
+	u32			rx_other_oam;
+	u32			rx_dropped;
+	u32			tx_drop_nolink;
+};
+
+/*
+ * vcc user ops
+ */
+struct fbxatm_vcc_uops {
+	void	(*link_change)(void *cb_data, int link,
+			       unsigned int rx_cell_rate,
+			       unsigned int tx_cell_rate);
+	void	(*rx_pkt)(struct sk_buff *skb, void *cb_data);
+	void	(*tx_done)(void *cb_data);
+};
+
+/*
+ * vcc status flags
+ */
+enum {
+	FBXATM_VCC_F_FULL		= (1 << 0),
+
+	FBXATM_VCC_F_LINK_UP		= (1 << 1),
+};
+
+
+/*
+ * vcc definition
+ */
+struct fbxatm_dev;
+
+struct fbxatm_vcc {
+	unsigned int			vpi;
+	unsigned int			vci;
+
+	struct fbxatm_vcc_qos		qos;
+
+	struct fbxatm_vcc_stats		stats;
+
+	enum fbxatm_vcc_user		user;
+	void				*user_priv;
+
+	struct fbxatm_dev		*adev;
+	void				*dev_priv;
+
+	spinlock_t			user_ops_lock;
+	const struct fbxatm_vcc_uops	*user_ops;
+	void				*user_cb_data;
+
+	unsigned int			to_drop_pkt;
+
+	spinlock_t			tx_lock;
+	unsigned long			vcc_flags;
+
+	struct list_head		next;
+};
+
+/*
+ * fbxatm device operation
+ */
+struct fbxatm_dev_ops {
+	int (*open)(struct fbxatm_vcc *vcc);
+
+	void (*close)(struct fbxatm_vcc *vcc);
+
+	int (*ioctl)(struct fbxatm_dev *adev,
+		     unsigned int cmd, void __user *arg);
+
+	int (*send)(struct fbxatm_vcc *vcc, struct sk_buff *skb);
+
+	int (*send_oam)(struct fbxatm_dev *adev,
+			struct fbxatm_oam_cell *cell);
+
+	int (*init_procfs)(struct fbxatm_dev *adev);
+	void (*release_procfs)(struct fbxatm_dev *adev);
+
+	struct module			*owner;
+};
+
+/*
+ * device flags
+ */
+enum {
+	FBXATM_DEV_F_LINK_UP		= (1 << 0),
+};
+
+/*
+ * fbxatm device definition
+ */
+struct fbxatm_dev {
+	int				ifindex;
+	unsigned long			dev_flags;
+	spinlock_t			dev_link_lock;
+
+	unsigned int			max_vcc;
+	unsigned int			vci_mask;
+	unsigned int			vpi_mask;
+	unsigned int			max_priority;
+	unsigned int			max_rx_priority;
+	unsigned int			tx_headroom;
+
+	char				*name;
+
+	/* unit: b/s */
+	unsigned int			link_rate_ds;
+	unsigned int			link_rate_us;
+
+	unsigned int			link_cell_rate_ds;
+	unsigned int			link_cell_rate_us;
+
+	const struct fbxatm_dev_ops	*ops;
+
+	spinlock_t			stats_lock;
+	struct fbxatm_dev_stats		stats;
+
+	spinlock_t			vcc_list_lock;
+	struct list_head		vcc_list;
+
+	struct device			dev;
+
+	spinlock_t			oam_list_lock;
+	struct list_head		rx_oam_cells;
+	unsigned int			rx_oam_cells_count;
+	struct work_struct		oam_work;
+
+	struct list_head		oam_pending_ping;
+	u32				oam_correlation_id;
+
+	struct proc_dir_entry		*dev_proc_entry;
+	void				*priv;
+	struct list_head		next;
+};
+
+/*
+ * API for device drivers
+ */
+struct fbxatm_dev *fbxatm_alloc_device(int sizeof_priv);
+
+int fbxatm_register_device(struct fbxatm_dev *adev,
+			   const char *base_name,
+			   const struct fbxatm_dev_ops *ops);
+
+void fbxatm_free_device(struct fbxatm_dev *adev);
+
+void fbxatm_dev_set_link_up(struct fbxatm_dev *adev);
+
+void fbxatm_dev_set_link_down(struct fbxatm_dev *adev);
+
+int fbxatm_unregister_device(struct fbxatm_dev *adev);
+
+void fbxatm_netifrx_oam(struct fbxatm_dev *adev,
+			struct fbxatm_oam_cell *cell);
+
+
+static inline int fbxatm_vcc_link_is_up(struct fbxatm_vcc *vcc)
+{
+	return test_bit(FBXATM_VCC_F_LINK_UP, &vcc->vcc_flags);
+}
+
+#define	FBXATMDEV_ALIGN		4
+
+static inline void *fbxatm_dev_priv(struct fbxatm_dev *adev)
+{
+	return (u8 *)adev + ((sizeof(struct fbxatm_dev)
+			      + (FBXATMDEV_ALIGN - 1))
+			     & ~(FBXATMDEV_ALIGN - 1));
+}
+
+/*
+ * API for FBXATM stack user
+ */
+struct fbxatm_ioctl {
+	int (*handler)(struct socket *sock,
+		       unsigned int cmd, void __user *useraddr);
+
+	void (*release)(struct socket *sock);
+
+	struct module		*owner;
+	struct list_head	next;
+};
+
+void fbxatm_set_uops(struct fbxatm_vcc *vcc,
+		     const struct fbxatm_vcc_uops *user_ops,
+		     void *user_cb_data);
+
+struct fbxatm_vcc *
+fbxatm_bind_to_vcc(const struct fbxatm_vcc_id *id,
+		   enum fbxatm_vcc_user user);
+
+void fbxatm_unbind_vcc(struct fbxatm_vcc *vcc);
+
+
+static inline int fbxatm_vcc_queue_full(struct fbxatm_vcc *vcc)
+{
+	return test_bit(FBXATM_VCC_F_FULL, &vcc->vcc_flags);
+}
+
+#ifdef CONFIG_FBXATM_STACK
+/*
+ * stack user callback to send data on given vcc
+ */
+static inline int fbxatm_send(struct fbxatm_vcc *vcc, struct sk_buff *skb)
+{
+	int ret;
+	unsigned int len;
+
+	len = skb->len;
+
+	spin_lock_bh(&vcc->tx_lock);
+	if (!test_bit(FBXATM_VCC_F_LINK_UP, &vcc->vcc_flags)) {
+		spin_unlock_bh(&vcc->tx_lock);
+		dev_kfree_skb(skb);
+		spin_lock(&vcc->adev->stats_lock);
+		vcc->adev->stats.tx_drop_nolink++;
+		spin_unlock(&vcc->adev->stats_lock);
+		return 0;
+	}
+
+	ret = vcc->adev->ops->send(vcc, skb);
+	if (!ret) {
+		vcc->stats.tx_bytes += len;
+		vcc->stats.tx_aal5++;
+	}
+	spin_unlock_bh(&vcc->tx_lock);
+
+	if (!ret) {
+		spin_lock_bh(&vcc->adev->stats_lock);
+		vcc->adev->stats.tx_bytes += len;
+		vcc->adev->stats.tx_aal5++;
+		spin_unlock_bh(&vcc->adev->stats_lock);
+	}
+	return ret;
+}
+
+/*
+ * device callback when packet comes in
+ */
+static inline void fbxatm_netifrx(struct fbxatm_vcc *vcc, struct sk_buff *skb)
+{
+	unsigned int len;
+
+	len = skb->len;
+
+	spin_lock_bh(&vcc->user_ops_lock);
+	if (!vcc->user_ops) {
+		spin_unlock_bh(&vcc->user_ops_lock);
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	if (vcc->to_drop_pkt) {
+		vcc->to_drop_pkt--;
+		spin_unlock_bh(&vcc->user_ops_lock);
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	vcc->stats.rx_bytes += len;
+	vcc->stats.rx_aal5++;
+
+	vcc->user_ops->rx_pkt(skb, vcc->user_cb_data);
+	spin_unlock_bh(&vcc->user_ops_lock);
+
+	spin_lock_bh(&vcc->adev->stats_lock);
+	vcc->adev->stats.rx_bytes += len;
+	vcc->adev->stats.rx_aal5++;
+	spin_unlock_bh(&vcc->adev->stats_lock);
+}
+
+/*
+ * device callback when tx is done on vcc
+ */
+static inline void fbxatm_tx_done(struct fbxatm_vcc *vcc)
+{
+	spin_lock_bh(&vcc->user_ops_lock);
+	if (vcc->user_ops)
+		vcc->user_ops->tx_done(vcc->user_cb_data);
+	spin_unlock_bh(&vcc->user_ops_lock);
+}
+#else
+int fbxatm_send(struct fbxatm_vcc *vcc, struct sk_buff *skb);
+void fbxatm_netifrx(struct fbxatm_vcc *vcc, struct sk_buff *skb);
+void fbxatm_tx_done(struct fbxatm_vcc *vcc);
+#endif
+
+static inline unsigned int fbxatm_rx_reserve(void)
+{
+#ifdef CONFIG_FBXATM_STACK
+	/* normal stack, no headroom needed */
+	return 0;
+#else
+	/* remote stub, we need to send rx skb to another location,
+	 * adding the fbxatm_remote header, an ethernet header (with
+	 * possible vlan) */
+	return ALIGN(sizeof (struct fbxatm_remote_hdr) + VLAN_ETH_HLEN, 4);
+#endif
+}
+
+void fbxatm_register_ioctl(struct fbxatm_ioctl *ioctl);
+
+void fbxatm_unregister_ioctl(struct fbxatm_ioctl *ioctl);
+
+#endif /* !LINUX_FBXATM_DEV_H_ */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/linux/fbxatm_remote.h	2023-02-27 17:10:06.964489879 +0100
@@ -0,0 +1,216 @@
+#ifndef FBXATM_REMOTE_H_
+#define FBXATM_REMOTE_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+/*
+ * fbxatm remote protocol messages
+ */
+#define ETH_P_FBXATM_REMOTE	0x8844
+#define FBXATM_REMOTE_MAGIC	0xd76f8d2f
+
+enum fbxatm_remote_flags {
+	FBXATM_RFLAGS_ACK = (1 << 0),
+};
+
+enum fbxatm_remote_mtype {
+	/* driver => stub */
+	FBXATM_RMT_CONNECT = 0,
+
+	/* stub => driver */
+	FBXATM_RMT_DEV_LINK,
+	FBXATM_RMT_DEV_RX_OAM,
+
+	/* driver => stub */
+	FBXATM_RMT_KEEPALIVE,
+	FBXATM_RMT_DEV_SEND_OAM,
+	FBXATM_RMT_VCC_ACTION,
+
+	/* driver => stub */
+	FBXATM_RMT_VCC_SEND,
+
+	/* stub => driver */
+	FBXATM_RMT_VCC_QEMPTY,
+	FBXATM_RMT_VCC_RX,
+};
+
+struct fbxatm_remote_hdr {
+	u32	magic;
+	u8	flags;
+	u8	seq;
+	u16	len;
+	u16	sport;
+	u16	dport;
+
+	u32	session_id;
+	u32	mtype;
+};
+
+/*
+ * sent to destination port 0
+ */
+struct fbxatm_remote_connect {
+	u8	name[32];
+
+	u16	dev_link_port;
+	u16	dev_rx_oam_port;
+};
+
+struct fbxatm_remote_connect_ack {
+	u16	vcc_action_port;
+	u16	dev_send_oam_port;
+	u16	keepalive_port;
+	u16	pad;
+
+	u32	max_vcc;
+	u32	vci_mask;
+	u32	vpi_mask;
+	u32	max_priority;
+	u32	max_rx_priority;
+
+	u32	link;
+	u32	link_rate_ds;
+	u32	link_rate_us;
+	u32	link_cell_rate_ds;
+	u32	link_cell_rate_us;
+};
+
+/*
+ * sent on dev_link port
+ */
+struct fbxatm_remote_dev_link {
+	u32	link;
+	u32	link_rate_ds;
+	u32	link_rate_us;
+	u32	link_cell_rate_ds;
+	u32	link_cell_rate_us;
+};
+
+/*
+ * sent on vcc_action port
+ */
+struct fbxatm_remote_vcc_action {
+	/* 1: open - 0: close */
+	u32	action;
+
+	/*
+	 * open args
+	 */
+	u16	vcc_rx_port;
+	u16	vcc_qempty_port;
+
+	/* from vcc id struct */
+	u32	vpi;
+	u32	vci;
+
+	/* from qos struct */
+	u32	traffic_class;
+	u32	max_sdu;
+	u32	max_buffered_pkt;
+	u32	priority;
+	u32	rx_priority;
+
+	/*
+	 * close args
+	 */
+	u32	vcc_remote_id;
+};
+
+struct fbxatm_remote_vcc_action_ack {
+	u32	ret;
+
+	/* open args ack */
+	u32	vcc_remote_id;
+	u16	vcc_send_port;
+	u16	pad;
+};
+
+/*
+ * sent on vcc_send port
+ */
+struct fbxatm_remote_vcc_send_ack {
+	u32	full;
+};
+
+/*
+ * pseudo socket layer
+ */
+struct fbxatm_remote_sock;
+struct fbxatm_remote_ctx;
+
+struct fbxatm_remote_sockaddr {
+	u16		lport;
+	u16		dport;
+	u32		mtype;
+	int		infinite_retry;
+	int		(*deliver)(void *priv, struct sk_buff *skb,
+				   struct sk_buff **ack);
+	void		(*response)(void *priv, struct sk_buff *skb);
+	void		*priv;
+};
+
+struct sk_buff *fbxatm_remote_alloc_skb(struct fbxatm_remote_ctx *ctx,
+					unsigned int size);
+
+unsigned int fbxatm_remote_headroom(struct fbxatm_remote_ctx *ctx);
+
+void fbxatm_remote_sock_getaddr(struct fbxatm_remote_sock *sock,
+				struct fbxatm_remote_sockaddr *addr);
+
+void fbxatm_remote_sock_purge(struct fbxatm_remote_sock *sock);
+
+int fbxatm_remote_sock_pending(struct fbxatm_remote_sock *sock);
+
+struct fbxatm_remote_ctx *fbxatm_remote_alloc_ctx(struct net_device *netdev,
+						  u8 *remote_mac,
+						  u32 session_id,
+						  void (*timeout)(void *priv),
+						  void *priv);
+
+struct fbxatm_remote_sock *
+fbxatm_remote_sock_bind(struct fbxatm_remote_ctx *ctx,
+			struct fbxatm_remote_sockaddr *addr,
+			int send_ack);
+
+struct fbxatm_remote_sock *
+fbxatm_remote_sock_connect(struct fbxatm_remote_ctx *ctx,
+			   struct fbxatm_remote_sockaddr *addr,
+			   int need_ack);
+
+int fbxatm_remote_sock_send(struct fbxatm_remote_sock *sock,
+			    struct sk_buff *skb);
+
+int fbxatm_remote_sock_send_ack(struct fbxatm_remote_sock *sock,
+				struct sk_buff *skb);
+
+int fbxatm_remote_sock_send_raw_ack(struct fbxatm_remote_ctx *ctx,
+				    struct net_device *dev,
+				    u8 *remote_mac,
+				    struct fbxatm_remote_hdr *hdr,
+				    struct sk_buff *ack);
+
+void fbxatm_remote_sock_close(struct fbxatm_remote_sock *sock);
+
+void fbxatm_remote_set_unknown_cb(void (*cb)(struct net_device *,
+					     struct sk_buff *));
+
+void fbxatm_remote_free_ctx(struct fbxatm_remote_ctx *ctx);
+
+void fbxatm_remote_ctx_set_dead(struct fbxatm_remote_ctx *ctx);
+
+int fbxatm_remote_init(void);
+
+void fbxatm_remote_exit(void);
+
+/*
+ * platform data for fbxatm_remote driver
+ */
+struct fbxatm_remote_pdata {
+	u8	remote_mac[ETH_ALEN];
+	char	netdev_name[IFNAMSIZ];
+	char	remote_name[32];
+};
+
+#endif /* !FBXATM_REMOTE_H_ */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/linux/fbxgpio_core.h	2023-05-22 20:06:44.543861781 +0200
@@ -0,0 +1,54 @@
+/*
+ * fbxgpio.h for linux-freebox
+ * Created by <nschichan@freebox.fr> on Wed Feb 21 22:09:46 2007
+ * Freebox SA
+ */
+
+#ifndef FBXGPIO_H
+# define FBXGPIO_H
+
+# include <linux/types.h>
+# include <linux/gpio/consumer.h>
+
+struct fbxgpio_pin;
+
+#define FBXGPIO_PIN_REVERSE_POL		(1 << 0)
+
+struct fbxgpio_pin {
+	const char			*pin_name;
+	bool				use_desc;
+
+	/* when use_desc is true */
+	struct gpio_desc		*(*request_desc)(struct fbxgpio_pin *);
+	void				(*release_desc)(struct fbxgpio_pin *);
+
+	/* when use_desc is false */
+	int				pin_num;
+	unsigned int			flags;
+
+	int				direction;
+	unsigned int			cur_dataout;
+	struct device			*dev;
+	struct device_node		*of_node;
+
+	/* private flags used by fbxgpio-dt */
+	struct {
+		enum gpiod_flags		flags;
+		bool				no_claim;
+		struct gpio_desc		*desc;
+		char				pin_name[32];
+	} dt;
+};
+
+
+#define GPIO_DIR_IN	0x1
+#define GPIO_DIR_OUT	0x0
+
+struct fbxgpio_pin *fbxgpio_of_get(struct device_node *np,
+				   const char *propname,
+				   int index);
+
+int fbxgpio_set_data_out(struct fbxgpio_pin *pin, int val);
+int fbxgpio_get_data_in(struct fbxgpio_pin *pin);
+
+#endif /* !FBXGPIO_H */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/linux/fbxprocfs.h	2023-02-27 19:50:21.524231407 +0100
@@ -0,0 +1,40 @@
+#ifndef FBXPROCFS_H_
+#define FBXPROCFS_H_
+
+#include <linux/proc_fs.h>
+#include <asm/atomic.h>
+#include <linux/seq_file.h>
+
+struct fbxprocfs_client
+{
+	const char *dirname;
+	struct module *owner;
+	struct proc_dir_entry *dir;
+	atomic_t refcount;
+	struct list_head list;
+};
+
+struct fbxprocfs_desc {
+	char		*name;
+	unsigned long	id;
+	int	(*rfunc)(struct seq_file *, void *);
+	int	(*wfunc)(struct file *, const char *, unsigned long, void *);
+};
+
+struct fbxprocfs_client *fbxprocfs_add_client(const char *dirname,
+					      struct module *owner);
+
+int fbxprocfs_remove_client(struct fbxprocfs_client *client);
+
+
+int
+fbxprocfs_create_entries(struct fbxprocfs_client *client,
+			 const struct fbxprocfs_desc *ro_desc,
+			 const struct fbxprocfs_desc *rw_desc);
+
+int
+fbxprocfs_remove_entries(struct fbxprocfs_client *client,
+			 const struct fbxprocfs_desc *ro_desc,
+			 const struct fbxprocfs_desc *rw_desc);
+
+#endif /* FBXPROCFS_H_ */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/linux/fbxserial.h	2023-02-27 19:50:19.628180800 +0100
@@ -0,0 +1,129 @@
+#ifndef FBXSERIAL_H_
+#define FBXSERIAL_H_
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+/*
+ * some part of serial may vary, we use abstract struct to store this,
+ * data content depends on type.
+ */
+#define EXTINFO_SIZE		128
+#define EXTINFO_MAX_COUNT	16
+
+/*
+ * extdev desc
+ */
+#define EXTINFO_TYPE_EXTDEV	1
+
+#define EXTDEV_TYPE_BUNDLE	1
+#define EXTDEV_TYPE_MAX		2
+
+struct fbx_serial_extinfo {
+	u32			type;
+
+	union {
+		/* extdev */
+		struct {
+			u32	type;
+			u32	model;
+			char	serial[64];
+		} extdev;
+
+		/* raw access */
+		unsigned char	data[EXTINFO_SIZE];
+	} u;
+} __attribute__ ((packed));
+
+
+/*
+ * master serial structure
+ */
+
+#define FBXSERIAL_VERSION	1
+
+#define FBXSERIAL_MAGIC		0x2d9521ab
+
+#define MAC_ADDR_SIZE		6
+#define RANDOM_DATA_SIZE	32
+
+/*
+ * this is the maximum size we accept to check crc32 against, so
+ * the structure may not grow larger than this
+ */
+#define FBXSERIAL_MAX_SIZE	8192
+
+struct fbx_serial {
+	u32	crc32;
+	u32	magic;
+	u32	struct_version;
+	u32	len;
+
+	/* board serial */
+	u16	type;
+	u8	version;
+	u8	manufacturer;
+	u16	year;
+	u8	week;
+	u32	number;
+	u32	flags;
+
+	/* mac address base */
+	u8	mac_addr_base[MAC_ADDR_SIZE];
+
+	/* mac address count */
+	u8	mac_count;
+
+	/* random data */
+	u8	random_data[RANDOM_DATA_SIZE];
+
+	/* last update of data (seconds since epoch) */
+	u32	last_modified;
+
+	/* count of following extinfo tag */
+	u32	extinfo_count;
+
+	/* beginning of extended info */
+	struct fbx_serial_extinfo	extinfos[EXTINFO_MAX_COUNT];
+
+} __attribute__ ((packed));
+
+
+/*
+ * default value to use in case magic is wrong (no cksum in that case)
+ */
+static inline void fbxserial_set_default(struct fbx_serial *s)
+{
+	memset(s, 0, sizeof (*s));
+	s->magic = FBXSERIAL_MAGIC;
+	s->struct_version = FBXSERIAL_VERSION;
+	s->len = sizeof (*s);
+	s->manufacturer = '_';
+	memcpy(s->mac_addr_base, "\x00\x07\xCB\x00\x00\xFD", 6);
+	s->mac_count = 1;
+}
+
+void
+fbxserialinfo_get_random(unsigned char *data, unsigned int len);
+
+const void *
+fbxserialinfo_get_mac_addr(unsigned int index);
+
+int
+fbxserialinfo_read(const void *data, struct fbx_serial *out);
+
+struct fbx_serial *fbxserialinfo_get(void);
+
+/*
+ * implemented in board specific code
+ */
+#ifdef CONFIG_ARCH_HAS_FBXSERIAL
+extern const struct fbx_serial *arch_get_fbxserial(void);
+#else
+static inline const struct fbx_serial *arch_get_fbxserial(void)
+{
+	return NULL;
+}
+#endif
+
+#endif /* FBXSERIAL_H_ */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/linux/ipc_logging.h	2023-03-15 19:52:23.541979840 +0100
@@ -0,0 +1,290 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_LOGGING_H
+#define _IPC_LOGGING_H
+
+#include <linux/types.h>
+
+#define MAX_MSG_SIZE 255
+
+enum {
+	TSV_TYPE_MSG_START = 1,
+	TSV_TYPE_SKB = TSV_TYPE_MSG_START,
+	TSV_TYPE_STRING,
+	TSV_TYPE_MSG_END = TSV_TYPE_STRING,
+};
+
+struct tsv_header {
+	unsigned char type;
+	unsigned char size; /* size of data field */
+};
+
+struct encode_context {
+	struct tsv_header hdr;
+	char buff[MAX_MSG_SIZE];
+	int offset;
+};
+
+struct decode_context {
+	int output_format;      /* 0 = debugfs */
+	char *buff;             /* output buffer */
+	int size;               /* size of output buffer */
+};
+
+#if defined(CONFIG_IPC_LOGGING)
+/*
+ * ipc_log_context_create: Create a debug log context
+ *                         Should not be called from atomic context
+ *
+ * @max_num_pages: Number of pages of logging space required (max. 10)
+ * @modname      : Name of the directory entry under DEBUGFS
+ * @user_version : Version number of user-defined message formats
+ *
+ * returns context id on success, NULL on failure
+ */
+void *ipc_log_context_create(int max_num_pages, const char *modname,
+		uint16_t user_version);
+
+/*
+ * msg_encode_start: Start encoding a log message
+ *
+ * @ectxt: Temporary storage to hold the encoded message
+ * @type:  Root event type defined by the module which is logging
+ */
+void msg_encode_start(struct encode_context *ectxt, uint32_t type);
+
+/*
+ * tsv_timestamp_write: Writes the current timestamp count
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ */
+int tsv_timestamp_write(struct encode_context *ectxt);
+
+/*
+ * tsv_qtimer_write: Writes the current QTimer timestamp count
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ */
+int tsv_qtimer_write(struct encode_context *ectxt);
+
+/*
+ * tsv_pointer_write: Writes a data pointer
+ *
+ * @ectxt:   Context initialized by calling msg_encode_start()
+ * @pointer: Pointer value to write
+ */
+int tsv_pointer_write(struct encode_context *ectxt, void *pointer);
+
+/*
+ * tsv_int32_write: Writes a 32-bit integer value
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @n:     Integer to write
+ */
+int tsv_int32_write(struct encode_context *ectxt, int32_t n);
+
+/*
+ * tsv_byte_array_write: Writes a byte array
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @data:  Data to write, @data_size: number of bytes to write
+ */
+int tsv_byte_array_write(struct encode_context *ectxt,
+			 void *data, int data_size);
+
+/*
+ * msg_encode_end: Complete the message encode process
+ *
+ * @ectxt: Temporary storage which holds the encoded message
+ */
+void msg_encode_end(struct encode_context *ectxt);
+
+/*
+ * ipc_log_write: Writes the encoded message to the log context
+ *
+ * @ctxt:  Logging context, @ectxt: storage holding the encoded message
+ */
+void ipc_log_write(void *ctxt, struct encode_context *ectxt);
+
+/*
+ * ipc_log_string: Helper function to log a string
+ *
+ * @ilctxt: Debug Log Context created using ipc_log_context_create()
+ * @fmt:    Data specified using format specifiers
+ */
+int ipc_log_string(void *ilctxt, const char *fmt, ...) __printf(2, 3);
+
+/**
+ * ipc_log_extract - Reads and deserializes log
+ *
+ * @ilctxt:  logging context
+ * @buff:    buffer to receive the data
+ * @size:    size of the buffer
+ * @returns: 0 if no data read; >0 number of bytes read; < 0 error
+ *
+ * If no data is available to be read, then the ilctxt::read_avail
+ * completion is reinitialized.  This allows clients to block
+ * until new log data is saved.
+ */
+int ipc_log_extract(void *ilctxt, char *buff, int size);
+
+/*
+ * Print a string to decode context.
+ * @dctxt   Decode context
+ * @args   printf args
+ */
+#define IPC_SPRINTF_DECODE(dctxt, args...) \
+do { \
+	int i; \
+	i = scnprintf(dctxt->buff, dctxt->size, args); \
+	dctxt->buff += i; \
+	dctxt->size -= i; \
+} while (0)
+
+/*
+ * tsv_timestamp_read: Reads a timestamp
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_timestamp_read(struct encode_context *ectxt,
+			struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_qtimer_read: Reads a QTimer timestamp
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_qtimer_read(struct encode_context *ectxt,
+		     struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_pointer_read: Reads a data pointer
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_pointer_read(struct encode_context *ectxt,
+		      struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_int32_read: Reads a 32-bit integer value
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+int32_t tsv_int32_read(struct encode_context *ectxt,
+		       struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_byte_array_read: Reads a byte array
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_byte_array_read(struct encode_context *ectxt,
+			 struct decode_context *dctxt, const char *format);
+
+/*
+ * add_deserialization_func: Register a deserialization function to
+ *                           to unpack the subevents of a main event
+ *
+ * @ctxt: Debug log context to which the deserialization function has
+ *        to be registered
+ * @type: Main/Root event, defined by the module which is logging, to
+ *        which this deserialization function has to be registered.
+ * @dfunc: Deserialization function to be registered
+ *
+ * return 0 on success, -ve value on FAILURE
+ */
+int add_deserialization_func(void *ctxt, int type,
+			void (*dfunc)(struct encode_context *,
+				      struct decode_context *));
+
+/*
+ * ipc_log_context_destroy: Destroy debug log context
+ *
+ * @ctxt: debug log context created by calling ipc_log_context_create API.
+ */
+int ipc_log_context_destroy(void *ctxt);
+
+#else
+
+static inline void *ipc_log_context_create(int max_num_pages,
+	const char *modname, uint16_t user_version)
+{ return NULL; }
+
+static inline void msg_encode_start(struct encode_context *ectxt,
+	uint32_t type) { }
+
+static inline int tsv_timestamp_write(struct encode_context *ectxt)
+{ return -EINVAL; }
+
+static inline int tsv_qtimer_write(struct encode_context *ectxt)
+{ return -EINVAL; }
+
+static inline int tsv_pointer_write(struct encode_context *ectxt, void *pointer)
+{ return -EINVAL; }
+
+static inline int tsv_int32_write(struct encode_context *ectxt, int32_t n)
+{ return -EINVAL; }
+
+static inline int tsv_byte_array_write(struct encode_context *ectxt,
+			 void *data, int data_size)
+{ return -EINVAL; }
+
+static inline void msg_encode_end(struct encode_context *ectxt) { }
+
+static inline void ipc_log_write(void *ctxt, struct encode_context *ectxt) { }
+
+static inline int ipc_log_string(void *ilctxt, const char *fmt, ...)
+{ return -EINVAL; }
+
+static inline int ipc_log_extract(void *ilctxt, char *buff, int size)
+{ return -EINVAL; }
+
+#define IPC_SPRINTF_DECODE(dctxt, args...) do { } while (0)
+
+static inline void tsv_timestamp_read(struct encode_context *ectxt,
+			struct decode_context *dctxt, const char *format) { }
+
+static inline void tsv_qtimer_read(struct encode_context *ectxt,
+			struct decode_context *dctxt, const char *format) { }
+
+static inline void tsv_pointer_read(struct encode_context *ectxt,
+		      struct decode_context *dctxt, const char *format) { }
+
+static inline int32_t tsv_int32_read(struct encode_context *ectxt,
+		       struct decode_context *dctxt, const char *format)
+{ return 0; }
+
+static inline void tsv_byte_array_read(struct encode_context *ectxt,
+			 struct decode_context *dctxt, const char *format) { }
+
+static inline int add_deserialization_func(void *ctxt, int type,
+			void (*dfunc)(struct encode_context *,
+				      struct decode_context *))
+{ return 0; }
+
+static inline int ipc_log_context_destroy(void *ctxt)
+{ return 0; }
+
+#endif
+
+#endif
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/linux/mfd/fbxgwr_pmu.h	2024-03-18 14:40:14.867741770 +0100
@@ -0,0 +1,425 @@
+#pragma once
+
+/*
+ * note: unknown registers will return 0 on read
+ */
+
+enum pmu_reg {
+
+	/*
+	 * NOTE NOTE NOTE NOTE
+	 *
+	 * this first set of registers is not subject to API
+	 * versioning, so the semantic cannot ever change
+	 */
+
+	/* used to detect presence of PMU */
+	PMU_REG_MAGIC0 = 0x00,
+	PMU_REG_MAGIC1 = 0x01,
+
+	/*
+	 * bit 5 for test mode through usb-pd
+	 * bit 6 for test mode through gpio
+	 * bit 7 for any test mode
+	 */
+	PMU_REG_TEST_MODE = 0x02,
+
+	/* return API major/minor version of active app */
+	PMU_REG_API_MAJOR = 0x03,
+	PMU_REG_API_MINOR = 0x04,
+
+	/* return board id */
+	PMU_REG_BOARD_ID = 0x05,
+
+	/* return version of active app (16 bits) */
+	PMU_REG_APP_VERSION_LO = 0x06,
+	PMU_REG_APP_VERSION_HI = 0x07,
+
+	/* return revision of active app (16 bits) */
+	PMU_REG_APP_REVISION_LO = 0x08,
+	PMU_REG_APP_REVISION_HI = 0x09,
+
+	/* return currently active app bank (0 or 1) */
+	PMU_REG_CUR_APP_BANK = 0x0a,
+
+
+
+	/*
+	 * the remaining set of registers is defined for API major
+	 * version 1
+	 */
+
+	/* return firmware capabilities */
+	PMU_REG_FW_CAPABILITIES = 0xb,
+
+	/* return number of gpio bank available, max is 4, so 32
+	 * gpios */
+	PMU_REG_GPIO_BANK_COUNT = 0xc,
+
+	/* return number of leds available, max is 8 */
+	PMU_REG_LED_COUNT = 0xd,
+
+	/* return number of fans pwm available, max is 4 */
+	PMU_REG_FAN_PWM_COUNT = 0xe,
+
+	/* return number of input sensors registers (including
+	 * inactive ones) */
+	PMU_REG_IN_COUNT = 0xf,
+
+	/* trigger false dying gasp interrupt */
+	PMU_REG_FAKE_DGASP = 0x10,
+
+	/*
+	 * Watchdog registers
+	 */
+	PMU_REG_WDT_CTL = 0x11,
+	PMU_REG_WDT_STS = 0x12,
+	PMU_REG_WDT_TIMEOUT = 0x13,
+	PMU_REG_WDT_REFRESH = 0x14,
+
+	/*
+	 * Type-C polarity
+	 */
+	PMU_REG_CC_POLARITY = 0x15,
+
+	/* wake-on-pon interval (seconds unit) */
+	PMU_REG_WAKE_PON_INTERVAL = 0x17,
+
+	/* soft-reset the PMU, which will cause a full reset */
+	PMU_REG_BOARD_RESET = 0x18,
+
+	/* app bank switch (light soft reset), write bank number to
+	 * switch to */
+	PMU_REG_BANK_SWITCH = 0x19,
+
+	/* suspend command, set wakeup mask and write magic value to
+	 * enter standby */
+	PMU_REG_ENTER_STANDBY = 0x1a,
+
+	/* mask of allowed wakeup source to leave standby */
+	PMU_REG_WAKE_SRC_MASK = 0x1b,
+
+	/* reason we left standby state (wake-up source mask), 0 means
+	 * POR, write 1 to clear */
+	PMU_REG_WAKE_REASON_MASK = 0x1c,
+
+	/* reset reason for the PMU itself */
+	PMU_REG_PMU_RESET_REASON = 0x1d,
+
+	/* wake-on-pon over i2c */
+	PMU_REG_PON_BOOT_MODE = 0x1e,
+	PMU_REG_PON_WOP_RESULT = 0x1f,
+
+	/*
+	 * gpio input value
+	 */
+	PMU_REG_GPIO_IN_BASE = 0x20,
+
+	PMU_REG_GPIO_IN_0 = PMU_REG_GPIO_IN_BASE + 0,
+	PMU_REG_GPIO_IN_1 = PMU_REG_GPIO_IN_BASE + 1,
+	PMU_REG_GPIO_IN_2 = PMU_REG_GPIO_IN_BASE + 2,
+	PMU_REG_GPIO_IN_3 = PMU_REG_GPIO_IN_BASE + 3,
+
+	/* gpio output:
+	 *  setting bit n in PMU_REG_GPIO_OUT_SET_x will set the n-th
+	 *  GPIO in bank x to logical level 1.
+	 *
+	 *  setting bit n in PMU_REG_GPIO_OUT_CLR_x will set the n-th
+	 *  GPIO in bank x to logical level 0.
+	 */
+	PMU_REG_GPIO_OUT_SET_BASE = 0x24,
+
+	PMU_REG_GPIO_OUT_SET_0 = PMU_REG_GPIO_OUT_SET_BASE + 0,
+	PMU_REG_GPIO_OUT_SET_1 = PMU_REG_GPIO_OUT_SET_BASE + 1,
+	PMU_REG_GPIO_OUT_SET_2 = PMU_REG_GPIO_OUT_SET_BASE + 2,
+	PMU_REG_GPIO_OUT_SET_3 = PMU_REG_GPIO_OUT_SET_BASE + 3,
+
+	PMU_REG_GPIO_OUT_CLR_BASE = 0x28,
+
+	PMU_REG_GPIO_OUT_CLR_0 = PMU_REG_GPIO_OUT_CLR_BASE + 0,
+	PMU_REG_GPIO_OUT_CLR_1 = PMU_REG_GPIO_OUT_CLR_BASE + 1,
+	PMU_REG_GPIO_OUT_CLR_2 = PMU_REG_GPIO_OUT_CLR_BASE + 2,
+	PMU_REG_GPIO_OUT_CLR_3 = PMU_REG_GPIO_OUT_CLR_BASE + 3,
+
+	/*
+	 * gpio direction:
+	 *  when reading PMU_REG_GPIO_DIR_GET_x, a bit clear indicates
+	 *  the pin is configured as input.
+	 *
+	 *  setting bit n in PMU_REG_GPIO_DIR_SET_x will configure the
+	 *  pin as output, output value is set to the previously written
+	 *  value inside the corresponding OUT register
+	 *
+	 *  setting bit n in PMU_REG_GPIO_DIR_CLR_x will configure
+	 *  the pin as input
+	 */
+	PMU_REG_GPIO_DIR_GET_BASE = 0x2c,
+
+	PMU_REG_GPIO_DIR_GET_0 = PMU_REG_GPIO_DIR_GET_BASE + 0,
+	PMU_REG_GPIO_DIR_GET_1 = PMU_REG_GPIO_DIR_GET_BASE + 1,
+	PMU_REG_GPIO_DIR_GET_2 = PMU_REG_GPIO_DIR_GET_BASE + 2,
+	PMU_REG_GPIO_DIR_GET_3 = PMU_REG_GPIO_DIR_GET_BASE + 3,
+
+	PMU_REG_GPIO_DIR_SET_BASE = 0x30,
+
+	PMU_REG_GPIO_DIR_SET_0 = PMU_REG_GPIO_DIR_SET_BASE + 0,
+	PMU_REG_GPIO_DIR_SET_1 = PMU_REG_GPIO_DIR_SET_BASE + 1,
+	PMU_REG_GPIO_DIR_SET_2 = PMU_REG_GPIO_DIR_SET_BASE + 2,
+	PMU_REG_GPIO_DIR_SET_3 = PMU_REG_GPIO_DIR_SET_BASE + 3,
+
+	PMU_REG_GPIO_DIR_CLR_BASE = 0x34,
+
+	PMU_REG_GPIO_DIR_CLR_0 = PMU_REG_GPIO_DIR_CLR_BASE + 0,
+	PMU_REG_GPIO_DIR_CLR_1 = PMU_REG_GPIO_DIR_CLR_BASE + 1,
+	PMU_REG_GPIO_DIR_CLR_2 = PMU_REG_GPIO_DIR_CLR_BASE + 2,
+	PMU_REG_GPIO_DIR_CLR_3 = PMU_REG_GPIO_DIR_CLR_BASE + 3,
+
+	/*
+	 * gpio IRQ capable/status/mask, will detect any edge, write 1
+	 * to status bit to clear
+	 */
+	PMU_REG_GPIO_IRQ_CAP_BASE = 0x38,
+
+	PMU_REG_GPIO_IRQ_CAP_0 = PMU_REG_GPIO_IRQ_CAP_BASE + 0,
+	PMU_REG_GPIO_IRQ_CAP_1 = PMU_REG_GPIO_IRQ_CAP_BASE + 1,
+	PMU_REG_GPIO_IRQ_CAP_2 = PMU_REG_GPIO_IRQ_CAP_BASE + 2,
+	PMU_REG_GPIO_IRQ_CAP_3 = PMU_REG_GPIO_IRQ_CAP_BASE + 3,
+
+	PMU_REG_GPIO_IRQ_MASK_BASE = 0x3c,
+
+	PMU_REG_GPIO_IRQ_MASK_0 = PMU_REG_GPIO_IRQ_MASK_BASE + 0,
+	PMU_REG_GPIO_IRQ_MASK_1 = PMU_REG_GPIO_IRQ_MASK_BASE + 1,
+	PMU_REG_GPIO_IRQ_MASK_2 = PMU_REG_GPIO_IRQ_MASK_BASE + 2,
+	PMU_REG_GPIO_IRQ_MASK_3 = PMU_REG_GPIO_IRQ_MASK_BASE + 3,
+
+	PMU_REG_GPIO_IRQ_STAT_BASE = 0x40,
+
+	PMU_REG_GPIO_IRQ_STAT_0 = PMU_REG_GPIO_IRQ_STAT_BASE + 0,
+	PMU_REG_GPIO_IRQ_STAT_1 = PMU_REG_GPIO_IRQ_STAT_BASE + 1,
+	PMU_REG_GPIO_IRQ_STAT_2 = PMU_REG_GPIO_IRQ_STAT_BASE + 2,
+	PMU_REG_GPIO_IRQ_STAT_3 = PMU_REG_GPIO_IRQ_STAT_BASE + 3,
+
+	/* command registers to enable/disable IRQ */
+	PMU_REG_GPIO_IRQ_CMD = 0x44,
+	PMU_REG_GPIO_IRQ_CMD_NR = 0x45,
+	PMU_REG_GPIO_IRQ_CMD_STAT = 0x46,
+
+	/* write 1 to reset internal state (unrequest all irq gpios),
+	 * self clear */
+	PMU_REG_GPIO_REINIT = 0x47,
+
+	/*
+	 * led pwm control
+	 */
+	PMU_REG_LED_PWM_BASE = 0x50,
+
+	PMU_REG_LED0_PWM = PMU_REG_LED_PWM_BASE + 0,
+	PMU_REG_LED1_PWM = PMU_REG_LED_PWM_BASE + 1,
+	PMU_REG_LED2_PWM = PMU_REG_LED_PWM_BASE + 2,
+	PMU_REG_LED3_PWM = PMU_REG_LED_PWM_BASE + 3,
+	PMU_REG_LED4_PWM = PMU_REG_LED_PWM_BASE + 4,
+	PMU_REG_LED5_PWM = PMU_REG_LED_PWM_BASE + 5,
+	PMU_REG_LED6_PWM = PMU_REG_LED_PWM_BASE + 6,
+	PMU_REG_LED7_PWM = PMU_REG_LED_PWM_BASE + 7,
+
+	/*
+	 * input value (current/voltage/temperature/power/fan_input)
+	 *
+	 * 16 bits
+	 *
+	 * current: mA unit
+	 * voltage: mV unit
+	 * temperature: milli °C unit
+	 * power: mW unit
+	 * fan_input: RPM unit
+	 *
+	 * value reported is optionally scaled by *10^n, (1 = value
+	 * reported is divided by 10, ...)
+	 */
+	PMU_REG_IN_BASE = 0x60,
+
+	PMU_REG_IN0_TYPE = PMU_REG_IN_BASE + 0,
+	PMU_REG_IN0_LO = PMU_REG_IN_BASE + 1,
+	PMU_REG_IN0_HI = PMU_REG_IN_BASE + 2,
+	PMU_REG_IN1_TYPE = PMU_REG_IN_BASE + 3,
+	PMU_REG_IN1_LO = PMU_REG_IN_BASE + 4,
+	PMU_REG_IN1_HI = PMU_REG_IN_BASE + 5,
+	PMU_REG_IN2_TYPE = PMU_REG_IN_BASE + 6,
+	PMU_REG_IN2_LO = PMU_REG_IN_BASE + 7,
+	PMU_REG_IN2_HI = PMU_REG_IN_BASE + 8,
+	PMU_REG_IN3_TYPE = PMU_REG_IN_BASE + 9,
+	PMU_REG_IN3_LO = PMU_REG_IN_BASE + 10,
+	PMU_REG_IN3_HI = PMU_REG_IN_BASE + 11,
+	PMU_REG_IN4_TYPE = PMU_REG_IN_BASE + 12,
+	PMU_REG_IN4_LO = PMU_REG_IN_BASE + 13,
+	PMU_REG_IN4_HI = PMU_REG_IN_BASE + 14,
+	PMU_REG_IN5_TYPE = PMU_REG_IN_BASE + 15,
+	PMU_REG_IN5_LO = PMU_REG_IN_BASE + 16,
+	PMU_REG_IN5_HI = PMU_REG_IN_BASE + 17,
+	PMU_REG_IN6_TYPE = PMU_REG_IN_BASE + 18,
+	PMU_REG_IN6_LO = PMU_REG_IN_BASE + 19,
+	PMU_REG_IN6_HI = PMU_REG_IN_BASE + 20,
+	PMU_REG_IN7_TYPE = PMU_REG_IN_BASE + 21,
+	PMU_REG_IN7_LO = PMU_REG_IN_BASE + 22,
+	PMU_REG_IN7_HI = PMU_REG_IN_BASE + 23,
+	PMU_REG_IN8_TYPE = PMU_REG_IN_BASE + 24,
+	PMU_REG_IN8_LO = PMU_REG_IN_BASE + 25,
+	PMU_REG_IN8_HI = PMU_REG_IN_BASE + 26,
+	PMU_REG_IN9_TYPE = PMU_REG_IN_BASE + 27,
+	PMU_REG_IN9_LO = PMU_REG_IN_BASE + 28,
+	PMU_REG_IN9_HI = PMU_REG_IN_BASE + 29,
+	PMU_REG_IN10_TYPE = PMU_REG_IN_BASE + 30,
+	PMU_REG_IN10_LO = PMU_REG_IN_BASE + 31,
+	PMU_REG_IN10_HI = PMU_REG_IN_BASE + 32,
+	PMU_REG_IN11_TYPE = PMU_REG_IN_BASE + 33,
+	PMU_REG_IN11_LO = PMU_REG_IN_BASE + 34,
+	PMU_REG_IN11_HI = PMU_REG_IN_BASE + 35,
+	PMU_REG_IN12_TYPE = PMU_REG_IN_BASE + 36,
+	PMU_REG_IN12_LO = PMU_REG_IN_BASE + 37,
+	PMU_REG_IN12_HI = PMU_REG_IN_BASE + 38,
+	PMU_REG_IN13_TYPE = PMU_REG_IN_BASE + 39,
+	PMU_REG_IN13_LO = PMU_REG_IN_BASE + 40,
+	PMU_REG_IN13_HI = PMU_REG_IN_BASE + 41,
+	PMU_REG_IN14_TYPE = PMU_REG_IN_BASE + 42,
+	PMU_REG_IN14_LO = PMU_REG_IN_BASE + 43,
+	PMU_REG_IN14_HI = PMU_REG_IN_BASE + 44,
+	PMU_REG_IN15_TYPE = PMU_REG_IN_BASE + 45,
+	PMU_REG_IN15_LO = PMU_REG_IN_BASE + 46,
+	PMU_REG_IN15_HI = PMU_REG_IN_BASE + 47,
+
+	/*
+	 * fan control
+	 * PWM value between 0 -> 255,
+	 * fan speed in RPM, 16 bits unsigned
+	 */
+	PMU_REG_FAN_PWM_BASE = 0x90,
+
+	PMU_REG_FAN0_PWM = PMU_REG_FAN_PWM_BASE + 0,
+	PMU_REG_FAN1_PWM = PMU_REG_FAN_PWM_BASE + 1,
+	PMU_REG_FAN2_PWM = PMU_REG_FAN_PWM_BASE + 2,
+	PMU_REG_FAN3_PWM = PMU_REG_FAN_PWM_BASE + 3,
+
+	/*
+	 * firmware update registers
+	 */
+	PMU_REG_FWUP_BASE = 0xa0,
+
+	/* issue command by writing to this register */
+	PMU_REG_FWUP_CMD = PMU_REG_FWUP_BASE + 0,
+
+	/* read-only: poll for command completion via busy bit;
+	 * writing to command will clear other bits */
+	PMU_REG_FWUP_STATUS = PMU_REG_FWUP_BASE + 1,
+
+	/* all data blocks must be transferred with the following size,
+	 * only last block can be smaller */
+	PMU_REG_FWUP_BLOCK_SIZE = PMU_REG_FWUP_BASE + 2,
+
+	/* set data len before START_DATA/NEXT_DATA command with
+	 * actual data len in buffer (must be multiple of BLOCK_SIZE
+	 * except last block) */
+	PMU_REG_FWUP_DATA_LEN = PMU_REG_FWUP_BASE + 3,
+
+	/* area used to write sig / firmware data */
+	PMU_REG_FWUP_DATA_BASE = PMU_REG_FWUP_BASE + 4,
+	PMU_REG_FWUP_DATA_LAST = PMU_REG_FWUP_DATA_BASE + 31,
+
+	/*
+	 * RTC registers
+	 *
+	 * current value, read value 0 first (LSB) to snapshot
+	 */
+	PMU_REG_RTC_VALUE_0 = 0xd0,
+	PMU_REG_RTC_VALUE_1 = 0xd1,
+	PMU_REG_RTC_VALUE_2 = 0xd2,
+	PMU_REG_RTC_VALUE_3 = 0xd3,
+
+	/* RTC compare value for wakeup source */
+	PMU_REG_RTC_CMP_VALUE_0 = 0xd4,
+	PMU_REG_RTC_CMP_VALUE_1 = 0xd5,
+	PMU_REG_RTC_CMP_VALUE_2 = 0xd6,
+	PMU_REG_RTC_CMP_VALUE_3 = 0xd7,
+};
+
+
+#define PMU_API_VERSION_MAJOR	1
+#define PMU_API_VERSION_MINOR	0
+
+#define PMU_MAGIC0_VAL		0xfb
+#define PMU_MAGIC1_VAL		0xec
+
+#define PMU_TEST_MODE_NONE	0x00
+#define PMU_TEST_MODE_USBPD_MASK	(1 << 5)
+#define PMU_TEST_MODE_GPIO_MASK		(1 << 6)
+#define PMU_TEST_MODE_ANY_MASK	(1 << 7)
+
+#define PMU_RESET_MAGIC		0x3d
+
+#define PMU_FAKE_DGASP_MAGIC	0x3e
+
+#define PMU_IN_TYPE_SHIFT	(0)
+#define PMU_IN_TYPE_MASK	(0xf << PMU_IN_TYPE_SHIFT)
+#define PMU_IN_TYPE_UNUSED	0x00
+#define PMU_IN_TYPE_CURRENT	0x01
+#define PMU_IN_TYPE_VOLTAGE	0x02
+#define PMU_IN_TYPE_POWER	0x03
+#define PMU_IN_TYPE_TEMPERATURE	0x04
+#define PMU_IN_TYPE_FAN_INPUT	0x05
+#define PMU_IN_DIVIDER_SHIFT	4
+#define PMU_IN_DIVIDER_MASK	(0x7 << PMU_IN_DIVIDER_SHIFT)
+#define PMU_IN_SIGNED_MASK	(1 << 7)
+
+#define PMU_GPIOIRQCMD_ENABLE	0
+#define PMU_GPIOIRQCMD_DISABLE	1
+#define PMU_GPIOIRQCMD_RES_BUSY		(1 << 0)
+#define PMU_GPIOIRQCMD_RES_SUCCESS	(1 << 1)
+
+#define PMU_STANDBY_MAGIC	0x94
+#define PMU_WAKE_R_RTC_MASK	(1 << 0)
+#define PMU_WAKE_R_PWRBTN_MASK	(1 << 1)
+#define PMU_WAKE_R_WAKEPON_MASK	(1 << 2)
+#define PMU_WAKE_R_SOC_RST_MASK	(1 << 3)
+#define PMU_WAKE_R_WDT_RST_MASK	(1 << 4)
+
+#define PMU_FW_CAP_FWUPGRADE	(1 << 0)
+#define PMU_FW_CAP_BANK_SWITCH	(1 << 1)
+#define PMU_FW_CAP_RTC		(1 << 2)
+#define PMU_FW_CAP_STANDBY	(1 << 3)
+#define PMU_FW_CAP_GPIO_IRQ	(1 << 4)
+#define PMU_FW_CAP_WDT		(1 << 5)
+
+#define PMU_FWUPCMD_NOOP	0
+#define PMU_FWUPCMD_START_SIG	1
+#define PMU_FWUPCMD_NEXT_SIG	2
+#define PMU_FWUPCMD_START_DATA	3
+#define PMU_FWUPCMD_NEXT_DATA	4
+#define PMU_FWUPCMD_VALIDATE	5
+
+#define PMU_FWUPSTATUS_BUSY	(1 << 0)
+#define PMU_FWUPSTATUS_SUCCESS	(1 << 1)
+
+#define PMU_RESET_R_UNKNOWN	0
+#define PMU_RESET_R_POR		1
+#define PMU_RESET_R_SWRESET	2
+#define PMU_RESET_R_VDROP	3
+#define PMU_RESET_R_HWRESET	4
+#define PMU_RESET_R_WATCHDOG	5
+#define PMU_RESET_R_BUS_ERROR	6
+#define PMU_RESET_R_SRAM_PARITY	7
+#define PMU_RESET_R_BOOTSTRAP	8
+
+#define PMU_PON_BOOT_MODE_NORMAL	0
+#define PMU_PON_BOOT_MODE_WOP		1
+
+#define PMU_PON_WOP_RES_WAKE		1
+#define PMU_PON_WOP_RES_SLEEP		2
+
+#define PMU_WDT_CTL_EN			(1 << 0)
+#define PMU_WDT_CTL_INT_EN		(1 << 1)
+
+#define PMU_WDT_STS_INT_STS		(1 << 0)
+#define PMU_WDT_REFRESH_VAL		0xA5
+
+#define PMU_CC_POLARITY_UNKNOWN		0
+#define PMU_CC_POLARITY_CC1		1
+#define PMU_CC_POLARITY_CC2		2
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/linux/xdsl_phy_api.h	2023-02-24 19:07:30.754305868 +0100
@@ -0,0 +1,125 @@
+#ifndef XDSL_PHY_API_H_
+#define XDSL_PHY_API_H_
+
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/of.h>
+#include <linux/notifier.h>
+
+struct xdsl_phy;
+
+/*
+ * PHY device API
+ */
+struct xdsl_phy_status {
+	bool		powered_up;
+	bool		link_up;
+	bool		ptm_mode;
+	unsigned int	ds_rate;
+	unsigned int	us_rate;
+	unsigned int	ds_cell_rate;
+	unsigned int	us_cell_rate;
+};
+
+struct xdsl_phy_ops {
+	void	(*get_status)(struct xdsl_phy *, struct xdsl_phy_status *);
+	int	(*open_vcc)(struct xdsl_phy *, int vpi, int vci);
+	int	(*close_vcc)(struct xdsl_phy *, int vpi, int vci);
+	int	(*set_max_sdu)(struct xdsl_phy *, unsigned int max_sdu);
+};
+
+struct xdsl_phy {
+	/*
+	 * to fill before registering
+	 */
+	const struct xdsl_phy_ops	*ops;
+	struct device_node		*of_node;
+	unsigned int			id;
+	struct module			*owner;
+	void				*priv;
+
+	struct mutex			lock;
+	struct mutex			ops_lock;
+	bool				in_use;
+	bool				started;
+	bool				initial_change_pending;
+	struct work_struct		initial_change_work;
+	void				(*change_cb)(struct xdsl_phy *,
+						     void *);
+	void				*change_priv;
+
+	struct list_head		next;
+};
+
+int xdsl_phy_device_register(struct xdsl_phy *);
+
+void xdsl_phy_device_notify_change(struct xdsl_phy *);
+
+void xdsl_phy_device_unregister(struct xdsl_phy *);
+
+
+/*
+ * PHY users API
+ */
+struct xdsl_phy *xdsl_phy_attach(struct device_node *node,
+				 unsigned int id,
+				 void (*change_cb)(struct xdsl_phy *,
+						   void *),
+				 void *change_priv);
+
+void xdsl_phy_start(struct xdsl_phy *);
+void xdsl_phy_stop(struct xdsl_phy *);
+
+void xdsl_phy_detach(struct xdsl_phy *);
+
+static inline void xdsl_phy_op_get_status(struct xdsl_phy *phy_dev,
+					  struct xdsl_phy_status *s)
+{
+	mutex_lock(&phy_dev->ops_lock);
+	phy_dev->ops->get_status(phy_dev, s);
+	mutex_unlock(&phy_dev->ops_lock);
+}
+
+static inline int xdsl_phy_op_open_vcc(struct xdsl_phy *phy_dev,
+				       int vpi, int vci)
+{
+	int ret;
+
+	if (!phy_dev->ops->open_vcc)
+		return -ENOTSUPP;
+
+	mutex_lock(&phy_dev->ops_lock);
+	ret = phy_dev->ops->open_vcc(phy_dev, vpi, vci);
+	mutex_unlock(&phy_dev->ops_lock);
+	return ret;
+}
+
+static inline int xdsl_phy_op_close_vcc(struct xdsl_phy *phy_dev,
+					int vpi, int vci)
+{
+	int ret;
+
+	if (!phy_dev->ops->close_vcc)
+		return -ENOTSUPP;
+
+	mutex_lock(&phy_dev->ops_lock);
+	ret = phy_dev->ops->close_vcc(phy_dev, vpi, vci);
+	mutex_unlock(&phy_dev->ops_lock);
+	return ret;
+}
+
+static inline int xdsl_phy_op_set_max_sdu(struct xdsl_phy *phy_dev,
+					  unsigned int max_sdu)
+{
+	int ret;
+
+	if (!phy_dev->ops->set_max_sdu)
+		return -ENOTSUPP;
+
+	mutex_lock(&phy_dev->ops_lock);
+	ret = phy_dev->ops->set_max_sdu(phy_dev, max_sdu);
+	mutex_unlock(&phy_dev->ops_lock);
+	return ret;
+}
+
+#endif /* ! XDSL_PHY_API_H_ */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/net/gso.h	2023-11-07 13:38:44.058256582 +0100
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _NET_GSO_H
+#define _NET_GSO_H
+
+#include <linux/skbuff.h>
+
+/* Keeps track of mac header offset relative to skb->head.
+ * It is useful for TSO of Tunneling protocol. e.g. GRE.
+ * For non-tunnel skb it points to skb_mac_header() and for
+ * tunnel skb it points to outer mac header.
+ * Keeps track of level of encapsulation of network headers.
+ */
+struct skb_gso_cb {
+	union {
+		int	mac_offset;
+		int	data_offset;
+	};
+	int	encap_level;
+	__wsum	csum;
+	__u16	csum_start;
+};
+#define SKB_GSO_CB_OFFSET	32
+#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
+
+static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
+{
+	return (skb_mac_header(inner_skb) - inner_skb->head) -
+		SKB_GSO_CB(inner_skb)->mac_offset;
+}
+
+static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
+{
+	int new_headroom, headroom;
+	int ret;
+
+	headroom = skb_headroom(skb);
+	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
+	if (ret)
+		return ret;
+
+	new_headroom = skb_headroom(skb);
+	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
+	return 0;
+}
+
+static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
+{
+	/* Do not update partial checksums if remote checksum is enabled. */
+	if (skb->remcsum_offload)
+		return;
+
+	SKB_GSO_CB(skb)->csum = res;
+	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
+}
+
+/* Compute the checksum for a gso segment. First compute the checksum value
+ * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
+ * then add in skb->csum (checksum from csum_start to end of packet).
+ * skb->csum and csum_start are then updated to reflect the checksum of the
+ * resultant packet starting from the transport header-- the resultant checksum
+ * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo
+ * header).
+ */
+static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
+{
+	unsigned char *csum_start = skb_transport_header(skb);
+	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
+	__wsum partial = SKB_GSO_CB(skb)->csum;
+
+	SKB_GSO_CB(skb)->csum = res;
+	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
+
+	return csum_fold(csum_partial(csum_start, plen, partial));
+}
+
+struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+				  netdev_features_t features, bool tx_path);
+
+static inline struct sk_buff *skb_gso_segment(struct sk_buff *skb,
+					      netdev_features_t features)
+{
+	return __skb_gso_segment(skb, features, true);
+}
+
+struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
+				    netdev_features_t features, __be16 type);
+
+struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
+				    netdev_features_t features);
+
+bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
+
+bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
+
+static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
+					int pulled_hlen, u16 mac_offset,
+					int mac_len)
+{
+	skb->protocol = protocol;
+	skb->encapsulation = 1;
+	skb_push(skb, pulled_hlen);
+	skb_reset_transport_header(skb);
+	skb->mac_header = mac_offset;
+	skb->network_header = skb->mac_header + mac_len;
+	skb->mac_len = mac_len;
+}
+
+#endif /* _NET_GSO_H */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/net/ip6_ffn.h	2024-01-19 17:01:19.901848014 +0100
@@ -0,0 +1,59 @@
+#ifndef IP6_FFN_H_
+#define IP6_FFN_H_
+
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/rwlock.h>
+#include <net/route.h>
+#include <net/netfilter/nf_conntrack.h>
+
+struct ffn6_data {
+	u32 new_sip[4];
+	u32 new_dip[4];
+
+	u16 new_sport;
+	u16 new_dport;
+	__sum16 adjustment;
+	u8 new_tos;
+	u32 new_skb_prio;
+	u32 new_mark;
+
+	u32 force_skb_prio : 1;
+	u32 alter : 1;
+	u32 tos_change : 1;
+	struct dst_entry *dst;
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+
+	void (*priv_destructor)(void *);
+	u32 ffn_priv_area[8];
+};
+
+struct ffn6_lookup_entry {
+	u32 sip[4];
+	u32 dip[4];
+	u16 sport;
+	u16 dport;
+	u8 protocol;
+	u8 added_when;
+#ifdef CONFIG_IPV6_FFN_PROCFS
+	uint64_t forwarded_bytes;
+	uint32_t forwarded_packets;
+#endif
+	struct list_head next;
+	struct ffn6_data manip;
+	struct list_head all_next;
+	struct rcu_head rcu;
+};
+
+struct ffn6_lookup_key {
+	const u32 *sip;
+	const u32 *dip;
+	u16 sport;
+	u16 dport;
+	bool is_tcp;
+};
+
+struct ffn6_lookup_entry *__ffn6_get_rcu(const struct ffn6_lookup_key *key);
+
+#endif /* ! IP6_FFN_H_*/
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/net/ip_ffn.h	2024-01-19 17:01:19.901848014 +0100
@@ -0,0 +1,58 @@
+#ifndef IP_FFN_H_
+#define IP_FFN_H_
+
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/rwlock.h>
+#include <net/route.h>
+#include <net/netfilter/nf_conntrack.h>
+
+struct ffn_data {
+	u32 new_sip;
+	u32 new_dip;
+	u16 new_sport;
+	u16 new_dport;
+	u8 new_tos;
+	u8 force_skb_prio : 1;
+	u8 alter : 1;
+	u8 tos_change : 1;
+	__sum16 ip_adjustment;
+	__sum16 l4_adjustment;
+	unsigned int new_skb_prio;
+	u32 new_mark;
+	struct dst_entry *dst;
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+
+	void (*priv_destructor)(void *);
+	u32 ffn_priv_area[8];
+};
+
+struct ffn_lookup_entry {
+	int added_when;
+	u32 sip;
+	u32 dip;
+	u16 sport;
+	u16 dport;
+	u8 protocol;
+#ifdef CONFIG_IP_FFN_PROCFS
+	uint64_t forwarded_bytes;
+	uint32_t forwarded_packets;
+#endif
+	struct list_head next;
+	struct ffn_data manip;
+	struct list_head all_next;
+	struct rcu_head rcu;
+};
+
+struct ffn_lookup_key {
+	u32 sip;
+	u32 dip;
+	u16 sport;
+	u16 dport;
+	bool is_tcp;
+};
+
+struct ffn_lookup_entry *__ffn_get_rcu(const struct ffn_lookup_key *key);
+
+#endif /* ! IP_FFN_H_*/
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/uapi/linux/bcm63xx_rdp_ioctl.h	2023-03-09 15:06:12.428262660 +0100
@@ -0,0 +1,71 @@
+#ifndef LINUX_BCM63XX_RDP_IOCTL_H_
+#define LINUX_BCM63XX_RDP_IOCTL_H_
+
+#include <linux/types.h>
+
+enum {
+	RDP_IOC_OP_GET_INFO,
+
+	RDP_IOC_OP_READ8,
+	RDP_IOC_OP_READ16,
+	RDP_IOC_OP_READ32,
+	RDP_IOC_OP_WRITE8,
+	RDP_IOC_OP_WRITE16,
+	RDP_IOC_OP_WRITE32,
+
+	RDP_IOC_OP_READ_TM_32,
+	RDP_IOC_OP_WRITE_TM_32,
+	RDP_IOC_OP_READ_MC_32,
+	RDP_IOC_OP_WRITE_MC_32,
+
+	RDP_IOC_OP_RESET,
+
+	RDP_IOC_DMA_MAP,
+	RDP_IOC_DMA_GET_INFO,
+	RDP_IOC_DMA_FLUSH_ALL,
+	RDP_IOC_DMA_READ_BUFFER,
+	RDP_IOC_DMA_WRITE_BUFFER,
+
+	RDP_IOC_OP_MAP_INTERRUPTS,
+};
+
+struct bcm_rdp_pioctl_dma_result {
+	__u32		id;
+	__u32		size;
+	__u64		virt_addr;
+	__u64		dma_addr;
+};
+
+struct bcm_rdp_pioctl_get_info_result {
+	__u64		tm_dma_addr;
+	__u64		mc_dma_addr;
+	__u32		tm_size;
+	__u32		mc_size;
+};
+
+struct bcm_rdp_pioctl {
+	union {
+		/* for get_info op */
+		struct {
+			void __user	*buf_addr;
+		} get_info;
+
+		/* for read/write op */
+		struct {
+			__u32		reg_area;
+			__u32		offset;
+			__u32		size;
+			void __user	*buf_addr;
+		} io;
+
+		/* for dma op */
+		struct {
+			__u32		id;
+			__u32		size;
+			void __user	*buf_addr;
+		} dma;
+	} u;
+};
+
+#endif /* LINUX_BCM63XX_RDP_IOCTL_H_ */
+
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/uapi/linux/exfat_user.h	2023-02-24 19:09:23.405368085 +0100
@@ -0,0 +1,47 @@
+/*
+ * exfat_user.h for exfat
+ * Created by <nschichan@freebox.fr> on Fri Aug 23 15:31:08 2013
+ */
+
+#ifndef __EXFAT_USER_H
+# define __EXFAT_USER_H
+
+struct exfat_fragment {
+	uint32_t	fcluster_start;
+	uint32_t	dcluster_start;
+	uint32_t	nr_clusters;
+	uint64_t	sector_start;
+};
+
+struct exfat_fragment_head {
+	uint32_t		fcluster_start;
+	uint32_t		nr_fragments;
+	uint32_t		sector_size;
+	uint32_t		cluster_size;
+	struct exfat_fragment	fragments[0];
+};
+
+struct exfat_bitmap_data {
+	uint32_t		start_cluster;
+	uint32_t		nr_clusters;
+	uint64_t		sector_start;
+	uint64_t		nr_sectors;
+};
+
+struct exfat_bitmap_head {
+	uint32_t			start_cluster;
+	uint32_t			nr_entries;
+	struct exfat_bitmap_data	entries[0];
+};
+
+struct exfat_dirent_head {
+	uint32_t offset;
+	uint32_t nr_entries;
+	uint8_t entries[0];
+};
+
+#define EXFAT_IOCGETFRAGMENTS	_IOR('X', 0x01, struct exfat_fragment_head)
+#define EXFAT_IOCGETBITMAP	_IOR('X', 0x02, struct exfat_bitmap_head)
+#define EXFAT_IOCGETDIRENTS	_IOR('X', 0x03, struct exfat_dirent_head)
+
+#endif /* !__EXFAT_USER_H */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/uapi/linux/fbxatm.h	2023-02-27 17:10:06.964489879 +0100
@@ -0,0 +1,159 @@
+/*
+ * Generic fbxatm definition, exported to userspace
+ */
+#ifndef LINUX_FBXATM_H_
+#define LINUX_FBXATM_H_
+
+#include <linux/types.h>
+#include <linux/if.h>
+
+#define FBXATM_IOCTL_MAGIC		0xd3
+
+/* allow userspace usage without up to date kernel headers */
+#ifndef PF_FBXATM
+#define PF_FBXATM			32
+#define AF_FBXATM			PF_FBXATM
+#endif
+
+struct fbxatm_vcc_id {
+	int				dev_idx;
+	__u32				vpi;
+	__u32				vci;
+};
+
+enum fbxatm_vcc_user {
+	FBXATM_VCC_USER_NONE = 0,
+	FBXATM_VCC_USER_2684,
+	FBXATM_VCC_USER_PPPOA,
+};
+
+enum fbxatm_vcc_traffic_class {
+	FBXATM_VCC_TC_UBR_NO_PCR = 0,
+	FBXATM_VCC_TC_UBR,
+};
+
+struct fbxatm_vcc_qos {
+	__u32				traffic_class;
+	__u32				max_sdu;
+	__u32				max_buffered_pkt;
+	__u32				priority;
+	__u32				rx_priority;
+};
+
+
+/*
+ * VCC related
+ */
+struct fbxatm_vcc_params {
+	/* ADD/DEL/GET */
+	struct fbxatm_vcc_id		id;
+
+	/* ADD/GET */
+	struct fbxatm_vcc_qos		qos;
+
+	/* GET */
+	enum fbxatm_vcc_user		user;
+};
+
+#define FBXATM_IOCADD		_IOW(FBXATM_IOCTL_MAGIC,	1,	\
+					struct fbxatm_vcc_params)
+
+#define FBXATM_IOCDEL		_IOR(FBXATM_IOCTL_MAGIC,	2,	\
+					struct fbxatm_vcc_params)
+
+#define FBXATM_IOCGET		_IOWR(FBXATM_IOCTL_MAGIC,	3,	\
+					struct fbxatm_vcc_params)
+
+
+struct fbxatm_vcc_drop_params {
+	struct fbxatm_vcc_id		id;
+	unsigned int			drop_count;
+};
+
+#define FBXATM_IOCDROP		_IOWR(FBXATM_IOCTL_MAGIC,	5,	\
+					struct fbxatm_vcc_drop_params)
+
+/*
+ * OAM related
+ */
+enum fbxatm_oam_ping_type {
+	FBXATM_OAM_PING_SEG_F4	= 0,
+	FBXATM_OAM_PING_SEG_F5,
+	FBXATM_OAM_PING_E2E_F4,
+	FBXATM_OAM_PING_E2E_F5,
+};
+
+struct fbxatm_oam_ping_req {
+	/* only dev_idx for F4 */
+	struct fbxatm_vcc_id		id;
+
+	__u8				llid[16];
+	enum fbxatm_oam_ping_type	type;
+};
+
+#define FBXATM_IOCOAMPING	_IOWR(FBXATM_IOCTL_MAGIC,	10,	\
+				      struct fbxatm_oam_ping_req)
+
+
+/*
+ * PPPOA related
+ */
+enum fbxatm_pppoa_encap {
+	FBXATM_EPPPOA_AUTODETECT = 0,
+	FBXATM_EPPPOA_VCMUX,
+	FBXATM_EPPPOA_LLC,
+};
+
+struct fbxatm_pppoa_vcc_params {
+	struct fbxatm_vcc_id		id;
+	__u32				encap;
+	__u32				cur_encap;
+};
+
+#define FBXATM_PPPOA_IOCADD	_IOW(FBXATM_IOCTL_MAGIC,	20,	\
+					struct fbxatm_pppoa_vcc_params)
+
+#define FBXATM_PPPOA_IOCDEL	_IOW(FBXATM_IOCTL_MAGIC,	21,	\
+					struct fbxatm_pppoa_vcc_params)
+
+#define FBXATM_PPPOA_IOCGET	_IOWR(FBXATM_IOCTL_MAGIC,	22,	\
+					struct fbxatm_pppoa_vcc_params)
+
+
+
+/*
+ * 2684 related
+ */
+enum fbxatm_2684_encap {
+	FBXATM_E2684_VCMUX = 0,
+	FBXATM_E2684_LLC,
+};
+
+enum fbxatm_2684_payload {
+	FBXATM_P2684_BRIDGE = 0,
+	FBXATM_P2684_ROUTED,
+};
+
+#define FBXATM_2684_MAX_VCC		8
+
+struct fbxatm_2684_vcc_params {
+	struct fbxatm_vcc_id		id_list[FBXATM_2684_MAX_VCC];
+	size_t				id_count;
+
+	__u32				encap;
+	__u32				payload;
+	char				dev_name[IFNAMSIZ];
+	__u8				perm_addr[6];
+};
+
+
+#define FBXATM_2684_IOCADD	_IOW(FBXATM_IOCTL_MAGIC,	30,	\
+					struct fbxatm_2684_vcc_params)
+
+#define FBXATM_2684_IOCDEL	_IOW(FBXATM_IOCTL_MAGIC,	31,	\
+					struct fbxatm_2684_vcc_params)
+
+#define FBXATM_2684_IOCGET	_IOWR(FBXATM_IOCTL_MAGIC,	32,	\
+					struct fbxatm_2684_vcc_params)
+
+#endif /* LINUX_FBXATM_H_ */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/uapi/linux/fbxbridge.h	2023-02-27 19:50:22.648261408 +0100
@@ -0,0 +1,72 @@
+#ifndef _UAPI_FBXBRIDGE_H
+# define _UAPI_FBXBRIDGE_H
+
+#include <linux/if.h>
+#include <linux/if_ether.h>
+
+#define MAX_ALIASES				3
+
+#define FBXBRIDGE_FLAGS_FILTER			(1 << 0)
+#define FBXBRIDGE_FLAGS_DHCPD			(1 << 1)
+#define FBXBRIDGE_FLAGS_NETFILTER		(1 << 2)
+
+/*
+ * ioctl command
+ */
+
+enum fbxbridge_ioctl_cmd
+{
+	E_CMD_BR_CHG = 0,
+	E_CMD_BR_DEV_CHG,
+	E_CMD_BR_PARAMS,
+};
+
+struct fbxbridge_ioctl_chg
+{
+	char	brname[IFNAMSIZ];
+	__u32	action;
+};
+
+struct fbxbridge_ioctl_dev_chg
+{
+	char	brname[IFNAMSIZ];
+	char	devname[IFNAMSIZ];
+	__u32	wan;
+	__u32	action;
+};
+
+struct fbxbridge_port_info
+{
+	char	name[IFNAMSIZ];
+	__u32	present;
+};
+
+struct fbxbridge_ioctl_params
+{
+	int				action;
+	char				brname[IFNAMSIZ];
+
+	/* config */
+	__u32				flags;
+	__be32				dns1_addr;
+	__be32				dns2_addr;
+	__be32				ip_aliases[MAX_ALIASES];
+	__u32				dhcpd_renew_time;
+	__u32				dhcpd_rebind_time;
+	__u32				dhcpd_lease_time;
+	__u32				inputmark;
+
+	/* status */
+	struct fbxbridge_port_info	wan_dev;
+	struct fbxbridge_port_info	lan_dev;
+	__u8				lan_hwaddr[ETH_ALEN];
+	__u32				have_hw_addr;
+};
+
+struct fbxbridge_ioctl_req
+{
+	enum fbxbridge_ioctl_cmd	cmd;
+	unsigned long			arg;
+};
+
+#endif /* _UAPI_FBXBRIDGE_H */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/uapi/linux/fbxjtag.h	2023-03-09 15:06:12.428262660 +0100
@@ -0,0 +1,89 @@
+#ifndef FBXJTAG_H_
+# define FBXJTAG_H_
+
+#ifdef __KERNEL__
+# include <linux/types.h>
+#endif
+
+# define JTAG_RESET_STEPS	16
+# define JTAG_DATA_READ_SIZE	128
+# define JTAG_INST_READ_SIZE	128
+# define JTAG_DEF_CLOCK_DELAY	500
+# define JTAG_DEF_WAIT_TMS	0
+
+enum jtag_main_state {
+	JTAG_STATE_TEST_MASK	=	0x10,
+	JTAG_STATE_RUN_MASK	=	0x20,
+	JTAG_STATE_DR_MASK	=	0x40,
+	JTAG_STATE_IR_MASK	=	0x80,
+};
+#define JTAG_STATE_MASK			0xF0
+
+enum jtag_sub_state {
+	JTAG_SUB_STATE_SELECT	=	0x0,
+	JTAG_SUB_STATE_CAPTURE	=	0x1,
+	JTAG_SUB_STATE_SHIFT	=	0x2,
+	JTAG_SUB_STATE_EXIT1	=	0x3,
+	JTAG_SUB_STATE_PAUSE	=	0x4,
+	JTAG_SUB_STATE_EXIT2	=	0x5,
+	JTAG_SUB_STATE_UPDATE	=	0x6,
+};
+#define JTAG_SUB_STATE_MASK		0xF
+
+enum jtag_state {
+	JTAG_STATE_UNDEF	= 0,
+	JTAG_STATE_TEST_LOGIC_RESET	= JTAG_STATE_TEST_MASK,
+	JTAG_STATE_RUN_TEST_IDLE	= JTAG_STATE_RUN_MASK,
+
+	JTAG_STATE_SELECT_DR	= JTAG_STATE_DR_MASK | JTAG_SUB_STATE_SELECT,
+	JTAG_STATE_CAPTURE_DR	= JTAG_STATE_DR_MASK | JTAG_SUB_STATE_CAPTURE,
+	JTAG_STATE_SHIFT_DR	= JTAG_STATE_DR_MASK | JTAG_SUB_STATE_SHIFT,
+	JTAG_STATE_EXIT1_DR	= JTAG_STATE_DR_MASK | JTAG_SUB_STATE_EXIT1,
+	JTAG_STATE_PAUSE_DR	= JTAG_STATE_DR_MASK | JTAG_SUB_STATE_PAUSE,
+	JTAG_STATE_EXIT2_DR	= JTAG_STATE_DR_MASK | JTAG_SUB_STATE_EXIT2,
+	JTAG_STATE_UPDATE_DR	= JTAG_STATE_DR_MASK | JTAG_SUB_STATE_UPDATE,
+
+	JTAG_STATE_SELECT_IR	= JTAG_STATE_IR_MASK | JTAG_SUB_STATE_SELECT,
+	JTAG_STATE_CAPTURE_IR	= JTAG_STATE_IR_MASK | JTAG_SUB_STATE_CAPTURE,
+	JTAG_STATE_SHIFT_IR	= JTAG_STATE_IR_MASK | JTAG_SUB_STATE_SHIFT,
+	JTAG_STATE_EXIT1_IR	= JTAG_STATE_IR_MASK | JTAG_SUB_STATE_EXIT1,
+	JTAG_STATE_PAUSE_IR	= JTAG_STATE_IR_MASK | JTAG_SUB_STATE_PAUSE,
+	JTAG_STATE_EXIT2_IR	= JTAG_STATE_IR_MASK | JTAG_SUB_STATE_EXIT2,
+	JTAG_STATE_UPDATE_IR	= JTAG_STATE_IR_MASK | JTAG_SUB_STATE_UPDATE,
+
+	JTAG_STATE_MAX
+};
+
+#define JTAG_STATE_IN_DR(state)	((state) & JTAG_STATE_DR_MASK)
+#define JTAG_STATE_IN_IR(state)	((state) & JTAG_STATE_IR_MASK)
+
+#ifdef __KERNEL__
+
+#define JTAG_BUF_SIZE	2048
+
+struct fbxjtag_data {
+	const char	*name;
+	struct {
+		struct fbxgpio_pin	*tck;
+		struct fbxgpio_pin	*tms;
+		struct fbxgpio_pin	*tdi;
+		struct fbxgpio_pin	*tdo;
+	}		gpios;
+	u32		clock_delay;
+	u32		wait_tms;
+	u32		data_read_size;
+	u32		instruction_read_size;
+	bool		last_tms_dataout;
+	struct device	*dev;
+	enum jtag_state state;
+	char		nb_reset;
+	char		dr_buf[JTAG_BUF_SIZE];
+	unsigned 	dr_w;
+	unsigned 	dr_r;
+	char		ir_buf[JTAG_BUF_SIZE];
+	unsigned 	ir_r;
+	unsigned 	ir_w;
+};
+#endif
+
+#endif /* !FBXJTAG_H_ */
diff -Nruw linux-6.4-fbx/include/uapi/linux/hdmi-cec./dev.h linux-6.4-fbx/include/uapi/linux/hdmi-cec/dev.h
--- linux-6.4-fbx/include/uapi/linux/hdmi-cec./dev.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/include/uapi/linux/hdmi-cec/dev.h	2023-03-09 15:06:12.428262660 +0100
@@ -0,0 +1,30 @@
+#ifndef __HDMI_CEC_DEV_H
+#define __HDMI_CEC_DEV_H
+
+#include <linux/ioctl.h>
+#include <linux/hdmi-cec/hdmi-cec.h>
+
+#define CEC_IOCTL_BASE	'C'
+
+#define CEC_SET_LOGICAL_ADDRESS	_IOW(CEC_IOCTL_BASE, 0, int)
+#define CEC_RESET_DEVICE	_IOW(CEC_IOCTL_BASE, 3, int)
+#define CEC_GET_COUNTERS	_IOR(CEC_IOCTL_BASE, 4, struct cec_counters)
+#define CEC_SET_RX_MODE		_IOW(CEC_IOCTL_BASE, 5, enum cec_rx_mode)
+#define CEC_GET_TX_STATUS	_IOW(CEC_IOCTL_BASE, 6, struct cec_tx_status)
+#define CEC_SET_DETACHED_CONFIG	_IOW(CEC_IOCTL_BASE, 7, struct cec_detached_config)
+
+#define CEC_MAX_DEVS	(10)
+
+#ifdef __KERNEL__
+
+struct cec_adapter;
+
+int __init cec_cdev_init(void);
+void __exit cec_cdev_exit(void);
+
+int cec_create_adapter_node(struct cec_adapter *);
+void cec_remove_adapter_node(struct cec_adapter *);
+
+#endif /* __KERNEL__ */
+
+#endif /* __HDMI_CEC_DEV_H */
diff -Nruw linux-6.4-fbx/include/uapi/linux/hdmi-cec./hdmi-cec.h linux-6.4-fbx/include/uapi/linux/hdmi-cec/hdmi-cec.h
--- linux-6.4-fbx/include/uapi/linux/hdmi-cec./hdmi-cec.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/include/uapi/linux/hdmi-cec/hdmi-cec.h	2023-03-09 15:06:12.428262660 +0100
@@ -0,0 +1,153 @@
+#ifndef __UAPI_HDMI_CEC_H
+#define __UAPI_HDMI_CEC_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/* Common defines for HDMI CEC */
+#define CEC_BCAST_ADDR		(0x0f)
+#define CEC_ADDR_MAX		CEC_BCAST_ADDR
+
+#define CEC_MAX_MSG_LEN		(16)	/* 16 blocks */
+
+enum cec_rx_msg_flags {
+	/*
+	 * an ACK was received for this message
+	 */
+	CEC_RX_F_ACKED			= (1 << 0),
+
+	/*
+	 * message was fully received
+	 */
+	CEC_RX_F_COMPLETE		= (1 << 1),
+};
+
+/**
+ * struct cec_rx_msg - user-space exposed cec message cookie
+ * @data:	cec message payload
+ * @len:	cec message length
+ * @valid:	0 for invalid message
+ * @flags:	flag field (cec_rx_msg_flags)
+ */
+struct cec_rx_msg {
+	__u8	data[CEC_MAX_MSG_LEN];
+	__u8	len;
+	__u8	valid;
+	__u8	flags;
+
+} __attribute__((packed));
+
+enum cec_tx_status_flags {
+	/*
+	 * message was nacked at some point
+	 */
+	CEC_TX_F_NACK			= (1 << 0),
+
+	/*
+	 * abort sending because total time to send was elapsed
+	 */
+	CEC_TX_F_TIMEOUT		= (1 << 1),
+
+	/*
+	 * abort sending because maximum number of retry has passed
+	 */
+	CEC_TX_F_MAX_RETRIES		= (1 << 2),
+
+	/*
+	 * abort sending because of arbitration loss
+	 */
+	CEC_TX_F_ARBITRATION_LOST	= (1 << 3),
+
+	/*
+	 * message failed for other reason
+	 */
+	CEC_TX_F_UNKNOWN_ERROR		= (1 << 7),
+};
+
+/**
+ * struct cec_tx_msg - user-space exposed cec message cookie
+ * @expire_ms:	how long we try to send message (milliseconds)
+ * @data:	cec message payload
+ * @len:	cec message length
+ * @success:	0 => message was sent, else => failed to send message
+ * @flags:	flag field (cec_tx_msg_flags)
+ * @tries:	number of try done to send message
+ */
+struct cec_tx_msg {
+	__u16	expire_ms;
+	__u8	data[CEC_MAX_MSG_LEN];
+	__u8	len;
+	__u8	success;
+	__u8	flags;
+	__u8	tries;
+} __attribute__((packed));
+
+struct cec_tx_status {
+	__u8	sent;
+	__u8	success;
+	__u8	flags;
+	__u8	tries;
+} __attribute__((packed));
+
+#define DETACH_CFG_F_WAKEUP		(1 << 0)
+
+struct cec_detached_config {
+	__u8	phys_addr_valid;
+	__u8	phys_addr[2];
+	__u8	flags;
+} __attribute__((packed));
+
+/* Counters */
+
+/**
+ * struct cec_rx_counters - cec adapter RX counters
+ */
+struct cec_rx_counters {
+	__u8	pkts;
+	__u8	filtered_pkts;
+	__u8	valid_pkts;
+	__u8	rx_queue_full;
+	__u8	late_ack;
+	__u8	error;
+	__u8	rx_timeout_abort;
+	__u8	rx_throttled;
+};
+
+/**
+ * struct cec_tx_counters - cec adapter TX counters
+ */
+struct cec_tx_counters {
+	__u8	done;
+	__u8	fail;
+	__u8	timeout;
+	__u8	arb_loss;
+	__u8	bad_ack_timings;
+	__u8	tx_miss_early;
+	__u8	tx_miss_late;
+};
+
+/**
+ * struct cec_counters - tx and rx cec counters
+ * @rx:	struct cec_rx_counters
+ * @tx: struct cec_tx_counters
+ */
+struct cec_counters {
+	struct cec_rx_counters	rx;
+	struct cec_tx_counters	tx;
+};
+
+/**
+ * enum cec_rx_mode - cec adapter rx mode
+ * @CEC_RX_MODE_DISABLED:	RX path is disabled (default)
+ * @CEC_RX_MODE_DEFAULT:	accept only unicast traffic
+ * @CEC_RX_MODE_ACCEPT_ALL:	accept all incoming RX traffic (sniffing mode)
+ * @CEC_RX_MODE_MAX:		sentinel
+ */
+enum cec_rx_mode {
+	CEC_RX_MODE_DISABLED = 0,
+	CEC_RX_MODE_DEFAULT,
+	CEC_RX_MODE_ACCEPT_ALL,
+	CEC_RX_MODE_MAX
+};
+
+#endif /* __UAPI_HDMI_CEC_H */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/uapi/linux/ipx.h	2023-03-09 15:06:12.428262660 +0100
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _IPX_H_
+#define _IPX_H_
+#include <linux/libc-compat.h>	/* for compatibility with glibc netipx/ipx.h */
+#include <linux/types.h>
+#include <linux/sockios.h>
+#include <linux/socket.h>
+#define IPX_NODE_LEN	6
+#define IPX_MTU		576
+
+#if __UAPI_DEF_SOCKADDR_IPX
+struct sockaddr_ipx {
+	__kernel_sa_family_t sipx_family;
+	__be16		sipx_port;
+	__be32		sipx_network;
+	unsigned char 	sipx_node[IPX_NODE_LEN];
+	__u8		sipx_type;
+	unsigned char	sipx_zero;	/* 16 byte fill */
+};
+#endif /* __UAPI_DEF_SOCKADDR_IPX */
+
+/*
+ * So we can fit the extra info for SIOCSIFADDR into the address nicely
+ */
+#define sipx_special	sipx_port
+#define sipx_action	sipx_zero
+#define IPX_DLTITF	0
+#define IPX_CRTITF	1
+
+#if __UAPI_DEF_IPX_ROUTE_DEFINITION
+struct ipx_route_definition {
+	__be32        ipx_network;
+	__be32        ipx_router_network;
+	unsigned char ipx_router_node[IPX_NODE_LEN];
+};
+#endif /* __UAPI_DEF_IPX_ROUTE_DEFINITION */
+
+#if __UAPI_DEF_IPX_INTERFACE_DEFINITION
+struct ipx_interface_definition {
+	__be32        ipx_network;
+	unsigned char ipx_device[16];
+	unsigned char ipx_dlink_type;
+#define IPX_FRAME_NONE		0
+#define IPX_FRAME_SNAP		1
+#define IPX_FRAME_8022		2
+#define IPX_FRAME_ETHERII	3
+#define IPX_FRAME_8023		4
+#define IPX_FRAME_TR_8022       5 /* obsolete */
+	unsigned char ipx_special;
+#define IPX_SPECIAL_NONE	0
+#define IPX_PRIMARY		1
+#define IPX_INTERNAL		2
+	unsigned char ipx_node[IPX_NODE_LEN];
+};
+#endif /* __UAPI_DEF_IPX_INTERFACE_DEFINITION */
+
+#if __UAPI_DEF_IPX_CONFIG_DATA
+struct ipx_config_data {
+	unsigned char	ipxcfg_auto_select_primary;
+	unsigned char	ipxcfg_auto_create_interfaces;
+};
+#endif /* __UAPI_DEF_IPX_CONFIG_DATA */
+
+/*
+ * OLD Route Definition for backward compatibility.
+ */
+
+#if __UAPI_DEF_IPX_ROUTE_DEF
+struct ipx_route_def {
+	__be32		ipx_network;
+	__be32		ipx_router_network;
+#define IPX_ROUTE_NO_ROUTER	0
+	unsigned char	ipx_router_node[IPX_NODE_LEN];
+	unsigned char	ipx_device[16];
+	unsigned short	ipx_flags;
+#define IPX_RT_SNAP		8
+#define IPX_RT_8022		4
+#define IPX_RT_BLUEBOOK		2
+#define IPX_RT_ROUTED		1
+};
+#endif /* __UAPI_DEF_IPX_ROUTE_DEF */
+
+#define SIOCAIPXITFCRT		(SIOCPROTOPRIVATE)
+#define SIOCAIPXPRISLT		(SIOCPROTOPRIVATE + 1)
+#define SIOCIPXCFGDATA		(SIOCPROTOPRIVATE + 2)
+#define SIOCIPXNCPCONN		(SIOCPROTOPRIVATE + 3)
+#endif /* _IPX_H_ */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/uapi/linux/nlfbx.h	2023-12-12 17:24:34.163627207 +0100
@@ -0,0 +1,126 @@
+#ifndef __LINUX_NLFBX_H
+#define __LINUX_NLFBX_H
+
+/*
+ * Freebox netlink interface public header
+ *
+ * Copyright 2023 Freebox
+ */
+
+#include <linux/types.h>
+
+#define NLFBX_GENL_NAME "nlfbx"
+
+/**
+ * enum nlfbx_commands - supported nlfbx commands
+ *
+ * @NLFBX_CMD_UNSPEC: unspecified command to catch errors
+ *
+ * @NLFBX_CMD_CFG_STA_MONITOR: Configure unassociated STA monitor interface
+ * @NLFBX_CMD_ADD_STA_MONITOR: Add a new unassociated STA to monitor, needs
+ *	%NLFBX_ATTR_IFINDEX and %NLFBX_ATTR_MAC.
+ * @NLFBX_CMD_DEL_STA_MONITOR: Remove a monitored unassociated STA, needs
+ *	%NLFBX_ATTR_IFINDEX and %NLFBX_ATTR_MAC.
+ * @NLFBX_CMD_GET_STA_MONITOR: Dump info for all monitored unassociated STAs,
+ *	a single monitor dev could be filtered with %NLFBX_ATTR_IFINDEX.
+ *
+ * @NLFBX_CMD_MAX: highest used command number
+ * @__NLFBX_CMD_AFTER_LAST: internal use
+ */
+enum nlfbx_commands {
+	NLFBX_CMD_UNSPEC,
+
+	NLFBX_CMD_CFG_STA_MONITOR,
+	NLFBX_CMD_ADD_STA_MONITOR,
+	NLFBX_CMD_DEL_STA_MONITOR,
+	NLFBX_CMD_GET_STA_MONITOR,
+
+	/* add new commands above here */
+
+	/* used to define NLFBX_CMD_MAX below */
+	__NLFBX_CMD_AFTER_LAST,
+	NLFBX_CMD_MAX = __NLFBX_CMD_AFTER_LAST - 1
+};
+
+/**
+ * enum nlfbx_attrs - nlfbx netlink attributes
+ *
+ * @NLFBX_ATTR_UNSPEC: unspecified attribute to catch errors
+ *
+ * @NLFBX_ATTR_IFINDEX: network interface index of the device to operate on
+ * @NLFBX_ATTR_MAC: MAC address (various uses)
+ * @NLFBX_ATTR_SCUM_INFO: Same Channel Unassociated Metrics list
+ * @NLFBX_ATTR_SKIP_MONITOR: Do not report frame to userland
+ */
+enum nlfbx_attrs {
+	NLFBX_ATTR_UNSPEC,
+	NLFBX_ATTR_IFINDEX,
+	NLFBX_ATTR_MAC,
+	NLFBX_ATTR_SCUM_INFO,
+	NLFBX_ATTR_SKIP_MONITOR,
+
+	/* add attributes here, update the policy in nlfbx.c */
+
+	__NLFBX_ATTR_AFTER_LAST,
+	NUM_NLFBX_ATTR = __NLFBX_ATTR_AFTER_LAST,
+	NLFBX_ATTR_MAX = __NLFBX_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum nlfbx_scum_info_attrs - type of unassociated station information
+ *
+ * These describe the type of packets received to compute related unassociated
+ * station metrics.
+ *
+ * @NLFBX_ATTR_SCUM_INFO_UNSPEC: unspecified attribute to catch errors
+ *
+ * @NLFBX_ATTR_SCUM_INFO_DATA: Unassociated station metrics for data packets
+ * @NLFBX_ATTR_SCUM_INFO_NONDATA: Unassociated station metrics for non data
+ *	packets
+ *
+ * @NLFBX_ATTR_SCUM_INFO_MAX: highest type of unassociated station information
+ */
+enum nlfbx_scum_info_attrs {
+	NLFBX_ATTR_SCUM_INFO_UNSPEC,
+
+	NLFBX_ATTR_SCUM_INFO_DATA,
+	NLFBX_ATTR_SCUM_INFO_NONDATA,
+
+	__NLFBX_ATTR_SCUM_INFO_AFTER_LAST,
+	NUM_NLFBX_ATTR_SCUM_INFO = __NLFBX_ATTR_SCUM_INFO_AFTER_LAST,
+	NLFBX_ATTR_SCUM_INFO_MAX = __NLFBX_ATTR_SCUM_INFO_AFTER_LAST - 1,
+};
+
+/**
+ * enum nlfbx_scum_info_metrics - Type of unassociated station information
+ * metrics
+ *
+ * These attribute types are used within a %NLFBX_ATTR_SCUM_INFO_*
+ * when getting information about a station.
+ *
+ * @NLFBX_SCUM_INFO_METRICS_UNSPEC: attribute number 0 is reserved
+ *
+ * @NLFBX_SCUM_INFO_METRICS_SIGNAL: Average signal strength of PPDU monitored
+ * @NLFBX_SCUM_INFO_METRICS_BYTES: Total bytes monitored
+ * @NLFBX_SCUM_INFO_METRICS_PACKETS: Total number of packets monitored
+ * @NLFBX_SCUM_INFO_METRICS_INACTIVE_TIME: Time since last activity
+ *					      (u32, msecs)
+ *
+ * @__NLFBX_SCUM_INFO_METRICS_AFTER_LAST: internal
+ * @NLFBX_SCUM_INFO_METRICS_MAX: highest possible scum info metrics attribute
+ */
+enum nlfbx_scum_info_metrics {
+	NLFBX_SCUM_INFO_METRICS_UNSPEC,
+
+	NLFBX_SCUM_INFO_METRICS_SIGNAL,
+	NLFBX_SCUM_INFO_METRICS_BYTES,
+	NLFBX_SCUM_INFO_METRICS_PACKETS,
+	NLFBX_SCUM_INFO_METRICS_INACTIVE_TIME,
+
+	/* keep last */
+	__NLFBX_SCUM_INFO_METRICS_AFTER_LAST,
+	NUM_NLFBX_SCUM_INFO_METRICS = __NLFBX_SCUM_INFO_METRICS_AFTER_LAST,
+	NLFBX_SCUM_INFO_METRICS_MAX = __NLFBX_SCUM_INFO_METRICS_AFTER_LAST - 1
+};
+
+#endif
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/uapi/linux/nmeshd_nl.h	2024-04-19 16:04:28.965735994 +0200
@@ -0,0 +1,50 @@
+#ifndef __LINUX_NMESHD_NL_H
+#define __LINUX_NMESHD_NL_H
+
+#define NMESHD_NL_FAMILY "nmeshd_nl"
+#define NNL_MULTICAST_GROUP_MLME "nmeshd_nl_mlme"
+
+enum nmeshd_nl_commands {
+	NNL_CMD_MESH_PEER_PATH_METRICS = 1,
+	NNL_CMD_MESH_NODE_METRICS,
+	NNL_CMD_SET_MPP,
+	NNL_CMD_DEL_MPP,
+	NNL_CMD_MPLINK_BLOCK,
+	NNL_CMD_MPLINK_UNBLOCK,
+	NNL_CMD_DUMP_BLOCKED_MPLINK_INFO,
+	NNL_CMD_MPLINK_FLUSH,
+
+	NNL_CMD_AFTER_LAST,
+	NNL_CMD_MAX = NNL_CMD_AFTER_LAST - 1,
+};
+
+enum nmeshd_nl_attributes {
+	NNL_ATTR_IFINDEX = 1,
+	NNL_ATTR_IE_NODE_METRICS,
+	NNL_ATTR_IE_PATH_METRICS,
+	NNL_ATTR_WIPHY,
+	NNL_ATTR_IFNAME,
+	NNL_ATTR_MAC,
+	NNL_ATTR_FRAME_TYPE,
+	NNL_ATTR_SIGNAL_STRENGTH,
+	NNL_ATTR_BEACON_INTERVAL,
+	NNL_ATTR_MPP_PROXY,
+	NNL_ATTR_MPLINK_INFO,
+
+	NNL_ATTR_AFTER_LAST,
+	NNL_NUM_ATTR = NNL_ATTR_AFTER_LAST,
+	NNL_ATTR_MAX = NNL_ATTR_AFTER_LAST - 1,
+};
+
+#define QBC_VENDOR_IE NNL_CMD_AFTER_LAST
+
+enum mplink_info_attr {
+	__MPLINK_ATTR_INVALID,
+	NNL_MPLINK_ATTR_MAC,
+
+	/* keep last */
+	NNL_MPLINK_ATTR_AFTER_LAST,
+	NNL_MPLINK_ATTR_MAX = NNL_MPLINK_ATTR_AFTER_LAST - 1
+};
+
+#endif /* __LINUX_NMESHD_NL_H */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/include/uapi/linux/prctl-private.h	2023-05-22 20:06:44.903871357 +0200
@@ -0,0 +1,10 @@
+#ifndef _LINUX_PRCTL_PRIVATE_H
+#define _LINUX_PRCTL_PRIVATE_H
+
+/*
+ * Freebox addition: set/get exec mode.
+ */
+#define PR_SET_EXEC_MODE	69
+#define PR_GET_EXEC_MODE	70
+
+#endif /* ! _LINUX_PRCTL_PRIVATE_H */
diff -Nruw linux-6.4-fbx/include/uapi/linux/remoti./remoti.h linux-6.4-fbx/include/uapi/linux/remoti/remoti.h
--- linux-6.4-fbx/include/uapi/linux/remoti./remoti.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/include/uapi/linux/remoti/remoti.h	2023-03-09 15:06:12.428262660 +0100
@@ -0,0 +1,137 @@
+#ifndef _UAPI_REMOTI_H
+#define _UAPI_REMOTI_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * subsystem definitions
+ */
+#define NPI_SYS_RES0		0
+#define NPI_SYS_SYS		1
+#define NPI_SYS_MAC		2
+#define NPI_SYS_NWK		3
+#define NPI_SYS_AF		4
+#define NPI_SYS_ZDO		5
+#define NPI_SYS_SAPI		6
+#define NPI_SYS_UTIL		7
+#define NPI_SYS_DBG		8
+#define NPI_SYS_APP		9
+#define NPI_SYS_RCAF		10
+#define NPI_SYS_RCN		11
+#define NPI_SYS_RCN_CLI		12
+#define NPI_SYS_BOOT		13
+#define NPI_SYS_MAX		14
+#define NPI_SYS_MASK		0x1F
+
+/*
+ * type definitions
+ */
+#define NPI_POLL		0
+#define NPI_SREQ		1
+#define NPI_AREQ		2
+#define NPI_SRSP		3
+#define NPI_TYPE_MAX		4
+#define NPI_TYPE_MASK		3
+#define NPI_TYPE_SHIFT		5
+
+
+/* common error codes (see RemoTI API) */
+#define RTI_SUCCESS		0x00
+
+/*
+ * rti user message
+ */
+#define NPI_MAX_DATA_LEN	123
+
+struct rti_msg {
+	__u8	type;
+	__u8	subsys;
+	__u8	cmd;
+
+	__u8	data_len;
+	__u8	data[NPI_MAX_DATA_LEN];
+
+	__u8	custom_reply_cmd;
+	__u8	reply_cmd;
+	__u8	reply_len;
+	__u8	reply[NPI_MAX_DATA_LEN];
+};
+
+/*
+ * socket addr family on "user" device
+ */
+#ifndef PF_REMOTI
+#define PF_REMOTI			37
+#define AF_REMOTI			PF_REMOTI
+#endif
+
+struct sockaddr_rti {
+	__u32	device_id;
+};
+
+#define SOL_REMOTI			280
+#define REMOTI_REGISTER_CB		0
+
+struct rti_callback {
+	__u8	subsys;
+	__u8	cmd;
+};
+
+/*
+ * ioctl on uart device
+ */
+enum rti_dev_state {
+	RTI_DEV_S_STOPPED = 0,
+	RTI_DEV_S_BOOTING,
+	RTI_DEV_S_BOOT_FAILED,
+	RTI_DEV_S_OPERATIONAL,
+	RTI_DEV_S_STOPPING,
+	RTI_DEV_S_DEAD,
+};
+
+struct rti_dev_status {
+	__u32	dev_state;
+	__u32	fw_version;
+};
+
+struct rti_dev_stats {
+	__u64	tx_bytes;
+	__u64	tx_packets;
+
+	__u64	tx_boot_packets;
+	__u64	tx_rcaf_packets;
+	__u64	tx_util_packets;
+	__u64	tx_other_packets;
+
+
+	__u64	rx_bytes;
+	__u64	rx_packets;
+	__u64	rx_bad_sof;
+	__u64	rx_len_errors;
+	__u64	rx_fcs_errors;
+	__u64	rx_tty_errors;
+	__u64	rx_full_errors;
+	__u64	rx_subsys_errors;
+	__u64	rx_type_errors;
+	__u64	rx_no_callback;
+
+	__u64	rx_boot_packets;
+	__u64	rx_rcaf_packets;
+	__u64	rx_util_packets;
+	__u64	rx_other_packets;
+};
+
+enum {
+	RTI_BOOT_FLAGS_FORCE_UPDATE	= (1 << 0),
+};
+
+#define RTI_IOCTL_MAGIC		0xd4
+#define RTI_ATTACH_DEVICE	_IOR(RTI_IOCTL_MAGIC, 1, __u32)
+#define RTI_GET_STATUS		_IOW(RTI_IOCTL_MAGIC, 2, struct rti_dev_status)
+#define RTI_GET_STATS		_IOW(RTI_IOCTL_MAGIC, 3, struct rti_dev_stats)
+
+#define RTI_START_DEVICE	_IOR(RTI_IOCTL_MAGIC, 8, __u32)
+#define RTI_STOP_DEVICE		_IO(RTI_IOCTL_MAGIC, 9)
+
+#endif /* _UAPI_REMOTI_H */
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/lib/fbxserial.c	2023-02-27 19:50:20.220196601 +0100
@@ -0,0 +1,178 @@
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/crc32.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+
+#include <linux/fbxserial.h>
+
+#define PFX "builtin-fbxserial: "
+
+static void __init
+fbxserialinfo_use_default(struct fbx_serial *serial)
+{
+	printk(KERN_WARNING PFX "warning: using default serial infos\n");
+	fbxserial_set_default(serial);
+}
+
+/*
+ * add trailing 0 for bundle string here.
+ */
+static void __init
+bundle_fixup(struct fbx_serial *serial)
+{
+	struct fbx_serial_extinfo *p;
+	int i;
+
+	for (i = 0; i < be32_to_cpu(serial->extinfo_count); i++) {
+
+		if (i >= EXTINFO_MAX_COUNT)
+			break;
+
+		p = &serial->extinfos[i];
+		if (be32_to_cpu(p->type) == EXTINFO_TYPE_EXTDEV &&
+		    be32_to_cpu(p->u.extdev.type) == EXTDEV_TYPE_BUNDLE) {
+			int size;
+
+			size = sizeof (p->u.extdev.serial);
+			p->u.extdev.serial[size - 1] = 0;
+		}
+	}
+}
+
+/*
+ * called from  arch code early  in the boot sequence.   This function
+ * returns 1  in case serial infos are  invalid/unreadable and default
+ * values have been used.
+ */
+int __init
+fbxserialinfo_read(const void *data, struct fbx_serial *out)
+{
+	uint32_t sum;
+
+	/*
+	 * get partial serial data from flash/whatever.
+	 */
+	memcpy(out, data, sizeof (*out));
+
+	/* check magic first */
+	if (be32_to_cpu(out->magic) != FBXSERIAL_MAGIC) {
+		printk(KERN_NOTICE PFX "invalid magic (%08x, expected %08x), "
+			"using defaults !\n", be32_to_cpu(out->magic),
+		       FBXSERIAL_MAGIC);
+		goto out_default;
+	}
+
+	/* fetch size for which we have to check CRC */
+	if (be32_to_cpu(out->len) > FBXSERIAL_MAX_SIZE) {
+		printk(KERN_NOTICE PFX "structure size too big (%d), "
+		       "using defaults !\n", be32_to_cpu(out->len));
+		goto out_default;
+	}
+
+	/* compute and check checksum */
+	sum = crc32(0, data + 4, be32_to_cpu(out->len) - 4);
+
+	if (be32_to_cpu(out->crc32) != sum) {
+		printk(KERN_NOTICE PFX "invalid checksum (%08x, "
+		       "expected %08x), using defaults !\n", sum,
+		       be32_to_cpu(out->crc32));
+		goto out_default;
+	}
+
+	printk(KERN_INFO PFX "Found valid serial infos !\n");
+	bundle_fixup(out);
+	return 0;
+
+ out_default:
+	fbxserialinfo_use_default(out);
+	bundle_fixup(out);
+	return 1;
+}
+
+void
+fbxserialinfo_get_random(unsigned char *data, unsigned int len)
+{
+	const struct fbx_serial *s;
+
+	memset(data, 0, 6);
+	s = arch_get_fbxserial();
+	if (WARN(!s, "arch_get_fbxserial returned NULL"))
+		return;
+
+	if (len > sizeof (s->random_data))
+		len = sizeof (s->random_data);
+
+	memcpy(data, s->random_data, len);
+}
+EXPORT_SYMBOL(fbxserialinfo_get_random);
+
+static u8 *mac_table;
+
+static void inc_mac(u8 *mac, int count)
+{
+	int index = 5;
+	int overflow;
+
+	do {
+		unsigned int val = mac[index] + count;
+
+		overflow = val >> 8;
+		mac[index] = val;
+		count = (count + 255) >> 8;
+		--index;
+	} while (index >= 0 && overflow);
+}
+
+static int gen_mac_table(const struct fbx_serial *s)
+{
+	int i;
+
+	mac_table = kmalloc(6 * s->mac_count, GFP_KERNEL);
+	if (!mac_table)
+		return -ENOMEM;
+
+	for (i = 0; i < s->mac_count; ++i) {
+		u8 *mac = &mac_table[6 * i];
+
+		memcpy(mac, s->mac_addr_base, 6);
+		inc_mac(mac, i);
+	}
+	return 0;
+}
+
+const void *
+fbxserialinfo_get_mac_addr(unsigned int index)
+{
+	const struct fbx_serial *s;
+
+	s = arch_get_fbxserial();
+
+	if (!s) {
+		pr_warn(PFX "no serial available: using default.\n");
+		goto default_mac;
+	}
+
+	if (index >= s->mac_count) {
+		pr_warn(PFX "mac index %d too high: using default.\n",
+			index);
+		goto default_mac;
+	}
+
+	if (!mac_table) {
+		int error = gen_mac_table(s);
+		if (error) {
+			pr_err(PFX "gen_mac_table() failed: using default.\n");
+			goto default_mac;
+		}
+	}
+
+	return &mac_table[6 * index];
+
+default_mac:
+	 return "\x00\x07\xcb\x00\x00\xfd";
+}
+EXPORT_SYMBOL(fbxserialinfo_get_mac_addr);
diff -Nruw linux-6.4-fbx/net/batman-adv/fbx./fbx.c linux-6.4-fbx/net/batman-adv/fbx/fbx.c
--- linux-6.4-fbx/net/batman-adv/fbx./fbx.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/batman-adv/fbx/fbx.c	2023-12-21 17:30:06.449516617 +0100
@@ -0,0 +1,1019 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Nicolas Escande <nico.escande@gmail.com>
+ */
+
+#include <linux/skbuff.h>
+#include <net/genetlink.h>
+
+#include "../main.h"
+#include "../tvlv.h"
+#include "fbx.h"
+
+#pragma pack(2)
+/**
+ * batadv_fbx_tvlv_hdr() - FBX TVLV header
+ * @type: FBX tvlv type
+ * @ver: FBX tvlv version
+ * @len: FBX tvlv data length
+ */
+struct batadv_fbx_tvlv_hdr {
+	__u8 type;
+	__u8 ver;
+	__be16 len;
+};
+#pragma pack()
+
+static int (*__fbx_handler[BATADV_FBX_SUB_LAST])(struct batadv_hard_iface *,
+						 struct sk_buff *);
+
+static struct batadv_fbx_module const *__fbx_modules[] = {
+#ifdef CONFIG_BATMAN_ADV_FBX_MTU
+	&batadv_mtu_module,
+#endif
+#ifdef CONFIG_BATMAN_ADV_FBX_SLAP
+	&batadv_slap_module,
+#endif
+#ifdef CONFIG_BATMAN_ADV_FBX_PERIF_ROUTER
+	&batadv_router_module,
+#endif
+};
+
+static int batadv_fbx_recv_unhandled_packet(struct batadv_hard_iface *recv_if,
+					    struct sk_buff *skb)
+{
+	kfree_skb(skb);
+
+	return NET_RX_DROP;
+}
+
+static int batadv_fbx_recv_packet(struct sk_buff *skb,
+				  struct batadv_hard_iface *hard_iface)
+{
+	struct batadv_fbx_packet *batadv_fbx;
+
+	if (unlikely(!pskb_may_pull(skb, BATADV_FBX_HLEN)))
+		goto drop;
+
+	batadv_fbx = (struct batadv_fbx_packet *)skb->data;
+
+	if (batadv_fbx->subtype >= ARRAY_SIZE(__fbx_handler))
+		goto drop;
+
+	return __fbx_handler[batadv_fbx->subtype](hard_iface, skb);
+
+drop:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
+/**
+ * batadv_fbx_recv_handler_register() - Register handler for batman-adv FBX sub
+ * packet type
+ * @packet_type: subtype which should be handled
+ * @recv_handler: receive handler for the packet type
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+int batadv_fbx_recv_handler_register(u8 packet_type,
+				     int (*hdl)(struct batadv_hard_iface *,
+						struct sk_buff *))
+{
+	int (*curr)(struct batadv_hard_iface *,
+		    struct sk_buff *);
+
+	curr = __fbx_handler[packet_type];
+
+	if (curr != batadv_fbx_recv_unhandled_packet)
+		return -EBUSY;
+
+	__fbx_handler[packet_type] = hdl;
+	return 0;
+}
+
+/**
+ * batadv_fbx_recv_handler_unregister() - Unregister FBX handler for packet
+ * subtype.
+ * @packet_type: subtype which should no longer be handled
+ */
+void batadv_fbx_recv_handler_unregister(u8 packet_type)
+{
+	__fbx_handler[packet_type] = batadv_fbx_recv_unhandled_packet;
+}
+
+/**
+ * batadv_fbx_tvlv_container - Container for a FBX TVLV to send in each OGM
+ */
+struct batadv_fbx_tvlv_container {
+	/** @list: hlist node for bat_priv->fbx_tvlv_containers */
+	struct hlist_node list;
+	/** @hdr: FBX tvlv header information */
+	struct batadv_fbx_tvlv_hdr hdr;
+	/** @data: FBX tvlv actual data */
+	u8 data[];
+};
+
+/**
+ * batadv_fbx_tvlv_update() - Update current FBX TVLV global
+ * container
+ * fbx_tvlv_lock should be held
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_fbx_tvlv_update(struct batadv_priv *bat_priv)
+{
+	struct batadv_fbx_tvlv_container *tvlv;
+	struct batadv_fbx_tvlv_hdr *hdr;
+	u8 *tvlv_value;
+	void *ptr;
+	size_t len = 0;
+
+	lockdep_assert_held(bat_priv->fbx_tvlv_lock);
+
+	hlist_for_each_entry(tvlv, &bat_priv->fbx_tvlv_containers, list) {
+		len += sizeof(struct batadv_fbx_tvlv_hdr);
+		len += ntohs(tvlv->hdr.len);
+	}
+
+	if (!len) {
+		batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_FBX, 1);
+		return;
+	}
+
+	tvlv_value = kmalloc(len, GFP_ATOMIC);
+	if (!tvlv_value)
+		return;
+
+	ptr = tvlv_value;
+	hlist_for_each_entry(tvlv, &bat_priv->fbx_tvlv_containers, list) {
+		hdr = ptr;
+		hdr->type = tvlv->hdr.type;
+		hdr->ver = tvlv->hdr.ver;
+		hdr->len = tvlv->hdr.len;
+		ptr = hdr + 1;
+		memcpy(ptr, tvlv->data, ntohs(tvlv->hdr.len));
+		ptr = (u8 *)ptr + ntohs(tvlv->hdr.len);
+	}
+
+	batadv_tvlv_container_register(bat_priv, BATADV_TVLV_FBX, 1,
+				       tvlv_value, len);
+	kfree(tvlv_value);
+}
+
+/**
+ * batadv_fbx_tvlv_container_unregister() - Unregister FBX TVLV container of a
+ * specific type and version
+ * Takes bat_priv->fbx_tvlv_lock
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: FBX tvlv container type to unregister
+ * @ver: FBX tvlv container type version to unregister
+ */
+void batadv_fbx_tvlv_container_unregister(struct batadv_priv *bat_priv,
+					  u8 type, u8 ver)
+{
+	struct batadv_fbx_tvlv_container *tvlv;
+
+	spin_lock(&bat_priv->fbx_tvlv_lock);
+	hlist_for_each_entry(tvlv, &bat_priv->fbx_tvlv_containers, list) {
+		if (tvlv->hdr.type == type && tvlv->hdr.ver == ver) {
+			hlist_del(&tvlv->list);
+			kfree(tvlv);
+			batadv_fbx_tvlv_update(bat_priv);
+			break;
+		}
+	}
+	spin_unlock(&bat_priv->fbx_tvlv_lock);
+}
+
+/**
+ * batadv_fbx_tvlv_container_register() - Register (replacing any existing)
+ * FBX TVLV container of a given type and version
+ * Takes bat_priv->fbx_tvlv_lock
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: FBX tvlv container type to register
+ * @ver: FBX tvlv container type version to register
+ */
+void batadv_fbx_tvlv_container_register(struct batadv_priv *bat_priv,
+					u8 type, u8 ver,
+					void *tvlv, u16 len)
+{
+	struct batadv_fbx_tvlv_container *tvlv_old, *tvlv_new;
+
+	if (!tvlv)
+		return;
+
+	tvlv_new = kzalloc(sizeof(*tvlv_new) + len, GFP_ATOMIC);
+	tvlv_new->hdr.type = type;
+	tvlv_new->hdr.ver = ver;
+	tvlv_new->hdr.len = htons(len);
+
+	memcpy(tvlv_new->data, tvlv, len);
+	INIT_HLIST_NODE(&tvlv_new->list);
+
+	spin_lock(&bat_priv->fbx_tvlv_lock);
+	hlist_for_each_entry(tvlv_old, &bat_priv->fbx_tvlv_containers, list) {
+		if (tvlv_old->hdr.type == type && tvlv_old->hdr.ver == ver) {
+			hlist_del(&tvlv_old->list);
+			kfree(tvlv_old);
+			break;
+		}
+	}
+	hlist_add_head(&tvlv_new->list, &bat_priv->fbx_tvlv_containers);
+	batadv_fbx_tvlv_update(bat_priv);
+	spin_unlock(&bat_priv->fbx_tvlv_lock);
+}
+/**
+ * struct batadv_tvlv_handler - handler for FBX specific tvlv type and version
+ */
+struct batadv_fbx_tvlv_handler {
+	/** @list: hlist node to keep list of register handler in bat_priv */
+	struct hlist_node list;
+	/** @ref: reference counter for this handler */
+	struct kref ref;
+	/** @rcu: struct used to free handler in RCU-safe manner */
+	struct rcu_head rcu;
+	/**
+	 * @ogm: Callback called when matching FBX tvlv is received in OGM
+	 * packet
+	 */
+	void (*ogm)(struct batadv_priv *bat_priv,
+		    struct batadv_orig_node *orig,
+		    void *tvlv, u16 len);
+	/**
+	 * @uni: Callback called when matching FBX tvlv is received in direct
+	 * unicast packet
+	 */
+	int (*uni)(struct batadv_priv *bat_priv,
+		   u8 *src, u8 *dst,
+		   void *tvlv, u16 len);
+	/** @type: FBX tvlv type this handler is responsible for */
+	u8 type;
+	/** @ver: FBX tvlv version this handler is responsible for */
+	u8 ver;
+};
+
+/**
+ * batadv_fbx_tvlv_handler_release() - release FBX tvlv handler
+ * @ref: FBX tvlv handler's ref pointer
+ */
+static void batadv_fbx_tvlv_handler_release(struct kref *ref)
+{
+	struct batadv_fbx_tvlv_handler *hdl;
+
+	hdl = container_of(ref, struct batadv_fbx_tvlv_handler, ref);
+	kfree_rcu(hdl, rcu);
+}
+
+/**
+ * batadv_fbx_tvlv_handler_put() - decrement FBX tvlv handler ref, releasing it
+ * if needed
+ * @tvlv_handler: the FBX tvlv handler to put
+ */
+static void batadv_fbx_tvlv_handler_put(struct batadv_fbx_tvlv_handler *hdl)
+{
+	if (!hdl)
+		return;
+	kref_put(&hdl->ref, batadv_fbx_tvlv_handler_release);
+}
+
+/**
+ * batadv_fbx_tvlv_handler_get() - Get a FBX tvlv handler from the register
+ * handler list.
+ * Takes rcu_read_lock()
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv handler type to look for
+ * @ver: tvlv handler version to look for
+ * @return: tvlv handler if found (with ref incremented), NULL otherwise
+ */
+static struct batadv_fbx_tvlv_handler *
+batadv_fbx_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 ver)
+{
+	struct batadv_fbx_tvlv_handler *hdl, *tvlv_hdl = NULL;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(hdl, &bat_priv->fbx_tvlv_handlers, list) {
+		if (hdl->type != type)
+			continue;
+		if (hdl->ver != ver)
+			continue;
+		if (!kref_get_unless_zero(&hdl->ref))
+			continue;
+		tvlv_hdl = hdl;
+		break;
+	}
+	rcu_read_unlock();
+	return tvlv_hdl;
+}
+
+/**
+ * batadv_fbx_tvlv_ogm_handler() - parse a OGM FBX TVLV buffer to call
+ * appropriate handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: originator node emitting the OGM packet
+ * @flags: OGM handler flags
+ * @tvlv: tvlv content
+ * @len: tvlv content length
+ */
+static void batadv_fbx_tvlv_ogm_handler(struct batadv_priv *bat_priv,
+					struct batadv_orig_node *orig,
+					u8 flags, void *tvlv, u16 len)
+{
+	struct batadv_fbx_tvlv_handler *hdl;
+	struct batadv_fbx_tvlv_hdr *hdr;
+	void *fbx_tvlv;
+	u16 fbx_len;
+
+	while (len >= sizeof(*hdr)) {
+		hdr = tvlv;
+		fbx_len = ntohs(hdr->len);
+		fbx_tvlv = hdr + 1;
+		len -= sizeof(*hdr);
+
+		if (fbx_len > len)
+			break;
+
+		tvlv = (u8 *)tvlv + fbx_len;
+		len -= fbx_len;
+
+		hdl = batadv_fbx_tvlv_handler_get(bat_priv, hdr->type,
+						  hdr->ver);
+		if (!hdl)
+			continue;
+		if (hdl->ogm)
+			hdl->ogm(bat_priv, orig, fbx_tvlv, fbx_len);
+		batadv_fbx_tvlv_handler_put(hdl);
+	}
+}
+
+/**
+ * batadv_fbx_tvlv_uni_handler() - parse a direct unicast FBX TVLV buffer to
+ * call appropriate handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: src MAC address of the unicast TVLV packet
+ * @dst: dst MAC address of the unicast TVLV packet
+ * @tvlv: tvlv content
+ * @len: tvlv content length
+ * @return: NET_RX_SUCCESS
+ */
+static int batadv_fbx_tvlv_uni_handler(struct batadv_priv *bat_priv,
+				       u8 *src, u8 *dst,
+				       void *tvlv, u16 len)
+{
+	struct batadv_fbx_tvlv_handler *hdl;
+	struct batadv_fbx_tvlv_hdr *hdr;
+	void *fbx_tvlv;
+	u16 fbx_len;
+
+	while (len >= sizeof(*hdr)) {
+		hdr = tvlv;
+		fbx_len = ntohs(hdr->len);
+		fbx_tvlv = hdr + 1;
+		len -= sizeof(*hdr);
+
+		if (fbx_len > len)
+			break;
+
+		tvlv = (u8 *)tvlv + fbx_len;
+		len -= fbx_len;
+
+		hdl = batadv_fbx_tvlv_handler_get(bat_priv, hdr->type,
+						  hdr->ver);
+		if (!hdl)
+			continue;
+		if (hdl->uni)
+			hdl->uni(bat_priv, src, dst, fbx_tvlv, fbx_len);
+		batadv_fbx_tvlv_handler_put(hdl);
+	}
+
+	return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_fbx_tvlv_mcast_handler() - parse a multicast FBX TVLV buffer to
+ * call appropriate handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: received TVLV skb data
+ * @return: NET_RX_SUCCESS
+ */
+static int batadv_fbx_tvlv_mcast_handler(struct batadv_priv *bat_priv,
+					 struct sk_buff *skb)
+{
+	WARN(1, "FBX multicast TVLV handler not supported\n");
+	return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_fbx_tvlv_handler_register() - Register a FBX tvlv handler
+ * @bat_priv: the bat_priv with all the soft interface information
+ * @type: FBX tvlv subtype
+ * @ver: tvlv handler version
+ * @ogm: OGM FBX tvlv handler callback function
+ * @uni: Direct unicast tvlv handler callback function
+ */
+void batadv_fbx_tvlv_handler_register(struct batadv_priv *bat_priv,
+				      u8 type, u8 ver,
+				      void (*ogm)(struct batadv_priv *bat_priv,
+						  struct batadv_orig_node *orig,
+						  void *tvlv, u16 len),
+				      int (*uni)(struct batadv_priv *bat_priv,
+						 u8 *src, u8 *dst,
+						 void *tvlv, u16 len))
+{
+	struct batadv_fbx_tvlv_handler *tvlv_hdl;
+
+	spin_lock(&bat_priv->fbx_tvlv_lock);
+	tvlv_hdl = batadv_fbx_tvlv_handler_get(bat_priv, type, ver);
+	if (tvlv_hdl)
+		goto out;
+
+	tvlv_hdl = kzalloc(sizeof(*tvlv_hdl), GFP_ATOMIC);
+	if (!tvlv_hdl)
+		goto out;
+
+	tvlv_hdl->ogm = ogm;
+	tvlv_hdl->uni = uni;
+	tvlv_hdl->type = type;
+	tvlv_hdl->ver = ver;
+	kref_init(&tvlv_hdl->ref);
+	INIT_HLIST_NODE(&tvlv_hdl->list);
+
+	kref_get(&tvlv_hdl->ref);
+	hlist_add_head_rcu(&tvlv_hdl->list, &bat_priv->fbx_tvlv_handlers);
+out:
+	spin_unlock(&bat_priv->fbx_tvlv_lock);
+	batadv_fbx_tvlv_handler_put(tvlv_hdl);
+}
+
+/**
+ * batadv_fbx_tvlv_handler_unregister() - Unregister a FBX tvlv handler
+ * @bat_priv: the bat_priv with all the soft interface information
+ * @type: FBX tvlv subtype
+ * @ver: tvlv handler version
+ */
+void batadv_fbx_tvlv_handler_unregister(struct batadv_priv *bat_priv,
+					u8 type, u8 ver)
+{
+	struct batadv_fbx_tvlv_handler *tvlv_hdl;
+
+	tvlv_hdl = batadv_fbx_tvlv_handler_get(bat_priv, type, ver);
+	if (!tvlv_hdl)
+		return;
+	batadv_fbx_tvlv_handler_put(tvlv_hdl);
+	spin_lock(&bat_priv->fbx_tvlv_lock);
+	hlist_del_rcu(&tvlv_hdl->list);
+	spin_unlock(&bat_priv->fbx_tvlv_lock);
+	batadv_fbx_tvlv_handler_put(tvlv_hdl);
+}
+
+/**
+ * batadv_fbx_shortcut: Check if we are a shortcut for dest orig
+ * @bat_priv: the bat_priv with all the soft interface information
+ * @dest: destination address
+ * @return: true if this node is a valid shortcut, false otherwise
+ */
+bool batadv_fbx_shortcut(struct batadv_priv *bat_priv, u8 const *dest)
+{
+	struct batadv_fbx_module const *m;
+	bool ret = false;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->shortcut)
+			ret = m->ops->shortcut(bat_priv, dest);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+/**
+ * batadv_fbx_check_skb_rx: Check ingress skb packet
+ * @bat_priv: the bat_priv with all the soft interface information
+ * @type: B.A.T.M.A.N-Adv packet type
+ * @skb: ingress skb
+ * @return: true if packet shall pass, false otherwise
+ */
+bool batadv_fbx_check_skb_rx(struct batadv_priv *bat_priv,
+			    enum batadv_packettype type,
+			    struct sk_buff *skb)
+{
+	struct batadv_fbx_module const *m;
+	bool ret = true;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->check_skb_rx)
+			ret = m->ops->check_skb_rx(bat_priv, type, skb);
+		if (!ret)
+			break;
+	}
+	return ret;
+}
+
+/**
+ * batadv_fbx_check_skb_tx: Check egress skb packet
+ * @bat_priv: the bat_priv with all the soft interface information
+ * @skb: egress skb
+ * @vid: skb's vlan ID
+ * @return: true if packet shall pass, false otherwise
+ */
+bool batadv_fbx_check_skb_tx(struct batadv_priv *bat_priv,
+			     struct sk_buff *skb,
+			     unsigned short vid)
+{
+	struct batadv_fbx_module const *m;
+	bool ret = true;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->check_skb_tx)
+			ret = m->ops->check_skb_tx(bat_priv, skb, vid);
+		if (!ret)
+			break;
+	}
+	return ret;
+}
+
+/**
+ * batadv_fbx_ogm_process: FBX specific OGM2 process
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: The orig node that generates this OGM
+ * @neigh: Neighbour that sends this OGM on behalf of orig_node
+ * @ogm: The OGM2 packet
+ */
+void batadv_fbx_ogm_process(struct batadv_priv *bat_priv,
+			    struct batadv_orig_node *orig_node,
+			    struct batadv_neigh_node *neigh,
+			    struct batadv_ogm2_packet *ogm)
+{
+	struct batadv_fbx_module const *m;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->ogm_process)
+			m->ops->ogm_process(bat_priv, orig_node, neigh, ogm);
+	}
+}
+
+/**
+ * batadv_fbx_neigh_release: Call FBX specific work on neighbour release event
+ * @neigh: the neighbor being freed
+ */
+void batadv_fbx_neigh_release(struct batadv_hardif_neigh_node *neigh)
+{
+	struct batadv_fbx_module const *m;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->neigh_release)
+			m->ops->neigh_release(neigh);
+	}
+}
+
+/**
+ * batadv_fbx_neigh_init: Call FBX specific work on neighbour creation event
+ *
+ * @neigh: Neighbor to initialize
+ * @return: 0 on success negative number otherwise
+ */
+int batadv_fbx_neigh_init(struct batadv_hardif_neigh_node *neigh)
+{
+	struct batadv_fbx_module const *m;
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->neigh_init)
+			ret = m->ops->neigh_init(neigh);
+		if (ret)
+			goto clean;
+	}
+
+	return 0;
+
+clean:
+	for (; i > 0; i--) {
+		m = __fbx_modules[i - 1];
+		if (m->ops->neigh_release)
+			m->ops->neigh_release(neigh);
+	}
+
+	return -1;
+}
+
+/**
+ * batadv_fbx_orig_release: Call FBX specific work on originator release event
+ * @orig: the originator being freed
+ */
+void batadv_fbx_orig_release(struct batadv_orig_node *orig)
+{
+	struct batadv_fbx_module const *m;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->orig_release)
+			m->ops->orig_release(orig);
+	}
+}
+
+/**
+ * batadv_fbx_orig_init: Call FBX specific work on originator creation event
+ *
+ * @orig: Originator to initialize
+ * @return: 0 on success negative number otherwise
+ */
+int batadv_fbx_orig_init(struct batadv_orig_node *orig)
+{
+	struct batadv_fbx_module const *m;
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->orig_init)
+			ret = m->ops->orig_init(orig);
+		if (ret)
+			goto clean;
+	}
+
+	return 0;
+
+clean:
+	for (; i > 0; i--) {
+		m = __fbx_modules[i - 1];
+		if (m->ops->orig_release)
+			m->ops->orig_release(orig);
+	}
+
+	return -1;
+}
+
+/**
+ * batadv_fbx_orig_ifinfo_release: Call FBX specific work on originator ifinfo
+ * release event
+ *
+ * @orig_ifinfo: The originator ifinfo being freed
+ */
+void batadv_fbx_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
+{
+	struct batadv_fbx_module const *m;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->orig_ifinfo_release)
+			m->ops->orig_ifinfo_release(orig_ifinfo);
+	}
+}
+
+/**
+ * batadv_fbx_orig_ifinfo_init: Call FBX specific work on originator ifinfo
+ * creation event
+ *
+ * @orig_ifinfo: Originator ifinfo to initialize
+ * @return: 0 on success negative number otherwise
+ */
+int batadv_fbx_orig_ifinfo_init(struct batadv_orig_ifinfo *orig_ifinfo)
+{
+	struct batadv_fbx_module const *m;
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->orig_ifinfo_init)
+			ret = m->ops->orig_ifinfo_init(orig_ifinfo);
+		if (ret)
+			goto clean;
+	}
+
+	return 0;
+
+clean:
+	for (; i > 0; i--) {
+		m = __fbx_modules[i - 1];
+		if (m->ops->orig_ifinfo_release)
+			m->ops->orig_ifinfo_release(orig_ifinfo);
+	}
+
+	return -1;
+}
+
+/**
+ * batadv_fbx_hardif_update() - Update hardif event
+ */
+void batadv_fbx_hardif_update(struct batadv_hard_iface *hard_iface)
+{
+	struct batadv_fbx_module const *m;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->hardif_update)
+			m->ops->hardif_update(hard_iface);
+	}
+}
+
+/**
+ * batadv_fbx_primary_update() - Update primary iface event
+ */
+void batadv_fbx_primary_update(struct batadv_priv *bat_priv,
+			       struct batadv_hard_iface *primary)
+{
+	struct batadv_fbx_module const *m;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->primary_update)
+			m->ops->primary_update(bat_priv, primary);
+	}
+}
+
+/**
+ * batadv_fbx_tt_local_add() - Notify FBX modules a local TT is added
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tt: Local TT that is added
+ * @return: false if we want to prevent roaming notification, true otherwise
+ */
+bool batadv_fbx_tt_local_add(struct batadv_priv *bat_priv,
+			     struct batadv_tt_local_entry *tt,
+			     struct batadv_tt_global_entry *tg,
+			     int ifindex)
+{
+	struct batadv_fbx_module const *m;
+	bool rc, ret = true;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->tt_local_add) {
+			rc = m->ops->tt_local_add(bat_priv, tt, tg, ifindex);
+			ret = ret && rc;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * batadv_fbx_tt_local_del() - Notify FBX modules a local TT is deleted
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tt: Local TT that is removed
+ * @return: True if local TT entry should be removed, false otherwise
+ */
+bool batadv_fbx_tt_local_del(struct batadv_priv *bat_priv,
+			     struct batadv_tt_local_entry *tt)
+{
+	struct batadv_fbx_module const *m;
+	bool rc, ret = true;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->tt_local_del) {
+			rc = m->ops->tt_local_del(bat_priv, tt);
+			ret = ret && rc;
+		}
+	}
+	return ret;
+}
+
+/**
+ * batadv_fbx_tt_global_add() - Notify FBX modules a global TT is added
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tt: the global TT that is added
+ * @orig: Originator that can reach this global TT
+ * @return: False if we want to prevent matching local TT removal, true
+ * otherwise
+ */
+bool batadv_fbx_tt_global_add(struct batadv_priv *bat_priv,
+			      struct batadv_tt_global_entry *tt,
+			      struct batadv_orig_node *orig)
+{
+	struct batadv_fbx_module const *m;
+	bool rc, ret = true;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->tt_global_add) {
+			rc = m->ops->tt_global_add(bat_priv, tt, orig);
+			ret = ret && rc;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * batadv_fbx_tt_global_del() - Notify FBX modules a global TT is deleted
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tt: the global TT that is removed
+ * @orig: Originator that was able to reach this global TT
+ */
+void batadv_fbx_tt_global_del(struct batadv_priv *bat_priv,
+			      struct batadv_tt_global_entry *tt,
+			      struct batadv_orig_node *orig)
+{
+	struct batadv_fbx_module const *m;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->tt_global_del)
+			m->ops->tt_global_del(bat_priv, tt, orig);
+	}
+}
+
+static const struct nla_policy batadv_fbx_policy[NUM_BATADV_ATTR_FBX] = {
+	[BATADV_ATTR_FBX_SLAP_MASTER_ADDRESS] = { .len = ETH_ALEN },
+	[BATADV_ATTR_FBX_SLAP_MASTER_PRIO] = { .type = NLA_U32 },
+	[BATADV_ATTR_FBX_SLAP_IFINDEX] = { .type = NLA_U32 },
+	[BATADV_ATTR_FBX_SLAP_PRIO] = { .type = NLA_U32 },
+};
+
+/**
+ * batadv_fbx_nl_parse_fbx() - Parse FBX specific attributes of NL message
+ * @info: NL message info
+ * @fbx: FBX attr array
+ * @max: Max FBX NL id
+ * @return: return @fbx if attributes were found and parsed correctly, NULL
+ * otherwise
+ */
+static struct nlattr **batadv_fbx_nl_parse_fbx(struct genl_info *info,
+					       struct nlattr *fbx[],
+					       size_t max)
+{
+	struct nlattr *attr;
+	int err;
+
+	if (!info)
+		return NULL;
+
+	attr = info->attrs[BATADV_ATTR_FBX];
+	if (!attr)
+		return NULL;
+
+	err = nla_parse_nested_deprecated(fbx, max, attr, batadv_fbx_policy,
+					  NULL);
+	if (err)
+		return NULL;
+
+	return fbx;
+}
+
+/**
+ * batadv_fbx_nl_start_fbx() - Start nested FBX attributes of NL response
+ * @skb: NL response
+ * @return: Nest attributes
+ */
+static struct nlattr *batadv_fbx_nl_start_fbx(struct sk_buff *skb)
+{
+	if (!skb)
+		return NULL;
+
+	return nla_nest_start(skb, BATADV_ATTR_FBX);
+}
+
+/**
+ * batadv_fbx_nl_stop_fbx() - Stop nested FBX attributes of NL response
+ * @nested: FBX nested to close
+ */
+static void batadv_fbx_nl_stop_fbx(struct sk_buff *skb, struct nlattr *attr)
+{
+	if (!attr || !skb)
+		return;
+
+	nla_nest_end(skb, attr);
+}
+
+/**
+ * batadv_fbx_nl() - Handle FBX specific part of a B.A.T.M.A.N-Adv NL command
+ * @bat_priv: The bat priv with all the soft interface information
+ * @cmd: B.A.T.M.A.N-Adv NL command
+ * @info: NL message info
+ * @skb: NL message to fill
+ * @data: Handler specific data
+ */
+void batadv_fbx_nl(struct batadv_priv *bat_priv,
+		   enum batadv_nl_commands cmd,
+		   struct genl_info *info,
+		   struct sk_buff *skb,
+		   void *data)
+{
+	struct nlattr *nest, **attr, *fbxattr[NUM_BATADV_ATTR_FBX];
+	struct batadv_fbx_module const *m;
+	int i, j;
+
+	attr = batadv_fbx_nl_parse_fbx(info, fbxattr, BATADV_ATTR_FBX_MAX);
+	nest = batadv_fbx_nl_start_fbx(skb);
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		for (j = 0; j < m->nl_ops_sz; ++j) {
+			if (m->nl_ops[j].cmd == cmd) {
+				m->nl_ops[j].hdl(bat_priv, info, attr,
+						 skb, data);
+			}
+		}
+	}
+
+	batadv_fbx_nl_stop_fbx(skb, nest);
+}
+
+/**
+ * batadv_fbx_new_priv: init FBX specific bits in bat_priv
+ * @bat_priv: the bat_priv to init
+ *
+ */
+int batadv_fbx_new_priv(struct batadv_priv *bat_priv)
+{
+	struct batadv_fbx_module const *m;
+	int ret = 0, i;
+
+	INIT_HLIST_HEAD(&bat_priv->fbx_tvlv_handlers);
+	INIT_HLIST_HEAD(&bat_priv->fbx_tvlv_containers);
+	spin_lock_init(&bat_priv->fbx_tvlv_lock);
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->new_priv)
+			ret |= m->ops->new_priv(bat_priv);
+	}
+
+	batadv_tvlv_handler_register(bat_priv, batadv_fbx_tvlv_ogm_handler,
+				     batadv_fbx_tvlv_uni_handler,
+				     batadv_fbx_tvlv_mcast_handler,
+				     BATADV_TVLV_FBX, 1, BATADV_NO_FLAGS);
+
+	return ret;
+}
+
+/**
+ * batadv_fbx_free_priv: release FBX specific bits in bat_priv
+ * @bat_priv: the bat_priv to release
+ *
+ */
+void batadv_fbx_free_priv(struct batadv_priv *bat_priv)
+{
+	struct batadv_fbx_module const *m;
+	int i;
+
+	batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_FBX, 1);
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->free_priv)
+			m->ops->free_priv(bat_priv);
+	}
+}
+
+/**
+ * batadv_fbx_init: Init B.A.T.M.A.N-Adv fbx submodule
+ */
+int __init batadv_fbx_init(void)
+{
+	struct batadv_fbx_module const *m;
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_handler); i++)
+		__fbx_handler[i] = batadv_fbx_recv_unhandled_packet;
+
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		ret = 0;
+		m = __fbx_modules[i];
+		if (m->ops->init)
+			ret = m->ops->init();
+		if (ret)
+			pr_err("batadv: Cannot init fbx module %s\n", m->name);
+	}
+
+	return batadv_recv_handler_register(BATADV_FBX,
+					    batadv_fbx_recv_packet);
+}
+
+/**
+ * batadv_fbx_exit: Exit B.A.T.M.A.N-Adv fbx submodule
+ */
+void __exit batadv_fbx_exit(void)
+{
+	struct batadv_fbx_module const *m;
+	int i;
+
+	batadv_recv_handler_unregister(BATADV_FBX);
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->exit)
+			m->ops->exit();
+	}
+}
diff -Nruw linux-6.4-fbx/net/batman-adv/fbx./fbx.h linux-6.4-fbx/net/batman-adv/fbx/fbx.h
--- linux-6.4-fbx/net/batman-adv/fbx./fbx.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/batman-adv/fbx/fbx.h	2023-12-21 17:30:06.449516617 +0100
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) B.A.T.M.A.N. contributors:
+ *
+ * Nicolas Escande <nico.escande@gmail.com>
+ */
+
+#ifndef _NET_BATMAN_ADV_FBX_H_
+#define _NET_BATMAN_ADV_FBX_H_
+
+#ifdef CONFIG_BATMAN_ADV_FBX
+
+struct genl_info;
+
+enum batadv_fbx_tvlv_type {
+	BATADV_FBX_TVLV_SLAP_MASTER,
+};
+
+#define BATADV_FBX_TVLV_SLAP_VERSION 1
+
+struct batadv_fbx_module_ops {
+	int (*init)(void);
+	void (*exit)(void);
+	int (*new_priv)(struct batadv_priv *bat_priv);
+	void (*free_priv)(struct batadv_priv *bat_priv);
+	void (*neigh_release)(struct batadv_hardif_neigh_node *neigh);
+	int (*neigh_init)(struct batadv_hardif_neigh_node *neigh);
+	void (*orig_release)(struct batadv_orig_node *orig);
+	int (*orig_init)(struct batadv_orig_node *orig);
+	void (*orig_ifinfo_release)(struct batadv_orig_ifinfo *orig_ifinfo);
+	int (*orig_ifinfo_init)(struct batadv_orig_ifinfo *orig_ifinfo);
+	void (*hardif_update)(struct batadv_hard_iface *hard_iface);
+	void (*primary_update)(struct batadv_priv *bat_priv,
+			       struct batadv_hard_iface *primary);
+	bool (*tt_local_add)(struct batadv_priv *bat_priv,
+			     struct batadv_tt_local_entry *tl,
+			     struct batadv_tt_global_entry *tg,
+			     int ifindex);
+	bool (*tt_local_del)(struct batadv_priv *bat_priv,
+			     struct batadv_tt_local_entry *tt);
+	bool (*tt_global_add)(struct batadv_priv *bat_priv,
+			      struct batadv_tt_global_entry *tt,
+			      struct batadv_orig_node *orig);
+	void (*tt_global_del)(struct batadv_priv *bat_priv,
+			      struct batadv_tt_global_entry *tt,
+			      struct batadv_orig_node *orig);
+	bool (*shortcut)(struct batadv_priv *bat_priv, u8 const *dest);
+	bool (*check_skb_rx)(struct batadv_priv *bat_priv,
+			     enum batadv_packettype type,
+			     struct sk_buff *skb);
+	bool (*check_skb_tx)(struct batadv_priv *bat_priv,
+			     struct sk_buff *skb,
+			     unsigned short vid);
+	void (*ogm_process)(struct batadv_priv *bat_priv,
+			    struct batadv_orig_node *orig_node,
+			    struct batadv_neigh_node *neigh_node,
+			    struct batadv_ogm2_packet *ogm);
+};
+
+struct batadv_fbx_nl_ops {
+	enum batadv_nl_commands cmd;
+	void (*hdl)(struct batadv_priv *, struct genl_info *,
+		    struct nlattr **, struct sk_buff *, void *data);
+};
+
+struct batadv_fbx_module {
+	char const *name;
+	struct batadv_fbx_module_ops const *ops;
+	struct batadv_fbx_nl_ops const *nl_ops;
+	size_t nl_ops_sz;
+};
+
+int __init batadv_fbx_init(void);
+void __exit batadv_fbx_exit(void);
+int batadv_fbx_new_priv(struct batadv_priv *bat_priv);
+void batadv_fbx_free_priv(struct batadv_priv *bat_priv);
+void batadv_fbx_neigh_release(struct batadv_hardif_neigh_node *neigh);
+int batadv_fbx_neigh_init(struct batadv_hardif_neigh_node *neigh);
+void batadv_fbx_orig_release(struct batadv_orig_node *orig);
+int batadv_fbx_orig_init(struct batadv_orig_node *orig);
+void batadv_fbx_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_info);
+int batadv_fbx_orig_ifinfo_init(struct batadv_orig_ifinfo *orig_info);
+void batadv_fbx_hardif_update(struct batadv_hard_iface *hard_iface);
+void batadv_fbx_primary_update(struct batadv_priv *bat_priv,
+			       struct batadv_hard_iface *primary);
+void batadv_fbx_nl(struct batadv_priv *bat_priv, enum batadv_nl_commands cmd,
+		   struct genl_info *info, struct sk_buff *skb, void *data);
+
+void batadv_fbx_tvlv_container_unregister(struct batadv_priv *bat_priv,
+					  u8 type, u8 ver);
+void batadv_fbx_tvlv_container_register(struct batadv_priv *bat_priv,
+					u8 type, u8 ver,
+					void *tvlv, u16 len);
+void batadv_fbx_tvlv_handler_register(struct batadv_priv *bat_priv,
+				      u8 type, u8 ver,
+				      void (*ogm)(struct batadv_priv *bat_priv,
+						  struct batadv_orig_node *orig,
+						  void *tvlv, u16 len),
+				      int (*uni)(struct batadv_priv *bat_priv,
+						 u8 *src, u8 *dst,
+						 void *tvlv, u16 len));
+void batadv_fbx_tvlv_handler_unregister(struct batadv_priv *bat_priv,
+					u8 type, u8 ver);
+bool batadv_fbx_tt_local_add(struct batadv_priv *bat_priv,
+			     struct batadv_tt_local_entry *tl,
+			     struct batadv_tt_global_entry *tg,
+			     int ifindex);
+bool batadv_fbx_tt_local_del(struct batadv_priv *bat_priv,
+			     struct batadv_tt_local_entry *tt_local);
+bool batadv_fbx_tt_global_add(struct batadv_priv *bat_priv,
+			      struct batadv_tt_global_entry *tt_global,
+			      struct batadv_orig_node *orig_node);
+void batadv_fbx_tt_global_del(struct batadv_priv *bat_priv,
+			      struct batadv_tt_global_entry *tt_global,
+			      struct batadv_orig_node *orig_node);
+bool batadv_fbx_shortcut(struct batadv_priv *bat_priv, u8 const *dest);
+bool batadv_fbx_check_skb_rx(struct batadv_priv *bat_priv,
+			     enum batadv_packettype type,
+			     struct sk_buff *skb);
+bool batadv_fbx_check_skb_tx(struct batadv_priv *bat_priv,
+			     struct sk_buff *skb,
+			     unsigned short vid);
+void batadv_fbx_ogm_process(struct batadv_priv *bat_priv,
+			    struct batadv_orig_node *orig_node,
+			    struct batadv_neigh_node *neigh_node,
+			    struct batadv_ogm2_packet *ogm);
+int batadv_fbx_recv_handler_register(u8 packet_type,
+				     int (*hdl)(struct batadv_hard_iface *,
+						struct sk_buff *));
+void batadv_fbx_recv_handler_unregister(u8 packet_type);
+
+extern struct batadv_fbx_module const batadv_mtu_module;
+extern struct batadv_fbx_module const batadv_slap_module;
+extern struct batadv_fbx_module const batadv_router_module;
+
+#else
+
+struct genl_info;
+
+static inline int __init batadv_fbx_init(void)
+{
+	return 0;
+}
+
+static inline void __exit batadv_fbx_exit(void)
+{
+}
+
+static inline int batadv_fbx_new_priv(struct batadv_priv *bat_priv)
+{
+	return 0;
+}
+
+static inline void batadv_fbx_free_priv(struct batadv_priv *bat_priv)
+{
+}
+
+static inline void
+batadv_fbx_neigh_release(struct batadv_hardif_neigh_node *neigh)
+{
+}
+
+static inline int
+batadv_fbx_neigh_init(struct batadv_hardif_neigh_node *neigh)
+{
+	return 0;
+}
+
+static inline void
+batadv_fbx_orig_release(struct batadv_orig_node *orig)
+{
+}
+
+static inline int
+batadv_fbx_orig_init(struct batadv_orig_node *orig)
+{
+	return 0;
+}
+
+static inline void
+batadv_fbx_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_info)
+{
+}
+
+static inline int
+batadv_fbx_orig_ifinfo_init(struct batadv_orig_ifinfo *orig_info)
+{
+	return 0;
+}
+
+static inline void batadv_fbx_hardif_update(struct batadv_hard_iface *hif)
+{
+}
+
+static inline void batadv_fbx_primary_update(struct batadv_priv *bat_priv,
+					     struct batadv_hard_iface *primary)
+{
+}
+
+static inline void batadv_fbx_nl(struct batadv_priv *bat_priv,
+				 enum batadv_nl_commands cmd,
+				 struct genl_info *info,
+				 struct sk_buff *skb,
+				 void *data)
+{
+}
+
+static inline bool batadv_fbx_tt_local_add(struct batadv_priv *bat_priv,
+					   struct batadv_tt_local_entry *tl,
+					   struct batadv_tt_global_entry *tg,
+					   int ifindex)
+{
+	return true;
+}
+
+static inline bool batadv_fbx_tt_local_del(struct batadv_priv *bat_priv,
+					   struct batadv_tt_local_entry *tt)
+{
+	return true;
+}
+
+static inline bool batadv_fbx_tt_global_add(struct batadv_priv *bat_priv,
+					    struct batadv_tt_global_entry *tt,
+					    struct batadv_orig_node *orig_node)
+{
+	return true;
+}
+
+static inline void batadv_fbx_tt_global_del(struct batadv_priv *bat_priv,
+					    struct batadv_tt_global_entry *tt,
+					    struct batadv_orig_node *orig_node)
+{
+}
+
+static inline bool batadv_fbx_check_skb_rx(struct batadv_priv *bat_priv,
+					   enum batadv_packettype type,
+					   struct sk_buff *skb)
+{
+	return true;
+}
+
+static inline bool batadv_fbx_check_skb_tx(struct batadv_priv *bat_priv,
+					   struct sk_buff *skb,
+					   unsigned short vid)
+{
+	return true;
+}
+
+static inline void batadv_fbx_ogm_process(struct batadv_priv *bat_priv,
+					  struct batadv_orig_node *orig_node,
+					  struct batadv_neigh_node *neigh_node,
+					  struct batadv_ogm2_packet *ogm)
+{
+}
+
+#endif /* CONFIG_BATMAN_ADV_FBX */
+
+#endif /* _NET_BATMAN_ADV_FBX_H_ */
diff -Nruw linux-6.4-fbx/net/batman-adv/fbx./mtu.c linux-6.4-fbx/net/batman-adv/fbx/mtu.c
--- linux-6.4-fbx/net/batman-adv/fbx./mtu.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/batman-adv/fbx/mtu.c	2023-12-12 17:24:34.163627207 +0100
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Nicolas Escande <nico.escande@gmail.com>
+ */
+
+#include "../main.h"
+
+#include <linux/atomic.h>
+#include <linux/types.h>
+#include <linux/byteorder/generic.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/gfp.h>
+#include <linux/if_ether.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/lockdep.h>
+#include <linux/minmax.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <net/genetlink.h>
+#include <uapi/linux/batadv_packet.h>
+
+#include "../hard-interface.h"
+#include "../originator.h"
+#include "../send.h"
+#include "../log.h"
+#include "fbx.h"
+#include "mtu.h"
+
+#define BATADV_MTU_NB_FRAMES 3
+#define BATADV_MTU_RECV_DELAY 1000
+#define BATADV_MTU_INTERVAL_MIN 3000
+#define BATADV_MTU_INTERVAL_MAX 30000
+#define BATADV_MTU_DEF 1500
+
+static_assert(BATADV_MTU_RECV_DELAY < BATADV_MTU_INTERVAL_MIN);
+
+#define DELAY_MIN msecs_to_jiffies(BATADV_MTU_INTERVAL_MIN)
+#define DELAY_MAX msecs_to_jiffies(BATADV_MTU_INTERVAL_MAX)
+#define DELAY_RECV msecs_to_jiffies(BATADV_MTU_RECV_DELAY)
+
+/**
+ * batadv_mtu_send_probes - send a burst of MTU probe packets
+ * @neigh: the neighbor we should send probes to
+ *
+ * This will only send a few probe packets for the maximum MTU the hard if
+ * supports (increasing the seqno in the process)
+ */
+static int batadv_mtu_send_probes(struct batadv_hardif_neigh_node *neigh)
+{
+	struct sk_buff *skb;
+	struct batadv_hard_iface *hard_if = neigh->if_incoming;
+	int mtu = hard_if->net_dev->mtu;
+	struct batadv_priv *bat_priv;
+	struct batadv_fbx_mtu_packet pkt = {
+		.hdr = {
+			.packet_type = BATADV_FBX,
+			.version = BATADV_COMPAT_VERSION,
+			.subtype = BATADV_FBX_SUB_MTU_PROBE,
+		},
+		.mtu = mtu, /* NOTE(review): host byte order while seqno is be16; receiver compares raw — confirm intended */
+	};
+	size_t i;
+
+	if (!hard_if->soft_iface)
+		return 0;
+
+	bat_priv = netdev_priv(hard_if->soft_iface);
+
+	for (i = 0; i < BATADV_MTU_NB_FRAMES; i++) {
+		pkt.hdr.seqno =
+			cpu_to_be16(atomic_inc_return(&bat_priv->mtu_seqno));
+		skb = alloc_skb(ETH_HLEN + NET_IP_ALIGN + mtu, GFP_KERNEL);
+		if (!skb)
+			return -ENOMEM;
+
+		skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
+		skb_put_data(skb, &pkt, sizeof(pkt));
+		/* pad the frame up to the full MTU being probed */
+		skb_put(skb, mtu - sizeof(pkt));
+		batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);
+	}
+
+	return 0;
+}
+
+/**
+ * batadv_mtu_work_to_neigh() - Get neighbor reference from MTU neighbor
+ * work.
+ *
+ * @mtud: MTU state associated with the neighbor
+ * @return: NULL if Neighbor is currently being deleted, neighbor hardif
+ * pointer with incremented ref count otherwise
+ */
+static struct batadv_hardif_neigh_node *
+batadv_mtu_work_to_neigh(struct batadv_mtu *mtud)
+{
+	struct batadv_hardif_neigh_node *neigh;
+
+	rcu_read_lock();
+	neigh = rcu_dereference(mtud->neigh);
+	if (!neigh)
+		goto out;
+	if (!kref_get_unless_zero(&neigh->refcount))
+		neigh = NULL;
+out:
+	rcu_read_unlock();
+	return neigh;
+}
+
+/**
+ * batadv_mtu_process_periodic() - periodic resend of the probing frames
+ * @work: the delayed work struct
+ *
+ * This will :
+ *   - send a burst of probing frames
+ *   - schedule the next periodic run
+ *   - schedule the no response wq
+ *
+ */
+static void batadv_mtu_process_periodic(struct work_struct *work)
+{
+	struct delayed_work *delayed_work;
+	struct batadv_mtu *mtud;
+	struct batadv_hardif_neigh_node *neigh;
+	struct batadv_priv *bat_priv;
+	struct batadv_hard_iface *hard_if;
+	unsigned long delay;
+
+	delayed_work = to_delayed_work(work);
+	mtud = container_of(delayed_work, struct batadv_mtu, periodic_work);
+	neigh = batadv_mtu_work_to_neigh(mtud);
+	if (!neigh)
+		goto out;
+	hard_if = neigh->if_incoming;
+	if (!hard_if->soft_iface)
+		goto out;
+	bat_priv = netdev_priv(hard_if->soft_iface);
+
+	batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+		"MTU: %pM/%s - reprobing link MTU %d\n",
+		neigh->addr, netdev_name(hard_if->net_dev),
+		hard_if->net_dev->mtu);
+
+	/* send probes */
+	mod_delayed_work(batadv_event_workqueue, &mtud->recv_work, DELAY_RECV);
+	batadv_mtu_send_probes(neigh);
+
+	/* reschedule periodic */
+	delay = READ_ONCE(mtud->delay);
+	delay = clamp(delay * 2, DELAY_MIN, DELAY_MAX);
+	WRITE_ONCE(mtud->delay, delay);
+	mod_delayed_work(batadv_event_workqueue, &mtud->periodic_work, delay);
+out:
+	batadv_hardif_neigh_put(neigh);
+}
+
+/**
+ * batadv_mtu_process_timeout() - no probe response received in time
+ * @work: the delayed work struct
+ *
+ * If triggered this means we should:
+ *   - fall back to conservative mtu
+ *   - reschedule the periodic wq soonish
+ *
+ */
+static void batadv_mtu_process_timeout(struct work_struct *work)
+{
+	struct delayed_work *delayed_work;
+	struct batadv_mtu *mtud;
+	struct batadv_hardif_neigh_node *neigh;
+	struct batadv_priv *bat_priv;
+	struct batadv_hard_iface *hard_if;
+	int prev_mtu;
+
+	delayed_work = to_delayed_work(work);
+	mtud = container_of(delayed_work, struct batadv_mtu, recv_work);
+	neigh = batadv_mtu_work_to_neigh(mtud);
+	if (!neigh)
+		goto out;
+	hard_if = neigh->if_incoming;
+	if (!hard_if->soft_iface)
+		goto out;
+	bat_priv = netdev_priv(hard_if->soft_iface);
+
+	batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+		"MTU: %pM/%s - probing timeout for MTU %d\n",
+		neigh->addr, netdev_name(hard_if->net_dev),
+		hard_if->net_dev->mtu);
+
+	/* no ack in time: fall back to the conservative default MTU */
+	prev_mtu = atomic_xchg(&mtud->mtu, BATADV_MTU_DEF);
+	if (prev_mtu != BATADV_MTU_DEF) {
+		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+			"MTU: %pM/%s - downgrading MTU to %d\n",
+			neigh->addr, netdev_name(hard_if->net_dev),
+			BATADV_MTU_DEF);
+		/* reschedule periodic */
+		WRITE_ONCE(mtud->delay, DELAY_MIN);
+		mod_delayed_work(batadv_event_workqueue,
+				 &mtud->periodic_work,
+				 DELAY_MIN);
+	}
+out:
+	batadv_hardif_neigh_put(neigh);
+}
+
+/**
+ * batadv_mtu_do_neigh_release() - Release neighbor related data
+ */
+static void batadv_mtu_do_neigh_release(struct work_struct *work)
+{
+	struct delayed_work *delayed_work;
+	struct batadv_mtu *mtud;
+
+	delayed_work = to_delayed_work(work);
+	mtud = container_of(delayed_work, struct batadv_mtu, release_work);
+
+	cancel_delayed_work_sync(&mtud->periodic_work);
+	cancel_delayed_work_sync(&mtud->recv_work);
+	kfree(mtud);
+}
+
+/**
+ * batadv_mtu_skb_check() - validate an MTU probe/resp ethernet frame
+ * @skb: the received packet
+ * Return: 0 if the frame looks like a valid MTU packet, -EINVAL otherwise
+ */
+static int batadv_mtu_skb_check(struct sk_buff *skb)
+{
+	struct ethhdr *ethhdr;
+
+	/* pull may reallocate skb data: read the header only afterwards */
+	if (unlikely(!pskb_may_pull(skb, BATADV_FBX_MTU_HLEN)))
+		return -EINVAL;
+
+	ethhdr = eth_hdr(skb);
+	if (is_broadcast_ether_addr(ethhdr->h_dest))
+		return -EINVAL;
+	if (!is_valid_ether_addr(ethhdr->h_source))
+		return -EINVAL;
+
+	return 0;
+}
+
+static const char * subtype_to_str[] = {
+	[BATADV_FBX_SUB_MTU_PROBE] = "probe",
+	[BATADV_FBX_SUB_MTU_RESP] = "resp",
+};
+
+/**
+ * batadv_recv_mtu_packet() - receive a MTU probe packet
+ * @iface: the hard interface we received the skb on
+ * @skb: probe packet received
+ *
+ * This will process a probe request or a probe response.
+ * Either sending a response or adjusting the mtu if needed.
+ * If the MTU gets upgraded, we reschedule the periodic work
+ *
+ */
+static int batadv_recv_mtu_packet(struct batadv_hard_iface *iface,
+				  struct sk_buff *skb)
+{
+	struct batadv_priv *bat_priv;
+	struct ethhdr *ethhdr;
+	struct batadv_fbx_mtu_packet *pkt;
+	struct batadv_hardif_neigh_node *neigh = NULL;
+	u8 dst[ETH_ALEN];
+	int ret = NET_RX_DROP, rc;
+
+	if (!iface->soft_iface)
+		goto free_skb;
+	bat_priv = netdev_priv(iface->soft_iface);
+
+	skb = skb_unshare(skb, GFP_ATOMIC);
+	if (!skb)
+		goto free_skb;
+
+	/* the check may pull (reallocate) skb data: read headers after it */
+	if (batadv_mtu_skb_check(skb) < 0)
+		goto free_skb;
+
+	ethhdr = eth_hdr(skb);
+	pkt = (struct batadv_fbx_mtu_packet *)(ethhdr + 1);
+	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+		"MTU: %pM/%s: received MTU %s packet\n",
+		ethhdr->h_source, netdev_name(iface->net_dev),
+		pkt->hdr.subtype < ARRAY_SIZE(subtype_to_str) ?
+		subtype_to_str[pkt->hdr.subtype] : "unknown");
+	switch (pkt->hdr.subtype) {
+	case BATADV_FBX_SUB_MTU_PROBE:
+		ether_addr_copy(dst, ethhdr->h_source);
+
+		/* convert probe packet in response packet */
+		pkt->hdr.subtype = BATADV_FBX_SUB_MTU_RESP;
+
+		/* trim to new size */
+		if (skb_linearize(skb) < 0)
+			goto free_skb;
+		skb_trim(skb, sizeof(*pkt));
+		/* send it back to owner; send consumes skb even on error */
+		rc = batadv_send_skb_packet(skb, iface, dst);
+		skb = NULL;
+		if (rc != NET_XMIT_SUCCESS)
+			goto free_skb;
+		break;
+	case BATADV_FBX_SUB_MTU_RESP:
+		/* get neigh */
+		neigh = batadv_hardif_neigh_get(iface, ethhdr->h_source);
+		if (!neigh) {
+			pr_warn("batadv: MTU: %pM - unknown neigh\n",
+				ethhdr->h_source);
+			goto free_skb;
+		}
+
+		/* echoed MTU must match ours (host byte order, as sent) */
+		if (pkt->mtu != iface->net_dev->mtu) {
+			pr_warn("batadv: %pM - bad mtu %d\n",
+				ethhdr->h_source, pkt->mtu);
+			goto free_skb;
+		}
+
+		/* use this mtu and store ack time */
+		cancel_delayed_work(&neigh->mtud->recv_work);
+		rc = atomic_xchg(&neigh->mtud->mtu, iface->net_dev->mtu);
+		if (rc != iface->net_dev->mtu) {
+			batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+				"MTU: %pM/%s: upgrading MTU to %d\n",
+				ethhdr->h_source, netdev_name(iface->net_dev),
+				iface->net_dev->mtu);
+			WRITE_ONCE(neigh->mtud->delay, DELAY_MIN);
+			mod_delayed_work(batadv_event_workqueue,
+					 &neigh->mtud->periodic_work,
+					 DELAY_MIN);
+		}
+
+		consume_skb(skb);
+		break;
+	default:
+		pr_warn_ratelimited("batadv: MTU: %pM - unknown subtype: %d\n",
+				ethhdr->h_source, pkt->hdr.subtype);
+		goto free_skb;
+	}
+
+	ret = NET_RX_SUCCESS;
+
+free_skb:
+	if (ret == NET_RX_DROP)
+		kfree_skb(skb);
+	if (neigh)
+		batadv_hardif_neigh_put(neigh);
+	return ret;
+}
+
+/**
+ * batadv_mtu_neigh_release: unschedules the periodic & recv wq
+ * @neigh: the neighbor being freed
+ */
+static void batadv_mtu_neigh_release(struct batadv_hardif_neigh_node *neigh)
+{
+	rcu_assign_pointer(neigh->mtud->neigh, NULL);
+	mod_delayed_work(batadv_event_workqueue, &neigh->mtud->release_work, 0);
+}
+
+/**
+ * batadv_mtu_neigh_init: init a neighbor for mtu check
+ *
+ * @neigh: the neighbor being initialized
+ *
+ * - init the periodic & recv wq
+ * - sets the default MTU
+ * - schedules the periodic MTU check if needed
+ */
+static int batadv_mtu_neigh_init(struct batadv_hardif_neigh_node *neigh)
+{
+	struct batadv_hard_iface *hard_if = neigh->if_incoming;
+	struct batadv_mtu *mtud;
+	int hard_mtu;
+
+	mtud = kmalloc(sizeof(*mtud), GFP_ATOMIC);
+	if (!mtud)
+		return -ENOMEM;
+
+	rcu_assign_pointer(mtud->neigh, neigh);
+	hard_mtu = hard_if->net_dev->mtu;
+	INIT_DELAYED_WORK(&mtud->periodic_work, batadv_mtu_process_periodic);
+	INIT_DELAYED_WORK(&mtud->recv_work, batadv_mtu_process_timeout);
+	INIT_DELAYED_WORK(&mtud->release_work, batadv_mtu_do_neigh_release);
+	atomic_set(&mtud->mtu, hard_mtu);
+	mtud->delay = 0;
+
+	if (!batadv_is_wifi_hardif(hard_if) && hard_mtu > BATADV_MTU_DEF) {
+		atomic_set(&mtud->mtu, BATADV_MTU_DEF);
+		mod_delayed_work(batadv_event_workqueue,
+				 &mtud->periodic_work, 0);
+	}
+
+	neigh->mtud = mtud;
+	return 0;
+}
+
+/**
+ * batadv_mtu_hardif_update() - update mtu of hardif
+ *
+ * This will cause all neighs to renegotiate their MTU if they are on an
+ * ethernet link with a big MTU
+ */
+static void batadv_mtu_hardif_update(struct batadv_hard_iface *iface)
+{
+	struct batadv_hardif_neigh_node *hardif_neigh;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(hardif_neigh, &iface->neigh_list, list) {
+		batadv_mtu_neigh_release(hardif_neigh);
+		batadv_mtu_neigh_init(hardif_neigh);
+	}
+	rcu_read_unlock();
+}
+
+/**
+ * batadv_mtu_neigh_dump() - Dump MTU specific information for a specific
+ * neighbour
+ * @bat_priv: The bat priv with all the soft interface information
+ * @info: NL message info (not used here)
+ * @attr: NL message attributes (not used here)
+ * @skb: Current originator NL message
+ * @data: Here this is the neighbour being dumped
+ */
+static void batadv_mtu_neigh_dump(struct batadv_priv *bat_priv,
+				  struct genl_info *info,
+				  struct nlattr **attr,
+				  struct sk_buff *skb,
+				  void *data)
+{
+	struct batadv_hardif_neigh_node *n = data;
+
+	if (!skb)
+		return;
+
+	nla_put_u32(skb, BATADV_ATTR_FBX_MTU, atomic_read(&n->mtud->mtu));
+}
+
+/**
+ * batadv_mtu_new_priv: init MTU data for a bat_priv
+ * @bat_priv: the bat_priv to init
+ *
+ * - inits the MTU packet seqno
+ */
+static int batadv_mtu_new_priv(struct batadv_priv *bat_priv)
+{
+	atomic_set(&bat_priv->mtu_seqno, 0);
+	return 0;
+}
+
+/**
+ * batadv_mtu_init: init FBX MTU module
+ */
+static int __init batadv_mtu_init(void)
+{
+	BUILD_BUG_ON(sizeof(struct batadv_fbx_mtu_packet) != 10);
+
+	batadv_fbx_recv_handler_register(BATADV_FBX_SUB_MTU_PROBE,
+					 batadv_recv_mtu_packet);
+	batadv_fbx_recv_handler_register(BATADV_FBX_SUB_MTU_RESP,
+					 batadv_recv_mtu_packet);
+	return 0;
+}
+
+/**
+ * batadv_mtu_exit: Exit FBX MTU module
+ */
+static void __exit batadv_mtu_exit(void)
+{
+	batadv_fbx_recv_handler_unregister(BATADV_FBX_SUB_MTU_PROBE);
+	batadv_fbx_recv_handler_unregister(BATADV_FBX_SUB_MTU_RESP);
+}
+
+struct batadv_fbx_module_ops const batadv_mtu_module_ops = {
+	.init = batadv_mtu_init,
+	.exit = batadv_mtu_exit,
+	.new_priv = batadv_mtu_new_priv,
+	.hardif_update = batadv_mtu_hardif_update,
+	.neigh_init = batadv_mtu_neigh_init,
+	.neigh_release = batadv_mtu_neigh_release,
+};
+
+struct batadv_fbx_nl_ops const batadv_mtu_nl_ops[] = {
+	{
+		.cmd = BATADV_CMD_GET_NEIGHBORS,
+		.hdl = batadv_mtu_neigh_dump,
+	},
+};
+
+struct batadv_fbx_module const batadv_mtu_module = {
+	.name = "mtu",
+	.ops = &batadv_mtu_module_ops,
+	.nl_ops = batadv_mtu_nl_ops,
+	.nl_ops_sz = ARRAY_SIZE(batadv_mtu_nl_ops),
+};
diff -Nruw linux-6.4-fbx/net/batman-adv/fbx./mtu.h linux-6.4-fbx/net/batman-adv/fbx/mtu.h
--- linux-6.4-fbx/net/batman-adv/fbx./mtu.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/batman-adv/fbx/mtu.h	2023-12-12 17:24:34.163627207 +0100
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Nicolas Escande <nico.escande@gmail.com>
+ */
+
+#ifndef _NET_BATMAN_ADV_FBX_MTU_H_
+#define _NET_BATMAN_ADV_FBX_MTU_H_
+
+#ifdef CONFIG_BATMAN_ADV_FBX_MTU
+
+#include <linux/skbuff.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+#include "../main.h"
+
+/**
+ * batadv_mtu_get_for_neigh() - get the MTU to use for this neigh
+ *
+ * This function returns the MTU to use when talking to a given neighbor
+ *
+ * returns: the mtu
+ */
+static inline int batadv_mtu_get_for_neigh(struct batadv_hardif_neigh_node *n)
+{
+	return atomic_read(&n->mtud->mtu);
+}
+#else
+static inline int batadv_mtu_get_for_neigh(struct batadv_hardif_neigh_node *n)
+{
+	return n->if_incoming->net_dev->mtu;
+}
+#endif
+
+#endif /* _NET_BATMAN_ADV_FBX_MTU_H_ */
diff -Nruw linux-6.4-fbx/net/batman-adv/fbx./slap.c linux-6.4-fbx/net/batman-adv/fbx/slap.c
--- linux-6.4-fbx/net/batman-adv/fbx./slap.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/batman-adv/fbx/slap.c	2023-12-12 17:24:34.163627207 +0100
@@ -0,0 +1,1386 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Remi Pommarel <rpommarel@freebox.fr>
+ */
+#include <net/netlink.h>
+#include <net/genetlink.h>
+#include <uapi/linux/batadv_packet.h>
+
+#include "../main.h"
+#include "../hard-interface.h"
+#include "../send.h"
+#include "../originator.h"
+#include "../netlink.h"
+#include "../translation-table.h"
+#include "fbx.h"
+
+#define SLAP_MASTER_ANNOUNCE_RATE 500 /* 500 ms */
+#define SLAP_MASTER_EXPIRE (2 * SLAP_MASTER_ANNOUNCE_RATE)
+
+#define SLAP_PRIO_DEFAULT (U32_MAX >> 1)
+
+#define slap_dereference_check(b, p)					\
+	rcu_dereference_check(p, lockdep_is_held(&(b)->slap_lock))
+
+#define slap_id_get_rcu(b) slap_dereference_check(b, (b)->slap_id)
+#define slap_master_get_rcu(b) slap_dereference_check(b, (b)->slap_master)
+
+#define slap_printk(lvl, p, fmt, args...)				\
+	pr_ ## lvl("%s: " fmt, dev_name(&(p)->soft_iface->dev), ##args)
+#define slap_debug_ratelimited(p, fmt, args...)				\
+	slap_printk(debug_ratelimited, p , fmt, ##args)
+#define slap_debug(p, fmt, args...) slap_printk(debug, p , fmt, ##args)
+#define slap_info(p, fmt, args...) slap_printk(info, p , fmt, ##args)
+#define slap_err(p, fmt, args...) slap_printk(err, p , fmt, ##args)
+
+#define to_ns(w, f)							\
+	container_of(w, struct batadv_hardif_neigh_slap, f.work)
+
+#define to_slap_id(w)							\
+	container_of(w, struct batadv_slap_id, expire.work)
+
+#pragma pack(2)
+
+/**
+ * batadv_slap_tvlv_master - FBX TVLV packet used to propagate our current
+ * master to all nodes through OGM
+ */
+struct batadv_slap_tvlv_master {
+	__u8 addr[ETH_ALEN];
+};
+
+#pragma pack()
+
+/**
+ * Compare SLAP ID with a prio and addr
+ * @id1: SLAP ID to compare
+ * @prio: SLAP ID priority to compare id with
+ * @addr: SLAP ID address to compare id with
+ * @return: If id is lower than, equals to, higher than prio and addr returns
+ *          negative number, zero, positive number respectively
+ */
+static int batadv_slap_id_cmp(struct batadv_slap_id const *id1,
+			      u32 prio, u8 const *addr)
+{
+	if (id1->prio < prio)
+		return -1;
+
+	if (id1->prio > prio)
+		return 1;
+
+	return memcmp(id1->addr, addr, ETH_ALEN);
+}
+
+/**
+ * Is the bat soft interface currently master
+ * Should be called either with rcu_read_lock or bat_priv->slap_lock held
+ */
+static bool batadv_slap_is_master(struct batadv_priv const *bat_priv)
+{
+	return slap_id_get_rcu(bat_priv) == slap_master_get_rcu(bat_priv);
+}
+
+/**
+ * Check if SLAP ID could be new SLAP master
+ * Should be called with rcu_read_lock or bat_priv->slap_lock held
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ * @id: SLAP ID of the potential master
+ * @return: True if id could be the segment master, false otherwise
+ */
+static bool batadv_slap_id_is_new_master(struct batadv_priv *bat_priv,
+					 struct batadv_slap_id const *id)
+{
+	struct batadv_slap_id *master;
+
+	master = slap_master_get_rcu(bat_priv);
+	return (batadv_slap_id_cmp(id, master->prio, master->addr) < 0);
+}
+
+/**
+ * Queue SLAP ID to be freed after RCU grace period
+ *
+ * @ref: kref pointer of the SLAP ID to free
+ */
+static void batadv_slap_id_free_rcu(struct kref *ref)
+{
+	struct batadv_slap_id *id;
+
+	id = container_of(ref, struct batadv_slap_id, refcount);
+	kfree_rcu(id, rcu);
+}
+
+
+/**
+ * Increase SLAP ID refcount
+ *
+ * @id: SLAP ID to take a reference on
+ * @return: True if SLAP ID refcount was successfully incremented, false otherwise
+ */
+static bool batadv_slap_id_get(struct batadv_slap_id *id)
+{
+	return kref_get_unless_zero(&id->refcount) != 0;
+}
+
+/**
+ * Release reference on a SLAP ID, potentially freeing it
+ *
+ * @id: ID to release reference on
+ */
+static void batadv_slap_id_put(struct batadv_slap_id *id)
+{
+	kref_put(&id->refcount, batadv_slap_id_free_rcu);
+}
+
+static void _batadv_slap_set_master(struct batadv_priv *bat_priv,
+				    struct batadv_slap_id *id);
+
+/**
+ * Hit a current SLAP ID (prevent its expiration)
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ * @id: SLAP ID to keep alive
+ */
+static void batadv_slap_id_hit(struct batadv_priv *bat_priv,
+			       struct batadv_slap_id *id)
+{
+	/*
+	 * WRITE_ONCE/READ_ONCE used to avoid load/store tearing, see
+	 * https://lwn.net/Articles/793253/
+	 */
+	WRITE_ONCE(id->exp_time,
+		   jiffies + msecs_to_jiffies(SLAP_MASTER_EXPIRE));
+}
+
+/**
+ * Schedule SLAP ID expiration delayed work
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ * @id: SLAP ID to schedule expiration delayed work for
+ * @return: False if expiration work has been queued, True if it has only been
+ * rescheduled
+ */
+static bool batadv_slap_id_schedule_expire(struct batadv_priv *bat_priv,
+					   struct batadv_slap_id *id)
+{
+	return mod_delayed_work(bat_priv->slap_wq, &id->expire,
+				READ_ONCE(id->exp_time) - jiffies);
+}
+
+/**
+ * Start SLAP ID expiration routine
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ * @id: SLAP ID to set expiration deadline for
+ */
+static void batadv_slap_id_start_expire(struct batadv_priv *bat_priv,
+					struct batadv_slap_id *id)
+{
+	bool ret;
+
+	batadv_slap_id_hit(bat_priv, id);
+	ret = batadv_slap_id_schedule_expire(bat_priv, id);
+	if (!ret)
+		batadv_slap_id_get(id);
+}
+
+/**
+ * Force SLAP ID expiration routine
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ * @id: SLAP ID to set expiration deadline for
+ */
+static void batadv_slap_id_force_expire(struct batadv_priv *bat_priv,
+					struct batadv_slap_id *id)
+{
+	bool ret;
+
+	/* If already expired do nothing */
+	if (!batadv_slap_id_get(id))
+		return;
+
+	/*
+	 * At this point we know we have a reference on ID, so it is safe to
+	 * re-schedule the expiration delayed work here. We will drop the ref if
+	 * the expiration delayed work was only re-scheduled (not queued).
+ * This is a bit convoluted, but this way the SLAP ID will very likely
+ * be deleted by the expiration timer outside of bat_priv->slap_lock
+	 * critical section and only very rarely by this function which will
+	 * likely be called while holding bat_priv->slap_lock.
+	 */
+
+	/* Force expiration now */
+	WRITE_ONCE(id->exp_time, jiffies);
+	ret = batadv_slap_id_schedule_expire(bat_priv, id);
+	if (ret)
+		batadv_slap_id_put(id);
+}
+
+/**
+ * Expire a neighbor SLAP ID, if it was master demote it first
+ * Takes bat->slap_lock
+ *
+ * @work: work_struct associated with the SLAP ID being expired
+ * The reference held for the queued expiration work is dropped at the end
+ */
+static void batadv_slap_id_expire(struct work_struct *work)
+{
+	struct batadv_slap_id *id = to_slap_id(work);
+	struct batadv_priv *bat_priv = id->bat_priv;
+
+	/* If expire fire too soon let's rearm it */
+	if (time_before(jiffies, READ_ONCE(id->exp_time))) {
+		batadv_slap_id_schedule_expire(bat_priv, id);
+		return;
+	}
+
+	slap_debug(bat_priv, "Expiring SLAP ID %u/%pM\n", id->prio, id->addr);
+
+	spin_lock_bh(&bat_priv->slap_lock);
+	if (id == slap_master_get_rcu(bat_priv))
+		_batadv_slap_set_master(bat_priv, slap_id_get_rcu(bat_priv));
+	spin_unlock_bh(&bat_priv->slap_lock);
+
+	batadv_slap_id_put(id);
+}
+
+/**
+ * Init a new SLAP ID
+ * @id: SLAP ID to initialize
+ * @bat_priv: bat_priv with all soft interface information
+ * @prio: Prio of the new SLAP ID
+ * @addr: Address of the new SLAP ID
+ * All fields are initialized and the caller owns the initial reference
+ */
+static void batadv_slap_id_init(struct batadv_slap_id *id,
+				struct batadv_priv *bat_priv,
+				u32 prio,
+				u8 const *addr)
+{
+	kref_init(&id->refcount);
+	id->bat_priv = bat_priv;
+	id->prio = prio;
+	memcpy(id->addr, addr, sizeof(id->addr));
+	INIT_DELAYED_WORK(&id->expire, batadv_slap_id_expire);
+}
+
+/**
+ * batadv_slap_send_id() - Send (or resend) our SLAP ID to a specific neighbor
+ * @bat_priv: bat_priv with all soft interface information
+ * @neigh: Neighbor to send our SLAP ID to
+ *
+ * Clones the pre-built announce skb, patches in the current prio and sends
+ * it to the neighbor.  Takes the RCU read lock.
+ *
+ * Return: 0 on success, -ENOMEM if the skb copy failed
+ */
+static int batadv_slap_send_id(struct batadv_priv *bat_priv,
+			       struct batadv_hardif_neigh_node *neigh)
+{
+	struct batadv_fbx_slap_packet *pkt;
+	struct batadv_slap_id *id;
+	struct sk_buff *skb;
+	int ret = -ENOMEM;
+	u32 prio;
+
+	/* snapshot prio and clone the template skb under RCU protection */
+	rcu_read_lock();
+	id = slap_id_get_rcu(bat_priv);
+	prio = id->prio;
+	skb = skb_copy(rcu_dereference(bat_priv->slap_skb), GFP_ATOMIC);
+	rcu_read_unlock();
+
+	if (!skb)
+		goto out;
+
+	pkt = (struct batadv_fbx_slap_packet *)skb->data;
+	pkt->prio = htonl(prio);
+	batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);
+
+	slap_debug_ratelimited(bat_priv, "Sending SLAP Prio %u to %pM\n",
+			       prio, neigh->orig);
+	ret = 0;
+out:
+	return ret;
+}
+
+/**
+ * batadv_slap_get_priv() - Get bat_priv from soft interface
+ *
+ * @soft_iface: batman interface to get bat_priv from
+ * @return: priv on success, NULL otherwise
+ */
+static struct batadv_priv *batadv_slap_get_priv(struct net_device *soft_iface)
+{
+	/* tolerate a missing soft interface so callers need no extra check */
+	if (!soft_iface)
+		return NULL;
+	return netdev_priv(soft_iface);
+}
+
+/**
+ * batadv_slap_work_to_neigh() - Get neighbor reference from SLAP neighbor
+ * work.
+ *
+ * @work: work_struct associate with neighbor
+ * @return: NULL if Neighbor is currently being deleted, neighbor hardif
+ * pointer with incremented ref count otherwise
+ */
+static struct batadv_hardif_neigh_node *
+batadv_slap_work_to_neigh(struct work_struct *work)
+{
+	struct batadv_hardif_neigh_slap *ns = to_ns(work, announce);
+	struct batadv_hardif_neigh_node *neigh;
+
+	rcu_read_lock();
+	neigh = rcu_dereference(ns->neigh);
+	if (!neigh)
+		goto out;
+	/* refcount may already have hit zero if the neighbor is being
+	 * released concurrently; in that case report it as gone
+	 */
+	if (!kref_get_unless_zero(&neigh->refcount))
+		neigh = NULL;
+out:
+	rcu_read_unlock();
+	return neigh;
+}
+
+/**
+ * batadv_slap_do_announce() - Announce work that recurrently sends our SLAP
+ * ID to a specific neighbor while this originator is SLAP master
+ * Takes RCU read lock
+ *
+ * @work: work_struct associate with neighbor
+ */
+static void batadv_slap_do_announce(struct work_struct *work)
+{
+	struct batadv_hardif_neigh_node *neigh;
+	struct batadv_priv *bat_priv;
+	bool slap, master;
+
+	neigh = batadv_slap_work_to_neigh(work);
+	/* Neighbor is being deleted */
+	if (!neigh)
+		goto out;
+
+	bat_priv = batadv_slap_get_priv(neigh->if_incoming->soft_iface);
+	if (!bat_priv)
+		goto out;
+
+	rcu_read_lock();
+	slap = rcu_dereference(bat_priv->slap_iface) == neigh->if_incoming;
+	master = batadv_slap_is_master(bat_priv);
+	rcu_read_unlock();
+
+	/* Only current SLAP master should announce itself */
+	if (!slap || !master)
+		goto out;
+
+	batadv_slap_send_id(bat_priv, neigh);
+
+	/* re-arm ourselves for the next periodic announce */
+	mod_delayed_work(bat_priv->slap_wq, &neigh->slap->announce,
+			 msecs_to_jiffies(SLAP_MASTER_ANNOUNCE_RATE));
+out:
+	/* NOTE(review): neigh may be NULL here (early exit path) — relies on
+	 * batadv_hardif_neigh_put() tolerating NULL; confirm
+	 */
+	batadv_hardif_neigh_put(neigh);
+}
+
+/**
+ * batadv_slap_do_neigh_release() - Work that effectively clean SLAP neighbor
+ * data.
+ *
+ * @work: work_struct associate with neighbor
+ */
+static void batadv_slap_do_neigh_release(struct work_struct *work)
+{
+	struct batadv_hardif_neigh_slap *ns = to_ns(work, release);
+
+	/* make sure no announce work can still reference ns before freeing */
+	cancel_delayed_work_sync(&ns->announce);
+	kfree(ns);
+}
+
+/**
+ * batadv_slap_start_announce() - Start announce for all neighbors, used when
+ * this originator just got elected SLAP master
+ * Takes RCU read lock
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ */
+static void batadv_slap_start_announce(struct batadv_priv *bat_priv)
+{
+	struct batadv_hardif_neigh_node *neigh;
+	struct batadv_hard_iface *slap_iface;
+
+	rcu_read_lock();
+	slap_iface = rcu_dereference(bat_priv->slap_iface);
+	/* SLAP not enabled on any interface: nothing to announce */
+	if (slap_iface == NULL)
+		goto out;
+	/* delay 0: fire each neighbor's announce work immediately */
+	hlist_for_each_entry_rcu(neigh, &slap_iface->neigh_list, list)
+		mod_delayed_work(bat_priv->slap_wq, &neigh->slap->announce, 0);
+out:
+	rcu_read_unlock();
+}
+
+/**
+ * batadv_slap_stop_announce() - Stop announce for all neighbors, used when
+ * this originator just got demoted as SLAP master
+ * Takes RCU read lock
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ */
+static void batadv_slap_stop_announce(struct batadv_priv *bat_priv)
+{
+	struct batadv_hardif_neigh_node *neigh;
+	struct batadv_hard_iface *slap_iface;
+
+	rcu_read_lock();
+	slap_iface = rcu_dereference(bat_priv->slap_iface);
+	if (slap_iface == NULL)
+		goto out;
+	/* non-sync cancel: a concurrently running announce will notice we are
+	 * no longer master and not re-arm itself
+	 */
+	hlist_for_each_entry_rcu(neigh, &slap_iface->neigh_list, list)
+		cancel_delayed_work(&neigh->slap->announce);
+out:
+	rcu_read_unlock();
+}
+
+/**
+ * _batadv_slap_set_master() - Update current SLAP master
+ * @bat_priv: bat_priv with all soft interface information
+ * @id: New master ID
+ *
+ * Needs bat_priv->slap_lock to be held.  Starts/stops the periodic announce
+ * depending on whether we were/are the master, advertises the new master
+ * through the OGM TVLV container and expires the old master ID.
+ */
+static void _batadv_slap_set_master(struct batadv_priv *bat_priv,
+				    struct batadv_slap_id *id)
+{
+	struct batadv_slap_tvlv_master tvlv;
+	struct batadv_slap_id *old_master;
+
+	/* lockdep_assert_held() takes a pointer to the lock */
+	lockdep_assert_held(&bat_priv->slap_lock);
+
+	old_master = slap_master_get_rcu(bat_priv);
+	rcu_assign_pointer(bat_priv->slap_master, id);
+	slap_debug(bat_priv, "New SLAP master %u/%pM\n",
+		   id->prio, id->addr);
+
+	/* demoted: the old master was our own local ID, stop announcing */
+	if (old_master == slap_id_get_rcu(bat_priv))
+		batadv_slap_stop_announce(bat_priv);
+	/* promoted: the new master is our own local ID, start announcing */
+	if (batadv_slap_is_master(bat_priv))
+		batadv_slap_start_announce(bat_priv);
+
+	ether_addr_copy(tvlv.addr, id->addr);
+	batadv_fbx_tvlv_container_register(bat_priv,
+					   BATADV_FBX_TVLV_SLAP_MASTER,
+					   BATADV_FBX_TVLV_SLAP_VERSION,
+					   &tvlv, sizeof(tvlv));
+
+	batadv_slap_id_force_expire(bat_priv, old_master);
+}
+
+/**
+ * batadv_slap_set_master() - Set new SLAP segment master
+ * Because master candidate has been tested only under rcu protection, it needs
+ * to be rechecked under lock, if it is still a good candidate then it is
+ * elected the segment master
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ * @id: new master candidate SLAP ID
+ * @return: 0 if candidate is now the SLAP master, negative number otherwise
+ */
+static int batadv_slap_set_master(struct batadv_priv *bat_priv,
+				  struct batadv_slap_id *id)
+{
+	int ret = -1;
+
+	spin_lock_bh(&bat_priv->slap_lock);
+
+	/* TODO check for interface to be SLAP interface */
+	/* recheck under the lock: a better master may have won meanwhile */
+	if (!batadv_slap_id_is_new_master(bat_priv, id))
+		goto unlock;
+
+	_batadv_slap_set_master(bat_priv, id);
+
+	ret = 0;
+unlock:
+	spin_unlock_bh(&bat_priv->slap_lock);
+	return ret;
+}
+
+/**
+ * batadv_slap_recv_neigh_id() - Alloc and try to set new neighbor master, if
+ * current master is already this very neighbor just hit it
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ * @prio: Neighbor SLAP ID priority
+ * @addr: Neigh SLAP ID address
+ */
+static void batadv_slap_recv_neigh_id(struct batadv_priv *bat_priv,
+				      u32 prio, u8 const *addr)
+{
+	struct batadv_slap_id *id;
+	int ret;
+
+	/*
+	 * First try to fastpath test if neighbor is new master, only false
+	 * positive can happen here
+	 */
+	rcu_read_lock();
+	id = slap_master_get_rcu(bat_priv);
+	ret = batadv_slap_id_cmp(id, prio, addr);
+	/* Neighbor is already master, just hit it (refresh its expiry) */
+	if (ret == 0)
+		batadv_slap_id_hit(bat_priv, id);
+	rcu_read_unlock();
+
+	/* ret <= 0: neighbor is the current master or loses the election */
+	if (ret <= 0)
+		return;
+
+	/* ATOMIC: may be called from packet reception context */
+	id = kmalloc(sizeof(*id), GFP_ATOMIC);
+	if (!id)
+		return;
+
+	batadv_slap_id_init(id, bat_priv, prio, addr);
+	ret = batadv_slap_set_master(bat_priv, id);
+	if (ret < 0) {
+		/* lost the locked recheck; id was never published */
+		kfree(id);
+		return;
+	}
+
+	/* expire work takes its own reference, drop the allocation's one */
+	batadv_slap_id_start_expire(bat_priv, id);
+	batadv_slap_id_put(id);
+}
+
+/**
+ * batadv_slap_recv_packet() - Process a SLAP ID packet
+ *
+ * @iface: SLAP ID packet received interfaces
+ * @skb: SLAP ID packet
+ *
+ * @return: NET_RX_SUCCESS on success, NET_RX_DROP otherwise.
+ */
+static int batadv_slap_recv_packet(struct batadv_hard_iface *iface,
+				   struct sk_buff *skb)
+{
+	struct batadv_priv *bat_priv = batadv_slap_get_priv(iface->soft_iface);
+	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
+	struct batadv_hardif_neigh_node *neigh = NULL;
+	struct batadv_fbx_slap_packet *pkt;
+	bool reply = false;
+	u32 prio;
+
+	if (!bat_priv) {
+		kfree_skb(skb);
+		return NET_RX_DROP;
+	}
+
+	/* make sure the full SLAP header is in the linear skb area */
+	if (unlikely(!pskb_may_pull(skb, BATADV_FBX_SLAP_HLEN))) {
+		kfree_skb(skb);
+		return NET_RX_DROP;
+	}
+
+	pkt = (struct batadv_fbx_slap_packet *)skb->data;
+	prio = ntohl(pkt->prio);
+
+	/* only accept SLAP IDs from known neighbors */
+	neigh = batadv_hardif_neigh_get(iface, ethhdr->h_source);
+	if (!neigh)
+		goto exit;
+
+	slap_debug_ratelimited(bat_priv,
+			       "Receive SLAP pkt from neighbor %pM\n",
+			       neigh->orig);
+
+	batadv_slap_recv_neigh_id(bat_priv, prio, neigh->orig);
+
+	/* if we are (still) master, answer with our own ID */
+	rcu_read_lock();
+	reply = batadv_slap_is_master(bat_priv);
+	rcu_read_unlock();
+	if (!reply)
+		goto exit;
+
+	batadv_slap_send_id(bat_priv, neigh);
+
+exit:
+	/* NOTE(review): neigh may be NULL here — relies on
+	 * batadv_hardif_neigh_put() tolerating NULL; confirm
+	 */
+	batadv_hardif_neigh_put(neigh);
+	consume_skb(skb);
+	return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_slap_neigh_release() - Neighbor disappeared, stop announcing we are
+ * master to it
+ * @neigh: the hardif neighbor being released
+ *
+ * Clears the back-pointer so pending announce work sees the neighbor as gone,
+ * then defers the actual cleanup (cancel + kfree) to the release work.
+ */
+static void batadv_slap_neigh_release(struct batadv_hardif_neigh_node *neigh)
+{
+	rcu_assign_pointer(neigh->slap->neigh, NULL);
+	mod_delayed_work(batadv_event_workqueue, &neigh->slap->release, 0);
+}
+
+/**
+ * batadv_slap_neigh_init() - New neighbor discovered, start announcing we are
+ * master to it if it is the case
+ * @neigh: the newly discovered hardif neighbor
+ *
+ * The announce work itself checks whether we currently are the SLAP master.
+ *
+ * Return: 0 on success, negative errno otherwise
+ */
+static int batadv_slap_neigh_init(struct batadv_hardif_neigh_node *neigh)
+{
+	struct batadv_hardif_neigh_slap *ns;
+	struct batadv_priv *bat_priv;
+
+	/* resolve bat_priv before allocating so the error path cannot leak */
+	bat_priv = batadv_slap_get_priv(neigh->if_incoming->soft_iface);
+	if (!bat_priv)
+		return -EINVAL;
+
+	ns = kmalloc(sizeof(*ns), GFP_ATOMIC);
+	if (!ns)
+		return -ENOMEM;
+
+	rcu_assign_pointer(ns->neigh, neigh);
+	INIT_DELAYED_WORK(&ns->announce, batadv_slap_do_announce);
+	INIT_DELAYED_WORK(&ns->release, batadv_slap_do_neigh_release);
+	/* delay 0: announce immediately (no-op unless we are master) */
+	mod_delayed_work(bat_priv->slap_wq, &ns->announce, 0);
+	neigh->slap = ns;
+
+	return 0;
+}
+
+/* Originator is going away: free its SLAP segment ID after a grace period,
+ * since readers access it under RCU only
+ */
+static void batadv_slap_orig_release(struct batadv_orig_node *node)
+{
+	kfree_rcu(node->slap_segid, rcu);
+}
+
+/**
+ * batadv_slap_orig_init() - Init SLAP specific bit in new originator node
+ *
+ * @node: Originator node to init
+ * @return: 0 on success, negative number otherwise
+ */
+static int batadv_slap_orig_init(struct batadv_orig_node *node)
+{
+	struct batadv_slap_segid *id;
+
+	spin_lock_init(&node->slap_lock);
+	id = kmalloc(sizeof(*id), GFP_ATOMIC);
+	if (!id)
+		return -ENOMEM;
+	/* until a master TVLV is received, the originator is assumed to be
+	 * alone in its own segment (master = itself)
+	 */
+	ether_addr_copy(id->addr, node->orig);
+	rcu_assign_pointer(node->slap_segid, id);
+	return 0;
+}
+
+/**
+ * batadv_slap_primary_update() - Update primary interface callback
+ * @bat_priv: bat_priv with all soft interface information
+ * @primary: the new primary hard interface
+ *
+ * Rebuilds our local SLAP ID from the new primary MAC (keeping the current
+ * prio) and re-runs the master election with it.
+ */
+static void batadv_slap_primary_update(struct batadv_priv *bat_priv,
+				       struct batadv_hard_iface *primary)
+{
+	const u8 *addr = primary->net_dev->dev_addr;
+	struct batadv_slap_id *oid, *id;
+	bool cur_master;
+
+	id = kmalloc(sizeof(*id), GFP_KERNEL);
+	if (!id)
+		return;
+
+	spin_lock_bh(&bat_priv->slap_lock);
+	cur_master = batadv_slap_is_master(bat_priv);
+	oid = slap_id_get_rcu(bat_priv);
+	/* keep the configured prio, only the address changes */
+	batadv_slap_id_init(id, bat_priv, oid->prio, addr);
+	slap_debug(bat_priv, "New SLAP ID %u/%pM\n", id->prio, id->addr);
+	rcu_assign_pointer(bat_priv->slap_id, id);
+	/* if we were master, stay master with the new ID; otherwise only
+	 * take over if the new ID actually wins the election
+	 */
+	if (cur_master || batadv_slap_id_is_new_master(bat_priv, id))
+		_batadv_slap_set_master(bat_priv, id);
+	spin_unlock_bh(&bat_priv->slap_lock);
+
+	/* drop the reference that bat_priv->slap_id held on the old ID */
+	batadv_slap_id_put(oid);
+}
+
+/**
+ * batadv_slap_ogm_master_recv() - Receive SLAP master OGM TVLV
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: Originator sending the TVLV
+ * @tvlv: TVLV data
+ * @len: TVLV data length
+ */
+static void batadv_slap_ogm_master_recv(struct batadv_priv *bat_priv,
+					struct batadv_orig_node *orig,
+					void *tvlv, u16 len)
+{
+	struct batadv_slap_tvlv_master *tvlv_master;
+	struct batadv_slap_segid *new, *old;
+	bool update;
+
+	/* ignore truncated TVLVs */
+	if (len < sizeof(*tvlv_master))
+		return;
+
+	tvlv_master = tvlv;
+
+	/* Quick test if master changed */
+	rcu_read_lock();
+	old = rcu_dereference(orig->slap_segid);
+	update = !ether_addr_equal(old->addr, tvlv_master->addr);
+	rcu_read_unlock();
+
+	if (!update)
+		return;
+
+	new = kmalloc(sizeof(*new), GFP_ATOMIC);
+	if (!new)
+		return;
+
+	ether_addr_copy(new->addr, tvlv_master->addr);
+
+	/* publish the new segid; old one is freed after the RCU grace
+	 * period so concurrent readers stay safe
+	 */
+	spin_lock_bh(&orig->slap_lock);
+	old = rcu_replace_pointer(orig->slap_segid, new,
+				  lockdep_is_held(&orig->slap_lock));
+	spin_unlock_bh(&orig->slap_lock);
+	kfree_rcu(old, rcu);
+}
+
+/**
+ * batadv_slap_check_skb_rx() - Check ingress skb packet
+ * @bat_priv: the bat_priv with all the soft interface information
+ * @type: Packet type (UNICAST, ICMP, TVLV, etc)
+ * @skb: incoming skb packet
+ * @return: true if packet shall pass, false otherwise
+ */
+static bool batadv_slap_check_skb_rx(struct batadv_priv *bat_priv,
+				     enum batadv_packettype type,
+				     struct sk_buff *skb)
+{
+	bool master;
+
+	rcu_read_lock();
+	master = batadv_slap_is_master(bat_priv);
+	rcu_read_unlock();
+
+	/* the segment master accepts everything */
+	if (master)
+		return true;
+
+	/* non-masters accept everything except broadcasts (the master
+	 * handles those for the whole segment)
+	 */
+	if (type != BATADV_BCAST)
+		return true;
+
+	return false;
+}
+
+/**
+ * _batadv_slap_orig_same_master() - Check if originator is on same SLAP
+ * segment
+ * rcu_read_lock() should be held
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @orig: Originator to check
+ * @return: True if originator is on same SLAP segment, false otherwise
+ */
+static bool _batadv_slap_orig_same_master(struct batadv_priv *bat_priv,
+					  struct batadv_orig_node *orig)
+{
+	struct batadv_slap_id *master;
+	struct batadv_slap_segid *id;
+
+	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+			 "batadv_slap_orig_same_master() "
+			 "called but no rcu_read_lock held");
+
+	/* same segment <=> both advertise the same master address */
+	master = slap_master_get_rcu(bat_priv);
+	id = rcu_dereference(orig->slap_segid);
+
+	return ether_addr_equal(master->addr, id->addr);
+}
+
+/**
+ * batadv_slap_orig_same_master() - Check if originator is on same SLAP
+ * segment
+ * This one actually takes rcu_read_lock()
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @orig: Originator to check
+ * @return: True if originator is on same SLAP segment, false otherwise
+ */
+static bool batadv_slap_orig_same_master(struct batadv_priv *bat_priv,
+					 struct batadv_orig_node *orig)
+{
+	bool ret;
+
+	rcu_read_lock();
+	ret = _batadv_slap_orig_same_master(bat_priv, orig);
+	rcu_read_unlock();
+	return ret;
+}
+
+/**
+ * batadv_slap_check_skb_tx() - Check egress skb packet
+ * @bat_priv: the bat_priv with all the soft interface information
+ * @skb: outgoing skb packet
+ * @vid: skb's vlan ID
+ * @return: true if packet shall pass, false otherwise
+ */
+static bool batadv_slap_check_skb_tx(struct batadv_priv *bat_priv,
+				     struct sk_buff *skb,
+				     unsigned short vid)
+{
+	struct batadv_orig_node *orig = NULL;
+	struct ethhdr *ethhdr;
+	bool ret;
+
+	ethhdr = eth_hdr(skb);
+
+	rcu_read_lock();
+	ret = batadv_slap_is_master(bat_priv);
+	rcu_read_unlock();
+
+	/* multicast/broadcast: pass only if we are the segment master */
+	if (is_multicast_ether_addr(ethhdr->h_dest))
+		goto out;
+
+	/* unicast with unknown destination: pass */
+	orig = batadv_transtable_search(bat_priv, ethhdr->h_source,
+					ethhdr->h_dest, vid);
+	if (!orig)
+		goto out;
+
+	/* destination is inside our own SLAP segment: do not send over the
+	 * mesh (presumably reachable through the segment itself — confirm)
+	 */
+	if (batadv_slap_orig_same_master(bat_priv, orig)) {
+		ret = false;
+		goto out;
+	}
+
+	ret = true;
+out:
+	/* NOTE(review): orig may be NULL here — relies on
+	 * batadv_orig_node_put() tolerating NULL; confirm
+	 */
+	batadv_orig_node_put(orig);
+	return ret;
+}
+
+/**
+ * batadv_slap_shortcut() - Check whether a destination can be reached via a
+ * SLAP shortcut (i.e. belongs to our own SLAP segment)
+ * @bat_priv: the bat_priv with all the soft interface information
+ * @dest: originator address to look up
+ * @return: true if the destination is in our SLAP segment, false otherwise
+ */
+static bool batadv_slap_shortcut(struct batadv_priv *bat_priv, u8 const *dest)
+{
+	struct batadv_orig_node *orig_node = NULL;
+	bool ret = false;
+
+	orig_node = batadv_orig_hash_find(bat_priv, dest);
+	if (!orig_node)
+		goto out;
+
+	ret = batadv_slap_orig_same_master(bat_priv, orig_node);
+out:
+	/* NOTE(review): orig_node may be NULL here — relies on
+	 * batadv_orig_node_put() tolerating NULL; confirm
+	 */
+	batadv_orig_node_put(orig_node);
+	return ret;
+}
+
+/**
+ * batadv_slap_tt_global_seen() - Check if global TT entry is actually seen by
+ * any node in same SLAP segement
+ * Takes rcu_read_lock()
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tt: Global TT entry to check
+ * @return: True if a originator on same SLAP segement has seen this entry,
+ * false otherwise
+ */
+static bool batadv_slap_tt_global_seen(struct batadv_priv *bat_priv,
+				       struct batadv_tt_global_entry *tt)
+{
+	struct batadv_tt_orig_list_entry *orig_entry;
+	struct batadv_orig_node *orig;
+	bool ret = false;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(orig_entry, &tt->orig_list, list) {
+		orig = orig_entry->orig_node;
+		/* only originators of our own segment count */
+		if (!batadv_slap_orig_same_master(bat_priv, orig))
+			continue;
+		/* and only if they actually saw the client themselves */
+		if (!(orig_entry->flags & BATADV_TT_CLIENT_SEEN))
+			continue;
+		ret = true;
+		break;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
+/**
+ * batadv_slap_tt_roam() - Check if TT roam from another SLAP segment
+ * Takes rcu_read_lock()
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tt: Global TT entry to check (may be NULL)
+ * @return: True if entry was seen on another SLAP segement, false otherwise
+ */
+static bool batadv_slap_tt_roam(struct batadv_priv *bat_priv,
+				struct batadv_tt_global_entry *tt)
+{
+	struct batadv_tt_orig_list_entry *orig_entry;
+	struct batadv_orig_node *orig;
+	bool ret = false;
+
+	/* no global entry: the client cannot have been seen elsewhere */
+	if (!tt)
+		return false;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(orig_entry, &tt->orig_list, list) {
+		orig = orig_entry->orig_node;
+		/* any announcer outside our segment means a roam */
+		if (batadv_slap_orig_same_master(bat_priv, orig))
+			continue;
+		ret = true;
+		break;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
+/**
+ * batadv_slap_tt_global_add() - A new global TT has been added, check if it
+ * comes from same segment, if so create new shallow local TT if needed
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tt: The global TT that is being added
+ * @orig: The Originator seeing this client locally
+ * @return: False if matching local TT removal should not happen, true otherwise
+ */
+static bool batadv_slap_tt_global_add(struct batadv_priv *bat_priv,
+				      struct batadv_tt_global_entry *tt,
+				      struct batadv_orig_node *orig)
+{
+	struct batadv_tt_local_entry *local;
+	/* must be initialized: the lookup below may fail and leave it unset */
+	u16 local_flags = 0;
+
+	/* originator from another segment: nothing to mirror locally */
+	if (!batadv_slap_orig_same_master(bat_priv, orig))
+		return true;
+
+	local = batadv_tt_local_hash_find(bat_priv, tt->common.addr,
+					  tt->common.vid);
+	if (local)
+		local_flags = local->common.flags;
+	batadv_tt_local_entry_put(local);
+
+	/* The client is already seen locally, keep our TL */
+	if (local_flags & BATADV_TT_CLIENT_SEEN)
+		return false;
+
+	/* All SLAP segment ref expired, remove our TL */
+	if (!batadv_slap_tt_global_seen(bat_priv, tt))
+		return true;
+
+	/* Another SLAP node detected a client, add a shallow reference to it
+	 * locally, so that shortcut through this node could happen to reach it
+	 */
+	slap_debug(bat_priv, "New SLAP shortcut for %pM\n", tt->common.addr);
+
+	batadv_tt_local_add(bat_priv->soft_iface, tt->common.addr,
+			    tt->common.vid, 0, 0);
+
+	return false;
+}
+
+/**
+ * batadv_slap_tt_global_del() - Deleting an existing global TT, if there is no
+ * more same SLAP segement node actually seeing this client remove our TL entry
+ *
+ * @bat_priv: The bat_priv with all the soft interface information
+ * @tt: Global TT entry being removed (may be NULL)
+ * @orig: Originator node removing this TT entry
+ */
+static void batadv_slap_tt_global_del(struct batadv_priv *bat_priv,
+				      struct batadv_tt_global_entry *tt,
+				      struct batadv_orig_node *orig)
+{
+	struct batadv_tt_local_entry *local;
+	u16 local_flags = 0;
+
+	if (!tt)
+		return;
+
+	/* deletions from other segments do not affect our shallow refs */
+	if (!batadv_slap_orig_same_master(bat_priv, orig))
+		return;
+
+	local = batadv_tt_local_hash_find(bat_priv, tt->common.addr,
+					  tt->common.vid);
+	if (local)
+		local_flags = local->common.flags;
+	batadv_tt_local_entry_put(local);
+
+	/* The client is still seen locally, keep our TL */
+	if (local_flags & BATADV_TT_CLIENT_SEEN)
+		return;
+
+	/* Entry is still seen by a SLAP node, keep our TL */
+	if (batadv_slap_tt_global_seen(bat_priv, tt))
+		return;
+
+	/* No more hard reference for this client in our SLAP segment, let's
+	 * remove our shallow ref.
+	 * TODO do we need roaming info here ?
+	 */
+	slap_debug(bat_priv, "Del SLAP shortcut for %pM\n", tt->common.addr);
+	batadv_tt_local_remove(bat_priv, tt->common.addr,
+			       tt->common.vid, "No more SLAP ref",
+			       false);
+}
+
+/**
+ * batadv_slap_tt_local_add() - Add a new local TT entry
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tl: New local tt entry
+ * @tg: Matching global tt entry, if any (may be NULL)
+ * @ifindex: Index receiving packet
+ * @return: false if roaming notification should be prevented, true otherwise
+ */
+static bool batadv_slap_tt_local_add(struct batadv_priv *bat_priv,
+				     struct batadv_tt_local_entry *tl,
+				     struct batadv_tt_global_entry *tg,
+				     int ifindex)
+{
+	/* no receiving interface: not a directly observed client */
+	if (!ifindex)
+		return true;
+
+	/* client was really seen on this node (not a shallow SLAP ref) */
+	tl->common.flags |= BATADV_TT_CLIENT_SEEN;
+	return batadv_slap_tt_roam(bat_priv, tg);
+}
+
+/**
+ * batadv_slap_tt_local_del() - Remove a local TT entry
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tl: The local tt entry to delete
+ * @return: True if local TT entry should be removed, false otherwise (still
+ * seen in SLAP segment)
+ */
+static bool batadv_slap_tt_local_del(struct batadv_priv *bat_priv,
+				     struct batadv_tt_local_entry *tl)
+{
+	struct batadv_tt_global_entry *tg;
+	bool shared;
+
+	/* this node no longer sees the client directly */
+	tl->common.flags &= ~BATADV_TT_CLIENT_SEEN;
+
+	tg = batadv_tt_global_hash_find(bat_priv, tl->common.addr,
+					tl->common.vid);
+	if (!tg)
+		return true;
+
+	/* keep the entry as a shallow ref while some other node in our
+	 * segment still sees the client
+	 */
+	shared = batadv_slap_tt_global_seen(bat_priv, tg);
+	batadv_tt_global_entry_put(tg);
+
+	if (!shared)
+		return true;
+
+	return false;
+}
+
+/**
+ * batadv_slap_orig_dump() - Dump SLAP specific information for a specific
+ * originator.
+ * @bat_priv: The bat priv with all the soft interface information
+ * @info: NL message info (not used here)
+ * @attr: NL message attributes (not used here)
+ * @skb: Current originator NL message
+ * @data: Here this is the originator being dumped
+ */
+static void batadv_slap_orig_dump(struct batadv_priv *bat_priv,
+				  struct genl_info *info,
+				  struct nlattr **attr,
+				  struct sk_buff *skb,
+				  void *data)
+{
+	struct batadv_slap_segid *segid;
+	struct batadv_orig_node *orig;
+
+	orig = data;
+
+	rcu_read_lock();
+	segid = rcu_dereference(orig->slap_segid);
+	/* nla_put() failure is deliberately ignored: the attribute is then
+	 * simply absent from the message
+	 */
+	nla_put(skb,
+		BATADV_ATTR_FBX_SLAP_MASTER_ADDRESS,
+		ETH_ALEN,
+		segid->addr);
+	rcu_read_unlock();
+}
+
+/**
+ * batadv_slap_set_iface() - Enable SLAP on interface
+ *
+ * @bat_priv: batadv instance
+ * @net: network namespace to resolve @ifindex in
+ * @ifindex: Interface index to activate SLAP on, if 0 disable SLAP
+ * @return: 0 on success, -EBUSY if SLAP is already enabled on an interface,
+ * -EINVAL otherwise
+ */
+static int batadv_slap_set_iface(struct batadv_priv *bat_priv, struct net *net,
+				 int ifindex)
+{
+	struct batadv_hard_iface *hard_iface = NULL;
+	struct net_device *hard_dev = NULL;
+	int ret = -EINVAL;
+
+	if (ifindex) {
+		hard_dev = dev_get_by_index(net, ifindex);
+		if (!hard_dev)
+			goto out;
+		hard_iface = batadv_hardif_get_by_netdev(hard_dev);
+		if (!hard_iface)
+			goto out;
+	}
+
+	/*
+	 * locking bh is not strictly needed here, but slap_lock is also used to
+	 * protect master that needs it
+	 */
+	spin_lock_bh(&bat_priv->slap_lock);
+	/* refuse to switch interfaces while SLAP is already enabled */
+	if (bat_priv->slap_iface && hard_iface) {
+		spin_unlock_bh(&bat_priv->slap_lock);
+		ret = -EBUSY;
+		goto out;
+	}
+	rcu_assign_pointer(bat_priv->slap_iface, hard_iface);
+	spin_unlock_bh(&bat_priv->slap_lock);
+
+	ret = 0;
+	/* ifindex == 0: SLAP disabled, nothing more to do */
+	if (!hard_iface)
+		goto out;
+
+	slap_debug(bat_priv, "Enable SLAP on %s\n",
+		   dev_name(&hard_iface->net_dev->dev));
+
+	batadv_slap_start_announce(bat_priv);
+
+out:
+	/* NOTE(review): both may be NULL here — relies on the _put helpers
+	 * tolerating NULL; confirm
+	 */
+	batadv_hardif_put(hard_iface);
+	dev_put(hard_dev);
+	return ret;
+}
+
+/**
+ * batadv_slap_set_prio() - Set SLAP prio
+ *
+ * @bat_priv: batadv instance
+ * @prio: New SLAP prio
+ *
+ * Rebuilds our local SLAP ID with the new prio (keeping the current address)
+ * and re-runs the master election with it.
+ */
+static void batadv_slap_set_prio(struct batadv_priv *bat_priv, u32 prio)
+{
+	struct batadv_slap_id *oid, *id;
+	bool cur_master;
+
+	id = kmalloc(sizeof(*id), GFP_KERNEL);
+	if (!id)
+		return;
+
+	spin_lock_bh(&bat_priv->slap_lock);
+	cur_master = batadv_slap_is_master(bat_priv);
+	oid = slap_id_get_rcu(bat_priv);
+	/* keep the current address, only the prio changes */
+	batadv_slap_id_init(id, bat_priv, prio, oid->addr);
+	slap_debug(bat_priv, "New SLAP ID %u/%pM\n", id->prio, id->addr);
+	rcu_assign_pointer(bat_priv->slap_id, id);
+	/* if we were master, stay master with the new ID; otherwise only
+	 * take over if the new ID actually wins the election
+	 */
+	if (cur_master || batadv_slap_id_is_new_master(bat_priv, id))
+		_batadv_slap_set_master(bat_priv, id);
+	spin_unlock_bh(&bat_priv->slap_lock);
+
+	/* drop the reference that bat_priv->slap_id held on the old ID */
+	batadv_slap_id_put(oid);
+}
+
+/**
+ * batadv_slap_mesh_parse() - Set SLAP specific mesh information
+ * @bat_priv: bat priv with all the soft interface information
+ * @info: NL message info
+ * @attrs: FBX specific NL attributes to set
+ */
+static void batadv_slap_mesh_parse(struct batadv_priv *bat_priv,
+				   struct genl_info *info,
+				   struct nlattr **attrs)
+{
+	int ifindex;
+	u32 prio;
+
+	/* GET requests carry no attributes to parse */
+	if (!info || !attrs)
+		return;
+
+	if (attrs[BATADV_ATTR_FBX_SLAP_IFINDEX]) {
+		ifindex = nla_get_u32(attrs[BATADV_ATTR_FBX_SLAP_IFINDEX]);
+		batadv_slap_set_iface(bat_priv, genl_info_net(info), ifindex);
+	}
+
+	if (attrs[BATADV_ATTR_FBX_SLAP_PRIO]) {
+		prio = nla_get_u32(attrs[BATADV_ATTR_FBX_SLAP_PRIO]);
+		batadv_slap_set_prio(bat_priv, prio);
+	}
+}
+
+/**
+ * batadv_slap_mesh_fill() - Get SLAP specific mesh information
+ * @bat_priv: bat priv with all the soft interface information
+ * @skb: NL response (may be NULL for SET-only requests)
+ */
+static void batadv_slap_mesh_fill(struct batadv_priv *bat_priv,
+				  struct sk_buff *skb)
+{
+	struct batadv_hard_iface *slap;
+	struct batadv_slap_id *master;
+	struct batadv_slap_id *local;
+
+	if (!skb)
+		return;
+
+	/* nla_put*() failures are ignored: attributes are simply absent */
+	rcu_read_lock();
+	master = slap_master_get_rcu(bat_priv);
+	local = slap_id_get_rcu(bat_priv);
+	nla_put(skb,
+		BATADV_ATTR_FBX_SLAP_MASTER_ADDRESS,
+		ETH_ALEN,
+		master->addr);
+	nla_put_u32(skb,
+		    BATADV_ATTR_FBX_SLAP_MASTER_PRIO,
+		    master->prio);
+	nla_put_u32(skb,
+		    BATADV_ATTR_FBX_SLAP_PRIO,
+		    local->prio);
+
+	/* only report an ifindex when SLAP is enabled on an interface */
+	slap = rcu_dereference(bat_priv->slap_iface);
+	if (slap)
+		nla_put_u32(skb,
+			    BATADV_ATTR_FBX_SLAP_IFINDEX,
+			    slap->net_dev->ifindex);
+	rcu_read_unlock();
+}
+
+/**
+ * batadv_slap_mesh_nl() - Do SLAP softif related NL work
+ * @bat_priv: bat priv with all the soft interface information
+ * @info: NL message info
+ * @attrs: FBX specific NL attributes to set
+ * @skb: NL response
+ * @data: Callback specific data, not used here
+ */
+static void batadv_slap_mesh_nl(struct batadv_priv *bat_priv,
+				struct genl_info *info,
+				struct nlattr **attrs,
+				struct sk_buff *skb,
+				void *data)
+{
+	/* shared handler: parse handles SET, fill handles GET */
+	batadv_slap_mesh_parse(bat_priv, info, attrs);
+	batadv_slap_mesh_fill(bat_priv, skb);
+}
+
+/**
+ * batadv_slap_new_priv() - init SLAP specific data for a bat_priv
+ * @bat_priv: the bat_priv instance to init SLAP for
+ *
+ * Allocates the local SLAP ID, the announce workqueue and the template
+ * announce skb, then registers the SLAP master TVLV handler.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure
+ */
+static int batadv_slap_new_priv(struct batadv_priv *bat_priv)
+{
+	char const *batdev = dev_name(&bat_priv->soft_iface->dev);
+	struct batadv_fbx_slap_packet *slap_pkt;
+	struct batadv_slap_id *id;
+	struct sk_buff *skb;
+	u8 addr[ETH_ALEN];
+	size_t size;
+
+	id = kmalloc(sizeof(*id), GFP_KERNEL);
+	if (!id)
+		goto err;
+
+	/* TODO batdev NULL here */
+	bat_priv->slap_wq = alloc_workqueue("%s-slap-wq", 0, 0, batdev);
+	if (!bat_priv->slap_wq)
+		goto slap_id_free;
+
+	/* template announce packet: headroom for the ethernet header, the
+	 * payload prio is patched in at send time
+	 */
+	size = ETH_HLEN + NET_IP_ALIGN + BATADV_FBX_SLAP_HLEN;
+	skb = dev_alloc_skb(size);
+	if (!skb)
+		goto workqueue_free;
+
+	skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
+	slap_pkt = skb_put_zero(skb, BATADV_FBX_SLAP_HLEN);
+	slap_pkt->hdr.packet_type = BATADV_FBX;
+	slap_pkt->hdr.version = BATADV_COMPAT_VERSION;
+	slap_pkt->hdr.subtype = BATADV_FBX_SUB_SLAP;
+	rcu_assign_pointer(bat_priv->slap_skb, skb);
+
+	/* until another node wins the election we are our own master */
+	ether_addr_copy(addr, bat_priv->soft_iface->dev_addr);
+	batadv_slap_id_init(id, bat_priv, SLAP_PRIO_DEFAULT, addr);
+
+	rcu_assign_pointer(bat_priv->slap_id, id);
+	rcu_assign_pointer(bat_priv->slap_master, id);
+	rcu_assign_pointer(bat_priv->slap_iface, NULL);
+	spin_lock_init(&bat_priv->slap_lock);
+	batadv_fbx_tvlv_handler_register(bat_priv,
+					 BATADV_FBX_TVLV_SLAP_MASTER,
+					 BATADV_FBX_TVLV_SLAP_VERSION,
+					 batadv_slap_ogm_master_recv, NULL);
+
+	return 0;
+
+workqueue_free:
+	destroy_workqueue(bat_priv->slap_wq);
+slap_id_free:
+	kfree(id);
+err:
+	return -ENOMEM;
+}
+
+/**
+ * batadv_slap_free_priv() - free SLAP specific data of a bat_priv
+ * @bat_priv: the bat_priv instance to clean SLAP for
+ */
+static void batadv_slap_free_priv(struct batadv_priv *bat_priv)
+{
+	struct batadv_slap_id *id, *master;
+	batadv_fbx_tvlv_handler_unregister(bat_priv,
+					   BATADV_FBX_TVLV_SLAP_MASTER,
+					   BATADV_FBX_TVLV_SLAP_VERSION);
+	rcu_read_lock();
+	master = slap_master_get_rcu(bat_priv);
+	id = slap_id_get_rcu(bat_priv);
+	/* a foreign master holds an expire-work reference; force it now */
+	if (id != master)
+		batadv_slap_id_force_expire(bat_priv, master);
+	rcu_read_unlock();
+	/* drain any pending announce/expire work before tearing down */
+	flush_workqueue(bat_priv->slap_wq);
+	destroy_workqueue(bat_priv->slap_wq);
+	/* NOTE(review): slap_id and slap_skb are accessed without
+	 * rcu_dereference here — presumably safe because no other user can
+	 * exist at teardown time; confirm
+	 */
+	batadv_slap_id_put(bat_priv->slap_id);
+	batadv_fbx_tvlv_container_unregister(bat_priv,
+					    BATADV_FBX_TVLV_SLAP_MASTER,
+					    BATADV_FBX_TVLV_SLAP_VERSION);
+	kfree_skb(bat_priv->slap_skb);
+}
+
+/**
+ * batadv_slap_init() - module-wide SLAP init
+ *
+ * Registers the SLAP packet receive handler.  Takes no parameters; per
+ * soft-interface setup happens in batadv_slap_new_priv().
+ *
+ * Return: 0
+ */
+static int __init batadv_slap_init(void)
+{
+	/* on-wire SLAP packet layout must stay exactly 12 bytes */
+	BUILD_BUG_ON(sizeof(struct batadv_fbx_slap_packet) != 12);
+	batadv_fbx_recv_handler_register(BATADV_FBX_SUB_SLAP,
+					 batadv_slap_recv_packet);
+	return 0;
+}
+
+/**
+ * batadv_slap_exit() - module-wide SLAP cleanup
+ *
+ * Unregisters the SLAP packet receive handler.  Takes no parameters.
+ */
+static void __exit batadv_slap_exit(void)
+{
+	batadv_fbx_recv_handler_unregister(BATADV_FBX_SUB_SLAP);
+}
+
+/* SLAP hooks into the generic FBX batman-adv module framework */
+struct batadv_fbx_module_ops const batadv_slap_ops = {
+	.init = batadv_slap_init,
+	.exit = batadv_slap_exit,
+	.new_priv = batadv_slap_new_priv,
+	.free_priv = batadv_slap_free_priv,
+	.neigh_init = batadv_slap_neigh_init,
+	.neigh_release = batadv_slap_neigh_release,
+	.orig_init = batadv_slap_orig_init,
+	.orig_release = batadv_slap_orig_release,
+	.primary_update = batadv_slap_primary_update,
+	.tt_local_add = batadv_slap_tt_local_add,
+	.tt_local_del = batadv_slap_tt_local_del,
+	.tt_global_add = batadv_slap_tt_global_add,
+	.tt_global_del = batadv_slap_tt_global_del,
+	.shortcut = batadv_slap_shortcut,
+	.check_skb_rx = batadv_slap_check_skb_rx,
+	.check_skb_tx = batadv_slap_check_skb_tx,
+};
+
+/* netlink commands handled (or augmented) by SLAP */
+struct batadv_fbx_nl_ops const batadv_slap_nl_ops[] = {
+	{
+		.cmd = BATADV_CMD_SET_MESH,
+		.hdl = batadv_slap_mesh_nl,
+	},
+	{
+		.cmd = BATADV_CMD_GET_MESH,
+		.hdl = batadv_slap_mesh_nl,
+	},
+	{
+		.cmd = BATADV_CMD_GET_ORIGINATORS,
+		.hdl = batadv_slap_orig_dump,
+	},
+};
+
+/* module descriptor registered with the FBX framework */
+struct batadv_fbx_module const batadv_slap_module = {
+	.name = "slap",
+	.ops = &batadv_slap_ops,
+	.nl_ops = batadv_slap_nl_ops,
+	.nl_ops_sz = ARRAY_SIZE(batadv_slap_nl_ops),
+};
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/net/core/gso.c	2023-11-07 13:38:44.066256801 +0100
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/skbuff.h>
+#include <linux/sctp.h>
+#include <net/gso.h>
+#include <net/gro.h>
+
+/**
+ *	skb_eth_gso_segment - segmentation handler for ethernet protocols.
+ *	@skb: buffer to segment
+ *	@features: features for the output path (see dev->features)
+ *	@type: Ethernet Protocol ID
+ *
+ *	Return: segment list, or ERR_PTR(-EPROTONOSUPPORT) when no
+ *	registered packet_offload matches @type.
+ */
+struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
+				    netdev_features_t features, __be16 type)
+{
+	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
+	struct packet_offload *ptype;
+
+	/* walk registered protocol offloads under RCU; first match wins */
+	rcu_read_lock();
+	list_for_each_entry_rcu(ptype, &offload_base, list) {
+		if (ptype->type == type && ptype->callbacks.gso_segment) {
+			segs = ptype->callbacks.gso_segment(skb, features);
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return segs;
+}
+EXPORT_SYMBOL(skb_eth_gso_segment);
+
+/**
+ *	skb_mac_gso_segment - mac layer segmentation handler.
+ *	@skb: buffer to segment
+ *	@features: features for the output path (see dev->features)
+ *
+ *	Resolves the L3 protocol (skipping any VLAN tags), pulls the skb
+ *	to the network header, and dispatches to the matching protocol
+ *	offload. The data pointer is pushed back to the mac header before
+ *	returning regardless of outcome.
+ */
+struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
+				    netdev_features_t features)
+{
+	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
+	struct packet_offload *ptype;
+	int vlan_depth = skb->mac_len;
+	/* skb_network_protocol() also accounts VLAN headers in vlan_depth */
+	__be16 type = skb_network_protocol(skb, &vlan_depth);
+
+	if (unlikely(!type))
+		return ERR_PTR(-EINVAL);
+
+	__skb_pull(skb, vlan_depth);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ptype, &offload_base, list) {
+		if (ptype->type == type && ptype->callbacks.gso_segment) {
+			segs = ptype->callbacks.gso_segment(skb, features);
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	/* restore data pointer to the mac header for the caller */
+	__skb_push(skb, skb->data - skb_mac_header(skb));
+
+	return segs;
+}
+EXPORT_SYMBOL(skb_mac_gso_segment);
+/* openvswitch calls this on rx path, so we need a different check.
+ *
+ * On tx, anything not already CHECKSUM_PARTIAL/UNNECESSARY needs its
+ * checksum initialized during segmentation; on rx only CHECKSUM_NONE does.
+ */
+static bool skb_needs_check(const struct sk_buff *skb, bool tx_path)
+{
+	if (tx_path)
+		return skb->ip_summed != CHECKSUM_PARTIAL &&
+		       skb->ip_summed != CHECKSUM_UNNECESSARY;
+
+	return skb->ip_summed == CHECKSUM_NONE;
+}
+
+/**
+ *	__skb_gso_segment - Perform segmentation on skb.
+ *	@skb: buffer to segment
+ *	@features: features for the output path (see dev->features)
+ *	@tx_path: whether it is called in TX path
+ *
+ *	This function segments the given skb and returns a list of segments.
+ *
+ *	It may return NULL if the skb requires no segmentation.  This is
+ *	only possible when GSO is used for verifying header integrity.
+ *
+ *	Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb.
+ */
+struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+				  netdev_features_t features, bool tx_path)
+{
+	struct sk_buff *segs;
+
+	if (unlikely(skb_needs_check(skb, tx_path))) {
+		int err;
+
+		/* We're going to init ->check field in TCP or UDP header */
+		err = skb_cow_head(skb, 0);
+		if (err < 0)
+			return ERR_PTR(err);
+	}
+
+	/* Only report GSO partial support if it will enable us to
+	 * support segmentation on this frame without needing additional
+	 * work.
+	 */
+	if (features & NETIF_F_GSO_PARTIAL) {
+		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
+		struct net_device *dev = skb->dev;
+
+		partial_features |= dev->features & dev->gso_partial_features;
+		if (!skb_gso_ok(skb, features | partial_features))
+			features &= ~NETIF_F_GSO_PARTIAL;
+	}
+
+	/* the GSO control block must fit in skb->cb past the offset
+	 * reserved for earlier users */
+	BUILD_BUG_ON(SKB_GSO_CB_OFFSET +
+		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
+
+	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
+	SKB_GSO_CB(skb)->encap_level = 0;
+
+	skb_reset_mac_header(skb);
+	skb_reset_mac_len(skb);
+
+	segs = skb_mac_gso_segment(skb, features);
+
+	/* segmentation should have resolved the checksum; warn if not */
+	if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
+		skb_warn_bad_offload(skb);
+
+	return segs;
+}
+EXPORT_SYMBOL(__skb_gso_segment);
+
+/**
+ * skb_gso_transport_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_transport_seglen is used to determine the real size of the
+ * individual segments, including Layer4 headers (TCP/UDP).
+ *
+ * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
+ */
+static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+{
+	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+	unsigned int thlen = 0;
+
+	if (skb->encapsulation) {
+		/* outer L4 header (e.g. tunnel header) sits between the
+		 * outer and inner transport headers */
+		thlen = skb_inner_transport_header(skb) -
+			skb_transport_header(skb);
+
+		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+			thlen += inner_tcp_hdrlen(skb);
+	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
+		thlen = tcp_hdrlen(skb);
+	} else if (unlikely(skb_is_gso_sctp(skb))) {
+		thlen = sizeof(struct sctphdr);
+	} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
+		thlen = sizeof(struct udphdr);
+	}
+	/* UFO sets gso_size to the size of the fragmentation
+	 * payload, i.e. the size of the L4 (UDP) header is already
+	 * accounted for.
+	 */
+	return thlen + shinfo->gso_size;
+}
+
+/**
+ * skb_gso_network_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_network_seglen is used to determine the real size of the
+ * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
+ *
+ * The MAC/L2 header is not accounted for.
+ */
+static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
+{
+	/* network header size = distance from L3 to L4 header */
+	unsigned int hdr_len = skb_transport_header(skb) -
+			       skb_network_header(skb);
+
+	return hdr_len + skb_gso_transport_seglen(skb);
+}
+
+/**
+ * skb_gso_mac_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_mac_seglen is used to determine the real size of the
+ * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
+ * headers (TCP/UDP).
+ */
+static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
+{
+	/* L2 + L3 header size = distance from mac to L4 header */
+	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+
+	return hdr_len + skb_gso_transport_seglen(skb);
+}
+
+/**
+ * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
+ *
+ * There are a couple of instances where we have a GSO skb, and we
+ * want to determine what size it would be after it is segmented.
+ *
+ * We might want to check:
+ * -    L3+L4+payload size (e.g. IP forwarding)
+ * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
+ *
+ * This is a helper to do that correctly considering GSO_BY_FRAGS.
+ *
+ * @skb: GSO skb
+ *
+ * @seg_len: The segmented length (from skb_gso_*_seglen). In the
+ *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
+ *
+ * @max_len: The maximum permissible length.
+ *
+ * Returns true if the segmented length <= max length.
+ */
+static inline bool skb_gso_size_check(const struct sk_buff *skb,
+				      unsigned int seg_len,
+				      unsigned int max_len) {
+	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+	const struct sk_buff *iter;
+
+	if (shinfo->gso_size != GSO_BY_FRAGS)
+		return seg_len <= max_len;
+
+	/* Undo this so we can re-use header sizes */
+	seg_len -= GSO_BY_FRAGS;
+
+	/* each frag becomes its own segment: header size + frag headlen */
+	skb_walk_frags(skb, iter) {
+		if (seg_len + skb_headlen(iter) > max_len)
+			return false;
+	}
+
+	return true;
+}
+
+/**
+ * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
+ *
+ * @skb: GSO skb
+ * @mtu: MTU to validate against
+ *
+ * skb_gso_validate_network_len validates if a given skb will fit a
+ * wanted MTU once split. It considers L3 headers, L4 headers, and the
+ * payload.
+ */
+bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
+{
+	return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
+}
+EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
+
+/**
+ * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
+ *
+ * @skb: GSO skb
+ * @len: length to validate against
+ *
+ * skb_gso_validate_mac_len validates if a given skb will fit a wanted
+ * length once split, including L2, L3 and L4 headers and the payload.
+ */
+bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
+{
+	return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
+}
+EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
+
diff -Nruw linux-6.4-fbx/net/fbxatm./Kconfig linux-6.4-fbx/net/fbxatm/Kconfig
--- linux-6.4-fbx/net/fbxatm./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxatm/Kconfig	2023-02-27 17:13:13.141475977 +0100
@@ -0,0 +1,28 @@
+# Freebox ATM stack. Either the full local stack (FBXATM_STACK) or a
+# remote stub is built; FBXATM_REMOTE is an internal helper symbol
+# selected by whichever side needs the remote transport.
+menuconfig FBXATM
+	tristate "Freebox Asynchronous Transfer Mode (ATM)"
+
+if FBXATM
+
+# hidden symbol: remote transport support, selected below
+config FBXATM_REMOTE
+	bool
+
+choice
+	prompt "mode"
+	default FBXATM_STACK
+
+config FBXATM_STACK
+	bool "standard"
+
+config FBXATM_REMOTE_STUB
+	bool "remote stub"
+	select FBXATM_REMOTE
+
+endchoice
+
+# driver exposing a remote fbxatm device on top of the local stack
+config FBXATM_REMOTE_DRIVER
+	tristate "remote fbxatm driver"
+	depends on FBXATM_STACK
+	select FBXATM_REMOTE
+	select OF
+
+endif
diff -Nruw linux-6.4-fbx/net/fbxatm./Makefile linux-6.4-fbx/net/fbxatm/Makefile
--- linux-6.4-fbx/net/fbxatm./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxatm/Makefile	2023-02-27 17:10:06.964489879 +0100
@@ -0,0 +1,18 @@
+# kbuild rules for the Freebox ATM stack.
+obj-$(CONFIG_FBXATM) += fbxatm.o
+obj-$(CONFIG_FBXATM_REMOTE) += fbxatm_remote.o
+
+# procfs/sysfs glue is always part of fbxatm.o
+fbxatm-y := fbxatm_procfs.o fbxatm_sysfs.o
+
+# full local stack: core, RFC2684 encap, device layer, OAM CRC-10
+ifeq ($(CONFIG_FBXATM_STACK),y)
+fbxatm-y += 	fbxatm_core.o	\
+		fbxatm_2684.o	\
+		fbxatm_dev.o	\
+		crc10.o
+fbxatm-$(CONFIG_PPP) += fbxatm_pppoa.o
+endif
+
+# remote-stub mode replaces the local stack
+ifeq ($(CONFIG_FBXATM_REMOTE_STUB),y)
+fbxatm-y += fbxatm_remote_stub.o
+endif
+
+obj-$(CONFIG_FBXATM_REMOTE_DRIVER) += fbxatm_remote_driver.o
diff -Nruw linux-6.4-fbx/net/fbxatm./crc10.c linux-6.4-fbx/net/fbxatm/crc10.c
--- linux-6.4-fbx/net/fbxatm./crc10.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxatm/crc10.c	2023-02-27 17:10:06.964489879 +0100
@@ -0,0 +1,48 @@
+#include <linux/types.h>
+
+/* Byte-at-a-time CRC-10 lookup table (10-bit values).
+ * NOTE(review): presumably the ATM OAM CRC-10 polynomial
+ * x^10+x^9+x^5+x^4+x+1 (ITU-T I.610) — confirm against spec. */
+static const u16 crc10_table[256] = {
+	0x0000, 0x0233, 0x0255, 0x0066, 0x0299, 0x00aa, 0x00cc, 0x02ff,
+	0x0301, 0x0132, 0x0154, 0x0367, 0x0198, 0x03ab, 0x03cd, 0x01fe,
+	0x0031, 0x0202, 0x0264, 0x0057, 0x02a8, 0x009b, 0x00fd, 0x02ce,
+	0x0330, 0x0103, 0x0165, 0x0356, 0x01a9, 0x039a, 0x03fc, 0x01cf,
+	0x0062, 0x0251, 0x0237, 0x0004, 0x02fb, 0x00c8, 0x00ae, 0x029d,
+	0x0363, 0x0150, 0x0136, 0x0305, 0x01fa, 0x03c9, 0x03af, 0x019c,
+	0x0053, 0x0260, 0x0206, 0x0035, 0x02ca, 0x00f9, 0x009f, 0x02ac,
+	0x0352, 0x0161, 0x0107, 0x0334, 0x01cb, 0x03f8, 0x039e, 0x01ad,
+	0x00c4, 0x02f7, 0x0291, 0x00a2, 0x025d, 0x006e, 0x0008, 0x023b,
+	0x03c5, 0x01f6, 0x0190, 0x03a3, 0x015c, 0x036f, 0x0309, 0x013a,
+	0x00f5, 0x02c6, 0x02a0, 0x0093, 0x026c, 0x005f, 0x0039, 0x020a,
+	0x03f4, 0x01c7, 0x01a1, 0x0392, 0x016d, 0x035e, 0x0338, 0x010b,
+	0x00a6, 0x0295, 0x02f3, 0x00c0, 0x023f, 0x000c, 0x006a, 0x0259,
+	0x03a7, 0x0194, 0x01f2, 0x03c1, 0x013e, 0x030d, 0x036b, 0x0158,
+	0x0097, 0x02a4, 0x02c2, 0x00f1, 0x020e, 0x003d, 0x005b, 0x0268,
+	0x0396, 0x01a5, 0x01c3, 0x03f0, 0x010f, 0x033c, 0x035a, 0x0169,
+	0x0188, 0x03bb, 0x03dd, 0x01ee, 0x0311, 0x0122, 0x0144, 0x0377,
+	0x0289, 0x00ba, 0x00dc, 0x02ef, 0x0010, 0x0223, 0x0245, 0x0076,
+	0x01b9, 0x038a, 0x03ec, 0x01df, 0x0320, 0x0113, 0x0175, 0x0346,
+	0x02b8, 0x008b, 0x00ed, 0x02de, 0x0021, 0x0212, 0x0274, 0x0047,
+	0x01ea, 0x03d9, 0x03bf, 0x018c, 0x0373, 0x0140, 0x0126, 0x0315,
+	0x02eb, 0x00d8, 0x00be, 0x028d, 0x0072, 0x0241, 0x0227, 0x0014,
+	0x01db, 0x03e8, 0x038e, 0x01bd, 0x0342, 0x0171, 0x0117, 0x0324,
+	0x02da, 0x00e9, 0x008f, 0x02bc, 0x0043, 0x0270, 0x0216, 0x0025,
+	0x014c, 0x037f, 0x0319, 0x012a, 0x03d5, 0x01e6, 0x0180, 0x03b3,
+	0x024d, 0x007e, 0x0018, 0x022b, 0x00d4, 0x02e7, 0x0281, 0x00b2,
+	0x017d, 0x034e, 0x0328, 0x011b, 0x03e4, 0x01d7, 0x01b1, 0x0382,
+	0x027c, 0x004f, 0x0029, 0x021a, 0x00e5, 0x02d6, 0x02b0, 0x0083,
+	0x012e, 0x031d, 0x037b, 0x0148, 0x03b7, 0x0184, 0x01e2, 0x03d1,
+	0x022f, 0x001c, 0x007a, 0x0249, 0x00b6, 0x0285, 0x02e3, 0x00d0,
+	0x011f, 0x032c, 0x034a, 0x0179, 0x0386, 0x01b5, 0x01d3, 0x03e0,
+	0x021e, 0x002d, 0x004b, 0x0278, 0x0087, 0x02b4, 0x02d2, 0x00e1,
+};
+
+/* Fold one byte into a running 10-bit CRC; the table indexes on the
+ * top 8 bits of the CRC (crc >> 2) and the result stays within 10 bits. */
+static u16 crc10_byte(u16 crc, const u8 c)
+{
+	return ((crc << 8) & 0x3ff) ^ crc10_table[(crc >> 2) & 0xff] ^ c;
+}
+
+/* Compute the CRC-10 of @buffer (@len bytes) starting from seed @crc. */
+u16 crc10(u16 crc, const u8 *buffer, size_t len)
+{
+	while (len--)
+		crc = crc10_byte(crc, *buffer++);
+	return crc;
+}
diff -Nruw linux-6.4-fbx/net/fbxatm./fbxatm_2684.c linux-6.4-fbx/net/fbxatm/fbxatm_2684.c
--- linux-6.4-fbx/net/fbxatm./fbxatm_2684.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxatm/fbxatm_2684.c	2023-05-22 20:06:45.395884444 +0200
@@ -0,0 +1,851 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/rtnetlink.h>
+#include <linux/pkt_sched.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/icmp.h>
+#include <linux/proc_fs.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <linux/fbxatm_dev.h>
+
+#include "fbxatm_priv.h"
+
+#define PFX	"fbxatm_2684: "
+
+/* all 2684 devices, protected by fbxatm_2684_mutex */
+static LIST_HEAD(fbxatm_2684_dev_list);
+static DEFINE_MUTEX(fbxatm_2684_mutex);
+
+/* headroom needed in front of the payload for each encapsulation:
+ * LLC/SNAP header + PID + pad (10 bytes), or the 2-byte zero pad of
+ * VC-mux bridged mode */
+#define LLC_NEEDED_HEADROOM		10
+#define VCMUX_BRIDGED_NEEDED_HEADROOM	2
+
+/* RFC 2684 LLC/SNAP header byte sequences */
+#define LLC			0xaa, 0xaa, 0x03
+#define SNAP_BRIDGED		0x00, 0x80, 0xc2
+#define SNAP_ROUTED		0x00, 0x00, 0x00
+#define PID_ETHERNET_NOFCS	0x00, 0x07
+
+/* LLC-bridged 802.3 header incl. 2-byte pad, and LLC-routed prefix */
+static u8 llc_bridged_802d3_pad[] = { LLC, SNAP_BRIDGED, PID_ETHERNET_NOFCS,
+				      0, 0 };
+static u8 llc_snap_routed[] = { LLC, SNAP_ROUTED };
+
+/*
+ * private data for 2684 vcc
+ */
+struct fbxatm_2684_vcc;
+
+/* one tx queue, bound 1:1 to an underlying fbxatm vcc */
+struct fbxatm_2684_queue {
+	struct fbxatm_vcc		*vcc;		/* bound vcc */
+	unsigned int			queue_idx;	/* netdev subqueue index */
+	struct fbxatm_2684_vcc		*priv;		/* back pointer */
+};
+
+struct fbxatm_2684_vcc {
+	struct fbxatm_2684_queue	queues[FBXATM_2684_MAX_VCC];
+	size_t				queue_count;
+
+	struct net_device		*dev;
+	struct fbxatm_2684_vcc_params	params;
+
+	/* serializes xmit vs tx-done wakeups */
+	spinlock_t			tx_lock;
+
+	struct rtnl_link_stats64	stats;
+
+	/* entry in fbxatm_2684_dev_list */
+	struct list_head		next;
+};
+
+/* last source IP seen sending RTP-range UDP (debug aid, see warn_if_tel) */
+static uint32_t tel_last_ip;
+
+/* Debug heuristic: record the source IP of hosts repeatedly sending UDP
+ * to ports 5004-5020 (RTP-ish range) in quick succession, exported via
+ * the "tel/bad_ip" procfs file.  skb->data is expected to point at the
+ * IP header (routed VC-mux payload).
+ * NOTE(review): there is no explicit check that skb_headlen() covers the
+ * IP header itself before iph->protocol/ihl are read — confirm callers
+ * guarantee a linear IP header. */
+static void warn_if_tel(struct fbxatm_2684_vcc *vcc, struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	struct udphdr *udph = NULL;
+
+	iph = (struct iphdr *)skb->data;
+
+	if (iph->protocol != IPPROTO_UDP)
+		return;
+
+	if (skb_headlen(skb) < (iph->ihl * 4) + sizeof (struct udphdr))
+		return;
+
+	udph = (struct udphdr *)((unsigned char *)iph + (iph->ihl * 4));
+	if (ntohs(udph->dest) >= 5004 && ntohs(udph->dest) <= 5020) {
+		static u32 last_ip;
+		static unsigned long last_time;
+		unsigned long now;
+
+		now = jiffies;
+		/* same source seen again within 2s: count it, and latch
+		 * the address after more than 5 consecutive hits */
+		if ((last_ip == iph->saddr &&
+		     (!last_time || time_before(now, last_time + 2 * HZ)))) {
+			static unsigned int consecutive;
+			consecutive++;
+			if (consecutive > 5) {
+				tel_last_ip = iph->saddr;
+				consecutive = 0;
+			}
+		}
+
+		last_time = now;
+		last_ip = iph->saddr;
+	}
+}
+
+/*
+ * procfs read callback: print the latched "bad" telephony source IP
+ */
+static int tel_proc_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "%pI4\n", &tel_last_ip);
+	return 0;
+}
+
+/* any write resets the latched address; input content is ignored */
+static ssize_t tel_proc_write(struct file *file, const char __user *ubuf,
+			      size_t len, loff_t *off)
+{
+	tel_last_ip = 0;
+	return len;
+}
+
+static int tel_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, tel_proc_show, pde_data(inode));
+}
+
+/* proc_ops for the tel/bad_ip debug file */
+static const struct proc_ops tel_proc_fops = {
+	.proc_open	= tel_proc_open,
+	.proc_read	= seq_read,
+	.proc_write	= tel_proc_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+};
+
+/*
+ * fbxatm stack receive callback, called from softirq.
+ *
+ * Strips the RFC 2684 encapsulation according to the vcc's configured
+ * encap/payload mode, sets skb->protocol/dev/pkt_type, updates rx stats
+ * and hands the frame to netif_rx().  Malformed frames are counted as
+ * rx_errors and dropped.
+ */
+static void vcc_rx_callback(struct sk_buff *skb, void *data)
+{
+	struct fbxatm_2684_queue *queue;
+	struct fbxatm_2684_vcc *priv;
+
+	queue = (struct fbxatm_2684_queue *)data;
+	priv = queue->priv;
+
+	switch (priv->params.encap) {
+	case FBXATM_E2684_VCMUX:
+		switch (priv->params.payload) {
+		case FBXATM_P2684_BRIDGE:
+			/* assume 802.3, need to remove 2 bytes zero
+			 * padding */
+			if (skb->len < 2 || memcmp(skb->data, "\0\0", 2))
+				goto drop;
+			skb_pull(skb, 2);
+			skb->protocol = eth_type_trans(skb, priv->dev);
+			/* NOTE(review): this memset zeroes 2 bytes of
+			 * payload *after* eth_type_trans pulled the
+			 * ethernet header — intent unclear, verify */
+			memset(skb->data, 0, 2);
+			break;
+
+		case FBXATM_P2684_ROUTED:
+			/* kludge to detect ipv6 or ipv4 */
+			if (skb->len && (skb->data[0] & 0xf0) == 0x60)
+				skb->protocol = htons(ETH_P_IPV6);
+			else
+				skb->protocol = htons(ETH_P_IP);
+			skb_reset_mac_header(skb);
+			break;
+		}
+		break;
+
+	case FBXATM_E2684_LLC:
+		switch (priv->params.payload) {
+		case FBXATM_P2684_BRIDGE:
+		{
+			/* recognize only 802.3 */
+			if (skb->len < sizeof(llc_bridged_802d3_pad))
+				goto drop;
+
+			if (memcmp(skb->data, llc_bridged_802d3_pad, 7))
+				goto drop;
+
+			/* don't check the last bytes of pid, it can
+			 * be 1 or 7 depending on the presence of
+			 * FCS */
+			skb_pull(skb, sizeof(llc_bridged_802d3_pad));
+			skb->protocol = eth_type_trans(skb, priv->dev);
+			break;
+		}
+
+		case FBXATM_P2684_ROUTED:
+		{
+			u16 proto;
+			unsigned int offset;
+
+			if (skb->len < sizeof(llc_snap_routed) + 2)
+				goto drop;
+
+			/* ethertype follows the LLC/SNAP prefix */
+			offset = sizeof (llc_snap_routed);
+			proto = skb->data[offset] << 8;
+			proto |= skb->data[offset + 1];
+
+			/* NOTE(review): proto is a host-order value
+			 * assigned to the __be16 skb->protocol without
+			 * htons() — correct only on big-endian; verify
+			 * target endianness */
+			skb->protocol = proto;
+			skb_pull(skb, sizeof(llc_snap_routed) + 2);
+			skb_reset_mac_header(skb);
+			break;
+		}
+		}
+		break;
+	}
+
+	skb->dev = priv->dev;
+	skb->pkt_type = PACKET_HOST;
+	priv->stats.rx_bytes += skb->len;
+	priv->stats.rx_packets++;
+
+	/* telephony debug heuristic only on the classic 8.35 routed vcc */
+	if (priv->params.encap == FBXATM_E2684_VCMUX &&
+	    priv->params.payload == FBXATM_P2684_ROUTED &&
+	    queue->vcc->vpi == 8 && queue->vcc->vci == 35)
+		warn_if_tel(priv, skb);
+
+	netif_rx(skb);
+	return;
+
+drop:
+	priv->stats.rx_errors++;
+	dev_kfree_skb(skb);
+}
+
+/*
+ * fbxatm stack tx done callback, called from softirq.
+ *
+ * Re-wakes the netdev subqueue that xmit stopped when the vcc queue
+ * filled up; tx_lock pairs with the stop in fbxatm_2684_netdev_xmit.
+ */
+static void vcc_tx_done_callback(void *data)
+{
+	struct fbxatm_2684_queue *queue;
+	struct fbxatm_2684_vcc *priv;
+
+	queue = (struct fbxatm_2684_queue *)data;
+	priv = queue->priv;
+
+	spin_lock(&priv->tx_lock);
+	if (__netif_subqueue_stopped(priv->dev, queue->queue_idx))
+		netif_wake_subqueue(priv->dev, queue->queue_idx);
+	spin_unlock(&priv->tx_lock);
+}
+
+/*
+ * fbxatm stack callback when vcc link changes: mirror the vcc link
+ * state onto the netdevice carrier (cell rates are unused here)
+ */
+static void vcc_link_change(void *data, int link,
+			    unsigned int rx_cell_rate,
+			    unsigned int tx_cell_rate)
+{
+	struct fbxatm_2684_queue *queue;
+	struct fbxatm_2684_vcc *priv;
+
+	queue = (struct fbxatm_2684_queue *)data;
+	priv = queue->priv;
+
+	if (link)
+		netif_carrier_on(priv->dev);
+	else
+		netif_carrier_off(priv->dev);
+}
+
+/*
+ * vcc user ops, callbacks invoked by the fbxatm stack for this vcc
+ */
+static const struct fbxatm_vcc_uops fbxatm_2684_uops = {
+	.link_change	= vcc_link_change,
+	.rx_pkt		= vcc_rx_callback,
+	.tx_done	= vcc_tx_done_callback,
+};
+
+/*
+ * netdevice ->ndo_select_queue() callback
+ */
+static u16 fbxatm_2684_netdev_select_queue(struct net_device *dev,
+					   struct sk_buff *skb,
+					   struct net_device *sb_dev)
+{
+	/* force lower band to avoid kernel doing round robin */
+	return 0;
+}
+
+/*
+ * netdevice xmit callback.
+ *
+ * Prepends the RFC 2684 encapsulation (reallocating headroom if the
+ * caller didn't leave enough), then hands the frame to fbxatm_send()
+ * on the vcc mapped to the skb's subqueue.  Stops the subqueue when
+ * the vcc queue fills; vcc_tx_done_callback() re-wakes it.
+ * Always returns NETDEV_TX_OK (drops are accounted in tx_errors).
+ */
+static int fbxatm_2684_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct fbxatm_2684_vcc *priv;
+	int ret, queue_idx;
+	unsigned int needed_headroom;
+	struct fbxatm_2684_queue *queue;
+	unsigned int len;
+
+	priv = netdev_priv(dev);
+	queue_idx = skb_get_queue_mapping(skb);
+	queue = &priv->queues[queue_idx];
+
+	/*
+	 * check if we have to expand skb head
+	 */
+	needed_headroom = 0;
+	if (priv->params.encap == FBXATM_E2684_VCMUX) {
+		if (priv->params.payload == FBXATM_P2684_BRIDGE)
+			needed_headroom = VCMUX_BRIDGED_NEEDED_HEADROOM;
+	} else
+		needed_headroom = LLC_NEEDED_HEADROOM;
+
+	if (skb_headroom(skb) < needed_headroom) {
+		struct sk_buff *nskb;
+		unsigned int new_head;
+
+		new_head = skb_headroom(skb) + needed_headroom;
+		nskb = skb_realloc_headroom(skb, new_head);
+		/* original skb is consumed either way */
+		dev_kfree_skb(skb);
+		if (!nskb)
+			goto dropped;
+		skb = nskb;
+	}
+
+	/* prepend the encapsulation header */
+	switch (priv->params.encap) {
+	case FBXATM_E2684_VCMUX:
+		switch (priv->params.payload) {
+		case FBXATM_P2684_BRIDGE:
+			/* 2-byte zero pad before the 802.3 frame */
+			skb_push(skb, 2);
+			memset(skb->data, 0, 2);
+			break;
+		case FBXATM_P2684_ROUTED:
+			/* nothing to do */
+			break;
+		}
+		break;
+
+	case FBXATM_E2684_LLC:
+		switch (priv->params.payload) {
+		case FBXATM_P2684_BRIDGE:
+			skb_push(skb, sizeof(llc_bridged_802d3_pad));
+			memcpy(skb->data, llc_bridged_802d3_pad,
+			       sizeof(llc_bridged_802d3_pad));
+			break;
+
+		case FBXATM_P2684_ROUTED:
+		{
+			unsigned int offset;
+
+			/* LLC/SNAP prefix followed by the ethertype */
+			skb_push(skb, sizeof(llc_snap_routed));
+			memcpy(skb->data, llc_snap_routed,
+			       sizeof(llc_snap_routed));
+
+			offset = sizeof (llc_snap_routed);
+			skb->data[offset] = (skb->protocol >> 8) & 0xff;
+			skb->data[offset + 1] = skb->protocol & 0xff;
+			break;
+		}
+		}
+		break;
+	}
+
+	spin_lock(&priv->tx_lock);
+
+	/* remember len: skb may be gone after a successful send */
+	len = skb->len;
+	ret = fbxatm_send(queue->vcc, skb);
+	if (ret) {
+		/* packet was not sent, queue is full */
+		netif_stop_subqueue(dev, queue_idx);
+		spin_unlock(&priv->tx_lock);
+		WARN_ONCE(1, "fbxatm2684_xmit called with full queue");
+		priv->stats.tx_errors++;
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	priv->stats.tx_bytes += len;
+	priv->stats.tx_packets++;
+
+	/* check if queue is full */
+	if (fbxatm_vcc_queue_full(queue->vcc))
+		netif_stop_subqueue(dev, queue_idx);
+	spin_unlock(&priv->tx_lock);
+
+	return NETDEV_TX_OK;
+
+dropped:
+	priv->stats.tx_errors++;
+	return NETDEV_TX_OK;
+}
+
+/*
+ * netdevice get_stats callback: copy out the locally maintained
+ * rtnl_link_stats64 counters
+ */
+static void
+fbxatm_2684_netdev_get_stats64(struct net_device *dev,
+			       struct rtnl_link_stats64 *stats)
+{
+	struct fbxatm_2684_vcc *priv;
+	priv = netdev_priv(dev);
+	memcpy(stats, &priv->stats, sizeof (*stats));
+}
+
+/*
+ * netdevice setup callback for bridge encap: plain ethernet device
+ */
+static void setup_bridged(struct net_device *dev)
+{
+	ether_setup(dev);
+}
+
+/*
+ * netdevice setup callback for routed encap: point-to-point device
+ * with no link-layer header or address
+ */
+static void setup_routed(struct net_device *dev)
+{
+	dev->type		= ARPHRD_PPP;
+	dev->hard_header_len	= 0;
+	dev->mtu		= 1500;
+	dev->addr_len		= 0;
+	dev->tx_queue_len	= 128;
+	dev->flags		= IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+}
+
+/* netdev ops for 2684 devices */
+static const struct net_device_ops fbxatm_2684_ops = {
+	.ndo_start_xmit		= fbxatm_2684_netdev_xmit,
+	.ndo_get_stats64	= fbxatm_2684_netdev_get_stats64,
+	.ndo_select_queue	= fbxatm_2684_netdev_select_queue,
+};
+
+/*
+ * sysfs callback, show encapsulation ("llc" or "vcmux")
+ */
+static ssize_t show_encap(struct device *d,
+			  struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_2684_vcc *priv = netdev_priv(to_net_dev(d));
+
+	switch (priv->params.encap) {
+	case FBXATM_E2684_LLC:
+		return sprintf(buf, "llc\n");
+	case FBXATM_E2684_VCMUX:
+	default:
+		return sprintf(buf, "vcmux\n");
+	}
+}
+
+static DEVICE_ATTR(encap, S_IRUGO, show_encap, NULL);
+
+/*
+ * sysfs callback, show payload type ("bridge" or "routed")
+ */
+static ssize_t show_payload(struct device *d,
+			    struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_2684_vcc *priv = netdev_priv(to_net_dev(d));
+
+	switch (priv->params.payload) {
+	case FBXATM_P2684_BRIDGE:
+		return sprintf(buf, "bridge\n");
+	case FBXATM_P2684_ROUTED:
+	default:
+		return sprintf(buf, "routed\n");
+	}
+}
+
+static DEVICE_ATTR(payload, S_IRUGO, show_payload, NULL);
+
+/*
+ * sysfs callback, show first vcc id as "ifindex.vpi.vci"
+ */
+static ssize_t show_vcc(struct device *d,
+			struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_2684_vcc *priv = netdev_priv(to_net_dev(d));
+
+	return sprintf(buf, "%u.%u.%u\n",
+		       priv->queues[0].vcc->adev->ifindex,
+		       priv->queues[0].vcc->vpi, priv->queues[0].vcc->vci);
+}
+
+static DEVICE_ATTR(vcc, S_IRUGO, show_vcc, NULL);
+
+/* attributes grouped under the "fbxatm2684" sysfs directory */
+static struct attribute *fbxatm2684_attrs[] = {
+	&dev_attr_encap.attr,
+	&dev_attr_payload.attr,
+	&dev_attr_vcc.attr,
+	NULL
+};
+
+static struct attribute_group fbxatm2684_group = {
+	.name = "fbxatm2684",
+	.attrs = fbxatm2684_attrs,
+};
+
+/*
+ * create sysfs files for 2684 device: the attribute group plus a
+ * "fbxatm_dev" symlink to the underlying fbxatm device.
+ * Returns 0 or a negative errno; cleans up the group on link failure.
+ */
+static int vcc2684_sysfs_register(struct fbxatm_2684_vcc *priv,
+				  struct net_device *dev)
+{
+	int ret;
+
+	ret = sysfs_create_group(&dev->dev.kobj, &fbxatm2684_group);
+	if (ret)
+		goto out1;
+
+	ret = sysfs_create_link(&dev->dev.kobj,
+				&priv->queues[0].vcc->adev->dev.kobj,
+				"fbxatm_dev");
+	if (ret)
+		goto out2;
+
+	return 0;
+
+out2:
+	sysfs_remove_group(&dev->dev.kobj, &fbxatm2684_group);
+out1:
+	return ret;
+}
+
+/*
+ * remove sysfs files for 2684 device (reverse of the above)
+ */
+static void vcc2684_sysfs_unregister(struct fbxatm_2684_vcc *priv,
+				     struct net_device *dev)
+{
+	sysfs_remove_group(&dev->dev.kobj, &fbxatm2684_group);
+	sysfs_remove_link(&dev->dev.kobj, "fbxatm_dev");
+}
+
+/*
+ * register netdevice & sysfs attributes.
+ *
+ * Resolves a "name%d"-style template first, then registers the device
+ * and its sysfs files under a single rtnl_lock section so userspace
+ * never sees the device without its attributes.
+ */
+static int register_2684_netdev(struct fbxatm_2684_vcc *priv,
+				struct net_device *dev)
+{
+	int ret;
+
+	/* hold rtnl while registering netdevice and creating sysfs
+	 * files to avoid race */
+	rtnl_lock();
+
+	if (strchr(dev->name, '%')) {
+		ret = dev_alloc_name(dev, dev->name);
+		if (ret < 0)
+			goto out;
+	}
+
+	ret = register_netdevice(dev);
+	if (ret)
+		goto out;
+
+	ret = vcc2684_sysfs_register(priv, dev);
+	if (ret)
+		goto out_unregister;
+
+	rtnl_unlock();
+	return 0;
+
+out_unregister:
+	unregister_netdevice(dev);
+
+out:
+	rtnl_unlock();
+	return ret;
+}
+
+/*
+ * create a RFC2684 encapsulation on given vcc(s).
+ *
+ * Validates @params, binds every requested vcc, allocates a multiqueue
+ * netdevice (one tx queue per vcc), sizes its headroom for the chosen
+ * encapsulation and registers it.  Caller holds fbxatm_2684_mutex.
+ * Returns 0 or a negative errno; on failure all bound vccs are
+ * released and the netdevice freed.
+ */
+static int __create_2684_vcc(const struct fbxatm_2684_vcc_params *params)
+{
+	struct fbxatm_2684_vcc *priv;
+	struct fbxatm_vcc *vccs[FBXATM_2684_MAX_VCC];
+	struct net_device *dev = NULL;
+	void (*netdev_setup_cb)(struct net_device *dev);
+	unsigned int headroom;
+	size_t i;
+	int ret;
+
+	/* sanity check */
+	switch (params->encap) {
+	case FBXATM_E2684_VCMUX:
+	case FBXATM_E2684_LLC:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* payload type selects the netdev setup callback */
+	switch (params->payload) {
+	case FBXATM_P2684_BRIDGE:
+		netdev_setup_cb = setup_bridged;
+		break;
+	case FBXATM_P2684_ROUTED:
+		netdev_setup_cb = setup_routed;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!params->dev_name[0])
+		return -EINVAL;
+
+	/* bind to vcc */
+	memset(vccs, 0, sizeof (vccs));
+	for (i = 0; i < params->id_count; i++) {
+		struct fbxatm_vcc *vcc;
+
+		vcc = fbxatm_bind_to_vcc(&params->id_list[i],
+					 FBXATM_VCC_USER_2684);
+		if (IS_ERR(vcc)) {
+			ret = PTR_ERR(vcc);
+			goto fail;
+		}
+		vccs[i] = vcc;
+	}
+
+	/* create netdevice: one tx queue per bound vcc, single rx queue */
+	dev = alloc_netdev_mqs(sizeof(*priv), params->dev_name,
+			       NET_NAME_UNKNOWN, netdev_setup_cb,
+			       params->id_count, 1);
+	if (!dev) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	netif_set_real_num_tx_queues(dev, params->id_count);
+	netif_set_real_num_rx_queues(dev, 1);
+
+	priv = netdev_priv(dev);
+	memset(priv, 0, sizeof (*priv));
+	memcpy(&priv->params, params, sizeof (*params));
+	memcpy(dev->name, priv->params.dev_name, IFNAMSIZ);
+
+	spin_lock_init(&priv->tx_lock);
+	priv->dev = dev;
+	for (i = 0; i < params->id_count; i++) {
+		priv->queues[i].vcc = vccs[i];
+		priv->queues[i].queue_idx = i;
+		priv->queues[i].priv = priv;
+	}
+	priv->queue_count = params->id_count;
+
+	if (!is_zero_ether_addr(params->perm_addr))
+		memcpy(dev->perm_addr, params->perm_addr, 6);
+
+	dev->netdev_ops = &fbxatm_2684_ops;
+
+	/* make sure kernel generated packet have correct headroom for
+	 * encapsulation/payload */
+	headroom = 0;
+	for (i = 0; i < params->id_count; i++)
+		headroom = max_t(int, headroom, vccs[i]->adev->tx_headroom);
+	dev->hard_header_len += headroom;
+
+
+	switch (params->encap) {
+	case FBXATM_E2684_VCMUX:
+	default:
+		if (params->payload == FBXATM_P2684_BRIDGE)
+			dev->hard_header_len += VCMUX_BRIDGED_NEEDED_HEADROOM;
+		break;
+	case FBXATM_E2684_LLC:
+		dev->hard_header_len += LLC_NEEDED_HEADROOM;
+		break;
+	}
+
+	ret = register_2684_netdev(priv, dev);
+	if (ret)
+		goto fail;
+
+	/* carrier state follows the first vcc's link */
+	if (fbxatm_vcc_link_is_up(vccs[0])) {
+		netif_carrier_on(dev);
+		netif_tx_start_all_queues(dev);
+	} else
+		netif_carrier_off(dev);
+	list_add_tail(&priv->next, &fbxatm_2684_dev_list);
+
+	/* install callbacks last: rx can fire as soon as uops are set */
+	for (i = 0; i < params->id_count; i++)
+		fbxatm_set_uops(vccs[i], &fbxatm_2684_uops, &priv->queues[i]);
+
+	return 0;
+
+fail:
+	for (i = 0; i < ARRAY_SIZE(vccs); i++) {
+		if (vccs[i])
+			fbxatm_unbind_vcc(vccs[i]);
+	}
+	if (dev)
+		free_netdev(dev);
+	return ret;
+}
+
+/*
+ * find 2684 vcc from id list: returns the first device owning a queue
+ * whose vcc matches any entry of @id, or NULL.  Caller holds
+ * fbxatm_2684_mutex.
+ * NOTE(review): the match compares id[j].dev_idx but id[0].vpi/vci —
+ * mixed indices look unintentional for count > 1; confirm.
+ */
+static struct fbxatm_2684_vcc *__find_2684_vcc(const struct fbxatm_vcc_id *id,
+					       size_t count)
+{
+	struct fbxatm_2684_vcc *priv;
+	size_t i;
+
+	/* find it */
+	list_for_each_entry(priv, &fbxatm_2684_dev_list, next) {
+		for (i = 0; i < priv->queue_count; i++) {
+			struct fbxatm_2684_queue *q;
+			size_t j;
+
+			q = &priv->queues[i];
+
+			for (j = 0; j < count; j++) {
+				if (q->vcc->adev->ifindex == id[j].dev_idx &&
+				    q->vcc->vpi == id[0].vpi &&
+				    q->vcc->vci == id[0].vci)
+					return priv;
+			}
+		}
+	}
+	return NULL;
+}
+
+/*
+ * create a RFC2684 encapsulation on given vcc (locked wrapper)
+ */
+static int create_2684_vcc(const struct fbxatm_2684_vcc_params *params)
+{
+	int ret;
+
+	mutex_lock(&fbxatm_2684_mutex);
+	ret = __create_2684_vcc(params);
+	mutex_unlock(&fbxatm_2684_mutex);
+	return ret;
+}
+
+/*
+ * remove RFC2684 encapsulation from given vcc.
+ *
+ * Closes the netdevice first so the xmit path can no longer run, then
+ * unbinds the vccs and tears the device down.  Caller holds
+ * fbxatm_2684_mutex.  Returns -ENOENT if no matching device exists.
+ */
+static int __remove_2684_vcc(const struct fbxatm_2684_vcc_params *params)
+{
+	struct fbxatm_2684_vcc *priv;
+	size_t i;
+
+	priv = __find_2684_vcc(params->id_list, params->id_count);
+	if (!priv)
+		return -ENOENT;
+
+	/* close netdevice, fbxatm_2684_netdev_xmit cannot be called
+	 * again */
+	rtnl_lock();
+	dev_close(priv->dev);
+	rtnl_unlock();
+
+	for (i = 0; i < priv->queue_count; i++)
+		fbxatm_unbind_vcc(priv->queues[i].vcc);
+	vcc2684_sysfs_unregister(priv, priv->dev);
+	unregister_netdev(priv->dev);
+	list_del(&priv->next);
+	free_netdev(priv->dev);
+	return 0;
+}
+
+/*
+ * remove RFC2684 encapsulation from given vcc (locked wrapper)
+ */
+static int remove_2684_vcc(const struct fbxatm_2684_vcc_params *params)
+{
+	int ret;
+
+	mutex_lock(&fbxatm_2684_mutex);
+	ret = __remove_2684_vcc(params);
+	mutex_unlock(&fbxatm_2684_mutex);
+	return ret;
+}
+
+/*
+ * 2684 related ioctl handler: add/delete an encapsulation, or query
+ * the parameters of an existing one.  Returns -ENOIOCTLCMD for
+ * commands it does not own so the core can try other handlers.
+ */
+static int fbxatm_2684_ioctl(struct socket *sock,
+			     unsigned int cmd, void __user *useraddr)
+{
+	int ret;
+
+	ret = 0;
+
+	switch (cmd) {
+	case FBXATM_2684_IOCADD:
+	case FBXATM_2684_IOCDEL:
+	{
+		struct fbxatm_2684_vcc_params params;
+
+		if (copy_from_user(&params, useraddr, sizeof(params)))
+			return -EFAULT;
+
+		if (cmd == FBXATM_2684_IOCADD)
+			ret = create_2684_vcc(&params);
+		else
+			ret = remove_2684_vcc(&params);
+		break;
+	}
+
+	case FBXATM_2684_IOCGET:
+	{
+		struct fbxatm_2684_vcc_params params;
+		struct fbxatm_2684_vcc *priv;
+
+		if (copy_from_user(&params, useraddr, sizeof(params)))
+			return -EFAULT;
+
+		/* look up by id and copy back the stored parameters,
+		 * including the resolved device name */
+		mutex_lock(&fbxatm_2684_mutex);
+		priv = __find_2684_vcc(params.id_list, params.id_count);
+		if (!priv)
+			ret = -ENOENT;
+		else {
+			memcpy(&params, &priv->params, sizeof (params));
+			memcpy(params.dev_name, priv->dev->name, IFNAMSIZ);
+		}
+		mutex_unlock(&fbxatm_2684_mutex);
+
+		if (ret)
+			return ret;
+
+		if (copy_to_user(useraddr, &params, sizeof(params)))
+			return -EFAULT;
+		break;
+	}
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+
+	return ret;
+}
+
+/* registration record for the fbxatm core ioctl dispatcher */
+static struct fbxatm_ioctl fbxatm_2684_ioctl_ops = {
+	.handler	= fbxatm_2684_ioctl,
+	.owner		= THIS_MODULE,
+};
+
+/*
+ * Module init: create the "tel/bad_ip" debug proc file and register the
+ * 2684 ioctl handler.
+ * NOTE(review): the proc entries are not removed on the second failure
+ * path nor in fbxatm_2684_exit() — confirm this is handled by the
+ * caller's proc teardown.
+ */
+int __init fbxatm_2684_init(void)
+{
+	struct proc_dir_entry *root, *proc;
+
+	root = fbxatm_proc_misc_register("tel");
+	if (!root)
+		return -ENOMEM;
+
+	/* tel debug crap */
+	proc = proc_create_data("bad_ip", 0666, root, &tel_proc_fops, NULL);
+	if (!proc)
+		return -ENOMEM;
+
+	fbxatm_register_ioctl(&fbxatm_2684_ioctl_ops);
+	return 0;
+}
+
+/* Module exit: unregister the ioctl handler. */
+void fbxatm_2684_exit(void)
+{
+	fbxatm_unregister_ioctl(&fbxatm_2684_ioctl_ops);
+}
diff -Nruw linux-6.4-fbx/net/fbxatm./fbxatm_core.c linux-6.4-fbx/net/fbxatm/fbxatm_core.c
--- linux-6.4-fbx/net/fbxatm./fbxatm_core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxatm/fbxatm_core.c	2023-02-27 17:13:39.810190205 +0100
@@ -0,0 +1,204 @@
+#include <linux/init.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/fbxatm.h>
+#include <linux/fbxatm_dev.h>
+#include <linux/module.h>
+#include <net/sock.h>
+#include "fbxatm_priv.h"
+
+static DEFINE_MUTEX(ioctl_mutex);
+static LIST_HEAD(ioctl_list);
+
+void fbxatm_register_ioctl(struct fbxatm_ioctl *ioctl)
+{
+	mutex_lock(&ioctl_mutex);
+	list_add_tail(&ioctl->next, &ioctl_list);
+	mutex_unlock(&ioctl_mutex);
+}
+
+void fbxatm_unregister_ioctl(struct fbxatm_ioctl *ioctl)
+{
+	mutex_lock(&ioctl_mutex);
+	list_del(&ioctl->next);
+	mutex_unlock(&ioctl_mutex);
+}
+
+static int fbxatm_sock_ioctl(struct socket *sock, unsigned int cmd,
+			     unsigned long arg)
+{
+	struct fbxatm_ioctl *ioctl;
+	void __user *useraddr;
+	int ret;
+
+	/* sanity check */
+	useraddr = (void __user *)arg;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	ret = -ENOIOCTLCMD;
+	mutex_lock(&ioctl_mutex);
+
+	list_for_each_entry(ioctl, &ioctl_list, next) {
+		if (!ioctl->handler)
+			continue;
+
+		if (!try_module_get(ioctl->owner))
+			continue;
+
+		ret = ioctl->handler(sock, cmd, useraddr);
+		module_put(ioctl->owner);
+		if (ret != -ENOIOCTLCMD)
+			break;
+	}
+	mutex_unlock(&ioctl_mutex);
+
+	return ret;
+}
+
+static int fbxatm_sock_release(struct socket *sock)
+{
+	struct fbxatm_ioctl *ioctl;
+	struct sock *sk = sock->sk;
+
+	mutex_lock(&ioctl_mutex);
+
+	list_for_each_entry(ioctl, &ioctl_list, next) {
+		if (!ioctl->release)
+			continue;
+
+		if (!try_module_get(ioctl->owner))
+			continue;
+
+		ioctl->release(sock);
+		module_put(ioctl->owner);
+	}
+	mutex_unlock(&ioctl_mutex);
+
+	if (sk)
+		sock_put(sk);
+
+	return 0;
+}
+
+static const struct proto_ops fbxatm_proto_ops = {
+	.family		= PF_FBXATM,
+
+	.release =	fbxatm_sock_release,
+	.ioctl =	fbxatm_sock_ioctl,
+
+	.bind =		sock_no_bind,
+	.connect =	sock_no_connect,
+	.socketpair =	sock_no_socketpair,
+	.accept =	sock_no_accept,
+	.getname =	sock_no_getname,
+	.listen =	sock_no_listen,
+	.shutdown =	sock_no_shutdown,
+	.sendmsg =	sock_no_sendmsg,
+	.recvmsg =	sock_no_recvmsg,
+	.mmap =		sock_no_mmap,
+	.sendpage =	sock_no_sendpage,
+	.owner		= THIS_MODULE,
+};
+
+static struct proto fbxatm_proto = {
+        .name           = "fbxatm",
+        .owner          =  THIS_MODULE,
+        .obj_size       = sizeof (struct sock),
+};
+
+static int fbxatm_sock_create(struct net *net, struct socket *sock,
+			      int protocol, int kern)
+{
+	struct sock *sk;
+
+        sk = sk_alloc(net, PF_FBXATM, GFP_KERNEL, &fbxatm_proto, kern);
+	if (!sk)
+		return -ENOMEM;
+
+        sock_init_data(sock, sk);
+        sock->state = SS_UNCONNECTED;
+        sock->ops = &fbxatm_proto_ops;
+	return 0;
+}
+
+static struct net_proto_family fbxatm_family_ops = {
+	.family = PF_FBXATM,
+	.create = fbxatm_sock_create,
+	.owner = THIS_MODULE,
+};
+
+
+static int __init fbxatm_init(void)
+{
+	int ret;
+
+	printk(KERN_INFO "Freebox ATM stack\n");
+	ret = fbxatm_sysfs_init();
+	if (ret)
+		return ret;
+
+	ret = fbxatm_procfs_init();
+	if (ret)
+		goto fail_sysfs;
+
+	ret = fbxatm_vcc_init();
+	if (ret)
+		goto fail_procfs;
+
+	ret = fbxatm_2684_init();
+	if (ret)
+		goto fail_vcc;
+
+	ret = fbxatm_pppoa_init();
+	if (ret)
+		goto fail_2684;
+
+	ret = proto_register(&fbxatm_proto, 0);
+	if (ret)
+		goto fail_pppoa;
+
+	ret = sock_register(&fbxatm_family_ops);
+	if (ret)
+		goto fail_proto;
+
+	return 0;
+
+fail_proto:
+	proto_unregister(&fbxatm_proto);
+
+fail_pppoa:
+	fbxatm_pppoa_exit();
+
+fail_2684:
+	fbxatm_2684_exit();
+
+fail_vcc:
+	fbxatm_vcc_exit();
+
+fail_procfs:
+	fbxatm_procfs_exit();
+
+fail_sysfs:
+	fbxatm_sysfs_exit();
+	printk(KERN_ERR "failed to initialize Freebox ATM stack\n");
+	return ret;
+}
+
+static void __exit fbxatm_exit(void)
+{
+	sock_unregister(PF_FBXATM);
+	proto_unregister(&fbxatm_proto);
+	fbxatm_pppoa_exit();
+	fbxatm_2684_exit();
+	fbxatm_vcc_exit();
+	fbxatm_procfs_exit();
+	fbxatm_sysfs_exit();
+}
+
+subsys_initcall(fbxatm_init);
+module_exit(fbxatm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(PF_FBXATM);
diff -Nruw linux-6.4-fbx/net/fbxatm/fbxatm_dev.c linux-6.4-fbx/net/fbxatm/fbxatm_dev.c
--- linux-6.4-fbx/net/fbxatm/fbxatm_dev.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxatm/fbxatm_dev.c	2023-02-27 17:17:27.132278230 +0100
@@ -0,0 +1,983 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/fbxatm_dev.h>
+#include "fbxatm_priv.h"
+
+/*
+ * list of registered device & lock
+ */
+LIST_HEAD(fbxatm_dev_list);
+
+/*
+ * big "rtnl" lock
+ */
+DEFINE_MUTEX(fbxatm_mutex);
+static int fbxatm_ifindex = -1;
+
+/*
+ * find device by index
+ */
+static struct fbxatm_dev *__fbxatm_dev_get_by_index(int ifindex)
+{
+	struct fbxatm_dev *pdev;
+
+	list_for_each_entry(pdev, &fbxatm_dev_list, next) {
+		if (pdev->ifindex == ifindex)
+			return pdev;
+	}
+	return NULL;
+}
+
+/*
+ * find vcc by id
+ */
+static struct fbxatm_vcc *
+__fbxatm_vcc_get_by_id(const struct fbxatm_vcc_id *id)
+{
+	struct fbxatm_dev *adev;
+	struct fbxatm_vcc *vcc;
+	int found;
+
+	adev = __fbxatm_dev_get_by_index(id->dev_idx);
+	if (!adev)
+		return ERR_PTR(-ENODEV);
+
+	found = 0;
+	spin_lock_bh(&adev->vcc_list_lock);
+	list_for_each_entry(vcc, &adev->vcc_list, next) {
+		if (vcc->vpi != id->vpi || vcc->vci != id->vci)
+			continue;
+		found = 1;
+		break;
+	}
+	spin_unlock_bh(&adev->vcc_list_lock);
+
+	if (found)
+		return vcc;
+	return ERR_PTR(-ENOENT);
+}
+
+/*
+ * allocate device
+ */
+struct fbxatm_dev *fbxatm_alloc_device(int sizeof_priv)
+{
+	unsigned int size;
+
+	size = sizeof(struct fbxatm_dev) + sizeof_priv + FBXATMDEV_ALIGN;
+	return kzalloc(size, GFP_KERNEL);
+}
+
+EXPORT_SYMBOL(fbxatm_alloc_device);
+
+/*
+ * calculate crc10 of oam cell
+ */
+static void compute_oam_crc10(struct fbxatm_oam_cell_payload *cell)
+{
+	u8 *pdu;
+	u16 crc;
+
+	/* crc10 does not cover header */
+	pdu = (u8 *)&cell->cell_type;
+	memset(cell->crc10, 0, 2);
+
+	crc = crc10(0, pdu, sizeof (*cell) - sizeof (cell->cell_hdr));
+	cell->crc10[0] = crc >> 8;
+	cell->crc10[1] = crc & 0xff;
+}
+
+/*
+ * check crc10 of oam cell
+ */
+static int check_oam_crc10(struct fbxatm_oam_cell_payload *cell)
+{
+	u8 *pdu;
+	u16 crc;
+
+	pdu = (u8 *)&cell->cell_type;
+
+	crc = (cell->crc10[0] << 8) | cell->crc10[1];
+	memset(cell->crc10, 0, 2);
+
+	if (crc != crc10(0, pdu, sizeof (*cell) - sizeof (cell->cell_hdr)))
+		return 1;
+
+	return 0;
+}
+
+/*
+ * send an oam ping and wait for answer
+ */
+static int do_oam_ping(struct fbxatm_oam_ping *ping)
+{
+	struct fbxatm_dev *adev;
+	struct fbxatm_oam_cell *oam_cell;
+	struct fbxatm_oam_cell_payload *cell;
+	u8 *hdr;
+	int ret;
+
+	switch (ping->req.type) {
+	case FBXATM_OAM_PING_SEG_F4:
+	case FBXATM_OAM_PING_E2E_F4:
+		return -ENOTSUPP;
+	case FBXATM_OAM_PING_SEG_F5:
+	case FBXATM_OAM_PING_E2E_F5:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* find device */
+	mutex_lock(&fbxatm_mutex);
+	adev = __fbxatm_dev_get_by_index(ping->req.id.dev_idx);
+	if (!adev) {
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+
+	/* if f5, vcc need to be opened */
+	switch (ping->req.type) {
+	case FBXATM_OAM_PING_SEG_F5:
+	case FBXATM_OAM_PING_E2E_F5:
+	{
+		struct fbxatm_vcc *vcc;
+
+		vcc = __fbxatm_vcc_get_by_id(&ping->req.id);
+		if (IS_ERR(vcc)) {
+			ret = -ENETDOWN;
+			goto out_unlock;
+		}
+		break;
+	}
+
+	default:
+		break;
+	}
+
+	ping->correlation_id = ++adev->oam_correlation_id;
+
+	/* prepare atm oam cell and send it */
+	oam_cell = kmalloc(sizeof (*oam_cell), GFP_KERNEL);
+	if (!oam_cell) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+	cell = &oam_cell->payload;
+
+	hdr = cell->cell_hdr;
+	ATM_SET_GFC(hdr, 0);
+
+	ATM_SET_VPI(hdr, ping->req.id.vpi);
+	ATM_SET_VCI(hdr, ping->req.id.vci);
+	if (ping->req.type == FBXATM_OAM_PING_E2E_F5)
+		ATM_SET_PT(hdr, OAM_PTI_END2END_F5);
+	else
+		ATM_SET_PT(hdr, OAM_PTI_SEG_F5);
+	ATM_SET_CLP(hdr, 0);
+	ATM_SET_HEC(hdr, 0);
+
+	cell->cell_type = (OAM_TYPE_FAULT_MANAGEMENT << OAM_TYPE_SHIFT) |
+		(FUNC_TYPE_OAM_LOOPBACK << FUNC_TYPE_SHIFT);
+	cell->loopback_indication = 1;
+
+	memcpy(cell->correlation_tag, &ping->correlation_id,
+	       sizeof (cell->correlation_tag));
+	memcpy(cell->loopback_id, ping->req.llid, sizeof (ping->req.llid));
+	memset(cell->source_id, 0x6a, sizeof (cell->source_id));
+	memset(cell->reserved, 0x6a, sizeof (cell->reserved));
+
+	compute_oam_crc10(cell);
+
+	spin_lock_bh(&adev->dev_link_lock);
+	if (!test_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags))
+		ret = -ENETDOWN;
+	else
+		ret = adev->ops->send_oam(adev, oam_cell);
+	spin_unlock_bh(&adev->dev_link_lock);
+	if (ret)
+		goto out_unlock;
+
+	/* wait for an answer */
+	adev->stats.tx_f5_oam++;
+	list_add(&ping->next, &adev->oam_pending_ping);
+	ping->replied = 0;
+	init_waitqueue_head(&ping->wq);
+	mutex_unlock(&fbxatm_mutex);
+
+	ret = wait_event_interruptible_timeout(ping->wq, ping->replied,
+					       HZ * 5);
+	list_del(&ping->next);
+
+	if (ret == -ERESTARTSYS)
+		return ret;
+
+	if (ping->replied < 0) {
+		/* ping failed */
+		return ping->replied;
+	}
+
+	if (!ping->replied) {
+		/* timeout */
+		return -ETIME;
+	}
+
+	return 0;
+
+
+out_unlock:
+	mutex_unlock(&fbxatm_mutex);
+	return ret;
+}
+
+/*
+ * special llid values
+ */
+static const u8 llid_all1[16] = { 0xff, 0xff, 0xff, 0xff,
+				  0xff, 0xff, 0xff, 0xff,
+				  0xff, 0xff, 0xff, 0xff,
+				  0xff, 0xff, 0xff, 0xff };
+
+static const u8 llid_all0[16] = { 0 };
+
+/*
+ * handle incoming oam cell
+ */
+static void handle_oam_cell(struct fbxatm_dev *adev,
+			    struct fbxatm_oam_cell *oam_cell)
+{
+	struct fbxatm_oam_cell_payload *cell;
+	u16 vci;
+	u8 *hdr, pt, oam, func;
+
+	/* check CRC10 */
+	cell = &oam_cell->payload;
+	if (check_oam_crc10(cell)) {
+		adev->stats.rx_bad_oam++;
+		goto out;
+	}
+
+	/* drop f4 cells */
+	hdr = cell->cell_hdr;
+	vci = ATM_GET_VCI(hdr);
+
+	if (vci == OAM_VCI_SEG_F4 || vci == OAM_VCI_END2END_F4) {
+		adev->stats.rx_f4_oam++;
+		goto out;
+	}
+
+	/* keep f5 cells only */
+	pt = ATM_GET_PT(hdr);
+	if (pt != OAM_PTI_SEG_F5 && pt != OAM_PTI_END2END_F5) {
+		adev->stats.rx_other_oam++;
+		goto out;
+	}
+
+	adev->stats.rx_f5_oam++;
+
+	/* keep oam loopback type only */
+	oam = (cell->cell_type & OAM_TYPE_MASK) >> OAM_TYPE_SHIFT;
+	func = (cell->cell_type & FUNC_TYPE_MASK) >> FUNC_TYPE_SHIFT;
+
+	if (oam != OAM_TYPE_FAULT_MANAGEMENT ||
+	    func != FUNC_TYPE_OAM_LOOPBACK) {
+		adev->stats.rx_other_oam++;
+		goto out;
+	}
+
+	if (cell->loopback_indication & 1) {
+		int match, ret;
+
+		/* request, check for llid match */
+		match = 0;
+		switch (pt) {
+		case OAM_PTI_SEG_F5:
+			/* 0x0 or 0xffffffff */
+			if (!memcmp(cell->loopback_id, llid_all0,
+				    sizeof (llid_all0)))
+				match = 1;
+			fallthrough;
+
+		case OAM_PTI_END2END_F5:
+			/* 0xffffffff only */
+			if (!memcmp(cell->loopback_id, llid_all1,
+				    sizeof (llid_all1)))
+				match = 1;
+			break;
+		}
+
+		if (!match) {
+			adev->stats.rx_bad_llid_oam++;
+			goto out;
+		}
+
+		/* ok, update llid and answer */
+		cell->loopback_indication = 0;
+		memcpy(cell->loopback_id, llid_all1, sizeof (llid_all1));
+		compute_oam_crc10(cell);
+
+		spin_lock_bh(&adev->dev_link_lock);
+		if (test_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags))
+			ret = adev->ops->send_oam(adev, oam_cell);
+		else
+			ret = -ENETDOWN;
+		spin_unlock_bh(&adev->dev_link_lock);
+
+		if (!ret) {
+			/* send successful, don't free cell */
+			adev->stats.tx_f5_oam++;
+			return;
+		}
+
+	} else {
+		struct fbxatm_oam_ping *ping;
+
+		/* reply, find a matching sender */
+		spin_lock_bh(&adev->oam_list_lock);
+		list_for_each_entry(ping, &adev->oam_pending_ping, next) {
+
+			/* compare correlation id */
+			if (memcmp(&ping->correlation_id,
+				   cell->correlation_tag,
+				   sizeof (cell->correlation_tag)))
+				continue;
+
+			/* compare ping type */
+			switch (ping->req.type) {
+			case FBXATM_OAM_PING_SEG_F5:
+				if (pt != OAM_PTI_SEG_F5)
+					continue;
+				break;
+			case FBXATM_OAM_PING_E2E_F5:
+				if (pt != OAM_PTI_END2END_F5)
+					continue;
+				break;
+			default:
+				break;
+			}
+
+			/* seems we have a match */
+			ping->replied = 1;
+			wake_up(&ping->wq);
+		}
+		spin_unlock_bh(&adev->oam_list_lock);
+	}
+
+out:
+	kfree(oam_cell);
+}
+
+/*
+ * oam rx processing workqueue
+ */
+static void fbxatm_oam_work(struct work_struct *work)
+{
+	struct fbxatm_dev *adev;
+	struct fbxatm_oam_cell *cell;
+
+	adev = container_of(work, struct fbxatm_dev, oam_work);
+
+	do {
+		cell = NULL;
+		spin_lock_bh(&adev->oam_list_lock);
+		if (!list_empty(&adev->rx_oam_cells)) {
+			cell = list_first_entry(&adev->rx_oam_cells,
+						struct fbxatm_oam_cell, next);
+			list_del(&cell->next);
+			adev->rx_oam_cells_count--;
+		}
+		spin_unlock_bh(&adev->oam_list_lock);
+
+		if (cell)
+			handle_oam_cell(adev, cell);
+
+	} while (cell);
+}
+
+/*
+ * register given device
+ */
+static int __fbxatm_register_device(struct fbxatm_dev *adev,
+				    const char *base_name,
+				    const struct fbxatm_dev_ops *ops)
+{
+	struct fbxatm_dev *pdev;
+	int name_len, count, ret;
+	long *inuse;
+
+	adev->ops = ops;
+	INIT_LIST_HEAD(&adev->vcc_list);
+	spin_lock_init(&adev->vcc_list_lock);
+	INIT_LIST_HEAD(&adev->next);
+	spin_lock_init(&adev->stats_lock);
+	spin_lock_init(&adev->oam_list_lock);
+	spin_lock_init(&adev->dev_link_lock);
+	INIT_LIST_HEAD(&adev->rx_oam_cells);
+	INIT_WORK(&adev->oam_work, fbxatm_oam_work);
+	INIT_LIST_HEAD(&adev->oam_pending_ping);
+	get_random_bytes(&adev->oam_correlation_id, 4);
+
+	name_len = strlen(base_name);
+	adev->name = kmalloc(name_len + 10, GFP_KERNEL);
+	if (!adev->name) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* allocate ifindex */
+	while (1) {
+		if (++fbxatm_ifindex < 0)
+			fbxatm_ifindex = 0;
+		if (__fbxatm_dev_get_by_index(fbxatm_ifindex))
+			continue;
+		adev->ifindex = fbxatm_ifindex;
+		break;
+	}
+
+	/* allocate device name */
+	inuse = (long *)get_zeroed_page(GFP_ATOMIC);
+	if (!inuse) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	list_for_each_entry(pdev, &fbxatm_dev_list, next) {
+		unsigned long val;
+		char *end;
+
+		/* look for common prefix */
+		if (strncmp(base_name, pdev->name, name_len))
+			continue;
+
+		/* make sure name is the same, not just a prefix */
+		val = simple_strtoul(pdev->name + name_len, &end, 10);
+		if (*end)
+			continue;
+
+		set_bit(val, inuse);
+	}
+
+	count = find_first_zero_bit(inuse, PAGE_SIZE * 8);
+	free_page((unsigned long)inuse);
+
+	snprintf(adev->name, name_len + 10, "%s%d", base_name, count);
+	list_add_tail(&adev->next, &fbxatm_dev_list);
+
+	/* create procfs entries */
+	ret = fbxatm_proc_dev_register(adev);
+	if (ret)
+		goto fail;
+
+	/* call device procfs init if any */
+	if (adev->ops->init_procfs) {
+		ret = adev->ops->init_procfs(adev);
+		if (ret)
+			goto fail_procfs;
+	}
+
+	/* create sysfs entries */
+	ret = fbxatm_register_dev_sysfs(adev);
+	if (ret)
+		goto fail_procfs;
+
+	return 0;
+
+fail_procfs:
+	fbxatm_proc_dev_deregister(adev);
+
+fail:
+	list_del(&adev->next);
+	kfree(adev->name);
+	return ret;
+}
+
+/*
+ * take lock and register device
+ */
+int fbxatm_register_device(struct fbxatm_dev *adev,
+			   const char *base_name,
+			   const struct fbxatm_dev_ops *ops)
+{
+	int ret;
+
+	mutex_lock(&fbxatm_mutex);
+	ret = __fbxatm_register_device(adev, base_name, ops);
+	mutex_unlock(&fbxatm_mutex);
+	return ret;
+}
+
+EXPORT_SYMBOL(fbxatm_register_device);
+
+/*
+ * change device "link" state
+ */
+static void fbxatm_dev_set_link(struct fbxatm_dev *adev, int link)
+{
+	struct fbxatm_vcc *vcc;
+
+	if (link) {
+		memset(&adev->stats, 0, sizeof (adev->stats));
+		set_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags);
+
+		spin_lock_bh(&adev->vcc_list_lock);
+		list_for_each_entry(vcc, &adev->vcc_list, next) {
+			memset(&vcc->stats, 0, sizeof (vcc->stats));
+			set_bit(FBXATM_VCC_F_LINK_UP, &vcc->vcc_flags);
+			if (!vcc->user_ops || !vcc->user_ops->link_change)
+				continue;
+			vcc->user_ops->link_change(vcc->user_cb_data, 1,
+						   adev->link_cell_rate_ds,
+						   adev->link_cell_rate_us);
+		}
+		spin_unlock_bh(&adev->vcc_list_lock);
+	} else {
+		/* prevent further oam cells input */
+		spin_lock_bh(&adev->dev_link_lock);
+		clear_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags);
+		spin_unlock_bh(&adev->dev_link_lock);
+
+		/* flush rx oam work */
+		cancel_work_sync(&adev->oam_work);
+
+		/* now disable tx on all vcc */
+		spin_lock_bh(&adev->vcc_list_lock);
+		list_for_each_entry(vcc, &adev->vcc_list, next) {
+			spin_lock_bh(&vcc->tx_lock);
+			clear_bit(FBXATM_VCC_F_LINK_UP, &vcc->vcc_flags);
+			spin_unlock_bh(&vcc->tx_lock);
+			if (!vcc->user_ops || !vcc->user_ops->link_change)
+				continue;
+			vcc->user_ops->link_change(vcc->user_cb_data, 0, 0, 0);
+		}
+		spin_unlock_bh(&adev->vcc_list_lock);
+	}
+
+	fbxatm_dev_change_sysfs(adev);
+}
+
+/*
+ * set device "link" to up, allowing vcc/device send ops to be called,
+ * this function sleeps
+ */
+void fbxatm_dev_set_link_up(struct fbxatm_dev *adev)
+{
+	if (!test_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags))
+		printk(KERN_INFO "%s: link UP - "
+		       "down: %u kbit/s - up: %u kbit/s\n", adev->name,
+		       adev->link_rate_ds / 1000, adev->link_rate_us / 1000);
+	return fbxatm_dev_set_link(adev, 1);
+}
+
+EXPORT_SYMBOL(fbxatm_dev_set_link_up);
+
+/*
+ * set device link to down, disallowing any vcc/device send ops to be
+ * called, this function sleeps
+ */
+void fbxatm_dev_set_link_down(struct fbxatm_dev *adev)
+{
+	if (test_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags))
+		printk(KERN_INFO "%s: link DOWN\n", adev->name);
+	return fbxatm_dev_set_link(adev, 0);
+}
+
+EXPORT_SYMBOL(fbxatm_dev_set_link_down);
+
+/*
+ * take lock and unregister device
+ */
+int fbxatm_unregister_device(struct fbxatm_dev *adev)
+{
+	int ret;
+	bool empty;
+
+	ret = 0;
+	mutex_lock(&fbxatm_mutex);
+
+	spin_lock_bh(&adev->vcc_list_lock);
+	empty = list_empty(&adev->vcc_list);
+	spin_unlock_bh(&adev->vcc_list_lock);
+	if (!empty) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (!list_empty(&adev->oam_pending_ping)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	list_del(&adev->next);
+
+	if (adev->ops->release_procfs)
+		adev->ops->release_procfs(adev);
+	fbxatm_proc_dev_deregister(adev);
+
+	fbxatm_unregister_dev_sysfs(adev);
+out:
+	mutex_unlock(&fbxatm_mutex);
+	return ret;
+}
+
+EXPORT_SYMBOL(fbxatm_unregister_device);
+
+/*
+ * actually free device memory
+ */
+void __fbxatm_free_device(struct fbxatm_dev *adev)
+{
+	kfree(adev->name);
+	kfree(adev);
+}
+
+/*
+ * free device memory
+ */
+void fbxatm_free_device(struct fbxatm_dev *adev)
+{
+	/* actual free is done in sysfs release */
+//	class_device_put(&adev->class_dev);
+}
+
+EXPORT_SYMBOL(fbxatm_free_device);
+
+/*
+ * device callback when oam cell comes in
+ */
+void fbxatm_netifrx_oam(struct fbxatm_dev *adev, struct fbxatm_oam_cell *cell)
+{
+	bool link_up;
+
+	spin_lock_bh(&adev->dev_link_lock);
+	link_up = test_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags);
+	spin_unlock_bh(&adev->dev_link_lock);
+
+	if (!link_up || adev->rx_oam_cells_count > 8) {
+		kfree(cell);
+		return;
+	}
+
+	spin_lock_bh(&adev->oam_list_lock);
+	adev->rx_oam_cells_count++;
+	list_add_tail(&cell->next, &adev->rx_oam_cells);
+	spin_unlock_bh(&adev->oam_list_lock);
+	schedule_work(&adev->oam_work);
+}
+
+EXPORT_SYMBOL(fbxatm_netifrx_oam);
+
+/*
+ * set user ops on vcc
+ */
+void fbxatm_set_uops(struct fbxatm_vcc *vcc,
+		     const struct fbxatm_vcc_uops *user_ops,
+		     void *user_cb_data)
+{
+	spin_lock_bh(&vcc->user_ops_lock);
+	vcc->user_ops = user_ops;
+	vcc->user_cb_data = user_cb_data;
+	spin_unlock_bh(&vcc->user_ops_lock);
+}
+
+/*
+ * bind to given vcc
+ */
+static struct fbxatm_vcc *
+__fbxatm_bind_to_vcc(const struct fbxatm_vcc_id *id,
+		     enum fbxatm_vcc_user user)
+{
+	struct fbxatm_vcc *vcc;
+
+	vcc = __fbxatm_vcc_get_by_id(id);
+	if (IS_ERR(vcc))
+		return vcc;
+
+	if (vcc->user != FBXATM_VCC_USER_NONE)
+		return ERR_PTR(-EBUSY);
+
+	vcc->user = user;
+	return vcc;
+}
+
+/*
+ * bind to given vcc
+ */
+struct fbxatm_vcc *
+fbxatm_bind_to_vcc(const struct fbxatm_vcc_id *id,
+		   enum fbxatm_vcc_user user)
+{
+	struct fbxatm_vcc *vcc;
+
+	mutex_lock(&fbxatm_mutex);
+	vcc = __fbxatm_bind_to_vcc(id, user);
+	mutex_unlock(&fbxatm_mutex);
+	return vcc;
+}
+
+/*
+ * unbind from given vcc
+ */
+void fbxatm_unbind_vcc(struct fbxatm_vcc *vcc)
+{
+	spin_lock_bh(&vcc->user_ops_lock);
+	vcc->user_ops = NULL;
+	vcc->user_cb_data = NULL;
+	vcc->user = FBXATM_VCC_USER_NONE;
+	spin_unlock_bh(&vcc->user_ops_lock);
+}
+
+/*
+ * open vcc on given device
+ */
+static int __fbxatm_dev_open_vcc(const struct fbxatm_vcc_id *id,
+				 const struct fbxatm_vcc_qos *qos)
+{
+	struct fbxatm_vcc *vcc;
+	struct fbxatm_dev *adev;
+	int ret, count;
+
+	/* check vpi/vci uniqueness */
+	vcc = __fbxatm_vcc_get_by_id(id);
+	if (!IS_ERR(vcc))
+		return -EBUSY;
+
+	/* sanity check */
+	switch (qos->traffic_class) {
+	case FBXATM_VCC_TC_UBR_NO_PCR:
+	case FBXATM_VCC_TC_UBR:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (qos->max_sdu > 4096)
+		return -EINVAL;
+
+	if (!qos->max_buffered_pkt || qos->max_buffered_pkt > 128)
+		return -EINVAL;
+
+	adev = __fbxatm_dev_get_by_index(id->dev_idx);
+	if (!adev)
+		return -ENODEV;
+
+	/* make sure device accept requested priorities */
+	if (qos->priority > adev->max_priority)
+		return -EINVAL;
+
+	if (qos->rx_priority > adev->max_rx_priority)
+		return -EINVAL;
+
+	/* don't open more vcc than device can handle */
+	count = 0;
+	list_for_each_entry(vcc, &adev->vcc_list, next)
+		count++;
+	if (count + 1 > adev->max_vcc)
+		return -ENOSPC;
+
+	/* make sure vpi/vci is valid for this device */
+	if ((~adev->vpi_mask & id->vpi) || (~adev->vci_mask & id->vci))
+		return -EINVAL;
+
+	if (!try_module_get(adev->ops->owner))
+		return -ENODEV;
+
+	/* ok, create vcc */
+	vcc = kzalloc(sizeof (*vcc), GFP_KERNEL);
+	if (!vcc)
+		return -ENOMEM;
+
+	spin_lock_init(&vcc->user_ops_lock);
+	spin_lock_init(&vcc->tx_lock);
+	vcc->vpi = id->vpi;
+	vcc->vci = id->vci;
+	vcc->adev = adev;
+	vcc->to_drop_pkt = 0;
+	memcpy(&vcc->qos, qos, sizeof (*qos));
+
+	ret = adev->ops->open(vcc);
+	if (ret) {
+		kfree(vcc);
+		return ret;
+	}
+
+	/* inherit vcc link state from device */
+	spin_lock_bh(&adev->vcc_list_lock);
+	if (test_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags))
+		set_bit(FBXATM_VCC_F_LINK_UP, &vcc->vcc_flags);
+	list_add_tail(&vcc->next, &adev->vcc_list);
+	spin_unlock_bh(&adev->vcc_list_lock);
+
+	return ret;
+}
+
+/*
+ * find device & open vcc on it
+ */
+static int fbxatm_dev_open_vcc(const struct fbxatm_vcc_id *id,
+			       const struct fbxatm_vcc_qos *qos)
+{
+	int ret;
+
+	mutex_lock(&fbxatm_mutex);
+	ret = __fbxatm_dev_open_vcc(id, qos);
+	mutex_unlock(&fbxatm_mutex);
+	return ret;
+}
+
+/*
+ * close vcc on device
+ */
+static int __fbxatm_dev_close_vcc(struct fbxatm_vcc *vcc)
+{
+	struct fbxatm_dev *adev;
+
+	if (vcc->user != FBXATM_VCC_USER_NONE)
+		return -EBUSY;
+	adev = vcc->adev;
+	module_put(adev->ops->owner);
+	adev->ops->close(vcc);
+	spin_lock_bh(&adev->vcc_list_lock);
+	list_del(&vcc->next);
+	spin_unlock_bh(&adev->vcc_list_lock);
+	kfree(vcc);
+	return 0;
+}
+
+/*
+ * find device & vcc and close it
+ */
+static int fbxatm_dev_close_vcc(const struct fbxatm_vcc_id *id)
+{
+	struct fbxatm_vcc *vcc;
+	int ret;
+
+	mutex_lock(&fbxatm_mutex);
+	vcc = __fbxatm_vcc_get_by_id(id);
+	if (IS_ERR(vcc))
+		ret = PTR_ERR(vcc);
+	else
+		ret = __fbxatm_dev_close_vcc(vcc);
+	mutex_unlock(&fbxatm_mutex);
+	return ret;
+}
+
+/*
+ * ioctl handler
+ */
+static int fbxatm_vcc_ioctl(struct socket *sock,
+			    unsigned int cmd, void __user *useraddr)
+{
+	int ret;
+
+	ret = 0;
+
+	switch (cmd) {
+	case FBXATM_IOCADD:
+	case FBXATM_IOCDEL:
+	{
+		struct fbxatm_vcc_params params;
+
+		if (copy_from_user(&params, useraddr, sizeof(params)))
+			return -EFAULT;
+
+		if (cmd == FBXATM_IOCADD)
+			ret = fbxatm_dev_open_vcc(&params.id, &params.qos);
+		else
+			ret = fbxatm_dev_close_vcc(&params.id);
+		break;
+	}
+
+	case FBXATM_IOCGET:
+	{
+		struct fbxatm_vcc_params params;
+		struct fbxatm_vcc *vcc;
+
+		if (copy_from_user(&params, useraddr, sizeof(params)))
+			return -EFAULT;
+
+		mutex_lock(&fbxatm_mutex);
+		vcc = __fbxatm_vcc_get_by_id(&params.id);
+		if (IS_ERR(vcc))
+			ret = PTR_ERR(vcc);
+		else {
+			memcpy(&params.qos, &vcc->qos, sizeof (vcc->qos));
+			params.user = vcc->user;
+		}
+		mutex_unlock(&fbxatm_mutex);
+
+		if (ret)
+			return ret;
+
+		if (copy_to_user(useraddr, &params, sizeof(params)))
+			return -EFAULT;
+		break;
+	}
+
+	case FBXATM_IOCOAMPING:
+	{
+		struct fbxatm_oam_ping ping;
+
+		if (copy_from_user(&ping.req, useraddr, sizeof(ping.req)))
+			return -EFAULT;
+
+		ret = do_oam_ping(&ping);
+		if (ret)
+			return ret;
+
+		if (copy_to_user(useraddr, &ping.req, sizeof(ping.req)))
+			return -EFAULT;
+		break;
+	}
+
+	case FBXATM_IOCDROP:
+	{
+		struct fbxatm_vcc_drop_params params;
+		struct fbxatm_vcc *vcc;
+
+		if (copy_from_user(&params, useraddr, sizeof(params)))
+			return -EFAULT;
+
+		mutex_lock(&fbxatm_mutex);
+		vcc = __fbxatm_vcc_get_by_id(&params.id);
+		if (IS_ERR(vcc))
+			ret = PTR_ERR(vcc);
+		else {
+			spin_lock_bh(&vcc->user_ops_lock);
+			vcc->to_drop_pkt += params.drop_count;
+			spin_unlock_bh(&vcc->user_ops_lock);
+			ret = 0;
+		}
+		mutex_unlock(&fbxatm_mutex);
+
+		if (ret)
+			return ret;
+		break;
+	}
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+
+	return ret;
+}
+
+static struct fbxatm_ioctl fbxatm_vcc_ioctl_ops = {
+	.handler	= fbxatm_vcc_ioctl,
+	.owner		= THIS_MODULE,
+};
+
+int __init fbxatm_vcc_init(void)
+{
+	fbxatm_register_ioctl(&fbxatm_vcc_ioctl_ops);
+	return 0;
+}
+
+void fbxatm_vcc_exit(void)
+{
+	fbxatm_unregister_ioctl(&fbxatm_vcc_ioctl_ops);
+}
diff -Nruw linux-6.4-fbx/net/fbxatm/fbxatm_pppoa.c linux-6.4-fbx/net/fbxatm/fbxatm_pppoa.c
--- linux-6.4-fbx/net/fbxatm/fbxatm_pppoa.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxatm/fbxatm_pppoa.c	2023-02-27 17:13:18.537620494 +0100
@@ -0,0 +1,500 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/fbxatm.h>
+#include <linux/fbxatm_dev.h>
+#include "fbxatm_priv.h"
+
+#define PFX	"fbxatm_pppoa: "
+
+static LIST_HEAD(fbxatm_pppoa_vcc_list);
+static DEFINE_MUTEX(fbxatm_pppoa_mutex);
+
+/*
+ * private data for pppoa vcc
+ */
+struct fbxatm_pppoa_vcc {
+	struct fbxatm_vcc		*vcc;
+	struct fbxatm_pppoa_vcc_params	params;
+	enum fbxatm_pppoa_encap		cur_encap;
+
+	/* used by ppp */
+	int				flags;
+	struct ppp_channel		chan;
+	struct tasklet_struct		tx_done_tasklet;
+
+	struct socket			*sock;
+	struct list_head		next;
+};
+
+
+#define __LLC_HDR		0xfe, 0xfe, 0x03
+#define __NLPID_PPP		0xcf
+#define __PPP_LCP		0xc0, 0x21
+
+static const u8 llc_ppp[]	= { __LLC_HDR, __NLPID_PPP };
+static const u8 llc_ppp_lcp[]	= { __LLC_HDR, __NLPID_PPP, __PPP_LCP };
+static const u8 lcp[]		= { __PPP_LCP };
+
+
+/*
+ * fbxatm stack receive callback, called from softirq
+ */
+static void vcc_rx_callback(struct sk_buff *skb, void *data)
+{
+	struct fbxatm_pppoa_vcc *priv;
+
+	priv = (struct fbxatm_pppoa_vcc *)data;
+
+	if (priv->chan.ppp == NULL) {
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	switch (priv->cur_encap) {
+	case FBXATM_EPPPOA_VCMUX:
+		/* nothing to do */
+		break;
+
+	case FBXATM_EPPPOA_LLC:
+		/* make sure llc header is present and remove */
+		if (skb->len < sizeof(llc_ppp) ||
+		    memcmp(skb->data, llc_ppp, sizeof(llc_ppp)))
+			goto error;
+		skb_pull(skb, sizeof(llc_ppp));
+		break;
+
+	case FBXATM_EPPPOA_AUTODETECT:
+		/* look for lcp, with an llc header or not */
+		if (skb->len >= sizeof(llc_ppp_lcp) &&
+		    !memcmp(skb->data, llc_ppp_lcp, sizeof(llc_ppp_lcp))) {
+			priv->cur_encap = FBXATM_EPPPOA_LLC;
+			skb_pull(skb, sizeof(llc_ppp));
+			break;
+		}
+
+		if (skb->len >= sizeof(lcp) &&
+		    !memcmp(skb->data, lcp, sizeof (lcp))) {
+			priv->cur_encap = FBXATM_EPPPOA_VCMUX;
+			break;
+		}
+
+		/* no match */
+		goto error;
+	}
+
+	ppp_input(&priv->chan, skb);
+	return;
+
+error:
+	dev_kfree_skb(skb);
+	ppp_input_error(&priv->chan, 0);
+}
+
+/*
+ * tx done tasklet callback
+ */
+static void tx_done_tasklet_func(unsigned long data)
+{
+	struct fbxatm_pppoa_vcc *priv = (struct fbxatm_pppoa_vcc *)data;
+	ppp_output_wakeup(&priv->chan);
+}
+
+/*
+ * fbxatm stack tx done callback, called from softirq
+ */
+static void vcc_tx_done_callback(void *data)
+{
+	struct fbxatm_pppoa_vcc *priv = data;
+
+	/* schedule tasklet to avoid re-entering in ppp_xmit */
+	tasklet_schedule(&priv->tx_done_tasklet);
+}
+
+/*
+ * vcc user ops, callback from fbxatm stack
+ */
+static const struct fbxatm_vcc_uops fbxatm_pppoa_vcc_uops = {
+	.rx_pkt		= vcc_rx_callback,
+	.tx_done	= vcc_tx_done_callback,
+};
+
+/*
+ * ppp xmit callback
+ */
+static int ppp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+	struct fbxatm_pppoa_vcc *priv;
+	struct sk_buff *to_send_skb, *nskb;
+	int ret;
+
+	priv = (struct fbxatm_pppoa_vcc *)chan->private;
+
+	/* MAYBE FIXME: handle protocol compression ? */
+
+	to_send_skb = skb;
+	nskb = NULL;
+
+	/* send using vcmux encap if not yet known */
+	switch (priv->cur_encap) {
+	case FBXATM_EPPPOA_AUTODETECT:
+	case FBXATM_EPPPOA_VCMUX:
+		break;
+
+	case FBXATM_EPPPOA_LLC:
+	{
+		unsigned int headroom;
+
+		headroom = skb_headroom(skb);
+
+		if (headroom < sizeof(llc_ppp)) {
+			headroom += sizeof(llc_ppp);
+			nskb = skb_realloc_headroom(skb, headroom);
+			if (!nskb) {
+				dev_kfree_skb(skb);
+				return 1;
+			}
+			to_send_skb = nskb;
+		}
+
+		skb_push(to_send_skb, sizeof(llc_ppp));
+		memcpy(to_send_skb->data, llc_ppp, sizeof(llc_ppp));
+		break;
+	}
+	}
+
+	ret = fbxatm_send(priv->vcc, to_send_skb);
+	if (ret) {
+		/* packet was not sent, queue is full, free any newly
+		 * created skb */
+		if (nskb)
+			dev_kfree_skb(nskb);
+		else {
+			/* restore original skb if we altered it */
+			if (priv->cur_encap == FBXATM_EPPPOA_LLC)
+				skb_pull(skb, sizeof(llc_ppp));
+		}
+
+		/* suspend ppp output, will be woken up by
+		 * ppp_output_wakeup, we're called under ppp lock so
+		 * we can't race with tx done */
+		return 0;
+	}
+
+	/* packet was sent, if we sent a copy free the original */
+	if (nskb)
+		dev_kfree_skb(skb);
+
+	if (fbxatm_vcc_queue_full(priv->vcc))
+		ppp_output_stop(chan);
+
+	return 1;
+}
+
+static int ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
+		     unsigned long arg)
+{
+	struct fbxatm_pppoa_vcc *priv;
+	int ret;
+
+	priv = (struct fbxatm_pppoa_vcc *)chan->private;
+
+	switch (cmd) {
+	case PPPIOCGFLAGS:
+		ret = put_user(priv->flags, (int __user *)arg) ? -EFAULT : 0;
+		break;
+	case PPPIOCSFLAGS:
+		ret = get_user(priv->flags, (int __user *) arg) ? -EFAULT : 0;
+		break;
+	default:
+		ret = -ENOTTY;
+		break;
+	}
+	return ret;
+}
+
+static struct ppp_channel_ops fbxatm_pppoa_ppp_ops = {
+	.start_xmit = ppp_xmit,
+	.ioctl = ppp_ioctl,
+};
+
+/*
+ * find pppoa vcc from id
+ */
+static struct fbxatm_pppoa_vcc *
+__find_pppoa_vcc(const struct fbxatm_vcc_id *id)
+{
+	struct fbxatm_pppoa_vcc *priv;
+	int found;
+
+	/* find it */
+	found = 0;
+	list_for_each_entry(priv, &fbxatm_pppoa_vcc_list, next) {
+		if (priv->vcc->adev->ifindex != id->dev_idx ||
+		    priv->vcc->vpi != id->vpi ||
+		    priv->vcc->vci != id->vci)
+			continue;
+
+		found = 1;
+		break;
+	}
+
+	if (found)
+		return priv;
+	return NULL;
+}
+
+/*
+ * find pppoa vcc bound to the given socket; callers hold
+ * fbxatm_pppoa_mutex (see ioctl/release paths)
+ */
+static struct fbxatm_pppoa_vcc *
+__find_pppoa_vcc_from_socket(const struct socket *sock)
+{
+	struct fbxatm_pppoa_vcc *priv;
+	int found;
+
+	/* find it */
+	found = 0;
+	list_for_each_entry(priv, &fbxatm_pppoa_vcc_list, next) {
+		if (priv->sock != sock)
+			continue;
+
+		found = 1;
+		break;
+	}
+
+	if (found)
+		return priv;
+	return NULL;
+}
+
+/*
+ * bind a new pppoa channel to the vcc described by params and
+ * register it with the ppp layer; called with fbxatm_pppoa_mutex held
+ */
+static int __bind_pppoa_vcc(const struct fbxatm_pppoa_vcc_params *params,
+			    struct socket *sock)
+{
+	struct fbxatm_pppoa_vcc *priv;
+	int ret;
+
+	/* sanity check: only known encapsulation modes accepted */
+	switch (params->encap) {
+	case FBXATM_EPPPOA_AUTODETECT:
+	case FBXATM_EPPPOA_VCMUX:
+	case FBXATM_EPPPOA_LLC:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	memcpy(&priv->params, params, sizeof (*params));
+	priv->cur_encap = params->encap;
+
+	/* bind to vcc */
+	priv->vcc = fbxatm_bind_to_vcc(&params->id, FBXATM_VCC_USER_PPPOA);
+	if (IS_ERR(priv->vcc)) {
+		ret = PTR_ERR(priv->vcc);
+		goto fail;
+	}
+
+	fbxatm_set_uops(priv->vcc, &fbxatm_pppoa_vcc_uops, priv);
+	priv->chan.private = priv;
+	priv->chan.ops = &fbxatm_pppoa_ppp_ops;
+	/* channel mtu starts from the vcc max sdu minus the ppp header */
+	priv->chan.mtu = priv->vcc->qos.max_sdu - PPP_HDRLEN;
+	priv->chan.hdrlen = 0;
+	priv->sock = sock;
+	tasklet_init(&priv->tx_done_tasklet, tx_done_tasklet_func,
+		     (unsigned long)priv);
+
+	if (priv->cur_encap != FBXATM_EPPPOA_VCMUX) {
+		/* assume worst case if vcmux is not forced: reserve
+		 * room for the llc header in mtu/hdrlen */
+		priv->chan.mtu -= sizeof(llc_ppp);
+		priv->chan.hdrlen += sizeof(llc_ppp);
+	}
+
+	/* also account for the device's tx headroom */
+	priv->chan.mtu -= priv->vcc->adev->tx_headroom;
+	priv->chan.hdrlen += priv->vcc->adev->tx_headroom;
+
+	ret = ppp_register_channel(&priv->chan);
+	if (ret)
+		goto fail_unbind;
+	list_add_tail(&priv->next, &fbxatm_pppoa_vcc_list);
+	return 0;
+
+fail_unbind:
+	fbxatm_unbind_vcc(priv->vcc);
+
+fail:
+	kfree(priv);
+	return ret;
+}
+
+/*
+ * bind to given vcc: mutex-taking wrapper around __bind_pppoa_vcc
+ */
+static int bind_pppoa_vcc(const struct fbxatm_pppoa_vcc_params *params,
+			  struct socket *sock)
+{
+	int ret;
+
+	mutex_lock(&fbxatm_pppoa_mutex);
+	ret = __bind_pppoa_vcc(params, sock);
+	mutex_unlock(&fbxatm_pppoa_mutex);
+	return ret;
+}
+
+/*
+ * tear down a pppoa channel: unregister from ppp, unbind from the
+ * vcc, stop the tx-done tasklet and free it; called with
+ * fbxatm_pppoa_mutex held
+ */
+static void __unbind_pppoa_vcc(struct fbxatm_pppoa_vcc *priv)
+{
+	ppp_unregister_channel(&priv->chan);
+	fbxatm_unbind_vcc(priv->vcc);
+	tasklet_kill(&priv->tx_done_tasklet);
+	list_del(&priv->next);
+	kfree(priv);
+}
+
+/*
+ * unbind from given vcc, looked up by id; -ENOENT if no matching
+ * pppoa vcc exists
+ */
+static int unbind_pppoa_vcc(const struct fbxatm_pppoa_vcc_params *params)
+{
+	struct fbxatm_pppoa_vcc *priv;
+	int ret;
+
+	ret = 0;
+	mutex_lock(&fbxatm_pppoa_mutex);
+	priv = __find_pppoa_vcc(&params->id);
+	if (!priv)
+		ret = -ENOENT;
+	else
+		__unbind_pppoa_vcc(priv);
+	mutex_unlock(&fbxatm_pppoa_mutex);
+	return ret;
+}
+
+/*
+ * pppoa related ioctl handler: add/remove/query pppoa vccs and
+ * answer the generic ppp channel/unit queries for the owning socket
+ */
+static int fbxatm_pppoa_ioctl(struct socket *sock,
+			      unsigned int cmd, void __user *useraddr)
+{
+	int ret;
+
+	ret = 0;
+
+	switch (cmd) {
+	case FBXATM_PPPOA_IOCADD:
+	case FBXATM_PPPOA_IOCDEL:
+	{
+		struct fbxatm_pppoa_vcc_params params;
+
+		if (copy_from_user(&params, useraddr, sizeof(params)))
+			return -EFAULT;
+
+		if (cmd == FBXATM_PPPOA_IOCADD)
+			ret = bind_pppoa_vcc(&params, sock);
+		else
+			ret = unbind_pppoa_vcc(&params);
+		break;
+	}
+
+	case FBXATM_PPPOA_IOCGET:
+	{
+		struct fbxatm_pppoa_vcc_params params;
+		struct fbxatm_pppoa_vcc *priv;
+
+		/* id is read from userspace, full params written back */
+		if (copy_from_user(&params, useraddr, sizeof(params)))
+			return -EFAULT;
+
+		mutex_lock(&fbxatm_pppoa_mutex);
+		priv = __find_pppoa_vcc(&params.id);
+		if (!priv)
+			ret = -ENOENT;
+		else
+			memcpy(&params, &priv->params, sizeof (params));
+		mutex_unlock(&fbxatm_pppoa_mutex);
+
+		if (ret)
+			return ret;
+
+		if (copy_to_user(useraddr, &params, sizeof(params)))
+			return -EFAULT;
+		break;
+	}
+
+	case PPPIOCGCHAN:
+	case PPPIOCGUNIT:
+	{
+		struct fbxatm_pppoa_vcc *priv;
+		int value;
+
+		value = 0;
+
+		/* the vcc is looked up by owning socket here, not by id */
+		mutex_lock(&fbxatm_pppoa_mutex);
+		priv = __find_pppoa_vcc_from_socket(sock);
+		if (!priv)
+			ret = -ENOENT;
+		else {
+			if (cmd == PPPIOCGCHAN)
+				value = ppp_channel_index(&priv->chan);
+			else
+				value = ppp_unit_number(&priv->chan);
+		}
+		mutex_unlock(&fbxatm_pppoa_mutex);
+
+		if (ret)
+			return ret;
+
+		if (copy_to_user(useraddr, &value, sizeof(value)))
+			ret = -EFAULT;
+		break;
+	}
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+
+	return ret;
+}
+
+/*
+ * pppoa related release handler: when the control socket goes away,
+ * tear down the pppoa vcc bound to it (if any)
+ */
+static void fbxatm_pppoa_release(struct socket *sock)
+{
+	struct fbxatm_pppoa_vcc *priv;
+
+	mutex_lock(&fbxatm_pppoa_mutex);
+	priv = __find_pppoa_vcc_from_socket(sock);
+	if (priv)
+		__unbind_pppoa_vcc(priv);
+	mutex_unlock(&fbxatm_pppoa_mutex);
+}
+
+/* ioctl/release hooks registered with the fbxatm core */
+static struct fbxatm_ioctl fbxatm_pppoa_ioctl_ops = {
+	.handler	= fbxatm_pppoa_ioctl,
+	.release	= fbxatm_pppoa_release,
+	.owner		= THIS_MODULE,
+};
+
+/* register the pppoa ioctl handlers; never fails */
+int __init fbxatm_pppoa_init(void)
+{
+	fbxatm_register_ioctl(&fbxatm_pppoa_ioctl_ops);
+	return 0;
+}
+
+/* unregister the pppoa ioctl handlers */
+void fbxatm_pppoa_exit(void)
+{
+	fbxatm_unregister_ioctl(&fbxatm_pppoa_ioctl_ops);
+}
diff -Nruw linux-6.4-fbx/net/fbxatm./fbxatm_priv.h linux-6.4-fbx/net/fbxatm/fbxatm_priv.h
--- linux-6.4-fbx/net/fbxatm./fbxatm_priv.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxatm/fbxatm_priv.h	2023-02-27 17:10:06.964489879 +0100
@@ -0,0 +1,67 @@
+#ifndef FBXATM_PRIV_H_
+#define FBXATM_PRIV_H_
+
+/*
+ * fbxatm internal declarations shared between the core, vcc, 2684,
+ * pppoa, procfs and sysfs compilation units.
+ */
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+/* global device list, protected by fbxatm_mutex */
+extern struct list_head fbxatm_dev_list;
+extern struct mutex fbxatm_mutex;
+
+int __init fbxatm_vcc_init(void);
+
+void fbxatm_vcc_exit(void);
+
+void __fbxatm_free_device(struct fbxatm_dev *adev);
+
+int __init fbxatm_2684_init(void);
+
+void fbxatm_2684_exit(void);
+
+/*
+ * pppoa (no-op stubs when PPP support is not configured)
+ */
+#ifdef CONFIG_PPP
+int __init fbxatm_pppoa_init(void);
+
+void fbxatm_pppoa_exit(void);
+#else
+static inline int fbxatm_pppoa_init(void) { return 0; };
+static inline void fbxatm_pppoa_exit(void) { };
+#endif
+
+/*
+ * procfs stuff
+ */
+int fbxatm_proc_dev_register(struct fbxatm_dev *dev);
+
+void fbxatm_proc_dev_deregister(struct fbxatm_dev *dev);
+
+struct proc_dir_entry *fbxatm_proc_misc_register(const char *path);
+
+void fbxatm_proc_misc_deregister(const char *path);
+
+int __init fbxatm_procfs_init(void);
+
+void fbxatm_procfs_exit(void);
+
+
+/*
+ * sysfs stuff
+ */
+int __init fbxatm_sysfs_init(void);
+
+void fbxatm_sysfs_exit(void);
+
+void fbxatm_dev_change_sysfs(struct fbxatm_dev *adev);
+
+int fbxatm_register_dev_sysfs(struct fbxatm_dev *adev);
+
+void fbxatm_unregister_dev_sysfs(struct fbxatm_dev *adev);
+
+
+/*
+ * crc10 (used for oam cell checksums)
+ */
+u16 crc10(u16 crc, const u8 *buffer, size_t len);
+
+#endif /* !FBXATM_PRIV_H_ */
diff -Nruw linux-6.4-fbx/net/fbxatm./fbxatm_procfs.c linux-6.4-fbx/net/fbxatm/fbxatm_procfs.c
--- linux-6.4-fbx/net/fbxatm./fbxatm_procfs.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxatm/fbxatm_procfs.c	2023-02-27 17:17:38.392579798 +0100
@@ -0,0 +1,340 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/fbxatm_dev.h>
+#include <net/net_namespace.h>
+#include "fbxatm_priv.h"
+
+/* root of the /proc/net/fbxatm hierarchy */
+static struct proc_dir_entry *fbxatm_proc_root;
+
+#define FMT_U64		"%llu"
+
+/*
+ * /proc/net/fbxatm/vcc: one line per vcc, header on the start token
+ */
+static int vcc_seq_show(struct seq_file *seq, void *v)
+{
+	struct fbxatm_vcc *vcc;
+
+	if (v == (void *)SEQ_START_TOKEN) {
+		seq_printf(seq, "%s",
+			   "Itf.VPI.VCI USER TC MaxSDU  RX TX  RXAAL5 "
+			   "TXAAL5\n");
+		return 0;
+	}
+
+	vcc = (struct fbxatm_vcc *)v;
+	seq_printf(seq, "%d.%u.%u %d ", vcc->adev->ifindex,
+		   vcc->vpi, vcc->vci, vcc->user);
+	seq_printf(seq, "%u %u ", vcc->qos.traffic_class, vcc->qos.max_sdu);
+	seq_printf(seq, FMT_U64 " " FMT_U64 " %u %u\n",
+		   vcc->stats.rx_bytes,
+		   vcc->stats.tx_bytes,
+		   vcc->stats.rx_aal5,
+		   vcc->stats.tx_aal5);
+	return 0;
+}
+
+/*
+ * seq_file start: take fbxatm_mutex (released in vcc_seq_stop) and
+ * locate the *pos'th vcc across all devices.
+ *
+ * NOTE(review): the inner "break" only exits the vcc loop; the outer
+ * device loop keeps running and, since count is not incremented after
+ * a match, the first vcc of a later device can overwrite tvcc.
+ * Cannot be fixed here without regenerating the patch — verify
+ * against the applied tree.
+ */
+static void *vcc_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct fbxatm_dev *adev;
+	struct fbxatm_vcc *tvcc, *vcc;
+	int count;
+
+	mutex_lock(&fbxatm_mutex);
+
+	if (!*pos)
+		return SEQ_START_TOKEN;
+
+	count = 1;
+	tvcc = NULL;
+	list_for_each_entry(adev, &fbxatm_dev_list, next) {
+		list_for_each_entry(vcc, &adev->vcc_list, next) {
+			if (count == *pos) {
+				tvcc = vcc;
+				break;
+			}
+			count++;
+		}
+	}
+
+	return tvcc;
+}
+
+/*
+ * seq_file next: advance to the vcc after v, crossing device
+ * boundaries when the current device's list is exhausted.
+ *
+ * NOTE(review): as in vcc_seq_start, the inner "break" does not stop
+ * the outer device loop, so tvcc may end up pointing into the last
+ * device that has vccs instead of the next one — verify against the
+ * applied tree.
+ */
+static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct fbxatm_dev *adev;
+	struct fbxatm_vcc *last_vcc, *vcc, *tvcc;
+
+	if (v == (void *)SEQ_START_TOKEN) {
+		if (list_empty(&fbxatm_dev_list))
+			return NULL;
+		adev = list_entry(fbxatm_dev_list.next, struct fbxatm_dev,
+				  next);
+		last_vcc = NULL;
+	} else {
+		last_vcc = (struct fbxatm_vcc *)v;
+		adev = last_vcc->adev;
+	}
+
+	tvcc = NULL;
+	list_for_each_entry_continue(adev, &fbxatm_dev_list, next) {
+
+		if (last_vcc && last_vcc->adev == adev) {
+			/* continue inside the device we stopped in */
+			vcc = last_vcc;
+			list_for_each_entry_continue(vcc, &adev->vcc_list,
+						     next) {
+				tvcc = vcc;
+				break;
+			}
+		} else {
+			/* fresh device: take its first vcc */
+			list_for_each_entry(vcc, &adev->vcc_list, next) {
+				tvcc = vcc;
+				break;
+			}
+		}
+	}
+
+	(*pos)++;
+	return tvcc;
+}
+
+/* seq_file stop: drop the mutex taken in vcc_seq_start */
+static void vcc_seq_stop(struct seq_file *seq, void *v)
+{
+	mutex_unlock(&fbxatm_mutex);
+}
+
+static const struct seq_operations vcc_seq_ops = {
+	.start		= vcc_seq_start,
+	.next		= vcc_seq_next,
+	.stop		= vcc_seq_stop,
+	.show		= vcc_seq_show,
+};
+
+static int vcc_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &vcc_seq_ops);
+}
+
+/* proc_ops for /proc/net/fbxatm/vcc */
+static const struct proc_ops vcc_seq_fops = {
+	.proc_open	= vcc_seq_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
+};
+
+/*
+ * /proc/net/fbxatm/dev: one line of counters per device, header on
+ * the start token
+ */
+static int adev_seq_show(struct seq_file *seq, void *v)
+{
+	struct fbxatm_dev *adev;
+
+	if (v == (void *)SEQ_START_TOKEN) {
+		seq_printf(seq, "%s",
+			   "Itf  RX TX  RXAAL5 TXAAL5  RXF4OAM TXF4OAM  "
+			   "RXF5OAM TXF5OAM  RXBADOAM RXBADLLIDOAM "
+			   "RXOTHEROAM RXDROPPED TXDROPNOLINK\n");
+		return 0;
+	}
+
+	adev = (struct fbxatm_dev *)v;
+	seq_printf(seq, "%d  " FMT_U64 " " FMT_U64 "  %u %u  ",
+		   adev->ifindex,
+		   adev->stats.rx_bytes,
+		   adev->stats.tx_bytes,
+		   adev->stats.rx_aal5,
+		   adev->stats.tx_aal5);
+
+	seq_printf(seq, "%u %u  %u %u  %u %u %u %u %u\n",
+		   adev->stats.rx_f4_oam,
+		   adev->stats.tx_f4_oam,
+
+		   adev->stats.rx_f5_oam,
+		   adev->stats.tx_f5_oam,
+
+		   adev->stats.rx_bad_oam,
+		   adev->stats.rx_bad_llid_oam,
+		   adev->stats.rx_other_oam,
+		   adev->stats.rx_dropped,
+		   adev->stats.tx_drop_nolink);
+	return 0;
+}
+
+/*
+ * seq_file start: take fbxatm_mutex (released in adev_seq_stop) and
+ * locate the *pos'th device in the global list
+ */
+static void *adev_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct fbxatm_dev *adev, *tadev;
+	int count;
+
+	mutex_lock(&fbxatm_mutex);
+
+	if (!*pos)
+		return SEQ_START_TOKEN;
+
+	count = 1;
+	tadev = NULL;
+	list_for_each_entry(adev, &fbxatm_dev_list, next) {
+		if (count == *pos) {
+			tadev = adev;
+			break;
+		}
+		count++;
+	}
+
+	return tadev;
+}
+
+/* seq_file next: return the device following v, or NULL at the end */
+static void *adev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct fbxatm_dev *adev, *tadev;
+
+	if (v == (void *)SEQ_START_TOKEN) {
+		if (list_empty(&fbxatm_dev_list))
+			return NULL;
+		adev = list_entry(fbxatm_dev_list.next, struct fbxatm_dev,
+				  next);
+	} else
+		adev = (struct fbxatm_dev *)v;
+
+	tadev = NULL;
+	list_for_each_entry_continue(adev, &fbxatm_dev_list, next) {
+		tadev = adev;
+		break;
+	}
+
+	(*pos)++;
+	return tadev;
+}
+
+/* seq_file stop: drop the mutex taken in adev_seq_start */
+static void adev_seq_stop(struct seq_file *seq, void *v)
+{
+	mutex_unlock(&fbxatm_mutex);
+}
+
+static const struct seq_operations adev_seq_ops = {
+	.start		= adev_seq_start,
+	.next		= adev_seq_next,
+	.stop		= adev_seq_stop,
+	.show		= adev_seq_show,
+};
+
+static int adev_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &adev_seq_ops);
+}
+
+/* proc_ops for /proc/net/fbxatm/dev */
+static const struct proc_ops adev_seq_fops = {
+	.proc_open	= adev_seq_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
+};
+
+
+/*
+ * create device private entry in proc; returns 1 (not a -errno) on
+ * failure, 0 on success
+ */
+int fbxatm_proc_dev_register(struct fbxatm_dev *adev)
+{
+	adev->dev_proc_entry = proc_mkdir(adev->name, fbxatm_proc_root);
+	if (!adev->dev_proc_entry)
+		return 1;
+	return 0;
+}
+
+
+/* remove the per-device proc directory */
+void fbxatm_proc_dev_deregister(struct fbxatm_dev *adev)
+{
+	remove_proc_entry(adev->name, fbxatm_proc_root);
+}
+
+/*
+ * create misc private entry in proc, below the fbxatm root
+ */
+struct proc_dir_entry *fbxatm_proc_misc_register(const char *path)
+{
+	return proc_mkdir(path, fbxatm_proc_root);
+}
+
+void fbxatm_proc_misc_deregister(const char *path)
+{
+	remove_proc_entry(path, fbxatm_proc_root);
+}
+
+/*
+ * list of proc entries for fbxatm; dirent is filled at init time and
+ * used by fbxatm_remove_proc to know what was actually created
+ */
+static struct fbxatm_proc_entry {
+	char *name;
+	const struct proc_ops *proc_fops;
+	struct proc_dir_entry *dirent;
+
+} fbxatm_proc_entries[] = {
+	{
+		.name = "dev",
+		.proc_fops = &adev_seq_fops,
+	},
+	{
+		.name = "vcc",
+		.proc_fops = &vcc_seq_fops,
+	},
+};
+
+/* remove every created proc entry, then the fbxatm root itself */
+static void fbxatm_remove_proc(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(fbxatm_proc_entries); i++) {
+		struct fbxatm_proc_entry *e;
+
+		e = &fbxatm_proc_entries[i];
+
+		/* skip entries that were never created */
+		if (!e->dirent)
+			continue;
+		remove_proc_entry(e->name, fbxatm_proc_root);
+		e->dirent = NULL;
+	}
+
+	remove_proc_entry("fbxatm", init_net.proc_net);
+}
+
+/*
+ * create /proc/net/fbxatm and its "dev"/"vcc" entries; on any
+ * failure everything created so far is torn down again
+ */
+int __init fbxatm_procfs_init(void)
+{
+	unsigned int i;
+	int ret;
+
+	fbxatm_proc_root = proc_net_mkdir(&init_net, "fbxatm",
+					  init_net.proc_net);
+	if (!fbxatm_proc_root) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(fbxatm_proc_entries); i++) {
+		struct proc_dir_entry *dirent;
+		struct fbxatm_proc_entry *e;
+
+		e = &fbxatm_proc_entries[i];
+
+		dirent = proc_create_data(e->name, S_IRUGO, fbxatm_proc_root,
+					  e->proc_fops, NULL);
+		if (!dirent) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		e->dirent = dirent;
+	}
+
+	return 0;
+
+err:
+	if (fbxatm_proc_root)
+		fbxatm_remove_proc();
+	return ret;
+}
+
+/* module exit: remove the whole /proc/net/fbxatm tree */
+void fbxatm_procfs_exit(void)
+{
+	fbxatm_remove_proc();
+}
diff -Nruw linux-6.4-fbx/net/fbxatm./fbxatm_sysfs.c linux-6.4-fbx/net/fbxatm/fbxatm_sysfs.c
--- linux-6.4-fbx/net/fbxatm./fbxatm_sysfs.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxatm/fbxatm_sysfs.c	2023-05-22 20:06:45.395884444 +0200
@@ -0,0 +1,184 @@
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/stat.h>
+#include <linux/fbxatm_dev.h>
+#include "fbxatm_priv.h"
+
+#define to_fbxatm_dev(cldev) container_of(cldev, struct fbxatm_dev, dev)
+
+static const char fmt_u64[] = "%llu\n";
+
+/*
+ * sysfs read-only attribute helpers: each one formats a single
+ * fbxatm_dev field into buf
+ */
+static ssize_t show_ifindex(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	return sprintf(buf, "%d\n", adev->ifindex);
+}
+
+static ssize_t show_link_state(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	return sprintf(buf, "%d\n",
+		       test_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags) ?
+		       1 : 0);
+}
+
+static ssize_t show_link_rate_us(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	return sprintf(buf, "%d\n", adev->link_rate_us);
+}
+
+static ssize_t show_link_rate_ds(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	return sprintf(buf, "%d\n", adev->link_rate_ds);
+}
+
+static ssize_t show_max_priority(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	return sprintf(buf, "%d\n", adev->max_priority);
+}
+
+static ssize_t show_max_rx_priority(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	return sprintf(buf, "%d\n", adev->max_rx_priority);
+}
+
+/* byte counters are read under stats_lock for a consistent snapshot */
+static ssize_t show_rx_bytes(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	u64 val;
+
+	spin_lock_bh(&adev->stats_lock);
+	val = adev->stats.rx_bytes;
+	spin_unlock_bh(&adev->stats_lock);
+	return sprintf(buf, fmt_u64, val);
+}
+
+static ssize_t show_tx_bytes(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	u64 val;
+
+	spin_lock_bh(&adev->stats_lock);
+	val = adev->stats.tx_bytes;
+	spin_unlock_bh(&adev->stats_lock);
+	return sprintf(buf, fmt_u64, val);
+}
+
+/* read-only device attributes, created by fbxatm_register_dev_sysfs */
+static DEVICE_ATTR(ifindex, S_IRUGO, show_ifindex, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO, show_link_state, NULL);
+static DEVICE_ATTR(link_rate_us, S_IRUGO, show_link_rate_us, NULL);
+static DEVICE_ATTR(link_rate_ds, S_IRUGO, show_link_rate_ds, NULL);
+static DEVICE_ATTR(max_priority, S_IRUGO, show_max_priority, NULL);
+static DEVICE_ATTR(max_rx_priority, S_IRUGO, show_max_rx_priority, NULL);
+static DEVICE_ATTR(rx_bytes, S_IRUGO, show_rx_bytes, NULL);
+static DEVICE_ATTR(tx_bytes, S_IRUGO, show_tx_bytes, NULL);
+
+static struct device_attribute *fbxatm_attrs[] = {
+	&dev_attr_ifindex,
+	&dev_attr_link_state,
+	&dev_attr_link_rate_us,
+	&dev_attr_link_rate_ds,
+	&dev_attr_max_priority,
+	&dev_attr_max_rx_priority,
+	&dev_attr_rx_bytes,
+	&dev_attr_tx_bytes,
+};
+
+/*
+ * uevent callback: export NAME/IFINDEX/LINK so userspace hotplug
+ * handlers can identify the device and its link state
+ */
+static int fbxatm_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+	struct fbxatm_dev *adev;
+
+	if (!dev)
+		return -ENODEV;
+
+	adev = to_fbxatm_dev(dev);
+	if (!adev)
+		return -ENODEV;
+
+	if (add_uevent_var(env, "NAME=%s", adev->name))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "IFINDEX=%u", adev->ifindex))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "LINK=%u",
+			   test_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags) ?
+			   1 : 0))
+		return -ENOMEM;
+
+	return 0;
+}
+
+/* device release: frees the embedding fbxatm_dev once refs are gone */
+static void fbxatm_release(struct device *dev)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	__fbxatm_free_device(adev);
+}
+
+/* /sys/class/fbxatm */
+static struct class fbxatm_class = {
+	.name		= "fbxatm",
+	.dev_release	= fbxatm_release,
+	.dev_uevent	= fbxatm_uevent,
+};
+
+/* emit a KOBJ_CHANGE uevent for the device (e.g. after state change) */
+void fbxatm_dev_change_sysfs(struct fbxatm_dev *adev)
+{
+	struct device *dev = &adev->dev;
+
+	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, NULL);
+}
+
+/*
+ * register the device in the fbxatm class and create all attribute
+ * files; on attribute failure, files created so far are removed and
+ * the device is deleted again
+ */
+int fbxatm_register_dev_sysfs(struct fbxatm_dev *adev)
+{
+	struct device *dev = &adev->dev;
+	int i, j, ret;
+
+	dev->class = &fbxatm_class;
+	dev_set_name(dev, "%s", adev->name);
+	ret = device_register(dev);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(fbxatm_attrs); i++) {
+		ret = device_create_file(dev, fbxatm_attrs[i]);
+		if (ret)
+			goto err;
+	}
+	return 0;
+
+err:
+	/* unwind only the files that were actually created */
+	for (j = 0; j < i; j++)
+		device_remove_file(dev, fbxatm_attrs[j]);
+	device_del(dev);
+	return ret;
+}
+
+/* remove the device from sysfs; final free happens in fbxatm_release */
+void fbxatm_unregister_dev_sysfs(struct fbxatm_dev *adev)
+{
+	struct device *dev = &adev->dev;
+	device_del(dev);
+}
+
+int __init fbxatm_sysfs_init(void)
+{
+	return class_register(&fbxatm_class);
+}
+
+void fbxatm_sysfs_exit(void)
+{
+	class_unregister(&fbxatm_class);
+}
diff -Nruw linux-6.4-fbx/net/fbxbridge./Kconfig linux-6.4-fbx/net/fbxbridge/Kconfig
--- linux-6.4-fbx/net/fbxbridge./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxbridge/Kconfig	2023-02-27 19:50:22.652261514 +0100
@@ -0,0 +1,8 @@
+
+#
+# Freebox bridge
+#
+# needs conntrack for its connection-aware forwarding cache
+config FBXBRIDGE
+	bool "Freebox Bridge"
+	select NETFILTER
+	select NF_CONNTRACK
diff -Nruw linux-6.4-fbx/net/fbxbridge./Makefile linux-6.4-fbx/net/fbxbridge/Makefile
--- linux-6.4-fbx/net/fbxbridge./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxbridge/Makefile	2023-02-27 19:50:22.652261514 +0100
@@ -0,0 +1,12 @@
+
+obj-$(CONFIG_FBXBRIDGE)	+= fbxbridge.o
+
+# object files composing the fbxbridge module
+fbxbridge-objs := 		\
+	fbxbr_dev.o		\
+	fbxbr_dhcp.o		\
+	fbxbr_filter.o		\
+	fbxbr_fwcache.o		\
+	fbxbr_input.o		\
+	fbxbr_ioctl.o		\
+	fbxbr_output.o		\
+	fbxbr_utils.o
diff -Nruw linux-6.4-fbx/net/fbxbridge./fbxbr_dev.c linux-6.4-fbx/net/fbxbridge/fbxbr_dev.c
--- linux-6.4-fbx/net/fbxbridge./fbxbr_dev.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxbridge/fbxbr_dev.c	2023-02-27 19:50:23.744290661 +0100
@@ -0,0 +1,734 @@
+#define pr_fmt(fmt)	"fbxbridge: " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/sockios.h>
+#include <linux/inetdevice.h>
+#include <linux/notifier.h>
+#include <linux/if_arp.h>
+#include <linux/mutex.h>
+#include <net/neighbour.h>
+#include <net/netevent.h>
+#include <net/ip.h>
+#include "fbxbr_private.h"
+
+/* all bridges, protected by fbxbr_list_mutex */
+static LIST_HEAD(fbxbr_list);
+static DEFINE_MUTEX(fbxbr_list_mutex);
+
+/*
+ * ioctl "install" func (defined in core networking code)
+ */
+extern void fbxbridge_set(int (*hook)(struct net *net,
+				      unsigned int, void __user *));
+
+
+/*
+ * resolve a bridge by its netdevice name; caller must hold rtnl lock.
+ * Returns ERR_PTR(-ENODEV) when the name does not exist or is not a
+ * fbxbridge device.
+ */
+struct fbxbr *__fbxbr_get_by_name(struct net *net, const char *name)
+{
+	struct net_device *dev;
+
+	dev = __dev_get_by_name(net, name);
+	if (dev == NULL)
+		return ERR_PTR(-ENODEV);
+
+	if (!(dev->priv_flags & IFF_FBXBRIDGE))
+		return ERR_PTR(-ENODEV);
+
+	return netdev_priv(dev);
+}
+
+/*
+ * compute ip address that we will pretend to be on the lan side:
+ * host part (~netmask) - 1, i.e. the address just below the
+ * broadcast; if that collides with the wan address, use host
+ * part - 2 instead
+ */
+static inline __be32 gen_lan_gw(__be32 be_ipaddr, __be32 be_netmask)
+{
+	u32 ipaddr, netmask;
+	u32 gw, mask;
+
+	ipaddr = __be32_to_cpu(be_ipaddr);
+	netmask = __be32_to_cpu(be_netmask);
+
+	/* default to last usable address of subnet */
+	gw = ipaddr & netmask;
+	mask = ~netmask;
+	gw |= (mask - 1);
+
+	/* if it happens to be the ip address, then take another one */
+	if (gw == ipaddr) {
+		gw &= netmask;
+		gw |= mask - 2;
+	}
+	return __cpu_to_be32(gw);
+}
+
+/*
+ * capture the wan port's ip/netmask and derive the lan-side gateway
+ * and netmask from them; no-op when the address did not change.
+ * Must be called with bridge write lock held.
+ */
+static void __fetch_wan_parameters(struct fbxbr *br, struct in_ifaddr *ifa)
+{
+	struct net_device *wan_dev;
+
+	if (!ifa)
+		return;
+
+	if (WARN_ON(!br->wan_port))
+		return;
+
+	/* nothing to do if address/netmask are unchanged */
+	if (br->wan_ipaddr == ifa->ifa_local &&
+	    br->wan_netmask == ifa->ifa_mask)
+		return;
+
+	br->wan_ipaddr = ifa->ifa_local;
+	br->wan_netmask = ifa->ifa_mask;
+
+	if (br->wan_netmask != 0xffffffff) {
+		/* standard netmask */
+		br->lan_gw = gen_lan_gw(br->wan_ipaddr,	br->wan_netmask);
+		br->lan_netmask = br->wan_netmask;
+	} else {
+		u32 gw;
+
+		/* switch to /24 if wan it pointtopoint; pick .fe as
+		 * gateway, or .fd when the wan address already is .fe */
+		gw = ntohl(br->wan_ipaddr) & 0xffffff00;
+		if ((gw | 0xfe) == ntohl(br->wan_ipaddr))
+			gw |= 0xfd;
+		else
+			gw |= 0xfe;
+
+		br->lan_gw = htonl(gw);
+		br->lan_netmask = htonl(0xffffff00);
+	}
+
+	wan_dev = br->wan_port->dev;
+	pr_notice("%s: wan inet device %s address changed to [%pI4]\n",
+		  br->dev->name, wan_dev->name, &br->wan_ipaddr);
+
+	pr_info("%s: %s: wan netmask: %pI4\n",
+		br->dev->name, wan_dev->name, &br->wan_netmask);
+
+	pr_info("%s: %s: lan gw: %pI4\n",
+		br->dev->name, wan_dev->name, &br->lan_gw);
+}
+
+/*
+ * attach a netdevice as the wan or lan port of the named bridge;
+ * caller must hold rtnl lock. Fails with -EBUSY if the slot is taken
+ * or the device is already part of a fbxbridge.
+ */
+int __fbxbr_add_br_port(struct net *net, const char *name,
+			const char *port_name, bool is_wan)
+{
+	struct net_device *dev;
+	struct fbxbr *br;
+	struct fbxbr_port *p;
+	int ret;
+
+	/* locate bridge */
+	br = __fbxbr_get_by_name(net, name);
+	if (IS_ERR(br))
+		return PTR_ERR(br);
+
+	/* check that we don't have a device already */
+	if ((is_wan && br->wan_port) || (!is_wan && br->lan_port))
+		return -EBUSY;
+
+	/* locate port */
+	dev = __dev_get_by_name(net, port_name);
+	if (!dev)
+		return -ENODEV;
+
+	/* make sure it's not used by us */
+	if (dev->priv_flags & (IFF_FBXBRIDGE | IFF_FBXBRIDGE_PORT))
+		return -EBUSY;
+
+	/* allocate new port */
+	p = kzalloc(sizeof (*p), GFP_KERNEL);
+	if (p == NULL)
+		return -ENOMEM;
+
+	p->br = br;
+	p->dev = dev;
+	p->is_wan = is_wan;
+
+	write_lock_bh(&br->lock);
+	if (is_wan)
+		br->wan_port = p;
+	else
+		br->lan_port = p;
+
+	if (is_wan) {
+		struct in_device *in_dev;
+
+		/* pick up the wan address that may already be set */
+		rcu_read_lock();
+
+		in_dev = __in_dev_get_rcu(dev);
+		if (in_dev)
+			__fetch_wan_parameters(br, in_dev->ifa_list);
+
+		rcu_read_unlock();
+	}
+
+	write_unlock_bh(&br->lock);
+
+	/* steer all frames received on the port to fbxbr_handle_frame */
+	ret = netdev_rx_handler_register(dev, fbxbr_handle_frame, p);
+	if (ret)
+		goto err;
+
+	dev->priv_flags |= IFF_FBXBRIDGE_PORT;
+
+	ret = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, NULL);
+	if (ret)
+		goto err;
+
+	pr_info("%s: %s device %s grabbed\n",
+		br->dev->name, is_wan ? "wan" : "lan", dev->name);
+
+	return 0;
+
+err:
+	write_lock_bh(&br->lock);
+	netdev_rx_handler_unregister(dev);
+	if (is_wan)
+		br->wan_port = NULL;
+	else
+		br->lan_port = NULL;
+	dev->priv_flags &= ~IFF_FBXBRIDGE_PORT;
+	write_unlock_bh(&br->lock);
+	kfree(p);
+	return ret;
+}
+
+/*
+ * detach a port from its bridge and free it; caller must hold rtnl
+ * lock
+ */
+void __fbxbr_del_br_port(struct fbxbr_port *p)
+{
+	struct fbxbr *br = p->br;
+	struct net_device *dev = p->dev;
+	bool is_wan;
+
+	netdev_upper_dev_unlink(dev, br->dev);
+	netdev_rx_handler_unregister(dev);
+	dev->priv_flags &= ~IFF_FBXBRIDGE_PORT;
+	/* remember side for the log message after p is freed */
+	is_wan = p->is_wan;
+
+	write_lock_bh(&br->lock);
+	if (p->is_wan)
+		br->wan_port = NULL;
+	else
+		br->lan_port = NULL;
+
+	/* drop the cached route, if any */
+	if (p->rt)
+		ip_rt_put(p->rt);
+	write_unlock_bh(&br->lock);
+	kfree(p);
+
+	pr_info("%s: %s device %s released\n",
+		br->dev->name, is_wan ? "wan" : "lan", dev->name);
+}
+
+/*
+ * detach a port looked up by bridge name + port device name; caller
+ * must hold rtnl lock
+ */
+int __fbxbr_del_br_port_by_name(struct net *net, const char *name,
+				const char *port_name)
+{
+	struct net_device *dev;
+	struct fbxbr *br;
+	struct fbxbr_port *p;
+
+	/* locate bridge */
+	br = __fbxbr_get_by_name(net, name);
+	if (IS_ERR(br))
+		return PTR_ERR(br);
+
+	/* locate port */
+	dev = __dev_get_by_name(net, port_name);
+	if (!dev)
+		return -ENODEV;
+
+	/* the device must actually be a port of this bridge */
+	p = fbxbr_port_get_rtnl(dev);
+	if (!p || p->br != br)
+		return -EINVAL;
+
+	__fbxbr_del_br_port(p);
+	return 0;
+}
+
+/*
+ * bridge device netdevice ops: open/stop have nothing to set up
+ */
+static int fbxbr_net_open(struct net_device *dev)
+{
+	return 0;
+}
+
+static int fbxbr_net_stop(struct net_device *dev)
+{
+	return 0;
+}
+
+/*
+ * transmit on the bridge netdevice: forward IPv4 packets addressed
+ * to the bridge's remote peer (or multicast) out the lan port,
+ * everything else is dropped
+ */
+static int fbxbr_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct fbxbr *br = netdev_priv(dev);
+	const struct iphdr *iph;
+
+	read_lock(&br->lock);
+
+	if (skb->protocol != htons(ETH_P_IP))
+		goto drop;
+
+	/* no wan address or no lan port yet: nothing to forward to */
+	if (!br->wan_ipaddr)
+		goto drop;
+
+	if (!br->lan_port)
+		goto drop;
+
+	if (!pskb_may_pull(skb, sizeof (*iph)))
+		goto drop;
+
+	iph = ip_hdr(skb);
+
+	if (ipv4_is_multicast(iph->daddr)) {
+		dev->stats.tx_packets++;
+		dev->stats.tx_bytes += skb->len;
+		fbxbr_output_lan_mcast_frame(br, skb);
+		goto done;
+	}
+
+	if (iph->daddr != br->br_remote_ipaddr)
+		goto drop;
+
+	/* rewrite destination to the wan address before lan output */
+	fbxbr_dnat_packet(skb, br->wan_ipaddr);
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
+	fbxbr_output_lan_frame(br, skb);
+
+done:
+	read_unlock(&br->lock);
+	return 0;
+
+drop:
+	dev->stats.tx_dropped++;
+	read_unlock(&br->lock);
+	kfree_skb(skb);
+	return 0;
+}
+
+/* netdevice callbacks for the bridge interface */
+static const struct net_device_ops fbxbr_net_ops = {
+	.ndo_open		= fbxbr_net_open,
+	.ndo_stop		= fbxbr_net_stop,
+	.ndo_start_xmit		= fbxbr_net_start_xmit,
+};
+
+static struct device_type fbxbr_type = {
+	.name	= "fbxbridge",
+};
+
+/*
+ * fbxbridge alloc_netdev setup func: initializes both the netdevice
+ * fields and the embedded struct fbxbr (locks, dhcp timers defaults,
+ * forward-cache lists)
+ */
+#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
+			 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)
+
+static void fbxbr_netdev_setup(struct net_device *dev)
+{
+	struct fbxbr *br = netdev_priv(dev);
+	size_t i;
+
+	dev->flags = IFF_NOARP;
+	dev->type = ARPHRD_PPP;
+	dev->mtu = 1500;
+	dev->hard_header_len = 16;
+
+	dev->netdev_ops = &fbxbr_net_ops;
+	dev->needs_free_netdev = true;
+	SET_NETDEV_DEVTYPE(dev, &fbxbr_type);
+	dev->priv_flags = IFF_FBXBRIDGE | IFF_NO_QUEUE;
+
+	dev->features = 0;
+	dev->hw_features = 0;
+	dev->vlan_features = 0;
+
+	br->dev = dev;
+	rwlock_init(&br->lock);
+	rwlock_init(&br->lan_hwaddr_lock);
+	br->dhcpd_renew_time = DEFAULT_RENEWAL_TIME;
+	br->dhcpd_rebind_time = DEFAULT_REBIND_TIME;
+	br->dhcpd_lease_time = DEFAULT_LEASE_TIME;
+	spin_lock_init(&br->last_arp_lock);
+	br->last_arp_send = jiffies;
+
+	rwlock_init(&br->fwcache_lock);
+	INIT_LIST_HEAD(&br->fwcache_rules);
+	for (i = 0; i < ARRAY_SIZE(br->fwcache_hrules); i++)
+		INIT_HLIST_HEAD(&br->fwcache_hrules[i]);
+}
+
+/*
+ * allocate and register a new bridge netdevice with the given name
+ * and add it to the global bridge list
+ */
+int fbxbr_add_br(struct net *net, const char *name)
+{
+	struct net_device *dev;
+	struct fbxbr *br;
+	int ret;
+
+	dev = alloc_netdev(sizeof (struct fbxbr), name, NET_NAME_UNKNOWN,
+			   fbxbr_netdev_setup);
+	if (!dev)
+		return -ENOMEM;
+
+	dev_net_set(dev, net);
+
+	ret = register_netdev(dev);
+	if (ret) {
+		free_netdev(dev);
+		return ret;
+	}
+
+	br = netdev_priv(dev);
+	mutex_lock(&fbxbr_list_mutex);
+	list_add(&br->next, &fbxbr_list);
+	mutex_unlock(&fbxbr_list_mutex);
+
+	pr_notice("%s: new fbxbridge\n", dev->name);
+	return 0;
+}
+
+
+/*
+ * destroy the named bridge: detach both ports, remove it from the
+ * global list and unregister the netdevice; caller must hold rtnl
+ * lock
+ */
+int __fbxbr_del_br(struct net *net, const char *name)
+{
+	struct fbxbr *br;
+
+	br = __fbxbr_get_by_name(net, name);
+	if (IS_ERR(br))
+		return PTR_ERR(br);
+
+	mutex_lock(&fbxbr_list_mutex);
+	list_del(&br->next);
+	mutex_unlock(&fbxbr_list_mutex);
+
+	if (br->wan_port)
+		__fbxbr_del_br_port(br->wan_port);
+	if (br->lan_port)
+		__fbxbr_del_br_port(br->lan_port);
+
+	unregister_netdevice(br->dev);
+	return 0;
+}
+
+/*
+ * fill an ioctl params structure with the bridge's current
+ * configuration, port names and captured lan hw address
+ */
+int fbxbr_get_params(struct net *net, const char *name,
+		     struct fbxbridge_ioctl_params *params)
+{
+	struct fbxbr *br;
+
+	rtnl_lock();
+
+	/* locate bridge */
+	br = __fbxbr_get_by_name(net, name);
+	if (IS_ERR(br)) {
+		rtnl_unlock();
+		return PTR_ERR(br);
+	}
+
+	/* copy current config */
+	params->flags = br->flags;
+	params->dns1_addr = br->dns1_ipaddr;
+	params->dns2_addr = br->dns2_ipaddr;
+	memcpy(params->ip_aliases, br->ip_aliases, sizeof (br->ip_aliases));
+	params->dhcpd_renew_time = br->dhcpd_renew_time;
+	params->dhcpd_rebind_time = br->dhcpd_rebind_time;
+	params->dhcpd_lease_time = br->dhcpd_lease_time;
+	params->inputmark = br->inputmark;
+
+	/* current ports */
+	if (br->wan_port) {
+		memcpy(params->wan_dev.name,
+		       br->wan_port->dev->name,
+		       IFNAMSIZ);
+		params->wan_dev.present = 1;
+	} else {
+		params->wan_dev.name[0] = 0;
+		params->wan_dev.present = 0;
+	}
+
+	if (br->lan_port) {
+		memcpy(params->lan_dev.name,
+		       br->lan_port->dev->name,
+		       IFNAMSIZ);
+		params->lan_dev.present = 1;
+	} else {
+		params->lan_dev.name[0] = 0;
+		params->lan_dev.present = 0;
+	}
+
+	/* copy state (lan hw address is protected by its own lock) */
+	read_lock_bh(&br->lan_hwaddr_lock);
+	params->have_hw_addr = br->have_hw_addr;
+	memcpy(params->lan_hwaddr, br->lan_hwaddr, ETH_ALEN);
+	read_unlock_bh(&br->lan_hwaddr_lock);
+
+	rtnl_unlock();
+
+	return 0;
+}
+
+/*
+ * apply an ioctl params structure to the bridge and flush its
+ * forwarding cache so the new config takes effect
+ */
+int fbxbr_set_params(struct net *net, const char *name,
+		     const struct fbxbridge_ioctl_params *params)
+{
+	struct fbxbr *br;
+
+	rtnl_lock();
+
+	/* locate bridge */
+	br = __fbxbr_get_by_name(net, name);
+	if (IS_ERR(br)) {
+		rtnl_unlock();
+		return PTR_ERR(br);
+	}
+
+	write_lock_bh(&br->lock);
+
+	br->flags = params->flags;
+	br->dns1_ipaddr = params->dns1_addr;
+	br->dns2_ipaddr = params->dns2_addr;
+	memcpy(br->ip_aliases, params->ip_aliases, sizeof (br->ip_aliases));
+	br->dhcpd_renew_time = params->dhcpd_renew_time;
+	br->dhcpd_rebind_time = params->dhcpd_rebind_time;
+	br->dhcpd_lease_time = params->dhcpd_lease_time;
+	br->inputmark = params->inputmark;
+
+	write_unlock_bh(&br->lock);
+
+	/* stale cached decisions may no longer match the new config */
+	fbxbr_fwcache_flush(br);
+
+	rtnl_unlock();
+
+	return 0;
+}
+
+/*
+ * flush the forwarding cache of every bridge
+ */
+void fbxbr_flush_cache(void)
+{
+	struct fbxbr *br;
+
+	mutex_lock(&fbxbr_list_mutex);
+	list_for_each_entry(br, &fbxbr_list, next)
+		fbxbr_fwcache_flush(br);
+	mutex_unlock(&fbxbr_list_mutex);
+}
+
+/*
+ * record the lan host's hw address, logging only when it actually
+ * changes; must be called with BH disabled
+ */
+void fbxbr_capture_hw_addr(struct fbxbr *br, const u8 *hwaddr)
+{
+	bool same;
+
+	/* fast path: address already known and unchanged */
+	read_lock(&br->lan_hwaddr_lock);
+	same = (br->have_hw_addr && !memcmp(br->lan_hwaddr, hwaddr, ETH_ALEN));
+	read_unlock(&br->lan_hwaddr_lock);
+
+	if (same)
+		return;
+
+	write_lock(&br->lan_hwaddr_lock);
+	memcpy(br->lan_hwaddr, hwaddr, ETH_ALEN);
+	br->have_hw_addr = 1;
+	write_unlock(&br->lan_hwaddr_lock);
+
+	pr_notice("%s: new lan hw address is now %pM\n",
+		  br->dev->name, hwaddr);
+}
+
+/*
+ * netdevice notifier callback, called with rtnl lock: when a port
+ * device is unregistered, detach it from its bridge
+ */
+static int fbxbr_netdev_event_callback(struct notifier_block *this,
+				       unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	ASSERT_RTNL();
+
+	if (!(dev->priv_flags & IFF_FBXBRIDGE_PORT))
+		return NOTIFY_DONE;
+
+	/* catch port that goes away */
+	switch (event) {
+	case NETDEV_UNREGISTER:
+		__fbxbr_del_br_port(fbxbr_port_get_rtnl(dev));
+		break;
+
+	default:
+		break;
+	};	/* NOTE(review): stray ';' after switch — harmless */
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * handle inet configuration event on a bridge port: refresh the wan
+ * parameters when the wan port gets an address.
+ *
+ * NOTE(review): this path takes br->lan_hwaddr_lock around
+ * __fetch_wan_parameters while __fbxbr_add_br_port holds br->lock
+ * for the same call — the two lock choices look inconsistent;
+ * confirm against the rest of the module.
+ */
+static void __handle_inet_port_event(struct fbxbr_port *p,
+				     unsigned long event,
+				     struct in_ifaddr *ifa)
+{
+	struct fbxbr *br;
+
+	if (!p->is_wan)
+		return;
+
+	br = p->br;
+
+	switch (event) {
+	case NETDEV_UP:
+		write_lock_bh(&br->lan_hwaddr_lock);
+		__fetch_wan_parameters(br, ifa);
+		write_unlock_bh(&br->lan_hwaddr_lock);
+		break;
+
+	case NETDEV_DOWN:
+		/* we never  clear wan address, so we  can continue to
+		 * use the bridge on lan side even if wan is down */
+		break;
+
+	default:
+		break;
+	}
+}
+
+/*
+ * handle inet configuration event on bridge interface (fbxbr%d):
+ * capture (or clear) the local/remote pair of the point-to-point
+ * address assigned to the bridge
+ */
+static void __handle_inet_bridge_event(struct fbxbr *br,
+				       unsigned long event,
+				       struct in_ifaddr *ifa)
+{
+	switch (event) {
+	case NETDEV_UP:
+		/* only point-to-point configs are of interest */
+		if (!ifa->ifa_address || ifa->ifa_local == ifa->ifa_address)
+			return;
+
+		write_lock_bh(&br->lan_hwaddr_lock);
+		br->br_ipaddr = ifa->ifa_local;
+		br->br_remote_ipaddr = ifa->ifa_address;
+		write_unlock_bh(&br->lan_hwaddr_lock);
+
+		if (br->br_ipaddr)
+			pr_info("%s: bridge local interface configured: "
+				"[%pI4 -> %pI4]\n",
+				br->dev->name,
+				&br->br_ipaddr,
+				&br->br_remote_ipaddr);
+		break;
+
+	case NETDEV_DOWN:
+		write_lock_bh(&br->lan_hwaddr_lock);
+		if (br->br_ipaddr) {
+			br->br_ipaddr = br->br_remote_ipaddr = 0;
+			pr_info("%s: bridge interface unconfigured\n",
+				br->dev->name);
+		}
+		write_unlock_bh(&br->lan_hwaddr_lock);
+		break;
+
+	default:
+		return;
+	}
+}
+
+/*
+ * kernel inet event notifier callback: dispatch address changes to
+ * the bridge or port handler depending on which device is affected
+ */
+static int fbxbr_inet_event_callback(struct notifier_block *this,
+				     unsigned long event, void *ptr)
+{
+	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+	struct net_device *dev = ifa->ifa_dev->dev;
+
+	ASSERT_RTNL();
+
+	/* is it a bridge ? */
+	if (dev->priv_flags & IFF_FBXBRIDGE) {
+		struct fbxbr *br = netdev_priv(dev);
+		__handle_inet_bridge_event(br, event, ifa);
+		return NOTIFY_DONE;
+	}
+
+	/* is it a bridge port */
+	if (dev->priv_flags & IFF_FBXBRIDGE_PORT) {
+		struct fbxbr_port *p = fbxbr_port_get_rtnl(dev);
+		__handle_inet_port_event(p, event, ifa);
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_DONE;
+}
+
+
+/* NOTE(review): obsolete GNU "field:" initializer syntax below;
+ * works but modern style is .notifier_call = ... */
+static struct notifier_block fbxbr_netdev_notifier = {
+	notifier_call: fbxbr_netdev_event_callback,
+};
+
+static struct notifier_block fbxbr_inet_notifier = {
+	notifier_call: fbxbr_inet_event_callback,
+};
+
+/*
+ * module init: register netdev + inet notifiers and install the
+ * fbxbridge ioctl hook; unwinds the netdev notifier on failure
+ */
+static int __init fbxbr_init_module(void)
+{
+	int err;
+
+	err = register_netdevice_notifier(&fbxbr_netdev_notifier);
+	if (err) {
+		pr_err("can't register netdevice notifier\n");
+		return err;
+	}
+
+	err = register_inetaddr_notifier(&fbxbr_inet_notifier);
+	if (err) {
+		pr_err("can't register inet notifier\n");
+		goto err_netdev;
+	}
+
+	fbxbridge_set(fbxbr_ioctl);
+	return 0;
+
+err_netdev:
+	unregister_netdevice_notifier(&fbxbr_netdev_notifier);
+	return err;
+}
+
+/*
+ * module exit: drop notifiers and clear the ioctl hook
+ */
+static void __exit fbxbr_exit_module(void)
+{
+	unregister_netdevice_notifier(&fbxbr_netdev_notifier);
+	unregister_inetaddr_notifier(&fbxbr_inet_notifier);
+	fbxbridge_set(NULL);
+}
+
+module_init(fbxbr_init_module);
+module_exit(fbxbr_exit_module);
+
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+MODULE_DESCRIPTION("Freebox Network Bridge - www.freebox.fr");
+MODULE_LICENSE("GPL");
diff -Nruw linux-6.4-fbx/net/fbxbridge./fbxbr_dhcp.c linux-6.4-fbx/net/fbxbridge/fbxbr_dhcp.c
--- linux-6.4-fbx/net/fbxbridge./fbxbr_dhcp.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxbridge/fbxbr_dhcp.c	2023-02-27 19:50:23.744290661 +0100
@@ -0,0 +1,502 @@
+#include "fbxbr_private.h"
+#include <linux/udp.h>
+#include <net/ip.h>
+#include <asm/checksum.h>
+
+#define BOOTP_REQUEST   1
+#define BOOTP_REPLY     2
+
+/* on-wire BOOTP/DHCP packet layout, IP + UDP headers included
+ * (matches RFC 951 / RFC 2131 fixed header + 312 option bytes) */
+struct bootp_pkt {              /* BOOTP packet format */
+	struct iphdr iph;       /* IP header */
+	struct udphdr udph;     /* UDP header */
+	u8 op;                  /* 1=request, 2=reply */
+	u8 htype;               /* HW address type */
+	u8 hlen;                /* HW address length */
+	u8 hops;                /* Used only by gateways */
+	u32 xid;                /* Transaction ID */
+	u16 secs;               /* Seconds since we started */
+	u16 flags;              /* Just what it says */
+	u32 client_ip;          /* Client's IP address if known */
+	u32 your_ip;            /* Assigned IP address */
+	u32 server_ip;          /* (Next, e.g. NFS) Server's IP address */
+	u32 relay_ip;           /* IP address of BOOTP relay */
+	u8 hw_addr[16];         /* Client's HW address */
+	u8 serv_name[64];       /* Server host name */
+	u8 boot_file[128];      /* Name of boot file */
+	u8 exten[312];          /* DHCP options / BOOTP vendor extensions */
+};
+
+#define FBX_OPT_VENDOR_F_IGNORE_BRIDGE	(1 << 0)
+
+/* payload of the freebox vendor-specific DHCP option (option 224,
+ * parsed in parse_dhcp_opts); oui must be 00:07:CB to be valid */
+struct fbx_opt_vendor {
+	u8	oui[3];
+	u32	version;
+	u32	flags;
+} __attribute__((packed));
+
+#define DHCPDISCOVER	1
+#define DHCPOFFER	2
+#define DHCPREQUEST	3
+#define DHCPDECLINE	4
+#define DHCPACK		5
+#define DHCPNACK	6
+#define DHCPRELEASE	7
+#define DHCPINFORM	8
+
+#define BROADCAST_FLAG	0x8000 /* "I need broadcast replies" */
+
+/* message names indexed by DHCP message type (option 53), 0..8;
+ * callers must bound-check the index (see dump_dhcp_message) */
+static const char *dhcp_to_name[] = {
+	"NONE",
+	"DHCPDISCOVER",
+	"DHCPOFFER",
+	"DHCPREQUEST",
+	"DHCPDECLINE",
+	"DHCPACK",
+	"DHCPNACK",
+	"DHCPRELEASE",
+	"DHCPINFORM",
+};
+
+
+/* bits of dhcp_options.request_param (client's option 55 list) */
+#define PARAM_SUBMASK	(1 << 0)
+#define PARAM_ROUTER	(1 << 1)
+#define PARAM_DNS	(1 << 2)
+#define PARAM_BROADCAST	(1 << 3)
+
+/* decoded DHCP options, filled by parse_dhcp_opts() and consumed by
+ * make_dhcp_opts(); all addresses are network byte order */
+struct dhcp_options
+{
+	u8	msg_type;
+	u32	t1;		/* renewal timeout */
+	u32	t2;		/* rebinding timemout */
+	u32	lease_time;	/* lease time */
+	u32	server_id;	/* server identifier */
+	u32	request_param;	/* requested config params (bitfield) */
+
+	u32	netmask;	/* netmask assigne to client */
+	u32	router;
+	u32	bcast;
+	u32	dns1;
+	u32	dns2;
+	u32	requested_ip;
+
+	struct fbx_opt_vendor	fbx;
+	bool			fbx_valid;	/* option 224 present with freebox OUI */
+
+	bool	need_bcast;	/* client set BROADCAST_FLAG */
+};
+
+/* RFC 2131 DHCP magic cookie, leads the option area */
+static const unsigned char dhcp_magic_cookie[] = { 0x63, 0x82, 0x53, 0x63 };
+
+/*
+ * parse the dhcp options string to a struct
+ *
+ * opts is zeroed first, so absent options read as 0/false.  Parsing
+ * stops at the end marker (0xff), at the end of the buffer, or at the
+ * first option whose declared length overruns the buffer.
+ */
+static void parse_dhcp_opts(const u8 *opts_str, int maxlen,
+			    struct dhcp_options *opts)
+{
+	const u8 *p, *end;
+
+	memset(opts, 0, sizeof(*opts));
+
+	/* check magic cookie */
+	if (memcmp(opts_str, dhcp_magic_cookie, sizeof(dhcp_magic_cookie)))
+		return;
+
+	/* now go for options */
+	p = opts_str + 4;
+	end = opts_str + maxlen;
+
+	while (p < end && *p != 0xff) {
+		const u8 *option;
+		size_t len, i;
+
+		option = p++;
+
+		/* pad option (0) has no length byte */
+		if (*option == 0)
+			continue;
+
+		/* make sure the length byte itself is inside the
+		 * buffer before dereferencing it: the loop condition
+		 * only validated 'option', so after the increment 'p'
+		 * may already be at 'end' (previous code read one
+		 * byte past the buffer here) */
+		if (p >= end)
+			break;
+
+		/* jump of 'len' + 1 bytes */
+		len = *p;
+		p += len + 1;
+		if (p >= end)
+			break;
+
+		/* search for known parameter */
+		switch (*option) {
+		case 53: /* msg_type */
+			if (len)
+				opts->msg_type = option[2];
+			break;
+
+		case 55: /* param request */
+			for (i = 0; i < len; i++) {
+				switch (option[2 + i]) {
+				case 1: /* subnet */
+					opts->request_param |= PARAM_SUBMASK;
+					break;
+
+				case 3: /* router */
+					opts->request_param |= PARAM_ROUTER;
+					break;
+
+				case 6: /* dns */
+					opts->request_param |= PARAM_DNS;
+					break;
+
+				case 28: /* broadcast */
+					opts->request_param |= PARAM_BROADCAST;
+					break;
+				}
+			}
+			break;
+
+		case 50: /* requested_ip */
+			if (len >= 4)
+				memcpy(&opts->requested_ip, option + 2, 4);
+			break;
+
+		case 54: /* server_id */
+			if (len >= 4)
+				memcpy(&opts->server_id, option + 2, 4);
+			break;
+
+		case 224: /* IANA reserved for freebox use */
+		{
+			if (len >= sizeof (opts->fbx)) {
+				memcpy(&opts->fbx, option + 2,
+				       sizeof (opts->fbx));
+				if (opts->fbx.oui[0] == 0x00 &&
+				    opts->fbx.oui[1] == 0x07 &&
+				    opts->fbx.oui[2] == 0xCB)
+					opts->fbx_valid = true;
+			}
+			break;
+		}
+		}
+	}
+}
+
+/*
+ * log a one-line KERN_DEBUG summary of a DHCP message; 'action' and
+ * 'dest' are free-form strings ("received"/"from lan", ...).  The
+ * '312' is sizeof(bootp_pkt.exten): options length = skb->len minus
+ * the fixed BOOTP header.  msg_type < 9 bounds the dhcp_to_name
+ * array lookup.
+ */
+static void dump_dhcp_message(struct fbxbr *br, struct sk_buff *skb,
+			      struct bootp_pkt *bpkt, const char *action,
+			      const char *dest)
+{
+	struct dhcp_options opts;
+
+	parse_dhcp_opts(bpkt->exten, skb->len - (sizeof(*bpkt) - 312),
+			&opts);
+
+	if (opts.msg_type < 9) {
+		struct iphdr *iph;
+
+		iph = ip_hdr(skb);
+		printk(KERN_DEBUG "%s: %s dhcp %s %s "
+		       "(%pI4 -> %pI4) "
+		       "(caddr: %pI4 - yaddr: %pI4 - "
+		       "saddr: %pI4 - req_addr: %pI4)\n",
+		       br->dev->name,
+		       action,
+		       dhcp_to_name[opts.msg_type],
+		       dest,
+		       &iph->saddr,
+		       &iph->daddr,
+		       &bpkt->client_ip,
+		       &bpkt->your_ip,
+		       &bpkt->server_ip,
+		       &opts.requested_ip);
+	} else {
+		printk(KERN_DEBUG "%s: %s unknown dhcp message %s\n",
+		       br->dev->name, action, dest);
+	}
+}
+
+/*
+ * write the dhcp options string from a struct; caller guarantees
+ * opts_str (bootp_pkt.exten, 312 bytes) is large enough for the
+ * worst case emitted here.  NOTE(review): the 'type' parameter is
+ * currently unused (the message type comes from opts->msg_type).
+ */
+static void make_dhcp_opts(u8 *opts_str, const struct dhcp_options *opts,
+			   int type)
+{
+	int len = 0;
+
+	memcpy(opts_str, dhcp_magic_cookie, sizeof(dhcp_magic_cookie));
+	len += sizeof(dhcp_magic_cookie);
+
+	/* msg type (REPLY or OFFER) */
+	opts_str[len++] = 53;
+	opts_str[len++] = 1;
+	opts_str[len++] = opts->msg_type;
+
+	/* server id */
+	opts_str[len++] = 54;
+	opts_str[len++] = 4;
+	memcpy(opts_str + len, &opts->server_id, 4);
+	len += 4;
+
+	/* t1 */
+	if (opts->t1) {
+		opts_str[len++] = 58;
+		opts_str[len++] = 4;
+		memcpy(opts_str + len, &opts->t1, 4);
+		len += 4;
+	}
+
+	/* t2 */
+	if (opts->t2) {
+		opts_str[len++] = 59;
+		opts_str[len++] = 4;
+		memcpy(opts_str + len, &opts->t2, 4);
+		len += 4;
+	}
+
+	/* lease time */
+	if (opts->lease_time) {
+		opts_str[len++] = 51;
+		opts_str[len++] = 4;
+		memcpy(opts_str + len, &opts->lease_time, 4);
+		len += 4;
+	}
+
+	/* add requested_param */
+	if (opts->request_param & PARAM_SUBMASK) {
+		opts_str[len++] = 1;
+		opts_str[len++] = 4;
+		memcpy(opts_str + len, &opts->netmask, 4);
+		len += 4;
+	}
+
+	if (opts->request_param & PARAM_ROUTER) {
+		opts_str[len++] = 3;
+		opts_str[len++] = 4;
+		memcpy(opts_str + len, &opts->router, 4);
+		len += 4;
+	}
+
+	if (opts->request_param & PARAM_BROADCAST) {
+		opts_str[len++] = 28;
+		opts_str[len++] = 4;
+		memcpy(opts_str + len, &opts->bcast, 4);
+		len += 4;
+	}
+
+	if (opts->request_param & PARAM_DNS) {
+		opts_str[len++] = 6;
+		opts_str[len++] = (opts->dns2 ? 8 : 4);
+		memcpy(opts_str + len, &opts->dns1, 4);
+		if (opts->dns2)
+			memcpy(opts_str + len + 4, &opts->dns2, 4);
+		len += (opts->dns2 ? 8 : 4);
+	}
+
+	/* end-of-options marker */
+	opts_str[len++] = 255;
+}
+
+/*
+ * dhcp server: build and transmit a DHCPOFFER / DHCPACK / DHCPNACK
+ * reply on 'dev' to link-level destination 'dest_hw'.  The bridge
+ * leases its own wan address (br->wan_ipaddr) to the single lan
+ * client; br->lan_gw is used as server/router identity.  The UDP
+ * checksum is left at zero (disabled, legal for IPv4 UDP).
+ */
+static void send_dhcp_reply(struct fbxbr *br,
+			    struct net_device *dev,
+			    const u8 *dest_hw,
+			    int type,
+			    const struct bootp_pkt *src_packet,
+			    const struct dhcp_options *src_opts)
+{
+	struct sk_buff *skb;
+	struct iphdr *h;
+	struct bootp_pkt *b;
+	struct dhcp_options dhcp_opts;
+	int hlen = LL_RESERVED_SPACE(dev);
+	int tlen = dev->needed_tailroom;
+
+	/* Allocate packet */
+	skb = alloc_skb(sizeof (struct bootp_pkt) + hlen + tlen, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	skb->dev = dev;
+	skb_reserve(skb, hlen);
+	skb_reset_network_header(skb);
+
+	b = (struct bootp_pkt *)skb_put(skb, sizeof(struct bootp_pkt));
+	memset(b, 0, sizeof(struct bootp_pkt));
+
+	/* Construct IP header */
+	h = &b->iph;
+	h->version = 4;
+	h->ihl = 5;
+	h->tot_len = htons(sizeof(struct bootp_pkt));
+	h->frag_off = htons(IP_DF);
+	h->ttl = 64;
+	h->protocol = IPPROTO_UDP;
+	h->saddr = br->lan_gw;
+
+	/* pick the destination per RFC 2131 4.1: unicast to the
+	 * client address when known, broadcast when the client asked
+	 * for it (or for NAKs), else unicast to the leased address */
+	switch (type) {
+	case DHCPOFFER:
+	case DHCPACK:
+		if (src_packet->client_ip)
+			h->daddr = src_packet->client_ip;
+                else if (src_opts->need_bcast)
+                        h->daddr = INADDR_BROADCAST;
+		else
+			h->daddr = br->wan_ipaddr;
+		break;
+
+	case DHCPNACK:
+		/* always broadcast NAK */
+		h->daddr = INADDR_BROADCAST;
+		break;
+	}
+
+	h->check = ip_fast_csum((unsigned char *) h, h->ihl);
+
+	/* Construct UDP header (checksum stays 0 = disabled) */
+	b->udph.source = __constant_htons(67);
+	b->udph.dest = __constant_htons(68);
+	b->udph.len = htons(sizeof(struct bootp_pkt) - sizeof(struct iphdr));
+
+	/* Construct DHCP header */
+	b->op = BOOTP_REPLY;
+	b->htype = ARPHRD_ETHER;
+	b->hlen = ETH_ALEN;
+	b->secs = 0;
+	b->xid = src_packet->xid;
+
+	switch (type) {
+	case DHCPOFFER:
+		b->server_ip = br->lan_gw;
+		b->your_ip = br->wan_ipaddr;
+		break;
+
+	case DHCPACK:
+		b->client_ip = src_packet->client_ip;
+		b->server_ip = br->lan_gw;
+		b->your_ip = br->wan_ipaddr;
+		break;
+
+	case DHCPNACK:
+		break;
+	}
+
+	b->relay_ip = src_packet->relay_ip;
+	memcpy(b->hw_addr, src_packet->hw_addr, sizeof(src_packet->hw_addr));
+
+	/* Construct DHCP options */
+	memset(&dhcp_opts, 0, sizeof (dhcp_opts));
+	dhcp_opts.msg_type = type;
+	dhcp_opts.server_id = br->lan_gw;
+
+	switch (type) {
+	case DHCPOFFER:
+	case DHCPACK:
+		dhcp_opts.t1 = htonl(br->dhcpd_renew_time);
+		dhcp_opts.t2 = htonl(br->dhcpd_rebind_time);
+		dhcp_opts.lease_time = htonl(br->dhcpd_lease_time);
+		dhcp_opts.netmask = br->lan_netmask;
+		dhcp_opts.bcast = (br->lan_netmask & br->lan_gw) |
+			~br->lan_netmask;
+		dhcp_opts.dns1 = br->dns1_ipaddr;
+		dhcp_opts.dns2 = br->dns2_ipaddr ? br->dns2_ipaddr : 0;
+		dhcp_opts.router = br->lan_gw;
+		dhcp_opts.request_param = src_opts->request_param;
+		break;
+	}
+
+	make_dhcp_opts(b->exten, &dhcp_opts, type);
+	dump_dhcp_message(br, skb, b, "sending", "to lan");
+
+	if (dev_hard_header(skb, dev, ETH_P_IP,
+			    dest_hw, dev->dev_addr, skb->len) < 0) {
+		kfree_skb(skb);
+		return;
+	}
+
+	dev_queue_xmit(skb);
+}
+
+/*
+ * minimal DHCP server: lease the bridge's wan address to the single
+ * lan client (DISCOVER->OFFER, REQUEST->ACK/NAK per RFC 2131 4.3).
+ *
+ * called under bridge lock
+ *
+ * packet must be a valid IP & UDP packet with dport 67
+ *
+ * answer will be sent to skb->dev; caller keeps ownership of skb
+ */
+void fbxbr_dhcpd(struct fbxbr *br, struct sk_buff *skb)
+{
+	struct bootp_pkt *bpkt;
+	struct dhcp_options opts;
+
+	/* code assumes linear skb */
+	if (skb_linearize(skb) < 0)
+		return;
+
+	/* reject short packet (smaller than the fixed BOOTP header) */
+	if (skb->len < (sizeof(*bpkt) - 312))
+		return;
+
+	bpkt = (struct bootp_pkt *)skb->data;
+
+	/* select only valid BOOTP Request/Discover */
+	if (bpkt->op != BOOTP_REQUEST || bpkt->hlen != ETH_ALEN)
+		return;
+
+	parse_dhcp_opts(bpkt->exten, skb->len - (sizeof(*bpkt) - 312), &opts);
+
+	/* a client carrying the freebox vendor option may opt out of
+	 * being served by the bridge */
+	if (opts.fbx_valid &&
+	    (be32_to_cpu(opts.fbx.flags) & FBX_OPT_VENDOR_F_IGNORE_BRIDGE)) {
+		printk(KERN_DEBUG "%s: ignore DHCP message with "
+		       "freebox ignore-bridge flags set\n", br->dev->name);
+		return;
+	}
+
+        if (ntohs(bpkt->flags) & BROADCAST_FLAG)
+		opts.need_bcast = true;
+
+	dump_dhcp_message(br, skb, bpkt, "received", "from lan");
+
+	/* select DHCPDISCOVER to send a DHCPOFFER */
+	if (opts.msg_type == DHCPDISCOVER) {
+		send_dhcp_reply(br, skb->dev, bpkt->hw_addr,
+				DHCPOFFER, bpkt, &opts);
+
+	} else if (opts.msg_type == DHCPREQUEST) {
+		/* send ACK or NACK */
+		if (!opts.requested_ip) {
+			/* RENEWING/REBINDING: client must fill ciaddr */
+			if (!bpkt->client_ip) {
+				/* invalid packet; ignore */
+				return;
+			}
+
+			if (bpkt->client_ip != br->wan_ipaddr)
+				send_dhcp_reply(br, skb->dev, bpkt->hw_addr,
+						DHCPNACK, bpkt, &opts);
+			else {
+				send_dhcp_reply(br, skb->dev, bpkt->hw_addr,
+						DHCPACK, bpkt, &opts);
+				fbxbr_capture_hw_addr(br, bpkt->hw_addr);
+			}
+			return;
+
+		}
+
+		/* INIT-REBOOT or SELECTING: ciaddr must be zero */
+		if (bpkt->client_ip) {
+			/* invalid packet; ignore */
+			return;
+		}
+
+		if (!opts.server_id) {
+			/* INIT-REBOOT */
+			if (opts.requested_ip != br->wan_ipaddr)
+				send_dhcp_reply(br, skb->dev, bpkt->hw_addr,
+						DHCPNACK, bpkt, &opts);
+			else {
+				send_dhcp_reply(br, skb->dev, bpkt->hw_addr,
+						DHCPACK, bpkt, &opts);
+				fbxbr_capture_hw_addr(br, bpkt->hw_addr);
+			}
+			return;
+		}
+
+		/* SELECTING */
+		if (opts.server_id == br->lan_gw) {
+			/* client selected us */
+			send_dhcp_reply(br, skb->dev, bpkt->hw_addr,
+					DHCPACK, bpkt, &opts);
+			fbxbr_capture_hw_addr(br, bpkt->hw_addr);
+		} else {
+			/* client selected another server; ignore */
+		}
+	}
+}
diff -Nruw linux-6.4-fbx/net/fbxbridge./fbxbr_filter.c linux-6.4-fbx/net/fbxbridge/fbxbr_filter.c
--- linux-6.4-fbx/net/fbxbridge./fbxbr_filter.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxbridge/fbxbr_filter.c	2023-02-27 19:50:23.028271550 +0100
@@ -0,0 +1,258 @@
+#include <net/ip.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <linux/netfilter.h>
+#include "fbxbr_private.h"
+
+/*
+ * no-op okfn for NF_HOOK(): we only want the hook verdict, the
+ * packet stays owned by the bridge code (see netfilter_call_hook).
+ */
+static int lolfn(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+	return 0;
+}
+
+/*
+ * invoke netfilter table for finer grained control
+ *
+ * returns NF_ACCEPT or NF_DROP.  An extra reference is taken before
+ * NF_HOOK() because the hook frees the skb on drop; on the accept
+ * path the extra reference is dropped again with skb_unref().
+ * NOTE(review): on the drop path (ret < 0) the extra reference is
+ * left for the caller's kfree_skb() to balance -- confirm all
+ * callers free the skb on NF_DROP.
+ */
+static int
+netfilter_call_hook(struct sk_buff *skb,
+		    unsigned int hook,
+		    struct net_device *in_dev,
+		    struct net_device *out_dev)
+{
+	struct iphdr *iph = ip_hdr(skb);
+	int ret;
+
+	/* don't run frags into netfilter */
+	if ((iph->frag_off & htons(IP_OFFSET)))
+		return NF_ACCEPT;
+
+	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
+
+	/* NF_HOOK will kfree_skb(), guard against this */
+	skb_get(skb);
+
+	ret = NF_HOOK(NFPROTO_IPV4,
+		      hook,
+		      &init_net,
+		      NULL,
+		      skb,
+		      in_dev,
+		      out_dev,
+		      lolfn);
+
+	if (ret < 0)
+		return NF_DROP;
+
+	skb_unref(skb);
+	return NF_ACCEPT;
+}
+
+/* run skb through the FORWARD chain between in_dev and out_dev */
+static int
+netfilter_forward_hook(struct sk_buff *skb,
+		       struct net_device *in_dev,
+		       struct net_device *out_dev)
+{
+	return netfilter_call_hook(skb, NF_INET_FORWARD, in_dev, out_dev);
+}
+
+/* run skb through the LOCAL_IN chain for in_dev */
+static int
+netfilter_input_hook(struct sk_buff *skb, struct net_device *in_dev)
+{
+	return netfilter_call_hook(skb, NF_INET_LOCAL_IN, in_dev, NULL);
+}
+
+/*
+ * set input mark bits, return true if changed (false and a
+ * rate-limited error if the bits were already set, which would
+ * indicate a mark collision with another user)
+ */
+static bool skb_set_br_inputmark(struct fbxbr *br, struct sk_buff *skb)
+{
+	if (unlikely(skb->mark & br->inputmark)) {
+		if (net_ratelimit())
+			pr_err("%s: input mark already set on skb\n",
+			       br->dev->name);
+		return false;
+	}
+
+	skb->mark |= br->inputmark;
+	return true;
+}
+
+/* undo skb_set_br_inputmark() */
+static inline void skb_clear_br_inputmark(struct fbxbr *br,
+					  struct sk_buff *skb)
+{
+	skb->mark &= ~br->inputmark;
+}
+
+/*
+ * decide whether a wan->lan packet must be kept for local delivery
+ * (returned true) instead of being bridged to the lan.
+ *
+ * assume linear ip header
+ */
+static bool wan_to_lan_want_keep(struct fbxbr *br,
+				 struct sk_buff *skb)
+{
+	struct iphdr *iph = ip_hdr(skb);
+	bool changed;
+	int ret;
+
+	/* keep ETHER_IP packets (IP protocol 97, RFC 3378) */
+	if (iph->protocol == 97)
+		return true;
+
+	/* give ipv6 in ip private to freebox back to the
+	 * kernel */
+	if (iph->protocol == IPPROTO_IPV6) {
+		struct ipv6hdr *iph6;
+		unsigned int hlen;
+
+		/* capture at least all traffic from our GW
+		 * (192.88.99.101) */
+		if (iph->saddr == htonl(0xc0586365))
+			return true;
+
+		/* rest is peer-to-peer shortcut traffic, check if
+		 * this is for our IPv6 subnet, we cannot do it on
+		 * fragmented traffic though */
+		if (iph->frag_off & htons(IP_OFFSET))
+			return false;
+
+		/* sanity check on header value */
+		hlen = iph->ihl * 4;
+		if (skb->len < hlen + sizeof(struct ipv6hdr))
+			return false;
+
+		/* keep traffic for the 2a01:e30::/28 prefix */
+		iph6 = (struct ipv6hdr *)((unsigned char *)iph + hlen);
+		if ((iph6->daddr.s6_addr32[0] & htonl(0xfffffff0)) ==
+		    htonl(0x2a010e30))
+			return true;
+	}
+
+	if (!(br->flags & FBXBRIDGE_FLAGS_NETFILTER))
+		return false;
+
+	/* we cant filter frags with netfilter */
+	if (iph->frag_off & htons(IP_OFFSET))
+		return false;
+
+	/* check netfilter input hook; mark the skb so rules can match
+	 * bridge input traffic, restore the mark afterwards */
+	changed = skb_set_br_inputmark(br, skb);
+	ret = netfilter_input_hook(skb, skb->dev);
+	if (changed)
+		skb_clear_br_inputmark(br, skb);
+
+	if (ret == NF_ACCEPT)
+		return true;
+
+	return false;
+}
+
+/*
+ * return false if the netfilter FORWARD chain (when enabled) vetoes
+ * bridging this wan->lan packet.
+ *
+ * assume linear ip header
+ */
+static bool wan_to_lan_can_forward(struct fbxbr *br, struct sk_buff *skb)
+{
+	if ((br->flags & FBXBRIDGE_FLAGS_NETFILTER)) {
+		int ret;
+
+		ret = netfilter_forward_hook(skb, br->wan_port->dev, br->dev);
+		if (ret == NF_DROP)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * filter a wan->lan packet: NF_STOP to hand it back to the stack,
+ * NF_DROP to discard, NF_ACCEPT to bridge it to the lan port.
+ *
+ * note: caller assured that ip header is valid and holds bridge read
+ * lock
+ *
+ * use netfilter hook return type
+ */
+int
+fbxbr_filter_wan_to_lan_packet(struct fbxbr *br, struct sk_buff *skb)
+{
+	if (wan_to_lan_want_keep(br, skb))
+		return NF_STOP;
+
+	if (!br->lan_port)
+		return NF_DROP;
+
+	/* wan_to_lan_can_forward() returns bool; test it directly
+	 * instead of comparing against the NF_ACCEPT verdict constant
+	 * (the old comparison only worked because NF_ACCEPT == 1) */
+	if (!wan_to_lan_can_forward(br, skb))
+		return NF_DROP;
+
+	return NF_ACCEPT;
+}
+
+/*
+ * lan->lan local delivery filter: nothing is ever kept on the lan
+ * side (symmetric counterpart of wan_to_lan_want_keep).
+ *
+ * assume linear ip header
+ */
+static bool lan_to_wan_want_keep(struct fbxbr *br, struct sk_buff *skb)
+{
+	return false;
+}
+
+/*
+ * anti-spoofing / sanity checks for lan->wan traffic: the source
+ * must be the leased wan address and the destination must be a
+ * routable public unicast address; then the netfilter FORWARD chain
+ * (when enabled) gets a veto.
+ *
+ * assume linear ip header
+ */
+static bool lan_to_wan_can_forward(struct fbxbr *br, struct sk_buff *skb)
+{
+	struct iphdr *iph = ip_hdr(skb);
+
+	/* disallow source spoofing */
+	if (iph->saddr != br->wan_ipaddr)
+		return false;
+
+	/* disallow all private net destination */
+	if (ipv4_is_loopback(iph->daddr) ||
+	    ipv4_is_private_10(iph->daddr) ||
+	    ipv4_is_private_172(iph->daddr) ||
+	    ipv4_is_private_192(iph->daddr) ||
+	    ipv4_is_linklocal_169(iph->daddr) ||
+	    ipv4_is_anycast_6to4(iph->daddr) ||
+	    ipv4_is_test_192(iph->daddr) ||
+	    ipv4_is_test_198(iph->daddr))
+		return false;
+
+	/* no multicast please */
+	if (ipv4_is_multicast(iph->daddr))
+		return false;
+
+	/* Don't let IP broadcast go through us */
+	if (ipv4_is_zeronet(iph->daddr))
+		return false;
+
+	if (ipv4_is_lbcast(iph->daddr))
+		return false;
+
+	if ((br->flags & FBXBRIDGE_FLAGS_NETFILTER)) {
+		int ret;
+
+		ret = netfilter_forward_hook(skb, br->dev, br->wan_port->dev);
+		if (ret == NF_DROP)
+			return false;
+	}
+
+	return true;
+}
+
+/*
+ * filter a lan->wan packet: NF_STOP to hand it back to the stack,
+ * NF_DROP to discard, NF_ACCEPT to bridge it to the wan port.
+ *
+ * note: caller assured that ip header is valid and holds bridge read
+ * lock
+ *
+ * use netfilter hook return type
+ */
+int
+fbxbr_filter_lan_to_wan_packet(struct fbxbr *br, struct sk_buff *skb)
+{
+	if (lan_to_wan_want_keep(br, skb))
+		return NF_STOP;
+
+	if (!br->wan_port)
+		return NF_DROP;
+
+	/* lan_to_wan_can_forward() returns bool; test it directly
+	 * instead of comparing against the NF_ACCEPT verdict constant
+	 * (the old comparison only worked because NF_ACCEPT == 1) */
+	if (!lan_to_wan_can_forward(br, skb))
+		return NF_DROP;
+
+	return NF_ACCEPT;
+}
diff -Nruw linux-6.4-fbx/net/fbxbridge./fbxbr_fwcache.c linux-6.4-fbx/net/fbxbridge/fbxbr_fwcache.c
--- linux-6.4-fbx/net/fbxbridge./fbxbr_fwcache.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxbridge/fbxbr_fwcache.c	2024-03-08 17:37:03.612237482 +0100
@@ -0,0 +1,215 @@
+#include <linux/jhash.h>
+#include <net/ip.h>
+#include "fbxbr_private.h"
+
+/*
+ * hash a forward-cache key; wan_ip is bitwise-inverted for non-TCP
+ * flows so the same 4-tuple hashes differently per protocol.
+ */
+u32 fbxbr_fwcache_hash(const struct fbxbr_fwcache_key *k)
+{
+	return jhash_3words(k->lan_ip,
+			    k->is_tcp ? k->wan_ip : ~k->wan_ip,
+			    k->lan_port | k->wan_port << 16, 0);
+}
+EXPORT_SYMBOL(fbxbr_fwcache_hash);
+
+/*
+ * full 5-field comparison between a cached entry and a lookup key
+ * (needed because the hash only mixes a subset of the fields).
+ */
+static bool entry_match(const struct fbxbr_fwcache *fwc,
+			const struct fbxbr_fwcache_key *k)
+{
+	return (fwc->lan_ip == k->lan_ip &&
+		fwc->wan_ip == k->wan_ip &&
+		fwc->is_tcp == k->is_tcp &&
+		fwc->lan_port == k->lan_port &&
+		fwc->wan_port == k->wan_port);
+}
+
+/*
+ * lockless forward-cache lookup; returns the matching entry or NULL.
+ * 'hash' must come from fbxbr_fwcache_hash(k).
+ *
+ * must be called with bh disabled and rcu read lock held
+ */
+struct fbxbr_fwcache *
+__fbxbr_fwcache_lookup_rcu(struct fbxbr *br, u32 hash,
+			   const struct fbxbr_fwcache_key *k)
+{
+	struct fbxbr_fwcache *fwc;
+
+	hlist_for_each_entry_rcu(fwc,
+				 &br->fwcache_hrules[hash % FBXBR_FWCACHE_SIZE],
+				 hnext) {
+		if (entry_match(fwc, k))
+			return fwc;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(__fbxbr_fwcache_lookup_rcu);
+
+/*
+ * writer-side forward-cache lookup (same semantics as the RCU
+ * variant above, but traverses under the cache write lock).
+ *
+ * must be called with bh disabled and cache lock held
+ */
+static struct fbxbr_fwcache *
+__fbxbr_fwcache_lookup(struct fbxbr *br, u32 hash,
+		       const struct fbxbr_fwcache_key *k)
+{
+	struct fbxbr_fwcache *fwc;
+
+	hlist_for_each_entry(fwc,
+			     &br->fwcache_hrules[hash % FBXBR_FWCACHE_SIZE],
+			     hnext) {
+		if (entry_match(fwc, k))
+			return fwc;
+	}
+
+	return NULL;
+}
+
+/*
+ * return true if the flow has a chance to be in the fwcache
+ *
+ * skb must be a valid ipv4 packet
+ *
+ * on success, *k is filled with the canonical (lan/wan oriented)
+ * flow key and *can_create tells whether a cache entry may be
+ * created for this packet (always for UDP, only on SYN for TCP so
+ * mid-stream segments never seed an entry).
+ */
+bool fbxbr_fwcache_skb_allowable(struct sk_buff *skb,
+				 bool from_wan,
+				 struct fbxbr_fwcache_key *k,
+				 bool *can_create)
+{
+	const struct iphdr *iph;
+	__be16 psrc, pdst;
+
+	iph = ip_hdr(skb);
+
+	/* non-first fragments carry no L4 header */
+	if (iph->frag_off & htons(IP_OFFSET))
+		return false;
+
+	if (iph->protocol != IPPROTO_UDP && iph->protocol != IPPROTO_TCP)
+		return false;
+
+	/* orient addresses by traffic direction */
+	if (from_wan) {
+		k->wan_ip = iph->saddr;
+		k->lan_ip = iph->daddr;
+	} else {
+		k->lan_ip = iph->saddr;
+		k->wan_ip = iph->daddr;
+	}
+
+	if (iph->protocol == IPPROTO_UDP) {
+		struct udphdr *udph;
+
+		if (!pskb_may_pull(skb, skb_transport_offset(skb) +
+				   sizeof (struct udphdr)))
+			return false;
+
+		udph = (struct udphdr *)skb_transport_header(skb);
+		*can_create = true;
+
+		psrc = udph->source;
+		pdst = udph->dest;
+		k->is_tcp = false;
+	} else {
+		struct tcphdr *tcph;
+
+		if (!pskb_may_pull(skb, skb_transport_offset(skb) +
+				   sizeof (struct tcphdr)))
+			return false;
+
+		tcph = (struct tcphdr *)skb_transport_header(skb);
+		if (tcph->syn)
+			*can_create = true;
+		else
+			*can_create = false;
+
+		psrc = tcph->source;
+		pdst = tcph->dest;
+		k->is_tcp = true;
+	}
+
+	if (from_wan) {
+		k->wan_port = psrc;
+		k->lan_port = pdst;
+	} else {
+		k->lan_port = psrc;
+		k->wan_port = pdst;
+	}
+	return true;
+}
+
+/*
+ * rcu release deferred callback: run the entry's private destructor
+ * (if any) once no reader can still see it, then free it.
+ */
+static void delayed_fwc_free_entry(struct rcu_head *rhp)
+{
+	struct fbxbr_fwcache *fwc;
+
+	fwc = container_of(rhp, struct fbxbr_fwcache, rcu);
+	if (fwc->priv_destructor)
+		fwc->priv_destructor((void *)fwc->priv_area);
+        kfree(fwc);
+}
+
+/*
+ * insert a new forward-cache entry for key 'k' (no-op if an entry
+ * already exists); evicts the oldest entry when the cache is full.
+ * Always returns 0, including on allocation failure (best effort:
+ * a missing cache entry only costs the slow path).
+ *
+ * must be called with bh disabled
+ */
+int fbxbr_fwcache_add(struct fbxbr *br,
+		      u32 hash, const struct fbxbr_fwcache_key *k)
+{
+	struct fbxbr_fwcache *fwc;
+
+	write_lock(&br->fwcache_lock);
+
+	/* lost the race against a concurrent insert */
+	if (unlikely(__fbxbr_fwcache_lookup(br, hash, k)))
+		goto done;
+
+	/* add new entry */
+	if (br->fwcache_count >= FBXBR_FWCACHE_MAX_ENTRY) {
+		/* make some room: evict the oldest (list head) */
+		fwc = list_first_entry(&br->fwcache_rules,
+				       struct fbxbr_fwcache,
+				       next);
+		hlist_del_rcu(&fwc->hnext);
+		list_del_rcu(&fwc->next);
+		call_rcu(&fwc->rcu, delayed_fwc_free_entry);
+		br->fwcache_count--;
+	}
+
+	fwc = kmalloc(sizeof (*fwc), GFP_ATOMIC);
+	if (!fwc)
+		goto done;
+
+	br->fwcache_count++;
+	fwc->lan_ip = k->lan_ip;
+	fwc->wan_ip = k->wan_ip;
+	fwc->lan_port = k->lan_port;
+	fwc->wan_port = k->wan_port;
+	fwc->is_tcp = k->is_tcp;
+	fwc->priv_destructor = NULL;
+	memset(fwc->priv_area, 0, sizeof (fwc->priv_area));
+
+	hlist_add_head_rcu(&fwc->hnext,
+			   &br->fwcache_hrules[hash % FBXBR_FWCACHE_SIZE]);
+	list_add_tail_rcu(&fwc->next, &br->fwcache_rules);
+
+done:
+	write_unlock(&br->fwcache_lock);
+	return 0;
+}
+
+/*
+ * drop every forward-cache entry; actual frees are deferred through
+ * RCU so in-flight lockless readers stay safe.
+ */
+void fbxbr_fwcache_flush(struct fbxbr *br)
+{
+	struct fbxbr_fwcache *fwc, *tmp;
+
+	write_lock_bh(&br->fwcache_lock);
+
+	list_for_each_entry_safe(fwc, tmp, &br->fwcache_rules, next) {
+		hlist_del_rcu(&fwc->hnext);
+		list_del_rcu(&fwc->next);
+		call_rcu(&fwc->rcu, delayed_fwc_free_entry);
+		br->fwcache_count--;
+	}
+
+	write_unlock_bh(&br->fwcache_lock);
+}
diff -Nruw linux-6.4-fbx/net/fbxbridge./fbxbr_input.c linux-6.4-fbx/net/fbxbridge/fbxbr_input.c
--- linux-6.4-fbx/net/fbxbridge./fbxbr_input.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxbridge/fbxbr_input.c	2024-03-08 17:37:03.612237482 +0100
@@ -0,0 +1,347 @@
+#include <linux/if_arp.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <net/ip.h>
+#include "fbxbr_private.h"
+
+/*
+ * process a frame received on the wan port: fast-path it to the lan
+ * if the flow is in the forward cache, otherwise run the wan->lan
+ * filter and either hand the frame back to the stack (RX_HANDLER_PASS),
+ * drop it, or bridge it and opportunistically seed the cache.
+ */
+static rx_handler_result_t __handle_wan_frame(struct fbxbr_port *p,
+					      struct sk_buff *skb)
+{
+	struct fbxbr *br = p->br;
+	struct fbxbr_fwcache_key fwk;
+	bool fwc_present, fwc_can_create;
+	u32 hash = 0;
+	int ret;
+
+	/* give back non IPv4 packets */
+	if (skb->protocol != htons(ETH_P_IP))
+		return RX_HANDLER_PASS;
+
+	/* stop here if we have no idea what the wan ip address is or
+	 * was */
+	if (!br->wan_ipaddr)
+		goto drop;
+
+	if (!fbxbr_is_valid_ip_packet(skb))
+		goto drop;
+
+	/* lookup into forward cache */
+	fwc_present = false;
+	fwc_can_create = false;
+
+	if (fbxbr_fwcache_skb_allowable(skb, true, &fwk, &fwc_can_create)) {
+		hash = fbxbr_fwcache_hash(&fwk);
+		rcu_read_lock();
+		fwc_present = (__fbxbr_fwcache_lookup_rcu(br, hash, &fwk) != NULL);
+		rcu_read_unlock();
+	}
+
+	if (fwc_present)
+		goto output_lan;
+
+	ret = fbxbr_filter_wan_to_lan_packet(br, skb);
+	switch (ret) {
+	default:
+		WARN(1, "unsupported filter action");
+		fallthrough;
+
+	case NF_DROP:
+		kfree_skb(skb);
+		return RX_HANDLER_CONSUMED;
+
+	case NF_STOP:
+		/* local delivery: clear conntrack state before the
+		 * stack sees the skb again */
+		nf_reset_ct(skb);
+		return RX_HANDLER_PASS;
+
+	case NF_ACCEPT:
+		break;
+	}
+
+output_lan:
+	if (!br->lan_port)
+		goto drop;
+
+	fbxbr_output_lan_frame(br, skb);
+
+	/* seed the fast path for the next packets of this flow */
+	if (!fwc_present && fwc_can_create)
+		fbxbr_fwcache_add(br, hash, &fwk);
+
+	return RX_HANDLER_CONSUMED;
+
+drop:
+	kfree_skb(skb);
+	return RX_HANDLER_CONSUMED;
+}
+
+/*
+ * ARP proxy for the lan side: answer the client's requests on behalf
+ * of the wan network and learn the client's hardware address from
+ * its ARP traffic.  Consumes the skb in all cases.
+ */
+static void
+__handle_lan_arp_frame(struct fbxbr_port *p, struct sk_buff *skb)
+{
+	struct net_device *dev = p->dev;
+	struct fbxbr *br = p->br;
+	__be32 sender_ipaddr, target_ipaddr;
+	u8 *sender_hwaddr, *req;
+	struct arphdr *arp;
+
+	if (!pskb_may_pull(skb, arp_hdr_len(p->dev)))
+		goto done;
+
+	/* only ethernet-sized hw addresses and IPv4 protocol addresses */
+	arp = arp_hdr(skb);
+	if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
+		goto done;
+
+	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
+	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
+	    arp->ar_pro != htons(ETH_P_IP))
+		goto done;
+
+	if (arp->ar_op != htons(ARPOP_REQUEST) &&
+	    arp->ar_op != htons(ARPOP_REPLY))
+		goto done;
+
+	/* fetch subfields from the variable part after the header */
+	req = (unsigned char *)(arp + 1);
+
+	sender_hwaddr = req;
+	req += ETH_ALEN;
+
+	memcpy(&sender_ipaddr, req, 4);
+	req += 4;
+
+	/* skip target_hwaddr */
+	req += dev->addr_len;
+
+	memcpy(&target_ipaddr, req, 4);
+
+	/* ignore gratuitous ARP */
+	if (!sender_ipaddr)
+		goto done;
+
+	if (arp->ar_op == htons(ARPOP_REQUEST)) {
+
+		/* client is sending an arp request */
+		if (!br->wan_ipaddr) {
+			/* wan has never been up, our wan address is
+			 * not known, answer to every arp requests */
+
+			/* ignore what looks like gratuitous ARP */
+			if (sender_ipaddr == target_ipaddr)
+				goto done;
+
+			/* don't answer for special ip address */
+			if (ipv4_is_private_10(target_ipaddr) ||
+			    ipv4_is_private_172(target_ipaddr) ||
+			    ipv4_is_private_192(target_ipaddr) ||
+			    ipv4_is_linklocal_169(target_ipaddr) ||
+			    ipv4_is_anycast_6to4(target_ipaddr) ||
+			    ipv4_is_test_192(target_ipaddr) ||
+			    ipv4_is_test_198(target_ipaddr))
+				goto done;
+
+			/* ok, will reply with a zero source
+			 * address */
+		} else {
+			/* wan is up, filter our arp reply to match
+			 * WAN */
+
+			/* accept only arp from remote client */
+			if (sender_ipaddr != br->wan_ipaddr)
+				goto done;
+
+			/* accept only arp request for wan network */
+			if ((target_ipaddr & br->lan_netmask) !=
+			    (br->wan_ipaddr & br->lan_netmask))
+				goto done;
+
+			/* request is for the client's address, keep quiet */
+			if (target_ipaddr == br->wan_ipaddr)
+				goto done;
+		}
+
+		/* ok I can answer */
+		fbxbr_send_arp_frame(dev, ARPOP_REPLY, sender_hwaddr,
+				     target_ipaddr, NULL,
+				     br->wan_ipaddr, sender_hwaddr);
+
+		/* keep the client address */
+		fbxbr_capture_hw_addr(br, sender_hwaddr);
+
+	} else {
+
+		/* accept only arp from remote client */
+		if (sender_ipaddr != br->wan_ipaddr)
+			goto done;
+
+		/* we received  an arp reply,  iff it was  addressed to
+		 * us, then keep the client mac address  */
+		if (target_ipaddr != br->lan_gw)
+			goto done;
+
+		fbxbr_capture_hw_addr(br, sender_hwaddr);
+	}
+
+done:
+	kfree_skb(skb);
+}
+
+/*
+ * return true if 'ipaddr' is addressed to the bridge itself: the
+ * bridge address, any multicast address, or one of the configured
+ * aliases.
+ */
+static inline bool __is_local_ip(struct fbxbr *br, __be32 ipaddr)
+{
+	int i;
+
+	if (ipaddr == br->br_ipaddr || ipv4_is_multicast(ipaddr))
+		return true;
+
+	for (i = 0; i < MAX_ALIASES; i++) {
+		if (br->ip_aliases[i] && br->ip_aliases[i] == ipaddr)
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * process a frame received on the lan port: proxy ARP, deliver
+ * bridge-local IPv4 locally (after SNAT), serve DHCP when enabled,
+ * otherwise run the lan->wan filter and bridge to the wan port,
+ * opportunistically seeding the forward cache.
+ */
+static rx_handler_result_t __handle_lan_frame(struct fbxbr_port *p,
+					      struct sk_buff *skb)
+{
+	struct fbxbr *br = p->br;
+	struct iphdr *iph;
+	struct fbxbr_fwcache_key fwk;
+	bool fwc_present, fwc_can_create, is_fragment;
+	u32 hash = 0;
+	int ret;
+
+	if (skb->protocol == htons(ETH_P_ARP)) {
+		__handle_lan_arp_frame(p, skb);
+		return RX_HANDLER_CONSUMED;
+	}
+
+	/* give back non IPv4 packets */
+	if (skb->protocol != htons(ETH_P_IP))
+		return RX_HANDLER_PASS;
+
+	if (!fbxbr_is_valid_ip_packet(skb))
+		goto drop;
+
+	iph = ip_hdr(skb);
+
+	/* look  the destination  address, if  talking to  our private
+	 * address or alias, then frame is local */
+	if (__is_local_ip(br, iph->daddr)) {
+
+		if (!br->br_remote_ipaddr)
+			goto drop;
+
+		/* packet comes from lan, snat it and make it local */
+		fbxbr_snat_packet(skb, br->br_remote_ipaddr);
+		skb->dev = br->dev;
+		skb->pkt_type = PACKET_HOST;
+		br->dev->stats.rx_packets++;
+		br->dev->stats.rx_bytes += skb->len;
+		netif_rx(skb);
+		return RX_HANDLER_CONSUMED;
+	}
+
+	/* stop here if we have no idea what the wan ip address is or
+	 * was */
+	if (!br->wan_ipaddr)
+		goto drop;
+
+	/* lookup into forward cache */
+	fwc_present = false;
+	fwc_can_create = false;
+
+	if (fbxbr_fwcache_skb_allowable(skb, false, &fwk, &fwc_can_create)) {
+		hash = fbxbr_fwcache_hash(&fwk);
+		rcu_read_lock();
+		fwc_present = (__fbxbr_fwcache_lookup_rcu(br, hash, &fwk) != NULL);
+		rcu_read_unlock();
+	}
+
+	if (fwc_present)
+		goto output_wan;
+
+	/* process DHCP if enabled (first fragment only) */
+	is_fragment = iph->frag_off & htons(IP_OFFSET);
+	if (iph->protocol == IPPROTO_UDP &&
+	    !is_fragment &&
+	    (br->flags & FBXBRIDGE_FLAGS_DHCPD)) {
+		struct udphdr *udp;
+
+		if (!fbxbr_is_valid_udp_tcp_packet(skb))
+			goto drop;
+
+		udp = udp_hdr(skb);
+		if (udp->dest == htons(67)) {
+			/* fbxbr_dhcpd() answers directly; the request
+			 * itself is consumed below */
+			fbxbr_dhcpd(br, skb);
+			goto drop;
+		}
+	}
+
+	ret = fbxbr_filter_lan_to_wan_packet(br, skb);
+	switch (ret) {
+	default:
+		WARN(1, "unsupported filter action");
+		fallthrough;
+
+	case NF_DROP:
+		kfree_skb(skb);
+		return RX_HANDLER_CONSUMED;
+
+	case NF_STOP:
+		/* local delivery: clear conntrack state before the
+		 * stack sees the skb again */
+		nf_reset_ct(skb);
+		return RX_HANDLER_PASS;
+
+	case NF_ACCEPT:
+		break;
+	}
+
+output_wan:
+	if (!br->wan_port)
+		goto drop;
+
+	fbxbr_output_wan_frame(br, skb);
+
+	/* seed the fast path for the next packets of this flow */
+	if (!fwc_present && fwc_can_create)
+		fbxbr_fwcache_add(br, hash, &fwk);
+
+	return RX_HANDLER_CONSUMED;
+
+drop:
+	kfree_skb(skb);
+	return RX_HANDLER_CONSUMED;
+}
+
+/*
+ * rx_handler installed on every bridge port: dispatch the frame to
+ * the wan or lan path under the bridge read lock.  Called from
+ * __netif_receive_skb_core() with rcu read lock held.
+ */
+rx_handler_result_t fbxbr_handle_frame(struct sk_buff **pskb)
+{
+	struct sk_buff *skb = *pskb;
+	struct fbxbr_port *p;
+	rx_handler_result_t ret;
+
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		return RX_HANDLER_CONSUMED;
+
+	p = fbxbr_port_get_rcu(skb->dev);
+
+	read_lock(&p->br->lock);
+	if (p->is_wan)
+		ret = __handle_wan_frame(p, skb);
+	else
+		ret = __handle_lan_frame(p, skb);
+	read_unlock(&p->br->lock);
+
+	return ret;
+}
diff -Nruw linux-6.4-fbx/net/fbxbridge./fbxbr_ioctl.c linux-6.4-fbx/net/fbxbridge/fbxbr_ioctl.c
--- linux-6.4-fbx/net/fbxbridge./fbxbr_ioctl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxbridge/fbxbr_ioctl.c	2023-02-27 19:50:22.652261514 +0100
@@ -0,0 +1,85 @@
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <net/net_namespace.h>
+#include <linux/uaccess.h>
+#include "fbxbr_private.h"
+
+/*
+ * ioctl handling
+ */
+int fbxbr_ioctl(struct net *net, unsigned int ign, void __user *arg)
+{
+	struct fbxbridge_ioctl_req req;
+	struct fbxbridge_ioctl_chg chg;
+	struct fbxbridge_ioctl_dev_chg dev_chg;
+	struct fbxbridge_ioctl_params params;
+	int ret;
+
+	/* fetch ioctl request */
+	if (copy_from_user(&req, arg, sizeof (req)))
+		return -EFAULT;
+
+	switch (req.cmd) {
+	case E_CMD_BR_CHG:
+		if (copy_from_user(&chg, (void *)req.arg, sizeof (chg)))
+			return -EFAULT;
+
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+			return -EPERM;
+
+		if (!chg.action)
+			return fbxbr_add_br(net, chg.brname);
+
+		rtnl_lock();
+		ret = __fbxbr_del_br(net, chg.brname);
+		rtnl_unlock();
+		return ret;
+
+	case E_CMD_BR_DEV_CHG:
+		if (copy_from_user(&dev_chg, (void *)req.arg,
+				   sizeof (dev_chg)))
+			return -EFAULT;
+
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+			return -EPERM;
+
+		rtnl_lock();
+		if (!dev_chg.action)
+			ret = __fbxbr_add_br_port(net,
+						  dev_chg.brname,
+						  dev_chg.devname,
+						  dev_chg.wan);
+		else
+			ret = __fbxbr_del_br_port_by_name(net,
+							  dev_chg.brname,
+							  dev_chg.devname);
+		rtnl_unlock();
+		return ret;
+
+	case E_CMD_BR_PARAMS:
+		if (copy_from_user(&params, (void *)req.arg, sizeof (params)))
+			return -EFAULT;
+
+		if (!params.action) {
+			/* this is a get */
+			ret = fbxbr_get_params(net, params.brname, &params);
+			if (ret)
+				return ret;
+
+			return copy_to_user((void *)req.arg, &params,
+					    sizeof (params));
+		}
+
+		/* this is a set */
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+			return -EPERM;
+
+		return fbxbr_set_params(net, params.brname, &params);
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff -Nruw linux-6.4-fbx/net/fbxbridge./fbxbr_output.c linux-6.4-fbx/net/fbxbridge/fbxbr_output.c
--- linux-6.4-fbx/net/fbxbridge./fbxbr_output.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxbridge/fbxbr_output.c	2023-02-27 19:50:22.652261514 +0100
@@ -0,0 +1,164 @@
+#include <net/ip.h>
+#include <net/arp.h>
+#include "fbxbr_private.h"
+
+/*
+ * caller must hold bridge lock
+ *
+ * lan port must be valid
+ */
+void fbxbr_output_lan_mcast_frame(struct fbxbr *br, struct sk_buff *skb)
+{
+	struct fbxbr_port *p = br->lan_port;
+	struct net_device *dev = p->dev;
+	struct iphdr *ip;
+	u8 mcast_hwaddr[6];
+	u32 daddr;
+
+	ip = ip_hdr(skb);
+
+	/* compute mcast hwaddr */
+	mcast_hwaddr[0] = 0x1;
+	mcast_hwaddr[1] = 0x0;
+	mcast_hwaddr[2] = 0x5e;
+	daddr = ntohl(ip->daddr);
+	mcast_hwaddr[3] = (daddr & 0x7f0000) >> 16;
+	mcast_hwaddr[4] = (daddr & 0xff00) >> 8;
+	mcast_hwaddr[5] = (daddr & 0xff);
+
+	skb->dev = dev;
+	dev_hard_header(skb, dev, ETH_P_802_3, mcast_hwaddr, dev->dev_addr,
+			ETH_P_IP);
+	dev_queue_xmit(skb);
+}
+
+/*
+ * caller must hold bridge lock and have BH disabled
+ *
+ * lan port must be valid
+ *
+ * must be a valid ip packet
+ */
+void fbxbr_output_lan_frame(struct fbxbr *br, struct sk_buff *skb)
+{
+	struct fbxbr_port *p = br->lan_port;
+	struct net_device *dev = p->dev;
+	struct iphdr *iph;
+	const char *dest_hw;
+
+	iph = ip_hdr(skb);
+
+	if (!br->have_hw_addr && iph->daddr != INADDR_BROADCAST) {
+
+		/* (fixme: try to queue instead of dropping ?) */
+		kfree_skb(skb);
+
+		/* rate limit arp sending to ARP_RATE_LIMIT  */
+		spin_lock(&br->last_arp_lock);
+		if (time_before(jiffies, br->last_arp_send + ARP_RATE_LIMIT)) {
+			spin_unlock(&br->last_arp_lock);
+			return;
+		}
+
+		br->last_arp_send = jiffies;
+		spin_unlock(&br->last_arp_lock);
+
+		fbxbr_send_arp_frame(dev,
+				     ARPOP_REQUEST,
+				     NULL,
+				     br->lan_gw,
+				     NULL,
+				     br->wan_ipaddr,
+				     NULL);
+		return;
+	}
+
+	/* we have an active device: send to the known hw addr if we
+	 * have it, or to the bcast hw addr if we don't or if the
+	 * packet is an ip broadcast */
+	skb->dev = dev;
+
+	if (br->have_hw_addr && iph->daddr != INADDR_BROADCAST)
+		dest_hw = br->lan_hwaddr;
+	else
+		dest_hw = dev->broadcast;
+
+	dev_hard_header(skb, dev, ETH_P_802_3, dest_hw, dev->dev_addr,
+			ETH_P_IP);
+	dev_queue_xmit(skb);
+}
+
+/*
+ * caller must hold bridge lock and have BH disabled
+ *
+ * wan port must be valid
+ *
+ * must be a valid ip packet
+ */
+void fbxbr_output_wan_frame(struct fbxbr *br, struct sk_buff *skb)
+{
+	struct fbxbr_port *p = br->wan_port;
+	struct net_device *dev = p->dev;
+	struct iphdr *iph;
+	struct neighbour *neigh;
+	__be32 nh;
+
+	skb->dev = dev;
+
+	if (!dev->hard_header_len) {
+		dev_queue_xmit(skb);
+		return;
+	}
+
+	iph = ip_hdr(skb);
+
+	/* resolve next hop */
+	nh = iph->daddr;
+	if ((nh & br->wan_netmask) != (br->wan_ipaddr & br->wan_netmask)) {
+		struct rtable *rt;
+
+		rt = p->rt;
+		if (rt && rt->dst.obsolete > 0) {
+			ip_rt_put(rt);
+			p->rt = NULL;
+			rt = NULL;
+		}
+
+		/* need to find default gateway */
+		if (!rt) {
+			rt = ip_route_output(&init_net, nh, 0, 0,
+					     dev->ifindex);
+			if (IS_ERR(rt) || rt->rt_type != RTN_UNICAST) {
+				kfree_skb(skb);
+				return;
+			}
+
+			p->rt = rt;
+		}
+
+		nh = rt_nexthop(rt, nh);
+	}
+
+	/* resolve neighbour */
+	neigh = __ipv4_neigh_lookup_noref(dev, nh);
+        if (unlikely(!neigh))
+                neigh = __neigh_create(&arp_tbl, &nh, dev, false);
+
+	if (IS_ERR(neigh)) {
+		kfree_skb(skb);
+		return;
+	}
+
+	if (!(neigh->nud_state & NUD_VALID)) {
+		neigh_event_send(neigh, NULL);
+		kfree_skb(skb);
+		return;
+	}
+
+	neigh_event_send(neigh, NULL);
+
+	/* send */
+	dev_hard_header(skb, dev, ETH_P_802_3, neigh->ha, dev->dev_addr,
+			ETH_P_IP);
+	dev_queue_xmit(skb);
+}
diff -Nruw linux-6.4-fbx/net/fbxbridge./fbxbr_private.h linux-6.4-fbx/net/fbxbridge/fbxbr_private.h
--- linux-6.4-fbx/net/fbxbridge./fbxbr_private.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxbridge/fbxbr_private.h	2024-03-08 17:37:03.612237482 +0100
@@ -0,0 +1,198 @@
+#ifndef FBXBRIDGE_PRIVATE_H_
+#define FBXBRIDGE_PRIVATE_H_
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/fbxbridge.h>
+#include <linux/rtnetlink.h>
+#include <linux/spinlock.h>
+
+#define ARP_RATE_LIMIT			(HZ)
+#define ARP_ETHER_SIZE			(8 + ETH_ALEN * 2 + 4 * 2)
+#define	DEFAULT_RENEWAL_TIME		60
+#define	DEFAULT_REBIND_TIME		300
+#define	DEFAULT_LEASE_TIME		600
+
+#define FBXBR_FWCACHE_SIZE		256
+#define FBXBR_FWCACHE_MAX_ENTRY		2048
+
+struct fbxbr;
+
+struct fbxbr_fwcache_key {
+	__be32			lan_ip;
+	__be32			wan_ip;
+	__be16			lan_port;
+	__be16			wan_port;
+	bool			is_tcp;
+};
+
+struct fbxbr_fwcache {
+	__be32			lan_ip;
+	__be32			wan_ip;
+	__be16			lan_port;
+	__be16			wan_port;
+	u8			is_tcp;
+	struct hlist_node       hnext;
+	struct list_head        next;
+
+	void			(*priv_destructor)(void *);
+	u32			priv_area[8];
+	struct rcu_head		rcu;
+};
+
+struct fbxbr_port {
+	struct fbxbr		*br;
+	struct net_device	*dev;
+	struct rtable		*rt;
+	bool			is_wan;
+};
+
+struct fbxbr {
+	struct net_device	*dev;
+
+	/* protect all fields but lan_hwaddr */
+	rwlock_t		lock;
+
+	/*
+	 * currently assigned lan & wan port, updated by userspace
+	 * under rtnl
+	 */
+	struct fbxbr_port	*wan_port;
+	struct fbxbr_port	*lan_port;
+
+	/*
+	 * config, updated by userspace
+	 */
+	unsigned int		flags;
+	unsigned int		inputmark;
+
+	unsigned int		dns1_ipaddr;
+	unsigned int		dns2_ipaddr;
+
+	unsigned long		dhcpd_renew_time;
+	unsigned long		dhcpd_rebind_time;
+	unsigned long		dhcpd_lease_time;
+
+	/* list of ip we consider to be local */
+	unsigned long		ip_aliases[MAX_ALIASES];
+
+	/*
+	 * runtime state
+	 */
+
+	/* local and remote (fbx) ip address, maintained using inet
+	 * notifier */
+	__be32			br_ipaddr;
+	__be32			br_remote_ipaddr;
+
+	/* wan side inet info */
+	__be32			wan_ipaddr;
+	__be32			wan_netmask;
+	__be32			lan_gw;
+	__be32			lan_netmask;
+
+	/* currently detected lan device hardware address */
+	rwlock_t		lan_hwaddr_lock;
+	bool			have_hw_addr;
+	unsigned char		lan_hwaddr[ETH_ALEN];
+
+	spinlock_t		last_arp_lock;
+	unsigned long		last_arp_send;
+
+	rwlock_t		fwcache_lock;
+        struct hlist_head       fwcache_hrules[FBXBR_FWCACHE_SIZE];
+        struct list_head        fwcache_rules;
+        unsigned int            fwcache_count;
+
+	struct list_head	next;
+};
+
+/*
+ * helpers to get bridge port from netdevice
+ */
+#define fbxbr_port_exists(dev) (dev->priv_flags & IFF_FBXBRIDGE_PORT)
+
+static inline struct fbxbr_port *
+fbxbr_port_get_rcu(const struct net_device *dev)
+{
+	return rcu_dereference(dev->rx_handler_data);
+}
+
+static inline struct fbxbr_port *
+fbxbr_port_get_rtnl(const struct net_device *dev)
+{
+	return fbxbr_port_exists(dev) ?
+		rtnl_dereference(dev->rx_handler_data) : NULL;
+}
+
+/* fbxbr_dev.c */
+int fbxbr_add_br(struct net *net, const char *name);
+
+int __fbxbr_del_br(struct net *net, const char *name);
+
+int __fbxbr_add_br_port(struct net *net, const char *name,
+			const char *port_name, bool is_wan);
+
+int __fbxbr_del_br_port_by_name(struct net *net, const char *name,
+				const char *port_name);
+
+void __fbxbr_del_br_port(struct fbxbr_port *p);
+
+int fbxbr_get_params(struct net *net, const char *name,
+		     struct fbxbridge_ioctl_params *params);
+
+int fbxbr_set_params(struct net *net, const char *name,
+		     const struct fbxbridge_ioctl_params *params);
+
+void fbxbr_capture_hw_addr(struct fbxbr *br, const u8 *hwaddr);
+
+
+/* fbxbr_dhcp.c */
+void fbxbr_dhcpd(struct fbxbr *br, struct sk_buff *skb);
+
+/* fbxbr_filter.c */
+int
+fbxbr_filter_wan_to_lan_packet(struct fbxbr *br, struct sk_buff *skb);
+int
+fbxbr_filter_lan_to_wan_packet(struct fbxbr *br, struct sk_buff *skb);
+
+/* fbxbr_fwcache.c */
+u32 fbxbr_fwcache_hash(const struct fbxbr_fwcache_key *k);
+
+struct fbxbr_fwcache *
+__fbxbr_fwcache_lookup_rcu(struct fbxbr *br, u32 hash,
+			   const struct fbxbr_fwcache_key *k);
+
+bool fbxbr_fwcache_skb_allowable(struct sk_buff *skb,
+				 bool from_wan,
+				 struct fbxbr_fwcache_key *k,
+				 bool *can_create);
+int fbxbr_fwcache_add(struct fbxbr *br,
+		      u32 hash, const struct fbxbr_fwcache_key *k);
+
+void fbxbr_fwcache_flush(struct fbxbr *br);
+
+/* fbxbr_ioctl.c */
+int fbxbr_ioctl(struct net *net, unsigned int ign, void __user *arg);
+
+/* fbxbr_input.c */
+rx_handler_result_t fbxbr_handle_frame(struct sk_buff **pskb);
+
+/* fbxbr_output.c */
+void fbxbr_output_lan_mcast_frame(struct fbxbr *br, struct sk_buff *skb);
+void fbxbr_output_lan_frame(struct fbxbr *br, struct sk_buff *skb);
+void fbxbr_output_wan_frame(struct fbxbr *br, struct sk_buff *skb);
+
+/* fbxbr_utils.c */
+int fbxbr_send_arp_frame(struct net_device *dev, u16 op,
+			 const u8 *dest_hw,
+			 __be32 src_ip, const u8 *src_hw,
+			 __be32 target_ip, const u8 *target_hw);
+
+bool fbxbr_is_valid_ip_packet(struct sk_buff *skb);
+bool fbxbr_is_valid_udp_tcp_packet(struct sk_buff *skb);
+void fbxbr_snat_packet(struct sk_buff *skb, __be32 new_addr);
+void fbxbr_dnat_packet(struct sk_buff *skb, __be32 new_addr);
+
+
+#endif /* !FBXBRIDGE_PRIVATE_H_ */
diff -Nruw linux-6.4-fbx/net/fbxbridge./fbxbr_utils.c linux-6.4-fbx/net/fbxbridge/fbxbr_utils.c
--- linux-6.4-fbx/net/fbxbridge./fbxbr_utils.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/fbxbridge/fbxbr_utils.c	2023-02-27 19:50:23.744290661 +0100
@@ -0,0 +1,204 @@
+#include <linux/if_arp.h>
+#include <net/ip.h>
+#include "fbxbr_private.h"
+
+/*
+ * allocate & send ARP frame to given device
+ *
+ * src_hw can be NULL, device address is used instead
+ * dest_hw can be NULL, device broadcast address is used instead
+ * target_hw can be NULL, empty address is used instead
+ */
+int fbxbr_send_arp_frame(struct net_device *dev, u16 op,
+			 const u8 *dest_hw,
+			 __be32 src_ip, const u8 *src_hw,
+			 __be32 target_ip, const u8 *target_hw)
+{
+	struct arphdr *arp;
+	struct sk_buff *skb;
+	unsigned char *arp_ptr;
+	int hlen = LL_RESERVED_SPACE(dev);
+	int tlen = dev->needed_tailroom;
+	int ret;
+
+	/* prepare arp packet */
+	skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_reserve(skb, hlen);
+	skb_reset_network_header(skb);
+	arp = skb_put(skb, arp_hdr_len(dev));
+	skb->dev = dev;
+	skb->protocol = htons(ETH_P_ARP);
+
+	if (!src_hw)
+		src_hw = dev->dev_addr;
+	if (!dest_hw)
+		dest_hw = dev->broadcast;
+
+	arp->ar_hrd = htons(dev->type);
+	arp->ar_pro = htons(ETH_P_IP);
+	arp->ar_hln = dev->addr_len;
+	arp->ar_pln = 4;
+	arp->ar_op = htons(op);
+
+	arp_ptr = (unsigned char *)(arp + 1);
+
+	memcpy(arp_ptr, src_hw, dev->addr_len);
+	arp_ptr += dev->addr_len;
+	memcpy(arp_ptr, &src_ip, 4);
+	arp_ptr += 4;
+
+	if (target_hw)
+		memcpy(arp_ptr, target_hw, dev->addr_len);
+	else
+		memset(arp_ptr, 0, dev->addr_len);
+
+	arp_ptr += dev->addr_len;
+	memcpy(arp_ptr, &target_ip, 4);
+
+	ret = dev_hard_header(skb, dev, ETH_P_ARP, dest_hw, src_hw, skb->len);
+	if (ret < 0) {
+		kfree_skb(skb);
+		return ret;
+	}
+
+	return dev_queue_xmit(skb);
+}
+
+/*
+ * validate header fields & checksum, also linearize IP header and
+ * setup transport headers
+ */
+bool fbxbr_is_valid_ip_packet(struct sk_buff *skb)
+{
+	const struct iphdr *iph;
+
+	if (!pskb_may_pull(skb, sizeof (*iph)))
+		return false;
+
+	iph = ip_hdr(skb);
+
+	if (iph->ihl < 5 || iph->version != 4)
+		return false;
+
+	if (!pskb_may_pull(skb, iph->ihl * 4))
+		return false;
+
+	iph = ip_hdr(skb);
+
+	if (ntohs(iph->tot_len) > skb->len)
+		return false;
+
+	skb->transport_header = skb->network_header + iph->ihl * 4;
+
+	return true;
+}
+
+/*
+ * make sure the udp/tcp header is present in the linear section
+ */
+bool fbxbr_is_valid_udp_tcp_packet(struct sk_buff *skb)
+{
+	const struct iphdr *iph;
+
+	iph = ip_hdr(skb);
+
+	switch (iph->protocol) {
+	case IPPROTO_UDP:
+		if (!pskb_may_pull(skb, skb_transport_offset(skb) +
+				   sizeof (struct udphdr)))
+			return false;
+		break;
+	case IPPROTO_TCP:
+		if (!pskb_may_pull(skb, skb_transport_offset(skb) +
+				   sizeof (struct tcphdr)))
+			return false;
+		break;
+	}
+	return true;
+}
+
+
+/*
+ * do source or destination nat
+ */
+static void recalculate_l4_checksum(struct sk_buff *skb,
+				    __be32 osaddr, __be32 odaddr)
+{
+	struct iphdr *iph;
+	u16 check;
+
+	iph = ip_hdr(skb);
+	if (iph->frag_off & htons(IP_OFFSET))
+		return;
+
+	if (!fbxbr_is_valid_udp_tcp_packet(skb))
+		return;
+
+	iph = ip_hdr(skb);
+
+	switch (iph->protocol) {
+	case IPPROTO_TCP:
+	{
+		struct tcphdr *tcph;
+
+		tcph = (struct tcphdr *)skb_transport_header(skb);
+		check = tcph->check;
+		if (skb->ip_summed != CHECKSUM_COMPLETE)
+			check = ~check;
+		check = csum_tcpudp_magic(iph->saddr, iph->daddr, 0, 0, check);
+		check = csum_tcpudp_magic(~osaddr, ~odaddr, 0, 0, ~check);
+		if (skb->ip_summed == CHECKSUM_COMPLETE)
+			check = ~check;
+		tcph->check = check;
+		break;
+	}
+
+	case IPPROTO_UDP:
+	{
+		struct udphdr *udph;
+
+		udph = (struct udphdr *)skb_transport_header(skb);
+		check = udph->check;
+		if (check != 0) {
+			check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+						  0, 0, ~check);
+			check = csum_tcpudp_magic(~osaddr, ~odaddr, 0, 0,
+						  ~check);
+			udph->check = check ? : 0xFFFF;
+		}
+		break;
+	}
+	}
+}
+
+/*
+ * packet must be valid IPv4 with header in linear section
+ */
+void fbxbr_snat_packet(struct sk_buff *skb, __be32 new_addr)
+{
+	struct iphdr *ip;
+	__be32 oaddr;
+
+	ip = ip_hdr(skb);
+	oaddr = ip->saddr;
+	ip->saddr = new_addr;
+	ip->check = 0;
+	ip->check = ip_fast_csum((unsigned char *) ip, ip->ihl);
+	recalculate_l4_checksum(skb, oaddr, ip->daddr);
+}
+
+void fbxbr_dnat_packet(struct sk_buff *skb, __be32 new_addr)
+{
+	struct iphdr *ip;
+	__be32 oaddr;
+
+	ip = ip_hdr(skb);
+	oaddr = ip->daddr;
+	ip->daddr = new_addr;
+	ip->check = 0;
+	ip->check = ip_fast_csum((unsigned char *) ip, ip->ihl);
+	recalculate_l4_checksum(skb, ip->saddr, oaddr);
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/net/ipv4/ip_ffn.c	2024-01-19 17:01:19.905848123 +0100
@@ -0,0 +1,766 @@
+/*
+ * IP fast forwarding and NAT
+ *
+ * Very restrictive code that only copes with non-fragmented UDP and TCP
+ * packets, that are routed and NATed with no other modification.
+ *
+ * Provide a fast path for established conntrack entries so that
+ * packets go out ASAP.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/jhash.h>
+#include <linux/proc_fs.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+
+#include <net/ip_ffn.h>
+
+#define FFN_CACHE_SIZE		256
+#define MAX_FFN_ENTRY		2048
+
+static DEFINE_SPINLOCK(ffn_lock);
+static struct list_head ffn_cache[FFN_CACHE_SIZE];
+static struct list_head ffn_all;
+static unsigned int ffn_entry_count;
+
+/*
+ * hash on five parameters
+ */
+static inline unsigned int ffn_hash(const struct ffn_lookup_key *k)
+{
+	return jhash_3words(k->sip, k->is_tcp ? k->dip : ~k->dip,
+			    k->sport | k->dport << 16, 0);
+}
+
+/*
+ * attempt to find entry with given value in cache, under RCU lock
+ */
+struct ffn_lookup_entry *__ffn_get_rcu(const struct ffn_lookup_key *k)
+{
+	struct ffn_lookup_entry *tmp;
+	unsigned int hash;
+	u8 protocol;
+
+	protocol = (k->is_tcp) ? IPPROTO_TCP : IPPROTO_UDP;
+	hash = ffn_hash(k);
+
+	list_for_each_entry_rcu(tmp, &ffn_cache[hash % FFN_CACHE_SIZE], next) {
+		if (tmp->sip == k->sip && tmp->dip == k->dip &&
+		    tmp->sport == k->sport && tmp->dport == k->dport &&
+		    tmp->protocol == protocol)
+			return tmp;
+	}
+	return NULL;
+}
+
+EXPORT_SYMBOL(__ffn_get_rcu);
+
+
+/*
+ * attempt to find entry with given value in cache, under ff lock
+ */
+static struct ffn_lookup_entry *__ffn_get(const struct ffn_lookup_key *k)
+{
+	struct ffn_lookup_entry *tmp;
+	unsigned int hash;
+	u8 protocol;
+
+	protocol = (k->is_tcp) ? IPPROTO_TCP : IPPROTO_UDP;
+	hash = ffn_hash(k);
+
+	list_for_each_entry(tmp, &ffn_cache[hash % FFN_CACHE_SIZE], next) {
+		if (tmp->sip == k->sip && tmp->dip == k->dip &&
+		    tmp->sport == k->sport && tmp->dport == k->dport &&
+		    tmp->protocol == protocol)
+			return tmp;
+	}
+	return NULL;
+}
+
+/*
+ * rcu release deferred callback
+ */
+static void delayed_ffn_free_entry(struct rcu_head *rhp)
+{
+	struct ffn_lookup_entry *e;
+
+	e = container_of(rhp, struct ffn_lookup_entry, rcu);
+	if (e->manip.priv_destructor)
+		e->manip.priv_destructor((void *)e->manip.ffn_priv_area);
+
+	dst_release(e->manip.dst);
+	kfree(e);
+}
+
+/*
+ * must be called with ffn lock held
+ */
+static void __ffn_remove_entry(struct ffn_lookup_entry *e)
+{
+	list_del_rcu(&e->next);
+	list_del_rcu(&e->all_next);
+	call_rcu(&e->rcu, delayed_ffn_free_entry);
+	ffn_entry_count--;
+}
+
+/*
+ *
+ */
+static void ffn_find_and_remove(const struct ffn_lookup_key *k)
+{
+	struct ffn_lookup_entry *e;
+
+	spin_lock_bh(&ffn_lock);
+	e = __ffn_get(k);
+	if (e)
+		__ffn_remove_entry(e);
+	spin_unlock_bh(&ffn_lock);
+}
+
+/*
+ * must be called with ffn lock held
+ */
+static int __ffn_add_entry(struct ffn_lookup_entry *e)
+{
+	struct ffn_lookup_key k = {
+		.sip = e->sip,
+		.dip = e->dip,
+		.sport = e->sport,
+		.dport = e->dport,
+		.is_tcp = (e->protocol == IPPROTO_TCP)
+	};
+
+	/* make sure it's not present */
+	if (__ffn_get(&k))
+		return 1;
+
+	if (ffn_entry_count >= MAX_FFN_ENTRY)
+		return 1;
+
+	/* add new entry */
+	list_add_tail_rcu(&e->next, &ffn_cache[ffn_hash(&k) % FFN_CACHE_SIZE]);
+	list_add_tail_rcu(&e->all_next, &ffn_all);
+	ffn_entry_count++;
+	return 0;
+}
+
+/*
+ *
+ */
+static inline __sum16 checksum_adjust(u32 osip,
+				      u32 nsip,
+				      u32 odip,
+				      u32 ndip,
+				      u16 osport,
+				      u16 nsport,
+				      u16 odport,
+				      u16 ndport)
+{
+	const u32 old[] = { osip, odip, osport, odport };
+	const u32 new[] = { nsip, ndip, nsport, ndport };
+	__wsum osum, nsum;
+
+	osum = csum_partial(old, sizeof (old), 0);
+	nsum = csum_partial(new, sizeof (new), 0);
+
+	return ~csum_fold(csum_sub(nsum, osum));
+}
+
+/*
+ *
+ */
+static inline __sum16 checksum_adjust_ip(u32 osip,
+					 u32 nsip,
+					 u32 odip,
+					 u32 ndip)
+{
+	const u32 old[] = { osip, odip };
+	const u32 new[] = { nsip, ndip };
+	__wsum osum, nsum;
+
+	osum = csum_partial(old, sizeof (old), 0);
+	nsum = csum_partial(new, sizeof (new), 0);
+
+	/* -1 for TTL decrease */
+	return ~csum_fold(csum_sub(csum_sub(nsum, osum), 1));
+}
+
+/*
+ * two hooks into netfilter code
+ */
+extern int external_tcpv4_packet(struct nf_conn *ct,
+				 struct sk_buff *skb,
+				 unsigned int dataoff,
+				 enum ip_conntrack_info ctinfo);
+
+extern int external_udpv4_packet(struct nf_conn *ct,
+				 struct sk_buff *skb,
+				 unsigned int dataoff,
+				 enum ip_conntrack_info ctinfo);
+
+extern int ip_local_deliver_finish(struct net *net,
+				   struct sock *sk, struct sk_buff *skb);
+
+/*
+ * check if packet is in ffn cache, or mark it if it can be added
+ * later
+ */
+int ip_ffn_process(struct sk_buff *skb)
+{
+	struct ffn_lookup_entry *e;
+	struct nf_conntrack *nfct;
+	struct iphdr *iph;
+	struct tcphdr *tcph = NULL;
+	struct udphdr *udph = NULL;
+	struct ffn_lookup_key k;
+	bool remove_me, drop_ct;
+	u16 tcheck;
+	u8 proto;
+	int res, added_when;
+
+	if (!net_eq(dev_net(skb->dev), &init_net))
+		goto not_ffnable;
+
+	iph = ip_hdr(skb);
+
+	/* refuse fragmented IP packet, or packets with IP options */
+	if (iph->ihl > 5 || (iph->frag_off & htons(IP_MF | IP_OFFSET)))
+		goto not_ffnable;
+
+	/* check encapsulated protocol is udp or tcp */
+	if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
+		goto not_ffnable;
+
+	if (iph->ttl <= 1)
+		goto not_ffnable;
+
+	proto = iph->protocol;
+	if (proto == IPPROTO_TCP) {
+		if (skb_headlen(skb) < sizeof (*iph) + sizeof (struct tcphdr))
+			goto not_ffnable;
+
+		tcph = (struct tcphdr *)((unsigned char *)iph + sizeof (*iph));
+
+		if (tcph->doff * 4 < sizeof (struct tcphdr))
+			goto not_ffnable;
+
+		if (skb_headlen(skb) < sizeof (*iph) + tcph->doff * 4)
+			goto not_ffnable;
+
+		k.sport = tcph->source;
+		k.dport = tcph->dest;
+		k.is_tcp = true;
+	} else {
+		if (skb_headlen(skb) < sizeof (*iph) + sizeof (struct udphdr))
+			goto not_ffnable;
+
+		udph = (struct udphdr *)((unsigned char *)iph + sizeof (*iph));
+		k.sport = udph->source;
+		k.dport = udph->dest;
+		k.is_tcp = false;
+	}
+
+	rcu_read_lock();
+
+	k.sip = iph->saddr;
+	k.dip = iph->daddr;
+	e = __ffn_get_rcu(&k);
+	if (!e) {
+		rcu_read_unlock();
+		goto ffnable;
+	}
+
+	if (e->manip.dst->obsolete > 0) {
+		rcu_read_unlock();
+		ffn_find_and_remove(&k);
+		goto ffnable;
+	}
+
+	remove_me = false;
+	nfct = &e->manip.ct->ct_general;
+	nf_conntrack_get(nfct);
+
+	if (proto == IPPROTO_TCP) {
+		/* do sequence number checking and update
+		 * conntrack info */
+		res = external_tcpv4_packet(e->manip.ct, skb, sizeof (*iph),
+					    e->manip.ctinfo);
+		if (e->manip.ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+			remove_me = true;
+		tcheck = tcph->check;
+
+	} else {
+		res = external_udpv4_packet(e->manip.ct, skb, sizeof (*iph),
+					    e->manip.ctinfo);
+		tcheck = udph->check;
+	}
+
+	if (unlikely(res != NF_ACCEPT)) {
+		/* packet rejected by conntrack, unless asked to drop,
+		 * send it back into kernel */
+		rcu_read_unlock();
+		nf_conntrack_put(nfct);
+
+		if (remove_me)
+			ffn_find_and_remove(&k);
+
+		if (res == NF_DROP) {
+			dev_kfree_skb(skb);
+			return 0;
+		}
+
+		goto ffnable;
+	}
+
+	if (!e->manip.alter)
+		goto fix_ip_hdr;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL) {
+		/* fix ports & transport protocol checksum */
+		if (proto == IPPROTO_TCP) {
+			tcph->source = e->manip.new_sport;
+			tcph->dest = e->manip.new_dport;
+			tcph->check = csum16_sub(tcph->check,
+						 e->manip.l4_adjustment);
+		} else {
+			udph->source = e->manip.new_sport;
+			udph->dest = e->manip.new_dport;
+			if (udph->check) {
+				u16 tcheck;
+
+				tcheck = csum16_sub(udph->check,
+						    e->manip.l4_adjustment);
+				udph->check = tcheck ? tcheck : 0xffff;
+			}
+		}
+	} else {
+		unsigned int len;
+
+		/*
+		 * assume tcph->check only covers ip pseudo header, so
+		 * don't update checksum wrt port change
+		 *
+		 * we might check skb->csum_offset to confirm that
+		 * this is a valid assertion
+		 */
+		if (proto == IPPROTO_TCP) {
+			len = skb->len - ((void *)tcph - (void *)iph);
+			tcheck = ~csum_tcpudp_magic(e->manip.new_sip,
+						    e->manip.new_dip,
+						    len, IPPROTO_TCP, 0);
+			tcph->check = tcheck;
+			tcph->source = e->manip.new_sport;
+			tcph->dest = e->manip.new_dport;
+		} else {
+			len = skb->len - ((void *)udph - (void *)iph);
+			if (udph->check) {
+				tcheck = ~csum_tcpudp_magic(e->manip.new_sip,
+							    e->manip.new_dip,
+							    len,
+							    IPPROTO_UDP, 0);
+				udph->check = tcheck ? tcheck : 0xffff;
+			}
+			udph->source = e->manip.new_sport;
+			udph->dest = e->manip.new_dport;
+		}
+	}
+
+	/* update IP header field */
+	iph->saddr = e->manip.new_sip;
+	iph->daddr = e->manip.new_dip;
+
+fix_ip_hdr:
+	iph->ttl--;
+
+	if (e->manip.tos_change) {
+		iph->tos = e->manip.new_tos;
+		iph->check = 0;
+		iph->check = ip_fast_csum((u8 *)iph, 5);
+	} else {
+		iph->check = csum16_sub(iph->check,
+					e->manip.ip_adjustment);
+	}
+
+	/* forward skb */
+	if (e->manip.force_skb_prio)
+		skb->priority = e->manip.new_skb_prio;
+	else
+		skb->priority = rt_tos2priority(iph->tos);
+
+	skb->mark = e->manip.new_mark;
+
+#ifdef CONFIG_IP_FFN_PROCFS
+	e->forwarded_packets++;
+	e->forwarded_bytes += skb->len;
+#endif
+
+	skb_dst_drop(skb);
+	skb_dst_set(skb, dst_clone(e->manip.dst));
+
+	added_when = e->added_when;
+
+	drop_ct = true;
+	if (nfct != skb_nfct(skb)) {
+		if (unlikely(skb_nfct(skb) != NULL)) {
+			/*
+			 * conntrack is not NULL here and it is not
+			 * the same as the one we have in the
+			 * ffn_entry; this should not happen, warn once
+			 * and switch to slow path.
+			 */
+			WARN_ONCE(1,
+				  "weird skb->nfct %p, NULL was expected\n",
+				  skb_nfct(skb));
+			printk_once(KERN_WARNING "ffn entry:\n"
+				    " added_when: %i\n"
+				    " sip: %pI4 -> %pI4\n"
+				    " dip: %pI4 -> %pI4\n"
+				    " sport: %u -> %u\n"
+				    " dport: %u -> %u\n",
+				    e->added_when,
+				    &e->sip, &e->manip.new_sip,
+				    &e->dip, &e->manip.new_dip,
+				    htons(e->sport), htons(e->manip.new_sport),
+				    htons(e->dport), htons(e->manip.new_dport));
+			rcu_read_unlock();
+
+			if (remove_me)
+				ffn_find_and_remove(&k);
+			goto not_ffnable;
+		}
+
+		nf_ct_set(skb, (struct nf_conn *)nfct, e->manip.ctinfo);
+		drop_ct = false;
+	}
+
+	rcu_read_unlock();
+	if (unlikely(remove_me))
+		ffn_find_and_remove(&k);
+
+	if (drop_ct) {
+		/*
+		 * skbs to/from localhost will have the conntrack
+		 * already set, don't leak references here.
+		 */
+		nf_conntrack_put(nfct);
+	}
+
+	skb->ffn_state = FFN_STATE_FAST_FORWARDED;
+	IPCB(skb)->flags |= IPSKB_FORWARDED;
+
+	if (added_when == IP_FFN_FINISH_OUT)
+		dst_output(&init_net, skb->sk, skb);
+	else
+		ip_local_deliver_finish(&init_net, skb->sk, skb);
+
+	return 0;
+
+ffnable:
+	skb->ffn_state = FFN_STATE_FORWARDABLE;
+	skb->ffn_orig_tos = iph->tos;
+	return 1;
+
+not_ffnable:
+	skb->ffn_state = FFN_STATE_INCOMPATIBLE;
+	return 1;
+}
+
+/*
+ * check if skb is a candidate for ffn, and if so add it to the ffn cache
+ *
+ * called after post routing
+ */
+void ip_ffn_add(struct sk_buff *skb, int when)
+{
+	struct nf_conn *ct;
+	struct nf_conntrack_tuple *tuple, *rtuple;
+	enum ip_conntrack_info ctinfo;
+	struct ffn_lookup_entry *e;
+	struct iphdr *iph;
+	struct net *skb_net;
+	int dir;
+
+	skb_net = dev_net(skb->dev);
+	if (!unlikely(net_eq(skb_net, &init_net)))
+		return;
+
+	/* make sure external_tcp_packet/external_udp_packet won't
+	 * attempt to checksum packet, the ffn code does not update
+	 * skb->csum, which must stay valid if skb_checksum_complete
+	 * has been called */
+	if (unlikely(skb_net->ct.sysctl_checksum))
+		skb_net->ct.sysctl_checksum = 0;
+
+	if (ffn_entry_count >= MAX_FFN_ENTRY)
+		return;
+
+	iph = ip_hdr(skb);
+
+	if (skb_dst(skb)->output != ip_output && when == IP_FFN_FINISH_OUT)
+		return;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct || ctinfo == IP_CT_UNTRACKED)
+		return;
+
+	if ((ctinfo != IP_CT_ESTABLISHED) &&
+	    (ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)) {
+		return;
+	}
+
+	if (nfct_help(ct))
+		return;
+
+	dir = (ctinfo == IP_CT_ESTABLISHED) ?
+		IP_CT_DIR_ORIGINAL : IP_CT_DIR_REPLY;
+	tuple = &ct->tuplehash[dir].tuple;
+
+	if (tuple->dst.protonum != IPPROTO_TCP &&
+	    tuple->dst.protonum != IPPROTO_UDP)
+		return;
+
+	if (tuple->dst.protonum == IPPROTO_TCP &&
+	    ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+		return;
+
+	rtuple = &ct->tuplehash[1 - dir].tuple;
+
+	e = kmalloc(sizeof (*e), GFP_ATOMIC);
+	if (!e)
+		return;
+
+	e->added_when = when;
+	e->sip = tuple->src.u3.ip;
+	e->dip = tuple->dst.u3.ip;
+	e->sport = tuple->src.u.all;
+	e->dport = tuple->dst.u.all;
+	e->protocol = tuple->dst.protonum;
+
+#ifdef CONFIG_IP_FFN_PROCFS
+	e->forwarded_packets = 0;
+	e->forwarded_bytes = 0;
+#endif
+
+	e->manip.new_sip = rtuple->dst.u3.ip;
+	e->manip.new_dip = rtuple->src.u3.ip;
+	e->manip.new_sport = rtuple->dst.u.all;
+	e->manip.new_dport = rtuple->src.u.all;
+
+	if (e->manip.new_sip == e->sip &&
+	    e->manip.new_dip == e->dip &&
+	    e->manip.new_sport == e->sport &&
+	    e->manip.new_dport == e->dport)
+		e->manip.alter = 0;
+	else
+		e->manip.alter = 1;
+
+	if (e->manip.alter) {
+		/* compute checksum adjustment */
+		e->manip.l4_adjustment = checksum_adjust(e->sip,
+							 e->manip.new_sip,
+							 e->dip,
+							 e->manip.new_dip,
+							 e->sport,
+							 e->manip.new_sport,
+							 e->dport,
+							 e->manip.new_dport);
+	}
+
+	e->manip.ip_adjustment = checksum_adjust_ip(e->sip,
+						    e->manip.new_sip,
+						    e->dip,
+						    e->manip.new_dip);
+
+	if (skb->ffn_orig_tos != iph->tos) {
+		e->manip.tos_change = 1;
+		e->manip.new_tos = iph->tos;
+	} else
+		e->manip.tos_change = 0;
+
+	if (skb->priority != rt_tos2priority(iph->tos)) {
+		e->manip.force_skb_prio = 1;
+		e->manip.new_skb_prio = skb->priority;
+	} else
+		e->manip.force_skb_prio = 0;
+
+	e->manip.new_mark = skb->mark;
+	e->manip.priv_destructor = NULL;
+	e->manip.dst = skb_dst(skb);
+	dst_hold(e->manip.dst);
+	e->manip.ct = ct;
+	e->manip.ctinfo = ctinfo;
+
+	spin_lock_bh(&ffn_lock);
+	if (__ffn_add_entry(e)) {
+		spin_unlock_bh(&ffn_lock);
+		dst_release(e->manip.dst);
+		kfree(e);
+		return;
+	}
+	spin_unlock_bh(&ffn_lock);
+}
+
+/*
+ * netfilter callback when conntrack is about to be destroyed
+ */
+void ip_ffn_ct_destroy(struct nf_conn *ct)
+{
+	struct nf_conntrack_tuple *tuple;
+	int dir;
+
+	/* locate all entry that use this conntrack */
+	for (dir = 0; dir < 2; dir++) {
+		struct ffn_lookup_key k;
+
+		tuple = &ct->tuplehash[dir].tuple;
+
+		if (tuple->dst.protonum != IPPROTO_TCP &&
+		    tuple->dst.protonum != IPPROTO_UDP)
+			return;
+
+		k.sip = tuple->src.u3.ip;
+		k.dip = tuple->dst.u3.ip;
+		k.sport = tuple->src.u.all;
+		k.dport = tuple->dst.u.all;
+		k.is_tcp = (tuple->dst.protonum == IPPROTO_TCP);
+		ffn_find_and_remove(&k);
+	}
+}
+
+/*
+ * initialize ffn cache data
+ */
+static void __ip_ffn_init_cache(void)
+{
+	int i;
+
+	for (i = 0; i < FFN_CACHE_SIZE; i++)
+		INIT_LIST_HEAD(&ffn_cache[i]);
+	INIT_LIST_HEAD(&ffn_all);
+	ffn_entry_count = 0;
+}
+
+/*
+ * flush all ffn cache
+ */
+void ip_ffn_flush_all(void)
+{
+	struct ffn_lookup_entry *e, *tmp;
+
+	spin_lock_bh(&ffn_lock);
+	list_for_each_entry_safe(e, tmp, &ffn_all, all_next)
+		__ffn_remove_entry(e);
+	spin_unlock_bh(&ffn_lock);
+}
+
+#ifdef CONFIG_IP_FFN_PROCFS
+struct proc_dir_entry *proc_net_ip_ffn;
+
+static int ip_ffn_entries_show(struct seq_file *m, void *v)
+{
+	int i;
+
+	rcu_read_lock();
+
+	for (i = 0; i < FFN_CACHE_SIZE; ++i) {
+		struct ffn_lookup_entry *e;
+
+		if (list_empty(&ffn_cache[i]))
+			continue;
+
+		seq_printf(m, "Bucket %i:\n", i);
+		list_for_each_entry_rcu(e, &ffn_cache[i], next) {
+			seq_printf(m, " Protocol: ");
+			switch (e->protocol) {
+			case IPPROTO_TCP:
+				seq_printf(m, "TCPv4\n");
+				break;
+			case IPPROTO_UDP:
+				seq_printf(m, "UDPv4\n");
+				break;
+			default:
+				seq_printf(m, "ipproto_%i\n", e->protocol);
+				break;
+			}
+			seq_printf(m, " Original flow: %pI4:%u -> %pI4:%u\n",
+				   &e->sip,
+				   ntohs(e->sport),
+				   &e->dip,
+				   ntohs(e->dport));
+
+			if (e->sip != e->manip.new_sip ||
+			    e->dip != e->manip.new_dip ||
+			    e->sport != e->manip.new_sport ||
+			    e->dport != e->manip.new_dport) {
+				seq_printf(m,
+					   " Modified flow: %pI4:%u -> "
+					   "%pI4:%u\n",
+					   &e->manip.new_sip,
+					   ntohs(e->manip.new_sport),
+					   &e->manip.new_dip,
+					   ntohs(e->manip.new_dport));
+			}
+
+			seq_printf(m, "  Forwarded packets: %u\n",
+				   e->forwarded_packets);
+			seq_printf(m, "  Forwarded bytes: %llu\n",
+				   e->forwarded_bytes);
+			seq_printf(m, "\n");
+		}
+	}
+
+	rcu_read_unlock();
+	return 0;
+}
+
+static int ip_ffn_entries_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ip_ffn_entries_show, NULL);
+}
+
+static const struct proc_ops ip_ffn_entries_fops = {
+	.proc_open	= ip_ffn_entries_open,
+	.proc_release	= single_release,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+};
+
+static int __init __ip_ffn_init_procfs(void)
+{
+	proc_net_ip_ffn = proc_net_mkdir(&init_net, "ip_ffn",
+					 init_net.proc_net);
+	if (!proc_net_ip_ffn) {
+		printk(KERN_ERR "proc_mkdir() has failed for 'net/ip_ffn'.\n");
+		return -1;
+	}
+
+	if (proc_create("entries", 0400, proc_net_ip_ffn,
+			&ip_ffn_entries_fops) == NULL) {
+		printk(KERN_ERR "proc_create() has failed for "
+		       "'net/ip_ffn/entries'.\n");
+		return -1;
+	}
+	return 0;
+}
+#endif
+
+/*
+ * initialize ffn
+ */
+void __init ip_ffn_init(void)
+{
+	printk("IP Fast Forward and NAT enabled\n");
+	__ip_ffn_init_cache();
+
+#ifdef CONFIG_IP_FFN_PROCFS
+	if (__ip_ffn_init_procfs() < 0)
+		printk(KERN_WARNING "IP FFN: unable to create proc entries.\n");
+#endif
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/net/ipv6/ip6_ffn.c	2024-01-19 17:01:19.905848123 +0100
@@ -0,0 +1,705 @@
+/*
+ * IPv6 fast forwarding and NAT
+ *
+ * Very restrictive code that only copes with non-fragmented UDP and TCP
+ * packets that are routed and NATed with no other modification.
+ *
+ * Provides a fast path for established conntrack entries so that
+ * packets go out ASAP.
+ */
+
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/jhash.h>
+#include <linux/proc_fs.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+
+#include <net/ip6_ffn.h>
+#include <net/dsfield.h>
+
+#define FFN6_CACHE_SIZE		256
+#define MAX_FFN6_ENTRY		2048
+
+static DEFINE_SPINLOCK(ffn_lock);
+static struct list_head ffn6_cache[FFN6_CACHE_SIZE];
+static struct list_head ffn6_all;
+static unsigned int ffn6_entry_count;
+
+/*
+ * RCU deferred-release callback
+ */
+static void delayed_ffn_free_entry(struct rcu_head *rhp)
+{
+	struct ffn6_lookup_entry *e;
+
+	e = container_of(rhp, struct ffn6_lookup_entry, rcu);
+	if (e->manip.priv_destructor)
+		e->manip.priv_destructor((void *)e->manip.ffn_priv_area);
+
+	dst_release(e->manip.dst);
+	kfree(e);
+}
+
+/*
+ * must be called with ffn lock held
+ */
+static void __ffn6_remove_entry(struct ffn6_lookup_entry *e)
+{
+	list_del_rcu(&e->next);
+	list_del_rcu(&e->all_next);
+	call_rcu(&e->rcu, delayed_ffn_free_entry);
+	ffn6_entry_count--;
+}
+
+/*
+ * hash on the flow 5-tuple
+ */
+static inline unsigned int ffn6_hash(const struct ffn6_lookup_key *k)
+{
+	return jhash_3words(k->sip[3], k->is_tcp ? k->dip[3] : ~k->dip[3],
+			    k->sport | k->dport << 16, 0);
+}
+
+/*
+ * attempt to find entry with given value in cache, under RCU lock
+ */
+struct ffn6_lookup_entry *
+__ffn6_get_rcu(const struct ffn6_lookup_key *k)
+{
+	struct ffn6_lookup_entry *tmp;
+	unsigned int hash;
+	u8 protocol;
+
+	protocol = (k->is_tcp) ? IPPROTO_TCP : IPPROTO_UDP;
+	hash = ffn6_hash(k);
+
+	list_for_each_entry_rcu(tmp,
+				&ffn6_cache[hash % FFN6_CACHE_SIZE], next) {
+		if (!memcmp(tmp->sip, k->sip, 16) &&
+		    !memcmp(tmp->dip, k->dip, 16) &&
+		    tmp->sport == k->sport && tmp->dport == k->dport &&
+		    tmp->protocol == protocol)
+			return tmp;
+	}
+	return NULL;
+}
+
+EXPORT_SYMBOL(__ffn6_get_rcu);
+
+/*
+ * attempt to find entry with given value in cache, under ffn lock
+ */
+struct ffn6_lookup_entry *__ffn6_get(const struct ffn6_lookup_key *k)
+{
+	struct ffn6_lookup_entry *tmp;
+	unsigned int hash;
+	u8 protocol;
+
+	protocol = (k->is_tcp) ? IPPROTO_TCP : IPPROTO_UDP;
+	hash = ffn6_hash(k);
+
+	list_for_each_entry(tmp, &ffn6_cache[hash % FFN6_CACHE_SIZE], next) {
+		if (!memcmp(tmp->sip, k->sip, 16) &&
+		    !memcmp(tmp->dip, k->dip, 16) &&
+		    tmp->sport == k->sport && tmp->dport == k->dport &&
+		    tmp->protocol == protocol)
+			return tmp;
+	}
+	return NULL;
+}
+
+/*
+ *
+ */
+static void ffn6_find_and_remove(const struct ffn6_lookup_key *k)
+{
+	struct ffn6_lookup_entry *e;
+
+	spin_lock_bh(&ffn_lock);
+	e = __ffn6_get(k);
+	if (e)
+		__ffn6_remove_entry(e);
+	spin_unlock_bh(&ffn_lock);
+}
+
+/*
+ * must be called with ffn lock held
+ */
+static int __ffn6_add_entry(struct ffn6_lookup_entry *e)
+{
+	struct ffn6_lookup_key k = {
+		.sip = e->sip,
+		.dip = e->dip,
+		.sport = e->sport,
+		.dport = e->dport,
+		.is_tcp = (e->protocol == IPPROTO_TCP)
+	};
+
+	/* make sure it's not present */
+	if (__ffn6_get(&k))
+		return 1;
+
+	if (ffn6_entry_count >= MAX_FFN6_ENTRY)
+		return 1;
+
+	/* add new entry */
+	list_add_tail(&e->next, &ffn6_cache[ffn6_hash(&k) % FFN6_CACHE_SIZE]);
+	list_add_tail(&e->all_next, &ffn6_all);
+	ffn6_entry_count++;
+	return 0;
+}
+
+/*
+ * two hooks into netfilter code
+ */
+extern int external_tcpv6_packet(struct nf_conn *ct,
+				 struct sk_buff *skb,
+				 unsigned int dataoff,
+				 enum ip_conntrack_info ctinfo);
+
+extern int external_udpv6_packet(struct nf_conn *ct,
+				 struct sk_buff *skb,
+				 unsigned int dataoff,
+				 enum ip_conntrack_info ctinfo);
+
+/*
+ * check if packet is in ffn cache, or mark it if it can be added
+ * later
+ */
+int ipv6_ffn_process(struct sk_buff *skb)
+{
+	struct ffn6_lookup_entry *e;
+	struct nf_conntrack *nfct;
+	struct ipv6hdr *iph;
+	struct tcphdr *tcph = NULL;
+	struct udphdr *udph = NULL;
+	struct ffn6_lookup_key k;
+	bool remove_me, drop_ct;
+	int added_when;
+	u8 proto;
+	int res;
+
+	if (!net_eq(dev_net(skb->dev), &init_net))
+		goto not_ffnable;
+
+	iph = ipv6_hdr(skb);
+
+	/* check encapsulated protocol is udp or tcp */
+	proto = iph->nexthdr;
+	if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
+		goto not_ffnable;
+
+	if (iph->hop_limit <= 1 || !iph->payload_len)
+		goto not_ffnable;
+
+	/* TODO: implement this later, no hardware to test for now */
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		goto not_ffnable;
+
+	proto = iph->nexthdr;
+	if (proto == IPPROTO_TCP) {
+		if (skb_headlen(skb) < sizeof (*iph) + sizeof (struct tcphdr))
+			goto not_ffnable;
+
+		tcph = (struct tcphdr *)((unsigned char *)iph + sizeof (*iph));
+
+		if (tcph->doff * 4 < sizeof (struct tcphdr))
+			goto not_ffnable;
+
+		if (skb_headlen(skb) < sizeof (*iph) + tcph->doff * 4)
+			goto not_ffnable;
+
+		k.sport = tcph->source;
+		k.dport = tcph->dest;
+		k.is_tcp = true;
+	} else {
+
+		if (skb_headlen(skb) < sizeof (*iph) + sizeof (struct udphdr))
+			goto not_ffnable;
+
+		udph = (struct udphdr *)((unsigned char *)iph + sizeof (*iph));
+		k.sport = udph->source;
+		k.dport = udph->dest;
+		k.is_tcp = false;
+	}
+
+	rcu_read_lock();
+
+	k.sip = iph->saddr.s6_addr32;
+	k.dip = iph->daddr.s6_addr32;
+
+	e = __ffn6_get_rcu(&k);
+	if (!e) {
+		rcu_read_unlock();
+		goto ffnable;
+	}
+
+	if (e->manip.dst->obsolete > 0) {
+		rcu_read_unlock();
+		ffn6_find_and_remove(&k);
+		goto ffnable;
+	}
+
+	nfct = &e->manip.ct->ct_general;
+	nf_conntrack_get(nfct);
+
+	remove_me = false;
+	if (proto == IPPROTO_TCP) {
+		/* do sequence number checking and update
+		 * conntrack info */
+		res = external_tcpv6_packet(e->manip.ct, skb, sizeof (*iph),
+					    e->manip.ctinfo);
+		if (e->manip.ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+			remove_me = true;
+	} else {
+		res = external_udpv6_packet(e->manip.ct, skb, sizeof (*iph),
+					    e->manip.ctinfo);
+	}
+
+	if (unlikely(res != NF_ACCEPT)) {
+		/* packet rejected by conntrack, unless asked to drop,
+		 * send it back into kernel */
+		rcu_read_unlock();
+		nf_conntrack_put(nfct);
+
+		if (remove_me)
+			ffn6_find_and_remove(&k);
+
+		if (res == NF_DROP) {
+			dev_kfree_skb(skb);
+			return 0;
+		}
+
+		goto ffnable;
+	}
+
+	if (!e->manip.alter)
+		goto fix_ip_hdr;
+
+	/* fix ports & transport protocol checksum */
+	if (proto == IPPROTO_TCP) {
+		tcph->source = e->manip.new_sport;
+		tcph->dest = e->manip.new_dport;
+		tcph->check = csum16_sub(tcph->check, e->manip.adjustment);
+	} else {
+		udph->source = e->manip.new_sport;
+		udph->dest = e->manip.new_dport;
+		if (udph->check) {
+			u16 tcheck;
+
+			tcheck = csum16_sub(udph->check, e->manip.adjustment);
+			udph->check = tcheck ? tcheck : 0xffff;
+		}
+	}
+
+	memcpy(iph->saddr.s6_addr32, e->manip.new_sip, 16);
+	memcpy(iph->daddr.s6_addr32, e->manip.new_dip, 16);
+
+fix_ip_hdr:
+	/* update IP header field */
+	iph->hop_limit--;
+	if (e->manip.tos_change)
+		ipv6_change_dsfield(iph, 0, e->manip.new_tos);
+
+	if (e->manip.force_skb_prio)
+		skb->priority = e->manip.new_skb_prio;
+	else
+		skb->priority = rt_tos2priority(ipv6_get_dsfield(iph));
+
+	skb->mark = e->manip.new_mark;
+
+#ifdef CONFIG_IPV6_FFN_PROCFS
+	e->forwarded_packets++;
+	e->forwarded_bytes += skb->len;
+#endif
+
+	skb_dst_drop(skb);
+	skb_dst_set(skb, dst_clone(e->manip.dst));
+
+	added_when = e->added_when;
+
+	drop_ct = true;
+	if (nfct != skb_nfct(skb)) {
+		if (unlikely(skb_nfct(skb) != NULL)) {
+			/*
+			 * conntrack is not NULL here and it is not
+			 * the same as the one we have in the
			 * ffn_entry; this should not happen, warn once
+			 * and switch to slow path.
+			 */
+			WARN_ONCE(1,
+				  "weird skb->nfct %p, NULL was expected\n",
+				  skb_nfct(skb));
+			printk_once(KERN_WARNING "ffn entry:\n"
+				    " added_when: %i\n"
+				    " sip: %pI6 -> %pI6\n"
+				    " dip: %pI6 -> %pI6\n"
+				    " sport: %u -> %u\n"
+				    " dport: %u -> %u\n",
+				    e->added_when,
+				    e->sip, e->manip.new_sip,
+				    e->dip, e->manip.new_dip,
+				    htons(e->sport), htons(e->manip.new_sport),
+				    htons(e->dport), htons(e->manip.new_dport));
+			rcu_read_unlock();
+
+			if (remove_me)
+				ffn6_find_and_remove(&k);
+
+			goto not_ffnable;
+		}
+		nf_ct_set(skb, (struct nf_conn *)nfct, e->manip.ctinfo);
+		drop_ct = false;
+	}
+
+	rcu_read_unlock();
+	if (unlikely(remove_me))
+		ffn6_find_and_remove(&k);
+
+	if (drop_ct) {
+		/*
+		 * skbs to/from localhost will have the conntrack
+		 * already set, don't leak references here.
+		 */
+		nf_conntrack_put(nfct);
+	}
+
+	skb->ffn_state = FFN_STATE_FAST_FORWARDED;
+
+	if (added_when == IPV6_FFN_FINISH_OUT)
+		dst_output(&init_net, skb->sk, skb);
+	else
+		ip6_input_finish(&init_net, skb->sk, skb);
+
+	return 0;
+
+ffnable:
+	skb->ffn_state = FFN_STATE_FORWARDABLE;
+	skb->ffn_orig_tos = ipv6_get_dsfield(iph);
+	return 1;
+
+not_ffnable:
+	skb->ffn_state = FFN_STATE_INCOMPATIBLE;
+	return 1;
+}
+
+/*
+ *
+ */
+static inline __sum16 checksum_adjust(const u32 *osip,
+				      const u32 *nsip,
+				      const u32 *odip,
+				      const u32 *ndip,
+				      u16 osport,
+				      u16 nsport,
+				      u16 odport,
+				      u16 ndport)
+{
+	const u32 oports[] = { osport, odport };
+	const u32 nports[] = { nsport, ndport };
+	__wsum osum, nsum;
+
+	osum = csum_partial(osip, 16, 0);
+	osum = csum_partial(odip, 16, osum);
+	osum = csum_partial(oports, 8, osum);
+
+	nsum = csum_partial(nsip, 16, 0);
+	nsum = csum_partial(ndip, 16, nsum);
+	nsum = csum_partial(nports, 8, nsum);
+
+	return ~csum_fold(csum_sub(nsum, osum));
+}
+
+/*
+ * check if skb is candidate for ffn, and if so add it to ffn cache
+ *
+ * called after post routing
+ */
+void ipv6_ffn_add(struct sk_buff *skb, int when)
+{
+	struct nf_conn *ct;
+	struct nf_conntrack_tuple *tuple, *rtuple;
+	enum ip_conntrack_info ctinfo;
+	struct ffn6_lookup_entry *e;
+	struct ipv6hdr *iph;
+	int dir;
+	struct net *skb_net;
+	u8 tos;
+
+	skb_net = dev_net(skb->dev);
+	if (!unlikely(net_eq(skb_net, &init_net)))
+		return;
+
+	/* make sure external_tcp_packet/external_udp_packet won't
+	 * attempt to checksum packet, the ffn code does not update
+	 * skb->csum, which must stay valid if skb_checksum_complete
+	 * has been called */
+	if (unlikely(skb_net->ct.sysctl_checksum))
+		skb_net->ct.sysctl_checksum = 0;
+
+	if (ffn6_entry_count >= MAX_FFN6_ENTRY)
+		return;
+
+	iph = ipv6_hdr(skb);
+
+	if ((when == IPV6_FFN_FINISH_OUT &&
+	     skb_dst(skb)->output != ip6_output))
+		return;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct || ctinfo == IP_CT_UNTRACKED)
+		return;
+
+	if ((ctinfo != IP_CT_ESTABLISHED) &&
+	    (ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)) {
+		return;
+	}
+
+	if (nfct_help(ct))
+		return;
+
+	dir = (ctinfo == IP_CT_ESTABLISHED) ?
+		IP_CT_DIR_ORIGINAL : IP_CT_DIR_REPLY;
+	tuple = &ct->tuplehash[dir].tuple;
+
+	if (tuple->dst.protonum != IPPROTO_TCP &&
+	    tuple->dst.protonum != IPPROTO_UDP)
+		return;
+
+	if (tuple->dst.protonum == IPPROTO_TCP &&
+	    ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+		return;
+
+	rtuple = &ct->tuplehash[1 - dir].tuple;
+
+	e = kmalloc(sizeof (*e), GFP_ATOMIC);
+	if (!e)
+		return;
+
+	e->added_when = when;
+	memcpy(e->sip, tuple->src.u3.ip6, 16);
+	memcpy(e->dip, tuple->dst.u3.ip6, 16);
+	e->sport = tuple->src.u.all;
+	e->dport = tuple->dst.u.all;
+	e->protocol = tuple->dst.protonum;
+
+#ifdef CONFIG_IPV6_FFN_PROCFS
+	e->forwarded_packets = 0;
+	e->forwarded_bytes = 0;
+#endif
+
+	memcpy(e->manip.new_sip, rtuple->dst.u3.ip6, 16);
+	memcpy(e->manip.new_dip, rtuple->src.u3.ip6, 16);
+	e->manip.new_sport = rtuple->dst.u.all;
+	e->manip.new_dport = rtuple->src.u.all;
+
+	if (!memcmp(e->manip.new_sip, e->sip, 16) &&
+	    !memcmp(e->manip.new_dip, e->dip, 16) &&
+	    e->manip.new_sport == e->sport &&
+	    e->manip.new_dport == e->dport)
+		e->manip.alter = 0;
+	else
+		e->manip.alter = 1;
+
+	if (e->manip.alter) {
		/* compute checksum adjustment */
+		e->manip.adjustment = checksum_adjust(e->sip,
+						      e->manip.new_sip,
+						      e->dip,
+						      e->manip.new_dip,
+						      e->sport,
+						      e->manip.new_sport,
+						      e->dport,
+						      e->manip.new_dport);
+	}
+
+	tos = ipv6_get_dsfield(iph);
+	if (skb->ffn_orig_tos != tos) {
+		e->manip.tos_change = 1;
+		e->manip.new_tos = tos;
+	} else
+		e->manip.tos_change = 0;
+
+	if (skb->priority != rt_tos2priority(tos)) {
+		e->manip.force_skb_prio = 1;
+		e->manip.new_skb_prio = skb->priority;
+	} else
+		e->manip.force_skb_prio = 0;
+
+	e->manip.new_mark = skb->mark;
+	e->manip.dst = skb_dst(skb);
+	e->manip.priv_destructor = NULL;
+	dst_hold(e->manip.dst);
+	e->manip.ct = ct;
+	e->manip.ctinfo = ctinfo;
+
+	spin_lock_bh(&ffn_lock);
+	if (__ffn6_add_entry(e)) {
+		spin_unlock_bh(&ffn_lock);
+		dst_release(e->manip.dst);
+		kfree(e);
+		return;
+	}
+	spin_unlock_bh(&ffn_lock);
+}
+
+/*
+ * netfilter callback when conntrack is about to be destroyed
+ */
+void ipv6_ffn_ct_destroy(struct nf_conn *ct)
+{
+	struct nf_conntrack_tuple *tuple;
+	int dir;
+
+	/* locate all entry that use this conntrack */
+	for (dir = 0; dir < 2; dir++) {
+		struct ffn6_lookup_key k;
+
+		tuple = &ct->tuplehash[dir].tuple;
+
+		if (tuple->dst.protonum != IPPROTO_TCP &&
+		    tuple->dst.protonum != IPPROTO_UDP)
+			return;
+
+		k.sip = tuple->src.u3.ip6;
+		k.dip = tuple->dst.u3.ip6;
+		k.sport = tuple->src.u.all;
+		k.dport = tuple->dst.u.all;
+		k.is_tcp = (tuple->dst.protonum == IPPROTO_TCP);
+		ffn6_find_and_remove(&k);
+	}
+}
+
+/*
+ * initialize ffn cache data
+ */
+static void __ipv6_ffn_init_cache(void)
+{
+	int i;
+
+	for (i = 0; i < FFN6_CACHE_SIZE; i++)
+		INIT_LIST_HEAD(&ffn6_cache[i]);
+	INIT_LIST_HEAD(&ffn6_all);
+	ffn6_entry_count = 0;
+}
+
+/*
+ * flush all ffn cache
+ */
+void ipv6_ffn_flush_all(void)
+{
+	struct ffn6_lookup_entry *e, *tmp;
+
+	spin_lock_bh(&ffn_lock);
+	list_for_each_entry_safe(e, tmp, &ffn6_all, all_next)
+		__ffn6_remove_entry(e);
+	spin_unlock_bh(&ffn_lock);
+}
+
+#ifdef CONFIG_IPV6_FFN_PROCFS
+struct proc_dir_entry *proc_net_ipv6_ffn;
+
+static int ipv6_ffn_entries_show(struct seq_file *m, void *v)
+{
+	int i;
+
+	rcu_read_lock();
+
+	for (i = 0; i < FFN6_CACHE_SIZE; ++i) {
+		struct ffn6_lookup_entry *e;
+
+		if (list_empty(&ffn6_cache[i]))
+			continue;
+
+		seq_printf(m, "Bucket %i:\n", i);
+		list_for_each_entry_rcu(e, &ffn6_cache[i], next) {
+			seq_printf(m, " Protocol: ");
+			switch (e->protocol) {
+			case IPPROTO_TCP:
+				seq_printf(m, "TCPv6\n");
+				break;
+			case IPPROTO_UDP:
+				seq_printf(m, "UDPv6\n");
+				break;
+			default:
+				seq_printf(m, "ipproto_%i\n", e->protocol);
+				break;
+			}
+
+			seq_printf(m, " Original flow: %pI6:%u -> %pI6:%u\n",
+				   e->sip,
+				   ntohs(e->sport),
+				   e->dip,
+				   ntohs(e->dport));
+
+			if (memcmp(e->sip, e->manip.new_sip, 16) ||
+			    memcmp(e->dip, e->manip.new_dip, 16) ||
+			    e->sport != e->manip.new_sport ||
+			    e->dport != e->manip.new_dport) {
+				seq_printf(m,
+					   " Modified flow: %pI6:%u -> "
+					   "%pI6:%u\n",
+					   e->manip.new_sip,
+					   ntohs(e->manip.new_sport),
+					   e->manip.new_dip,
+					   ntohs(e->manip.new_dport));
+			}
+
+			seq_printf(m, "  Forwarded packets: %u\n",
+				   e->forwarded_packets);
+			seq_printf(m, "  Forwarded bytes: %llu\n",
+				   e->forwarded_bytes);
+			seq_printf(m, "\n");
+		}
+	}
+
+	rcu_read_unlock();
+	return 0;
+}
+
+static int ipv6_ffn_entries_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ipv6_ffn_entries_show, NULL);
+}
+
+static const struct proc_ops ipv6_ffn_entries_fops = {
+	.proc_open	= ipv6_ffn_entries_open,
+	.proc_release	= single_release,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+};
+
+static int __init __ipv6_ffn_init_procfs(void)
+{
+	proc_net_ipv6_ffn = proc_net_mkdir(&init_net, "ipv6_ffn",
+					 init_net.proc_net);
+	if (!proc_net_ipv6_ffn) {
+		printk(KERN_ERR "proc_mkdir() has failed "
+		       "for 'net/ipv6_ffn'.\n");
+		return -1;
+	}
+
+	if (proc_create("entries", 0400, proc_net_ipv6_ffn,
+			&ipv6_ffn_entries_fops) == NULL) {
+		printk(KERN_ERR "proc_create() has failed for "
+		       "'net/ipv6_ffn/entries'.\n");
+		return -1;
+	}
+	return 0;
+}
+#endif
+
+/*
+ * initialize ffn
+ */
+void __init ipv6_ffn_init(void)
+{
+	printk("IPv6 Fast Forward and NAT enabled\n");
+	__ipv6_ffn_init_cache();
+
+#ifdef CONFIG_IPV6_FFN_PROCFS
+	if (__ipv6_ffn_init_procfs() < 0)
+		printk(KERN_WARNING "IPv6 FFN: unable to create proc entries.\n");
+#endif
+}
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/net/mac80211/fbx_scum.h	2023-12-12 17:24:34.175627535 +0100
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright(c) 2023 Freebox
+ */
+
+#ifdef CONFIG_FBX80211_SCUM
+void fbx80211_scum_setup(struct ieee80211_sub_if_data *sdata);
+void fbx80211_scum_teardown(struct ieee80211_sub_if_data *sdata);
+void fbx80211_scum_local_init(struct ieee80211_local *local);
+void fbx80211_scum_local_cleanup(struct ieee80211_local *local);
+void fbx80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
+			 struct ieee80211_rate *rate, unsigned int rtap_space);
+
+static inline bool fbx80211_skip_mon(struct ieee80211_sub_if_data *sdata)
+{
+	return sdata->u.mntr.scum.skip_mon;
+}
+#else
+static inline void
+fbx80211_scum_client_start(struct ieee80211_sub_if_data *sdata)
+{
+}
+
+static inline void
+fbx80211_scum_client_stop(struct ieee80211_sub_if_data *sdata)
+{
+}
+
+static inline void fbx80211_scum_teardown(struct ieee80211_sub_if_data *sdata)
+{
+}
+
+static inline void fbx80211_scum_setup(struct ieee80211_sub_if_data *sdata)
+{
+}
+
+static inline void fbx80211_scum_local_init(struct ieee80211_local *local)
+{
+}
+
+static inline void fbx80211_scum_local_cleanup(struct ieee80211_local *local)
+{
+}
+
+static inline void fbx80211_rx_monitor(struct ieee80211_local *local,
+				       struct sk_buff *skb,
+				       struct ieee80211_rate *rate,
+				       unsigned int rtap_space)
+{
+}
+
+static inline bool fbx80211_skip_mon(struct ieee80211_sub_if_data *sdata)
+{
+	return false;
+}
+#endif
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/net/mac80211/nmeshd_nl.c	2024-04-19 16:04:28.973736213 +0200
@@ -0,0 +1,454 @@
+#include "nmeshd_nl.h"
+
+static struct genl_family nmeshd_nl_family;
+
+enum multicast_groups {
+	NNL_MCGRP_MLME,
+};
+
+static const struct genl_multicast_group nmeshd_nl_mcgrps[] = {
+	[NNL_MCGRP_MLME] = { .name = NNL_MULTICAST_GROUP_MLME },
+};
+
+static const struct nla_policy nmeshd_nl_policy[NNL_NUM_ATTR] = {
+	[NNL_ATTR_IFINDEX] = { .type = NLA_U32 },
+	[NNL_ATTR_WIPHY] = { .type = NLA_U32 },
+	[NNL_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+	[NNL_ATTR_MAC] = { .len = ETH_ALEN },
+	[NNL_ATTR_FRAME_TYPE] = { .type = NLA_U16 },
+	[NNL_ATTR_SIGNAL_STRENGTH] = {.type = NLA_S8},
+	[NNL_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 },
+	[NNL_ATTR_MPP_PROXY] = {.len = ETH_ALEN },
+};
+
+/* IE validation */
+static bool is_valid_ie_attr(const struct nlattr *attr)
+{
+	const u8 *pos;
+	int len;
+
+	if (!attr)
+		return true;
+
+	pos = nla_data(attr);
+	len = nla_len(attr);
+
+	while (len) {
+		u8 elemlen;
+
+		if (len < 2)
+			return false;
+		len -= 2;
+
+		elemlen = pos[1];
+		if (elemlen > len)
+			return false;
+
+		len -= elemlen;
+		pos += 2 + elemlen;
+	}
+
+	return true;
+}
+
+static int nmeshd_nl_update_mesh_vendor_path_metrics_ie(struct sk_buff *skb, struct genl_info *info)
+{
+	struct net_device *dev = info->user_ptr[0];
+	struct wireless_dev *wdev = info->user_ptr[1];
+	struct mesh_vendor_ie mv_ie;
+	struct nlattr *ieattr;
+
+	if (wdev->iftype == NL80211_IFTYPE_MESH_POINT) {
+		if (info->attrs[NNL_ATTR_IE_PATH_METRICS]) {
+			ieattr = info->attrs[NNL_ATTR_IE_PATH_METRICS];
+			if (!is_valid_ie_attr(ieattr))
+				return -EINVAL;
+			mv_ie.ie = nla_data(ieattr);
+			mv_ie.ie_len = nla_len(ieattr);
+			return ieee80211_update_mesh_vendor_path_metrics_ie(NULL, dev, &mv_ie);
+		}
+	}
+	return -EOPNOTSUPP;
+}
+
+static int  nmeshd_nl_update_mesh_vendor_node_metrics_ie(struct sk_buff *skb, struct genl_info *info)
+{
+	struct net_device *dev = info->user_ptr[0];
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct mesh_vendor_ie mv_ie;
+	struct nlattr *ieattr;
+
+	if (wdev->iftype == NL80211_IFTYPE_MESH_POINT) {
+		if (info->attrs[NNL_ATTR_IE_NODE_METRICS]) {
+			ieattr = info->attrs[NNL_ATTR_IE_NODE_METRICS];
+			if (!is_valid_ie_attr(ieattr))
+				return -EINVAL;
+			mv_ie.ie = nla_data(ieattr);
+			mv_ie.ie_len = nla_len(ieattr);
+			return ieee80211_update_mesh_vendor_node_metrics_ie(NULL, dev, &mv_ie);
+		}
+	}
+	return -EOPNOTSUPP;
+}
+
+void nmeshd_nl_send_vendor_ies(struct net_device *dev, const u8 *macaddr, u16 stype, s8 signal,
+			       u32 beacon_int, struct ieee802_11_mesh_vendor_specific_elems *pm,
+			       struct ieee802_11_mesh_vendor_specific_elems *nm, gfp_t gfp)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+	struct sk_buff *msg;
+	void *hdr;
+	bool fail = false;
+
+	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_MESH_POINT))
+		return;
+
+	msg = nlmsg_new(100 + nm->ie_len + pm->ie_len, gfp);
+	if (!msg)
+		return;
+
+	hdr = genlmsg_put(msg, 0, 0, &nmeshd_nl_family, 0, QBC_VENDOR_IE);
+	if (!hdr) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	if (nla_put_u32(msg, NNL_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_string(msg, NNL_ATTR_IFNAME, dev->name) ||
+	    nla_put_u32(msg, NNL_ATTR_IFINDEX, dev->ifindex))
+		goto nla_put_failure;
+
+	if (nm->parse_error ||
+	    nla_put(msg, NNL_ATTR_MAC, ETH_ALEN, macaddr) ||
+	    nla_put_u16(msg, NNL_ATTR_FRAME_TYPE, stype) ||
+	    nla_put_s8(msg, NNL_ATTR_SIGNAL_STRENGTH, signal) ||
+	    nla_put_u32(msg, NNL_ATTR_BEACON_INTERVAL, beacon_int) ||
+	    (nm->ie_len && nm->ie_start &&
+	     nla_put(msg, NNL_ATTR_IE_NODE_METRICS, nm->ie_len, nm->ie_start)))
+		fail = true;
+
+	if (pm->parse_error ||
+	    (pm->ie_len && pm->ie_start &&
+		nla_put(msg, NNL_ATTR_IE_PATH_METRICS, pm->ie_len, pm->ie_start))) {
+		if (fail)
+			goto nla_put_failure;
+	}
+
+	genlmsg_end(msg, hdr);
+	genlmsg_multicast_netns(&nmeshd_nl_family, wiphy_net(wdev->wiphy), msg, 0,
+				NNL_MCGRP_MLME, gfp);
+	return;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	nlmsg_free(msg);
+}
+
+static int nmeshd_nl_set_mpp(struct sk_buff *skb, struct genl_info *info)
+{
+	struct net_device *dev = info->user_ptr[0];
+
+	u8 *dst = NULL;
+	u8 *proxy = NULL;
+
+	if (!info->attrs[NNL_ATTR_MAC])
+		return -EINVAL;
+
+	if (!info->attrs[NNL_ATTR_MPP_PROXY])
+		return -EINVAL;
+
+	dst = nla_data(info->attrs[NNL_ATTR_MAC]);
+	proxy = nla_data(info->attrs[NNL_ATTR_MPP_PROXY]);
+
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+		return -EOPNOTSUPP;
+
+	return ieee80211_update_mpp(NULL, dev, dst, proxy);
+}
+
+static int nmeshd_nl_del_mpp(struct sk_buff *skb, struct genl_info *info)
+{
+	struct net_device *dev = info->user_ptr[0];
+	u8 *dst;
+
+	if (!info->attrs[NNL_ATTR_MAC])
+		return -EINVAL;
+
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+		return -EOPNOTSUPP;
+
+	dst = nla_data(info->attrs[NNL_ATTR_MAC]);
+
+	return ieee80211_delete_mpp(NULL, dev, dst);
+}
+
+static int nmeshd_nl_mplink_block(struct sk_buff *skb, struct genl_info *info)
+{
+	struct net_device *dev = info->user_ptr[0];
+	u8 *dst;
+
+	if (!info->attrs[NNL_ATTR_MAC])
+		return -EINVAL;
+
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+		return -EOPNOTSUPP;
+
+	dst = nla_data(info->attrs[NNL_ATTR_MAC]);
+	return ieee80211_mplink_block(NULL, dev, dst);
+}
+
+static int nmeshd_nl_mplink_unblock(struct sk_buff *skb, struct genl_info *info)
+{
+	struct net_device *dev = info->user_ptr[0];
+	u8 *dst;
+
+	if (!info->attrs[NNL_ATTR_MAC])
+		return -EINVAL;
+
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+	return -EOPNOTSUPP;
+
+	dst = nla_data(info->attrs[NNL_ATTR_MAC]);
+	return ieee80211_mplink_unblock(NULL, dev, dst);
+}
+
+static int  nmeshd_nl_get_net_device(struct sk_buff *skb,
+				     struct netlink_callback *cb,
+				     struct net_device **dev)
+{
+	int err, ifindex;
+	struct nlattr **attrs;
+
+	attrs = kcalloc(NNL_NUM_ATTR, sizeof(*attrs), GFP_KERNEL);
+	if (!attrs)
+		return -ENOMEM;
+
+	err = nlmsg_parse_deprecated(cb->nlh,
+				     GENL_HDRLEN + nmeshd_nl_family.hdrsize,
+				     attrs, nmeshd_nl_family.maxattr,
+				     nmeshd_nl_policy, NULL);
+	if (err) {
+		kfree(attrs);
+		return err;
+	}
+
+	if (!attrs[NNL_ATTR_IFINDEX]) {
+		kfree(attrs);
+		return -EINVAL;
+	}
+
+	ifindex = nla_get_u32(attrs[NNL_ATTR_IFINDEX]);
+	kfree(attrs);
+	*dev = dev_get_by_index(&init_net, ifindex);
+	if (!(*dev))
+		err = -ENODEV;
+	return err;
+}
+
+static int nmeshd_nl_send_mplink_info(struct sk_buff *msg, u32 cmd, u32 portid,
+				      u32 seq, int flags,
+				      struct net_device *dev,
+				      struct mplink_blocked_info *mplink_info)
+{
+	void *hdr;
+	struct nlattr *mplink_infoattr;
+	struct nlattr *infoattr;
+	u32 len = 0;
+	u32 i;
+
+	hdr = genlmsg_put(msg, portid, seq, &nmeshd_nl_family, flags, cmd);
+	if (!hdr)
+		return -1;
+
+	if (nla_put_u32(msg, NNL_ATTR_IFINDEX, dev->ifindex))
+		goto nla_put_failure;
+
+	mplink_infoattr = nla_nest_start(msg, NNL_ATTR_MPLINK_INFO);
+	if (!mplink_infoattr)
+		goto nla_put_failure;
+
+	for (i = 0; i < mplink_info->count; i++) {
+		infoattr = nla_nest_start(msg, i);
+		if (!infoattr)
+			goto nla_put_failure;
+
+		nla_put(msg, NNL_MPLINK_ATTR_MAC, ETH_ALEN, mplink_info->info + len);
+		len += ETH_ALEN;
+		nla_nest_end(msg, infoattr);
+	}
+
+	nla_nest_end(msg, mplink_infoattr);
+	genlmsg_end(msg, hdr);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int nmeshd_nl_dump_blocked_mplink_info(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct mplink_blocked_info mplink_info;
+	struct net_device *dev = NULL;
+	int err = 0;
+
+	rtnl_lock();
+	err = nmeshd_nl_get_net_device(skb, cb, &dev);
+	if (err)
+		goto fail;
+
+	memset(&mplink_info, 0, sizeof(mplink_info));
+	err = ieee80211_dump_blocked_mplink_info(NULL, dev, &mplink_info);
+	if (err)
+		goto fail;
+
+	if (nmeshd_nl_send_mplink_info(skb, NNL_CMD_DUMP_BLOCKED_MPLINK_INFO,
+				       NETLINK_CB(cb->skb).portid,
+				       cb->nlh->nlmsg_seq, NLM_F_MULTI,
+				       dev,
+				       &mplink_info) < 0)
+		err = skb->len;
+
+fail:
+	if (dev)
+		dev_put(dev);
+	rtnl_unlock();
+	return err;
+}
+
+static int nmeshd_nl_mplink_flush(struct sk_buff *skb, struct genl_info *info)
+{
+	struct net_device *dev = info->user_ptr[0];
+
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+		return -EOPNOTSUPP;
+
+	return ieee80211_mplink_flush(NULL, dev);
+}
+
+static int nmeshd_nl_pre_doit(const struct genl_split_ops *ops,
+			      struct sk_buff *skb,
+			      struct genl_info *info)
+{
+	struct net_device *dev;
+	struct wireless_dev *wdev;
+	struct nlattr **attrs = info->attrs;
+	int ifindex;
+
+	if (!attrs[NNL_ATTR_IFINDEX])
+		return -EINVAL;
+
+	rtnl_lock();
+	ifindex = nla_get_u32(attrs[NNL_ATTR_IFINDEX]);
+
+	dev = dev_get_by_index(&init_net, ifindex);
+	if (!dev) {
+		rtnl_unlock();
+		return -ENODEV;
+	}
+
+	wdev = dev->ieee80211_ptr;
+	if (!wdev) {
+		dev_put(dev);
+		rtnl_unlock();
+		return -ENODEV;
+	}
+
+	if (!wdev_running(wdev)) {
+		dev_put(dev);
+		rtnl_unlock();
+		return -ENETDOWN;
+	}
+
+	info->user_ptr[0] = dev;
+	info->user_ptr[1] = wdev;
+	return 0;
+}
+
+static void nmeshd_nl_post_doit(const struct genl_split_ops *ops,
+				struct sk_buff *skb,
+				struct genl_info *info)
+{
+	if (info->user_ptr[0])
+		dev_put(info->user_ptr[0]);
+	rtnl_unlock();
+}
+
+static const struct genl_ops ops[] = {
+	{
+		.cmd = NNL_CMD_MESH_PEER_PATH_METRICS,
+		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+		.doit = nmeshd_nl_update_mesh_vendor_path_metrics_ie,
+		.policy = nmeshd_nl_policy,
+	},
+	{
+		.cmd = NNL_CMD_MESH_NODE_METRICS,
+		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+		.doit = nmeshd_nl_update_mesh_vendor_node_metrics_ie,
+		.policy = nmeshd_nl_policy,
+	},
+	{
+		.cmd = NNL_CMD_SET_MPP,
+		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+		.doit = nmeshd_nl_set_mpp,
+		.policy = nmeshd_nl_policy,
+	},
+	{
+		.cmd = NNL_CMD_DEL_MPP,
+		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+		.doit = nmeshd_nl_del_mpp,
+		.policy = nmeshd_nl_policy,
+	},
+	{
+		.cmd =  NNL_CMD_MPLINK_BLOCK,
+		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+		.doit = nmeshd_nl_mplink_block,
+		.policy = nmeshd_nl_policy,
+	},
+	{
+		.cmd = NNL_CMD_MPLINK_UNBLOCK,
+		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+		.doit = nmeshd_nl_mplink_unblock,
+		.policy = nmeshd_nl_policy,
+	},
+	{
+		.cmd = NNL_CMD_DUMP_BLOCKED_MPLINK_INFO,
+		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+		.dumpit = nmeshd_nl_dump_blocked_mplink_info,
+		.policy = nmeshd_nl_policy,
+	},
+	{
+		.cmd = NNL_CMD_MPLINK_FLUSH,
+		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+		.doit = nmeshd_nl_mplink_flush,
+		.policy = nmeshd_nl_policy,
+	}
+};
+
+static struct genl_family nmeshd_nl_family __ro_after_init = {
+	.name = NMESHD_NL_FAMILY,
+	.hdrsize = 0,
+	.version = 1,
+	.maxattr = NNL_ATTR_MAX,
+	.module = THIS_MODULE,
+	.ops = ops,
+	.n_ops = ARRAY_SIZE(ops),
+	.mcgrps = nmeshd_nl_mcgrps,
+	.n_mcgrps = ARRAY_SIZE(nmeshd_nl_mcgrps),
+	.pre_doit = nmeshd_nl_pre_doit,
+	.post_doit = nmeshd_nl_post_doit,
+	.resv_start_op	= NNL_CMD_AFTER_LAST,
+};
+
+int nmeshd_nl_init(void)
+{
+	return genl_register_family(&nmeshd_nl_family);
+}
+
+int nmeshd_nl_deinit(void)
+{
+	return genl_unregister_family(&nmeshd_nl_family);
+}
+
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/net/mac80211/nmeshd_nl.h	2024-04-19 16:04:28.973736213 +0200
@@ -0,0 +1,33 @@
+#ifndef NMESHD_NL_H
+#define NMESHD_NL_H
+
+#include <net/genetlink.h>
+#include <linux/nl80211.h>
+#include <net/cfg80211.h>
+#include "../wireless/core.h"
+#include <linux/nmeshd_nl.h>
+#include "ieee80211_i.h"
+
+int nmeshd_nl_init(void);
+int nmeshd_nl_deinit(void);
+int ieee80211_update_mesh_vendor_path_metrics_ie(struct wiphy *wiphy,
+						 struct net_device *dev,
+						 const struct mesh_vendor_ie *vendor_ie);
+
+int ieee80211_update_mesh_vendor_node_metrics_ie(struct wiphy *wiphy,
+						 struct net_device *dev,
+						 const struct mesh_vendor_ie *vendor_ie);
+
+int ieee80211_update_mpp(struct wiphy *wiphy, struct net_device *dev,
+			 const u8 *dst, const u8 *next_hop);
+
+int ieee80211_delete_mpp(struct wiphy *wiphy, struct net_device *dev,
+			 const u8 *dst);
+int ieee80211_mplink_block(struct wiphy *wiphy, struct net_device *dev,
+			   const u8 *dst);
+int ieee80211_mplink_unblock(struct wiphy *wiphy, struct net_device *dev,
+			     const u8 *dst);
+int ieee80211_dump_blocked_mplink_info(struct wiphy *wiphy, struct net_device *dev,
+				       struct mplink_blocked_info *minfo);
+int ieee80211_mplink_flush(struct wiphy *wiphy, struct net_device *dev);
+#endif /* NMESHD_NL_H */
diff -Nruw linux-6.4-fbx/net/mac80211/tests./Makefile linux-6.4-fbx/net/mac80211/tests/Makefile
--- linux-6.4-fbx/net/mac80211/tests./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/mac80211/tests/Makefile	2023-11-07 13:38:44.090257456 +0100
@@ -0,0 +1,3 @@
+mac80211-tests-y += module.o elems.o
+
+obj-$(CONFIG_MAC80211_KUNIT_TEST) += mac80211-tests.o
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/net/wireless/nlfbx.h	2023-12-12 17:24:34.183627754 +0100
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Freebox
+ */
+
+#ifdef CONFIG_FBX80211
+int nlfbx_init(void);
+void nlfbx_exit(void);
+#else
+static inline int nlfbx_init(void) {
+	return 0;
+}
+
+static inline void nlfbx_exit(void)
+{
+}
+#endif
diff -Nruw linux-6.4-fbx/net/wireless/tests./Makefile linux-6.4-fbx/net/wireless/tests/Makefile
--- linux-6.4-fbx/net/wireless/tests./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/net/wireless/tests/Makefile	2023-11-07 13:38:44.114258112 +0100
@@ -0,0 +1,3 @@
+cfg80211-tests-y += module.o fragmentation.o
+
+obj-$(CONFIG_CFG80211_KUNIT_TEST) += cfg80211-tests.o
diff -Nruw linux-6.4-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./Makefile linux-6.4-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/Makefile
--- linux-6.4-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.4-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/Makefile	2024-03-18 14:40:14.835740896 +0100
@@ -0,0 +1,26 @@
+board-dtbs = \
+	fbxgw8r-board-00.dtb \
+	fbxgw8r-board-01.dtb \
+	fbxgw8r-board-02.dtb \
+	fbxgw8r-board-03.dtb \
+	fbxgw8r-board-04.dtb
+
+dtb-$(CONFIG_ARCH_BCMBCA) += bcm963158ref1d.dtb fbxgw8r.dtb $(board-dtbs)
+
+always-y	:= $(dtb-y)
+always-$(CONFIG_ARCH_BCMBCA) += fbxgw8r_dtbs
+
+subdir-y	:= $(dts-dirs)
+clean-files	:= *.dtb fbxgw8r_dtbs
+
+cmd_dtbs               = ./scripts/dtbs.sh $@ $^
+quiet_cmd_dtbs         = DTBS    $@
+
+$(obj)/fbxgw8r_dtbs: $(addprefix $(obj)/,$(board-dtbs))
+	$(call cmd,dtbs)
+
+# export symbols in DTBs file to allow overlay usage
+DTC_FLAGS	+= -@
+
+dtb-$(CONFIG_ARCH_BCMBCA) += fbxgw8r_pcie_pine_asmedia.dtb
+dtb-$(CONFIG_ARCH_BCMBCA) += fbxgw8r_pcie_pine_dualband_noswitch.dtb
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/scripts/dtc/include-prefixes/arm64/qcom/fbxgw9r.dts	2024-04-19 15:59:31.193600561 +0200
@@ -0,0 +1,983 @@
+/*
+ * Freebox FBXGW9R Board DTS
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "ipq9574.dtsi"
+#include "ipq9574-firmware-optee.dtsi"
+
+#undef USE_PHYLINK_SFP
+
+/ {
+	compatible = "freebox,fbxgw9r-board-00", "freebox,fbxgw9r-board-01",
+		   "freebox,fbxgw9r", "qcom,ipq9574";
+	model = "Freebox FBXGW9R";
+
+	// for diagchar module
+	qcom,diag@0 {
+		compatible = "qcom,diag";
+		status = "okay";
+	};
+
+	aliases {
+		serial0 = &blsp1_uart2;
+		serial1 = &blsp1_uart0;
+		i2c0 = &blsp1_i2c4;
+		i2c1 = &blsp1_i2c1;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	reserved-memory {
+		ramoops@ffff0000 {
+		       compatible = "ramoops";
+		       /* RAM top - 64k */
+		       reg = <0x0 0xffff0000 0x0 (64 * 1024)>;
+		       record-size = <(64 * 1024)>;
+		       ecc-size = <16>;
+		       no-dump-oops;
+	       };
+	};
+
+#ifdef USE_PHYLINK_SFP
+	sfp_lan: sfp-lan {
+		compatible = "sff,sfp";
+		i2c-bus = <&blsp1_i2c1>;
+		maximum-power-milliwatt = <3000>;
+		mod-def0-gpios = <&fbxpmu_gpio_expander 1 GPIO_ACTIVE_LOW>;
+		pwr-enable-gpios = <&fbxpmu_gpio_expander 8 GPIO_ACTIVE_HIGH>;
+	};
+#endif
+
+	keypad {
+		compatible = "gpio-keys";
+		autorepeat = <1>;
+
+		keyup {
+			label = "key up";
+			linux,code = <KEY_UP>;
+			gpios = <&fbxpmu_gpio_expander 10 GPIO_ACTIVE_HIGH>;
+			debounce-interval = <50>;
+			linux,can-disable;
+		};
+		keydown {
+			label = "key down";
+			linux,code = <KEY_DOWN>;
+			gpios = <&fbxpmu_gpio_expander 9 GPIO_ACTIVE_HIGH>;
+			debounce-interval = <50>;
+			linux,can-disable;
+		};
+		keyright {
+			label = "key right";
+			linux,code = <KEY_RIGHT>;
+			gpios = <&fbxpmu_gpio_expander 12 GPIO_ACTIVE_HIGH>;
+			debounce-interval = <50>;
+			linux,can-disable;
+		};
+		keyleft {
+			label = "key left";
+			linux,code = <KEY_LEFT>;
+			gpios = <&fbxpmu_gpio_expander 11 GPIO_ACTIVE_HIGH>;
+			debounce-interval = <50>;
+			linux,can-disable;
+		};
+	};
+
+	powerbtn {
+		compatible = "gpio-keys";
+		autorepeat = <0>;
+
+		powerbtn {
+			label = "power";
+			linux,code = <KEY_POWER>;
+			gpios = <&fbxpmu_gpio_expander 13 GPIO_ACTIVE_HIGH>;
+			debounce-interval = <50>;
+			linux,can-disable;
+		};
+	};
+
+	fbxgw9r-gpio {
+		compatible = "fbx,fbxgpio";
+
+		lan-sfp-presence {
+			gpio = <&fbxpmu_gpio_expander 1 GPIO_ACTIVE_HIGH>;
+			input;
+#ifdef USE_PHYLINK_SFP
+			no-claim;
+#endif
+		};
+
+		lan-sfp-pwrgood {
+			gpio = <&fbxpmu_gpio_expander 5 GPIO_ACTIVE_HIGH>;
+			input;
+#ifdef USE_PHYLINK_SFP
+			no-claim;
+#endif
+		};
+
+		lan-sfp-pwren {
+			gpio = <&fbxpmu_gpio_expander 8 GPIO_ACTIVE_HIGH>;
+			output-low;
+#ifdef USE_PHYLINK_SFP
+			no-claim;
+#endif
+		};
+
+		usb3-pwren {
+			gpio = <&fbxpmu_gpio_expander 4 GPIO_ACTIVE_HIGH>;
+			output-high;
+		};
+
+		usb3-pwrgood {
+			gpio = <&fbxpmu_gpio_expander 14 GPIO_ACTIVE_HIGH>;
+			input;
+		};
+
+		poe-status {
+			gpio = <&fbxpmu_gpio_expander 6 GPIO_ACTIVE_HIGH>;
+			input;
+		};
+
+		poe-disable {
+			gpio = <&fbxpmu_gpio_expander 3 GPIO_ACTIVE_HIGH>;
+			output-high;
+		};
+
+		pon-rst {
+			gpio = <&fbxpmu_gpio_expander 7 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+
+		test-mode {
+			gpio = <&fbxpmu_gpio_expander 15 GPIO_ACTIVE_LOW>;
+			input;
+		};
+
+		iot-rst {
+			gpio = <&tlmm 12 GPIO_ACTIVE_HIGH>;
+			output-low;
+		};
+
+		iot-swd-data {
+			gpio = <&tlmm 63 GPIO_ACTIVE_HIGH>;
+			output-low;
+		};
+
+		iot-swd-clk {
+			gpio = <&tlmm 64 GPIO_ACTIVE_HIGH>;
+			output-low;
+		};
+
+		soc-rst-inhibit {
+			gpio = <&tlmm 44 GPIO_ACTIVE_HIGH>;
+			output-low;
+		};
+
+		nvme-pwrgood {
+			gpio = <&tlmm 52 GPIO_ACTIVE_HIGH>;
+			input;
+		};
+
+		nvme-is-pcie {
+			gpio = <&tlmm 57 GPIO_ACTIVE_HIGH>;
+			input;
+		};
+
+		nvme-door-opened {
+			gpio = <&tlmm 61 GPIO_ACTIVE_HIGH>;
+			input;
+			no-claim;
+		};
+
+		pcie0-wdisable {
+			gpio = <&tlmm 54 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+
+		pcie1-wdisable {
+			gpio = <&tlmm 55 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+
+		pcie2-wdisable {
+			gpio = <&tlmm 56 GPIO_ACTIVE_LOW>;
+			output-low;
+		};
+	};
+
+	nvme_regulator: nvme-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "nvme-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&tlmm 58 GPIO_ACTIVE_HIGH>;
+		startup-delay-us = <10000>;
+		enable-active-high;
+		fault-sense-gpio = <&tlmm 61 GPIO_ACTIVE_HIGH>;
+	};
+};
+
+&blsp1_uart0 {
+	// PON uart
+	pinctrl-0 = <&uart_0_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&blsp1_uart2 {
+	// main uart
+	pinctrl-0 = <&uart2_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&blsp1_i2c1 {
+	// used for SFP lan
+	pinctrl-0 = <&i2c_1_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&blsp1_spi3 {
+	// used for OLED + cortina SPI + FXS + IOT
+	pinctrl-0 = <&spi_3_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+
+	spi-nand@0 {
+		compatible = "spi-nand";
+                reg = <0>;
+                spi-max-frequency = <(50 * 1000 * 1000)>;
+
+		partitions {
+			compatible = "fixed-partitions";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			uboot@0 {
+				label = "cortina-uboot";
+				reg = <0x0 0x400000>;
+			};
+
+			env@400000 {
+				label = "cortina-uboot-env";
+				reg = <0x400000 0x200000>;
+			};
+
+			dtb@600000 {
+				label = "cortina-dtb";
+				reg = <0x600000 0x80000>;
+			};
+
+			uimage@700000 {
+				label = "cortina-uimage";
+				reg = <0x700000 0x600000>;
+			};
+
+			rootfs@d00000 {
+				label = "cortina-rootfs";
+				reg = <0xd00000 0x2800000>;
+			};
+
+			ubi@6400000 {
+				label = "cortina-ubi";
+				reg = <0x6400000 0x1400000>;
+			};
+		};
+	};
+
+	spi-slac@1 {
+		compatible = "microsemi,le9641";
+		reg = <1>;
+		spi-max-frequency = <(500 * 1000)>;
+	};
+
+	ssd1320@2 {
+		compatible = "chipwealth,ch1120";
+		reg = <2>;
+		spi-max-frequency = <(14 * 1000 * 1000)>;
+
+		/*
+		* display mapping info (when looking at it such as keypad
+		* is on the right):
+		*
+		* SEG used on x-axis
+		* COM used on y-axis
+		*
+		* top-left: COM0/SEG159
+		* bottom-right: COM159/SEG0
+		*
+		* visible area (160x128)
+		*  top-left: COM16/SEG159
+		*  bottom-right: COM143/SEG0
+		*
+		* SEG are mapped in alternate: SEG0, SEG80, SEG1, ...
+		*/
+		ssd1320,com-range = <16 143>;
+		ssd1320,seg-range = <0 159>;
+		ssd1320,seg-reverse-dir;
+
+		ssd1320,clk-divide-ratio = <0x0>;
+		ssd1320,precharge-period = <0x1f>;
+		ssd1320,vcom-deselect-level = <0x3f>;
+		ssd1320,iref = <0x02>;
+		ssd1320,discharge-period = <0x02>;
+
+		ssd1320,display-enh-a = <0x02>;
+
+		ssd1320,default-brightness = <0xff>;
+		ssd1320,max-brightness = <0xff>;
+
+		ssd1320,watchdog = <300>;
+		ssd1320,data-select-gpio = <&tlmm 53 GPIO_ACTIVE_HIGH>;
+		ssd1320,reset-gpio = <&fbxpmu_gpio_expander 2 GPIO_ACTIVE_LOW>;
+		ssd1320,vcc-gpio = <&fbxpmu_gpio_expander 16 GPIO_ACTIVE_HIGH>;
+	};
+};
+
+&blsp1_i2c4 {
+	// used for PMU
+	pinctrl-0 = <&i2c_4_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+
+	fbxpmu@3c {
+		compatible = "freebox,fbxgwr-pmu";
+		reg = <0x3c>;
+
+		fbxpmu_gpio_expander: fbxpmu@3c {
+			compatible = "freebox,fbxgwr-pmu-gpio";
+			interrupt-parent = <&tlmm>;
+			interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+			gpio-controller;
+			ngpios = <24>;
+			#gpio-cells = <2>;
+			gpio-line-names = "", /* 0 */
+					  "lan-sfp-presence", /* 1 */
+					  "oled-rst", /* 2 */
+					  "poe-dis", /* 3 */
+					  "usb3-pwren", /* 4 */
+					  "lan-sfp-pwrgood", /* 5 */
+					  "poe-status", /* 6 */
+					  "pon-rst", /* 7 */
+					  "lan-sfp-pwren", /* 8 */
+					  "keypad-down", /* 9 */
+					  "keypad-up", /* 10 */
+					  "keypad-cancel", /* 11 */
+					  "keypad-ok", /* 12 */
+					  "power-button", /* 13 */
+					  "usb-pwr-fault", /* 14 */
+					  "test-mode", /* 15 */
+					  "oled-vcc-en"; /* 16 */
+		};
+
+		led-controller {
+			compatible = "freebox,fbxgwr-pmu-led";
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			nleds = <3>;
+
+			led0@0 {
+				label = "green";
+				reg = <0x00>;
+			};
+
+			led1@1 {
+				label = "red";
+				reg = <0x01>;
+			};
+
+			led2@2 {
+				label = "blue";
+				reg = <0x02>;
+			};
+		};
+
+		watchdog {
+			compatible = "freebox,fbxgwr-pmu-watchdog";
+
+			interrupt-parent = <&tlmm>;
+			interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+
+		};
+	};
+};
+
+&lpass {
+	status = "okay";
+};
+
+&lpass_pcm {
+	status = "okay";
+	pinctrl-0 = <&audio_pins_pri>;
+        pinctrl-names = "default";
+};
+
+&tlmm {
+	gpio-line-names = "", /* 0 */
+			  "", /* 1 */
+			  "", /* 2 */
+			  "", /* 3 */
+			  "", /* 4 */
+			  "", /* 5 */
+			  "", /* 6 */
+			  "", /* 7 */
+			  "", /* 8 */
+			  "", /* 9 */
+			  "", /* 10 */
+			  "pmu_int", /* 11 */
+			  "iot_rst", /* 12 */
+			  "", /* 13 */
+			  "", /* 14 */
+			  "", /* 15 */
+			  "", /* 16 */
+			  "", /* 17 */
+			  "", /* 18 */
+			  "", /* 19 */
+			  "", /* 20 */
+			  "", /* 21 */
+			  "", /* 22 */
+			  "pcie0_rst", /* 23 */
+			  "", /* 24 */
+			  "", /* 25 */
+			  "pcie1_rst", /* 26 */
+			  "", /* 27 */
+			  "", /* 28 */
+			  "pcie2_rst", /* 29 */
+			  "", /* 30 */
+			  "", /* 31 */
+			  "pcie3_rst", /* 32 */
+			  "", /* 33 */
+			  "", /* 34 */
+			  "", /* 35 */
+			  "", /* 36 */
+			  "", /* 37 */
+			  "", /* 38 */
+			  "", /* 39 */
+			  "", /* 40 */
+			  "", /* 41 */
+			  "", /* 42 */
+			  "", /* 43 */
+			  "soc_rst_inhibit", /* 44 */
+			  "fxs_int", /* 45 */
+			  "", /* 46 */
+			  "lanqphy_int", /* 47 */
+			  "", /* 48 */
+			  "", /* 49 */
+			  "", /* 50 */
+			  "", /* 51 */
+			  "nvme_pwrfault", /* 52 */
+			  "oled_data_cmd", /* 53 */
+			  "pcie0_wdisable", /* 54 */
+			  "pcie1_wdisable", /* 55 */
+			  "pcie2_wdisable", /* 56 */
+			  "nvme_pcie_sata", /* 57 */
+			  "nvme_pwren", /* 58 */
+			  "", /* 59 */
+			  "lanqphy_rst", /* 60 */
+			  "nvme_door_open", /* 61 */
+			  "iot_int", /* 62 */
+			  "iot_swd_data", /* 63 */
+			  "iot_swd_clk"; /* 64 */
+};
+
+&rpm_requests {
+	regulators {
+		compatible = "qcom,rpm-mp5496-regulators";
+
+		ipq9574_s1: s1 {
+		/*
+		 * During kernel bootup, the SoC runs at 800MHz with 875mV set by the bootloaders.
+		 * During regulator registration, kernel not knowing the initial voltage,
+		 * considers it as zero and brings up the regulators with minimum supported voltage.
+		 * Update the regulator-min-microvolt with SVS voltage of 725mV so that
+		 * the regulators are brought up with 725mV which is sufficient for all the
+		 * corner parts to operate at 800MHz
+		 */
+			regulator-min-microvolt = <725000>;
+			regulator-max-microvolt = <1075000>;
+		};
+	};
+};
+
+&dwc_0 {
+	dr_mode = "host";
+};
+
+&pcie0_phy {
+	status = "okay";
+};
+
+&pcie0 {
+	// wifi low, PCI x1
+	pinctrl-names = "default";
+	pinctrl-0 = <&pcie_0_pin>;
+
+	// FIXME: not supported on kernel 6.4, check
+	max-payload-size = <1>; // 1-256 TLP bytes for WKK
+
+	perst-gpios = <&tlmm 23 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 24 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
+&pcie2_phy {
+	status = "okay";
+};
+
+&pcie2 {
+	// wifi high, PCI x2
+	pinctrl-names = "default";
+	pinctrl-0 = <&pcie_2_pin>;
+
+	assigned-clocks = <&gcc GCC_PCIE2_AXI_M_CLK>,
+			<&gcc GCC_PCIE2_RCHNG_CLK>;
+	assigned-clock-rates = <342857143>,
+			<100000000>;
+
+	perst-gpios = <&tlmm 29 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 30 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
+&pcie3_phy {
+	status = "okay";
+};
+
+&pcie3 {
+	// NVME, PCI x2
+	pinctrl-names = "default";
+	pinctrl-0 = <&pcie_3_pin>;
+
+	assigned-clocks = <&gcc GCC_PCIE3_AXI_M_CLK>,
+			<&gcc GCC_PCIE3_RCHNG_CLK>;
+	assigned-clock-rates = <342857143>,
+			<100000000>;
+
+	vddpe-3v3-supply = <&nvme_regulator>;
+	reset-names = "powerctl";
+	perst-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 33 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
+&sdhc_1 {
+	pinctrl-0 = <&sdc_default_state>;
+	pinctrl-names = "default";
+	mmc-ddr-1_8v;
+	mmc-hs200-1_8v;
+	mmc-hs400-1_8v;
+	mmc-hs400-enhanced-strobe;
+	max-frequency = <384000000>;
+	bus-width = <8>;
+	status = "okay";
+
+	partitions-boot0 {
+                compatible = "fixed-partitions";
+                #address-cells = <2>;
+                #size-cells = <2>;
+                disk-name = "mmcblk%dboot0";
+
+                qickstart@0 {
+                        label = "qickstart0";
+                        reg = /bits/64 <0 (2 * 1024 * 1024)>;
+                        read-only;
+                };
+
+                serial@0 {
+                        label = "fbxserial";
+                        reg = /bits/64 <(-1) (8 * 1024)>;
+                        read-only;
+                };
+
+		fbxboot@0 {
+                        label = "fbxboot";
+                        reg = /bits/64 <(-1) (8 * 1024)>;
+                        read-only;
+                };
+
+		lang@0 {
+			label = "lang";
+			reg = /bits/64 <(-1) (128 * 1024)>;
+			read-only;
+		};
+
+		calibration@0 {
+			label = "calibration";
+			reg = /bits/64 <(-1) (64 * 1024)>;
+			read-only;
+		};
+	};
+
+	partitions-boot1 {
+                compatible = "fixed-partitions";
+                #address-cells = <2>;
+                #size-cells = <2>;
+                disk-name = "mmcblk%dboot1";
+
+                qickstart@0 {
+                        label = "qickstart1";
+                        reg = /bits/64 <0 (2 * 1024 * 1024)>;
+                        read-only;
+                };
+	};
+
+	partitions-main {
+		compatible = "fixed-partitions";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		disk-name = "mmcblk%d";
+
+		bank0@0 {
+			label = "bank0";
+			reg = /bits/64 <(-1) (32 * 1024 * 1024)>;
+			read-only;
+		};
+
+		bank1@0 {
+			label= "bank1";
+			reg = /bits/64 <(-1) (256 * 1024 * 1024)>;
+		};
+
+		nvram@0 {
+			label= "nvram";
+			reg = /bits/64 <(-1) (4 * 1024 * 1024)>;
+		};
+
+		config@0 {
+			label= "config";
+			reg = /bits/64 <(-1) (32 * 1024 * 1024)>;
+		};
+
+		newbank0@0 {
+			label= "new_bank0";
+			reg = /bits/64 <(-1) (32 * 1024 * 1024)>;
+		};
+
+		newboot@0 {
+			label= "newboot";
+			reg = /bits/64 <(-1) (2 * 1024 * 1024)>;
+		};
+
+                fbxmbr@0 {
+			label = "fbxmbr";
+			reg = /bits/64 <(-1) (4096)>;
+                };
+
+		fortknox@0 {
+			label = "fortknox";
+			reg = /bits/64 <(-1) (128 * 1024 * 1024)>;
+                };
+
+		userdata@0 {
+			label = "userdata";
+			reg = /bits/64 <(-1) (-1)>;
+                };
+	};
+};
+
+&sleep_clk {
+	clock-frequency = <32000>;
+};
+
+&usb_0_qmpphy {
+	status = "okay";
+};
+
+&usb_0_qusbphy {
+	status = "okay";
+};
+
+&tlmm {
+	pcie_0_pin: pcie-0-state {
+		clkreq-n-pins {
+			pins = "gpio22";
+			function = "pcie0_clk";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+
+		perst-n-pins {
+			pins = "gpio23";
+			function = "gpio";
+			drive-strength = <8>;
+			bias-pull-down;
+			output-low;
+		};
+
+		wake-n-pins {
+			pins = "gpio24";
+			function = "pcie0_wake";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+	};
+
+	pcie_2_pin: pcie-2-state {
+		clkreq-n-pins {
+			pins = "gpio28";
+			function = "pcie2_clk";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+
+		perst-n-pins {
+			pins = "gpio29";
+			function = "gpio";
+			drive-strength = <8>;
+			bias-pull-down;
+			output-low;
+		};
+
+		wake-n-pins {
+			pins = "gpio30";
+			function = "pcie2_wake";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+	};
+
+	pcie_3_pin: pcie-3-state {
+		clkreq-n-pins {
+			pins = "gpio31";
+			function = "pcie3_clk";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+
+		perst-n-pins {
+			pins = "gpio32";
+			function = "gpio";
+			drive-strength = <8>;
+			bias-pull-up;
+			output-low;
+		};
+
+		wake-n-pins {
+			pins = "gpio33";
+			function = "pcie3_wake";
+			drive-strength = <6>;
+			bias-pull-up;
+		};
+	};
+
+	sdc_default_state: sdc-default-state {
+		clk-pins {
+			pins = "gpio5";
+			function = "sdc_clk";
+			drive-strength = <8>;
+			bias-disable;
+		};
+
+		cmd-pins {
+			pins = "gpio4";
+			function = "sdc_cmd";
+			drive-strength = <8>;
+			bias-pull-up;
+		};
+
+		data-pins {
+			pins = "gpio0", "gpio1", "gpio2",
+			       "gpio3", "gpio6", "gpio7",
+			       "gpio8", "gpio9";
+			function = "sdc_data";
+			drive-strength = <8>;
+			bias-pull-up;
+		};
+
+		rclk-pins {
+			pins = "gpio10";
+			function = "sdc_rclk";
+			drive-strength = <8>;
+			bias-pull-down;
+		};
+	};
+
+	i2c_1_pins: i2c-1-state {
+		pins = "gpio36", "gpio37";
+		function = "blsp1_i2c";
+		drive-strength = <8>;
+		bias-disable;
+	};
+
+	spi_3_pins: spi-3-state {
+		pins = "gpio15", "gpio16",
+			"gpio17", "gpio18", "gpio19", "gpio20", "gpio21";
+		function = "blsp3_spi";
+		drive-strength = <8>;
+		bias-disable;
+	};
+
+	i2c_4_pins: i2c-4-state {
+		pins = "gpio50", "gpio51";
+		function = "blsp4_i2c";
+		drive-strength = <8>;
+		bias-disable;
+	};
+
+	uart_0_pins: uart-0-state {
+		pins = "gpio13", "gpio14";
+		function = "blsp0_uart";
+		drive-strength = <8>;
+		bias-disable;
+	};
+};
+
+&usb3 {
+	status = "okay";
+};
+
+&xo_board_clk {
+	clock-frequency = <24000000>;
+};
+
+&mdio {
+	status = "okay";
+
+	clock-frequency = <6250000>;
+
+	reset-gpio = <&tlmm 60 GPIO_ACTIVE_LOW>;
+	reset-delay-us = <1000>;
+	reset-post-delay-us = <1000>;
+
+	pinctrl-0 = <&mdio_pins>;
+	pinctrl-names = "default";
+
+	ess_qphy0: qca8084-qphy0@1 {
+		reg = <1>;
+		/*
+		* phy address can be remapped, so we need to
+		* designate which port we are actually talking of
+		*/
+		qca,phy-type = <0>;
+		qca,phy-physid = <0>;
+		qca,led-tlmm-pin = <16>;
+		qca,led-act-blink;
+		qca,led-link-speed-any;
+		compatible = "ethernet-phy-id004d.d180", "ethernet-phy-ieee802.3-c22";
+		status = "okay";
+	};
+
+	ess_qphy1: qca8084-qphy1@2 {
+		reg = <2>;
+		qca,phy-type = <0>;
+		qca,phy-physid = <1>;
+		qca,led-tlmm-pin = <17>;
+		qca,led-act-blink;
+		qca,led-link-speed-any;
+		compatible = "ethernet-phy-id004d.d180", "ethernet-phy-ieee802.3-c22";
+		status = "okay";
+	};
+
+	ess_qphy2: qca8084-qphy2@3 {
+		reg = <3>;
+		qca,phy-type = <0>;
+		qca,phy-physid = <2>;
+		qca,led-tlmm-pin = <18>;
+		qca,led-act-blink;
+		qca,led-link-speed-any;
+		compatible = "ethernet-phy-id004d.d180", "ethernet-phy-ieee802.3-c22";
+		status = "okay";
+	};
+
+	ess_qphy3: qca8084-qphy3@4 {
+		reg = <4>;
+		qca,phy-type = <0>;
+		qca,phy-physid = <3>;
+		qca,led-tlmm-pin = <19>;
+		qca,led-act-blink;
+		qca,led-link-speed-any;
+		compatible = "ethernet-phy-id004d.d180", "ethernet-phy-ieee802.3-c22";
+		status = "okay";
+	};
+
+	qca8084-uniphy1@5 {
+		/*
+		 * actual serdes connected to CPU, it's mandatory to allocate
+		 * an MDIO address for this port
+		 */
+		reg = <5>;
+		qca,phy-type = <1>;
+		qca,phy-physid = <1>;
+		compatible = "ethernet-phy-id004d.d180";
+		status = "okay";
+	};
+
+	qca8084-xpcs@6 {
+		/*
+		 * actual serdes connected to CPU, it's mandatory to allocate
+		 * an MDIO address for this port
+		 */
+		reg = <6>;
+		qca,phy-type = <1>;
+		qca,phy-physid = <2>;
+		compatible = "ethernet-phy-id004d.d180";
+		status = "okay";
+	};
+};
+
+&ess_phys_port0 {
+	mdio-bus = <&mdio>;
+	status = "okay";
+	label = "swp1";
+	phy-handle = <&ess_qphy0>;
+	phy-mode = "10g-qxgmii";
+	fbxserial-mac-address = <0>;
+};
+
+&ess_phys_port1 {
+	mdio-bus = <&mdio>;
+	status = "okay";
+	label = "swp2";
+	phy-handle = <&ess_qphy1>;
+	phy-mode = "10g-qxgmii";
+	fbxserial-mac-address = <0>;
+};
+
+&ess_phys_port2 {
+	mdio-bus = <&mdio>;
+	status = "okay";
+	label = "swp3";
+	phy-handle = <&ess_qphy2>;
+	phy-mode = "10g-qxgmii";
+	fbxserial-mac-address = <0>;
+};
+
+&ess_phys_port3 {
+	mdio-bus = <&mdio>;
+	status = "okay";
+	label = "swp4";
+	phy-handle = <&ess_qphy3>;
+	phy-mode = "10g-qxgmii";
+	fbxserial-mac-address = <0>;
+};
+
+&ess_phys_port4 {
+	status = "okay";
+	label = "ftth0";
+	phy-mode = "10gbase-r";
+	fbxserial-mac-address = <0>;
+	managed = "in-band-status";
+};
+
+&ess_phys_port5 {
+	status = "okay";
+	label = "sfplan0";
+	phy-mode = "1000base-x";
+	fbxserial-mac-address = <0>;
+	managed = "in-band-status";
+#ifdef USE_PHYLINK_SFP
+	sfp = <&sfp_lan>;
+#endif
+};
+
+&imem_reset_reason {
+	status = "okay";
+	qcom-fbx,scm-el3-reasons;
+};
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/scripts/dtc/include-prefixes/arm64/qcom/ipq9574-firmware-optee.dtsi	2023-07-20 17:19:14.714368621 +0200
@@ -0,0 +1,23 @@
+
+/ {
+	reserved-memory {
+		optee-shared-memory@4ae00000 {
+			no-map;
+			reg = <0x0 0x4ae00000 0x0 0x00100000>;
+		};
+
+		tz@4a600000 {
+			// for OP-TEE: secure RAM size raised to 8MiB.
+			reg = <0x0 0x4a600000 0x0 0x800000>;
+			no-map;
+		};
+	};
+
+	firmware {
+		optee {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+			skip-enumeration;
+		};
+	};
+};
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/scripts/dtc/include-prefixes/dt-bindings/clock/qcom,nsscc-ipq9574.h	2023-05-22 20:30:14.549854255 +0200
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_IPQ_NSSCC_9574_H
+#define _DT_BINDINGS_CLOCK_IPQ_NSSCC_9574_H
+
+#define NSS_CC_CE_APB_CLK					0
+#define NSS_CC_CE_AXI_CLK					1
+#define NSS_CC_CE_CLK_SRC					2
+#define NSS_CC_CFG_CLK_SRC					3
+#define NSS_CC_CLC_AXI_CLK					4
+#define NSS_CC_CLC_CLK_SRC					5
+#define NSS_CC_CRYPTO_CLK					6
+#define NSS_CC_CRYPTO_CLK_SRC					7
+#define NSS_CC_CRYPTO_PPE_CLK					8
+#define NSS_CC_HAQ_AHB_CLK					9
+#define NSS_CC_HAQ_AXI_CLK					10
+#define NSS_CC_HAQ_CLK_SRC					11
+#define NSS_CC_IMEM_AHB_CLK					12
+#define NSS_CC_IMEM_CLK_SRC					13
+#define NSS_CC_IMEM_QSB_CLK					14
+#define NSS_CC_INT_CFG_CLK_SRC					15
+#define NSS_CC_NSS_CSR_CLK					16
+#define NSS_CC_NSSNOC_CE_APB_CLK				17
+#define NSS_CC_NSSNOC_CE_AXI_CLK				18
+#define NSS_CC_NSSNOC_CLC_AXI_CLK				19
+#define NSS_CC_NSSNOC_CRYPTO_CLK				20
+#define NSS_CC_NSSNOC_HAQ_AHB_CLK				21
+#define NSS_CC_NSSNOC_HAQ_AXI_CLK				22
+#define NSS_CC_NSSNOC_IMEM_AHB_CLK				23
+#define NSS_CC_NSSNOC_IMEM_QSB_CLK				24
+#define NSS_CC_NSSNOC_NSS_CSR_CLK				25
+#define NSS_CC_NSSNOC_PPE_CFG_CLK				26
+#define NSS_CC_NSSNOC_PPE_CLK					27
+#define NSS_CC_NSSNOC_UBI32_AHB0_CLK				28
+#define NSS_CC_NSSNOC_UBI32_AXI0_CLK				29
+#define NSS_CC_NSSNOC_UBI32_INT0_AHB_CLK			30
+#define NSS_CC_NSSNOC_UBI32_NC_AXI0_1_CLK			31
+#define NSS_CC_NSSNOC_UBI32_NC_AXI0_CLK				32
+#define NSS_CC_PORT1_MAC_CLK					33
+#define NSS_CC_PORT1_RX_CLK					34
+#define NSS_CC_PORT1_RX_CLK_SRC					35
+#define NSS_CC_PORT1_RX_DIV_CLK_SRC				36
+#define NSS_CC_PORT1_TX_CLK					37
+#define NSS_CC_PORT1_TX_CLK_SRC					38
+#define NSS_CC_PORT1_TX_DIV_CLK_SRC				39
+#define NSS_CC_PORT2_MAC_CLK					40
+#define NSS_CC_PORT2_RX_CLK					41
+#define NSS_CC_PORT2_RX_CLK_SRC					42
+#define NSS_CC_PORT2_RX_DIV_CLK_SRC				43
+#define NSS_CC_PORT2_TX_CLK					44
+#define NSS_CC_PORT2_TX_CLK_SRC					45
+#define NSS_CC_PORT2_TX_DIV_CLK_SRC				46
+#define NSS_CC_PORT3_MAC_CLK					47
+#define NSS_CC_PORT3_RX_CLK					48
+#define NSS_CC_PORT3_RX_CLK_SRC					49
+#define NSS_CC_PORT3_RX_DIV_CLK_SRC				50
+#define NSS_CC_PORT3_TX_CLK					51
+#define NSS_CC_PORT3_TX_CLK_SRC					52
+#define NSS_CC_PORT3_TX_DIV_CLK_SRC				53
+#define NSS_CC_PORT4_MAC_CLK					54
+#define NSS_CC_PORT4_RX_CLK					55
+#define NSS_CC_PORT4_RX_CLK_SRC					56
+#define NSS_CC_PORT4_RX_DIV_CLK_SRC				57
+#define NSS_CC_PORT4_TX_CLK					58
+#define NSS_CC_PORT4_TX_CLK_SRC					59
+#define NSS_CC_PORT4_TX_DIV_CLK_SRC				60
+#define NSS_CC_PORT5_MAC_CLK					61
+#define NSS_CC_PORT5_RX_CLK					62
+#define NSS_CC_PORT5_RX_CLK_SRC					63
+#define NSS_CC_PORT5_RX_DIV_CLK_SRC				64
+#define NSS_CC_PORT5_TX_CLK					65
+#define NSS_CC_PORT5_TX_CLK_SRC					66
+#define NSS_CC_PORT5_TX_DIV_CLK_SRC				67
+#define NSS_CC_PORT6_MAC_CLK					68
+#define NSS_CC_PORT6_RX_CLK					69
+#define NSS_CC_PORT6_RX_CLK_SRC					70
+#define NSS_CC_PORT6_RX_DIV_CLK_SRC				71
+#define NSS_CC_PORT6_TX_CLK					72
+#define NSS_CC_PORT6_TX_CLK_SRC					73
+#define NSS_CC_PORT6_TX_DIV_CLK_SRC				74
+#define NSS_CC_PPE_CLK_SRC					75
+#define NSS_CC_PPE_EDMA_CFG_CLK					76
+#define NSS_CC_PPE_EDMA_CLK					77
+#define NSS_CC_PPE_SWITCH_BTQ_CLK				78
+#define NSS_CC_PPE_SWITCH_CFG_CLK				79
+#define NSS_CC_PPE_SWITCH_CLK					80
+#define NSS_CC_PPE_SWITCH_IPE_CLK				81
+#define NSS_CC_UBI0_CLK_SRC					82
+#define NSS_CC_UBI0_DIV_CLK_SRC					83
+#define NSS_CC_UBI1_CLK_SRC					84
+#define NSS_CC_UBI1_DIV_CLK_SRC					85
+#define NSS_CC_UBI2_CLK_SRC					86
+#define NSS_CC_UBI2_DIV_CLK_SRC					87
+#define NSS_CC_UBI32_AHB0_CLK					88
+#define NSS_CC_UBI32_AHB1_CLK					89
+#define NSS_CC_UBI32_AHB2_CLK					90
+#define NSS_CC_UBI32_AHB3_CLK					91
+#define NSS_CC_UBI32_AXI0_CLK					92
+#define NSS_CC_UBI32_AXI1_CLK					93
+#define NSS_CC_UBI32_AXI2_CLK					94
+#define NSS_CC_UBI32_AXI3_CLK					95
+#define NSS_CC_UBI32_CORE0_CLK					96
+#define NSS_CC_UBI32_CORE1_CLK					97
+#define NSS_CC_UBI32_CORE2_CLK					98
+#define NSS_CC_UBI32_CORE3_CLK					99
+#define NSS_CC_UBI32_INTR0_AHB_CLK				100
+#define NSS_CC_UBI32_INTR1_AHB_CLK				101
+#define NSS_CC_UBI32_INTR2_AHB_CLK				102
+#define NSS_CC_UBI32_INTR3_AHB_CLK				103
+#define NSS_CC_UBI32_NC_AXI0_CLK				104
+#define NSS_CC_UBI32_NC_AXI1_CLK				105
+#define NSS_CC_UBI32_NC_AXI2_CLK				106
+#define NSS_CC_UBI32_NC_AXI3_CLK				107
+#define NSS_CC_UBI32_UTCM0_CLK					108
+#define NSS_CC_UBI32_UTCM1_CLK					109
+#define NSS_CC_UBI32_UTCM2_CLK					110
+#define NSS_CC_UBI32_UTCM3_CLK					111
+#define NSS_CC_UBI3_CLK_SRC					112
+#define NSS_CC_UBI3_DIV_CLK_SRC					113
+#define NSS_CC_UBI_AXI_CLK_SRC					114
+#define NSS_CC_UBI_NC_AXI_BFDCD_CLK_SRC				115
+#define NSS_CC_UNIPHY_PORT1_RX_CLK				116
+#define NSS_CC_UNIPHY_PORT1_TX_CLK				117
+#define NSS_CC_UNIPHY_PORT2_RX_CLK				118
+#define NSS_CC_UNIPHY_PORT2_TX_CLK				119
+#define NSS_CC_UNIPHY_PORT3_RX_CLK				120
+#define NSS_CC_UNIPHY_PORT3_TX_CLK				121
+#define NSS_CC_UNIPHY_PORT4_RX_CLK				122
+#define NSS_CC_UNIPHY_PORT4_TX_CLK				123
+#define NSS_CC_UNIPHY_PORT5_RX_CLK				124
+#define NSS_CC_UNIPHY_PORT5_TX_CLK				125
+#define NSS_CC_UNIPHY_PORT6_RX_CLK				126
+#define NSS_CC_UNIPHY_PORT6_TX_CLK				127
+#define NSS_CC_XGMAC0_PTP_REF_CLK				128
+#define NSS_CC_XGMAC0_PTP_REF_DIV_CLK_SRC			129
+#define NSS_CC_XGMAC1_PTP_REF_CLK				130
+#define NSS_CC_XGMAC1_PTP_REF_DIV_CLK_SRC			131
+#define NSS_CC_XGMAC2_PTP_REF_CLK				132
+#define NSS_CC_XGMAC2_PTP_REF_DIV_CLK_SRC			133
+#define NSS_CC_XGMAC3_PTP_REF_CLK				134
+#define NSS_CC_XGMAC3_PTP_REF_DIV_CLK_SRC			135
+#define NSS_CC_XGMAC4_PTP_REF_CLK				136
+#define NSS_CC_XGMAC4_PTP_REF_DIV_CLK_SRC			137
+#define NSS_CC_XGMAC5_PTP_REF_CLK				138
+#define NSS_CC_XGMAC5_PTP_REF_DIV_CLK_SRC			139
+#define UBI32_PLL						140
+#define UBI32_PLL_MAIN						141
+
+#endif
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/scripts/dtc/include-prefixes/dt-bindings/clock/qcom,uniphycc-ipq9574.h	2023-05-22 20:30:14.549854255 +0200
@@ -0,0 +1,11 @@
+#ifndef _DT_BINDINGS_CLOCK_IPQ_UNIPHYCC_95XX_H
+#define _DT_BINDINGS_CLOCK_IPQ_UNIPHYCC_95XX_H
+
+#define UNIPHY0_GCC_RX_CLK				0
+#define UNIPHY0_GCC_TX_CLK				1
+#define UNIPHY1_GCC_RX_CLK				2
+#define UNIPHY1_GCC_TX_CLK				3
+#define UNIPHY2_GCC_RX_CLK				4
+#define UNIPHY2_GCC_TX_CLK				5
+
+#endif
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/scripts/dtc/include-prefixes/dt-bindings/net/realtek-phy-rtl8211f.h	2023-03-09 19:50:18.846938506 +0100
@@ -0,0 +1,19 @@
+/*
+ * Device Tree constants for Realtek rtl8211f PHY
+ *
+ * Author: Remi Pommarel
+ *
+ * License: GPL
+ * Copyright (c) 2017 Remi Pommarel
+ */
+
+#ifndef _DT_BINDINGS_RTL_8211F_H
+#define _DT_BINDINGS_RTL_8211F_H
+
+#define RTL8211F_LED_MODE_10M			0x1
+#define RTL8211F_LED_MODE_100M			0x2
+#define RTL8211F_LED_MODE_1000M			0x8
+#define RTL8211F_LED_MODE_ACT			0x10
+
+#endif
+
--- /dev/null	2024-03-28 18:35:30.816070989 +0100
+++ linux-6.4-fbx/scripts/dtc/include-prefixes/dt-bindings/reset/qcom,nsscc-ipq9574.h	2023-05-22 20:30:14.549854255 +0200
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_NSSCC_9048_H
+#define _DT_BINDINGS_RESET_IPQ_NSSCC_9048_H
+
+#define NSS_CC_CE_BCR			1
+#define NSS_CC_CLC_BCR			2
+#define NSS_CC_EIP197_BCR		3
+#define NSS_CC_HAQ_BCR			4
+#define NSS_CC_IMEM_BCR			5
+#define NSS_CC_MAC_BCR			6
+#define NSS_CC_PPE_BCR			7
+#define NSS_CC_UBI_BCR			8
+#define NSS_CC_UNIPHY_BCR		9
+#define UBI3_CLKRST_CLAMP_ENABLE	10
+#define UBI3_CORE_CLAMP_ENABLE		11
+#define UBI2_CLKRST_CLAMP_ENABLE	12
+#define UBI2_CORE_CLAMP_ENABLE		13
+#define UBI1_CLKRST_CLAMP_ENABLE	14
+#define UBI1_CORE_CLAMP_ENABLE		15
+#define UBI0_CLKRST_CLAMP_ENABLE	16
+#define UBI0_CORE_CLAMP_ENABLE		17
+#define NSSNOC_NSS_CSR_ARES		18
+#define NSS_CSR_ARES			19
+#define PPE_BTQ_ARES			20
+#define PPE_IPE_ARES			21
+#define PPE_ARES			22
+#define PPE_CFG_ARES			23
+#define PPE_EDMA_ARES			24
+#define PPE_EDMA_CFG_ARES		25
+#define CRY_PPE_ARES			26
+#define NSSNOC_PPE_ARES			27
+#define NSSNOC_PPE_CFG_ARES		28
+#define PORT1_MAC_ARES			29
+#define PORT2_MAC_ARES			30
+#define PORT3_MAC_ARES			31
+#define PORT4_MAC_ARES			32
+#define PORT5_MAC_ARES			33
+#define PORT6_MAC_ARES			34
+#define XGMAC0_PTP_REF_ARES		35
+#define XGMAC1_PTP_REF_ARES		36
+#define XGMAC2_PTP_REF_ARES		37
+#define XGMAC3_PTP_REF_ARES		38
+#define XGMAC4_PTP_REF_ARES		39
+#define XGMAC5_PTP_REF_ARES		40
+#define HAQ_AHB_ARES			41
+#define HAQ_AXI_ARES			42
+#define NSSNOC_HAQ_AHB_ARES		43
+#define NSSNOC_HAQ_AXI_ARES		44
+#define CE_APB_ARES			45
+#define CE_AXI_ARES			46
+#define NSSNOC_CE_APB_ARES		47
+#define NSSNOC_CE_AXI_ARES		48
+#define CRYPTO_ARES			49
+#define NSSNOC_CRYPTO_ARES		50
+#define NSSNOC_NC_AXI0_1_ARES		51
+#define UBI0_CORE_ARES			52
+#define UBI1_CORE_ARES			53
+#define UBI2_CORE_ARES			54
+#define UBI3_CORE_ARES			55
+#define NC_AXI0_ARES			56
+#define UTCM0_ARES			57
+#define NC_AXI1_ARES			58
+#define UTCM1_ARES			59
+#define NC_AXI2_ARES			60
+#define UTCM2_ARES			61
+#define NC_AXI3_ARES			62
+#define UTCM3_ARES			63
+#define NSSNOC_NC_AXI0_ARES		64
+#define AHB0_ARES			65
+#define INTR0_AHB_ARES			66
+#define AHB1_ARES			67
+#define INTR1_AHB_ARES			68
+#define AHB2_ARES			69
+#define INTR2_AHB_ARES			70
+#define AHB3_ARES			71
+#define INTR3_AHB_ARES			72
+#define NSSNOC_AHB0_ARES		73
+#define NSSNOC_INT0_AHB_ARES		74
+#define AXI0_ARES			75
+#define AXI1_ARES			76
+#define AXI2_ARES			77
+#define AXI3_ARES			78
+#define NSSNOC_AXI0_ARES		79
+#define IMEM_QSB_ARES			80
+#define NSSNOC_IMEM_QSB_ARES		81
+#define IMEM_AHB_ARES			82
+#define NSSNOC_IMEM_AHB_ARES		83
+#define UNIPHY_PORT1_RX_ARES		84
+#define UNIPHY_PORT1_TX_ARES		85
+#define UNIPHY_PORT2_RX_ARES		86
+#define UNIPHY_PORT2_TX_ARES		87
+#define UNIPHY_PORT3_RX_ARES		88
+#define UNIPHY_PORT3_TX_ARES		89
+#define UNIPHY_PORT4_RX_ARES		90
+#define UNIPHY_PORT4_TX_ARES		91
+#define UNIPHY_PORT5_RX_ARES		92
+#define UNIPHY_PORT5_TX_ARES		93
+#define UNIPHY_PORT6_RX_ARES		94
+#define UNIPHY_PORT6_TX_ARES		95
+#define PORT1_RX_ARES			96
+#define PORT1_TX_ARES			97
+#define PORT2_RX_ARES			98
+#define PORT2_TX_ARES			99
+#define PORT3_RX_ARES			100
+#define PORT3_TX_ARES			101
+#define PORT4_RX_ARES			102
+#define PORT4_TX_ARES			103
+#define PORT5_RX_ARES			104
+#define PORT5_TX_ARES			105
+#define PORT6_RX_ARES			106
+#define PORT6_TX_ARES			107
+#define PPE_FULL_RESET			108
+#define UNIPHY0_SOFT_RESET		109
+#define UNIPHY1_SOFT_RESET		110
+#define UNIPHY2_SOFT_RESET		111
+#define UNIPHY_PORT1_ARES		112
+#define UNIPHY_PORT2_ARES		113
+#define UNIPHY_PORT3_ARES		114
+#define UNIPHY_PORT4_ARES		115
+#define UNIPHY_PORT5_ARES		116
+#define UNIPHY_PORT6_ARES		117
+#define NSSPORT1_RESET			118
+#define NSSPORT2_RESET			119
+#define NSSPORT3_RESET			120
+#define NSSPORT4_RESET			121
+#define NSSPORT5_RESET			122
+#define NSSPORT6_RESET			123
+#define EDMA_HW_RESET			124
+
+#endif
